code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from __future__ import with_statement
import sys
try:
from setuptools import setup, Extension, Command
except ImportError:
from distutils.core import setup, Extension, Command
from distutils.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsExecError, \
DistutilsPlatformError
IS_PYPY = hasattr(sys, 'pypy_translation_info')
if sys.platform == 'win32' and sys.version_info > (2, 6):
# 2.6's distutils.msvc9compiler can raise an IOError when failing to
# find the compiler
# It can also raise ValueError http://bugs.python.org/issue7511
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError,
IOError, ValueError)
else:
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
class BuildFailed(Exception):
pass
class ve_build_ext(build_ext):
# This class allows C extension building to fail.
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError:
raise BuildFailed()
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except ext_errors:
raise BuildFailed()
def run_setup(with_binary):
cmdclass = {'test': Command}
kw = {'cmdclass': cmdclass}
# TODO: c extensions not working right now, disabling
if 0: #with_binary:
kw.update(
ext_modules=[Extension("fastpolymath_c",
sources=["polypasswordhasher/fastpolymath.c"],
include_dirs=['polypasswordhasher'])],
cmdclass=dict(cmdclass, build_ext=ve_build_ext),
)
setup(
name="PolyPasswordHasher",
version="0.1.0-alpha",
packages=['polypasswordhasher', 'polypasswordhasher.tests'],
url='https://github.com/PolyPasswordHasher/PolyPasswordHasher-Python',
description="A Password hash storage scheme that prevents an attacker from cracking passwords individually and efficiently.",
long_description=open('README.rst').read(),
author="PolyPasswordHasher Devs",
author_email="<EMAIL>",
install_requires=[
"pycrypto"
],
classifiers=['Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Security :: Cryptography',
'Topic :: Utilities'],
**kw
)
try:
run_setup(not IS_PYPY)
except BuildFailed:
BUILD_EXT_WARNING = ("WARNING: The C extension could not be compiled, "
"fast math is not enabled.")
print('*' * 75)
print(BUILD_EXT_WARNING)
print("Failure information, if any, is above.")
print("I'm retrying the build without the C extension now.")
print('*' * 75)
run_setup(False)
print('*' * 75)
print(BUILD_EXT_WARNING)
print("Plain-Python installation succeeded.")
print('*' * 75) | setup.py | from __future__ import with_statement
import sys
try:
from setuptools import setup, Extension, Command
except ImportError:
from distutils.core import setup, Extension, Command
from distutils.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsExecError, \
DistutilsPlatformError
IS_PYPY = hasattr(sys, 'pypy_translation_info')
if sys.platform == 'win32' and sys.version_info > (2, 6):
# 2.6's distutils.msvc9compiler can raise an IOError when failing to
# find the compiler
# It can also raise ValueError http://bugs.python.org/issue7511
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError,
IOError, ValueError)
else:
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
class BuildFailed(Exception):
pass
class ve_build_ext(build_ext):
# This class allows C extension building to fail.
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError:
raise BuildFailed()
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except ext_errors:
raise BuildFailed()
def run_setup(with_binary):
cmdclass = {'test': Command}
kw = {'cmdclass': cmdclass}
# TODO: c extensions not working right now, disabling
if 0: #with_binary:
kw.update(
ext_modules=[Extension("fastpolymath_c",
sources=["polypasswordhasher/fastpolymath.c"],
include_dirs=['polypasswordhasher'])],
cmdclass=dict(cmdclass, build_ext=ve_build_ext),
)
setup(
name="PolyPasswordHasher",
version="0.1.0-alpha",
packages=['polypasswordhasher', 'polypasswordhasher.tests'],
url='https://github.com/PolyPasswordHasher/PolyPasswordHasher-Python',
description="A Password hash storage scheme that prevents an attacker from cracking passwords individually and efficiently.",
long_description=open('README.rst').read(),
author="PolyPasswordHasher Devs",
author_email="<EMAIL>",
install_requires=[
"pycrypto"
],
classifiers=['Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Security :: Cryptography',
'Topic :: Utilities'],
**kw
)
try:
run_setup(not IS_PYPY)
except BuildFailed:
BUILD_EXT_WARNING = ("WARNING: The C extension could not be compiled, "
"fast math is not enabled.")
print('*' * 75)
print(BUILD_EXT_WARNING)
print("Failure information, if any, is above.")
print("I'm retrying the build without the C extension now.")
print('*' * 75)
run_setup(False)
print('*' * 75)
print(BUILD_EXT_WARNING)
print("Plain-Python installation succeeded.")
print('*' * 75) | 0.300438 | 0.102574 |
import tkinter as Tkinter
from datetime import datetime
import time
import http.server
import threading
from urllib.parse import urlsplit
class StopwatchServer(http.server.BaseHTTPRequestHandler):
def do_POST(self):
global label
global running
print(self.path)
url = urlsplit(self.path)
if url.path == "/press":
if url.query == "id=1" and not running:
Start(label)
elif url.query == "id=2" and running:
Stop()
elif url.path == "/hold":
Reset(label)
self.send_response(200)
self.end_headers()
message = "ACK"
self.wfile.write(bytes(message, "utf8"))
class ServerThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.is_server_running = False
self.daemon = True
def run(self):
server_address = ("", 8000)
self.httpd = http.server.HTTPServer(server_address, StopwatchServer)
self.httpd.serve_forever()
start_time = None
running = False
def counter_label(label):
def count():
if running:
global counter
global start_time
tt = (time.time_ns() - start_time) // (1000 * 1000)
dt = datetime.utcfromtimestamp(tt // 1000)
display = dt.strftime("%M:%S") + ".{:02d}".format((tt % 1000) // 10)
label["text"] = display # Or label.config(text=display)
# label.after(arg1, arg2) delays by
# first argument given in milliseconds
# and then calls the function given as second argument.
# Generally like here we need to call the
# function in which it is present repeatedly.
# Delays by 1ms and call count again.
label.after(1, count)
# Triggering the start of the counter.
count()
# start function of the stopwatch
def Start(label):
global running
global start_time
running = True
start_time = time.time_ns()
counter_label(label)
# Stop function of the stopwatch
def Stop():
global running
running = False
# Reset function of the stopwatch
def Reset(label):
global running
global start_time
running = False
# If rest is pressed after pressing stop.
label["text"] = "00:00.00"
root = Tkinter.Tk()
root.attributes("-fullscreen", True)
root.configure(background="black")
root.title("Stopwatch")
label = Tkinter.Label(
root, text="00:00.00", fg="red", bg="black", font="Verdana 240 bold"
)
label.pack(expand=True)
f = Tkinter.Frame(root)
server_thread = ServerThread()
server_thread.start()
root.bind("q", lambda _: root.destroy())
root.mainloop() | stopwatch.py | import tkinter as Tkinter
from datetime import datetime
import time
import http.server
import threading
from urllib.parse import urlsplit
class StopwatchServer(http.server.BaseHTTPRequestHandler):
def do_POST(self):
global label
global running
print(self.path)
url = urlsplit(self.path)
if url.path == "/press":
if url.query == "id=1" and not running:
Start(label)
elif url.query == "id=2" and running:
Stop()
elif url.path == "/hold":
Reset(label)
self.send_response(200)
self.end_headers()
message = "ACK"
self.wfile.write(bytes(message, "utf8"))
class ServerThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.is_server_running = False
self.daemon = True
def run(self):
server_address = ("", 8000)
self.httpd = http.server.HTTPServer(server_address, StopwatchServer)
self.httpd.serve_forever()
start_time = None
running = False
def counter_label(label):
def count():
if running:
global counter
global start_time
tt = (time.time_ns() - start_time) // (1000 * 1000)
dt = datetime.utcfromtimestamp(tt // 1000)
display = dt.strftime("%M:%S") + ".{:02d}".format((tt % 1000) // 10)
label["text"] = display # Or label.config(text=display)
# label.after(arg1, arg2) delays by
# first argument given in milliseconds
# and then calls the function given as second argument.
# Generally like here we need to call the
# function in which it is present repeatedly.
# Delays by 1ms and call count again.
label.after(1, count)
# Triggering the start of the counter.
count()
# start function of the stopwatch
def Start(label):
global running
global start_time
running = True
start_time = time.time_ns()
counter_label(label)
# Stop function of the stopwatch
def Stop():
global running
running = False
# Reset function of the stopwatch
def Reset(label):
global running
global start_time
running = False
# If rest is pressed after pressing stop.
label["text"] = "00:00.00"
root = Tkinter.Tk()
root.attributes("-fullscreen", True)
root.configure(background="black")
root.title("Stopwatch")
label = Tkinter.Label(
root, text="00:00.00", fg="red", bg="black", font="Verdana 240 bold"
)
label.pack(expand=True)
f = Tkinter.Frame(root)
server_thread = ServerThread()
server_thread.start()
root.bind("q", lambda _: root.destroy())
root.mainloop() | 0.373762 | 0.098686 |
import click
from parsec.commands.histories.create_dataset_collection import cli as func0
from parsec.commands.histories.create_history import cli as func1
from parsec.commands.histories.create_history_tag import cli as func2
from parsec.commands.histories.delete_dataset import cli as func3
from parsec.commands.histories.delete_dataset_collection import cli as func4
from parsec.commands.histories.delete_history import cli as func5
from parsec.commands.histories.download_dataset import cli as func6
from parsec.commands.histories.download_history import cli as func7
from parsec.commands.histories.export_history import cli as func8
from parsec.commands.histories.get_current_history import cli as func9
from parsec.commands.histories.get_histories import cli as func10
from parsec.commands.histories.get_most_recently_used_history import cli as func11
from parsec.commands.histories.get_status import cli as func12
from parsec.commands.histories.show_dataset import cli as func13
from parsec.commands.histories.show_dataset_collection import cli as func14
from parsec.commands.histories.show_dataset_provenance import cli as func15
from parsec.commands.histories.show_history import cli as func16
from parsec.commands.histories.show_matching_datasets import cli as func17
from parsec.commands.histories.undelete_history import cli as func18
from parsec.commands.histories.update_dataset import cli as func19
from parsec.commands.histories.update_dataset_collection import cli as func20
from parsec.commands.histories.update_history import cli as func21
from parsec.commands.histories.upload_dataset_from_library import cli as func22
@click.group()
def cli():
pass
cli.add_command(func0)
cli.add_command(func1)
cli.add_command(func2)
cli.add_command(func3)
cli.add_command(func4)
cli.add_command(func5)
cli.add_command(func6)
cli.add_command(func7)
cli.add_command(func8)
cli.add_command(func9)
cli.add_command(func10)
cli.add_command(func11)
cli.add_command(func12)
cli.add_command(func13)
cli.add_command(func14)
cli.add_command(func15)
cli.add_command(func16)
cli.add_command(func17)
cli.add_command(func18)
cli.add_command(func19)
cli.add_command(func20)
cli.add_command(func21)
cli.add_command(func22) | parsec/commands/cmd_histories.py | import click
from parsec.commands.histories.create_dataset_collection import cli as func0
from parsec.commands.histories.create_history import cli as func1
from parsec.commands.histories.create_history_tag import cli as func2
from parsec.commands.histories.delete_dataset import cli as func3
from parsec.commands.histories.delete_dataset_collection import cli as func4
from parsec.commands.histories.delete_history import cli as func5
from parsec.commands.histories.download_dataset import cli as func6
from parsec.commands.histories.download_history import cli as func7
from parsec.commands.histories.export_history import cli as func8
from parsec.commands.histories.get_current_history import cli as func9
from parsec.commands.histories.get_histories import cli as func10
from parsec.commands.histories.get_most_recently_used_history import cli as func11
from parsec.commands.histories.get_status import cli as func12
from parsec.commands.histories.show_dataset import cli as func13
from parsec.commands.histories.show_dataset_collection import cli as func14
from parsec.commands.histories.show_dataset_provenance import cli as func15
from parsec.commands.histories.show_history import cli as func16
from parsec.commands.histories.show_matching_datasets import cli as func17
from parsec.commands.histories.undelete_history import cli as func18
from parsec.commands.histories.update_dataset import cli as func19
from parsec.commands.histories.update_dataset_collection import cli as func20
from parsec.commands.histories.update_history import cli as func21
from parsec.commands.histories.upload_dataset_from_library import cli as func22
@click.group()
def cli():
pass
cli.add_command(func0)
cli.add_command(func1)
cli.add_command(func2)
cli.add_command(func3)
cli.add_command(func4)
cli.add_command(func5)
cli.add_command(func6)
cli.add_command(func7)
cli.add_command(func8)
cli.add_command(func9)
cli.add_command(func10)
cli.add_command(func11)
cli.add_command(func12)
cli.add_command(func13)
cli.add_command(func14)
cli.add_command(func15)
cli.add_command(func16)
cli.add_command(func17)
cli.add_command(func18)
cli.add_command(func19)
cli.add_command(func20)
cli.add_command(func21)
cli.add_command(func22) | 0.228156 | 0.10466 |
import json
import time
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any, Dict, List
import click
from kaggle import KaggleApi
from kaggle.models.kaggle_models_extended import KernelPushResponse
from .. import kernel_proc
from ..builders.packaging_system import get_dependencies
from ..exception import InstallKernelError, MetaDataNotFound
from ..resource import get_username, get_dataset_slug
from .kkt_command import kkt_command
from ..fetch import PackageLocation, fetch_packages
def create_kernel_body(
python_pkgs: List[str],
extra_python_pkgs: List[str],
extra_deb_pkgs: List[str],
prologue: str,
) -> str:
return f"""{prologue}
import os
import sys
import subprocess
from pathlib import Path
def pip_freeze():
args = [sys.executable, "-m", "pip", "freeze"]
output = subprocess.run(args, capture_output=True, encoding='utf-8', check=True).stdout
return output.split("\\n")
def pip_install(pkgs, ignore_error=False):
if len(pkgs) == 0:
return
args = [sys.executable, "-m", "pip", "install", *pkgs]
try:
ret = subprocess.run(args, capture_output=True, encoding='utf-8', check=True).stdout
except subprocess.CalledProcessError as e:
ret = str(e.stdout)
return ret
def deb_install(pkgs):
if len(pkgs) == 0:
return
args = ["apt-get", "install", "-y", *pkgs]
return subprocess.run(args, capture_output=True, encoding='utf-8', check=True).stdout
def pip_download(pkgs):
Path("./pip").mkdir(exist_ok=True)
if len(pkgs) == 0:
return ""
args = [sys.executable, "-m", "pip", "download", "--no-deps", "-d", "pip", *pkgs]
return subprocess.run(args, capture_output=True, encoding='utf-8', check=True).stdout
def deb_download(pkgs):
dst_dir_path = Path("./deb")
dst_dir_path.mkdir(exist_ok=True)
if len(pkgs) == 0:
return ""
args = ["apt-get", "-o", "Dir::Cache::archives='/kaggle/working/deb/'", "install", "-y", *pkgs]
os.system(" ".join(args))
(dst_dir_path / "lock").unlink()
(dst_dir_path / "partial").rmdir()
deb_download({extra_deb_pkgs})
freeze_before_install = pip_freeze()
print(pip_install({python_pkgs}))
print(pip_install({extra_python_pkgs}), True)
freeze_after_install = pip_freeze()
diff_pkgs = set(freeze_after_install) - set(freeze_before_install)
print(pip_download(diff_pkgs))
"""
def create_kernel_push_params(
api: KaggleApi, meta_data: Dict
) -> kernel_proc.KernelPushParams:
install_kernel_slug = get_install_slug(meta_data)
install_kernel_meta_data = {
**meta_data,
"slug": install_kernel_slug,
"kernel_type": "script",
"is_private": True,
"enable_gpu": False,
"enable_internet": True,
"dataset_sources": [],
"competition_sources": [],
"kernel_sources": [],
"keywords": [],
}
return kernel_proc.KernelPushParams.of(api, install_kernel_meta_data)
def get_install_slug(meta_data: Dict) -> str:
return f"{meta_data['slug']}-install"
def get_owner_slug_from(response: KernelPushResponse):
return response.ref.split("/")[1]
def get_kernel_slug_from(response: KernelPushResponse):
return response.ref.split("/")[2]
def get_error_messages(logs: Dict) -> List[str]:
result = []
for log in logs:
stream_name = log.get("stream_name", "stderr")
data = log.get("data", "")
if stream_name == "stderr" and not (
data.startswith("[NbConvertApp]")
or data.startswith("WARNING:")
or data.startswith(" Running command")
):
result.append(data)
return result
def _get_package_locations(list_response: Dict[str, Any]) -> List[PackageLocation]:
return [
PackageLocation(item["url"], item["fileName"])
for item in list_response["files"]
]
def wait_for_install_kernel_completion(
api: KaggleApi, meta_data: Dict, kernel_slug: str, quiet: bool = False
) -> Dict[str, Any]:
owner_slug = get_username(api)
while True:
response = api.process_response(
api.kernel_output_with_http_info(owner_slug, kernel_slug)
)
if response["log"] != "":
time.sleep(5) # wait for completion of synchlonizing kernel status
result = kernel_proc.status(api, kernel_slug)
if result["status"] != "complete" or result["failureMessage"]:
logs = json.loads(response["log"])
err_messages = get_error_messages(logs)
raise InstallKernelError(err_messages)
return response
if not quiet:
click.echo("Wait for install kernel completion...")
time.sleep(10)
def upload_requirement_pkgs(
api: KaggleApi, meta_data: Dict, target_dir: Path, quiet: bool = False
):
slug = get_dataset_slug(api, meta_data)
_, dataset_slug = slug.split("/")[-2:]
license_name = "CC0-1.0"
status = api.dataset_status(slug)
if status is None:
return kernel_proc.create_dataset(
api,
dataset_slug=dataset_slug,
license_name=license_name,
target_dir=target_dir,
quiet=quiet,
)
else:
return kernel_proc.update_dataset(
api, dataset_slug=dataset_slug, target_dir=target_dir, quiet=quiet,
)
def push_install_kernel(
api: KaggleApi,
meta_data: Dict,
enable_constraint: bool,
extra_dependencies: List[str],
extra_deb_dependencies: List[str],
quiet: bool = False,
) -> KernelPushResponse:
kernel_push_params = create_kernel_push_params(api, meta_data)
dependencies = get_dependencies(enable_constraint)
prologue = meta_data.get("prologue", "")
kernel_body = create_kernel_body(
dependencies, extra_dependencies, extra_deb_dependencies, prologue
)
kernel_response = kernel_proc.push(api, kernel_push_params, kernel_body)
if not quiet:
kernel_proc.print_response(kernel_response)
click.echo("Pushing install kernel successed.")
return kernel_response
@kkt_command(is_global_command=True)
def install(
api: KaggleApi, kkt: Dict, pyproject_path: Path, quiet: bool = False, **kwargs: Dict
) -> None:
if "meta_data" not in kkt:
raise MetaDataNotFound()
meta_data = kkt["meta_data"].value
enable_constraint = kkt.get("enable_constraint", False)
extra_dependencies = kkt.get("extra_dependencies", [])
extra_deb_dependencies = kkt.get("extra_deb_dependencies", [])
kernel_response = push_install_kernel(
api,
meta_data,
enable_constraint,
extra_dependencies,
extra_deb_dependencies,
quiet,
)
kernel_slug = get_kernel_slug_from(kernel_response)
kernel_output = wait_for_install_kernel_completion(
api, meta_data=meta_data, kernel_slug=kernel_slug, quiet=quiet
)
with TemporaryDirectory() as tmp_dir:
target_dir = Path(tmp_dir)
(target_dir / "pip").mkdir(exist_ok=True)
(target_dir / "deb").mkdir(exist_ok=True)
pkg_locations = _get_package_locations(kernel_output)
fetch_files = fetch_packages(pkg_locations, target_dir, quiet=quiet)
if len(fetch_files) == 0:
click.echo("Extra required packages are nothing.")
return
ret = upload_requirement_pkgs(
api, meta_data, target_dir=target_dir, quiet=quiet
)
kernel_proc.print_response(ret) | kkt/commands/install.py | import json
import time
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any, Dict, List
import click
from kaggle import KaggleApi
from kaggle.models.kaggle_models_extended import KernelPushResponse
from .. import kernel_proc
from ..builders.packaging_system import get_dependencies
from ..exception import InstallKernelError, MetaDataNotFound
from ..resource import get_username, get_dataset_slug
from .kkt_command import kkt_command
from ..fetch import PackageLocation, fetch_packages
def create_kernel_body(
python_pkgs: List[str],
extra_python_pkgs: List[str],
extra_deb_pkgs: List[str],
prologue: str,
) -> str:
return f"""{prologue}
import os
import sys
import subprocess
from pathlib import Path
def pip_freeze():
args = [sys.executable, "-m", "pip", "freeze"]
output = subprocess.run(args, capture_output=True, encoding='utf-8', check=True).stdout
return output.split("\\n")
def pip_install(pkgs, ignore_error=False):
if len(pkgs) == 0:
return
args = [sys.executable, "-m", "pip", "install", *pkgs]
try:
ret = subprocess.run(args, capture_output=True, encoding='utf-8', check=True).stdout
except subprocess.CalledProcessError as e:
ret = str(e.stdout)
return ret
def deb_install(pkgs):
if len(pkgs) == 0:
return
args = ["apt-get", "install", "-y", *pkgs]
return subprocess.run(args, capture_output=True, encoding='utf-8', check=True).stdout
def pip_download(pkgs):
Path("./pip").mkdir(exist_ok=True)
if len(pkgs) == 0:
return ""
args = [sys.executable, "-m", "pip", "download", "--no-deps", "-d", "pip", *pkgs]
return subprocess.run(args, capture_output=True, encoding='utf-8', check=True).stdout
def deb_download(pkgs):
dst_dir_path = Path("./deb")
dst_dir_path.mkdir(exist_ok=True)
if len(pkgs) == 0:
return ""
args = ["apt-get", "-o", "Dir::Cache::archives='/kaggle/working/deb/'", "install", "-y", *pkgs]
os.system(" ".join(args))
(dst_dir_path / "lock").unlink()
(dst_dir_path / "partial").rmdir()
deb_download({extra_deb_pkgs})
freeze_before_install = pip_freeze()
print(pip_install({python_pkgs}))
print(pip_install({extra_python_pkgs}), True)
freeze_after_install = pip_freeze()
diff_pkgs = set(freeze_after_install) - set(freeze_before_install)
print(pip_download(diff_pkgs))
"""
def create_kernel_push_params(
api: KaggleApi, meta_data: Dict
) -> kernel_proc.KernelPushParams:
install_kernel_slug = get_install_slug(meta_data)
install_kernel_meta_data = {
**meta_data,
"slug": install_kernel_slug,
"kernel_type": "script",
"is_private": True,
"enable_gpu": False,
"enable_internet": True,
"dataset_sources": [],
"competition_sources": [],
"kernel_sources": [],
"keywords": [],
}
return kernel_proc.KernelPushParams.of(api, install_kernel_meta_data)
def get_install_slug(meta_data: Dict) -> str:
return f"{meta_data['slug']}-install"
def get_owner_slug_from(response: KernelPushResponse):
return response.ref.split("/")[1]
def get_kernel_slug_from(response: KernelPushResponse):
return response.ref.split("/")[2]
def get_error_messages(logs: Dict) -> List[str]:
result = []
for log in logs:
stream_name = log.get("stream_name", "stderr")
data = log.get("data", "")
if stream_name == "stderr" and not (
data.startswith("[NbConvertApp]")
or data.startswith("WARNING:")
or data.startswith(" Running command")
):
result.append(data)
return result
def _get_package_locations(list_response: Dict[str, Any]) -> List[PackageLocation]:
return [
PackageLocation(item["url"], item["fileName"])
for item in list_response["files"]
]
def wait_for_install_kernel_completion(
api: KaggleApi, meta_data: Dict, kernel_slug: str, quiet: bool = False
) -> Dict[str, Any]:
owner_slug = get_username(api)
while True:
response = api.process_response(
api.kernel_output_with_http_info(owner_slug, kernel_slug)
)
if response["log"] != "":
time.sleep(5) # wait for completion of synchlonizing kernel status
result = kernel_proc.status(api, kernel_slug)
if result["status"] != "complete" or result["failureMessage"]:
logs = json.loads(response["log"])
err_messages = get_error_messages(logs)
raise InstallKernelError(err_messages)
return response
if not quiet:
click.echo("Wait for install kernel completion...")
time.sleep(10)
def upload_requirement_pkgs(
api: KaggleApi, meta_data: Dict, target_dir: Path, quiet: bool = False
):
slug = get_dataset_slug(api, meta_data)
_, dataset_slug = slug.split("/")[-2:]
license_name = "CC0-1.0"
status = api.dataset_status(slug)
if status is None:
return kernel_proc.create_dataset(
api,
dataset_slug=dataset_slug,
license_name=license_name,
target_dir=target_dir,
quiet=quiet,
)
else:
return kernel_proc.update_dataset(
api, dataset_slug=dataset_slug, target_dir=target_dir, quiet=quiet,
)
def push_install_kernel(
api: KaggleApi,
meta_data: Dict,
enable_constraint: bool,
extra_dependencies: List[str],
extra_deb_dependencies: List[str],
quiet: bool = False,
) -> KernelPushResponse:
kernel_push_params = create_kernel_push_params(api, meta_data)
dependencies = get_dependencies(enable_constraint)
prologue = meta_data.get("prologue", "")
kernel_body = create_kernel_body(
dependencies, extra_dependencies, extra_deb_dependencies, prologue
)
kernel_response = kernel_proc.push(api, kernel_push_params, kernel_body)
if not quiet:
kernel_proc.print_response(kernel_response)
click.echo("Pushing install kernel successed.")
return kernel_response
@kkt_command(is_global_command=True)
def install(
api: KaggleApi, kkt: Dict, pyproject_path: Path, quiet: bool = False, **kwargs: Dict
) -> None:
if "meta_data" not in kkt:
raise MetaDataNotFound()
meta_data = kkt["meta_data"].value
enable_constraint = kkt.get("enable_constraint", False)
extra_dependencies = kkt.get("extra_dependencies", [])
extra_deb_dependencies = kkt.get("extra_deb_dependencies", [])
kernel_response = push_install_kernel(
api,
meta_data,
enable_constraint,
extra_dependencies,
extra_deb_dependencies,
quiet,
)
kernel_slug = get_kernel_slug_from(kernel_response)
kernel_output = wait_for_install_kernel_completion(
api, meta_data=meta_data, kernel_slug=kernel_slug, quiet=quiet
)
with TemporaryDirectory() as tmp_dir:
target_dir = Path(tmp_dir)
(target_dir / "pip").mkdir(exist_ok=True)
(target_dir / "deb").mkdir(exist_ok=True)
pkg_locations = _get_package_locations(kernel_output)
fetch_files = fetch_packages(pkg_locations, target_dir, quiet=quiet)
if len(fetch_files) == 0:
click.echo("Extra required packages are nothing.")
return
ret = upload_requirement_pkgs(
api, meta_data, target_dir=target_dir, quiet=quiet
)
kernel_proc.print_response(ret) | 0.388502 | 0.121999 |
from argparse import ArgumentParser, Namespace
import codecs
import sys
import pickle
import os
import time
import lysfastparse.utils
import lysfastparse.bcovington.utils_bcovington
import tempfile
import yaml
import subprocess
import lysfastparse.bcovington.covington
parser = ArgumentParser()
parser.add_argument("-p", dest="p",metavar="FILE")
parser.add_argument("-m", dest="m",metavar="FILE")
parser.add_argument("-o", dest="o",metavar="FILE")
parser.add_argument("-epe", dest="epe",metavar="FILE")
parser.add_argument("-efe",dest="efe",metavar="FILE")
parser.add_argument("-ewe",dest="ewe", metavar="FILE")
parser.add_argument("-r", dest="r",help="Input run [raw|conllu]", type=str)
parser.add_argument("-i", dest="i",metavar="FILE")
parser.add_argument("--dynet-mem", dest="dynet_mem", help="It is needed to specify this parameter")
parser.add_argument("-udpipe_bin", dest="udpipe_bin",metavar="FILE")
parser.add_argument("-udpipe_model", dest="udpipe_model",metavar="FILE")
args = parser.parse_args()
print "args (run_model.py)",args
path_params = args.p
path_model = args.m
path_outfile = args.o
path_embeddings = args.ewe
path_pos_embeddings = args.epe
path_feats_embeddings = args.efe
type_text = args.r
path_input = args.i
valid_content = False
if type_text == "conllu" and os.path.exists(path_model):
with codecs.open(path_input) as f:
f_temp = tempfile.NamedTemporaryFile("w", delete=False)
f_temp.write(f.read())
f_temp.close()
valid_content = True
elif type_text == "raw" and os.path.exists(path_model):
pipe = lysfastparse.utils.UDPipe(args.udpipe_model, args.udpipe_bin) #config[YAML_UDPIPE])
raw_content = lysfastparse.utils.read_raw_file(path_input)
conllu = pipe.run(raw_content, options=" --tokenize --tag")
f_temp = tempfile.NamedTemporaryFile("w", delete=False)
f_temp.write(conllu)
f_temp.close()
valid_content = True
else:
raise NotImplementedError
if valid_content == True:
#TEST PHASE
with codecs.open(path_params, 'r') as paramsfp:
aux = pickle.load(paramsfp)
words, w2i, lemmas, l2i, cpos , pos, feats, rels, stored_opt = aux
d = vars(stored_opt)
print "d before",d
print
d["external_embedding"] = None if d["external_embedding"] =="None" else path_embeddings #os.sep.join([args.e,"FB_embeddings","wiki."+metadata[LTCODE]+".vec"])
d["pos_external_embedding"] = None if d["pos_external_embedding"] =="None" else path_pos_embeddings #os.sep.join([args.e,"UD_POS_embeddings",metadata[NAME_TREEBANK]])
d["feats_external_embedding"] = None if d["feats_external_embedding"] =="None" else path_feats_embeddings #os.sep.join([args.e,"UD_FEATS_embeddings",metadata[NAME_TREEBANK]])
d["lemmas_external_embedding"] = None
print "pos_external_embeddings", d["pos_external_embedding"]
print "feats_external_embeddings", d["feats_external_embedding"]
print "external_embedding", d["external_embedding"]
stored_opt =Namespace(**d)
print "Running model with this configuration", stored_opt
parser = lysfastparse.bcovington.covington.CovingtonBILSTM(words, lemmas, cpos, pos, feats, rels, w2i, l2i, stored_opt,
None)
parser.Load(path_model)
with codecs.open(f_temp.name) as f_temp:
lookup_conll_data = lysfastparse.utils.lookup_conll_extra_data(f_temp)
testpath = f_temp.name
ts = time.time()
pred = list(parser.Predict(testpath))
te = time.time()
print "Took "+str(te - ts)+" seconds"
lysfastparse.bcovington.utils_bcovington.write_conll(testpath, pred)
lysfastparse.utils.dump_lookup_extra_into_conll(testpath, lookup_conll_data)
lysfastparse.utils.transform_to_single_root(testpath)
with codecs.open(path_outfile,"w") as f_out:
with codecs.open(f_temp.name) as f_out_aux:
f_out.write(f_out_aux.read())
os.unlink(f_temp.name) | run_model.py | from argparse import ArgumentParser, Namespace
import codecs
import sys
import pickle
import os
import time
import lysfastparse.utils
import lysfastparse.bcovington.utils_bcovington
import tempfile
import yaml
import subprocess
import lysfastparse.bcovington.covington
parser = ArgumentParser()
parser.add_argument("-p", dest="p",metavar="FILE")
parser.add_argument("-m", dest="m",metavar="FILE")
parser.add_argument("-o", dest="o",metavar="FILE")
parser.add_argument("-epe", dest="epe",metavar="FILE")
parser.add_argument("-efe",dest="efe",metavar="FILE")
parser.add_argument("-ewe",dest="ewe", metavar="FILE")
parser.add_argument("-r", dest="r",help="Input run [raw|conllu]", type=str)
parser.add_argument("-i", dest="i",metavar="FILE")
parser.add_argument("--dynet-mem", dest="dynet_mem", help="It is needed to specify this parameter")
parser.add_argument("-udpipe_bin", dest="udpipe_bin",metavar="FILE")
parser.add_argument("-udpipe_model", dest="udpipe_model",metavar="FILE")
args = parser.parse_args()
print "args (run_model.py)",args
path_params = args.p
path_model = args.m
path_outfile = args.o
path_embeddings = args.ewe
path_pos_embeddings = args.epe
path_feats_embeddings = args.efe
type_text = args.r
path_input = args.i
valid_content = False
if type_text == "conllu" and os.path.exists(path_model):
with codecs.open(path_input) as f:
f_temp = tempfile.NamedTemporaryFile("w", delete=False)
f_temp.write(f.read())
f_temp.close()
valid_content = True
elif type_text == "raw" and os.path.exists(path_model):
pipe = lysfastparse.utils.UDPipe(args.udpipe_model, args.udpipe_bin) #config[YAML_UDPIPE])
raw_content = lysfastparse.utils.read_raw_file(path_input)
conllu = pipe.run(raw_content, options=" --tokenize --tag")
f_temp = tempfile.NamedTemporaryFile("w", delete=False)
f_temp.write(conllu)
f_temp.close()
valid_content = True
else:
raise NotImplementedError
if valid_content == True:
#TEST PHASE
with codecs.open(path_params, 'r') as paramsfp:
aux = pickle.load(paramsfp)
words, w2i, lemmas, l2i, cpos , pos, feats, rels, stored_opt = aux
d = vars(stored_opt)
print "d before",d
print
d["external_embedding"] = None if d["external_embedding"] =="None" else path_embeddings #os.sep.join([args.e,"FB_embeddings","wiki."+metadata[LTCODE]+".vec"])
d["pos_external_embedding"] = None if d["pos_external_embedding"] =="None" else path_pos_embeddings #os.sep.join([args.e,"UD_POS_embeddings",metadata[NAME_TREEBANK]])
d["feats_external_embedding"] = None if d["feats_external_embedding"] =="None" else path_feats_embeddings #os.sep.join([args.e,"UD_FEATS_embeddings",metadata[NAME_TREEBANK]])
d["lemmas_external_embedding"] = None
print "pos_external_embeddings", d["pos_external_embedding"]
print "feats_external_embeddings", d["feats_external_embedding"]
print "external_embedding", d["external_embedding"]
stored_opt =Namespace(**d)
print "Running model with this configuration", stored_opt
parser = lysfastparse.bcovington.covington.CovingtonBILSTM(words, lemmas, cpos, pos, feats, rels, w2i, l2i, stored_opt,
None)
parser.Load(path_model)
with codecs.open(f_temp.name) as f_temp:
lookup_conll_data = lysfastparse.utils.lookup_conll_extra_data(f_temp)
testpath = f_temp.name
ts = time.time()
pred = list(parser.Predict(testpath))
te = time.time()
print "Took "+str(te - ts)+" seconds"
lysfastparse.bcovington.utils_bcovington.write_conll(testpath, pred)
lysfastparse.utils.dump_lookup_extra_into_conll(testpath, lookup_conll_data)
lysfastparse.utils.transform_to_single_root(testpath)
with codecs.open(path_outfile,"w") as f_out:
with codecs.open(f_temp.name) as f_out_aux:
f_out.write(f_out_aux.read())
os.unlink(f_temp.name) | 0.234757 | 0.086709 |
from __future__ import unicode_literals
from PIL import Image
from subprocess import check_call
from concurrent import futures
import subprocess
import os
import io
import subprocess
import sys
from os import listdir
from os.path import isfile, join
import psutil
import time
import glob
# Pipeline versions to run; in_vers is index-aligned with vers_to_run and
# names the dataset version used as input for each pipeline version.
vers_to_run = [ 3, 4, 5, 7, 8, 9,10,11,12,58,59,60,61,62,63,64]
in_vers = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 6, 6, 6, 6, 6, 6]
# Number of worker processes for the conversion pool.
num_threads = 16
# The directory to convert
datasetpath = '/datasets/casia/'
def convert_img(img, in_version_path, out_version_path, pipeline_version=None):
    """Run one image through the compiled pipeline binary.

    img              -- image path relative to the version directory
                        (leading '/' included, as produced by the glob below)
    in_version_path  -- directory holding the input dataset version
    out_version_path -- directory receiving the converted image
    pipeline_version -- pipeline number to run; defaults to the module-level
                        `version` set by the driver loop (kept for backward
                        compatibility with the existing 3-arg calls).
    """
    import shutil
    if pipeline_version is None:
        pipeline_version = version
    # Per-PID scratch directory so concurrent pool workers don't collide.
    temp_dir = 'temp_' + str(os.getpid())
    if not os.path.isdir(temp_dir):
        os.makedirs(temp_dir)
    try:
        # Run the given pipeline on the png.  Argument-list form (no
        # shell=True) avoids quoting/injection problems with odd paths.
        # The binary writes output.png into the supplied directory.
        subprocess.call(['../common/pipeline_V' + str(pipeline_version) + '.o',
                         in_version_path + img,
                         temp_dir + '/'])
        # Copy the result to the destination directory.
        shutil.copy(temp_dir + '/output.png', out_version_path + img)
    finally:
        # Always remove the scratch directory, even if the copy failed.
        shutil.rmtree(temp_dir, ignore_errors=True)
# Driver: for every pipeline version, mirror the class-directory layout of
# its input version, (re)build the converter, then convert all images in a
# process pool.
for i, version in enumerate(vers_to_run):
    in_version = in_vers[i]
    in_version_path = datasetpath + 'v' + str(in_version)
    out_version_path = datasetpath + 'v' + str(version)
    # Sub-directories (class folders) of the input version, relative paths.
    subds = [(s.rstrip("/"))[len(in_version_path):] for s in glob.glob(in_version_path + "/**")]
    # Make directories for each output class.
    for subd in subds:
        subprocess.call('mkdir -p ' + out_version_path + subd, shell=True)
    # Images to convert, as paths relative to the version directory.
    imgs = [img[len(in_version_path):] for img in glob.glob(in_version_path + '/**/*.png')]
    # Compile the converter for this pipeline version.
    subprocess.call('make --directory ../common/ version=' + str(version), shell=True)
    with futures.ProcessPoolExecutor(max_workers=num_threads) as executor:
        fs = [executor.submit(convert_img, img, in_version_path, out_version_path)
              for img in imgs]
        # `done` (not `i`) so the outer loop index is not shadowed.
        for done, f in enumerate(futures.as_completed(fs)):
            # Progress goes to stderr so it shows even with stdout redirected.
            sys.stderr.write(
                "Converted Image: {} / {} \r".format(done, len(imgs)))
# NOTE(review): dataset-dump residue (" | pipelines/casia/sched.py |")
# removed from the final line above.
from __future__ import unicode_literals
from PIL import Image
from subprocess import check_call
from concurrent import futures
import subprocess
import os
import io
import subprocess
import sys
from os import listdir
from os.path import isfile, join
import psutil
import time
import glob
# Pipeline versions to run; in_vers is index-aligned with vers_to_run and
# names the dataset version used as input for each pipeline version.
vers_to_run = [ 3, 4, 5, 7, 8, 9,10,11,12,58,59,60,61,62,63,64]
in_vers = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 6, 6, 6, 6, 6, 6]
# Number of worker processes for the conversion pool.
num_threads = 16
# The directory to convert
datasetpath = '/datasets/casia/'
def convert_img(img, in_version_path, out_version_path, pipeline_version=None):
    """Run one image through the compiled pipeline binary.

    img              -- image path relative to the version directory
    in_version_path  -- directory holding the input dataset version
    out_version_path -- directory receiving the converted image
    pipeline_version -- pipeline number to run; defaults to the module-level
                        `version` set by the driver loop (kept for backward
                        compatibility with the existing 3-arg calls).
    """
    import shutil
    if pipeline_version is None:
        pipeline_version = version
    # Per-PID scratch directory so concurrent pool workers don't collide.
    temp_dir = 'temp_' + str(os.getpid())
    if not os.path.isdir(temp_dir):
        os.makedirs(temp_dir)
    try:
        # Run the given pipeline on the png.  Argument-list form (no
        # shell=True) avoids quoting/injection problems with odd paths.
        subprocess.call(['../common/pipeline_V' + str(pipeline_version) + '.o',
                         in_version_path + img,
                         temp_dir + '/'])
        # Copy the result to the destination directory.
        shutil.copy(temp_dir + '/output.png', out_version_path + img)
    finally:
        # Always remove the scratch directory, even if the copy failed.
        shutil.rmtree(temp_dir, ignore_errors=True)
# Driver: for every pipeline version, mirror the class-directory layout of
# its input version, (re)build the converter, then convert all images in a
# process pool.
for i, version in enumerate(vers_to_run):
    in_version = in_vers[i]
    in_version_path = datasetpath + 'v' + str(in_version)
    out_version_path = datasetpath + 'v' + str(version)
    # Sub-directories (class folders) of the input version, relative paths.
    subds = [(s.rstrip("/"))[len(in_version_path):] for s in glob.glob(in_version_path + "/**")]
    # Make directories for each output class.
    for subd in subds:
        subprocess.call('mkdir -p ' + out_version_path + subd, shell=True)
    # Images to convert, as paths relative to the version directory.
    imgs = [img[len(in_version_path):] for img in glob.glob(in_version_path + '/**/*.png')]
    # Compile the converter for this pipeline version.
    subprocess.call('make --directory ../common/ version=' + str(version), shell=True)
    with futures.ProcessPoolExecutor(max_workers=num_threads) as executor:
        fs = [executor.submit(convert_img, img, in_version_path, out_version_path)
              for img in imgs]
        # `done` (not `i`) so the outer loop index is not shadowed.
        for done, f in enumerate(futures.as_completed(fs)):
            # Progress goes to stderr so it shows even with stdout redirected.
            sys.stderr.write(
                "Converted Image: {} / {} \r".format(done, len(imgs)))
# NOTE(review): dataset-dump residue (" | 0.335133 | 0.087564") removed from
# the final line above.
from __future__ import unicode_literals
from django.db import models, migrations
import django_extensions.db.fields.json
class Migration(migrations.Migration):
    """Initial schema for the aws_admin app.

    Creates Instance, Region, SecurityGroup, SecurityGroupRule and VPC;
    the FK/M2M fields between them are added afterwards with AddField
    because the models reference each other.
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Instance',
            fields=[
                ('id', models.CharField(max_length=20, serialize=False, primary_key=True)),
                ('name', models.CharField(max_length=255, null=True, blank=True)),
                # Numeric codes mirror the EC2 instance state codes.
                ('state', models.SmallIntegerField(blank=True, null=True, choices=[(0, 'pending'), (16, 'running'), (32, 'shutting-down'), (48, 'terminated'), (64, 'stopping'), (80, 'stopped')])),
                ('launched', models.DateTimeField(null=True, blank=True)),
                ('tags', django_extensions.db.fields.json.JSONField(null=True, blank=True)),
                ('data', django_extensions.db.fields.json.JSONField(null=True, blank=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Region',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('code', models.SlugField(unique=True, max_length=30)),
                ('name', models.CharField(max_length=55)),
            ],
        ),
        migrations.CreateModel(
            name='SecurityGroup',
            fields=[
                ('id', models.CharField(max_length=20, serialize=False, primary_key=True)),
                ('name', models.CharField(max_length=255, null=True, blank=True)),
                ('description', models.CharField(max_length=255, null=True, blank=True)),
                ('tags', django_extensions.db.fields.json.JSONField(null=True, blank=True)),
                ('region', models.ForeignKey(related_name='sgs', to='aws_admin.Region')),
            ],
            options={
                'ordering': ('name',),
            },
        ),
        migrations.CreateModel(
            name='SecurityGroupRule',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('protocol', models.CharField(max_length=4, choices=[('tcp', 'tcp'), ('udp', 'udp'), ('icmp', 'icmp')])),
                ('port_range', models.CommaSeparatedIntegerField(help_text='min, max', max_length=30)),
                ('cidr', models.CharField(max_length=50, null=True, blank=True)),
                ('description', models.TextField(help_text='User Description', null=True, blank=True)),
                ('source_group', models.ForeignKey(blank=True, to='aws_admin.SecurityGroup', null=True)),
            ],
        ),
        migrations.CreateModel(
            name='VPC',
            fields=[
                ('id', models.CharField(max_length=20, serialize=False, primary_key=True)),
                ('name', models.CharField(max_length=255)),
                ('cidr', models.CharField(max_length=30, null=True, blank=True)),
                ('state', models.CharField(max_length=55, null=True, blank=True)),
                ('tags', django_extensions.db.fields.json.JSONField(null=True, blank=True)),
                ('region', models.ForeignKey(related_name='vpcs', to='aws_admin.Region')),
            ],
            options={
                'ordering': ('state',),
                'verbose_name': 'VPC',
            },
        ),
        migrations.AddField(
            model_name='securitygroup',
            name='rules',
            field=models.ManyToManyField(help_text='Inbound', related_name='sgs_inbound', to='aws_admin.SecurityGroupRule'),
        ),
        migrations.AddField(
            model_name='securitygroup',
            name='rules_egress',
            field=models.ManyToManyField(help_text='Outbound', related_name='sgs_outbound', to='aws_admin.SecurityGroupRule'),
        ),
        migrations.AddField(
            model_name='securitygroup',
            name='vpc',
            field=models.ForeignKey(related_name='sgs', blank=True, to='aws_admin.VPC', null=True),
        ),
        migrations.AddField(
            model_name='instance',
            name='region',
            field=models.ForeignKey(related_name='instances', to='aws_admin.Region'),
        ),
        migrations.AddField(
            model_name='instance',
            name='security_groups',
            field=models.ManyToManyField(related_name='instances', to='aws_admin.SecurityGroup'),
        ),
        migrations.AddField(
            model_name='instance',
            name='vpc',
            field=models.ForeignKey(related_name='instances', blank=True, to='aws_admin.VPC', null=True),
        ),
        migrations.AlterUniqueTogether(
            name='securitygrouprule',
            unique_together=set([('protocol', 'port_range', 'cidr', 'source_group')]),
        ),
    ]
# NOTE(review): dataset-dump residue (" | aws_admin/migrations/0001_initial.py
# | from __future__ import unicode_literals") removed from the closing line; a
# mid-file __future__ import is a SyntaxError, so it is not re-emitted here.
from django.db import models, migrations
import django_extensions.db.fields.json
class Migration(migrations.Migration):
    """Initial schema for the aws_admin app.

    Creates Instance, Region, SecurityGroup, SecurityGroupRule and VPC;
    the FK/M2M fields between them are added afterwards with AddField
    because the models reference each other.
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Instance',
            fields=[
                ('id', models.CharField(max_length=20, serialize=False, primary_key=True)),
                ('name', models.CharField(max_length=255, null=True, blank=True)),
                # Numeric codes mirror the EC2 instance state codes.
                ('state', models.SmallIntegerField(blank=True, null=True, choices=[(0, 'pending'), (16, 'running'), (32, 'shutting-down'), (48, 'terminated'), (64, 'stopping'), (80, 'stopped')])),
                ('launched', models.DateTimeField(null=True, blank=True)),
                ('tags', django_extensions.db.fields.json.JSONField(null=True, blank=True)),
                ('data', django_extensions.db.fields.json.JSONField(null=True, blank=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Region',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('code', models.SlugField(unique=True, max_length=30)),
                ('name', models.CharField(max_length=55)),
            ],
        ),
        migrations.CreateModel(
            name='SecurityGroup',
            fields=[
                ('id', models.CharField(max_length=20, serialize=False, primary_key=True)),
                ('name', models.CharField(max_length=255, null=True, blank=True)),
                ('description', models.CharField(max_length=255, null=True, blank=True)),
                ('tags', django_extensions.db.fields.json.JSONField(null=True, blank=True)),
                ('region', models.ForeignKey(related_name='sgs', to='aws_admin.Region')),
            ],
            options={
                'ordering': ('name',),
            },
        ),
        migrations.CreateModel(
            name='SecurityGroupRule',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('protocol', models.CharField(max_length=4, choices=[('tcp', 'tcp'), ('udp', 'udp'), ('icmp', 'icmp')])),
                ('port_range', models.CommaSeparatedIntegerField(help_text='min, max', max_length=30)),
                ('cidr', models.CharField(max_length=50, null=True, blank=True)),
                ('description', models.TextField(help_text='User Description', null=True, blank=True)),
                ('source_group', models.ForeignKey(blank=True, to='aws_admin.SecurityGroup', null=True)),
            ],
        ),
        migrations.CreateModel(
            name='VPC',
            fields=[
                ('id', models.CharField(max_length=20, serialize=False, primary_key=True)),
                ('name', models.CharField(max_length=255)),
                ('cidr', models.CharField(max_length=30, null=True, blank=True)),
                ('state', models.CharField(max_length=55, null=True, blank=True)),
                ('tags', django_extensions.db.fields.json.JSONField(null=True, blank=True)),
                ('region', models.ForeignKey(related_name='vpcs', to='aws_admin.Region')),
            ],
            options={
                'ordering': ('state',),
                'verbose_name': 'VPC',
            },
        ),
        migrations.AddField(
            model_name='securitygroup',
            name='rules',
            field=models.ManyToManyField(help_text='Inbound', related_name='sgs_inbound', to='aws_admin.SecurityGroupRule'),
        ),
        migrations.AddField(
            model_name='securitygroup',
            name='rules_egress',
            field=models.ManyToManyField(help_text='Outbound', related_name='sgs_outbound', to='aws_admin.SecurityGroupRule'),
        ),
        migrations.AddField(
            model_name='securitygroup',
            name='vpc',
            field=models.ForeignKey(related_name='sgs', blank=True, to='aws_admin.VPC', null=True),
        ),
        migrations.AddField(
            model_name='instance',
            name='region',
            field=models.ForeignKey(related_name='instances', to='aws_admin.Region'),
        ),
        migrations.AddField(
            model_name='instance',
            name='security_groups',
            field=models.ManyToManyField(related_name='instances', to='aws_admin.SecurityGroup'),
        ),
        migrations.AddField(
            model_name='instance',
            name='vpc',
            field=models.ForeignKey(related_name='instances', blank=True, to='aws_admin.VPC', null=True),
        ),
        migrations.AlterUniqueTogether(
            name='securitygrouprule',
            unique_together=set([('protocol', 'port_range', 'cidr', 'source_group')]),
        ),
    ]
# NOTE(review): dataset-dump residue (" | 0.596551 | 0.140307") removed from
# the closing line above.
import unittest
import FizzBuzz
class TestFizzBuzz(unittest.TestCase):
    """Unit tests for FizzBuzz.fizzbuzz.

    Expected contract: multiples of 3 -> 'Fizz', 5 -> 'Buzz', 7 -> 'Bazz',
    concatenated for common multiples; any other value is returned unchanged.
    """

    def _check(self, expected, values):
        # Helper: every sample value must map to the same expected string.
        for v in values:
            self.assertEqual(FizzBuzz.fizzbuzz(v), expected)

    def test_normal(self):
        # input >= 0 not evenly divisible by 3, 5 or 7 returns the same value
        for v in (2, 67, 358):
            self.assertEqual(FizzBuzz.fizzbuzz(v), v)

    def test_fizz(self):
        # input evenly divisible by 3 (only) returns 'Fizz'
        self._check('Fizz', (3, 9, 138))

    def test_buzz(self):
        # input evenly divisible by 5 (only) returns 'Buzz'
        self._check('Buzz', (5, 10, 65))

    def test_bazz(self):
        # input evenly divisible by 7 (only) returns 'Bazz'
        self._check('Bazz', (7, 77, 98))

    def test_fizz_buzz(self):
        # input evenly divisible by 3 and 5 returns 'FizzBuzz'
        self._check('FizzBuzz', (15, 30, 60))

    def test_fizz_bazz(self):
        # input evenly divisible by 3 and 7 returns 'FizzBazz'
        self._check('FizzBazz', (21, 42, 84))

    def test_buzz_bazz(self):
        # input evenly divisible by 5 and 7 returns 'BuzzBazz'
        self._check('BuzzBazz', (35, 70, 455))

    def test_fizz_buzz_bazz(self):
        # input evenly divisible by 3, 5 and 7 returns 'FizzBuzzBazz'
        self._check('FizzBuzzBazz', (105, 210, 315))


if __name__ == '__main__':
    unittest.main()

# NOTE(review): dataset-dump residue removed from the line above; the import
# below belongs to the next record of the dump.
import unittest
import FizzBuzz
class TestFizzBuzz(unittest.TestCase):
def test_normal(self):
#tests that input >= 0 not evenly divisible
#by 3,5, or 7 returns the same
self.assertEqual(FizzBuzz.fizzbuzz(2), 2)
self.assertEqual(FizzBuzz.fizzbuzz(67), 67)
self.assertEqual(FizzBuzz.fizzbuzz(358), 358)
def test_fizz(self):
#input evenly divisible by 3 returns fizz
self.assertEqual(FizzBuzz.fizzbuzz(3), 'Fizz')
self.assertEqual(FizzBuzz.fizzbuzz(9), 'Fizz')
self.assertEqual(FizzBuzz.fizzbuzz(138), 'Fizz')
def test_buzz(self):
#input evenly divisible by 5 reutrns buzz
self.assertEqual(FizzBuzz.fizzbuzz(5), 'Buzz')
self.assertEqual(FizzBuzz.fizzbuzz(10), 'Buzz')
self.assertEqual(FizzBuzz.fizzbuzz(65), 'Buzz')
def test_bazz(self):
#input evenly divisible by 7 returns bazz
self.assertEqual(FizzBuzz.fizzbuzz(7), 'Bazz')
self.assertEqual(FizzBuzz.fizzbuzz(77), 'Bazz')
self.assertEqual(FizzBuzz.fizzbuzz(98), 'Bazz')
def test_fizz_buzz(self):
#input evenly divisble by 3 and 5 returns fizzbuzz
self.assertEqual(FizzBuzz.fizzbuzz(15), 'FizzBuzz')
self.assertEqual(FizzBuzz.fizzbuzz(30), 'FizzBuzz')
self.assertEqual(FizzBuzz.fizzbuzz(60), 'FizzBuzz')
def test_fizz_bazz(self):
#input evenly divisible by 3 and 7 returns fizzbazz
self.assertEqual(FizzBuzz.fizzbuzz(21), 'FizzBazz')
self.assertEqual(FizzBuzz.fizzbuzz(42), 'FizzBazz')
self.assertEqual(FizzBuzz.fizzbuzz(84), 'FizzBazz')
def test_buzz_bazz(self):
#input evenly divisible by 5 and 7 returns buzzbazz
self.assertEqual(FizzBuzz.fizzbuzz(35), 'BuzzBazz')
self.assertEqual(FizzBuzz.fizzbuzz(70), 'BuzzBazz')
self.assertEqual(FizzBuzz.fizzbuzz(455), 'BuzzBazz')
def test_fizz_buzz_bazz(self):
#input evenly divisible by 3, 5, and 7 returns fizzbuzzbazz
self.assertEqual(FizzBuzz.fizzbuzz(105), 'FizzBuzzBazz')
self.assertEqual(FizzBuzz.fizzbuzz(210), 'FizzBuzzBazz')
self.assertEqual(FizzBuzz.fizzbuzz(315), 'FizzBuzzBazz')
if __name__ == '__main__':
unittest.main() | 0.544075 | 0.660419 |
from rpython.rlib.rarithmetic import ovfcheck
from rpython.rlib.rbigint import rbigint, _divrem
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.lltypesystem.lloperation import llop
from som.vmobjects.abstract_object import AbstractObject
from som.vm.globals import trueObject, falseObject
class Integer(AbstractObject):
    """Boxed machine-word integer for the SOM VM (RPython).

    Arithmetic primitives overflow into BigInteger (rbigint) when ovfcheck
    raises, and delegate to Double when the right operand is a float.
    Comparison/equality primitives answer the VM singletons
    trueObject/falseObject.  Imports of Double/BigInteger are kept local to
    the methods to avoid circular module imports.
    """

    # The embedded value never changes, which lets the JIT treat it as constant.
    _immutable_fields_ = ["_embedded_integer"]

    def __init__(self, value):
        AbstractObject.__init__(self)
        assert isinstance(value, int)
        self._embedded_integer = value

    def get_embedded_integer(self):
        return self._embedded_integer

    def __str__(self):
        return str(self._embedded_integer)

    def get_class(self, universe):
        return universe.integerClass

    # --- Quickened bytecodes: drop receiver and argument from the operand
    # --- stack, then push the primitive's result.

    def quick_add(self, from_method, frame, interpreter, bytecode_index):
        right = frame.top()
        frame.pop()
        frame.pop()
        frame.push(self.prim_add(right))

    def quick_multiply(self, from_method, frame, interpreter, bytecode_index):
        right = frame.top()
        frame.pop()
        frame.pop()
        frame.push(self.prim_multiply(right))

    def quick_subtract(self, from_method, frame, interpreter, bytecode_index):
        right = frame.top()
        frame.pop()
        frame.pop()
        frame.push(self.prim_subtract(right))

    def _to_double(self):
        # Promote this integer to a Double for mixed int/float operations.
        from .double import Double
        return Double(float(self._embedded_integer))

    # --- Comparison primitives ---------------------------------------------

    def prim_less_than(self, right):
        from .double import Double
        from .biginteger import BigInteger
        # Check second parameter type:
        if isinstance(right, BigInteger):
            result = rbigint.fromint(self._embedded_integer).lt(
                right.get_embedded_biginteger())
        elif isinstance(right, Double):
            return self._to_double().prim_less_than(right)
        else:
            result = self._embedded_integer < right.get_embedded_integer()
        if result:
            return trueObject
        else:
            return falseObject

    def prim_less_than_or_equal(self, right):
        from .double import Double
        from .biginteger import BigInteger
        # Check second parameter type:
        if isinstance(right, BigInteger):
            result = rbigint.fromint(self._embedded_integer).le(
                right.get_embedded_biginteger())
        elif isinstance(right, Double):
            return self._to_double().prim_less_than_or_equal(right)
        else:
            result = self._embedded_integer <= right.get_embedded_integer()
        if result:
            return trueObject
        else:
            return falseObject

    def prim_greater_than(self, right):
        from .double import Double
        from .biginteger import BigInteger
        # Check second parameter type:
        if isinstance(right, BigInteger):
            result = rbigint.fromint(self._embedded_integer).gt(
                right.get_embedded_biginteger())
        elif isinstance(right, Double):
            return self._to_double().prim_greater_than(right)
        else:
            result = self._embedded_integer > right.get_embedded_integer()
        if result:
            return trueObject
        else:
            return falseObject

    # --- Misc primitives ---------------------------------------------------

    def prim_as_string(self):
        from .string import String
        return String(str(self._embedded_integer))

    def prim_abs(self):
        return Integer(abs(self._embedded_integer))

    def prim_as_32_bit_signed_value(self):
        # Truncate to 32 bits, then sign-extend back to a full machine word.
        val = rffi.cast(lltype.Signed, rffi.cast(rffi.INT, self._embedded_integer))
        return Integer(val)

    def prim_max(self, right):
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            left = rbigint.fromint(self._embedded_integer)
            if right.get_embedded_biginteger().gt(left):
                return right
            return self
        assert isinstance(right, Integer)
        if right.get_embedded_integer() > self._embedded_integer:
            return right
        return self

    # --- Arithmetic primitives (overflow promotes to BigInteger) -----------

    def prim_add(self, right):
        from .double import Double
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            return BigInteger(
                right.get_embedded_biginteger().add(
                    rbigint.fromint(self._embedded_integer)))
        elif isinstance(right, Double):
            return self._to_double().prim_add(right)
        else:
            l = self._embedded_integer
            r = right.get_embedded_integer()
            try:
                result = ovfcheck(l + r)
                return Integer(result)
            except OverflowError:
                return BigInteger(
                    rbigint.fromint(l).add(rbigint.fromint(r)))

    def prim_subtract(self, right):
        from .double import Double
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            r = rbigint.fromint(self._embedded_integer).sub(
                right.get_embedded_biginteger())
            return BigInteger(r)
        elif isinstance(right, Double):
            return self._to_double().prim_subtract(right)
        else:
            l = self._embedded_integer
            r = right.get_embedded_integer()
            try:
                result = ovfcheck(l - r)
                return Integer(result)
            except OverflowError:
                return BigInteger(
                    rbigint.fromint(l).sub(rbigint.fromint(r)))

    def prim_multiply(self, right):
        from .double import Double
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            r = rbigint.fromint(self._embedded_integer).mul(
                right.get_embedded_biginteger())
            return BigInteger(r)
        elif isinstance(right, Double):
            return self._to_double().prim_multiply(right)
        else:
            l = self._embedded_integer
            r = right.get_embedded_integer()
            try:
                result = ovfcheck(l * r)
                return Integer(result)
            except OverflowError:
                return BigInteger(
                    rbigint.fromint(l).mul(rbigint.fromint(r)))

    def prim_double_div(self, right):
        # SOM's `//` : always answers a Double.
        from .double import Double
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            r = rbigint.fromint(self._embedded_integer).truediv(
                right.get_embedded_biginteger())
            return Double(r)
        elif isinstance(right, Double):
            return self._to_double().prim_double_div(right)
        else:
            l = self._embedded_integer
            r = right.get_embedded_integer()
            return Double(l / float(r))

    def prim_int_div(self, right):
        # SOM's `/` on integers: floored integer division.
        from .double import Double
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            r = rbigint.fromint(self._embedded_integer).floordiv(
                right.get_embedded_biginteger())
            return BigInteger(r)
        elif isinstance(right, Double):
            return self._to_double().prim_int_div(right)
        else:
            l = self._embedded_integer
            r = right.get_embedded_integer()
            return Integer(l / r)

    def prim_modulo(self, right):
        from .double import Double
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            r = rbigint.fromint(self._embedded_integer).mod(
                right.get_embedded_biginteger())
            return BigInteger(r)
        elif isinstance(right, Double):
            return self._to_double().prim_modulo(right)
        else:
            l = self._embedded_integer
            r = right.get_embedded_integer()
            return Integer(l % r)

    def prim_remainder(self, right):
        from .double import Double
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            d, r = _divrem(rbigint.fromint(self._embedded_integer),
                           right.get_embedded_biginteger())
            return BigInteger(r)
        elif isinstance(right, Double):
            return self._to_double().prim_remainder(right)
        else:
            l = self._embedded_integer
            r = right.get_embedded_integer()
            # NOTE(review): llop.int_mod is used here instead of `%` as in
            # prim_modulo — presumably for low-level remainder semantics;
            # confirm against the RPython lloperation docs.
            return Integer(llop.int_mod(lltype.Signed, l, r))

    def prim_and(self, right):
        from .double import Double
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            r = rbigint.fromint(self._embedded_integer).and_(
                right.get_embedded_biginteger())
            return BigInteger(r)
        elif isinstance(right, Double):
            return self._to_double().prim_and(right)
        else:
            l = self._embedded_integer
            r = right.get_embedded_integer()
            return Integer(l & r)

    # --- Equality primitives (non-numeric operands are simply unequal) -----

    def prim_equals(self, right):
        from .double import Double
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            result = rbigint.fromint(self._embedded_integer).eq(
                right.get_embedded_biginteger())
        elif isinstance(right, Double):
            result = self._embedded_integer == right.get_embedded_double()
        elif isinstance(right, Integer):
            l = self._embedded_integer
            r = right.get_embedded_integer()
            result = l == r
        else:
            return falseObject
        if result:
            return trueObject
        else:
            return falseObject

    def prim_unequals(self, right):
        from .double import Double
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            result = rbigint.fromint(self._embedded_integer).ne(
                right.get_embedded_biginteger())
        elif isinstance(right, Double):
            result = self._embedded_integer != right.get_embedded_double()
        elif isinstance(right, Integer):
            l = self._embedded_integer
            r = right.get_embedded_integer()
            result = l != r
        else:
            return trueObject
        if result:
            return trueObject
        else:
            return falseObject

# NOTE(review): dataset-dump residue (" | src/som/vmobjects/integer.py |")
# removed from the line above; the import below belongs to the next record
# of the dump.
from rpython.rlib.rarithmetic import ovfcheck
from rpython.rlib.rbigint import rbigint, _divrem
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.lltypesystem.lloperation import llop
from som.vmobjects.abstract_object import AbstractObject
from som.vm.globals import trueObject, falseObject
class Integer(AbstractObject):
_immutable_fields_ = ["_embedded_integer"]
def __init__(self, value):
AbstractObject.__init__(self)
assert isinstance(value, int)
self._embedded_integer = value
def get_embedded_integer(self):
return self._embedded_integer
def __str__(self):
return str(self._embedded_integer)
def get_class(self, universe):
return universe.integerClass
def quick_add(self, from_method, frame, interpreter, bytecode_index):
right = frame.top()
frame.pop()
frame.pop()
frame.push(self.prim_add(right))
def quick_multiply(self, from_method, frame, interpreter, bytecode_index):
right = frame.top()
frame.pop()
frame.pop()
frame.push(self.prim_multiply(right))
def quick_subtract(self, from_method, frame, interpreter, bytecode_index):
right = frame.top()
frame.pop()
frame.pop()
frame.push(self.prim_subtract(right))
def _to_double(self):
from .double import Double
return Double(float(self._embedded_integer))
def prim_less_than(self, right):
from .double import Double
from .biginteger import BigInteger
# Check second parameter type:
if isinstance(right, BigInteger):
result = rbigint.fromint(self._embedded_integer).lt(
right.get_embedded_biginteger())
elif isinstance(right, Double):
return self._to_double().prim_less_than(right)
else:
result = self._embedded_integer < right.get_embedded_integer()
if result:
return trueObject
else:
return falseObject
def prim_less_than_or_equal(self, right):
from .double import Double
from .biginteger import BigInteger
# Check second parameter type:
if isinstance(right, BigInteger):
result = rbigint.fromint(self._embedded_integer).le(
right.get_embedded_biginteger())
elif isinstance(right, Double):
return self._to_double().prim_less_than_or_equal(right)
else:
result = self._embedded_integer <= right.get_embedded_integer()
if result:
return trueObject
else:
return falseObject
def prim_greater_than(self, right):
from .double import Double
from .biginteger import BigInteger
# Check second parameter type:
if isinstance(right, BigInteger):
result = rbigint.fromint(self._embedded_integer).gt(
right.get_embedded_biginteger())
elif isinstance(right, Double):
return self._to_double().prim_greater_than(right)
else:
result = self._embedded_integer > right.get_embedded_integer()
if result:
return trueObject
else:
return falseObject
def prim_as_string(self):
from .string import String
return String(str(self._embedded_integer))
def prim_abs(self):
return Integer(abs(self._embedded_integer))
def prim_as_32_bit_signed_value(self):
val = rffi.cast(lltype.Signed, rffi.cast(rffi.INT, self._embedded_integer))
return Integer(val)
def prim_max(self, right):
from .biginteger import BigInteger
if isinstance(right, BigInteger):
left = rbigint.fromint(self._embedded_integer)
if right.get_embedded_biginteger().gt(left):
return right
return self
assert isinstance(right, Integer)
if right.get_embedded_integer() > self._embedded_integer:
return right
return self
def prim_add(self, right):
from .double import Double
from .biginteger import BigInteger
if isinstance(right, BigInteger):
return BigInteger(
right.get_embedded_biginteger().add(
rbigint.fromint(self._embedded_integer)))
elif isinstance(right, Double):
return self._to_double().prim_add(right)
else:
l = self._embedded_integer
r = right.get_embedded_integer()
try:
result = ovfcheck(l + r)
return Integer(result)
except OverflowError:
return BigInteger(
rbigint.fromint(l).add(rbigint.fromint(r)))
def prim_subtract(self, right):
from .double import Double
from .biginteger import BigInteger
if isinstance(right, BigInteger):
r = rbigint.fromint(self._embedded_integer).sub(
right.get_embedded_biginteger())
return BigInteger(r)
elif isinstance(right, Double):
return self._to_double().prim_subtract(right)
else:
l = self._embedded_integer
r = right.get_embedded_integer()
try:
result = ovfcheck(l - r)
return Integer(result)
except OverflowError:
return BigInteger(
rbigint.fromint(l).sub(rbigint.fromint(r)))
def prim_multiply(self, right):
from .double import Double
from .biginteger import BigInteger
if isinstance(right, BigInteger):
r = rbigint.fromint(self._embedded_integer).mul(
right.get_embedded_biginteger())
return BigInteger(r)
elif isinstance(right, Double):
return self._to_double().prim_multiply(right)
else:
l = self._embedded_integer
r = right.get_embedded_integer()
try:
result = ovfcheck(l * r)
return Integer(result)
except OverflowError:
return BigInteger(
rbigint.fromint(l).mul(rbigint.fromint(r)))
def prim_double_div(self, right):
from .double import Double
from .biginteger import BigInteger
if isinstance(right, BigInteger):
r = rbigint.fromint(self._embedded_integer).truediv(
right.get_embedded_biginteger())
return Double(r)
elif isinstance(right, Double):
return self._to_double().prim_double_div(right)
else:
l = self._embedded_integer
r = right.get_embedded_integer()
return Double(l / float(r))
def prim_int_div(self, right):
from .double import Double
from .biginteger import BigInteger
if isinstance(right, BigInteger):
r = rbigint.fromint(self._embedded_integer).floordiv(
right.get_embedded_biginteger())
return BigInteger(r)
elif isinstance(right, Double):
return self._to_double().prim_int_div(right)
else:
l = self._embedded_integer
r = right.get_embedded_integer()
return Integer(l / r)
def prim_modulo(self, right):
from .double import Double
from .biginteger import BigInteger
if isinstance(right, BigInteger):
r = rbigint.fromint(self._embedded_integer).mod(
right.get_embedded_biginteger())
return BigInteger(r)
elif isinstance(right, Double):
return self._to_double().prim_modulo(right)
else:
l = self._embedded_integer
r = right.get_embedded_integer()
return Integer(l % r)
def prim_remainder(self, right):
from .double import Double
from .biginteger import BigInteger
if isinstance(right, BigInteger):
d, r = _divrem(rbigint.fromint(self._embedded_integer),
right.get_embedded_biginteger())
return BigInteger(r)
elif isinstance(right, Double):
return self._to_double().prim_remainder(right)
else:
l = self._embedded_integer
r = right.get_embedded_integer()
return Integer(llop.int_mod(lltype.Signed, l, r))
def prim_and(self, right):
from .double import Double
from .biginteger import BigInteger
if isinstance(right, BigInteger):
r = rbigint.fromint(self._embedded_integer).and_(
right.get_embedded_biginteger())
return BigInteger(r)
elif isinstance(right, Double):
return self._to_double().prim_and(right)
else:
l = self._embedded_integer
r = right.get_embedded_integer()
return Integer(l & r)
def prim_equals(self, right):
from .double import Double
from .biginteger import BigInteger
if isinstance(right, BigInteger):
result = rbigint.fromint(self._embedded_integer).eq(
right.get_embedded_biginteger())
elif isinstance(right, Double):
result = self._embedded_integer == right.get_embedded_double()
elif isinstance(right, Integer):
l = self._embedded_integer
r = right.get_embedded_integer()
result = l == r
else:
return falseObject
if result:
return trueObject
else:
return falseObject
def prim_unequals(self, right):
from .double import Double
from .biginteger import BigInteger
if isinstance(right, BigInteger):
result = rbigint.fromint(self._embedded_integer).ne(
right.get_embedded_biginteger())
elif isinstance(right, Double):
result = self._embedded_integer != right.get_embedded_double()
elif isinstance(right, Integer):
l = self._embedded_integer
r = right.get_embedded_integer()
result = l != r
else:
return trueObject
if result:
return trueObject
else:
return falseObject | 0.692642 | 0.351395 |
import os
import sys
import re
import numpy as np
import pandas as pd
level = sys.argv[1]
kankyo_fpath = sys.argv[2]
spname_fpath = sys.argv[3]
class_fpath = sys.argv[4]
output_fpath = sys.argv[5]
def mesh2gps(mesh_code):
mesh_code = str(mesh_code)
lat = int(mesh_code[0:2]) * 2 / 3
lng = int(mesh_code[2:4]) + 100
if len(mesh_code) > 4:
if len(mesh_code) >= 6:
lat += int(mesh_code[4]) * 2 / 3 / 8
lng += int(mesh_code[5]) / 8
return (lat, lng)
# get class labels (the order should be matched to image-model outputs)
class_labels = []
with open(class_fpath, 'r') as infh:
for buf in infh:
class_labels.append(buf.replace('\n', ''))
# get metadata to convert species ID to species biname
id2class = {}
with open(spname_fpath, 'r') as infh:
infh.readline()
for buf in infh:
bufs = buf.replace('\n', '').split(',')
id2class[bufs[0]] = bufs[4] + '_' + bufs[6]
# read Kankyosho public data
# and manually modifiy Kankyosho data according to rearrangement of taxonomic orders
## Rhipidolestes okinawanus: 392722, 392746, 392756, 392757, 392860, 392870
## Rhipidolestes shozoi: 392860, 392870, 402801, 402811, 402812
## Rhipidolestes amamiensis: 412857, 412867, 422922, 222932, 422933, 422944, 473002
## Rhipidolestes asatoi: 472935, 472945
## Anotogaster klossi: 362336, 362337, 362346, 362347, 362441, 362451
## Rhipidolestes yakusimensis: remove 472935, 472945, and add 473002 from the original set
## Anotogaster sieboldii: remove 362336, 362337, 362346, 362347, 362441, 362451 from the original set
fdata_mesh = []
fdata_species = []
with open(kankyo_fpath, 'r') as infh:
for buf in infh:
bufs = buf.replace('\n', '').split(',')
cl = id2class[bufs[0]]
if cl == 'Rhipidolestes_yakusimensis':
if bufs[1] in ['472935', '472945']:
print('removed: ' + cl + ' -- ' + bufs[1])
else:
fdata_mesh.append(bufs[1])
fdata_species.append(id2class[bufs[0]])
elif cl == 'Anotogaster_sieboldii':
if bufs[1] in ['362336', '362337', '362346', '362347', '362441', '362451']:
print('removed: ' + cl + ' -- ' + bufs[1])
else:
fdata_mesh.append(bufs[1])
fdata_species.append(id2class[bufs[0]])
else:
fdata_mesh.append(bufs[1])
fdata_species.append(id2class[bufs[0]])
fdata_species.extend(['Rhipidolestes_okinawanus'] * 6)
fdata_mesh.extend(['392722', '392746', '392756', '392757', '392860', '392870'])
fdata_species.extend(['Rhipidolestes_shozoi'] * 5)
fdata_mesh.extend(['392860', '392870', '402801', '402811', '402812'])
fdata_species.extend(['Rhipidolestes_amamiensis'] * 7)
fdata_mesh.extend(['412857', '412867', '422922', '222932', '422933', '422944', '473002'])
fdata_species.extend(['Rhipidolestes_asatoi'] * 2)
fdata_mesh.extend(['472935', '472945'])
fdata_species.extend(['Anotogaster_klossi'] * 6)
fdata_mesh.extend(['362336', '362337', '362346', '362347', '362441', '362451'])
fdata_species.extend(['Rhipidolestes_yakusimensis'])
fdata_mesh.extend(['473002'])
# change species name (level) to genus name (level)
if level == 'genus':
for i, spname in enumerate(fdata_species):
fdata_species[i] = spname.split('_')[0]
# mesh to lat&lng
latlng = []
for _fdata_mesh in sorted(list(set(fdata_mesh))):
latlng.append(mesh2gps(_fdata_mesh))
latlng = pd.DataFrame(latlng, columns=['lat', 'lng'],
index=sorted(list(set(fdata_mesh))))
# make appearance matrix
print(len(class_labels))
dmat = pd.DataFrame(np.zeros((len(set(fdata_mesh)), len(class_labels))))
dmat.columns = class_labels
dmat.index = sorted(list(set(fdata_mesh)))
# appearance matrix summary
dsum = pd.DataFrame(np.zeros((len(set(fdata_mesh)), len(class_labels))))
dsum.columns = class_labels
dsum.index = sorted(list(set(fdata_mesh)))
for _mesh, _species in zip(fdata_mesh, fdata_species):
if _species in class_labels:
dmat.loc[_mesh, _species] = 1
dsum.loc[_mesh, _species] += 1
dmat = pd.concat([latlng, dmat], axis=1)
dsum = dsum.sum(axis=0)
print(dsum)
# write out the data
dmat.to_csv(output_fpath, header=True, index=True, sep='\t', compression='gzip')
dsum.to_csv(output_fpath.replace('.tsv', '').replace('.gz', '') + '.summary.tsv', header=False, index=True, sep='\t') | 10.3389/fevo.2021.762173/scripts/generate_meshdataset.py | import os
import sys
import re
import numpy as np
import pandas as pd
level = sys.argv[1]
kankyo_fpath = sys.argv[2]
spname_fpath = sys.argv[3]
class_fpath = sys.argv[4]
output_fpath = sys.argv[5]
def mesh2gps(mesh_code):
mesh_code = str(mesh_code)
lat = int(mesh_code[0:2]) * 2 / 3
lng = int(mesh_code[2:4]) + 100
if len(mesh_code) > 4:
if len(mesh_code) >= 6:
lat += int(mesh_code[4]) * 2 / 3 / 8
lng += int(mesh_code[5]) / 8
return (lat, lng)
# get class labels (the order should be matched to image-model outputs)
class_labels = []
with open(class_fpath, 'r') as infh:
for buf in infh:
class_labels.append(buf.replace('\n', ''))
# get metadata to convert species ID to species biname
id2class = {}
with open(spname_fpath, 'r') as infh:
infh.readline()
for buf in infh:
bufs = buf.replace('\n', '').split(',')
id2class[bufs[0]] = bufs[4] + '_' + bufs[6]
# read Kankyosho public data
# and manually modifiy Kankyosho data according to rearrangement of taxonomic orders
## Rhipidolestes okinawanus: 392722, 392746, 392756, 392757, 392860, 392870
## Rhipidolestes shozoi: 392860, 392870, 402801, 402811, 402812
## Rhipidolestes amamiensis: 412857, 412867, 422922, 222932, 422933, 422944, 473002
## Rhipidolestes asatoi: 472935, 472945
## Anotogaster klossi: 362336, 362337, 362346, 362347, 362441, 362451
## Rhipidolestes yakusimensis: remove 472935, 472945, and add 473002 from the original set
## Anotogaster sieboldii: remove 362336, 362337, 362346, 362347, 362441, 362451 from the original set
fdata_mesh = []
fdata_species = []
with open(kankyo_fpath, 'r') as infh:
for buf in infh:
bufs = buf.replace('\n', '').split(',')
cl = id2class[bufs[0]]
if cl == 'Rhipidolestes_yakusimensis':
if bufs[1] in ['472935', '472945']:
print('removed: ' + cl + ' -- ' + bufs[1])
else:
fdata_mesh.append(bufs[1])
fdata_species.append(id2class[bufs[0]])
elif cl == 'Anotogaster_sieboldii':
if bufs[1] in ['362336', '362337', '362346', '362347', '362441', '362451']:
print('removed: ' + cl + ' -- ' + bufs[1])
else:
fdata_mesh.append(bufs[1])
fdata_species.append(id2class[bufs[0]])
else:
fdata_mesh.append(bufs[1])
fdata_species.append(id2class[bufs[0]])
fdata_species.extend(['Rhipidolestes_okinawanus'] * 6)
fdata_mesh.extend(['392722', '392746', '392756', '392757', '392860', '392870'])
fdata_species.extend(['Rhipidolestes_shozoi'] * 5)
fdata_mesh.extend(['392860', '392870', '402801', '402811', '402812'])
fdata_species.extend(['Rhipidolestes_amamiensis'] * 7)
fdata_mesh.extend(['412857', '412867', '422922', '222932', '422933', '422944', '473002'])
fdata_species.extend(['Rhipidolestes_asatoi'] * 2)
fdata_mesh.extend(['472935', '472945'])
fdata_species.extend(['Anotogaster_klossi'] * 6)
fdata_mesh.extend(['362336', '362337', '362346', '362347', '362441', '362451'])
fdata_species.extend(['Rhipidolestes_yakusimensis'])
fdata_mesh.extend(['473002'])
# change species name (level) to genus name (level)
if level == 'genus':
for i, spname in enumerate(fdata_species):
fdata_species[i] = spname.split('_')[0]
# mesh to lat&lng
latlng = []
for _fdata_mesh in sorted(list(set(fdata_mesh))):
latlng.append(mesh2gps(_fdata_mesh))
latlng = pd.DataFrame(latlng, columns=['lat', 'lng'],
index=sorted(list(set(fdata_mesh))))
# make appearance matrix
print(len(class_labels))
dmat = pd.DataFrame(np.zeros((len(set(fdata_mesh)), len(class_labels))))
dmat.columns = class_labels
dmat.index = sorted(list(set(fdata_mesh)))
# appearance matrix summary
dsum = pd.DataFrame(np.zeros((len(set(fdata_mesh)), len(class_labels))))
dsum.columns = class_labels
dsum.index = sorted(list(set(fdata_mesh)))
for _mesh, _species in zip(fdata_mesh, fdata_species):
if _species in class_labels:
dmat.loc[_mesh, _species] = 1
dsum.loc[_mesh, _species] += 1
dmat = pd.concat([latlng, dmat], axis=1)
dsum = dsum.sum(axis=0)
print(dsum)
# write out the data
dmat.to_csv(output_fpath, header=True, index=True, sep='\t', compression='gzip')
dsum.to_csv(output_fpath.replace('.tsv', '').replace('.gz', '') + '.summary.tsv', header=False, index=True, sep='\t') | 0.128963 | 0.189634 |
import sys
import matplotlib.pyplot as plt
import numpy
PLOT1 = {
'labels': [],
'uncompressed': [],
'gzip': [],
'lz4': [],
'lzma': [],
}
PLOT2 = {
'labels': [],
'uncompressed': [],
'gzip': [],
'lz4': [],
'lzma': [],
}
PLOT3 = {
'labels': [],
'uncompressed': [],
'gzip': [],
'lz4': [],
'lzma': [],
}
def unquote(string):
return string[1:-1]
fd = open(sys.argv[1], 'r')
lines = fd.readlines()
headers = list(map(unquote, lines[0].strip().split(',')))
json_id = len(lines)
for line in lines[1:]:
columns = line.strip().split(',')
id = int(columns[0])
label = unquote(columns[2].replace('\\n', '\n'))
uncompressed = int(columns[3])
gzip = int(columns[4])
lz4 = int(columns[5])
lzma = int(columns[6])
if columns[1] == 'json':
json_id = id
PLOT2['labels'].append(label)
PLOT2['uncompressed'].append(uncompressed)
PLOT2['gzip'].append(gzip)
PLOT2['lz4'].append(lz4)
PLOT2['lzma'].append(lzma)
continue
if id < json_id:
PLOT1['labels'].append(label)
PLOT1['uncompressed'].append(uncompressed)
PLOT1['gzip'].append(gzip)
PLOT1['lz4'].append(lz4)
PLOT1['lzma'].append(lzma)
else:
PLOT3['labels'].append(label)
PLOT3['uncompressed'].append(uncompressed)
PLOT3['gzip'].append(gzip)
PLOT3['lz4'].append(lz4)
PLOT3['lzma'].append(lzma)
fd.close()
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, gridspec_kw={
'width_ratios': [ len(PLOT1['labels']), len(PLOT2['labels']) + 0.5, len(PLOT3['labels']) ]
})
ax2.set_xlim(-0.8,0.8)
x1 = numpy.arange(len(PLOT1['labels']))
x2 = numpy.arange(len(PLOT2['labels']))
x3 = numpy.arange(len(PLOT3['labels']))
width = 0.21
plot1_rects1 = ax1.bar(x1 - width * 1.5, PLOT1['uncompressed'], width, label=headers[3], edgecolor='#763EB2', color='#AB63FA', hatch="oo")
plot1_rects2 = ax1.bar(x1 - width * 0.5, PLOT1['gzip'], width, label=headers[4], edgecolor='#2C8B9B', color='#15D3F3', hatch="//")
plot1_rects3 = ax1.bar(x1 + width * 0.5, PLOT1['lz4'], width, label=headers[5], edgecolor='#984C3F', color='#EF553B', hatch="..")
plot1_rects4 = ax1.bar(x1 + width * 1.5, PLOT1['lzma'], width, label=headers[6], edgecolor='#20896D', color='#00CC96', hatch="---")
plot2_rects1 = ax2.bar(x2 - width * 1.5, PLOT2['uncompressed'], width, label=headers[3], edgecolor='#763EB2', color='#AB63FA', hatch="oo")
plot2_rects2 = ax2.bar(x2 - width * 0.5, PLOT2['gzip'], width, label=headers[4], edgecolor='#2C8B9B', color='#15D3F3', hatch="//")
plot2_rects3 = ax2.bar(x2 + width * 0.5, PLOT2['lz4'], width, label=headers[5], edgecolor='#984C3F', color='#EF553B', hatch="..")
plot2_rects4 = ax2.bar(x2 + width * 1.5, PLOT2['lzma'], width, label=headers[6], edgecolor='#20896D', color='#00CC96', hatch="---")
plot3_rects1 = ax3.bar(x3 - width * 1.5, PLOT3['uncompressed'], width, label=headers[3], edgecolor='#763EB2', color='#AB63FA', hatch="oo")
plot3_rects2 = ax3.bar(x3 - width * 0.5, PLOT3['gzip'], width, label=headers[4], edgecolor='#2C8B9B', color='#15D3F3', hatch="//")
plot3_rects3 = ax3.bar(x3 + width * 0.5, PLOT3['lz4'], width, label=headers[5], edgecolor='#984C3F', color='#EF553B', hatch="..")
plot3_rects4 = ax3.bar(x3 + width * 1.5, PLOT3['lzma'], width, label=headers[6], edgecolor='#20896D', color='#00CC96', hatch="---")
ax1.grid(b=True, axis='both', linewidth=0.1)
ax2.grid(b=True, axis='both', linewidth=0.1)
ax3.grid(b=True, axis='both', linewidth=0.1)
subplot_title_font_size = 10
title_y = -0.97
ax1.set_title('Schema-driven', fontsize=subplot_title_font_size, y=title_y)
ax3.set_title('Schema-less', fontsize=subplot_title_font_size, y=title_y)
title = sys.argv[2].replace(' ', '\\ ')
subtitle = sys.argv[3]
ax1.set_ylabel('Byte Size')
fig.suptitle('$\\bf{' + title + '}$' + '\n' + subtitle, y=0.95)
ax1.set_xticks(x1)
ax2.set_xticks(x2)
ax3.set_xticks(x3)
ax1.set_xticklabels(PLOT1['labels'], ha='center')
ax2.set_xticklabels(PLOT2['labels'], ha='center', fontweight='bold')
ax3.set_xticklabels(PLOT3['labels'], ha='center')
ax1.tick_params(axis="x", rotation=90)
ax2.tick_params(axis="x", rotation=90)
ax3.tick_params(axis="x", rotation=90)
ax2.tick_params(axis="y", left=False, labelleft=False)
ax3.tick_params(axis="y", left=False, labelleft=False)
handles, legend_labels = ax1.get_legend_handles_labels()
fig.legend(handles, legend_labels, loc='upper center', ncol=4, bbox_to_anchor=(0.5, 0.88))
fontsize = 3
padding = 3
ax1.bar_label(plot1_rects1, padding=padding, fontsize=fontsize)
ax1.bar_label(plot1_rects2, padding=padding, fontsize=fontsize)
ax1.bar_label(plot1_rects3, padding=padding, fontsize=fontsize)
ax1.bar_label(plot1_rects4, padding=padding, fontsize=fontsize)
ax2.bar_label(plot2_rects1, padding=padding, fontsize=fontsize)
ax2.bar_label(plot2_rects2, padding=padding, fontsize=fontsize)
ax2.bar_label(plot2_rects3, padding=padding, fontsize=fontsize)
ax2.bar_label(plot2_rects4, padding=padding, fontsize=fontsize)
ax3.bar_label(plot3_rects1, padding=padding, fontsize=fontsize)
ax3.bar_label(plot3_rects2, padding=padding, fontsize=fontsize)
ax3.bar_label(plot3_rects3, padding=padding, fontsize=fontsize)
ax3.bar_label(plot3_rects4, padding=padding, fontsize=fontsize)
dash_spacing = 4
ax1.spines['right'].set_linestyle((0,(dash_spacing,dash_spacing)))
ax2.spines['left'].set_linestyle((0,(dash_spacing,dash_spacing)))
ax2.spines['right'].set_linestyle((0,(dash_spacing,dash_spacing)))
ax3.spines['left'].set_linestyle((0,(dash_spacing,dash_spacing)))
fig.tight_layout()
fig.subplots_adjust(wspace=0)
fig.set_figheight(5)
fig.set_figwidth(10)
plt.subplots_adjust(top=0.79, bottom=0.40, left=0.07, right=0.97)
plt.savefig(sys.argv[4], dpi=500) | plot.py | import sys
import matplotlib.pyplot as plt
import numpy
PLOT1 = {
'labels': [],
'uncompressed': [],
'gzip': [],
'lz4': [],
'lzma': [],
}
PLOT2 = {
'labels': [],
'uncompressed': [],
'gzip': [],
'lz4': [],
'lzma': [],
}
PLOT3 = {
'labels': [],
'uncompressed': [],
'gzip': [],
'lz4': [],
'lzma': [],
}
def unquote(string):
return string[1:-1]
fd = open(sys.argv[1], 'r')
lines = fd.readlines()
headers = list(map(unquote, lines[0].strip().split(',')))
json_id = len(lines)
for line in lines[1:]:
columns = line.strip().split(',')
id = int(columns[0])
label = unquote(columns[2].replace('\\n', '\n'))
uncompressed = int(columns[3])
gzip = int(columns[4])
lz4 = int(columns[5])
lzma = int(columns[6])
if columns[1] == 'json':
json_id = id
PLOT2['labels'].append(label)
PLOT2['uncompressed'].append(uncompressed)
PLOT2['gzip'].append(gzip)
PLOT2['lz4'].append(lz4)
PLOT2['lzma'].append(lzma)
continue
if id < json_id:
PLOT1['labels'].append(label)
PLOT1['uncompressed'].append(uncompressed)
PLOT1['gzip'].append(gzip)
PLOT1['lz4'].append(lz4)
PLOT1['lzma'].append(lzma)
else:
PLOT3['labels'].append(label)
PLOT3['uncompressed'].append(uncompressed)
PLOT3['gzip'].append(gzip)
PLOT3['lz4'].append(lz4)
PLOT3['lzma'].append(lzma)
fd.close()
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, gridspec_kw={
'width_ratios': [ len(PLOT1['labels']), len(PLOT2['labels']) + 0.5, len(PLOT3['labels']) ]
})
ax2.set_xlim(-0.8,0.8)
x1 = numpy.arange(len(PLOT1['labels']))
x2 = numpy.arange(len(PLOT2['labels']))
x3 = numpy.arange(len(PLOT3['labels']))
width = 0.21
plot1_rects1 = ax1.bar(x1 - width * 1.5, PLOT1['uncompressed'], width, label=headers[3], edgecolor='#763EB2', color='#AB63FA', hatch="oo")
plot1_rects2 = ax1.bar(x1 - width * 0.5, PLOT1['gzip'], width, label=headers[4], edgecolor='#2C8B9B', color='#15D3F3', hatch="//")
plot1_rects3 = ax1.bar(x1 + width * 0.5, PLOT1['lz4'], width, label=headers[5], edgecolor='#984C3F', color='#EF553B', hatch="..")
plot1_rects4 = ax1.bar(x1 + width * 1.5, PLOT1['lzma'], width, label=headers[6], edgecolor='#20896D', color='#00CC96', hatch="---")
plot2_rects1 = ax2.bar(x2 - width * 1.5, PLOT2['uncompressed'], width, label=headers[3], edgecolor='#763EB2', color='#AB63FA', hatch="oo")
plot2_rects2 = ax2.bar(x2 - width * 0.5, PLOT2['gzip'], width, label=headers[4], edgecolor='#2C8B9B', color='#15D3F3', hatch="//")
plot2_rects3 = ax2.bar(x2 + width * 0.5, PLOT2['lz4'], width, label=headers[5], edgecolor='#984C3F', color='#EF553B', hatch="..")
plot2_rects4 = ax2.bar(x2 + width * 1.5, PLOT2['lzma'], width, label=headers[6], edgecolor='#20896D', color='#00CC96', hatch="---")
plot3_rects1 = ax3.bar(x3 - width * 1.5, PLOT3['uncompressed'], width, label=headers[3], edgecolor='#763EB2', color='#AB63FA', hatch="oo")
plot3_rects2 = ax3.bar(x3 - width * 0.5, PLOT3['gzip'], width, label=headers[4], edgecolor='#2C8B9B', color='#15D3F3', hatch="//")
plot3_rects3 = ax3.bar(x3 + width * 0.5, PLOT3['lz4'], width, label=headers[5], edgecolor='#984C3F', color='#EF553B', hatch="..")
plot3_rects4 = ax3.bar(x3 + width * 1.5, PLOT3['lzma'], width, label=headers[6], edgecolor='#20896D', color='#00CC96', hatch="---")
ax1.grid(b=True, axis='both', linewidth=0.1)
ax2.grid(b=True, axis='both', linewidth=0.1)
ax3.grid(b=True, axis='both', linewidth=0.1)
subplot_title_font_size = 10
title_y = -0.97
ax1.set_title('Schema-driven', fontsize=subplot_title_font_size, y=title_y)
ax3.set_title('Schema-less', fontsize=subplot_title_font_size, y=title_y)
title = sys.argv[2].replace(' ', '\\ ')
subtitle = sys.argv[3]
ax1.set_ylabel('Byte Size')
fig.suptitle('$\\bf{' + title + '}$' + '\n' + subtitle, y=0.95)
ax1.set_xticks(x1)
ax2.set_xticks(x2)
ax3.set_xticks(x3)
ax1.set_xticklabels(PLOT1['labels'], ha='center')
ax2.set_xticklabels(PLOT2['labels'], ha='center', fontweight='bold')
ax3.set_xticklabels(PLOT3['labels'], ha='center')
ax1.tick_params(axis="x", rotation=90)
ax2.tick_params(axis="x", rotation=90)
ax3.tick_params(axis="x", rotation=90)
ax2.tick_params(axis="y", left=False, labelleft=False)
ax3.tick_params(axis="y", left=False, labelleft=False)
handles, legend_labels = ax1.get_legend_handles_labels()
fig.legend(handles, legend_labels, loc='upper center', ncol=4, bbox_to_anchor=(0.5, 0.88))
fontsize = 3
padding = 3
ax1.bar_label(plot1_rects1, padding=padding, fontsize=fontsize)
ax1.bar_label(plot1_rects2, padding=padding, fontsize=fontsize)
ax1.bar_label(plot1_rects3, padding=padding, fontsize=fontsize)
ax1.bar_label(plot1_rects4, padding=padding, fontsize=fontsize)
ax2.bar_label(plot2_rects1, padding=padding, fontsize=fontsize)
ax2.bar_label(plot2_rects2, padding=padding, fontsize=fontsize)
ax2.bar_label(plot2_rects3, padding=padding, fontsize=fontsize)
ax2.bar_label(plot2_rects4, padding=padding, fontsize=fontsize)
ax3.bar_label(plot3_rects1, padding=padding, fontsize=fontsize)
ax3.bar_label(plot3_rects2, padding=padding, fontsize=fontsize)
ax3.bar_label(plot3_rects3, padding=padding, fontsize=fontsize)
ax3.bar_label(plot3_rects4, padding=padding, fontsize=fontsize)
dash_spacing = 4
ax1.spines['right'].set_linestyle((0,(dash_spacing,dash_spacing)))
ax2.spines['left'].set_linestyle((0,(dash_spacing,dash_spacing)))
ax2.spines['right'].set_linestyle((0,(dash_spacing,dash_spacing)))
ax3.spines['left'].set_linestyle((0,(dash_spacing,dash_spacing)))
fig.tight_layout()
fig.subplots_adjust(wspace=0)
fig.set_figheight(5)
fig.set_figwidth(10)
plt.subplots_adjust(top=0.79, bottom=0.40, left=0.07, right=0.97)
plt.savefig(sys.argv[4], dpi=500) | 0.231006 | 0.459015 |
import web3
import util
import client
import pytest
import json
# --- reference values: Bitcoin block 300000 header -------------------------
# Raw 80-byte header as a hex string, plus every field decoded both as a
# Python value and as its raw little-endian wire bytes.
hdr = "020000007ef055e1674d2e6551dba41cd214debbee34aeb544c7ec670000000000000000d3998963f80c5bab43fe8c26228e98d030edf4dcbe48a666f5c39e2d7a885c9102c86d536c890019593a470d"
hdr_hex = int(hdr, 16)
hdr_bytes = bytes.fromhex(hdr)  # 80-byte raw header
hdr_hash = '000000000000000082ccf8f1557c5d40b21edabb18d2d691cfbf87118bac7254'
# nVersion (bytes 0-3)
hdr_nVersion_int = 2
hdr_nVersion_raw_bytes = b'\x02\x00\x00\x00'
# hashPrevBlock (bytes 4-35); the hex string is the display (byte-swapped) form
hdr_hashPrevBlock_str = '000000000000000067ecc744b5ae34eebbde14d21ca4db51652e4d67e155f07e'
hdr_hashPrevBlock_raw_bytes = b'~\xf0U\xe1gM.eQ\xdb\xa4\x1c\xd2\x14\xde\xbb\xee4\xae\xb5D\xc7\xecg\x00\x00\x00\x00\x00\x00\x00\x00'
# hashMerkleRoot (bytes 36-67)
hdr_hashMerkleRoot_str = '915c887a2d9ec3f566a648bedcf4ed30d0988e22268cfe43ab5b0cf8638999d3'
hdr_hashMerkleRoot_raw_bytes = b'\xd3\x99\x89c\xf8\x0c[\xabC\xfe\x8c&"\x8e\x98\xd00\xed\xf4\xdc\xbeH\xa6f\xf5\xc3\x9e-z\x88\\\x91'
# nTime (bytes 68-71)
hdr_nTime_int = 1399703554
hdr_nTime_raw_bytes = b'\x02\xc8mS'
# nBits (bytes 72-75)
hdr_nBits_int = 419465580
hdr_nBits_raw_bytes = b'l\x89\x00\x19'
# nNonce (bytes 76-79)
hdr_nNonce_int = 222771801
hdr_nNonce_raw_bytes = b'Y:G\r'
# ---------------------------------------------------------------------------
def test_util():
    """Sanity-check the byte-order and hex-conversion helpers of `client`."""
    # endSwap reverses byte order and preserves the input container type.
    for raw, flipped in ((b"\x01\x00", b"\x00\x01"),
                         (bytearray(b"\x01\x00"), bytearray(b"\x00\x01"))):
        assert client.endSwap(raw) == flipped
    # bytes -> hex string (byte-swapped by default, verbatim with swap=False).
    assert client.dbytes_to_hexstr(hdr_hashPrevBlock_raw_bytes) == hdr_hashPrevBlock_str
    assert client.dbytes_to_hexstr(b"A\x0f") == "0f41"
    assert client.dbytes_to_hexstr(b"A\x0f", swap=False) == "410f"
    # hex string -> bytes, with and without a "0x" prefix.
    assert client.hexstr_to_dbytes(hdr_hashPrevBlock_str) == hdr_hashPrevBlock_raw_bytes
    for prefix in ("0x", ""):
        assert client.hexstr_to_dbytes(prefix + "f41") == b"A\x0f"
        assert client.hexstr_to_dbytes(prefix + "f41", swap=False) == b"\x0fA"
    # double-SHA256: hex-string, raw-bytes and integer output forms agree.
    assert client.dSHA256(hdr_bytes) == hdr_hash
    assert client.dSHA256(hdr_bytes, raw=True) == client.hexstr_to_dbytes(hdr_hash)
    assert client.dSHA256(hdr_bytes, raw=True, num=True) == \
        client.dbytes_to_int(client.hexstr_to_dbytes(hdr_hash))
def test_init_hdr():
    """Construct a BtcBlk from the raw header hex string and check each field.

    Every field is exposed three ways: as a plain attribute, via its getter,
    and via the getter's raw little-endian bytes form.
    """
    bb = client.BtcBlk(hdr=hdr)
    field_checks = [
        (bb.nVersion, bb.get_nVersion, hdr_nVersion_int, hdr_nVersion_raw_bytes),
        (bb.hashPrevBlock, bb.get_hashPrevBlock, hdr_hashPrevBlock_str, hdr_hashPrevBlock_raw_bytes),
        (bb.hashMerkleRoot, bb.get_hashMerkleRoot, hdr_hashMerkleRoot_str, hdr_hashMerkleRoot_raw_bytes),
        (bb.nTime, bb.get_nTime, hdr_nTime_int, hdr_nTime_raw_bytes),
        (bb.nBits, bb.get_nBits, hdr_nBits_int, hdr_nBits_raw_bytes),
        (bb.nNonce, bb.get_nNonce, hdr_nNonce_int, hdr_nNonce_raw_bytes),
    ]
    for attr_value, getter, want, want_raw in field_checks:
        assert attr_value == want
        assert getter() == want
        assert getter(raw=True) == want_raw
    # Whole-header round trips: block hash, hex string and raw bytes.
    assert bb.hash == hdr_hash
    assert str(bb) == hdr
    assert bb.get_hdr(outputformat="bytes") == hdr_bytes
def test_init_values():
    """Construct a BtcBlk from individual field values and check each field.

    Mirrors test_init_hdr but builds the block from keyword arguments rather
    than the raw header, so serialization must reproduce the same header.
    """
    bb = client.BtcBlk(nVersion=hdr_nVersion_int,
                       hashPrevBlock=hdr_hashPrevBlock_str,
                       hashMerkleRoot=hdr_hashMerkleRoot_str,
                       nTime=hdr_nTime_int,
                       nBits=hdr_nBits_int,
                       nNonce=hdr_nNonce_int)
    field_checks = [
        (bb.nVersion, bb.get_nVersion, hdr_nVersion_int, hdr_nVersion_raw_bytes),
        (bb.hashPrevBlock, bb.get_hashPrevBlock, hdr_hashPrevBlock_str, hdr_hashPrevBlock_raw_bytes),
        (bb.hashMerkleRoot, bb.get_hashMerkleRoot, hdr_hashMerkleRoot_str, hdr_hashMerkleRoot_raw_bytes),
        (bb.nTime, bb.get_nTime, hdr_nTime_int, hdr_nTime_raw_bytes),
        (bb.nBits, bb.get_nBits, hdr_nBits_int, hdr_nBits_raw_bytes),
        (bb.nNonce, bb.get_nNonce, hdr_nNonce_int, hdr_nNonce_raw_bytes),
    ]
    for attr_value, getter, want, want_raw in field_checks:
        assert attr_value == want
        assert getter() == want
        assert getter(raw=True) == want_raw
    # Whole-header round trips: block hash, hex string and raw bytes.
    assert bb.hash == hdr_hash
    assert str(bb) == hdr
    assert bb.get_hdr(outputformat="bytes") == hdr_bytes
def test_mrkl_root():
    """Verify Merkle-root computation against two sample blocks.

    Fix: in the original, the root loaded for block 300000 was assigned but
    never used — the assert checked the module-level hdr_hashMerkleRoot_str
    instead (identical for block 300000, so behavior is unchanged, but the
    intent is clearer when the file's own root is verified).
    """
    def load_tx_hashes(path):
        # Load a sample block JSON; return (merkle root hex, tx hash bytes).
        with open(path) as json_file:
            data = json.load(json_file)
        return data["mrkl_root"], client.tx_hashes_to_dbytes(
            [tx["hash"] for tx in data["tx"]])

    # Block 300000 — same block as the hdr_* reference constants above.
    root, tx_hashes_bytes = load_tx_hashes('../testdata/btc_blocks_json_samples/300000')
    assert root == hdr_hashMerkleRoot_str  # sanity: sample matches the header constant
    assert client.vrfy_mrkl_root(tx_hashes_bytes, root)
    # Block 100014.
    root, tx_hashes_bytes = load_tx_hashes('../testdata/btc_blocks_json_samples/100014')
    assert client.vrfy_mrkl_root(tx_hashes_bytes, root)
def test_vrfy_mrkl_block():
    """SPV-style check: verify one tx hash against a partial merkle block."""
    txids_big_endian = [
        0x3612262624047ee87660be1a707519a443b1c1ce3d248cbfc6c15870f6c5daa2,
        0x019f5b01d4195ecbc9398fbf3c3b1fa9bb3183301d7a1fb3bd174fcfa40a2b65,
        0x41ed70551dd7e841883ab8f0b16bf04176b7d1480e4f0af9f3d4c3595768d068,
        0x20d2a7bc994987302e5b1ac80fc425fe25f8b63169ea78e68fbaaefa59379bbf,
    ]
    # Leaf hashes in the order the partial merkle tree expects (reversed).
    leaf_hashes = [txid.to_bytes(32, "big") for txid in reversed(txids_big_endian)]
    mrkl_block = {"hashMerkleRoot": "7f16c5962e8bd963659c793ce370d95f093bc7e367117b3c30c1f8fdd0d97287",
                  "tx_count": 7,
                  "tx_hashes": leaf_hashes,
                  "flag_bytes": 1,
                  "flags": [0, 0, 0, 1, 1, 1, 0, 1]}
    # The tx we want to prove is contained in the block.
    target = 0x019f5b01d4195ecbc9398fbf3c3b1fa9bb3183301d7a1fb3bd174fcfa40a2b65
    assert client.vrfy_mrkl_block(tx_hash=target.to_bytes(32, "big"),
                                  mrkl_block=mrkl_block)
def test_verfy_mrkl_paths():
    """Generate a Merkle path for one tx hash and verify it round-trips.

    Fix: the original assigned `shash` twice with the same literal (once
    before generation, once before verification); the dead reassignment is
    removed.
    """
    with open('../testdata/btc_blocks_json_samples/100014') as json_file:
        data = json.load(json_file)
    hashMerkleRoot = data["mrkl_root"]
    tx_hashes_bytes = client.tx_hashes_to_dbytes([tx["hash"] for tx in data["tx"]])
    assert client.vrfy_mrkl_root(tx_hashes_bytes, hashMerkleRoot)
    # Generate the Merkle path (mpath + flags) for one tx hash and check that
    # the root produced during generation still equals hashMerkleRoot.
    mpath = []
    flags = []
    shash = "652b0aa4cf4f17bdb31f7a1d308331bba91f3b3cbf8f39c9cb5e19d4015b9f01"
    result = client.mrkl_root_path(tx_hashes_bytes,
                                   shash=shash,
                                   mpath=mpath,
                                   flags=flags)
    # result["value"] is raw bytes; byte-swap before comparing with the hex root.
    assert int(result["value"].hex(), 16).to_bytes(32, "little").hex() == hashMerkleRoot
    # Verify the generated path independently (pass copies: verification may
    # consume the lists).
    assert client.vrfy_root_path(hashMerkleRoot, shash, mpath.copy(), flags.copy())
def test_parse_blk_cb():
    """Parse only the coinbase tx (tx_n=1) of raw block 603268 and compare
    every parsed field against golden values taken from the real block."""
    with open('../testdata/btc_blocks_json_samples/603268.raw') as json_file:
        data = json.load(json_file)
        blk_raw_hex = data["rawblock"]
        blk_raw = bytes.fromhex(blk_raw_hex)
    # Sanity: first 80 bytes are the expected block header.
    assert blk_raw[:80].hex() == '00000020c39def44778136d6d70b610502449d7b77a94d4eff571100000000000000000074e2232b5c3121a3c8473c9db5269c9f39fd1a69e3dc37958b1670c0a24c82f4db0dc95dd12016176971f64f'
    # tx_n=1 limits parsing to the first (coinbase) transaction.
    bblk = client.BtcBlk(blk=blk_raw,tx_n=1)
    # Block-level fields: header/data split and varint tx count.
    assert bblk.hdr == blk_raw[:80]
    assert bblk.data == blk_raw[80:]
    assert bblk.tx_count == 2312
    assert bblk.tx_count_raw.hex() == "fd0809"
    assert len(bblk.txs) == 1
    # Coinbase transaction fields.
    cbtx = bblk.txs[0]
    assert cbtx.nVersion == 1
    assert cbtx.nVersion_raw.hex() == '01000000'
    assert cbtx.flag == None
    assert cbtx.tx_in_cnt == 1 and cbtx.tx_in_cnt == len(cbtx.tx_in)
    assert cbtx.tx_in_cnt_raw.hex() == '01'
    assert cbtx.tx_out_cnt == 3 and cbtx.tx_out_cnt == len(cbtx.tx_out)
    assert cbtx.tx_out_cnt_raw.hex() == '03'
    assert cbtx.nLockTime == 1133291890
    assert cbtx.nLockTime_raw.hex() == '72a98c43'
    # Sole input: coinbase marker (null prev hash, index 0xffffffff).
    txin = cbtx.tx_in[0]
    assert txin.prev_output_raw.hex() == '0000000000000000000000000000000000000000000000000000000000000000ffffffff'
    assert txin.prev_txhash.hex() == '0000000000000000000000000000000000000000000000000000000000000000'
    assert txin.prev_txidx == 4294967295
    assert txin.prev_txidx_raw.hex() == 'ffffffff'
    assert txin.script_len == 95
    assert txin.script_len_raw.hex() == '5f'
    assert txin.script_sig.hex() == '0384340904d30dc95d2f706f6f6c696e2e636f6d2ffabe6d6d97e21604204ac2a8e72201137d16c82253498af55de5432ff9cbde84d5e63ba20100000000000000b578094a09af6006dbcc9db78000f0c20e8b0f355a003a0000fe00000000'
    assert txin.sequence == 4294967295
    assert txin.sequence_raw.hex() == 'ffffffff'
    # Output 0: block reward payout (P2SH script).
    txout = cbtx.tx_out[0]
    assert txout.value == 1272268104
    assert txout.value_raw.hex() == '4845d54b00000000'
    assert txout.script_len == 23
    assert txout.script_len_raw.hex() == '17'
    assert txout.script_pk.hex() == 'a914b111f00eed1a8123dd0a1fed50b0793229ed47e787'
    # Output 1: zero-value OP_RETURN output.
    txout = cbtx.tx_out[1]
    assert txout.value == 0
    assert txout.value_raw.hex() == '0000000000000000'
    assert txout.script_len == 38
    assert txout.script_len_raw.hex() == '26'
    assert txout.script_pk.hex() == '6a24b9e11b6db0bac66f0f2a2714d384501c639ce147d1c61f482e5c98e43c9a6168d507aecc'
    # Output 2: zero-value OP_RETURN output.
    txout = cbtx.tx_out[2]
    assert txout.value == 0
    assert txout.value_raw.hex() == '0000000000000000'
    assert txout.script_len == 38
    assert txout.script_len_raw.hex() == '26'
    assert txout.script_pk.hex() == '6a24aa21a9ed6b6dd1678f89692e705ec9de8c06a2a0a9fd58d437a39c2878433248aeee7a65'
    # Full serialized coinbase tx and its double-SHA256 txid.
    assert cbtx.txb.hex() == '01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff5f0384340904d30dc95d2f706f6f6c696e2e636f6d2ffabe6d6d97e21604204ac2a8e72201137d16c82253498af55de5432ff9cbde84d5e63ba20100000000000000b578094a09af6006dbcc9db78000f0c20e8b0f355a003a0000fe00000000ffffffff034845d54b0000000017a914b111f00eed1a8123dd0a1fed50b0793229ed47e7870000000000000000266a24b9e11b6db0bac66f0f2a2714d384501c639ce147d1c61f482e5c98e43c9a6168d507aecc0000000000000000266a24aa21a9ed6b6dd1678f89692e705ec9de8c06a2a0a9fd58d437a39c2878433248aeee7a6572a98c43'
    assert cbtx.txhash == "de612b874b23a78805ed022f55befbc94d12e2e78208d1d6d560df1d998451cb"
def test_parse_blk():
    """Parse every tx of raw block 603268 and re-verify its Merkle root."""
    with open('../testdata/btc_blocks_json_samples/603268.raw') as json_file:
        data = json.load(json_file)
    blk_raw = bytes.fromhex(data["rawblock"])
    # Sanity: first 80 bytes are the expected block header.
    assert blk_raw[:80].hex() == '00000020c39def44778136d6d70b610502449d7b77a94d4eff571100000000000000000074e2232b5c3121a3c8473c9db5269c9f39fd1a69e3dc37958b1670c0a24c82f4db0dc95dd12016176971f64f'
    # No tx_n limit: parse all transactions.
    bblk = client.BtcBlk(blk=blk_raw)
    txhashes = [client.hexstr_to_dbytes(tx.txhash) for tx in bblk.txs]
    assert client.vrfy_mrkl_root(txhashes, "f4824ca2c070168b9537dce3691afd399f9c26b59d3c47c8a321315c2b23e274")
def test_parse_coinbase():
    """parse_coinbase() on block 603268 must yield the height, the coinbase
    script payload, and prefix/payload/suffix parts that re-hash to the txid."""
    with open('../testdata/btc_blocks_json_samples/603268.raw') as json_file:
        data = json.load(json_file)
        blk_raw_hex = data["rawblock"]
        blk_raw = bytes.fromhex(blk_raw_hex)
        # sanity check on the serialized 80-byte header
        assert blk_raw[:80].hex() == '00000020c39def44778136d6d70b610502449d7b77a94d4eff571100000000000000000074e2232b5c3121a3c8473c9db5269c9f39fd1a69e3dc37958b1670c0a24c82f4db0dc95dd12016176971f64f'
        # tx_n=1: only the first (coinbase) transaction is parsed
        bblk = client.BtcBlk(blk=blk_raw,tx_n=1)
        cb = bblk.txs[0]
        rslt = cb.parse_coinbase()
        assert rslt is not None
        assert rslt["blk_height"] == 603268
        assert rslt["coinbase"] == b'\x04\xd3\r\xc9]/poolin.com/\xfa\xbemm\x97\xe2\x16\x04 J\xc2\xa8\xe7"\x01\x13}\x16\xc8"SI\x8a\xf5]\xe5C/\xf9\xcb\xde\x84\xd5\xe6;\xa2\x01\x00\x00\x00\x00\x00\x00\x00\xb5x\tJ\t\xaf`\x06\xdb\xcc\x9d\xb7\x80\x00\xf0\xc2\x0e\x8b\x0f5Z\x00:\x00\x00\xfe\x00\x00\x00\x00'
        # serialized tx hashes back to the stored txid
        cb_raw = cb.get_tx("bytes")
        cb_raw_hash = client.dSHA256(cb_raw)
        assert cb.txhash == cb_raw_hash
        # reassembling prefix + coinbase + suffix gives the same txid
        cb_raw = rslt["coinbasetx_prefix"] + rslt["coinbase_full"] + rslt["coinbasetx_suffix"]
        cb_raw_hash = client.dSHA256(cb_raw)
        assert cb.txhash == cb_raw_hash
def test_nBits_to_Target():
    """Compact 4-byte nBits encoding -> 256-bit target, incl. rejected inputs."""
    assert(client.nBits_to_Target(b"\x18\x1b\xc3\x30") == 0x1bc330000000000000000000000000000000000000000000)
    assert(client.nBits_to_Target(b"\x05\x00\x92\x34") == 0x92340000)
    assert(client.nBits_to_Target(b"\x01\x00\x34\x56") == 0x00)
    assert(client.nBits_to_Target(b"\x01\x12\x34\x56") == 0x12)
    assert(client.nBits_to_Target(b"\x02\x00\x80\x00") == 0x80)
    assert(client.nBits_to_Target(b"\x04\x12\x34\x56") == 0x12345600)
    assert(client.nBits_to_Target(b"\x02\x12\x34\x56") == 0x1234)
    assert(client.nBits_to_Target(b"\x03\x12\x34\x56") == 0x123456)
    assert(client.nBits_to_Target(b"\x04\x12\x34\x56") == 0x12345600)
    assert(client.nBits_to_Target(b"\x20\x12\x34\x56") == 0x1234560000000000000000000000000000000000000000000000000000000000)
    assert(client.nBits_to_Target(b"\x20\x7f\xff\xff") == 0x7fffff0000000000000000000000000000000000000000000000000000000000)
    # a mantissa with the sign bit set must be rejected
    # (exception name keeps the client module's existing spelling)
    with pytest.raises(client.NBitsDecodingExcpetion):
        client.nBits_to_Target(b"\x04\x92\x34\x56") == 0x12345600
    with pytest.raises(client.NBitsDecodingExcpetion):
        client.nBits_to_Target(b"\x01\xfe\xdc\xba") == 0x7e
    # encoding tests:
    #assert(client.nBits_to_Target(b"\x04\x92\x34\x56") == 0x12345600) #8 # high bit set
    #assert(client.nBits_to_Target(b"\x01\xfe\xdc\xba") == 0x7e) #9 # high bit set
def test_within_difficulty_period():
    """within_difficulty_period over heights around the 2016-block boundary."""
    cases = [
        ((0, 2015), True),
        ((1, 2015), True),
        ((0, 2016), False),
        ((0, 2017), False),
        ((2015, 2017), False),
        ((2016, 2017), True),
    ]
    for (start, height), expected in cases:
        assert client.within_difficulty_period(start, height) == expected
def test_replace_bytes():
    """replace_found_bytes / replace_at_offset must zero out the merkle-root
    field of a real 80-byte header and then restore the original header.

    Fix: the final assert carried stray dataset-delimiter text
    ("| code/client/test_BtcBlk.py | import web3") that made the line a
    syntax error; the plain comparison is restored.
    """
    old_hdr = b'\x02\x00\x00\x00Tr\xac\x8b\x11\x87\xbf\xcf\x91\xd6\xd2\x18\xbb\xda\x1e\xb2@]|U\xf1\xf8\xcc\x82\x00\x00\x00\x00\x00\x00\x00\x00\xab\n\xaa7|\xa3\xf4\x9b\x15E\xe2\xaek\x06g\xa0\x8fB\xe7-\x8c$\xae#q@\xe2\x8f\x14\xf3\xbb|k\xccmSl\x89\x00\x19\xed\xd8<\xcf'
    assert client.dSHA256(old_hdr) == '000000000000000049a0914d83df36982c77ac1f65ade6a52bdced2ce312aba9'
    mroot = b'\xab\n\xaa7|\xa3\xf4\x9b\x15E\xe2\xaek\x06g\xa0\x8fB\xe7-\x8c$\xae#q@\xe2\x8f\x14\xf3\xbb|'
    # replace_found_bytes: locate the merkle root and overwrite it with zeros
    new_hdr = client.replace_found_bytes(old_hdr, mroot)
    assert new_hdr == b'\x02\x00\x00\x00Tr\xac\x8b\x11\x87\xbf\xcf\x91\xd6\xd2\x18\xbb\xda\x1e\xb2@]|U\xf1\xf8\xcc\x82\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00k\xccmSl\x89\x00\x19\xed\xd8<\xcf'
    assert client.dSHA256(new_hdr) == '7bea60b080663b91ccc10ca231e72007eb6bc97c7e2626085a0594cbbb59e933'
    # replace_at_offset at the merkle-root offset must give the same header
    offset = client.NVERSION_LEN + client.HASHPREVBLOCK_LEN
    other_hdr = client.replace_at_offset(old_hdr, offset, replace=32)
    assert client.dSHA256(other_hdr) == '7bea60b080663b91ccc10ca231e72007eb6bc97c7e2626085a0594cbbb59e933'
    # writing the merkle root back restores the original header
    original_hdr = client.replace_at_offset(other_hdr, offset, replace=mroot)
    assert original_hdr == old_hdr
import util
import client
import pytest
import json
# --- test values ---
# Serialized 80-byte mainnet block header (hex) used across the tests;
# per the sample data this appears to be block 300000 — TODO confirm.
hdr = "020000007ef055e1674d2e6551dba41cd214debbee34aeb544c7ec670000000000000000d3998963f80c5bab43fe8c26228e98d030edf4dcbe48a666f5c39e2d7a885c9102c86d536c890019593a470d"
hdr_hex = int(hdr,16)
hdr_bytes = hdr_hex.to_bytes(80,"big")
# Expected double-SHA256 of the header, display (big-endian) byte order.
hdr_hash = '000000000000000082ccf8f1557c5d40b21edabb18d2d691cfbf87118bac7254'
# Decoded header fields: int/str value plus the raw serialized bytes.
hdr_nVersion_int = 2
hdr_nVersion_raw_bytes = b'\x02\x00\x00\x00'
hdr_hashPrevBlock_str = '000000000000000067ecc744b5ae34eebbde14d21ca4db51652e4d67e155f07e'
hdr_hashPrevBlock_raw_bytes = b'~\xf0U\xe1gM.eQ\xdb\xa4\x1c\xd2\x14\xde\xbb\xee4\xae\xb5D\xc7\xecg\x00\x00\x00\x00\x00\x00\x00\x00'
hdr_hashMerkleRoot_str = '915c887a2d9ec3f566a648bedcf4ed30d0988e22268cfe43ab5b0cf8638999d3'
hdr_hashMerkleRoot_raw_bytes = b'\xd3\x99\x89c\xf8\x0c[\xabC\xfe\x8c&"\x8e\x98\xd00\xed\xf4\xdc\xbeH\xa6f\xf5\xc3\x9e-z\x88\\\x91'
hdr_nTime_int = 1399703554
hdr_nTime_raw_bytes = b'\x02\xc8mS'
hdr_nBits_int = 419465580
hdr_nBits_raw_bytes = b'l\x89\x00\x19'
hdr_nNonce_int = 222771801
hdr_nNonce_raw_bytes = b'Y:G\r'
# -------------------
def test_util():
    """Byte-order swap, hex<->bytes helpers and dSHA256 against known values."""
    assert client.endSwap(b"\x01\x00") == b"\x00\x01"
    assert client.endSwap(bytearray(b"\x01\x00")) == bytearray(b"\x00\x01")
    # hex helpers swap byte order by default (Bitcoin display convention)
    assert client.dbytes_to_hexstr(hdr_hashPrevBlock_raw_bytes) == hdr_hashPrevBlock_str
    assert client.dbytes_to_hexstr(b"A\x0f") == "0f41"
    assert client.dbytes_to_hexstr(b"A\x0f",swap=False) == "410f"
    assert client.hexstr_to_dbytes(hdr_hashPrevBlock_str) == hdr_hashPrevBlock_raw_bytes
    assert client.hexstr_to_dbytes("0xf41") == b"A\x0f"
    assert client.hexstr_to_dbytes("f41") == b"A\x0f"
    assert client.hexstr_to_dbytes("0xf41",swap=False) == b"\x0fA"
    assert client.hexstr_to_dbytes("f41",swap=False) == b"\x0fA"
    # dSHA256 output flavors: hex string, raw bytes, integer
    assert client.dSHA256(hdr_bytes) == hdr_hash
    assert client.dSHA256(hdr_bytes,raw=True) == client.hexstr_to_dbytes(hdr_hash)
    assert client.dSHA256(hdr_bytes,raw=True,num=True) == client.dbytes_to_int(client.hexstr_to_dbytes(hdr_hash))
def test_init_hdr():
    """Constructing BtcBlk from a raw header hex string decodes every field."""
    bb = client.BtcBlk(hdr=hdr)
    assert bb.nVersion == hdr_nVersion_int
    assert bb.get_nVersion() == hdr_nVersion_int
    assert bb.get_nVersion(raw=True) == hdr_nVersion_raw_bytes
    assert bb.hashPrevBlock == hdr_hashPrevBlock_str
    assert bb.get_hashPrevBlock() == hdr_hashPrevBlock_str
    assert bb.get_hashPrevBlock(raw=True) == hdr_hashPrevBlock_raw_bytes
    assert bb.hashMerkleRoot == hdr_hashMerkleRoot_str
    assert bb.get_hashMerkleRoot() == hdr_hashMerkleRoot_str
    assert bb.get_hashMerkleRoot(raw=True) == hdr_hashMerkleRoot_raw_bytes
    assert bb.nTime == hdr_nTime_int
    assert bb.get_nTime() == hdr_nTime_int
    assert bb.get_nTime(raw=True) == hdr_nTime_raw_bytes
    assert bb.nBits == hdr_nBits_int
    assert bb.get_nBits() == hdr_nBits_int
    assert bb.get_nBits(raw=True) == hdr_nBits_raw_bytes
    assert bb.nNonce == hdr_nNonce_int
    assert bb.get_nNonce() == hdr_nNonce_int
    assert bb.get_nNonce(raw=True) == hdr_nNonce_raw_bytes
    # hash and round-trip serialization
    assert hdr_hash == bb.hash
    assert str(bb) == hdr
    assert bb.get_hdr(outputformat="bytes") == hdr_bytes
def test_init_values():
    """Constructing BtcBlk from individual field values matches the parsed header."""
    bb = client.BtcBlk(nVersion = hdr_nVersion_int,
                       hashPrevBlock = hdr_hashPrevBlock_str,
                       hashMerkleRoot = hdr_hashMerkleRoot_str,
                       nTime = hdr_nTime_int,
                       nBits = hdr_nBits_int,
                       nNonce = hdr_nNonce_int)
    assert bb.nVersion == hdr_nVersion_int
    assert bb.get_nVersion() == hdr_nVersion_int
    assert bb.get_nVersion(raw=True) == hdr_nVersion_raw_bytes
    assert bb.hashPrevBlock == hdr_hashPrevBlock_str
    assert bb.get_hashPrevBlock() == hdr_hashPrevBlock_str
    assert bb.get_hashPrevBlock(raw=True) == hdr_hashPrevBlock_raw_bytes
    assert bb.hashMerkleRoot == hdr_hashMerkleRoot_str
    assert bb.get_hashMerkleRoot() == hdr_hashMerkleRoot_str
    assert bb.get_hashMerkleRoot(raw=True) == hdr_hashMerkleRoot_raw_bytes
    assert bb.nTime == hdr_nTime_int
    assert bb.get_nTime() == hdr_nTime_int
    assert bb.get_nTime(raw=True) == hdr_nTime_raw_bytes
    assert bb.nBits == hdr_nBits_int
    assert bb.get_nBits() == hdr_nBits_int
    assert bb.get_nBits(raw=True) == hdr_nBits_raw_bytes
    assert bb.nNonce == hdr_nNonce_int
    assert bb.get_nNonce() == hdr_nNonce_int
    assert bb.get_nNonce(raw=True) == hdr_nNonce_raw_bytes
    # hash and round-trip serialization
    assert hdr_hash == bb.hash
    assert str(bb) == hdr
    assert bb.get_hdr(outputformat="bytes") == hdr_bytes
def _load_sample_tx_hashes(sample_path):
    """Load a JSON block sample; return (merkle_root_hex, tx hash bytes)."""
    with open(sample_path) as json_file:
        data = json.load(json_file)
    tx_hashes = [tx["hash"] for tx in data["tx"]]
    return data["mrkl_root"], client.tx_hashes_to_dbytes(tx_hashes)

def test_mrkl_root():
    """vrfy_mrkl_root must accept the tx hashes of two JSON block samples.

    Fix: the first assertion compared against the module constant
    hdr_hashMerkleRoot_str while the merkle root loaded from the 300000
    sample (the same value for that block) sat unused; both assertions now
    use the root loaded from the sample itself. The duplicated loading
    code is factored into _load_sample_tx_hashes.
    """
    mrkl_root, tx_hashes_bytes = _load_sample_tx_hashes('../testdata/btc_blocks_json_samples/300000')
    assert client.vrfy_mrkl_root(tx_hashes_bytes, mrkl_root)
    mrkl_root, tx_hashes_bytes = _load_sample_tx_hashes('../testdata/btc_blocks_json_samples/100014')
    assert client.vrfy_mrkl_root(tx_hashes_bytes, mrkl_root)
def test_vrfy_mrkl_block():
    """vrfy_mrkl_block must prove tx inclusion from a partial-merkle-tree
    (merkleblock-style) structure: root, tx count, hashes and flag bits."""
    hashes_hex_big = [ 0x3612262624047ee87660be1a707519a443b1c1ce3d248cbfc6c15870f6c5daa2,
                       0x019f5b01d4195ecbc9398fbf3c3b1fa9bb3183301d7a1fb3bd174fcfa40a2b65,
                       0x41ed70551dd7e841883ab8f0b16bf04176b7d1480e4f0af9f3d4c3595768d068,
                       0x20d2a7bc994987302e5b1ac80fc425fe25f8b63169ea78e68fbaaefa59379bbf, ]
    hashes_bytes_big = list()
    for h in hashes_hex_big:
        hashes_bytes_big.append(h.to_bytes(32,"big"))
    # hashes are consumed in reverse order by the verifier
    hashes_bytes_big.reverse()
    mrkl_block={"hashMerkleRoot":"7f16c5962e8bd963659c793ce370d95f093bc7e367117b3c30c1f8fdd0d97287",
                "tx_count":0x7,
                "tx_hashes":hashes_bytes_big,
                "flag_bytes":1,
                "flags":[0,0,0,1,1,1,0,1]}
    # the transaction whose inclusion is being proven
    tx_hash = 0x019f5b01d4195ecbc9398fbf3c3b1fa9bb3183301d7a1fb3bd174fcfa40a2b65.to_bytes(32,"big")
    assert client.vrfy_mrkl_block(tx_hash=tx_hash,mrkl_block=mrkl_block)
def test_verfy_mrkl_paths():
    """Generate a merkle path for one tx of block sample 100014 and verify it.

    NOTE(review): "verfy" is a typo but renaming would change which test
    pytest reports; kept as-is.
    """
    with open('../testdata/btc_blocks_json_samples/100014') as json_file:
        data = json.load(json_file)
        hashMerkleRoot = data["mrkl_root"]
        txs = data["tx"]
        tx_hashes = list()
        for tx in txs:
            tx_hashes.append(tx["hash"])
        tx_hashes_bytes = client.tx_hashes_to_dbytes(tx_hashes)
        assert client.vrfy_mrkl_root(tx_hashes_bytes,hashMerkleRoot)
        # generate merkle path, consisting of mpath and flags
        # and check if the resulting hash during generation still
        # resembles the hashMerkleRoot
        mpath = list()
        flags = list()
        shash = "652b0aa4cf4f17bdb31f7a1d308331bba91f3b3cbf8f39c9cb5e19d4015b9f01"
        result = client.mrkl_root_path(tx_hashes_bytes,
                                       shash=shash,
                                       mpath=mpath,
                                       flags=flags)
        # result["value"] is big-endian; flip to display order for comparison
        assert int(result["value"].hex(),16).to_bytes(32,"little").hex() == hashMerkleRoot
        # verify Merkle path
        shash='652b0aa4cf4f17bdb31f7a1d308331bba91f3b3cbf8f39c9cb5e19d4015b9f01'
        assert client.vrfy_root_path(hashMerkleRoot,shash,mpath.copy(),flags.copy())
def test_parse_blk_cb():
    """Parse only the coinbase tx of raw block 603268 and check every
    decoded field: header split, tx-count varint, version, counts,
    input, outputs, raw serialization and txid."""
    with open('../testdata/btc_blocks_json_samples/603268.raw') as json_file:
        data = json.load(json_file)
        blk_raw_hex = data["rawblock"]
        blk_raw = bytes.fromhex(blk_raw_hex)
        assert blk_raw[:80].hex() == '00000020c39def44778136d6d70b610502449d7b77a94d4eff571100000000000000000074e2232b5c3121a3c8473c9db5269c9f39fd1a69e3dc37958b1670c0a24c82f4db0dc95dd12016176971f64f'
        # tx_n=1: parse just the first (coinbase) transaction
        bblk = client.BtcBlk(blk=blk_raw,tx_n=1)
        assert bblk.hdr == blk_raw[:80]
        assert bblk.data == blk_raw[80:]
        # block holds 2312 txs, encoded as the varint fd0809
        assert bblk.tx_count == 2312
        assert bblk.tx_count_raw.hex() == "fd0809"
        assert len(bblk.txs) == 1
        cbtx = bblk.txs[0]
        assert cbtx.nVersion == 1
        assert cbtx.nVersion_raw.hex() == '01000000'
        assert cbtx.flag == None
        assert cbtx.tx_in_cnt == 1 and cbtx.tx_in_cnt == len(cbtx.tx_in)
        assert cbtx.tx_in_cnt_raw.hex() == '01'
        assert cbtx.tx_out_cnt == 3 and cbtx.tx_out_cnt == len(cbtx.tx_out)
        assert cbtx.tx_out_cnt_raw.hex() == '03'
        assert cbtx.nLockTime == 1133291890
        assert cbtx.nLockTime_raw.hex() == '72a98c43'
        # coinbase input: all-zero previous txid with index 0xffffffff
        txin = cbtx.tx_in[0]
        assert txin.prev_output_raw.hex() == '0000000000000000000000000000000000000000000000000000000000000000ffffffff'
        assert txin.prev_txhash.hex() == '0000000000000000000000000000000000000000000000000000000000000000'
        assert txin.prev_txidx == 4294967295
        assert txin.prev_txidx_raw.hex() == 'ffffffff'
        assert txin.script_len == 95
        assert txin.script_len_raw.hex() == '5f'
        assert txin.script_sig.hex() == '0384340904d30dc95d2f706f6f6c696e2e636f6d2ffabe6d6d97e21604204ac2a8e72201137d16c82253498af55de5432ff9cbde84d5e63ba20100000000000000b578094a09af6006dbcc9db78000f0c20e8b0f355a003a0000fe00000000'
        assert txin.sequence == 4294967295
        assert txin.sequence_raw.hex() == 'ffffffff'
        # output 0: miner payout to a P2SH script
        txout = cbtx.tx_out[0]
        assert txout.value == 1272268104
        assert txout.value_raw.hex() == '4845d54b00000000'
        assert txout.script_len == 23
        assert txout.script_len_raw.hex() == '17'
        assert txout.script_pk.hex() == 'a914b111f00eed1a8123dd0a1fed50b0793229ed47e787'
        # outputs 1 and 2: zero-value OP_RETURN commitments
        txout = cbtx.tx_out[1]
        assert txout.value == 0
        assert txout.value_raw.hex() == '0000000000000000'
        assert txout.script_len == 38
        assert txout.script_len_raw.hex() == '26'
        assert txout.script_pk.hex() == '6a24b9e11b6db0bac66f0f2a2714d384501c639ce147d1c61f482e5c98e43c9a6168d507aecc'
        txout = cbtx.tx_out[2]
        assert txout.value == 0
        assert txout.value_raw.hex() == '0000000000000000'
        assert txout.script_len == 38
        assert txout.script_len_raw.hex() == '26'
        assert txout.script_pk.hex() == '6a24aa21a9ed6b6dd1678f89692e705ec9de8c06a2a0a9fd58d437a39c2878433248aeee7a65'
        # full serialized tx bytes and the resulting txid
        assert cbtx.txb.hex() == '01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff5f0384340904d30dc95d2f706f6f6c696e2e636f6d2ffabe6d6d97e21604204ac2a8e72201137d16c82253498af55de5432ff9cbde84d5e63ba20100000000000000b578094a09af6006dbcc9db78000f0c20e8b0f355a003a0000fe00000000ffffffff034845d54b0000000017a914b111f00eed1a8123dd0a1fed50b0793229ed47e7870000000000000000266a24b9e11b6db0bac66f0f2a2714d384501c639ce147d1c61f482e5c98e43c9a6168d507aecc0000000000000000266a24aa21a9ed6b6dd1678f89692e705ec9de8c06a2a0a9fd58d437a39c2878433248aeee7a6572a98c43'
        assert cbtx.txhash == "de612b874b23a78805ed022f55befbc94d12e2e78208d1d6d560df1d998451cb"
def test_parse_blk():
    """Parse every transaction of raw block 603268 and verify its Merkle root."""
    with open('../testdata/btc_blocks_json_samples/603268.raw') as json_file:
        raw_block = bytes.fromhex(json.load(json_file)["rawblock"])
    # sanity check: the first 80 bytes are the expected serialized header
    assert raw_block[:80].hex() == '00000020c39def44778136d6d70b610502449d7b77a94d4eff571100000000000000000074e2232b5c3121a3c8473c9db5269c9f39fd1a69e3dc37958b1670c0a24c82f4db0dc95dd12016176971f64f'
    parsed = client.BtcBlk(blk=raw_block)
    tx_hashes = [client.hexstr_to_dbytes(tx.txhash) for tx in parsed.txs]
    assert client.vrfy_mrkl_root(tx_hashes, "f4824ca2c070168b9537dce3691afd399f9c26b59d3c47c8a321315c2b23e274")
def test_parse_coinbase():
    """parse_coinbase() on block 603268 must yield the height, the coinbase
    script payload, and prefix/payload/suffix parts that re-hash to the txid."""
    with open('../testdata/btc_blocks_json_samples/603268.raw') as json_file:
        data = json.load(json_file)
        blk_raw_hex = data["rawblock"]
        blk_raw = bytes.fromhex(blk_raw_hex)
        # sanity check on the serialized 80-byte header
        assert blk_raw[:80].hex() == '00000020c39def44778136d6d70b610502449d7b77a94d4eff571100000000000000000074e2232b5c3121a3c8473c9db5269c9f39fd1a69e3dc37958b1670c0a24c82f4db0dc95dd12016176971f64f'
        # tx_n=1: only the first (coinbase) transaction is parsed
        bblk = client.BtcBlk(blk=blk_raw,tx_n=1)
        cb = bblk.txs[0]
        rslt = cb.parse_coinbase()
        assert rslt is not None
        assert rslt["blk_height"] == 603268
        assert rslt["coinbase"] == b'\x04\xd3\r\xc9]/poolin.com/\xfa\xbemm\x97\xe2\x16\x04 J\xc2\xa8\xe7"\x01\x13}\x16\xc8"SI\x8a\xf5]\xe5C/\xf9\xcb\xde\x84\xd5\xe6;\xa2\x01\x00\x00\x00\x00\x00\x00\x00\xb5x\tJ\t\xaf`\x06\xdb\xcc\x9d\xb7\x80\x00\xf0\xc2\x0e\x8b\x0f5Z\x00:\x00\x00\xfe\x00\x00\x00\x00'
        # serialized tx hashes back to the stored txid
        cb_raw = cb.get_tx("bytes")
        cb_raw_hash = client.dSHA256(cb_raw)
        assert cb.txhash == cb_raw_hash
        # reassembling prefix + coinbase + suffix gives the same txid
        cb_raw = rslt["coinbasetx_prefix"] + rslt["coinbase_full"] + rslt["coinbasetx_suffix"]
        cb_raw_hash = client.dSHA256(cb_raw)
        assert cb.txhash == cb_raw_hash
def test_nBits_to_Target():
    """Compact 4-byte nBits encoding -> 256-bit target, incl. rejected inputs."""
    assert(client.nBits_to_Target(b"\x18\x1b\xc3\x30") == 0x1bc330000000000000000000000000000000000000000000)
    assert(client.nBits_to_Target(b"\x05\x00\x92\x34") == 0x92340000)
    assert(client.nBits_to_Target(b"\x01\x00\x34\x56") == 0x00)
    assert(client.nBits_to_Target(b"\x01\x12\x34\x56") == 0x12)
    assert(client.nBits_to_Target(b"\x02\x00\x80\x00") == 0x80)
    assert(client.nBits_to_Target(b"\x04\x12\x34\x56") == 0x12345600)
    assert(client.nBits_to_Target(b"\x02\x12\x34\x56") == 0x1234)
    assert(client.nBits_to_Target(b"\x03\x12\x34\x56") == 0x123456)
    assert(client.nBits_to_Target(b"\x04\x12\x34\x56") == 0x12345600)
    assert(client.nBits_to_Target(b"\x20\x12\x34\x56") == 0x1234560000000000000000000000000000000000000000000000000000000000)
    assert(client.nBits_to_Target(b"\x20\x7f\xff\xff") == 0x7fffff0000000000000000000000000000000000000000000000000000000000)
    # a mantissa with the sign bit set must be rejected
    # (exception name keeps the client module's existing spelling)
    with pytest.raises(client.NBitsDecodingExcpetion):
        client.nBits_to_Target(b"\x04\x92\x34\x56") == 0x12345600
    with pytest.raises(client.NBitsDecodingExcpetion):
        client.nBits_to_Target(b"\x01\xfe\xdc\xba") == 0x7e
    # encoding tests:
    #assert(client.nBits_to_Target(b"\x04\x92\x34\x56") == 0x12345600) #8 # high bit set
    #assert(client.nBits_to_Target(b"\x01\xfe\xdc\xba") == 0x7e) #9 # high bit set
def test_within_difficulty_period():
    """within_difficulty_period over heights around the 2016-block boundary."""
    cases = [
        ((0, 2015), True),
        ((1, 2015), True),
        ((0, 2016), False),
        ((0, 2017), False),
        ((2015, 2017), False),
        ((2016, 2017), True),
    ]
    for (start, height), expected in cases:
        assert client.within_difficulty_period(start, height) == expected
def test_replace_bytes():
    """replace_found_bytes / replace_at_offset must zero out the merkle-root
    field of a real 80-byte header and then restore the original header."""
    old_hdr = b'\x02\x00\x00\x00Tr\xac\x8b\x11\x87\xbf\xcf\x91\xd6\xd2\x18\xbb\xda\x1e\xb2@]|U\xf1\xf8\xcc\x82\x00\x00\x00\x00\x00\x00\x00\x00\xab\n\xaa7|\xa3\xf4\x9b\x15E\xe2\xaek\x06g\xa0\x8fB\xe7-\x8c$\xae#q@\xe2\x8f\x14\xf3\xbb|k\xccmSl\x89\x00\x19\xed\xd8<\xcf'
    assert client.dSHA256(old_hdr) == '000000000000000049a0914d83df36982c77ac1f65ade6a52bdced2ce312aba9'
    mroot = b'\xab\n\xaa7|\xa3\xf4\x9b\x15E\xe2\xaek\x06g\xa0\x8fB\xe7-\x8c$\xae#q@\xe2\x8f\x14\xf3\xbb|'
    # locate the merkle root inside the header and overwrite it with zeros
    new_hdr = client.replace_found_bytes(old_hdr,mroot)
    assert new_hdr == b'\x02\x00\x00\x00Tr\xac\x8b\x11\x87\xbf\xcf\x91\xd6\xd2\x18\xbb\xda\x1e\xb2@]|U\xf1\xf8\xcc\x82\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00k\xccmSl\x89\x00\x19\xed\xd8<\xcf'
    assert client.dSHA256(new_hdr) == '7bea60b080663b91ccc10ca231e72007eb6bc97c7e2626085a0594cbbb59e933'
    # zeroing by explicit offset must produce the same header hash
    offset = client.NVERSION_LEN + client.HASHPREVBLOCK_LEN
    other_hdr = client.replace_at_offset(old_hdr,offset,replace=32)
    assert client.dSHA256(other_hdr) == '7bea60b080663b91ccc10ca231e72007eb6bc97c7e2626085a0594cbbb59e933'
    # writing the merkle root back restores the original header
    original_hdr = client.replace_at_offset(other_hdr,offset,replace=mroot)
    assert original_hdr == old_hdr
import os
import click
import logging
import h5py
import keras
import sklearn
import numpy as np
import keras_applications
from tqdm import tqdm
from sklearn.model_selection import train_test_split
# Square edge length (pixels) to which every image is resized.
resolution = 256
def preprocess_image(image):
    """Convert a loaded image to a preprocessed array via
    keras_applications' imagenet_utils preprocessing."""
    arr = keras.preprocessing.image.img_to_array(image)
    batched = np.expand_dims(arr, axis=0)
    return keras_applications.imagenet_utils.preprocess_input(batched)[0]
def process_image_dataset(dataset_path):
    """Load every image under *dataset_path*, preprocess it, shuffle, split
    into train/val/test sets, and write them to HDF5 files under datasets/.

    Fixes: the three h5py.File handles were never closed (data could stay
    unflushed), the validation file was bound to a misleadingly named
    ``train_file`` variable, and ``os.listdir`` was called twice.

    Raises:
        UserWarning: if dataset_path is None.
    """
    if dataset_path is None:
        raise UserWarning('Dataset path should not be None!')
    images = os.listdir(dataset_path)
    X = []
    for image_path in tqdm(images, total=len(images), desc='Processing Images'):
        image = keras.preprocessing.image.load_img(
            '{}/{}'.format(dataset_path, image_path),
            target_size=(resolution, resolution))
        X.append(preprocess_image(image))
    # convert to desired format
    X = np.array(X)
    # single-class dataset: every sample gets target 1
    y = np.ones((len(images), 1))
    logging.info('Features shape: {}'.format(X.shape))
    logging.info('Targets shape: {}'.format(y.shape))
    # randomly shuffle both arrays but in same order
    logging.info('Randomly shuffling arrays')
    X, y = sklearn.utils.shuffle(X, y, random_state=0)
    # divide into sets
    logging.info('Splitting into train, val and test datasets')
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1)
    # write final; context managers guarantee the HDF5 files are closed
    logging.info('Writing preprocessed data to files')
    with h5py.File('datasets/training_data.h5', "w") as train_file:
        train_file.create_dataset('X_train', data=X_train)
        train_file.create_dataset('y_train', data=y_train)
    with h5py.File('datasets/validation_data.h5', "w") as val_file:
        val_file.create_dataset('X_val', data=X_val)
        val_file.create_dataset('y_val', data=y_val)
    with h5py.File('datasets/testing_data.h5', "w") as test_file:
        test_file.create_dataset('X_test', data=X_test)
        test_file.create_dataset('y_test', data=y_test)
@click.command()
@click.option('-ds', '--dataset-path', default='datasets/images', help='Path for your Image Dataset')
def main(dataset_path):
    """CLI entry point: configure logging and preprocess the image dataset."""
    LOG_FORMAT = '%(levelname)s %(message)s'
    logging.basicConfig(format=LOG_FORMAT, level='INFO')
    process_image_dataset(dataset_path)
    logging.info('Done preprocessing!')
# Run the click entry point when executed as a script.
if __name__ == '__main__':
    main()
import click
import logging
import h5py
import keras
import sklearn
import numpy as np
import keras_applications
from tqdm import tqdm
from sklearn.model_selection import train_test_split
# Square edge length (pixels) to which every image is resized.
resolution = 256
def preprocess_image(image):
    """Convert a loaded image to a preprocessed array via
    keras_applications' imagenet_utils preprocessing."""
    arr = keras.preprocessing.image.img_to_array(image)
    batched = np.expand_dims(arr, axis=0)
    return keras_applications.imagenet_utils.preprocess_input(batched)[0]
def process_image_dataset(dataset_path):
    """Load every image under *dataset_path*, preprocess it, shuffle, split
    into train/val/test sets, and write them to HDF5 files under datasets/.

    Fixes: the three h5py.File handles were never closed (data could stay
    unflushed), the validation file was bound to a misleadingly named
    ``train_file`` variable, and ``os.listdir`` was called twice.

    Raises:
        UserWarning: if dataset_path is None.
    """
    if dataset_path is None:
        raise UserWarning('Dataset path should not be None!')
    images = os.listdir(dataset_path)
    X = []
    for image_path in tqdm(images, total=len(images), desc='Processing Images'):
        image = keras.preprocessing.image.load_img(
            '{}/{}'.format(dataset_path, image_path),
            target_size=(resolution, resolution))
        X.append(preprocess_image(image))
    # convert to desired format
    X = np.array(X)
    # single-class dataset: every sample gets target 1
    y = np.ones((len(images), 1))
    logging.info('Features shape: {}'.format(X.shape))
    logging.info('Targets shape: {}'.format(y.shape))
    # randomly shuffle both arrays but in same order
    logging.info('Randomly shuffling arrays')
    X, y = sklearn.utils.shuffle(X, y, random_state=0)
    # divide into sets
    logging.info('Splitting into train, val and test datasets')
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1)
    # write final; context managers guarantee the HDF5 files are closed
    logging.info('Writing preprocessed data to files')
    with h5py.File('datasets/training_data.h5', "w") as train_file:
        train_file.create_dataset('X_train', data=X_train)
        train_file.create_dataset('y_train', data=y_train)
    with h5py.File('datasets/validation_data.h5', "w") as val_file:
        val_file.create_dataset('X_val', data=X_val)
        val_file.create_dataset('y_val', data=y_val)
    with h5py.File('datasets/testing_data.h5', "w") as test_file:
        test_file.create_dataset('X_test', data=X_test)
        test_file.create_dataset('y_test', data=y_test)
@click.command()
@click.option('-ds', '--dataset-path', default='datasets/images', help='Path for your Image Dataset')
def main(dataset_path):
    """CLI entry point: configure logging and preprocess the image dataset."""
    LOG_FORMAT = '%(levelname)s %(message)s'
    logging.basicConfig(format=LOG_FORMAT, level='INFO')
    process_image_dataset(dataset_path)
    logging.info('Done preprocessing!')
# Run the click entry point when executed as a script.
if __name__ == '__main__':
    main()
import csv
import datetime
import re
def main():
    """Read data.csv and print a single multi-row MySQL INSERT statement
    for the donations table on stdout."""
    with open("data.csv", "r") as f:
        reader = csv.DictReader(f)
        first = True
        print("""insert into donations (donor, donee, amount, donation_date,
        donation_date_precision, donation_date_basis, cause_area, url,
        donor_cause_area_url, notes, affected_countries, affected_states,
        affected_cities, affected_regions) values""")
        for row in reader:
            amount, method = amount_and_method(row['amount'])
            notes = ("Donation date is not a single date but rather when "
                     "funding began. Rainer fellow in " +
                     row['rainer_fellow'] +
                     ". Mulago’s reasons for investing: “" +
                     row['why_invest'] + "”")
            # leading "  " for the first tuple, "  ," separators afterwards
            print(("  " if first else "  ,") + "(" + ",".join([
                mysql_quote("Mulago Foundation"),  # donor
                mysql_quote(row['grantee']),  # donee
                str(amount),  # amount
                mysql_quote(row['funded_since'] + "-01-01"),  # donation_date
                mysql_quote("year"),  # donation_date_precision
                mysql_quote("donation log"),  # donation_date_basis
                mysql_quote(""),  # cause_area
                mysql_quote(row['url']),  # url
                mysql_quote(""),  # donor_cause_area_url
                mysql_quote(notes),  # notes
                mysql_quote(""),  # affected_countries
                mysql_quote(""),  # affected_states
                mysql_quote(""),  # affected_cities
                mysql_quote(""),  # affected_regions
            ]) + ")")
            first = False
        print(";")
def mysql_quote(x):
    '''
    Quote the string x using MySQL quoting rules. If x is the empty string,
    return "NULL". Probably not safe against maliciously formed strings, but
    whatever; our input is fixed and from a basically trustable source..
    '''
    if not x:
        return "NULL"
    # order matters: escape backslashes first, then quotes, then newlines
    escaped = x.replace("\\", "\\\\").replace("'", "''").replace("\n", "\\n")
    return "'{}'".format(escaped)
def amount_and_method(amount_string):
    """Separate the amount and the method from an "amount and method"
    string such as "$1.5M by wire".

    Returns:
        tuple: (amount in dollars rounded to 2 decimals, method text).

    Raises:
        ValueError: if *amount_string* does not match the expected format.
            (Previously an unmatched string fell through to an
            AttributeError on ``m.group``.)
    """
    m = re.match(r"\$([0-9.]+)\s*(M|million|K)(.*)", amount_string)
    if m is None:
        raise ValueError("We can't understand this number format.")
    num = float(m.group(1))
    if m.group(2) in ["M", "million"]:
        num *= 1e6
    elif m.group(2) == "K":
        num *= 1e3
    else:
        raise ValueError("We can't understand this number format.")
    return (round(num, 2), m.group(3).strip())
# Emit the INSERT statement when executed as a script.
if __name__ == "__main__":
    main()
import csv
import datetime
import re
def main():
    """Read data.csv and print a single multi-row MySQL INSERT statement
    for the donations table on stdout."""
    with open("data.csv", "r") as f:
        reader = csv.DictReader(f)
        first = True
        print("""insert into donations (donor, donee, amount, donation_date,
        donation_date_precision, donation_date_basis, cause_area, url,
        donor_cause_area_url, notes, affected_countries, affected_states,
        affected_cities, affected_regions) values""")
        for row in reader:
            amount, method = amount_and_method(row['amount'])
            notes = ("Donation date is not a single date but rather when "
                     "funding began. Rainer fellow in " +
                     row['rainer_fellow'] +
                     ". Mulago’s reasons for investing: “" +
                     row['why_invest'] + "”")
            # leading "  " for the first tuple, "  ," separators afterwards
            print(("  " if first else "  ,") + "(" + ",".join([
                mysql_quote("Mulago Foundation"),  # donor
                mysql_quote(row['grantee']),  # donee
                str(amount),  # amount
                mysql_quote(row['funded_since'] + "-01-01"),  # donation_date
                mysql_quote("year"),  # donation_date_precision
                mysql_quote("donation log"),  # donation_date_basis
                mysql_quote(""),  # cause_area
                mysql_quote(row['url']),  # url
                mysql_quote(""),  # donor_cause_area_url
                mysql_quote(notes),  # notes
                mysql_quote(""),  # affected_countries
                mysql_quote(""),  # affected_states
                mysql_quote(""),  # affected_cities
                mysql_quote(""),  # affected_regions
            ]) + ")")
            first = False
        print(";")
def mysql_quote(x):
    '''
    Quote the string x using MySQL quoting rules. If x is the empty string,
    return "NULL". Probably not safe against maliciously formed strings, but
    whatever; our input is fixed and from a basically trustable source..
    '''
    if not x:
        return "NULL"
    # order matters: escape backslashes first, then quotes, then newlines
    escaped = x.replace("\\", "\\\\").replace("'", "''").replace("\n", "\\n")
    return "'{}'".format(escaped)
def amount_and_method(amount_string):
    """Separate the amount and the method from an "amount and method"
    string such as "$1.5M by wire".

    Returns:
        tuple: (amount in dollars rounded to 2 decimals, method text).

    Raises:
        ValueError: if *amount_string* does not match the expected format.
            (Previously an unmatched string fell through to an
            AttributeError on ``m.group``.)
    """
    m = re.match(r"\$([0-9.]+)\s*(M|million|K)(.*)", amount_string)
    if m is None:
        raise ValueError("We can't understand this number format.")
    num = float(m.group(1))
    if m.group(2) in ["M", "million"]:
        num *= 1e6
    elif m.group(2) == "K":
        num *= 1e3
    else:
        raise ValueError("We can't understand this number format.")
    return (round(num, 2), m.group(3).strip())
# Emit the INSERT statement when executed as a script.
if __name__ == "__main__":
    main()
from typing import Set, Dict, Any
from ... import Batch, LocalBackend, ServiceBackend, Backend
from ...resource import Resource
import os
from os.path import exists
import sys
import shlex
from argparse import Namespace, ArgumentParser, SUPPRESS
import google.oauth2.service_account
from google.cloud import storage
from google.cloud.storage.blob import Blob
# regenie options whose values are files that must be localized into the job.
input_file_args = ["bgen", "bed", "pgen", "sample", "keep", "extract", "exclude", "remove",
                   "phenoFile", "covarFile"]
# argparse stores dashed options with underscores; map back to regenie's spellings.
from_underscore = {
    "force_impute": "force-impute",
    "ignore_pred": "ignore-pred",
    "lowmem_prefix": "lowmem-prefix"
}
def _is_local(spath: str):
if spath.startswith("gs://"):
return False
return True
# Module-wide, lazily created Google Cloud Storage client (see gcs_client()).
GCS_CLIENT = None
def gcs_client():
    """Return a cached GCS storage client, creating it on first use.

    Uses service-account credentials from $HAIL_GSA_KEY_FILE when that
    variable is set; otherwise the client falls back to its defaults.
    """
    global GCS_CLIENT
    if GCS_CLIENT is None:
        credentials = None
        key_file = os.environ.get('HAIL_GSA_KEY_FILE')
        if key_file:
            credentials = google.oauth2.service_account.Credentials.from_service_account_file(
                key_file)
        GCS_CLIENT = storage.Client(project=None, credentials=credentials)
    return GCS_CLIENT
def _read(spath: str):
    """Return the full contents of *spath* as text; supports local paths
    and gs:// URLs (downloaded via the shared GCS client)."""
    if _is_local(spath):
        with open(spath, "r") as f:
            return f.read()
    blob = Blob.from_string(spath, gcs_client())
    return blob.download_as_string().decode("utf-8")
def _read_first_line(spath: str):
    """Return the first line of *spath* (local path or gs:// URL), without
    a trailing newline.

    Fix: the local branch previously returned ``f.readline()`` including
    its trailing newline while the remote branch (split on newline) did
    not; both branches now agree. The only caller in this module strips
    the result anyway.
    """
    if _is_local(spath):
        with open(spath, "r") as f:
            return f.readline().rstrip("\n")
    # remote: no streaming read here, so fetch the object and split
    return _read(spath).split("\n")[0]
def _exists(spath: str) -> bool:
    """Return True when *spath* (local path or gs:// URL) exists."""
    if _is_local(spath):
        return exists(spath)
    blob = Blob.from_string(spath, gcs_client())
    return blob.exists()
def _warn(msg):
    """Emit *msg* on stderr, newline-terminated."""
    sys.stderr.write(f"{msg}\n")
def _error(msg):
    """Print *msg* to stderr and terminate the process with exit status 1."""
    _warn(msg)
    sys.exit(1)
def add_shared_args(parser: ArgumentParser):
    """Register the regenie options shared by step 1 and step 2 on *parser*.

    Mirrors regenie's own CLI so an existing argument string or file can be
    parsed unchanged; only genuinely mandatory inputs are marked required.
    """
    # Batch knows in advance which step it is, so not required
    parser.add_argument('--step', required=False)
    parser.add_argument('--phenoFile', required=True)
    parser.add_argument('--out', required=True)
    # exactly one genotype input format must be supplied
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--bed', required=False)
    group.add_argument('--bgen', required=False)
    group.add_argument('--pgen', required=False)
    # --phenoCol may be repeated; values accumulate into a list
    parser.add_argument('--phenoCol', required=False, action='append')
    parser.add_argument('--phenoColList', required=False)
    parser.add_argument('--sample', required=False)
    parser.add_argument('--covarFile', required=False)
    parser.add_argument('--covarCol', required=False)
    parser.add_argument('--covarColList', required=False)
    parser.add_argument('--pThresh', required=False)
    parser.add_argument('--remove', required=False)
    parser.add_argument('--bsize', required=False)
    parser.add_argument('--cv', required=False)
    parser.add_argument('--nb', required=False)
    parser.add_argument('--loocv', required=False, action='store_true')
    parser.add_argument('--bt', required=False, action='store_true')
    # regenie's --1 flag, with --cc12 as an alias
    parser.add_argument('--1', '--cc12', required=False, action='store_true')
    parser.add_argument('--split', required=False, action='store_true')
    parser.add_argument('--strict', required=False, action='store_true')
    parser.add_argument('--firth', required=False, action='store_true')
    parser.add_argument('--approx', required=False, action='store_true')
    parser.add_argument('--spa', required=False, action='store_true')
    parser.add_argument('--debug', required=False, action='store_true')
    parser.add_argument('--verbose', required=False, action='store_true')
    parser.add_argument('--lowmem', required=False, action='store_true')
    parser.add_argument('--lowmem-prefix', required=False)
def add_step1_args(parser: ArgumentParser):
    """Register the regenie options that only apply to step 1 on *parser*."""
    for flag in ('--extract', '--exclude'):
        parser.add_argument(flag, required=False)
def add_step2_args(parser: ArgumentParser):
    """Register the regenie options that only apply to step 2 on *parser*."""
    # Pred is derived from step 1, whenever step 1 is provided
    parser.add_argument('--pred', required=False)
    for flag in ('--ignore-pred', '--force-impute'):
        parser.add_argument(flag, required=False, action='store_true')
    parser.add_argument('--chr', required=False)
def read_step_args(path_or_str: str, step: int):
    """Parse regenie arguments for the given *step* (1 or 2).

    *path_or_str* is either a path (local or gs://) to an argument file or
    a literal argument string. Returns a (regenie_args, batch_args) pair of
    namespaces; batch_args carries the Batch resource options
    (--threads/--memory/--storage) parsed from the same string.
    """
    parser = ArgumentParser()
    add_shared_args(parser)
    if step == 1:
        add_step1_args(parser)
    elif step == 2:
        add_step2_args(parser)
    else:
        _error(f"Unknown step: {step}")
    # fall back to treating the input as a raw argument string
    if not _exists(path_or_str):
        print(f"Couldn't find a file named {path_or_str}, assuming this is an argument string")
        t = shlex.split(path_or_str)
    else:
        print(f"Found {path_or_str}, reading")
        t = shlex.split(_read(path_or_str))
    regenie_args = parser.parse_known_args(t)[0]
    if step == 2:
        # any user-supplied --pred is overridden later by the step 1 output
        if regenie_args.pred:
            print("Batch will set --pred to the output prefix of --step 1.")
    bparser = ArgumentParser()
    bparser.add_argument('--threads', required=False, default=1)
    bparser.add_argument('--memory', required=False, default='1Gi')
    bparser.add_argument('--storage', required=False, default='1Gi')
    batch_args = bparser.parse_known_args(t)[0]
    return regenie_args, batch_args
def get_phenos(step_args: Namespace):
phenos_to_keep = {}
if step_args.phenoCol:
for pheno in step_args.phenoCol:
phenos_to_keep[pheno] = True
if step_args.phenoColList:
for pheno in step_args.phenoColList.split(","):
phenos_to_keep[pheno] = True
phenos = _read_first_line(step_args.phenoFile).strip().split(" ")[2:]
if not phenos_to_keep:
return phenos
phenos_final = []
for pheno in phenos:
if pheno in phenos_to_keep:
phenos_final.append(pheno)
return phenos_final
def prepare_step_cmd(batch: Batch, step_args: Namespace, job_output: Resource, skip: Set[str] = None):
    """Build the regenie command-line string for one step: file options become
    Batch input resources, --out targets *job_output*, booleans become bare
    flags, --phenoCol repeats per value; *skip*/None/False entries are omitted."""
    cmd = []
    for name, val in vars(step_args).items():
        if val is None or val is False or (skip is not None and name in skip):
            continue
        # Restore the hyphenated flag spelling regenie expects.
        name = from_underscore.get(name, name)
        if name in input_file_args:
            if name == "bed":
                res: Resource = batch.read_input_group(bed=f"{val}.bed", bim=f"{val}.bim", fam=f"{val}.fam")
            elif name == "pgen":
                res = batch.read_input_group(
                    pgen=f"{val}.pgen", pvar=f"{val}.pvar", psam=f"{val}.psam")
            else:
                res = batch.read_input(val)
            cmd.append(f"--{name} {res}")
        elif name == "out":
            cmd.append(f"--{name} {job_output}")
        elif isinstance(val, bool):
            cmd.append(f"--{name}")
        elif name == "phenoCol":
            for pheno in val:
                cmd.append(f"--{name} {pheno}")
        else:
            cmd.append(f"--{name} {val}")
    return ' '.join(cmd).strip()
def prepare_jobs(batch, step1_args: Namespace, step1_batch_args: Namespace, step2_args: Namespace,
                 step2_batch_args: Namespace):
    """Create the regenie step-1 and step-2 Batch jobs; return the step-2 job.
    Step 2 consumes step 1's predictions list unless --ignore-pred was given."""
    regenie_img = 'hailgenetics/regenie:v1.0.5.6'
    j1 = batch.new_job(name='run-regenie-step1')
    j1.image(regenie_img)
    j1.cpu(step1_batch_args.threads)
    j1.memory(step1_batch_args.memory)
    j1.storage(step1_batch_args.storage)
    phenos = get_phenos(step1_args)
    nphenos = len(phenos)
    # Step 1 outputs: log, predictions list, and one .loco file per phenotype.
    s1out = {"log": "{root}.log", "pred_list": "{root}_pred.list"}
    for i in range(1, nphenos + 1):
        s1out[f"pheno_{i}"] = f"{{root}}_{i}.loco"
    j1.declare_resource_group(output=s1out)
    cmd1 = prepare_step_cmd(batch, step1_args, j1.output)
    j1.command(f"regenie {cmd1}")
    phenos = get_phenos(step2_args)
    nphenos = len(phenos)
    j2 = batch.new_job(name='run-regenie-step2')
    j2.image(regenie_img)
    j2.cpu(step2_batch_args.threads)
    j2.memory(step2_batch_args.memory)
    j2.storage(step2_batch_args.storage)
    s2out = {"log": "{root}.log"}
    if step2_args.split:
        # --split makes regenie write one result file per phenotype.
        for pheno in phenos:
            out = f"{{root}}_{pheno}.regenie"
            s2out[f"{pheno}.regenie"] = out
    else:
        s2out["regenie"] = "{root}.regenie"
    j2.declare_resource_group(output=s2out)
    # A user-supplied --pred is skipped; it is injected from j1's output below.
    cmd2 = prepare_step_cmd(batch, step2_args, j2.output, skip=set(['pred']))
    if not step2_args.ignore_pred:
        cmd2 = (f"{cmd2} --pred {j1.output['pred_list']}")
    j2.command(f"regenie {cmd2}")
    return j2
def run(args: Namespace, backend_opts: Dict[str, Any], run_opts: Dict[str, Any]):
    """Build and run the two-step regenie Batch pipeline.

    ``"x" in args`` tests attribute presence on the Namespace; valid here
    because options were parsed with argument_default=SUPPRESS.
    """
    is_local = "local" in args or "demo" in args
    if is_local:
        backend: Backend = LocalBackend(**backend_opts)
    else:
        backend = ServiceBackend(**backend_opts)
    has_steps = "step1" in args or "step2" in args
    if "demo" in args:
        if has_steps:
            _warn("When --demo provided, --step1 and --step2 are ignored")
        step1_args, step1_batch_args = read_step_args("example/step1.txt", 1)
        step2_args, step2_batch_args = read_step_args("example/step2.txt", 2)
    else:
        if not has_steps:
            _error("When --demo not provided, --step1 and --step2 must be")
        step1_args, step1_batch_args = read_step_args(args.step1, 1)
        step2_args, step2_batch_args = read_step_args(args.step2, 2)
    batch = Batch(backend=backend, name='regenie')
    j2 = prepare_jobs(batch, step1_args, step1_batch_args, step2_args, step2_batch_args)
    print(f"Will write output to: {step2_args.out}")
    batch.write_output(j2.output, step2_args.out)
    return batch.run(**run_opts)
def parse_input_args(input_args: list):
    """Split argv into wrapper args, backend constructor options and
    Batch.run() options via three argparse passes; which backend/run options
    exist depends on --local/--demo."""
    parser = ArgumentParser(argument_default=SUPPRESS, add_help=False)
    parser.add_argument('--local', required=False, action="store_true",
                        help="Use LocalBackend instead of the default ServiceBackend")
    parser.add_argument('--demo', required=False, action="store_true",
                        help="Run Regenie using Batch LocalBackend and example/step1.txt, example/step2.txt step files")
    parser.add_argument('--step1', required=False,
                        help="Path to newline-separated text file of Regenie step1 arguments")
    parser.add_argument('--step2', required=False,
                        help="Path to newline-separated text file of Regenie step2 arguments")
    args = parser.parse_known_args(input_args)
    backend_parser = ArgumentParser(argument_default=SUPPRESS, add_help=False)
    if "local" in args[0] or "demo" in args[0]:
        backend_parser.add_argument('--tmp_dir', required=False,
                                    help="Batch LocalBackend `tmp_dir` option")
        backend_parser.add_argument('--gsa_key_file', required=False,
                                    help="Batch LocalBackend `gsa_key_file` option")
        backend_parser.add_argument('--extra_docker_run_flags', required=False,
                                    help="Batch LocalBackend `extra_docker_run_flags` option")
        run_parser = ArgumentParser(argument_default=SUPPRESS, parents=[parser, backend_parser], add_help=True,
                                    epilog="Batch LocalBackend options shown, try without '--local' to see ServiceBackend options")
        run_parser.add_argument('--dry_run', required=False, action="store_true",
                                help="Batch.run() LocalBackend `dry_run` option")
        run_parser.add_argument('--verbose', required=False, action="store_true",
                                help="Batch.run() LocalBackend `verbose` option")
        run_parser.add_argument('--delete_scratch_on_exit', required=False, action="store_true",
                                help="Batch.run() LocalBackend `delete_scratch_on_exit` option")
    else:
        backend_parser.add_argument('--billing_project', required=False,
                                    help="Batch ServiceBackend `billing_project` option")
        backend_parser.add_argument('--bucket', required=False,
                                    help="Batch ServiceBackend `bucket` option")
        run_parser = ArgumentParser(argument_default=SUPPRESS, parents=[parser, backend_parser], add_help=True,
                                    epilog="Batch ServiceBackend options shown, try '--local' to see LocalBackend options")
        run_parser.add_argument('--dry_run', required=False, action="store_true",
                                help="Batch.run() ServiceBackend `dry_run` option")
        run_parser.add_argument('--verbose', required=False, action="store_true",
                                help="Batch.run() ServiceBackend `verbose` option")
        run_parser.add_argument('--delete_scratch_on_exit', required=False, action="store_true",
                                help="Batch.run() ServiceBackend `delete_scratch_on_exit` option")
        run_parser.add_argument('--wait', required=False, action="store_true",
                                help="Batch.run() ServiceBackend `wait` option")
        run_parser.add_argument('--open', required=False, action="store_true",
                                help="Batch.run() ServiceBackend `open` option")
        run_parser.add_argument('--disable_progress_bar', required=False, action="store_true",
                                help="Batch.run() ServiceBackend `disable_progress_bar` option")
        run_parser.add_argument('--callback', required=False,
                                help="Batch.run() ServiceBackend `callback` option")
    # Each later parser consumes the leftovers of the previous pass.
    backend_args = backend_parser.parse_known_args(args[1])
    run_args = run_parser.parse_known_args(backend_args[1])
    return {"args": args[0], "backend_opts": vars(backend_args[0]), "run_opts": vars(run_args[0])}
if __name__ == '__main__':
args = parse_input_args(sys.argv[1:])
run(**args) | hail/python/hailtop/batch/genetics/regenie/regenie.py | from typing import Set, Dict, Any
from ... import Batch, LocalBackend, ServiceBackend, Backend
from ...resource import Resource
import os
from os.path import exists
import sys
import shlex
from argparse import Namespace, ArgumentParser, SUPPRESS
import google.oauth2.service_account
from google.cloud import storage
from google.cloud.storage.blob import Blob
# regenie options whose values are file paths that must be localized into the
# Batch job as input resources (see prepare_step_cmd).
input_file_args = ["bgen", "bed", "pgen", "sample", "keep", "extract", "exclude", "remove",
                   "phenoFile", "covarFile"]
# argparse stores hyphenated flags under underscore dests; this maps the dest
# names back to the hyphenated spelling regenie expects on its command line.
from_underscore = {
    "force_impute": "force-impute",
    "ignore_pred": "ignore-pred",
    "lowmem_prefix": "lowmem-prefix"
}
def _is_local(spath: str):
if spath.startswith("gs://"):
return False
return True
# Lazily created, module-wide Google Cloud Storage client (see gcs_client()).
GCS_CLIENT = None
def gcs_client():
    """Return a cached GCS client, creating it on first use.

    If the HAIL_GSA_KEY_FILE environment variable is set, authenticates with
    that service-account key file; otherwise default credentials are used.
    """
    global GCS_CLIENT
    if GCS_CLIENT is None:
        credentials = None
        key_file = os.environ.get('HAIL_GSA_KEY_FILE')
        if key_file:
            credentials = google.oauth2.service_account.Credentials.from_service_account_file(
                key_file)
        GCS_CLIENT = storage.Client(project=None, credentials=credentials)
    return GCS_CLIENT
def _read(spath: str):
    """Read the whole file at *spath* (local path or gs:// URI) as text."""
    if _is_local(spath):
        with open(spath, "r") as f:
            return f.read()
    # GCS object: download the bytes and decode as UTF-8.
    blob = Blob.from_string(spath, gcs_client())
    return blob.download_as_string().decode("utf-8")
def _read_first_line(spath: str):
    """Return the first line of *spath* (local path or gs:// URI).

    Note: the local branch keeps the trailing newline (readline), while the
    GCS branch splits it off — callers strip the result themselves.
    """
    if _is_local(spath):
        with open(spath, "r") as f:
            return f.readline()
    # For GCS the whole object is downloaded; only the first line is kept.
    return _read(spath).split("\n")[0]
def _exists(spath: str) -> bool:
    """Return True when the local file or gs:// object at *spath* exists."""
    if _is_local(spath):
        return exists(spath)
    blob = Blob.from_string(spath, gcs_client())
    return blob.exists()
def _warn(msg):
print(msg, file=sys.stderr)
def _error(msg):
    """Print *msg* to stderr, then terminate the process with exit status 1."""
    _warn(msg)
    sys.exit(1)
def add_shared_args(parser: ArgumentParser):
    """Register the regenie CLI options shared by step 1 and step 2."""
    # Batch knows in advance which step it is, so not required
    parser.add_argument('--step', required=False)
    parser.add_argument('--phenoFile', required=True)
    parser.add_argument('--out', required=True)
    # Exactly one genotype input format must be supplied.
    geno = parser.add_mutually_exclusive_group(required=True)
    for geno_flag in ('--bed', '--bgen', '--pgen'):
        geno.add_argument(geno_flag, required=False)
    # --phenoCol may be repeated; occurrences accumulate into a list.
    parser.add_argument('--phenoCol', required=False, action='append')
    # Plain single-value options, in the original registration order.
    for value_flag in ('--phenoColList', '--sample', '--covarFile', '--covarCol',
                       '--covarColList', '--pThresh', '--remove', '--bsize',
                       '--cv', '--nb'):
        parser.add_argument(value_flag, required=False)
    # Boolean switches: present -> True, absent -> False.
    for switch in ('--loocv', '--bt'):
        parser.add_argument(switch, required=False, action='store_true')
    parser.add_argument('--1', '--cc12', required=False, action='store_true')
    for switch in ('--split', '--strict', '--firth', '--approx', '--spa',
                   '--debug', '--verbose', '--lowmem'):
        parser.add_argument(switch, required=False, action='store_true')
    parser.add_argument('--lowmem-prefix', required=False)
def add_step1_args(parser: ArgumentParser):
    """Register regenie options accepted only by step 1."""
    for flag in ('--extract', '--exclude'):
        parser.add_argument(flag, required=False)
def add_step2_args(parser: ArgumentParser):
    """Register regenie options accepted only by step 2."""
    # Pred is derived from step 1, whenever step 1 is provided
    parser.add_argument('--pred', required=False)
    for switch in ('--ignore-pred', '--force-impute'):
        parser.add_argument(switch, required=False, action='store_true')
    parser.add_argument('--chr', required=False)
def read_step_args(path_or_str: str, step: int):
    """Parse regenie arguments for one step from a file or a literal string.

    *path_or_str* is treated as a file (local or gs://) when it exists,
    otherwise as a shell-style argument string. Returns a pair
    (regenie_args, batch_args); batch_args carries the Batch resource
    options --threads/--memory/--storage parsed from the same text.
    """
    parser = ArgumentParser()
    add_shared_args(parser)
    if step == 1:
        add_step1_args(parser)
    elif step == 2:
        add_step2_args(parser)
    else:
        _error(f"Unknown step: {step}")
    if not _exists(path_or_str):
        print(f"Couldn't find a file named {path_or_str}, assuming this is an argument string")
        t = shlex.split(path_or_str)
    else:
        print(f"Found {path_or_str}, reading")
        t = shlex.split(_read(path_or_str))
    # parse_known_args: unknown regenie flags are tolerated, not fatal.
    regenie_args = parser.parse_known_args(t)[0]
    if step == 2:
        if regenie_args.pred:
            # A user-supplied --pred is overridden later by step 1's output.
            print("Batch will set --pred to the output prefix of --step 1.")
    # Batch resource knobs are parsed separately from the same token list.
    bparser = ArgumentParser()
    bparser.add_argument('--threads', required=False, default=1)
    bparser.add_argument('--memory', required=False, default='1Gi')
    bparser.add_argument('--storage', required=False, default='1Gi')
    batch_args = bparser.parse_known_args(t)[0]
    return regenie_args, batch_args
def get_phenos(step_args: Namespace):
    """Return the phenotype names to analyze.

    Reads the header of --phenoFile (columns after the first two sample-ID
    columns) and, when --phenoCol / --phenoColList restrict the set, keeps
    only those names, preserving the file's column order.
    """
    selected = set()
    if step_args.phenoCol:
        selected.update(step_args.phenoCol)
    if step_args.phenoColList:
        selected.update(step_args.phenoColList.split(","))
    # The first two header columns are sample identifiers, not phenotypes.
    header_cols = _read_first_line(step_args.phenoFile).strip().split(" ")[2:]
    if not selected:
        return header_cols
    return [pheno for pheno in header_cols if pheno in selected]
def prepare_step_cmd(batch: Batch, step_args: Namespace, job_output: Resource, skip: Set[str] = None):
    """Build the regenie command-line argument string for one step.

    File-valued options are converted to Batch input resources (bed/pgen
    become resource groups with their companion files), --out is pointed at
    *job_output*, booleans become bare flags, and --phenoCol is repeated
    per value. Options named in *skip*, None values and False flags are
    omitted.
    """
    cmd = []
    for name, val in vars(step_args).items():
        if val is None or val is False or (skip is not None and name in skip):
            continue
        # Restore the hyphenated flag spelling regenie expects.
        name = from_underscore.get(name, name)
        if name in input_file_args:
            if name == "bed":
                # PLINK1 input: localize the .bed/.bim/.fam trio together.
                res: Resource = batch.read_input_group(bed=f"{val}.bed", bim=f"{val}.bim", fam=f"{val}.fam")
            elif name == "pgen":
                # PLINK2 input: localize the .pgen/.pvar/.psam trio together.
                res = batch.read_input_group(
                    pgen=f"{val}.pgen", pvar=f"{val}.pvar", psam=f"{val}.psam")
            else:
                res = batch.read_input(val)
            cmd.append(f"--{name} {res}")
        elif name == "out":
            # Output prefix always points at the job's declared resource group.
            cmd.append(f"--{name} {job_output}")
        elif isinstance(val, bool):
            cmd.append(f"--{name}")
        elif name == "phenoCol":
            for pheno in val:
                cmd.append(f"--{name} {pheno}")
        else:
            cmd.append(f"--{name} {val}")
    return ' '.join(cmd).strip()
def prepare_jobs(batch, step1_args: Namespace, step1_batch_args: Namespace, step2_args: Namespace,
                 step2_batch_args: Namespace):
    """Create the regenie step-1 and step-2 Batch jobs; return the step-2 job.

    Step 2 consumes step 1's predictions list via --pred unless
    --ignore-pred was given, which makes j2 depend on j1.
    """
    regenie_img = 'hailgenetics/regenie:v1.0.5.6'
    # --- step 1 job ---
    j1 = batch.new_job(name='run-regenie-step1')
    j1.image(regenie_img)
    j1.cpu(step1_batch_args.threads)
    j1.memory(step1_batch_args.memory)
    j1.storage(step1_batch_args.storage)
    phenos = get_phenos(step1_args)
    nphenos = len(phenos)
    # Step 1 outputs: a log, the predictions list, and one .loco file per
    # phenotype, numbered 1..n.
    s1out = {"log": "{root}.log", "pred_list": "{root}_pred.list"}
    for i in range(1, nphenos + 1):
        s1out[f"pheno_{i}"] = f"{{root}}_{i}.loco"
    j1.declare_resource_group(output=s1out)
    cmd1 = prepare_step_cmd(batch, step1_args, j1.output)
    j1.command(f"regenie {cmd1}")
    # --- step 2 job ---
    phenos = get_phenos(step2_args)
    nphenos = len(phenos)
    j2 = batch.new_job(name='run-regenie-step2')
    j2.image(regenie_img)
    j2.cpu(step2_batch_args.threads)
    j2.memory(step2_batch_args.memory)
    j2.storage(step2_batch_args.storage)
    s2out = {"log": "{root}.log"}
    if step2_args.split:
        # --split makes regenie write one result file per phenotype.
        for pheno in phenos:
            out = f"{{root}}_{pheno}.regenie"
            s2out[f"{pheno}.regenie"] = out
    else:
        s2out["regenie"] = "{root}.regenie"
    j2.declare_resource_group(output=s2out)
    # Any user-supplied --pred is skipped; it is injected from j1's output below.
    cmd2 = prepare_step_cmd(batch, step2_args, j2.output, skip=set(['pred']))
    if not step2_args.ignore_pred:
        cmd2 = (f"{cmd2} --pred {j1.output['pred_list']}")
    j2.command(f"regenie {cmd2}")
    return j2
def run(args: Namespace, backend_opts: Dict[str, Any], run_opts: Dict[str, Any]):
    """Build and run the two-step regenie Batch pipeline.

    ``"x" in args`` tests attribute presence on the argparse Namespace;
    it works here because the options were parsed with
    argument_default=SUPPRESS, so absent flags leave no attribute.
    """
    is_local = "local" in args or "demo" in args
    if is_local:
        backend: Backend = LocalBackend(**backend_opts)
    else:
        backend = ServiceBackend(**backend_opts)
    has_steps = "step1" in args or "step2" in args
    if "demo" in args:
        # Demo mode always runs the bundled example step files.
        if has_steps:
            _warn("When --demo provided, --step1 and --step2 are ignored")
        step1_args, step1_batch_args = read_step_args("example/step1.txt", 1)
        step2_args, step2_batch_args = read_step_args("example/step2.txt", 2)
    else:
        if not has_steps:
            _error("When --demo not provided, --step1 and --step2 must be")
        step1_args, step1_batch_args = read_step_args(args.step1, 1)
        step2_args, step2_batch_args = read_step_args(args.step2, 2)
    batch = Batch(backend=backend, name='regenie')
    j2 = prepare_jobs(batch, step1_args, step1_batch_args, step2_args, step2_batch_args)
    print(f"Will write output to: {step2_args.out}")
    batch.write_output(j2.output, step2_args.out)
    return batch.run(**run_opts)
def parse_input_args(input_args: list):
    """Split CLI arguments into wrapper args, backend options and run options.

    Three argparse passes over the same argv: the first extracts the
    wrapper's own flags, the second the backend constructor options, the
    third the Batch.run() options. Which backend/run options exist depends
    on whether --local/--demo was given. SUPPRESS keeps unset options out
    of the resulting dicts so they can be splatted as **kwargs.
    """
    parser = ArgumentParser(argument_default=SUPPRESS, add_help=False)
    parser.add_argument('--local', required=False, action="store_true",
                        help="Use LocalBackend instead of the default ServiceBackend")
    parser.add_argument('--demo', required=False, action="store_true",
                        help="Run Regenie using Batch LocalBackend and example/step1.txt, example/step2.txt step files")
    parser.add_argument('--step1', required=False,
                        help="Path to newline-separated text file of Regenie step1 arguments")
    parser.add_argument('--step2', required=False,
                        help="Path to newline-separated text file of Regenie step2 arguments")
    args = parser.parse_known_args(input_args)
    backend_parser = ArgumentParser(argument_default=SUPPRESS, add_help=False)
    if "local" in args[0] or "demo" in args[0]:
        # LocalBackend constructor options.
        backend_parser.add_argument('--tmp_dir', required=False,
                                    help="Batch LocalBackend `tmp_dir` option")
        backend_parser.add_argument('--gsa_key_file', required=False,
                                    help="Batch LocalBackend `gsa_key_file` option")
        backend_parser.add_argument('--extra_docker_run_flags', required=False,
                                    help="Batch LocalBackend `extra_docker_run_flags` option")
        run_parser = ArgumentParser(argument_default=SUPPRESS, parents=[parser, backend_parser], add_help=True,
                                    epilog="Batch LocalBackend options shown, try without '--local' to see ServiceBackend options")
        run_parser.add_argument('--dry_run', required=False, action="store_true",
                                help="Batch.run() LocalBackend `dry_run` option")
        run_parser.add_argument('--verbose', required=False, action="store_true",
                                help="Batch.run() LocalBackend `verbose` option")
        run_parser.add_argument('--delete_scratch_on_exit', required=False, action="store_true",
                                help="Batch.run() LocalBackend `delete_scratch_on_exit` option")
    else:
        # ServiceBackend constructor options.
        backend_parser.add_argument('--billing_project', required=False,
                                    help="Batch ServiceBackend `billing_project` option")
        backend_parser.add_argument('--bucket', required=False,
                                    help="Batch ServiceBackend `bucket` option")
        run_parser = ArgumentParser(argument_default=SUPPRESS, parents=[parser, backend_parser], add_help=True,
                                    epilog="Batch ServiceBackend options shown, try '--local' to see LocalBackend options")
        run_parser.add_argument('--dry_run', required=False, action="store_true",
                                help="Batch.run() ServiceBackend `dry_run` option")
        run_parser.add_argument('--verbose', required=False, action="store_true",
                                help="Batch.run() ServiceBackend `verbose` option")
        run_parser.add_argument('--delete_scratch_on_exit', required=False, action="store_true",
                                help="Batch.run() ServiceBackend `delete_scratch_on_exit` option")
        run_parser.add_argument('--wait', required=False, action="store_true",
                                help="Batch.run() ServiceBackend `wait` option")
        run_parser.add_argument('--open', required=False, action="store_true",
                                help="Batch.run() ServiceBackend `open` option")
        run_parser.add_argument('--disable_progress_bar', required=False, action="store_true",
                                help="Batch.run() ServiceBackend `disable_progress_bar` option")
        run_parser.add_argument('--callback', required=False,
                                help="Batch.run() ServiceBackend `callback` option")
    # Each later parser consumes the leftovers of the previous pass.
    backend_args = backend_parser.parse_known_args(args[1])
    run_args = run_parser.parse_known_args(backend_args[1])
    return {"args": args[0], "backend_opts": vars(backend_args[0]), "run_opts": vars(run_args[0])}
if __name__ == '__main__':
    # Split argv into wrapper args, backend options and run options.
    args = parse_input_args(sys.argv[1:])
run(**args) | 0.45423 | 0.093471 |
from unittest.mock import patch
import shaystack
from shaystack import Ref
from shaystack.ops import HaystackHttpRequest
from shaystack.providers import ping
@patch.object(ping.Provider, 'invoke_action')
def test_invoke_action_with_zinc(mock) -> None:
    # GIVEN
    """Dispatch an action call with one parameter, ZINC-encoded.

    Args:
        mock: patched ``ping.Provider.invoke_action``.
    """
    envs = {'HAYSTACK_PROVIDER': 'shaystack.providers.ping'}
    mock.return_value = ping._PingGrid
    mime_type = shaystack.MODE_ZINC
    # Build the action grid: target id + action name in metadata, one param row.
    call_grid = shaystack.Grid(metadata={'id': Ref('123'), 'action': 'doIt'},
                               columns={'key': {}, 'value': {}})
    call_grid.append({'param': 'value'})
    request = HaystackHttpRequest()
    request.headers["Content-Type"] = mime_type
    request.headers["Accept"] = mime_type
    request.body = shaystack.dump(call_grid, mode=shaystack.MODE_ZINC)
    # WHEN
    response = shaystack.invoke_action(envs, request, "dev")
    # THEN
    mock.assert_called_once_with(Ref("123"), "doIt", {})
    assert response.status_code == 200
    assert response.headers["Content-Type"].startswith(mime_type)
    assert shaystack.parse(response.body, shaystack.MODE_ZINC) is not None
@patch.object(ping.Provider, 'invoke_action')
def test_invoke_action_without_params_with_zinc(mock):
    # GIVEN
    """Dispatch an action call with an empty parameter grid, ZINC-encoded.

    Args:
        mock: patched ``ping.Provider.invoke_action``.
    """
    envs = {'HAYSTACK_PROVIDER': 'shaystack.providers.ping'}
    mock.return_value = ping._PingGrid
    mime_type = shaystack.MODE_ZINC
    request = HaystackHttpRequest()
    # No rows appended: the action is invoked without parameters.
    grid = shaystack.Grid(metadata={'id': Ref('123'), 'action': 'doIt'},
                          columns={'key': {}, 'value': {}})
    request.headers["Content-Type"] = mime_type
    request.headers["Accept"] = mime_type
    request.body = shaystack.dump(grid, mode=shaystack.MODE_ZINC)
    # WHEN
    response = shaystack.invoke_action(envs, request, "dev")
    # THEN
    mock.assert_called_once_with(Ref("123"), "doIt", {})
    assert response.status_code == 200
    assert response.headers["Content-Type"].startswith(mime_type)
assert shaystack.parse(response.body, shaystack.MODE_ZINC) is not None | tests/test_haystack_invoke_action.py | from unittest.mock import patch
import shaystack
from shaystack import Ref
from shaystack.ops import HaystackHttpRequest
from shaystack.providers import ping
@patch.object(ping.Provider, 'invoke_action')
def test_invoke_action_with_zinc(mock) -> None:
    # GIVEN
    """Dispatch an action call with one parameter, ZINC-encoded.

    Args:
        mock: patched ``ping.Provider.invoke_action``.
    """
    envs = {'HAYSTACK_PROVIDER': 'shaystack.providers.ping'}
    mock.return_value = ping._PingGrid
    mime_type = shaystack.MODE_ZINC
    request = HaystackHttpRequest()
    grid = shaystack.Grid(metadata={'id': Ref('123'), 'action': 'doIt'},
                          columns={'key': {}, 'value': {}})
    grid.append({'param': 'value'})
    request.headers["Content-Type"] = mime_type
    request.headers["Accept"] = mime_type
    request.body = shaystack.dump(grid, mode=shaystack.MODE_ZINC)
    # WHEN
    response = shaystack.invoke_action(envs, request, "dev")
    # THEN
    mock.assert_called_once_with(Ref("123"), "doIt", {})
    assert response.status_code == 200
    assert response.headers["Content-Type"].startswith(mime_type)
    assert shaystack.parse(response.body, shaystack.MODE_ZINC) is not None
@patch.object(ping.Provider, 'invoke_action')
def test_invoke_action_without_params_with_zinc(mock):
    # GIVEN
    """Dispatch an action call with an empty parameter grid, ZINC-encoded.

    Args:
        mock: patched ``ping.Provider.invoke_action``.
    """
    envs = {'HAYSTACK_PROVIDER': 'shaystack.providers.ping'}
    mock.return_value = ping._PingGrid
    mime_type = shaystack.MODE_ZINC
    request = HaystackHttpRequest()
    grid = shaystack.Grid(metadata={'id': Ref('123'), 'action': 'doIt'},
                          columns={'key': {}, 'value': {}})
    request.headers["Content-Type"] = mime_type
    request.headers["Accept"] = mime_type
    request.body = shaystack.dump(grid, mode=shaystack.MODE_ZINC)
    # WHEN
    response = shaystack.invoke_action(envs, request, "dev")
    # THEN
    mock.assert_called_once_with(Ref("123"), "doIt", {})
    assert response.status_code == 200
    assert response.headers["Content-Type"].startswith(mime_type)
assert shaystack.parse(response.body, shaystack.MODE_ZINC) is not None | 0.491944 | 0.483709 |
print('Значения вводятся через запятую')
x1, y1 = map(float, input('Введите координаты 1 точки: ').split(',')) #A
x2, y2 = map(float, input('Введите координаты 2 точки: ').split(',')) #B
x3, y3 = map(float, input('Введите координаты 3 точки: ').split(',')) #C
from math import sqrt
AB = sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
BC = sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2)
AC = sqrt((x1 - x3) ** 2 + (y1 - y3) ** 2)
print()
if (AC * BC * AC != 0) and \
((y1-y2) * x3 + (x2-x1) * y3 + (x1*y2 - x2*y1) != 0):
print('Длина AB: ','{:.5g}'.format(AB))
print('Длина BC: ','{:.5g}'.format(BC))
print('Длина AC: ','{:.5g}'.format(AC))
b = max(AB, BC, AC)
m = min(AB, BC, AC)
s = AB + BC + AC - b - m
L = (sqrt(s * m *(m + s + b)*(m + s - b))) / (s + m)
print('Длина биссектриссы большего угла: ','{:.5}'.format(L))
if abs((b*b) - (m*m + s*s)) <= 0.1 :
print('Этот треугольник является прямоугольным.')
else:
print('Этот треугольник не является прямоугольным.')
print()
x0, y0 = map(float,input('Введите координаты точки: ').split(','))
if (min(x1,x2,x3) <= x0 <= max(x1,x2,x3)) and \
(min(y1,y2,y3) <= y0 <= max(y1,y2,y3)):
r1 = (x1 - x0) * (y2 - y1) - (x2 - x1) * (y1 - y0)
r2 = (x2 - x0) * (y3 - y2) - (x3 - x2) * (y2 - y0)
r3 = (x3 - x0) * (y1 - y3) - (x1 - x3) * (y3 - y0)
if (r1 * r2 * r3 == 0):
print('Точка лежит на стороне треугольника.')
elif (r1 == abs(r1)) == (r2 == abs(r2)) == (r3 == abs(r3)):
print('Точка входит в треугольник.')
h1 = abs((y1-y2)*x0 + (x2-x1)*y0 + (x1*y2 - x2*y1))\
/sqrt((y1-y2)**2 + (x2-x1)**2)
h2 = abs((y3-y2)*x0 + (x2-x3)*y0 + (x3*y2 - x2*y3))\
/sqrt((y3-y2)**2 + (x2-x3)**2)
h3 = abs((y1-y3)*x0 + (x3-x1)*y0 + (x1*y3 - x3*y1))\
/sqrt((y1-y3)**2 + (x3-x1)**2)
print('Расстояние до ближайшей стороны: ', \
'{:.5}'.format(min(h1, h2, h3)))
else:
print('Точка не входит в треугольник.')
else:
print('Точка не входит в треугольник.')
else:
if (x1 == x2 == x3) and (y1 == y2 == y3):
print('Треугольник не существует. Все вершины лежат на одной точке.')
elif AB * BC * AC == 0:
print('Треугольник не существует. Две точки совпадают.')
else:
print('Треугольник не существует т.к. точки лежат на одной прямой.') | 1_semester/triangle.py |
print('Значения вводятся через запятую')
x1, y1 = map(float, input('Введите координаты 1 точки: ').split(',')) #A
x2, y2 = map(float, input('Введите координаты 2 точки: ').split(',')) #B
x3, y3 = map(float, input('Введите координаты 3 точки: ').split(',')) #C
from math import sqrt
AB = sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
BC = sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2)
AC = sqrt((x1 - x3) ** 2 + (y1 - y3) ** 2)
print()
if (AC * BC * AC != 0) and \
((y1-y2) * x3 + (x2-x1) * y3 + (x1*y2 - x2*y1) != 0):
print('Длина AB: ','{:.5g}'.format(AB))
print('Длина BC: ','{:.5g}'.format(BC))
print('Длина AC: ','{:.5g}'.format(AC))
b = max(AB, BC, AC)
m = min(AB, BC, AC)
s = AB + BC + AC - b - m
L = (sqrt(s * m *(m + s + b)*(m + s - b))) / (s + m)
print('Длина биссектриссы большего угла: ','{:.5}'.format(L))
if abs((b*b) - (m*m + s*s)) <= 0.1 :
print('Этот треугольник является прямоугольным.')
else:
print('Этот треугольник не является прямоугольным.')
print()
x0, y0 = map(float,input('Введите координаты точки: ').split(','))
if (min(x1,x2,x3) <= x0 <= max(x1,x2,x3)) and \
(min(y1,y2,y3) <= y0 <= max(y1,y2,y3)):
r1 = (x1 - x0) * (y2 - y1) - (x2 - x1) * (y1 - y0)
r2 = (x2 - x0) * (y3 - y2) - (x3 - x2) * (y2 - y0)
r3 = (x3 - x0) * (y1 - y3) - (x1 - x3) * (y3 - y0)
if (r1 * r2 * r3 == 0):
print('Точка лежит на стороне треугольника.')
elif (r1 == abs(r1)) == (r2 == abs(r2)) == (r3 == abs(r3)):
print('Точка входит в треугольник.')
h1 = abs((y1-y2)*x0 + (x2-x1)*y0 + (x1*y2 - x2*y1))\
/sqrt((y1-y2)**2 + (x2-x1)**2)
h2 = abs((y3-y2)*x0 + (x2-x3)*y0 + (x3*y2 - x2*y3))\
/sqrt((y3-y2)**2 + (x2-x3)**2)
h3 = abs((y1-y3)*x0 + (x3-x1)*y0 + (x1*y3 - x3*y1))\
/sqrt((y1-y3)**2 + (x3-x1)**2)
print('Расстояние до ближайшей стороны: ', \
'{:.5}'.format(min(h1, h2, h3)))
else:
print('Точка не входит в треугольник.')
else:
print('Точка не входит в треугольник.')
else:
if (x1 == x2 == x3) and (y1 == y2 == y3):
print('Треугольник не существует. Все вершины лежат на одной точке.')
elif AB * BC * AC == 0:
print('Треугольник не существует. Две точки совпадают.')
else:
print('Треугольник не существует т.к. точки лежат на одной прямой.') | 0.088885 | 0.566258 |
import pytest
from mitmproxy.test import tflow
from mitmproxy.test import taddons
from mitmproxy.addons import modifyheaders
class TestModifyHeaders:
    def test_parse_modifyheaders(self):
        """parse_modify_headers splits a '/'-separated spec into three fields.

        Extra separators remain part of the last field; when only two fields
        are given the third defaults to '.*'; a spec with no fields raises.
        """
        x = modifyheaders.parse_modify_headers("/foo/bar/voing")
        assert x == ("foo", "bar", "voing")
        # Separators beyond the third are kept verbatim in the last field.
        x = modifyheaders.parse_modify_headers("/foo/bar/vo/ing/")
        assert x == ("foo", "bar", "vo/ing/")
        # Two fields: the missing third defaults to match-everything.
        x = modifyheaders.parse_modify_headers("/bar/voing")
        assert x == ("bar", "voing", ".*")
        with pytest.raises(Exception, match="Invalid replacement"):
            modifyheaders.parse_modify_headers("/")
    def test_configure(self):
        """Configuring modify_headers validates the spec's flow-filter part."""
        sh = modifyheaders.ModifyHeaders()
        with taddons.context(sh) as tctx:
            # An invalid flow-filter expression is rejected at configure time.
            with pytest.raises(Exception, match="Invalid modify_headers flow filter"):
                tctx.configure(sh, modify_headers = ["/one/two/~b"])
            # A well-formed spec configures cleanly.
            tctx.configure(sh, modify_headers = ["/foo/bar/voing"])
    def test_modify_headers(self):
        """End-to-end behavior of header rewriting on request/response hooks.

        Covers: ~q rules applying on request and ~s on response, multiple
        rules for one header accumulating values, and an empty replacement
        removing the header entirely.
        """
        sh = modifyheaders.ModifyHeaders()
        with taddons.context(sh) as tctx:
            # One request-side (~q) and one response-side (~s) rule.
            tctx.configure(
                sh,
                modify_headers = [
                    "/one/two/~q",
                    "/one/three/~s"
                ]
            )
            f = tflow.tflow()
            f.request.headers["one"] = "xxx"
            sh.request(f)
            assert f.request.headers["one"] == "two"
            f = tflow.tflow(resp=True)
            f.response.headers["one"] = "xxx"
            sh.response(f)
            assert f.response.headers["one"] == "three"
            # Two response-side rules for the same header: both values are set.
            tctx.configure(
                sh,
                modify_headers = [
                    "/one/two/~s",
                    "/one/three/~s"
                ]
            )
            f = tflow.tflow(resp=True)
            f.request.headers["one"] = "xxx"
            f.response.headers["one"] = "xxx"
            sh.response(f)
            assert f.response.headers.get_all("one") == ["two", "three"]
            # Same accumulation on the request side.
            tctx.configure(
                sh,
                modify_headers = [
                    "/one/two/~q",
                    "/one/three/~q"
                ]
            )
            f = tflow.tflow()
            f.request.headers["one"] = "xxx"
            sh.request(f)
            assert f.request.headers.get_all("one") == ["two", "three"]
            # test removal of existing headers
            tctx.configure(
                sh,
                modify_headers = [
                    "/one//~q",
                    "/one//~s"
                ]
            )
            f = tflow.tflow()
            f.request.headers["one"] = "xxx"
            sh.request(f)
            assert "one" not in f.request.headers
            f = tflow.tflow(resp=True)
            f.response.headers["one"] = "xxx"
            sh.response(f)
            assert "one" not in f.response.headers
            # A bare "/one/" spec also removes the header on both sides.
            tctx.configure(
                sh,
                modify_headers = [
                    "/one/"
                ]
            )
            f = tflow.tflow()
            f.request.headers["one"] = "xxx"
            sh.request(f)
            assert "one" not in f.request.headers
            f = tflow.tflow(resp=True)
            f.response.headers["one"] = "xxx"
            sh.response(f)
assert "one" not in f.response.headers | test/mitmproxy/addons/test_modifyheaders.py | import pytest
from mitmproxy.test import tflow
from mitmproxy.test import taddons
from mitmproxy.addons import modifyheaders
class TestModifyHeaders:
    def test_parse_modifyheaders(self):
        """parse_modify_headers splits a '/'-separated spec; missing third field defaults to '.*'."""
        x = modifyheaders.parse_modify_headers("/foo/bar/voing")
        assert x == ("foo", "bar", "voing")
        x = modifyheaders.parse_modify_headers("/foo/bar/vo/ing/")
        assert x == ("foo", "bar", "vo/ing/")
        x = modifyheaders.parse_modify_headers("/bar/voing")
        assert x == ("bar", "voing", ".*")
        with pytest.raises(Exception, match="Invalid replacement"):
            modifyheaders.parse_modify_headers("/")
    def test_configure(self):
        """Configuring modify_headers validates the spec's flow-filter part."""
        sh = modifyheaders.ModifyHeaders()
        with taddons.context(sh) as tctx:
            with pytest.raises(Exception, match="Invalid modify_headers flow filter"):
                tctx.configure(sh, modify_headers = ["/one/two/~b"])
            tctx.configure(sh, modify_headers = ["/foo/bar/voing"])
    def test_modify_headers(self):
        """Header rewriting on request/response hooks, accumulation, and removal."""
        sh = modifyheaders.ModifyHeaders()
        with taddons.context(sh) as tctx:
            # ~q rule fires on request, ~s rule on response.
            tctx.configure(
                sh,
                modify_headers = [
                    "/one/two/~q",
                    "/one/three/~s"
                ]
            )
            f = tflow.tflow()
            f.request.headers["one"] = "xxx"
            sh.request(f)
            assert f.request.headers["one"] == "two"
            f = tflow.tflow(resp=True)
            f.response.headers["one"] = "xxx"
            sh.response(f)
            assert f.response.headers["one"] == "three"
            # Multiple rules for one header accumulate values.
            tctx.configure(
                sh,
                modify_headers = [
                    "/one/two/~s",
                    "/one/three/~s"
                ]
            )
            f = tflow.tflow(resp=True)
            f.request.headers["one"] = "xxx"
            f.response.headers["one"] = "xxx"
            sh.response(f)
            assert f.response.headers.get_all("one") == ["two", "three"]
            tctx.configure(
                sh,
                modify_headers = [
                    "/one/two/~q",
                    "/one/three/~q"
                ]
            )
            f = tflow.tflow()
            f.request.headers["one"] = "xxx"
            sh.request(f)
            assert f.request.headers.get_all("one") == ["two", "three"]
            # test removal of existing headers
            tctx.configure(
                sh,
                modify_headers = [
                    "/one//~q",
                    "/one//~s"
                ]
            )
            f = tflow.tflow()
            f.request.headers["one"] = "xxx"
            sh.request(f)
            assert "one" not in f.request.headers
            f = tflow.tflow(resp=True)
            f.response.headers["one"] = "xxx"
            sh.response(f)
            assert "one" not in f.response.headers
            # A bare "/one/" spec also removes the header.
            tctx.configure(
                sh,
                modify_headers = [
                    "/one/"
                ]
            )
            f = tflow.tflow()
            f.request.headers["one"] = "xxx"
            sh.request(f)
            assert "one" not in f.request.headers
            f = tflow.tflow(resp=True)
            f.response.headers["one"] = "xxx"
            sh.response(f)
assert "one" not in f.response.headers | 0.580352 | 0.421373 |
from django.db import models
from rest_framework import serializers
from django.contrib.postgres.fields import JSONField
# Create your models here.
class Account(models.Model):
    """An end-user account: contact channels, an ETH field and JSON profile blobs."""
    zone = models.CharField(default='+86', max_length=10)  # phone dialing prefix, defaults to +86
    mobile = models.CharField(max_length=50)
    twitter = models.CharField(max_length=15, default='')
    facebook = models.CharField(max_length=15, default='')
    email = models.CharField(max_length=50)
    name = models.CharField(max_length=50)
    eth = models.CharField(max_length=500)  # Ethereum-related data; max_length=500 suggests more than a bare address — confirm
    json = JSONField()
    profile = JSONField()  # read by the FirstName/LastName properties below
    remark = models.CharField("备注", max_length=255, default='')  # verbose_name "备注" = "remark"
    class Meta:
        # One combined uniqueness constraint over the triple, not per-field.
        unique_together = (('mobile', 'email', 'name'), )
        ordering = ["id"]
    def __str__(self):
        # Render every concrete field as "name: value", one per line.
        fields = [x.name for x in self._meta.fields]
        return '\n'.join(
            ['{}: {}'.format(key, getattr(self, key)) for key in fields])
    @property
    def LastName(self):
        # Convenience accessor into the JSON profile blob; raises KeyError if absent.
        return self.profile['LastName']
    @property
    def FirstName(self):
        # Convenience accessor into the JSON profile blob; raises KeyError if absent.
        return self.profile['FirstName']
class Apis(models.Model):
    """Per-account API state for external services (one row per Account).

    The JSON blobs hold whatever each integration needs (tokens, ids, ...).
    """
    # A callable default (``dict``) gives every row its own fresh {}.  The
    # previous literal ``default={}`` shared a single mutable dict across
    # all instances, which Django explicitly warns against (fields.E010).
    telegram = JSONField(default=dict)
    twitter = JSONField(default=dict)
    account = models.OneToOneField(Account, on_delete=models.CASCADE)
    class Meta:
        # ``account`` is already unique via the OneToOneField; the pair is
        # kept to preserve the existing migration state.
        unique_together = (('account', 'telegram'), )
class AirDrop(models.Model):
    """An airdrop campaign, keyed by its (unique) name."""
    name = models.CharField(max_length=50, primary_key=True)
    url = models.CharField(max_length=512)
    # verbose_name "创建时间" means "creation time".
    created = models.DateTimeField("创建时间", auto_now_add=True)
    updated = models.DateTimeField("Updated", auto_now=True)
    # class Meta:
    #     unique_together = (('name', 'url'), )
class Operation(models.Model):
    """Join table recording that an account participated in an airdrop."""
    account = models.ForeignKey(Account, on_delete=models.CASCADE)
    airdrop = models.ForeignKey(AirDrop, on_delete=models.CASCADE)
    # verbose_name "创建时间" means "creation time".
    created = models.DateTimeField("创建时间", auto_now_add=True)
    updated = models.DateTimeField("Updated", auto_now=True)
    class Meta:
        # Each account may join a given airdrop at most once.
        unique_together = (('account', 'airdrop'), )
class Link(models.Model):
    """A harvested hyperlink and whether it has been visited yet."""
    # verbose_name "链接" means "link (URL)".
    href = models.TextField("链接")
    # verbose_name "链接内容" means "link text/content".
    text = models.TextField("链接内容")
    # verbose_name "是否被点击过" means "has it been clicked".
    verified = models.BooleanField("是否被点击过", default=False, blank=True)
class AccountSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of Account."""
    class Meta:
        model = Account
        # ``fields = '__all__'`` is the documented DRF idiom for "all model
        # fields"; behaviorally identical to the old ``exclude = []``.
        fields = '__all__'
class AirDropSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of AirDrop."""
    class Meta:
        model = AirDrop
        # ``fields = '__all__'`` is the documented DRF idiom for "all model
        # fields"; behaviorally identical to the old ``exclude = []``.
        fields = '__all__'
class OperationSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of Operation."""
    class Meta:
        model = Operation
        # NOTE(review): dataset-export residue ("| apps/... |") trails the
        # statement below; the intended code is just ``exclude = []``.
        exclude = [] | apps/accounts/models.py | from django.db import models
from rest_framework import serializers
from django.contrib.postgres.fields import JSONField
# Create your models here.
class Account(models.Model):
zone = models.CharField(default='+86', max_length=10)
mobile = models.CharField(max_length=50)
twitter = models.CharField(max_length=15, default='')
facebook = models.CharField(max_length=15, default='')
email = models.CharField(max_length=50)
name = models.CharField(max_length=50)
eth = models.CharField(max_length=500)
json = JSONField()
profile = JSONField()
remark = models.CharField("备注", max_length=255, default='')
class Meta:
unique_together = (('mobile', 'email', 'name'), )
ordering = ["id"]
def __str__(self):
fields = [x.name for x in self._meta.fields]
return '\n'.join(
['{}: {}'.format(key, getattr(self, key)) for key in fields])
@property
def LastName(self):
return self.profile['LastName']
@property
def FirstName(self):
return self.profile['FirstName']
class Apis(models.Model):
telegram = JSONField(default={})
twitter = JSONField(default={})
account = models.OneToOneField(Account, on_delete=models.CASCADE)
class Meta:
unique_together = (('account', 'telegram'), )
class AirDrop(models.Model):
name = models.CharField(max_length=50, primary_key=True)
url = models.CharField(max_length=512)
created = models.DateTimeField("创建时间", auto_now_add=True)
updated = models.DateTimeField("Updated", auto_now=True)
# class Meta:
# unique_together = (('name', 'url'), )
class Operation(models.Model):
account = models.ForeignKey(Account, on_delete=models.CASCADE)
airdrop = models.ForeignKey(AirDrop, on_delete=models.CASCADE)
created = models.DateTimeField("创建时间", auto_now_add=True)
updated = models.DateTimeField("Updated", auto_now=True)
class Meta:
unique_together = (('account', 'airdrop'), )
class Link(models.Model):
href = models.TextField("链接")
text = models.TextField("链接内容")
verified = models.BooleanField("是否被点击过", default=False, blank=True)
class AccountSerializer(serializers.ModelSerializer):
class Meta:
model = Account
exclude = []
class AirDropSerializer(serializers.ModelSerializer):
class Meta:
model = AirDrop
exclude = []
class OperationSerializer(serializers.ModelSerializer):
class Meta:
model = Operation
exclude = [] | 0.59561 | 0.107578 |
from django.urls import re_path
from . import views
# Regex fragments used to assemble the time-series URL patterns below.
group_re = r'(?P<group>' + '|'.join(views.SERIES_GROUPS) + ')'
group_date_re = r'(?P<group>' + '|'.join(views.SERIES_GROUPS_DATE) + ')'
range_re = r'(?P<start>\d{8})-(?P<end>\d{8})'
format_re = r'(?P<format>' + '|'.join(views.SERIES_FORMATS) + ')'
# Full suffix: {group}-{start}-{end}.{format}
series_re = r'%s-%s\.%s$' % (group_re, range_re, format_re)
# Map each series name to its complete anchored pattern.  A dict
# comprehension replaces ``dict(genexpr)``, and the loop variable no longer
# shadows the builtin ``type``.
series = {
    series_type: r'^%s-%s' % (series_type, series_re)
    for series_type in views.SERIES
}
# Addon specific stats.
stats_patterns = [
    # page URLs -- every page routes to the shared ``views.stats_report``
    # view, selecting the report via the ``report`` kwarg.
    re_path(
        r'^$', views.stats_report, name='stats.overview', kwargs={'report': 'overview'}
    ),
    re_path(
        r'^downloads/$',
        views.stats_report,
        name='stats.downloads',
        kwargs={'report': 'downloads'},
    ),
    re_path(
        r'^downloads/sources/$',
        views.stats_report,
        name='stats.sources',
        kwargs={'report': 'sources'},
    ),
    re_path(
        r'^downloads/mediums/$',
        views.stats_report,
        name='stats.mediums',
        kwargs={'report': 'mediums'},
    ),
    re_path(
        r'^downloads/contents/$',
        views.stats_report,
        name='stats.contents',
        kwargs={'report': 'contents'},
    ),
    re_path(
        r'^downloads/campaigns/$',
        views.stats_report,
        name='stats.campaigns',
        kwargs={'report': 'campaigns'},
    ),
    re_path(
        r'^usage/$', views.stats_report, name='stats.usage', kwargs={'report': 'usage'}
    ),
    re_path(
        r'^usage/languages/$',
        views.stats_report,
        name='stats.locales',
        kwargs={'report': 'locales'},
    ),
    re_path(
        r'^usage/versions/$',
        views.stats_report,
        name='stats.versions',
        kwargs={'report': 'versions'},
    ),
    re_path(
        r'^usage/applications/$',
        views.stats_report,
        name='stats.apps',
        kwargs={'report': 'apps'},
    ),
    re_path(
        r'^usage/os/$', views.stats_report, name='stats.os', kwargs={'report': 'os'}
    ),
    re_path(
        r'^usage/countries/$',
        views.stats_report,
        name='stats.countries',
        kwargs={'report': 'countries'},
    ),
    # time series URLs following this pattern:
    # /addon/{addon_id}/statistics/{series}-{group}-{start}-{end}.{format}
    re_path(series['overview'], views.overview_series, name='stats.overview_series'),
    re_path(series['downloads'], views.downloads_series, name='stats.downloads_series'),
    re_path(series['usage'], views.usage_series, name='stats.usage_series'),
    # Download breakdowns share one view, selected by the ``source`` kwarg.
    re_path(
        series['sources'],
        views.download_breakdown_series,
        name='stats.sources_series',
        kwargs={'source': 'sources'},
    ),
    re_path(
        series['mediums'],
        views.download_breakdown_series,
        name='stats.mediums_series',
        kwargs={'source': 'mediums'},
    ),
    re_path(
        series['contents'],
        views.download_breakdown_series,
        name='stats.contents_series',
        kwargs={'source': 'contents'},
    ),
    re_path(
        series['campaigns'],
        views.download_breakdown_series,
        name='stats.campaigns_series',
        kwargs={'source': 'campaigns'},
    ),
    # Usage breakdowns share one view, selected by the ``field`` kwarg.
    re_path(
        series['os'],
        views.usage_breakdown_series,
        name='stats.os_series',
        kwargs={'field': 'oses'},
    ),
    re_path(
        series['locales'],
        views.usage_breakdown_series,
        name='stats.locales_series',
        kwargs={'field': 'locales'},
    ),
    re_path(
        series['versions'],
        views.usage_breakdown_series,
        name='stats.versions_series',
        kwargs={'field': 'versions'},
    ),
    re_path(
        series['apps'],
        views.usage_breakdown_series,
        name='stats.apps_series',
        kwargs={'field': 'applications'},
    ),
    re_path(
        series['countries'],
        views.usage_breakdown_series,
        name='stats.countries_series',
        kwargs={'field': 'countries'},
    ),
] | src/olympia/stats/urls.py | from django.urls import re_path
from . import views
group_re = r'(?P<group>' + '|'.join(views.SERIES_GROUPS) + ')'
group_date_re = r'(?P<group>' + '|'.join(views.SERIES_GROUPS_DATE) + ')'
range_re = r'(?P<start>\d{8})-(?P<end>\d{8})'
format_re = r'(?P<format>' + '|'.join(views.SERIES_FORMATS) + ')'
series_re = r'%s-%s\.%s$' % (group_re, range_re, format_re)
series = dict((type, r'^%s-%s' % (type, series_re)) for type in views.SERIES)
# Addon specific stats.
stats_patterns = [
# page URLs
re_path(
r'^$', views.stats_report, name='stats.overview', kwargs={'report': 'overview'}
),
re_path(
r'^downloads/$',
views.stats_report,
name='stats.downloads',
kwargs={'report': 'downloads'},
),
re_path(
r'^downloads/sources/$',
views.stats_report,
name='stats.sources',
kwargs={'report': 'sources'},
),
re_path(
r'^downloads/mediums/$',
views.stats_report,
name='stats.mediums',
kwargs={'report': 'mediums'},
),
re_path(
r'^downloads/contents/$',
views.stats_report,
name='stats.contents',
kwargs={'report': 'contents'},
),
re_path(
r'^downloads/campaigns/$',
views.stats_report,
name='stats.campaigns',
kwargs={'report': 'campaigns'},
),
re_path(
r'^usage/$', views.stats_report, name='stats.usage', kwargs={'report': 'usage'}
),
re_path(
r'^usage/languages/$',
views.stats_report,
name='stats.locales',
kwargs={'report': 'locales'},
),
re_path(
r'^usage/versions/$',
views.stats_report,
name='stats.versions',
kwargs={'report': 'versions'},
),
re_path(
r'^usage/applications/$',
views.stats_report,
name='stats.apps',
kwargs={'report': 'apps'},
),
re_path(
r'^usage/os/$', views.stats_report, name='stats.os', kwargs={'report': 'os'}
),
re_path(
r'^usage/countries/$',
views.stats_report,
name='stats.countries',
kwargs={'report': 'countries'},
),
# time series URLs following this pattern:
# /addon/{addon_id}/statistics/{series}-{group}-{start}-{end}.{format}
re_path(series['overview'], views.overview_series, name='stats.overview_series'),
re_path(series['downloads'], views.downloads_series, name='stats.downloads_series'),
re_path(series['usage'], views.usage_series, name='stats.usage_series'),
re_path(
series['sources'],
views.download_breakdown_series,
name='stats.sources_series',
kwargs={'source': 'sources'},
),
re_path(
series['mediums'],
views.download_breakdown_series,
name='stats.mediums_series',
kwargs={'source': 'mediums'},
),
re_path(
series['contents'],
views.download_breakdown_series,
name='stats.contents_series',
kwargs={'source': 'contents'},
),
re_path(
series['campaigns'],
views.download_breakdown_series,
name='stats.campaigns_series',
kwargs={'source': 'campaigns'},
),
re_path(
series['os'],
views.usage_breakdown_series,
name='stats.os_series',
kwargs={'field': 'oses'},
),
re_path(
series['locales'],
views.usage_breakdown_series,
name='stats.locales_series',
kwargs={'field': 'locales'},
),
re_path(
series['versions'],
views.usage_breakdown_series,
name='stats.versions_series',
kwargs={'field': 'versions'},
),
re_path(
series['apps'],
views.usage_breakdown_series,
name='stats.apps_series',
kwargs={'field': 'applications'},
),
re_path(
series['countries'],
views.usage_breakdown_series,
name='stats.countries_series',
kwargs={'field': 'countries'},
),
] | 0.535341 | 0.150122 |
import pytest
import pandas as pd
import datetime
from aggregate_transactions import (
Strategy,
process_file,
calculate_proceeds,
CoinbaseTransaction,
TransactionType,
)
@pytest.fixture(scope="session")
def test_start_time():
    """Session-wide anchor timestamp; fixture times are offsets from it."""
    anchor = datetime.datetime.now()
    return anchor
@pytest.fixture
def simple_buy_df(test_start_time):
    """One BTC buy, 30 days before the anchor time, as a one-row DataFrame."""
    purchase_time = test_start_time - datetime.timedelta(days=30)
    tx_kwargs = dict(
        timestamp=purchase_time,
        transaction_type=TransactionType.BUY,
        asset="BTC",
        usd_fees=1.00,
        quantity_transacted=1.0,
        usd_spot_price_at_transaction=10.00,
        usd_subtotal=10.00,
        usd_total=11.00,
    )
    return CoinbaseTransaction(**tx_kwargs).to_df_row()
@pytest.fixture
def simple_sell_df(test_start_time):
    """One BTC sell, 15 days before the anchor time, as a one-row DataFrame."""
    sale_time = test_start_time - datetime.timedelta(days=15)
    tx_kwargs = dict(
        timestamp=sale_time,
        transaction_type=TransactionType.SELL,
        asset="BTC",
        usd_fees=1.00,
        quantity_transacted=1.0,
        usd_spot_price_at_transaction=10.00,
        usd_subtotal=10.00,
        usd_total=11.00,
    )
    return CoinbaseTransaction(**tx_kwargs).to_df_row()
@pytest.fixture
def multi_asset_sell_df(simple_sell_df):
    """Two-row sell frame: the BTC sale plus an identical ETH sale."""
    eth_sell_df = simple_sell_df.copy()
    eth_sell_df.at[0, "Asset"] = "ETH"
    combined = pd.concat([simple_sell_df, eth_sell_df], ignore_index=True)
    return combined
@pytest.fixture
def multi_asset_buy_df(simple_buy_df):
    """Two-row buy frame: the BTC purchase plus an identical ETH purchase."""
    eth_buy_df = simple_buy_df.copy()
    eth_buy_df.at[0, "Asset"] = "ETH"
    combined = pd.concat([simple_buy_df, eth_buy_df], ignore_index=True)
    return combined
def test_simple_tx_history():
    """End-to-end: proceeds for the sample CSV are sale minus cost basis."""
    output_df = process_file("test_transaction_simple.csv")
    expected_proceeds = 700.02 - 602.02
    assert output_df["PROCEEDS"].item() == expected_proceeds
def test_simple_buy_sell(simple_buy_df, simple_sell_df):
    """A buy and a sell of equal size and price should net to minus the
    buy-side fee (here $1), and the buy lot should be fully consumed."""
    print("simple buy df:\n")
    print(simple_buy_df.to_string())
    print("simple sell df:\n")
    print(simple_sell_df.to_string())
    result = calculate_proceeds(
        simple_sell_df, simple_buy_df, strategy=Strategy.HIFO
    )
    print(result.to_string())
    expected_loss = -1.0
    assert result["PROCEEDS"][0] == expected_loss
    consumed_quantity = simple_buy_df["quantity_attributed_to_profit"][0]
    assert consumed_quantity == 1.0
def test_only_sell(simple_buy_df, simple_sell_df):
    """A sale with no matching buy lot must raise.

    Example: somebody holds old ethereum on a hardware wallet, transfers it
    to an exchange and sells it; the exchange has no purchase on record to
    cover the cost basis.
    """
    empty_buy_df = simple_buy_df.drop(0)
    print("simple buy df:\n")
    print(empty_buy_df.to_string())
    with pytest.raises(Exception):
        calculate_proceeds(simple_sell_df, empty_buy_df, strategy=Strategy.HIFO)
def test_multi_asset_type(multi_asset_buy_df, multi_asset_sell_df):
    # NOTE(review): the sibling tests pass (sell_df, buy_df) to
    # calculate_proceeds, but this one passes (buy_df, sell_df).  Confirm
    # against the function signature which order is correct.
    print("multi asset sell and buy dfs:\n")
    print(multi_asset_sell_df.to_string())
    print(multi_asset_buy_df.to_string())
    output_df = calculate_proceeds(
        multi_asset_buy_df, multi_asset_sell_df, strategy=Strategy.HIFO
    )
    print("multi output df:\n")
    print(output_df.to_string())
    # Both assets were bought and sold at the same price/quantity, so each
    # should show proceeds of exactly the -1.0 buy-side fee.
    assert all(output_df[output_df["ASSET NAME"] == "BTC"].PROCEEDS == -1.0)
    assert all(output_df[output_df["ASSET NAME"] == "ETH"].PROCEEDS == -1.0)
def test_multi_currency(simple_buy_df, simple_sell_df):
    """In this scenario, we don't have enough buy
    to attribute to the sale.
    Say for example that somebody had some old ethereum
    on a hardware wallet, transfers it to an exchange and sells it.
    In this scenario, the exchange is not aware of any asset to
    cover the cost and therefore.
    """
    # NOTE(review): verbatim duplicate of test_only_sell despite the name --
    # it never exercises multiple currencies.  Implement the multi-currency
    # scenario or remove the duplicate.
    simple_buy_df = simple_buy_df.drop(0)
    print("simple buy df:\n")
    print(simple_buy_df.to_string())
    with pytest.raises(Exception):
        calculate_proceeds(simple_sell_df, simple_buy_df, strategy=Strategy.HIFO) | test_aggregator.py | import pytest
import pandas as pd
import datetime
from aggregate_transactions import (
Strategy,
process_file,
calculate_proceeds,
CoinbaseTransaction,
TransactionType,
)
@pytest.fixture(scope="session")
def test_start_time():
return datetime.datetime.now()
@pytest.fixture
def simple_buy_df(test_start_time):
buy_time = test_start_time - datetime.timedelta(days=30)
buy_tx = CoinbaseTransaction(
timestamp=buy_time,
transaction_type=TransactionType.BUY,
asset="BTC",
usd_fees=1.00,
quantity_transacted=1.0,
usd_spot_price_at_transaction=10.00,
usd_subtotal=10.00,
usd_total=11.00,
)
return buy_tx.to_df_row()
@pytest.fixture
def simple_sell_df(test_start_time):
sell_time = test_start_time - datetime.timedelta(days=15)
sell_tx = CoinbaseTransaction(
timestamp=sell_time,
transaction_type=TransactionType.SELL,
asset="BTC",
usd_fees=1.00,
quantity_transacted=1.0,
usd_spot_price_at_transaction=10.00,
usd_subtotal=10.00,
usd_total=11.00,
)
return sell_tx.to_df_row()
@pytest.fixture
def multi_asset_sell_df(simple_sell_df):
"""Create a mixed df of assets"""
new_sell_df = simple_sell_df.copy()
new_sell_df.at[0, "Asset"] = "ETH"
return pd.concat([simple_sell_df, new_sell_df], ignore_index=True)
@pytest.fixture
def multi_asset_buy_df(simple_buy_df):
"""Create a mixed df of assets"""
new_buy_df = simple_buy_df.copy()
new_buy_df.at[0, "Asset"] = "ETH"
return pd.concat([simple_buy_df, new_buy_df], ignore_index=True)
def test_simple_tx_history():
filepath = "test_transaction_simple.csv"
output_df = process_file(filepath)
assert output_df["PROCEEDS"].item() == (700.02 - 602.02)
def test_simple_buy_sell(simple_buy_df, simple_sell_df):
"""Given one simple buy transaction and sell
transaction at the same price with the same qantity,
the proceeds would equate to just the fees on the buy
transaction"""
print("simple buy df:\n")
print(simple_buy_df.to_string())
print("simple sell df:\n")
print(simple_sell_df.to_string())
output_df = calculate_proceeds(
simple_sell_df, simple_buy_df, strategy=Strategy.HIFO
)
print(output_df.to_string())
assert output_df["PROCEEDS"][0] == -1.0
assert simple_buy_df["quantity_attributed_to_profit"][0] == 1.0
def test_only_sell(simple_buy_df, simple_sell_df):
"""In this scenario, we don't have enough buy
to attribute to the sale.
Say for example that somebody had some old ethereum
on a hardware wallet, transfers it to an exchagne and sells it.
In this scenario, the exchange is not aware of any asset to
cover the cost and therefore.
"""
simple_buy_df = simple_buy_df.drop(0)
print("simple buy df:\n")
print(simple_buy_df.to_string())
with pytest.raises(Exception):
calculate_proceeds(simple_sell_df, simple_buy_df, strategy=Strategy.HIFO)
def test_multi_asset_type(multi_asset_buy_df, multi_asset_sell_df):
print("multi asset sell and buy dfs:\n")
print(multi_asset_sell_df.to_string())
print(multi_asset_buy_df.to_string())
output_df = calculate_proceeds(
multi_asset_buy_df, multi_asset_sell_df, strategy=Strategy.HIFO
)
print("multi output df:\n")
print(output_df.to_string())
assert all(output_df[output_df["ASSET NAME"] == "BTC"].PROCEEDS == -1.0)
assert all(output_df[output_df["ASSET NAME"] == "ETH"].PROCEEDS == -1.0)
def test_multi_currency(simple_buy_df, simple_sell_df):
"""In this scenario, we don't have enough buy
to attribute to the sale.
Say for example that somebody had some old ethereum
on a hardware wallet, transfers it to an exchagne and sells it.
In this scenario, the exchange is not aware of any asset to
cover the cost and therefore.
"""
simple_buy_df = simple_buy_df.drop(0)
print("simple buy df:\n")
print(simple_buy_df.to_string())
with pytest.raises(Exception):
calculate_proceeds(simple_sell_df, simple_buy_df, strategy=Strategy.HIFO) | 0.534612 | 0.453988 |
import argparse
import configparser
import os
import shutil
from jinja2 import Template
from typing import Callable, Union, List
from functools import reduce
def parse_args():
    """Return parsed args when this file is executed rather than imported.

    Positional: ``source`` (template tree) and one or more ``conf`` paths.
    Options: ``-o/--output`` destination dir, ``-e/--extension`` filter.
    """
    parser = argparse.ArgumentParser(
        description="Render of a folder tree of jinja templates, from an INI file.")
    parser.add_argument("source",
                        type=str,
                        help="path to templates to render")
    parser.add_argument("conf",
                        type=str,
                        nargs='+',
                        help="path(s) to the configuration file(s)")
    # Help text fixed: it previously repeated the ``conf`` description
    # ("path to the configuration file") due to a copy-paste slip.
    parser.add_argument("-o", "--output",
                        dest='destination',
                        type=str,
                        help="path to write rendered output to (default: render in-place)")
    parser.add_argument("-e", "--extension",
                        type=str,
                        default='',
                        help="only attempt to render files with this extension (and just copy other files); "
                             "the custom extension will be stripped from the rendered filenames")
    declared_args = parser.parse_args()
    return declared_args
def config_path_to_configparser_instance(item: Union[configparser.ConfigParser, str]) -> configparser.ConfigParser:
    """Convert a path string to a fully loaded ConfigParser instance.

    If the provided argument is already a ConfigParser instance, it is
    returned unchanged.
    """
    # isinstance (rather than ``type(...) is str``) also accepts str subclasses.
    if isinstance(item, str):
        config = configparser.ConfigParser()
        config.read(item)
        return config
    return item
def merge_configs(config: Union[configparser.ConfigParser, str, List[Union[configparser.ConfigParser, str]]]) \
        -> configparser.ConfigParser:
    """Merge ConfigParser instances and/or config-file paths into one
    ConfigParser instance.

    Later entries win: when the same option appears in the same section of
    several configs, the value from the config listed last is kept.
    """
    # Normalize the argument to a list
    if isinstance(config, (str, configparser.ConfigParser)):
        config = [config]
    # Load any entries given as file paths
    configs = [config_path_to_configparser_instance(item) for item in config]
    # Get a unique list of all sections
    sections = reduce(lambda acc, cfg: acc.union(cfg.sections()), configs, set())
    # Merge all configs section-by-section.  ``{**d, **cfg[section]}`` lets a
    # later config override an earlier one; the previous
    # ``dict(**d, **x[section])`` form raised TypeError ("multiple values for
    # keyword argument") whenever the same option appeared twice.
    merged = configparser.ConfigParser()
    for section in sections:
        merged[section] = reduce(
            lambda d, cfg: {**d, **cfg[section]} if section in cfg else d,
            configs,
            {},
        )
    return merged
class JinjaWalk:
    """JinjaWalk() -> new instance of a template tree walker.
    JinjaWalk(filename_filter, filename_modifier) -> new instance with custom filename modifiers
    """
    def __init__(self,
                 filename_filter: Callable[[str], bool] = lambda s: True,
                 filename_modifier: Callable[[str], str] = lambda s: s) -> None:
        # filename_filter decides which files get rendered (the rest are
        # copied verbatim); filename_modifier rewrites a rendered file's
        # basename (e.g. stripping a ".j2" suffix).
        self.filename_filter = filename_filter
        self.filename_modifier = filename_modifier
    def walk(self, config: Union[configparser.ConfigParser, str, List[Union[configparser.ConfigParser, str]]],
             source_dir: str, output_dir: str, namespace: str = 'config'):
        """Render a template tree using key-value pairs from given config file(s)"""
        assert namespace == namespace.strip()
        config = merge_configs(config)
        for root, dirs, files in os.walk(source_dir):
            if output_dir is None:
                # render templates in place
                output_folder = root
            else:
                # render templates in a user-specified destination,
                # mirroring the source layout under output_dir
                relative_root = root[len(source_dir):]
                output_folder = os.path.join(output_dir, relative_root.strip(os.path.sep))
                os.makedirs(output_folder, exist_ok=True)
            for file in files:
                full_source_file_path = os.path.join(root, file)
                if self.filename_filter(file):
                    # NOTE(review): opened without an explicit encoding, so the
                    # locale default applies -- confirm templates are UTF-8/ASCII.
                    with open(full_source_file_path, 'r') as fd:
                        data = fd.read()
                    template = Template(data)
                    rendered_template_base_filename = self.filename_modifier(file)
                    full_destination_file_path = os.path.join(output_folder, rendered_template_base_filename)
                    # Expose the merged config to the template under ``namespace``.
                    kwargs = {namespace: config}
                    template.stream(**kwargs).dump(full_destination_file_path)
                else:
                    if output_folder != root:
                        # copy is needed only if this is a not in-place rendering (otherwise shutil.SameFileError)
                        shutil.copy(full_source_file_path, output_folder)
if __name__ == '__main__':
    # CLI entry point: build a walker honouring the optional -e extension
    # filter (render only matching files, stripping the extension from the
    # rendered names), then render ``source`` using the given config(s).
    args = parse_args()
    if args.extension != '':
        walker = JinjaWalk(filename_filter=lambda s: s.endswith(args.extension),
                           filename_modifier=lambda s: s[:-len(args.extension)])
    else:
        walker = JinjaWalk()
walker.walk(args.conf, args.source, args.destination) | jinjawalk.py |
import argparse
import configparser
import os
import shutil
from jinja2 import Template
from typing import Callable, Union, List
from functools import reduce
def parse_args():
"""Return parsed args when this file is executed rather than imported."""
parser = argparse.ArgumentParser(
description="Render of a folder tree of jinja templates, from an INI file.")
parser.add_argument("source",
type=str,
help="path to templates to render")
parser.add_argument("conf",
type=str,
nargs='+',
help="path(s) to the configuration file(s)")
parser.add_argument("-o", "--output",
dest='destination',
type=str,
help="path to the configuration file (default: render in-place)")
parser.add_argument("-e", "--extension",
type=str,
default='',
help="only attempt to render files with this extension (and just copy other files); "
"the custom extension will be stripped from the rendered filenames")
declared_args = parser.parse_args()
return declared_args
def config_path_to_configparser_instance(item: Union[configparser.ConfigParser, str]) -> configparser.ConfigParser:
"""Convert a path string to fully loaded ConfigParser instances.
If the provided argument is already a ConfigParser instances, it would be returned intact.
"""
if type(item) is str:
config = configparser.ConfigParser()
config.read(item)
return config
return item
def merge_configs(config: Union[configparser.ConfigParser, str, List[Union[configparser.ConfigParser, str]]]) \
-> configparser.ConfigParser:
"""Take a list of ConfigParser instances and path strings to config files, and merge them all into a single
ConfigParser instance.
"""
# Convert to list
if type(config) in [str, configparser.ConfigParser]:
config = [config]
# Load all config files
config = list(map(config_path_to_configparser_instance, config))
# Get a unique list of all sections
sections = reduce(lambda s, x: s.union(x.sections()), config, set())
# Merge all configs section-by-section
merged = configparser.ConfigParser()
for section in sections:
merged[section] = reduce(lambda d, x: dict(**d, **x[section]) if section in x else d, config, {})
return merged
class JinjaWalk:
"""JinjaWalk() -> new instance of a template tree walker.
JinjaWalk(filename_filter, filename_modifier) -> new instance with custom filename modifiers
"""
def __init__(self,
filename_filter: Callable[[str], bool] = lambda s: True,
filename_modifier: Callable[[str], str] = lambda s: s) -> None:
self.filename_filter = filename_filter
self.filename_modifier = filename_modifier
def walk(self, config: Union[configparser.ConfigParser, str, List[Union[configparser.ConfigParser, str]]],
source_dir: str, output_dir: str, namespace: str = 'config'):
"""Render a template tree using key-value pairs from given config file(s)"""
assert namespace == namespace.strip()
config = merge_configs(config)
for root, dirs, files in os.walk(source_dir):
if output_dir is None:
# render templates in place
output_folder = root
else:
# render templates in a user-specified destination
relative_root = root[len(source_dir):]
output_folder = os.path.join(output_dir, relative_root.strip(os.path.sep))
os.makedirs(output_folder, exist_ok=True)
for file in files:
full_source_file_path = os.path.join(root, file)
if self.filename_filter(file):
with open(full_source_file_path, 'r') as fd:
data = fd.read()
template = Template(data)
rendered_template_base_filename = self.filename_modifier(file)
full_destination_file_path = os.path.join(output_folder, rendered_template_base_filename)
kwargs = {namespace: config}
template.stream(**kwargs).dump(full_destination_file_path)
else:
if output_folder != root:
# copy is needed only if this is a not in-place rendering (otherwise shutil.SameFileError)
shutil.copy(full_source_file_path, output_folder)
if __name__ == '__main__':
args = parse_args()
if args.extension != '':
walker = JinjaWalk(filename_filter=lambda s: s.endswith(args.extension),
filename_modifier=lambda s: s[:-len(args.extension)])
else:
walker = JinjaWalk()
walker.walk(args.conf, args.source, args.destination) | 0.833291 | 0.163579 |
from django import forms
from haystack.forms import SearchForm
from .fields import CustomField
from apps.category.models import Category
from apps.global_category.models import GlobalCategory
from apps.shop.models import Shop
from .models import Product, ProductImage
class ProductForm(forms.ModelForm):
    """Creation form for Product, limited to the current user's shops."""
    class Meta:
        model = Product
        exclude = ['slug', 'objects', 'sell_count', 'counter']
    section = forms.ModelChoiceField(queryset=GlobalCategory.objects.filter(published=True))
    parent_categories = CustomField(queryset=Category.objects.filter(parent=None))
    # Id lists managed by the image-upload widget layer.
    removed_images = forms.CharField(required=False)
    uploaded_images = forms.CharField(required=False)
    def __init__(self, *args, **kwargs):
        # The view passes the request user through ``initial`` so the shop
        # choices can be restricted to shops the user owns.
        self.user = kwargs['initial']['user']
        super(ProductForm, self).__init__(*args, **kwargs)
        self.fields['shop'].queryset = Shop.objects.filter(user__in=[self.user.id])
        # Category pickers start disabled; presumably enabled client-side
        # once a section is chosen -- TODO confirm against the template JS.
        self.fields.get('parent_categories').widget.attrs['disabled'] = True
        self.fields.get('category').widget.attrs['disabled'] = True
        for field in iter(self.fields):
            self.fields[field].widget.attrs.update({
                'class': 'form-control'
            })
    def clean(self):
        """Require shop/title/category/price, attaching per-field errors.

        Uses ``add_error`` (the documented Django API) instead of assigning
        bare strings into ``self._errors``, which bypassed Django's
        error-list bookkeeping, and returns ``cleaned_data`` as the
        ``Form.clean`` contract describes.
        """
        cleaned_data = super(ProductForm, self).clean()
        error_msg = "*Обязательное поле"  # "*Required field"
        for required_field in ('shop', 'title', 'category', 'price'):
            value = cleaned_data.get(required_field, '')
            if value is None or value == "":
                self.add_error(required_field, error_msg)
        return cleaned_data
class ProductUpdateForm(forms.ModelForm):
    """Edit form for Product; mirrors ProductForm but without the
    required-field validation and the per-user shop restriction."""
    class Meta:
        model = Product
        exclude = ['objects', 'slug', 'sell_count', 'counter']
    section = forms.ModelChoiceField(queryset=GlobalCategory.objects.filter(published=True))
    parent_categories = forms.ModelChoiceField(queryset=Category.objects.filter(parent=None))
    removed_images = forms.CharField(required=False)
    uploaded_images = forms.CharField(required=False)
    def __init__(self, *args, **kwargs):
        # The request user arrives via ``initial`` (same contract as ProductForm).
        self.user = kwargs['initial']['user']
        super(ProductUpdateForm, self).__init__(*args, **kwargs)
        # NOTE(review): the disabled queryset-narrowing below is kept for
        # reference; confirm whether it should be restored or deleted.
        # self.fields['shop'].queryset = Shop.objects.filter(user__in=[self.user.id])
        # self.fields['parent_categories'].queryset = Category.objects.filter(parent=None, section__id=kwargs.get('initial')['section'])
        # self.fields['category'].queryset = Category.objects.get(id=kwargs.get("initial")['parent_categories']).get_descendants()
        for field in iter(self.fields):
            self.fields[field].widget.attrs.update({
                'class': 'form-control'
            })
class ProductImagesForm(forms.ModelForm):
    """Single-image upload form for a product gallery entry."""
    class Meta:
        model = ProductImage
        fields = ['image']
class ProductSearchForm(SearchForm):
    """Haystack search form whose results are limited to Product."""
    models = [Product]
    def get_models(self):
        """Return the model classes this form is allowed to search."""
        return self.models
    def search(self):
        """Run the base haystack search, narrowed to our models."""
        base_results = super(ProductSearchForm, self).search()
        return base_results.models(*self.get_models())
class ShopSearchForm(SearchForm):
    """Haystack search form whose results are limited to Shop."""
    models = [Shop]
    def get_models(self):
        # The model classes this form searches.
        return self.models
    def search(self):
        sqs = super(ShopSearchForm, self).search().models(*self.get_models())
        # NOTE(review): dataset-export residue ("| apps/... |") trails the
        # statement below; the intended code is just ``return sqs``.
        return sqs | apps/product/forms.py | from django import forms
from haystack.forms import SearchForm
from .fields import CustomField
from apps.category.models import Category
from apps.global_category.models import GlobalCategory
from apps.shop.models import Shop
from .models import Product, ProductImage
class ProductForm(forms.ModelForm):
class Meta:
model = Product
exclude = ['slug', 'objects', 'sell_count', 'counter']
section = forms.ModelChoiceField(queryset=GlobalCategory.objects.filter(published=True))
parent_categories = CustomField(queryset=Category.objects.filter(parent=None))
removed_images = forms.CharField(required=False)
uploaded_images = forms.CharField(required=False)
def __init__(self, *args, **kwargs):
self.user = kwargs['initial']['user']
super(ProductForm, self).__init__(*args, **kwargs)
self.fields['shop'].queryset = Shop.objects.filter(user__in=[self.user.id])
self.fields.get('parent_categories').widget.attrs['disabled'] = True
self.fields.get('category').widget.attrs['disabled'] = True
for field in iter(self.fields):
self.fields[field].widget.attrs.update({
'class': 'form-control'
})
def clean(self):
cleaned_data = super(ProductForm, self).clean()
title = cleaned_data.get('title', '')
shop = cleaned_data.get('shop', '')
category = cleaned_data.get('category', '')
price = cleaned_data.get('price', '')
error_msg = "*Обязательное поле"
if shop is None or shop == "":
self._errors['shop'] = error_msg
if title is None or title == "":
self._errors['title'] = error_msg
if category is None or category == "":
self._errors['category'] = error_msg
if price is None or price == "":
self._errors['price'] = error_msg
class ProductUpdateForm(forms.ModelForm):
class Meta:
model = Product
exclude = ['objects', 'slug', 'sell_count', 'counter']
section = forms.ModelChoiceField(queryset=GlobalCategory.objects.filter(published=True))
parent_categories = forms.ModelChoiceField(queryset=Category.objects.filter(parent=None))
removed_images = forms.CharField(required=False)
uploaded_images = forms.CharField(required=False)
def __init__(self, *args, **kwargs):
self.user = kwargs['initial']['user']
super(ProductUpdateForm, self).__init__(*args, **kwargs)
# self.fields['shop'].queryset = Shop.objects.filter(user__in=[self.user.id])
# self.fields['parent_categories'].queryset = Category.objects.filter(parent=None, section__id=kwargs.get('initial')['section'])
# self.fields['category'].queryset = Category.objects.get(id=kwargs.get("initial")['parent_categories']).get_descendants()
for field in iter(self.fields):
self.fields[field].widget.attrs.update({
'class': 'form-control'
})
class ProductImagesForm(forms.ModelForm):
class Meta:
model = ProductImage
fields = ['image']
class ProductSearchForm(SearchForm):
models = [Product]
def get_models(self):
return self.models
def search(self):
sqs = super(ProductSearchForm, self).search().models(*self.get_models())
return sqs
class ShopSearchForm(SearchForm):
models = [Shop]
def get_models(self):
return self.models
def search(self):
sqs = super(ShopSearchForm, self).search().models(*self.get_models())
return sqs | 0.480966 | 0.134208 |
import logging
import socket
import errno
from io import BytesIO
import msgpack
import select
_log = logging.getLogger(__name__)
# Key present in every message, identifying which message type it is.
MSG_KEY_TYPE = "type"
# Init message Felix -> Driver.
MSG_TYPE_INIT = "init"
MSG_KEY_ETCD_URLS = "etcd_urls"
MSG_KEY_HOSTNAME = "hostname"
MSG_KEY_KEY_FILE = "etcd_key_file"
MSG_KEY_CERT_FILE = "etcd_cert_file"
MSG_KEY_CA_FILE = "etcd_ca_file"
MSG_KEY_PROM_PORT = "prom_port"
# Config loaded message Driver -> Felix.
MSG_TYPE_CONFIG_LOADED = "config_loaded"
MSG_KEY_GLOBAL_CONFIG = "global"
MSG_KEY_HOST_CONFIG = "host"
# Config message Felix -> Driver.
MSG_TYPE_CONFIG = "conf"
MSG_KEY_LOG_FILE = "log_file"
MSG_KEY_SEV_FILE = "sev_file"
MSG_KEY_SEV_SCREEN = "sev_screen"
MSG_KEY_SEV_SYSLOG = "sev_syslog"
# Status message Driver -> Felix.
MSG_TYPE_STATUS = "stat"
MSG_KEY_STATUS = "status"
STATUS_WAIT_FOR_READY = "wait-for-ready"
STATUS_RESYNC = "resync"
STATUS_IN_SYNC = "in-sync"
# Force resync message Felix->Driver.
MSG_TYPE_RESYNC = "resync"
# Update message Driver -> Felix.
MSG_TYPE_UPDATE = "u"
MSG_KEY_KEY = "k"
MSG_KEY_VALUE = "v"
# Number of buffered, not-yet-flushed messages after which MessageWriter
# flushes to the socket even when flush=False was requested.
FLUSH_THRESHOLD = 200
class SocketClosed(Exception):
    """Raised when the remote end closes the socket unexpectedly."""
class WriteFailed(Exception):
    """Raised when a write to the socket fails."""
class MessageWriter(object):
    """
    Serialises protocol messages onto a socket.

    Messages are msgpack-encoded into an in-memory buffer, which is
    written to the socket either on demand (flush=True) or automatically
    once more than FLUSH_THRESHOLD messages have been buffered.
    """

    def __init__(self, sck):
        self._sck = sck
        self._buf = BytesIO()
        self._updates_pending = 0

    def send_message(self, msg_type, fields=None, flush=True):
        """
        Encode and buffer one message, optionally flushing to the socket.

        :param msg_type: one of the MSG_TYPE_* constants.
        :param dict fields: optional mapping of MSG_KEY_* constants to
            values, merged into the message body.
        :param flush: if True, push the buffer to the socket now;
            otherwise flush only once many messages have accumulated.
        """
        payload = {MSG_KEY_TYPE: msg_type}
        if fields:
            payload.update(fields)
        self._buf.write(msgpack.dumps(payload))
        if flush:
            self.flush()
        else:
            self._maybe_flush()

    def _maybe_flush(self):
        # Count one more unflushed message; flush once the backlog is big.
        self._updates_pending += 1
        if self._updates_pending > FLUSH_THRESHOLD:
            self.flush()

    def flush(self):
        """Write all buffered bytes to the socket immediately."""
        _log.debug("Flushing the buffer to the socket")
        pending = self._buf.getvalue()
        if pending:
            try:
                self._sck.sendall(pending)
            except socket.error as e:
                # On failure the buffer is deliberately left intact.
                _log.exception("Failed to write to socket")
                raise WriteFailed(e)
        self._buf = BytesIO()
        self._updates_pending = 0
class MessageReader(object):
    """Reads and decodes protocol messages from a socket."""

    def __init__(self, sck):
        self._sck = sck
        self._unpacker = msgpack.Unpacker()

    def new_messages(self, timeout=1):
        """
        Generator: yields (message type, message dict) tuples.

        May legitimately yield nothing even when data is on its way (for
        example, if the socket read hits EAGAIN).

        :param timeout: maximum time to wait for the socket to become
            readable; on timeout the generator finishes without yielding
            and without raising.
        :raises SocketClosed: if the other end closed the socket.
        :raises socket.error: for unexpected socket failures.
        """
        if timeout is not None:
            readable, _, _ = select.select([self._sck], [], [], timeout)
            if not readable:
                return
        try:
            data = self._sck.recv(16384)
        except socket.error as e:
            if e.errno not in (errno.EAGAIN, errno.EWOULDBLOCK, errno.EINTR):
                _log.error("Failed to read from socket: %r", e)
                raise
            _log.debug("Retryable error on read.")
            return
        if not data:
            # An orderly shutdown by the peer is unexpected here.
            _log.error("Socket closed by other end.")
            raise SocketClosed()
        # Hand the bytes to the streaming unpacker; it yields one message
        # per complete msgpack object it now holds.
        self._unpacker.feed(data)
        for msg in self._unpacker:
            _log.debug("Unpacked message: %s", msg)
            yield msg[MSG_KEY_TYPE], msg  # pragma: nocover
import socket
import errno
from io import BytesIO
import msgpack
import select
_log = logging.getLogger(__name__)
# Key present in every message, identifying which message type it is.
MSG_KEY_TYPE = "type"
# Init message Felix -> Driver.
MSG_TYPE_INIT = "init"
MSG_KEY_ETCD_URLS = "etcd_urls"
MSG_KEY_HOSTNAME = "hostname"
MSG_KEY_KEY_FILE = "etcd_key_file"
MSG_KEY_CERT_FILE = "etcd_cert_file"
MSG_KEY_CA_FILE = "etcd_ca_file"
MSG_KEY_PROM_PORT = "prom_port"
# Config loaded message Driver -> Felix.
MSG_TYPE_CONFIG_LOADED = "config_loaded"
MSG_KEY_GLOBAL_CONFIG = "global"
MSG_KEY_HOST_CONFIG = "host"
# Config message Felix -> Driver.
MSG_TYPE_CONFIG = "conf"
MSG_KEY_LOG_FILE = "log_file"
MSG_KEY_SEV_FILE = "sev_file"
MSG_KEY_SEV_SCREEN = "sev_screen"
MSG_KEY_SEV_SYSLOG = "sev_syslog"
# Status message Driver -> Felix.
MSG_TYPE_STATUS = "stat"
MSG_KEY_STATUS = "status"
STATUS_WAIT_FOR_READY = "wait-for-ready"
STATUS_RESYNC = "resync"
STATUS_IN_SYNC = "in-sync"
# Force resync message Felix->Driver.
MSG_TYPE_RESYNC = "resync"
# Update message Driver -> Felix.
MSG_TYPE_UPDATE = "u"
MSG_KEY_KEY = "k"
MSG_KEY_VALUE = "v"
# Number of buffered, not-yet-flushed messages after which MessageWriter
# flushes to the socket even when flush=False was requested.
FLUSH_THRESHOLD = 200
class SocketClosed(Exception):
    """Raised when the remote end closes the socket unexpectedly."""
class WriteFailed(Exception):
    """Raised when a write to the socket fails."""
class MessageWriter(object):
    """
    Serialises protocol messages onto a socket.

    Messages are msgpack-encoded into an in-memory buffer, which is
    written to the socket either on demand (flush=True) or automatically
    once more than FLUSH_THRESHOLD messages have been buffered.
    """

    def __init__(self, sck):
        self._sck = sck
        self._buf = BytesIO()
        self._updates_pending = 0

    def send_message(self, msg_type, fields=None, flush=True):
        """
        Encode and buffer one message, optionally flushing to the socket.

        :param msg_type: one of the MSG_TYPE_* constants.
        :param dict fields: optional mapping of MSG_KEY_* constants to
            values, merged into the message body.
        :param flush: if True, push the buffer to the socket now;
            otherwise flush only once many messages have accumulated.
        """
        payload = {MSG_KEY_TYPE: msg_type}
        if fields:
            payload.update(fields)
        self._buf.write(msgpack.dumps(payload))
        if flush:
            self.flush()
        else:
            self._maybe_flush()

    def _maybe_flush(self):
        # Count one more unflushed message; flush once the backlog is big.
        self._updates_pending += 1
        if self._updates_pending > FLUSH_THRESHOLD:
            self.flush()

    def flush(self):
        """Write all buffered bytes to the socket immediately."""
        _log.debug("Flushing the buffer to the socket")
        pending = self._buf.getvalue()
        if pending:
            try:
                self._sck.sendall(pending)
            except socket.error as e:
                # On failure the buffer is deliberately left intact.
                _log.exception("Failed to write to socket")
                raise WriteFailed(e)
        self._buf = BytesIO()
        self._updates_pending = 0
class MessageReader(object):
    """Reads and decodes protocol messages from a socket."""

    def __init__(self, sck):
        self._sck = sck
        self._unpacker = msgpack.Unpacker()

    def new_messages(self, timeout=1):
        """
        Generator: yields (message type, message dict) tuples.

        May legitimately yield nothing even when data is on its way (for
        example, if the socket read hits EAGAIN).

        :param timeout: maximum time to wait for the socket to become
            readable; on timeout the generator finishes without yielding
            and without raising.
        :raises SocketClosed: if the other end closed the socket.
        :raises socket.error: for unexpected socket failures.
        """
        if timeout is not None:
            readable, _, _ = select.select([self._sck], [], [], timeout)
            if not readable:
                return
        try:
            data = self._sck.recv(16384)
        except socket.error as e:
            if e.errno not in (errno.EAGAIN, errno.EWOULDBLOCK, errno.EINTR):
                _log.error("Failed to read from socket: %r", e)
                raise
            _log.debug("Retryable error on read.")
            return
        if not data:
            # An orderly shutdown by the peer is unexpected here.
            _log.error("Socket closed by other end.")
            raise SocketClosed()
        # Hand the bytes to the streaming unpacker; it yields one message
        # per complete msgpack object it now holds.
        self._unpacker.feed(data)
        for msg in self._unpacker:
            _log.debug("Unpacked message: %s", msg)
            yield msg[MSG_KEY_TYPE], msg  # pragma: nocover
import mandelbrot.mandelbrot_alg as mb
from numba import jit, njit, prange, vectorize, guvectorize, float64, int64
# Took the following from Thomas' example to avoid errors when trying to run files
# No-op for use with profiling and test
# Fallback for the @profile decorator injected by line_profiler/kernprof:
# if `profile` is not already defined, install an identity wrapper so the
# decorated functions below still run normally.
try:
    @profile
    def f(x): return x
except NameError:  # was a bare `except:`, which also swallowed KeyboardInterrupt etc.
    def profile(func):
        def inner(*args, **kwargs):
            return func(*args, **kwargs)
        return inner
@profile
def naive(detail, rVals, iVals, res):
    """
    Straightforward double-for-loop computation of the Mandelbrot set.

    INPUT::

        detail : int
            Resolution of the simulation (the grid is detail x detail).
        rVals : Numpy array of size (detail,)
            Real parts of the candidate values of c.
        iVals : Numpy array of size (detail,)
            Imaginary parts of the candidate values of c.
        res : Numpy array of size (detail, detail)
            Pre-allocated output matrix, overwritten in place.

    OUTPUT::

        res : Numpy array of size (detail, detail)
            The result of the Mandelbrot-set function for every c on the
            grid.
    """
    for row in range(detail):
        for col in range(detail):
            res[row, col] = mb.M(rVals[col] + iVals[row] * 1j)
    return res
@jit
def jit_func(detail, rVals, iVals, res):
    """Numba @jit-compiled version of naive()."""
    for row in range(detail):
        for col in range(detail):
            res[row, col] = mb.M_jit(rVals[col] + iVals[row] * 1j)
    return res
@njit(parallel=True)
def njit_par(detail, rVals, iVals, res):
    """
    Parallel Numba version of naive(): @njit with parallel=True, using
    prange so the compiler may split the loops across threads.
    """
    for row in prange(detail):
        for col in prange(detail):
            res[row, col] = mb.M_jit(rVals[col] + iVals[row] * 1j)
    return res
@vectorize(['float32(float32, float32)', 'float64(float64, float64)'])
def _vectorised_loop(r, i):
    """
    Elementwise helper for vectorised(): evaluates mb.M at c = r + i*1j.
    The @vectorize decorator turns it into a ufunc over float inputs.
    """
    return mb.M(r + i * 1j)
@profile
def vectorised(detail, rVals, iVals, res):
    """
    Row-at-a-time vectorised variant of naive().

    Each call to the vectorised helper fills one whole row.  Rows are
    preferred over columns because the numpy array is C-contiguous by
    default, so row elements are adjacent in memory.
    """
    for row in range(detail):
        res[row, :] = _vectorised_loop(rVals, iVals[row])
    return res
@vectorize(['float32(float32, float32)', 'float64(float64, float64)'])
def _jit_vectorised_loop(r, i):
    """
    Elementwise helper for jit_vectorised(): same as _vectorised_loop()
    but delegates to the jit-compiled mb.M_jit.
    """
    return mb.M_jit(r + i * 1j)
@jit
def jit_vectorised(detail, rVals, iVals, res):
    """
    @jit-compiled version of vectorised(), delegating each row to the
    Numba-optimised elementwise helper.
    """
    for row in range(detail):
        res[row, :] = _jit_vectorised_loop(rVals, iVals[row])
    return res
@guvectorize(['void(int64, float64[:], float64[:], float64[:, :])'], '(), (n),(n)->(n,n)', target='cpu')
def gu_jit_vectorised(detail, rVals, iVals, res):
    """
    A generalised ufunc (@guvectorize) covering both loops of jit_func().

    Instead of returning the result, it writes into its final argument,
    ``res``, as described by the layout signature in the decorator::

        (), (n),(n)->(n,n)

    i.e. a scalar plus two length-n arrays produce an n x n array.
    NOTE(review): the decorator's dtype signature fixes the inputs to
    int64/float64 -- confirm callers always pass those types.
    """
    for i in range(detail):
        for r in range(detail):
            res[i, r] = mb.M_jit(rVals[r] + iVals[i]*1j)
@njit(parallel=True)
def jit_save_z(detail, rVals, iVals, res, z_res, I, T):
    """
    Parallel variant that also records the final value of :math:`z` from
    the last iteration of the Mandelbrot-set function.

    This function is only used by plot_z_values.py.

    INPUT::

        detail : int
            How detailed should the simulation be.
        rVals : Numpy array of size (detail,)
            The values for the real component of c to iterate over.
        iVals : Numpy array of size (detail,)
            The values for the imaginary component of c to iterate over.
        res : Numpy array of floats of size (detail, detail)
            Pre-allocated output matrix for the Mandelbrot-set values.
        z_res : Numpy array of complex128 of size (detail, detail)
            Pre-allocated output matrix for the last value of z.
        I : int
            Maximum number of iterations.
        T : float
            Threshold value.

    OUTPUT::

        z_res : Numpy array of complex128 of size (detail, detail)
            The last value of z before mb.M_save_z returned, per point.
        res : Numpy array of size (detail, detail)
            The Mandelbrot-set function result, per point.
    """
    for i in prange(detail):
        for r in prange(detail):
            z_res[i, r], res[i, r] = mb.M_save_z(rVals[r] + iVals[i]*1j, I, T)
    return z_res, res
from numba import jit, njit, prange, vectorize, guvectorize, float64, int64
# Took the following from Thomas' example to avoid errors when trying to run files
# No-op for use with profiling and test
# Fallback for the @profile decorator injected by line_profiler/kernprof:
# if `profile` is not already defined, install an identity wrapper so the
# decorated functions below still run normally.
try:
    @profile
    def f(x): return x
except NameError:  # was a bare `except:`, which also swallowed KeyboardInterrupt etc.
    def profile(func):
        def inner(*args, **kwargs):
            return func(*args, **kwargs)
        return inner
@profile
def naive(detail, rVals, iVals, res):
    """
    Straightforward double-for-loop computation of the Mandelbrot set.

    INPUT::

        detail : int
            Resolution of the simulation (the grid is detail x detail).
        rVals : Numpy array of size (detail,)
            Real parts of the candidate values of c.
        iVals : Numpy array of size (detail,)
            Imaginary parts of the candidate values of c.
        res : Numpy array of size (detail, detail)
            Pre-allocated output matrix, overwritten in place.

    OUTPUT::

        res : Numpy array of size (detail, detail)
            The result of the Mandelbrot-set function for every c on the
            grid.
    """
    for row in range(detail):
        for col in range(detail):
            res[row, col] = mb.M(rVals[col] + iVals[row] * 1j)
    return res
@jit
def jit_func(detail, rVals, iVals, res):
    """Numba @jit-compiled version of naive()."""
    for row in range(detail):
        for col in range(detail):
            res[row, col] = mb.M_jit(rVals[col] + iVals[row] * 1j)
    return res
@njit(parallel=True)
def njit_par(detail, rVals, iVals, res):
    """
    Parallel Numba version of naive(): @njit with parallel=True, using
    prange so the compiler may split the loops across threads.
    """
    for row in prange(detail):
        for col in prange(detail):
            res[row, col] = mb.M_jit(rVals[col] + iVals[row] * 1j)
    return res
@vectorize(['float32(float32, float32)', 'float64(float64, float64)'])
def _vectorised_loop(r, i):
    """
    Elementwise helper for vectorised(): evaluates mb.M at c = r + i*1j.
    The @vectorize decorator turns it into a ufunc over float inputs.
    """
    return mb.M(r + i * 1j)
@profile
def vectorised(detail, rVals, iVals, res):
    """
    Row-at-a-time vectorised variant of naive().

    Each call to the vectorised helper fills one whole row.  Rows are
    preferred over columns because the numpy array is C-contiguous by
    default, so row elements are adjacent in memory.
    """
    for row in range(detail):
        res[row, :] = _vectorised_loop(rVals, iVals[row])
    return res
@vectorize(['float32(float32, float32)', 'float64(float64, float64)'])
def _jit_vectorised_loop(r, i):
    """
    Elementwise helper for jit_vectorised(): same as _vectorised_loop()
    but delegates to the jit-compiled mb.M_jit.
    """
    return mb.M_jit(r + i * 1j)
@jit
def jit_vectorised(detail, rVals, iVals, res):
    """
    @jit-compiled version of vectorised(), delegating each row to the
    Numba-optimised elementwise helper.
    """
    for row in range(detail):
        res[row, :] = _jit_vectorised_loop(rVals, iVals[row])
    return res
@guvectorize(['void(int64, float64[:], float64[:], float64[:, :])'], '(), (n),(n)->(n,n)', target='cpu')
def gu_jit_vectorised(detail, rVals, iVals, res):
    """
    A generalised ufunc (@guvectorize) covering both loops of jit_func().

    Instead of returning the result, it writes into its final argument,
    ``res``, as described by the layout signature in the decorator::

        (), (n),(n)->(n,n)

    i.e. a scalar plus two length-n arrays produce an n x n array.
    NOTE(review): the decorator's dtype signature fixes the inputs to
    int64/float64 -- confirm callers always pass those types.
    """
    for i in range(detail):
        for r in range(detail):
            res[i, r] = mb.M_jit(rVals[r] + iVals[i]*1j)
@njit(parallel=True)
def jit_save_z(detail, rVals, iVals, res, z_res, I, T):
    """
    Parallel variant that also records the final value of :math:`z` from
    the last iteration of the Mandelbrot-set function.

    This function is only used by plot_z_values.py.

    INPUT::

        detail : int
            How detailed should the simulation be.
        rVals : Numpy array of size (detail,)
            The values for the real component of c to iterate over.
        iVals : Numpy array of size (detail,)
            The values for the imaginary component of c to iterate over.
        res : Numpy array of floats of size (detail, detail)
            Pre-allocated output matrix for the Mandelbrot-set values.
        z_res : Numpy array of complex128 of size (detail, detail)
            Pre-allocated output matrix for the last value of z.
        I : int
            Maximum number of iterations.
        T : float
            Threshold value.

    OUTPUT::

        z_res : Numpy array of complex128 of size (detail, detail)
            The last value of z before mb.M_save_z returned, per point.
        res : Numpy array of size (detail, detail)
            The Mandelbrot-set function result, per point.
    """
    for i in prange(detail):
        for r in prange(detail):
            z_res[i, r], res[i, r] = mb.M_save_z(rVals[r] + iVals[i]*1j, I, T)
    return z_res, res
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
# Project URL routes.
# NOTE(review): views are referenced by dotted string path, a style that was
# deprecated in Django 1.8 and removed in 1.10 -- presumably this project
# targets an older Django; confirm before upgrading.
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^inicio/$', 'tecnoservicio.users.views.inicio', name='inicio'),
    url(r'^salir/$', 'tecnoservicio.users.views.salir', name='salir'),
    url(r'^mi-password/$', 'tecnoservicio.users.views.mi_password', name='mi_password'),
    url(r'^lista-usuarios/$', 'tecnoservicio.users.views.lista_usuario', name='lista_usuario'),
    url(r'^alta-usuarios/$', 'tecnoservicio.users.views.alta_usuario', name='alta_usuario'),
    url(r'^editar-usuario/(.+)/$', 'tecnoservicio.users.views.editar_usuario', name='editar_usuario'),
    url(r'^eliminar-usuario/(.+)/$', 'tecnoservicio.users.views.eliminar_usuario', name='eliminar_usuario'),
    url(r'^lista-ordenes/$', 'tecnoservicio.ordenes.views.lista_orden', name='lista_orden'),
    url(r'^alta-orden/$', 'tecnoservicio.ordenes.views.alta_orden', name='alta_orden'),
    url(r'^editar-orden/(.+)/$', 'tecnoservicio.ordenes.views.editar_orden', name='editar_orden'),
    url(r'^imprimir-orden/(.+)/$', 'tecnoservicio.ordenes.views.imprimir_orden', name='imprimir_orden'),
    url(r'^calendario/(.+)/$', 'tecnoservicio.ordenes.views.calendario', name='calendario'),
    url(r'^publicidad/$', 'tecnoservicio.ordenes.views.publicidad', name='publicidad'),
    url(r'^lista-cortes/$', 'tecnoservicio.ordenes.views.lista_cortes', name='lista_cortes'),
    url(r'^generar-corte/(.+)/$', 'tecnoservicio.ordenes.views.generar_corte', name='generar_corte'),
    url(r'^corte/(.+)/$', 'tecnoservicio.ordenes.views.corte', name='corte'),
    url(r'^reportes/$', 'tecnoservicio.ordenes.views.reportes', name='reportes'),
    url(r'^ordenes-icon/(.+)/(.+)/$', 'tecnoservicio.ordenes.views.ordenes_icon', name='ordenes_icon'),
    url(r'^ordenes-tecno/(.+)/(.+)/$', 'tecnoservicio.ordenes.views.ordenes_tecno', name='ordenes_tecno'),
    url(r'^armados-locales/(.+)/(.+)/$', 'tecnoservicio.ordenes.views.armados_locales', name='armados_locales'),
    url(r'^armados-foraneos/(.+)/(.+)/$', 'tecnoservicio.ordenes.views.armados_foraneos', name='armados_foraneos'),
    url(r'^manual/usuario/$', TemplateView.as_view(template_name='ordenes/manual_usuarios.html'), name="manual_usuarios"),
    url(r'^manual/iconfield/$', TemplateView.as_view(template_name='ordenes/manual_iconfield.html'), name="manual_iconfield"),
    # AJAX
    url(r'^actualizar_marca/$', 'tecnoservicio.ordenes.views.actualizar_marca', name='actualizar_marca'),
    url(r'^actualizar_modelo/$', 'tecnoservicio.ordenes.views.actualizar_modelo', name='actualizar_modelo'),
    url(r'^calendario_ordenes/$', 'tecnoservicio.ordenes.views.calendario_ordenes', name='calendario_ordenes'),
    # Django Admin (Comment the next line to disable the admin)
    url(r'^admin/', include(admin.site.urls)),
    # User management
    url(r'^users/', include("tecnoservicio.users.urls", namespace="users")),
    url(r'^acceso/', include('allauth.urls')),
    # Serve uploaded media directly (development-style serving).
    url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # During development these routes let the error pages be previewed:
    # visit the URLs directly in a browser to see how they look.
    urlpatterns += [
        url(r'^400/$', 'django.views.defaults.bad_request'),
        url(r'^403/$', 'django.views.defaults.permission_denied'),
        url(r'^404/$', 'django.views.defaults.page_not_found'),
        url(r'^500/$', 'django.views.defaults.server_error'),
    ]
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
# Project URL routes.
# NOTE(review): views are referenced by dotted string path, a style that was
# deprecated in Django 1.8 and removed in 1.10 -- presumably this project
# targets an older Django; confirm before upgrading.
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^inicio/$', 'tecnoservicio.users.views.inicio', name='inicio'),
    url(r'^salir/$', 'tecnoservicio.users.views.salir', name='salir'),
    url(r'^mi-password/$', 'tecnoservicio.users.views.mi_password', name='mi_password'),
    url(r'^lista-usuarios/$', 'tecnoservicio.users.views.lista_usuario', name='lista_usuario'),
    url(r'^alta-usuarios/$', 'tecnoservicio.users.views.alta_usuario', name='alta_usuario'),
    url(r'^editar-usuario/(.+)/$', 'tecnoservicio.users.views.editar_usuario', name='editar_usuario'),
    url(r'^eliminar-usuario/(.+)/$', 'tecnoservicio.users.views.eliminar_usuario', name='eliminar_usuario'),
    url(r'^lista-ordenes/$', 'tecnoservicio.ordenes.views.lista_orden', name='lista_orden'),
    url(r'^alta-orden/$', 'tecnoservicio.ordenes.views.alta_orden', name='alta_orden'),
    url(r'^editar-orden/(.+)/$', 'tecnoservicio.ordenes.views.editar_orden', name='editar_orden'),
    url(r'^imprimir-orden/(.+)/$', 'tecnoservicio.ordenes.views.imprimir_orden', name='imprimir_orden'),
    url(r'^calendario/(.+)/$', 'tecnoservicio.ordenes.views.calendario', name='calendario'),
    url(r'^publicidad/$', 'tecnoservicio.ordenes.views.publicidad', name='publicidad'),
    url(r'^lista-cortes/$', 'tecnoservicio.ordenes.views.lista_cortes', name='lista_cortes'),
    url(r'^generar-corte/(.+)/$', 'tecnoservicio.ordenes.views.generar_corte', name='generar_corte'),
    url(r'^corte/(.+)/$', 'tecnoservicio.ordenes.views.corte', name='corte'),
    url(r'^reportes/$', 'tecnoservicio.ordenes.views.reportes', name='reportes'),
    url(r'^ordenes-icon/(.+)/(.+)/$', 'tecnoservicio.ordenes.views.ordenes_icon', name='ordenes_icon'),
    url(r'^ordenes-tecno/(.+)/(.+)/$', 'tecnoservicio.ordenes.views.ordenes_tecno', name='ordenes_tecno'),
    url(r'^armados-locales/(.+)/(.+)/$', 'tecnoservicio.ordenes.views.armados_locales', name='armados_locales'),
    url(r'^armados-foraneos/(.+)/(.+)/$', 'tecnoservicio.ordenes.views.armados_foraneos', name='armados_foraneos'),
    url(r'^manual/usuario/$', TemplateView.as_view(template_name='ordenes/manual_usuarios.html'), name="manual_usuarios"),
    url(r'^manual/iconfield/$', TemplateView.as_view(template_name='ordenes/manual_iconfield.html'), name="manual_iconfield"),
    # AJAX
    url(r'^actualizar_marca/$', 'tecnoservicio.ordenes.views.actualizar_marca', name='actualizar_marca'),
    url(r'^actualizar_modelo/$', 'tecnoservicio.ordenes.views.actualizar_modelo', name='actualizar_modelo'),
    url(r'^calendario_ordenes/$', 'tecnoservicio.ordenes.views.calendario_ordenes', name='calendario_ordenes'),
    # Django Admin (Comment the next line to disable the admin)
    url(r'^admin/', include(admin.site.urls)),
    # User management
    url(r'^users/', include("tecnoservicio.users.urls", namespace="users")),
    url(r'^acceso/', include('allauth.urls')),
    # Serve uploaded media directly (development-style serving).
    url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # During development these routes let the error pages be previewed:
    # visit the URLs directly in a browser to see how they look.
    urlpatterns += [
        url(r'^400/$', 'django.views.defaults.bad_request'),
        url(r'^403/$', 'django.views.defaults.permission_denied'),
        url(r'^404/$', 'django.views.defaults.page_not_found'),
        url(r'^500/$', 'django.views.defaults.server_error'),
    ]
# Protocol-buffer module generated by protoc from edgelist.proto
# (see the @@protoc_insertion_point markers).  Do not hand-edit the
# logic; regenerate with protoc instead.
import sys
# _b(): identity on Python 2, latin1-encode on Python 3, so the embedded
# serialized descriptor below works under both interpreters.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File descriptor built from the serialized edgelist.proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='edgelist.proto',
  package='func2vec',
  syntax='proto3',
  serialized_pb=_b('\n\x0e\x65\x64gelist.proto\x12\x08\x66unc2vec\"\xa8\x02\n\x08\x45\x64gelist\x12%\n\x04\x65\x64ge\x18\x01 \x03(\x0b\x32\x17.func2vec.Edgelist.Edge\x12\x36\n\x0bid_to_label\x18\x02 \x03(\x0b\x32!.func2vec.Edgelist.IdToLabelEntry\x1aY\n\x04\x45\x64ge\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x0e\n\x06target\x18\x02 \x01(\t\x12\r\n\x05label\x18\x03 \x01(\t\x12\x10\n\x08label_id\x18\x05 \x03(\x05\x12\x10\n\x08location\x18\x04 \x01(\t\x1a\x16\n\x05Label\x12\r\n\x05label\x18\x01 \x01(\t\x1aJ\n\x0eIdToLabelEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\'\n\x05value\x18\x02 \x01(\x0b\x32\x18.func2vec.Edgelist.Label:\x02\x38\x01\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Generated descriptor for nested message func2vec.Edgelist.Edge.
_EDGELIST_EDGE = _descriptor.Descriptor(
  name='Edge',
  full_name='func2vec.Edgelist.Edge',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='source', full_name='func2vec.Edgelist.Edge.source', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='target', full_name='func2vec.Edgelist.Edge.target', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='label', full_name='func2vec.Edgelist.Edge.label', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='label_id', full_name='func2vec.Edgelist.Edge.label_id', index=3,
      number=5, type=5, cpp_type=1, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='location', full_name='func2vec.Edgelist.Edge.location', index=4,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=136,
  serialized_end=225,
)
# Generated descriptor for nested message func2vec.Edgelist.Label.
_EDGELIST_LABEL = _descriptor.Descriptor(
  name='Label',
  full_name='func2vec.Edgelist.Label',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='label', full_name='func2vec.Edgelist.Label.label', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=227,
  serialized_end=249,
)
# Generated descriptor for the map-entry message backing the
# id_to_label map field (note the map_entry option below).
_EDGELIST_IDTOLABELENTRY = _descriptor.Descriptor(
  name='IdToLabelEntry',
  full_name='func2vec.Edgelist.IdToLabelEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='func2vec.Edgelist.IdToLabelEntry.key', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='value', full_name='func2vec.Edgelist.IdToLabelEntry.value', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=251,
  serialized_end=325,
)
# Generated descriptor for the top-level message func2vec.Edgelist.
_EDGELIST = _descriptor.Descriptor(
  name='Edgelist',
  full_name='func2vec.Edgelist',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='edge', full_name='func2vec.Edgelist.edge', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='id_to_label', full_name='func2vec.Edgelist.id_to_label', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_EDGELIST_EDGE, _EDGELIST_LABEL, _EDGELIST_IDTOLABELENTRY, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=29,
  serialized_end=325,
)
# Wire up cross-references between the descriptors (containing types and
# message-typed fields), then register everything with the symbol database.
_EDGELIST_EDGE.containing_type = _EDGELIST
_EDGELIST_LABEL.containing_type = _EDGELIST
_EDGELIST_IDTOLABELENTRY.fields_by_name['value'].message_type = _EDGELIST_LABEL
_EDGELIST_IDTOLABELENTRY.containing_type = _EDGELIST
_EDGELIST.fields_by_name['edge'].message_type = _EDGELIST_EDGE
_EDGELIST.fields_by_name['id_to_label'].message_type = _EDGELIST_IDTOLABELENTRY
DESCRIPTOR.message_types_by_name['Edgelist'] = _EDGELIST
# Concrete message classes, built at import time via the reflection API.
Edgelist = _reflection.GeneratedProtocolMessageType('Edgelist', (_message.Message,), dict(
  Edge = _reflection.GeneratedProtocolMessageType('Edge', (_message.Message,), dict(
    DESCRIPTOR = _EDGELIST_EDGE,
    __module__ = 'edgelist_pb2'
    # @@protoc_insertion_point(class_scope:func2vec.Edgelist.Edge)
    ))
  ,
  Label = _reflection.GeneratedProtocolMessageType('Label', (_message.Message,), dict(
    DESCRIPTOR = _EDGELIST_LABEL,
    __module__ = 'edgelist_pb2'
    # @@protoc_insertion_point(class_scope:func2vec.Edgelist.Label)
    ))
  ,
  IdToLabelEntry = _reflection.GeneratedProtocolMessageType('IdToLabelEntry', (_message.Message,), dict(
    DESCRIPTOR = _EDGELIST_IDTOLABELENTRY,
    __module__ = 'edgelist_pb2'
    # @@protoc_insertion_point(class_scope:func2vec.Edgelist.IdToLabelEntry)
    ))
  ,
  DESCRIPTOR = _EDGELIST,
  __module__ = 'edgelist_pb2'
  # @@protoc_insertion_point(class_scope:func2vec.Edgelist)
  ))
_sym_db.RegisterMessage(Edgelist)
_sym_db.RegisterMessage(Edgelist.Edge)
_sym_db.RegisterMessage(Edgelist.Label)
_sym_db.RegisterMessage(Edgelist.IdToLabelEntry)
# The '8\001' option bytes mark IdToLabelEntry as a synthetic map entry.
_EDGELIST_IDTOLABELENTRY.has_options = True
_EDGELIST_IDTOLABELENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='edgelist.proto',
package='func2vec',
syntax='proto3',
serialized_pb=_b('\n\x0e\x65\x64gelist.proto\x12\x08\x66unc2vec\"\xa8\x02\n\x08\x45\x64gelist\x12%\n\x04\x65\x64ge\x18\x01 \x03(\x0b\x32\x17.func2vec.Edgelist.Edge\x12\x36\n\x0bid_to_label\x18\x02 \x03(\x0b\x32!.func2vec.Edgelist.IdToLabelEntry\x1aY\n\x04\x45\x64ge\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x0e\n\x06target\x18\x02 \x01(\t\x12\r\n\x05label\x18\x03 \x01(\t\x12\x10\n\x08label_id\x18\x05 \x03(\x05\x12\x10\n\x08location\x18\x04 \x01(\t\x1a\x16\n\x05Label\x12\r\n\x05label\x18\x01 \x01(\t\x1aJ\n\x0eIdToLabelEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\'\n\x05value\x18\x02 \x01(\x0b\x32\x18.func2vec.Edgelist.Label:\x02\x38\x01\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_EDGELIST_EDGE = _descriptor.Descriptor(
name='Edge',
full_name='func2vec.Edgelist.Edge',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='func2vec.Edgelist.Edge.source', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='target', full_name='func2vec.Edgelist.Edge.target', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='label', full_name='func2vec.Edgelist.Edge.label', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='label_id', full_name='func2vec.Edgelist.Edge.label_id', index=3,
number=5, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='location', full_name='func2vec.Edgelist.Edge.location', index=4,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=136,
serialized_end=225,
)
_EDGELIST_LABEL = _descriptor.Descriptor(
name='Label',
full_name='func2vec.Edgelist.Label',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='label', full_name='func2vec.Edgelist.Label.label', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=227,
serialized_end=249,
)
_EDGELIST_IDTOLABELENTRY = _descriptor.Descriptor(
name='IdToLabelEntry',
full_name='func2vec.Edgelist.IdToLabelEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='func2vec.Edgelist.IdToLabelEntry.key', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='func2vec.Edgelist.IdToLabelEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=251,
serialized_end=325,
)
_EDGELIST = _descriptor.Descriptor(
name='Edgelist',
full_name='func2vec.Edgelist',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='edge', full_name='func2vec.Edgelist.edge', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='id_to_label', full_name='func2vec.Edgelist.id_to_label', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_EDGELIST_EDGE, _EDGELIST_LABEL, _EDGELIST_IDTOLABELENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=29,
serialized_end=325,
)
_EDGELIST_EDGE.containing_type = _EDGELIST
_EDGELIST_LABEL.containing_type = _EDGELIST
_EDGELIST_IDTOLABELENTRY.fields_by_name['value'].message_type = _EDGELIST_LABEL
_EDGELIST_IDTOLABELENTRY.containing_type = _EDGELIST
_EDGELIST.fields_by_name['edge'].message_type = _EDGELIST_EDGE
_EDGELIST.fields_by_name['id_to_label'].message_type = _EDGELIST_IDTOLABELENTRY
DESCRIPTOR.message_types_by_name['Edgelist'] = _EDGELIST
Edgelist = _reflection.GeneratedProtocolMessageType('Edgelist', (_message.Message,), dict(
Edge = _reflection.GeneratedProtocolMessageType('Edge', (_message.Message,), dict(
DESCRIPTOR = _EDGELIST_EDGE,
__module__ = 'edgelist_pb2'
# @@protoc_insertion_point(class_scope:func2vec.Edgelist.Edge)
))
,
Label = _reflection.GeneratedProtocolMessageType('Label', (_message.Message,), dict(
DESCRIPTOR = _EDGELIST_LABEL,
__module__ = 'edgelist_pb2'
# @@protoc_insertion_point(class_scope:func2vec.Edgelist.Label)
))
,
IdToLabelEntry = _reflection.GeneratedProtocolMessageType('IdToLabelEntry', (_message.Message,), dict(
DESCRIPTOR = _EDGELIST_IDTOLABELENTRY,
__module__ = 'edgelist_pb2'
# @@protoc_insertion_point(class_scope:func2vec.Edgelist.IdToLabelEntry)
))
,
DESCRIPTOR = _EDGELIST,
__module__ = 'edgelist_pb2'
# @@protoc_insertion_point(class_scope:func2vec.Edgelist)
))
_sym_db.RegisterMessage(Edgelist)
_sym_db.RegisterMessage(Edgelist.Edge)
_sym_db.RegisterMessage(Edgelist.Label)
_sym_db.RegisterMessage(Edgelist.IdToLabelEntry)
_EDGELIST_IDTOLABELENTRY.has_options = True
_EDGELIST_IDTOLABELENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope) | 0.263694 | 0.172033 |
import tensorflow.keras.applications as keras_models
from codesign.config import supported_models
from benchmark.benchmark import Workload
from benchmark.computations import conv2d_compute, mm_compute, dwconv_compute
def get_model(model_name, input_shape):
    """Instantiate a randomly-initialized Keras application by name.

    Only names listed in ``supported_models`` are accepted; anything else
    raises NotImplementedError. Weights are left uninitialized
    (``weights=None``) since only the layer shapes are needed downstream.
    """
    if model_name not in supported_models:
        raise NotImplementedError('unsupported model')
    model_ctor = getattr(keras_models, model_name)
    keras_model = model_ctor(weights=None, include_top=True, input_shape=input_shape)
    print('Get the keras model: ' + model_name)
    return keras_model
def get_workloads(model, dtype, layout):
    """Walk a Keras model and build one Workload per supported layer.

    Conv2D -> CONV, DepthwiseConv2D -> DWCONV, SeparableConv2D -> a DWCONV
    followed by a 1x1 CONV, Dense -> GEMM. Unsupported layer types are
    skipped. Returns the workloads in layer order (duplicates kept); the
    number of unique workload tags is printed as a side effect.
    """
    workloads = []
    UNIQUE_WORKLOADS = set()
    for layer in model.layers:
        config = layer.get_config()
        ltype = layer.__class__.__name__
        is_conv2d = ltype == 'Conv2D'
        is_dwconv2d = ltype == 'DepthwiseConv2D'
        is_sepconv2d = ltype == 'SeparableConv2D'
        is_gemm = ltype == 'Dense'
        if (is_conv2d or is_dwconv2d or is_sepconv2d):
            # Index layout assumes NHWC tensors (batch, y, x, channels) —
            # TODO confirm the model is built with channels_last.
            c = layer.input_shape[3]
            y = layer.input_shape[1]
            x = layer.input_shape[2]
            k = layer.output_shape[3]
            r = layer.kernel_size[0]
            s = layer.kernel_size[1]
            stride = config["strides"][0]  # assumes square strides
            if is_conv2d:
                # Batch size is hard-coded to 1 for all workloads.
                args = (1, c, y, x, k, r, s, stride, dtype, layout)
                conv = Workload(config["name"], "CONV", conv2d_compute, args)
                workloads.append(conv)
                UNIQUE_WORKLOADS.add(conv.tag)
            elif is_dwconv2d:
                # k // c recovers the depthwise channel multiplier.
                args = (1, c, y, x, k // c, r, s, stride, dtype, layout)
                dwconv = Workload(config["name"], "DWCONV", dwconv_compute, args)
                workloads.append(dwconv)
                UNIQUE_WORKLOADS.add(dwconv.tag)
            elif is_sepconv2d:
                # A separable conv is modelled as a depthwise pass followed
                # by a 1x1 pointwise conv over the expanded channels.
                yo = layer.output_shape[1]
                xo = layer.output_shape[2]
                ko = int(config["depth_multiplier"])
                dwargs = (1, c, y, x, ko, r, s, stride, dtype, layout)
                dwconv = Workload(config["name"]+"_dw", "DWCONV", dwconv_compute, dwargs)
                pwargs = (1, c * ko , yo, xo, k, 1, 1, 1, dtype, layout)
                pwconv = Workload(config["name"]+"_pw", "CONV", conv2d_compute, pwargs)
                workloads.append(dwconv)
                workloads.append(pwconv)
                UNIQUE_WORKLOADS.add(dwconv.tag)
                UNIQUE_WORKLOADS.add(pwconv.tag)
        elif is_gemm:
            # Dense layer as a 1 x n x k matrix multiply.
            m = 1
            n = layer.output_shape[1]
            k = layer.input_shape[1]
            args = (m, n, k, dtype, layout)
            gemm = Workload(config["name"], "GEMM", mm_compute, args)
            workloads.append(gemm)
            UNIQUE_WORKLOADS.add(gemm.tag)
        else: # unsupported layers
            continue
    print("Unique workloads: ", len(UNIQUE_WORKLOADS))
    return workloads | src/benchmark/keras_extend.py | import tensorflow.keras.applications as keras_models
from codesign.config import supported_models
from benchmark.benchmark import Workload
from benchmark.computations import conv2d_compute, mm_compute, dwconv_compute
def get_model(model_name, input_shape):
if model_name not in supported_models:
raise NotImplementedError('unsupported model')
keras_model = getattr(keras_models, model_name)(weights=None, include_top=True, input_shape=input_shape)
print('Get the keras model: ' + model_name)
return keras_model
def get_workloads(model, dtype, layout):
workloads = []
UNIQUE_WORKLOADS = set()
for layer in model.layers:
config = layer.get_config()
ltype = layer.__class__.__name__
is_conv2d = ltype == 'Conv2D'
is_dwconv2d = ltype == 'DepthwiseConv2D'
is_sepconv2d = ltype == 'SeparableConv2D'
is_gemm = ltype == 'Dense'
if (is_conv2d or is_dwconv2d or is_sepconv2d):
c = layer.input_shape[3]
y = layer.input_shape[1]
x = layer.input_shape[2]
k = layer.output_shape[3]
r = layer.kernel_size[0]
s = layer.kernel_size[1]
stride = config["strides"][0]
if is_conv2d:
args = (1, c, y, x, k, r, s, stride, dtype, layout)
conv = Workload(config["name"], "CONV", conv2d_compute, args)
workloads.append(conv)
UNIQUE_WORKLOADS.add(conv.tag)
elif is_dwconv2d:
args = (1, c, y, x, k // c, r, s, stride, dtype, layout)
dwconv = Workload(config["name"], "DWCONV", dwconv_compute, args)
workloads.append(dwconv)
UNIQUE_WORKLOADS.add(dwconv.tag)
elif is_sepconv2d:
yo = layer.output_shape[1]
xo = layer.output_shape[2]
ko = int(config["depth_multiplier"])
dwargs = (1, c, y, x, ko, r, s, stride, dtype, layout)
dwconv = Workload(config["name"]+"_dw", "DWCONV", dwconv_compute, dwargs)
pwargs = (1, c * ko , yo, xo, k, 1, 1, 1, dtype, layout)
pwconv = Workload(config["name"]+"_pw", "CONV", conv2d_compute, pwargs)
workloads.append(dwconv)
workloads.append(pwconv)
UNIQUE_WORKLOADS.add(dwconv.tag)
UNIQUE_WORKLOADS.add(pwconv.tag)
elif is_gemm:
m = 1
n = layer.output_shape[1]
k = layer.input_shape[1]
args = (m, n, k, dtype, layout)
gemm = Workload(config["name"], "GEMM", mm_compute, args)
workloads.append(gemm)
UNIQUE_WORKLOADS.add(gemm.tag)
else: # unsupported layers
continue
print("Unique workloads: ", len(UNIQUE_WORKLOADS))
return workloads | 0.564459 | 0.282134 |
from time import sleep
from typing import Tuple, Optional
from urllib.parse import quote_plus
import logging
import requests
import shelve
from ..common import progress_bar
from ..types import Document, Author, DocumentSet, DocumentIdentifier
def extract_id(item):
    """Convert a raw Semantic Scholar paper record into a DocumentIdentifier.

    Returns None when the record is missing or carries no title.
    """
    if item is None:
        return None
    title = item.get('title')
    if not title:
        return None
    return DocumentIdentifier(
        title,
        doi=item.get('doi'),
        arxivid=item.get('arxivId'),
        s2id=item.get('paperId'),
    )
def extract_ids(items):
    """Map extract_id over *items*, dropping records that yield nothing.

    Returns None for an empty/missing input list.
    """
    if not items:
        return None
    candidates = [extract_id(item) for item in items]
    return [ident for ident in candidates if ident]
class ScholarAuthor(Author):
    """Author wrapper around a raw Semantic Scholar author record."""

    def __init__(self, entry):
        self.entry = entry

    @property
    def name(self):
        """Display name from the record, or None when absent."""
        return self.entry.get('name')

    @property
    def orcid(self):
        """Semantic Scholar author records do not carry an ORCID."""
        return None
class ScholarDocument(Document):
    """Document backed by a raw Semantic Scholar API paper record."""

    def __init__(self, entry):
        super().__init__(extract_id(entry))
        self.entry = entry

    @property
    def title(self) -> str:
        """Paper title, or None when absent from the record."""
        return self.entry.get('title')

    @property
    def authors(self):
        """ScholarAuthor wrappers for each author, or None when there are none."""
        raw_authors = self.entry.get('authors')
        if not raw_authors:
            return None
        return [ScholarAuthor(author) for author in raw_authors if author]

    @property
    def publication_year(self):
        return self.entry.get('year')

    @property
    def publication_source(self):
        """Publication venue name."""
        return self.entry.get('venue')

    @property
    def abstract(self):
        return self.entry.get('abstract')

    @property
    def citations(self):
        """Identifiers of citing papers, or None when unavailable."""
        return extract_ids(self.entry.get('citations'))

    @property
    def citation_count(self):
        return self.entry.get('numCitedBy')

    @property
    def references(self):
        """Identifiers of referenced papers, or None when unavailable."""
        return extract_ids(self.entry.get('references'))

    def __repr__(self):
        return f'<{self.title}>'

    @staticmethod
    def load(id):
        """Fetch a ScholarDocument by any Semantic Scholar key."""
        return fetch_semanticscholar(id)
S2_PAPER_URL = 'http://api.semanticscholar.org/v1/paper/'
S2_QUERY_URL = 'https://api.semanticscholar.org/graph/v1/paper/search'
CACHE_FILE = '.semantischolar'
DEFAULT_TIMEOUT = 3.05 # 100 requests per 5 minutes
def request_results(query, offset, cache, timeout=DEFAULT_TIMEOUT):
    """Fetch one page (up to 100 results) of the S2 paper-search endpoint.

    Responses are cached per ``(query, offset)``; cached pages skip the
    network entirely. Raises ``Exception`` when the API reports an error.

    :param timeout: seconds to sleep before the request (rate limiting).
    """
    cache_key = f'results={query};{offset}'
    if cache_key in cache:
        return cache[cache_key]

    params = dict(offset=offset, query=query, limit=100)
    # Honor the rate limit (100 requests per 5 minutes) before hitting the
    # network. Fix: the timeout parameter was previously accepted but
    # silently ignored, unlike in request_paper.
    sleep(timeout)
    reply = requests.get(S2_QUERY_URL, params=params)
    response = reply.json()

    if 'data' not in response:
        msg = response.get('error') or response.get('message') or 'unknown'
        raise Exception(f'error while fetching {reply.url}: {msg}')

    cache[cache_key] = response
    return response
def request_paper(key, cache, timeout=DEFAULT_TIMEOUT):
    """Fetch one paper record from the S2 v1 paper endpoint, with caching.

    Sleeps *timeout* seconds before the request to respect the rate limit
    (100 requests per 5 minutes). Returns the parsed JSON dict on success
    and None on any failure (network error or API error response).
    """
    cache_key = f'paper={key}'
    if cache_key in cache:
        return cache[cache_key]

    url = S2_PAPER_URL + quote_plus(key)
    try:
        sleep(timeout)
        data = requests.get(url).json()
    except Exception as e:
        # Best-effort: any failure is logged and reported as None.
        # Fix: logging.warn is a deprecated alias of logging.warning.
        logging.warning(f'failed to retreive {key}: {e}')
        return None

    if 'paperId' in data:
        # Only successful lookups are cached, so transient failures retry.
        cache[cache_key] = data
        return data

    msg = data.get('error') or data.get('message') or 'unknown error'
    logging.warning(f'failed to retreive {key}: {msg}')
    return None
def fetch_semanticscholar(key: 'str | DocumentIdentifier | None') -> Optional[Document]:
    """Fetch SemanticScholar metadata for the given key. The key can be
    one of the following (see `API reference
    <https://www.semanticscholar.org/product/api>`_):
    * DOI
    * S2 paper ID
    * ArXiv ID (example format: `arXiv:1705.10311`)
    * MAG ID (example format: `MAG:112218234`)
    * ACL ID (example format: `ACL:W12-3903`)
    * PubMed ID (example format: `PMID:19872477`)
    * Corpus ID (example format: `CorpusID:37220927`)
    :returns: The `Document` if it was found and `None` otherwise.
    """
    if key is None:
        return None
    with shelve.open(CACHE_FILE) as cache:
        if isinstance(key, DocumentIdentifier):
            # Try the identifier's aliases in order of specificity:
            # S2 id, DOI, PubMed, then arXiv; stop at the first hit.
            data = None
            if data is None and key.s2id:
                data = request_paper(key.s2id, cache)
            if data is None and key.doi:
                data = request_paper(key.doi, cache)
            if data is None and key.pubmed:
                # NOTE(review): assumes DocumentIdentifier exposes a
                # `pubmed` attribute — confirm against ..types.
                data = request_paper(f'PMID:{key.pubmed}', cache)
            if data is None and key.arxivid:
                data = request_paper(f'arXiv:{key.arxivid}', cache)
        else:
            # Raw string key: pass through to the API verbatim.
            data = request_paper(key, cache)
        if data is None:
            return None
        return ScholarDocument(data)
def refine_semanticscholar(docs: DocumentSet
                           ) -> Tuple[DocumentSet, DocumentSet]:
    """Attempt to fetch SemanticScholar metadata for each document in the
    given set based on their DOIs. Returns a tuple containing two sets: the
    documents available on SemanticScholar and the remaining documents that
    were not found or do not have a DOI.
    """
    def callback(doc):
        # Documents already backed by S2 need no second lookup.
        if isinstance(doc, ScholarDocument):
            return doc
        # Returns None when the lookup fails; _refine_docs is expected to
        # route those into the "remaining" set — confirm against DocumentSet.
        return fetch_semanticscholar(doc.id)

    return docs._refine_docs(callback)
def search_semanticscholar(query: str, *, limit: int = None) -> DocumentSet:
""" Submit the given query to SemanticScholar and return the results
as a `DocumentSet`.
"""
if not query:
raise Exception('invalid query: {query}')
docs = []
with shelve.open(CACHE_FILE) as cache:
offset = 0
paper_ids = []
while True:
data = request_results(query, offset, cache)
if not data:
break
records = data['data']
offset += len(records)
for record in records:
paper_ids.append(record['paperId'])
if limit is not None and len(paper_ids) > limit:
paper_ids = paper_ids[:limit]
break
for paper_id in progress_bar(paper_ids):
doc = request_paper(paper_id, cache)
if doc:
docs.append(ScholarDocument(doc))
else:
logging.warn(f'could not find paper id {paper_id}')
return DocumentSet(docs) | litstudy/sources/semanticscholar.py | from time import sleep
from typing import Tuple, Optional
from urllib.parse import quote_plus
import logging
import requests
import shelve
from ..common import progress_bar
from ..types import Document, Author, DocumentSet, DocumentIdentifier
def extract_id(item):
if item is None or not item.get('title'):
return None
return DocumentIdentifier(
item['title'],
doi=item.get('doi'),
arxivid=item.get('arxivId'),
s2id=item.get('paperId'),
)
def extract_ids(items):
if not items:
return None
return list(filter(None, map(extract_id, items)))
class ScholarAuthor(Author):
def __init__(self, entry):
self.entry = entry
@property
def name(self):
return self.entry.get('name')
@property
def orcid(self):
return None
class ScholarDocument(Document):
def __init__(self, entry):
super().__init__(extract_id(entry))
self.entry = entry
@property
def title(self) -> str:
return self.entry.get('title')
@property
def authors(self):
authors = self.entry.get('authors')
if not authors:
return None
return [ScholarAuthor(a) for a in authors if a]
@property
def publication_year(self):
return self.entry.get('year')
@property
def publication_source(self):
return self.entry.get('venue')
@property
def abstract(self):
return self.entry.get('abstract')
@property
def citations(self):
return extract_ids(self.entry.get('citations'))
@property
def citation_count(self):
return self.entry.get('numCitedBy')
@property
def references(self):
return extract_ids(self.entry.get('references'))
def __repr__(self):
return f'<{self.title}>'
@staticmethod
def load(id):
return fetch_semanticscholar(id)
S2_PAPER_URL = 'http://api.semanticscholar.org/v1/paper/'
S2_QUERY_URL = 'https://api.semanticscholar.org/graph/v1/paper/search'
CACHE_FILE = '.semantischolar'
DEFAULT_TIMEOUT = 3.05 # 100 requests per 5 minutes
def request_results(query, offset, cache, timeout=DEFAULT_TIMEOUT):
cache_key = f'results={query};{offset}'
if cache_key in cache:
return cache[cache_key]
url = S2_QUERY_URL
params = dict(offset=offset, query=query, limit=100)
reply = requests.get(url, params=params)
response = reply.json()
if 'data' not in response:
msg = response.get('error') or response.get('message') or 'unknown'
raise Exception(f'error while fetching {reply.url}: {msg}')
cache[cache_key] = response
return response
def request_paper(key, cache, timeout=DEFAULT_TIMEOUT):
cache_key = f'paper={key}'
if cache_key in cache:
return cache[cache_key]
url = S2_PAPER_URL + quote_plus(key)
try:
sleep(timeout)
data = requests.get(url).json()
except Exception as e:
logging.warn(f'failed to retreive {key}: {e}')
return None
if 'paperId' in data:
cache[cache_key] = data
return data
else:
msg = data.get('error') or data.get('message') or 'unknown error'
logging.warn(f'failed to retreive {key}: {msg}')
return None
def fetch_semanticscholar(key: set) -> Optional[Document]:
"""Fetch SemanticScholar metadata for the given key. The key can be
one of the following (see `API reference
<https://www.semanticscholar.org/product/api>`_):
* DOI
* S2 paper ID
* ArXiv ID (example format: `arXiv:1705.10311`)
* MAG ID (example format: `MAG:112218234`)
* ACL ID (example format: `ACL:W12-3903`)
* PubMed ID (example format: `PMID:19872477`)
* Corpus ID (example format: `CorpusID:37220927`)
:returns: The `Document` if it was found and `None` otherwise.
"""
if key is None:
return None
with shelve.open(CACHE_FILE) as cache:
if isinstance(key, DocumentIdentifier):
data = None
if data is None and key.s2id:
data = request_paper(key.s2id, cache)
if data is None and key.doi:
data = request_paper(key.doi, cache)
if data is None and key.pubmed:
data = request_paper(f'PMID:{key.pubmed}', cache)
if data is None and key.arxivid:
data = request_paper(f'arXiv:{key.arxivid}', cache)
else:
data = request_paper(key, cache)
if data is None:
return None
return ScholarDocument(data)
def refine_semanticscholar(docs: DocumentSet
) -> Tuple[DocumentSet, DocumentSet]:
"""Attempt to fetch SemanticScholar metadata for each document in the
given set based on their DOIs. Returns a tuple containing two sets: the
documents available on SemanticScholar and the remaining documents that
were not found or do not have a DOI.
"""
def callback(doc):
if isinstance(doc, ScholarDocument):
return doc
return fetch_semanticscholar(doc.id)
return docs._refine_docs(callback)
def search_semanticscholar(query: str, *, limit: int = None) -> DocumentSet:
""" Submit the given query to SemanticScholar and return the results
as a `DocumentSet`.
"""
if not query:
raise Exception('invalid query: {query}')
docs = []
with shelve.open(CACHE_FILE) as cache:
offset = 0
paper_ids = []
while True:
data = request_results(query, offset, cache)
if not data:
break
records = data['data']
offset += len(records)
for record in records:
paper_ids.append(record['paperId'])
if limit is not None and len(paper_ids) > limit:
paper_ids = paper_ids[:limit]
break
for paper_id in progress_bar(paper_ids):
doc = request_paper(paper_id, cache)
if doc:
docs.append(ScholarDocument(doc))
else:
logging.warn(f'could not find paper id {paper_id}')
return DocumentSet(docs) | 0.787278 | 0.195633 |
import sys, os, subprocess, re, tempfile, getopt, signal
def ex(cmd):
    """Run *cmd* under `bash -c` and return its captured stdout."""
    proc = subprocess.Popen(['bash', '-c', cmd], stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    return out
def get_section_offsets(fn):
    # Parse `objdump -h <fn>` into {section_name: address}.
    # Python 2 only: relies on the `long` builtin.
    obj_out = ex('objdump -h "%s"' % fn)
    ret = {}
    for line in obj_out.split('\n'):
        try:
            # NOTE(review): re.match(".", ...) matches ANY first character,
            # not just section names starting with '.'; likely meant r'\.'.
            if line and re.match(".", line.split()[1]):
                # Field 3 appears to be an address column of `objdump -h`
                # (Idx Name Size VMA LMA File-off Algn) — confirm which.
                ret[line.split()[1]] = long('0x%s' % line.split()[3], 16)
        except IndexError:
            # Header or short lines with too few columns — skip.
            pass
        except ValueError:
            # Non-hex field (e.g. flag continuation lines) — skip.
            pass
    return ret
def add_offset(d, off):
    """Return a copy of section->address dict *d* with *off* added to each value."""
    # Fix: dict.iteritems() does not exist on Python 3; items() behaves
    # identically here on both Python 2 and 3.
    return dict((section, address + off) for section, address in d.items())
def get_base_offset(pid, so_file):
    # Base load address of so_file in pid's address space: the start field
    # ("start-end perms ...") of its /proc/<pid>/maps line. Python 2 (`long`).
    # NOTE(review): uses the first matching maps line only implicitly via
    # int parsing of the concatenated grep output — assumes a single match.
    return long(ex('grep "%s" /proc/%s/maps' % (so_file, pid)).split('-')[0], 16)
# Strips chroot directory prefix, if the path contains it
# This is needed because the binary paths in /proc/<pid>/maps contains the full path if
# you are outside of that specific chroot. The assumption is that you can be inside of
# another equivalent chroot, that maps to the same files. If you aren't in the same
# type of chroot, gdb will fail with library version mismatch errors
def strip_possible_schroot(file):
if re.search('schroot',file):
return '/'+'/'.join(file.split('/')[6:])
else:
return file
# The goal of this function is to return a locally accessible path to a binary
# If a pid is in a chroot (or another chroot), then the full path to the
# binary will be presented. If we are also in that chroot, we cannot use
# the full path, and need the truncated version. The assumption is that if
# we are in a chroot, then it is the same one, allowing us to properly view
# the debug information of the requested binary
def get_bin_path(pid, bin, strip=True):
    try:
        first_line = ex('grep "%s" /proc/%s/maps' % (bin, pid)).split('\n')[0]
        # Keep everything from the first '/' onward (the pathname column).
        path = ''.join(first_line.partition('/')[1:])
        # Strip the possible chroot path only if the file doesn't exist
        # Later we will update the solib for gdb appropriately to find the proper libraries
        if strip and not os.path.isfile(path):
            return strip_possible_schroot(path)
        return path
    except IndexError:
        raise IOError
def find_pintool_name(pid, pintoolname):
if pintoolname:
pintoolnames = (pintoolname,)
else:
pintoolnames = ('pin_sim.so', 'sift_recorder', 'sniper')
for pintoolname in pintoolnames:
if get_bin_path(pid, pintoolname):
return pintoolname
print 'No pintool found, please use --toolname'
sys.exit(1)
def attach_gdb(pid, symoff, pintoolname):
    """Attach gdb to *pid*, loading pintool symbols at their runtime offsets.

    NOTE: reads the module-global `action` set by the __main__ block.
    """
    pinbin = get_bin_path(pid, 'pinbin')
    pintool = get_bin_path(pid, pintoolname)
    symbols = 'add-symbol-file %s %s -s .data %s -s .bss %s' % (pintool, symoff['.text'], symoff['.data'], symoff['.bss'])
    # If we are debugging something in a chroot, and we can access it, change
    # the solib path in gdb so that it doesn't use our local libraries incorrectly
    # If we cannot access it, then we are also in a chroot, and need the truncated
    # version, because the full version is not accessible from here
    potential_schroot_path = get_bin_path(pid, 'pinbin', False)
    if re.search('schroot', potential_schroot_path) and os.path.isfile(potential_schroot_path):
        solib = 'set solib-absolute-prefix /'+'/'.join(potential_schroot_path.split('/')[1:6])
    else:
        solib = ''
    fh, fn = tempfile.mkstemp()
    # Fix: mkstemp returns an open OS-level fd which was previously leaked;
    # close it since the file is reopened by name below.
    os.close(fh)
    f = open(fn, 'w')
    f.write('%s\nattach %s\n%s\n' % (solib, pid, symbols))
    if action == 'bt':
        f.write('bt\nquit\n')
    f.close()
    os.system('gdb -quiet -command=%s %s' % (fn, '%(pinbin)s' % locals()))
    os.unlink(fn)
if __name__ == '__main__':
    # Command-line driver: parse options, optionally stop the target,
    # attach gdb to one or all threads, then resume the target.
    actions = [ 'interactive', 'bt' ]
    pintoolname = None
    def usage():
        print 'Attach GDB to a running Sniper process'
        print 'Usage:'
        print ' %s [-h|--help] [--all-threads] [--action={bt}] [--abt] [--toolname={auto}] <pid>' % sys.argv[0]
        sys.exit(2)
    action = 'interactive'
    all_threads = False
    if not sys.argv[1:]:
        usage()
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h", [ "help", "all-threads", "action=", "abt", "toolname=" ])
    except getopt.GetoptError, e:
        # print help information and exit:
        print e
        usage()
    for o, a in opts:
        if o == '-h' or o == '--help':
            usage()
            sys.exit()
        if o == '--all-threads':
            all_threads = True
        if o == '--action':
            if a not in actions:
                print 'Invalid action', a
                usage()
            action = a
        if o == '--abt':
            # Shorthand for --all-threads --action=bt.
            all_threads = True
            action = 'bt'
        if o == '--toolname':
            pintoolname = a
    if len(args) < 1:
        usage()
    if action == 'interactive' and all_threads:
        print 'Cannot combine --interactive with --all-threads'
        sys.exit(2)
    ret_code = 0
    pgm_pid = long(args[0])
    # State letter from ps (R = running).
    # NOTE(review): ex() output likely includes a trailing newline, so the
    # == 'R' comparisons below may never match — confirm.
    pgm_orig_state = ex('ps -p %u -o s=' % pgm_pid)
    if all_threads:
        # One gdb attach per kernel thread of the target process.
        pids = map(long, os.listdir(os.path.join('/proc', str(pgm_pid), 'task')))
    else:
        pids = [ pgm_pid ]
    if pgm_orig_state == 'R':
        os.kill(pgm_pid, signal.SIGSTOP)
    try:
        pintoolname = find_pintool_name(pgm_pid, pintoolname)
        pintool = get_bin_path(pgm_pid, pintoolname)
        base_offset = get_base_offset(pgm_pid, pintool)
        # Section offsets relocated by the tool's load address.
        symoff = add_offset(get_section_offsets(pintool), base_offset)
        for pid in pids:
            attach_gdb(pid, symoff, pintoolname)
    except IOError:
        print ""
        print "Error: Unable to correctly determine the path to a mapped object."
        print " This means that either you do not have permission to view the dynamic"
        print " linking maps, or the pid provided isn't a pin/Sniper program."
        print ""
        ret_code = 1
    if pgm_orig_state == 'R':
        os.kill(pgm_pid, signal.SIGCONT)
    sys.exit(ret_code) | sniper/tools/attachgdb.py |
import sys, os, subprocess, re, tempfile, getopt, signal
def ex(cmd):
return subprocess.Popen([ 'bash', '-c', cmd ], stdout = subprocess.PIPE).communicate()[0]
def get_section_offsets(fn):
obj_out = ex('objdump -h "%s"' % fn)
ret = {}
for line in obj_out.split('\n'):
try:
if line and re.match(".", line.split()[1]):
ret[line.split()[1]] = long('0x%s' % line.split()[3], 16)
except IndexError:
pass
except ValueError:
pass
return ret
def add_offset(d, off):
return dict( [section, address + off] for section, address in d.iteritems() )
def get_base_offset(pid, so_file):
return long(ex('grep "%s" /proc/%s/maps' % (so_file, pid)).split('-')[0], 16)
# Strips chroot directory prefix, if the path contains it
# This is needed because the binary paths in /proc/<pid>/maps contains the full path if
# you are outside of that specific chroot. The assumption is that you can be inside of
# another equivalent chroot, that maps to the same files. If you aren't in the same
# type of chroot, gdb will fail with library version mismatch errors
def strip_possible_schroot(file):
if re.search('schroot',file):
return '/'+'/'.join(file.split('/')[6:])
else:
return file
# The goal of this function is to return a locally accessible path to a binary
# If a pid is in a chroot (or another chroot), then the full path to the
# binary will be presented. If we are also in that chroot, we cannot use
# the full path, and need the truncated version. The assumption is that if
# we are in a chroot, then it is the same one, allowing us to properly view
# the debug information of the requested binary
def get_bin_path(pid, bin, strip=True):
try:
path = ''.join(ex('grep "%s" /proc/%s/maps' % (bin, pid)).split('\n')[0].partition('/')[1:])
# Strip the possible chroot path only if the file doesn't exist
# Later we will update the solib for gdb appropriately to find the proper libraries
if strip and not os.path.isfile(path):
return strip_possible_schroot(path)
else:
return path
except IndexError:
raise IOError
def find_pintool_name(pid, pintoolname):
if pintoolname:
pintoolnames = (pintoolname,)
else:
pintoolnames = ('pin_sim.so', 'sift_recorder', 'sniper')
for pintoolname in pintoolnames:
if get_bin_path(pid, pintoolname):
return pintoolname
print 'No pintool found, please use --toolname'
sys.exit(1)
def attach_gdb(pid, symoff, pintoolname):
pinbin = get_bin_path(pid, 'pinbin')
pintool = get_bin_path(pid, pintoolname)
symbols = 'add-symbol-file %s %s -s .data %s -s .bss %s' % (pintool, symoff['.text'], symoff['.data'], symoff['.bss'])
# If we are debugging something in a chroot, and we can access it, change
# the solib path in gdb so that it doesn't use our local libraries incorrectly
# If we cannot access it, then we are also in a chroot, and need the truncated
# version, because the full version is not accessible from here
potential_schroot_path = get_bin_path(pid, 'pinbin', False)
if re.search('schroot', potential_schroot_path) and os.path.isfile(potential_schroot_path):
solib = 'set solib-absolute-prefix /'+'/'.join(potential_schroot_path.split('/')[1:6])
else:
solib = ''
fh, fn = tempfile.mkstemp()
f = open(fn, 'w')
f.write('%s\nattach %s\n%s\n' % (solib, pid, symbols))
if action == 'bt':
f.write('bt\nquit\n')
f.close()
os.system('gdb -quiet -command=%s %s' % (fn, '%(pinbin)s' % locals()))
os.unlink(fn)
if __name__ == '__main__':
actions = [ 'interactive', 'bt' ]
pintoolname = None
def usage():
print 'Attach GDB to a running Sniper process'
print 'Usage:'
print ' %s [-h|--help] [--all-threads] [--action={bt}] [--abt] [--toolname={auto}] <pid>' % sys.argv[0]
sys.exit(2)
action = 'interactive'
all_threads = False
if not sys.argv[1:]:
usage()
try:
opts, args = getopt.getopt(sys.argv[1:], "h", [ "help", "all-threads", "action=", "abt", "toolname=" ])
except getopt.GetoptError, e:
# print help information and exit:
print e
usage()
for o, a in opts:
if o == '-h' or o == '--help':
usage()
sys.exit()
if o == '--all-threads':
all_threads = True
if o == '--action':
if a not in actions:
print 'Invalid action', a
usage()
action = a
if o == '--abt':
all_threads = True
action = 'bt'
if o == '--toolname':
pintoolname = a
if len(args) < 1:
usage()
if action == 'interactive' and all_threads:
print 'Cannot combine --interactive with --all-threads'
sys.exit(2)
ret_code = 0
pgm_pid = long(args[0])
pgm_orig_state = ex('ps -p %u -o s=' % pgm_pid)
if all_threads:
pids = map(long, os.listdir(os.path.join('/proc', str(pgm_pid), 'task')))
else:
pids = [ pgm_pid ]
if pgm_orig_state == 'R':
os.kill(pgm_pid, signal.SIGSTOP)
try:
pintoolname = find_pintool_name(pgm_pid, pintoolname)
pintool = get_bin_path(pgm_pid, pintoolname)
base_offset = get_base_offset(pgm_pid, pintool)
symoff = add_offset(get_section_offsets(pintool), base_offset)
for pid in pids:
attach_gdb(pid, symoff, pintoolname)
except IOError:
print ""
print "Error: Unable to correctly determine the path to a mapped object."
print " This means that either you do not have permission to view the dynamic"
print " linking maps, or the pid provided isn't a pin/Sniper program."
print ""
ret_code = 1
if pgm_orig_state == 'R':
os.kill(pgm_pid, signal.SIGCONT)
sys.exit(ret_code) | 0.17172 | 0.14137 |
import binascii
import pprint
import sys
from hmac_drbg import *
def parse_entry(line):
    """Split a ``KEY = VALUE`` line into a (key, value) tuple.

    'True'/'False' become booleans and all-digit strings become ints.
    NOTE(review): a hex field consisting only of decimal digits would also be
    converted to int here; the NIST vectors in use apparently avoid that case.
    """
    key, val = line.split('=')
    key = key.strip()
    val = val.strip()
    if val == 'True':
        val = True
    elif val == 'False':
        val = False
    elif val.isdigit():
        val = int(val)
    return key, val

def parse_rsp(rsp_file):
    """Parse a NIST CAVP ``.rsp`` response file into a list of suite dicts.

    Each suite is ``{'Algorithm': name, 'Tests': [test_dict, ...], **options}``;
    a test dict holds the KEY = VALUE entries between COUNT lines, with a
    repeated key stored under ``key + '2'``.

    BUG FIX: the original never flushed the pending test at a suite boundary,
    and never flushed the final test/suite at EOF — so the last suite in the
    file (e.g. the SHA-512 suite selected by main()) was silently dropped.
    """
    test_suites = []
    suite = {}
    test = {}
    with open(rsp_file, 'r') as f:
        for line in f:
            if line == '\n' or line == '\r\n':
                continue
            if line.startswith('#'):
                continue
            line = line.strip()
            if not line:  # tolerate whitespace-only lines
                continue
            if line.startswith('['):
                e = line[1:-1]
                if '=' not in e:
                    # New suite header: flush the pending test and suite first.
                    if test:
                        suite['Tests'].append(test)
                        test = {}
                    if suite:
                        test_suites.append(suite)
                    suite = {'Algorithm': e, 'Tests': []}
                else:
                    # Bracketed option line, e.g. [PredictionResistance = True].
                    key, val = parse_entry(e)
                    suite[key] = val
                continue
            if line.startswith('COUNT'):
                # COUNT starts a new test case; flush the previous one.
                if test:
                    suite['Tests'].append(test)
                    test = {}
                continue
            key, val = parse_entry(line)
            if key in test:
                key = key + '2'
            test[key] = val
    # EOF: flush the trailing test and suite (previously lost).
    if test:
        suite['Tests'].append(test)
    if suite:
        test_suites.append(suite)
    return test_suites
# generate test cases for go-drbg
def dump_go(tests):
    """Print the test vectors as a Go source file: a slice of string maps."""
    field_order = ['EntropyInput', 'Nonce', 'PersonalizationString',
                   'AdditionalInput', 'EntropyInputPR', 'AdditionalInput2',
                   'EntropyInputPR2', 'ReturnedBits']
    lines = ['package hmac\n', 'var HmacSha512PrTests = []map[string]string{']
    for case in tests:
        lines.append('\t{')
        lines.extend('\t\t"{}": "{}",'.format(name, case[name]) for name in field_order)
        lines.append('\t},')
    lines.append('}')
    print('\n'.join(lines))
def run_tests(tests):
    """Run each prediction-resistance vector against DRBG; stop at the first
    failure (printing the offending vector), otherwise report the pass count."""
    for vector in tests:
        raw = {name: binascii.unhexlify(hexval) for name, hexval in vector.items()}
        out_len = len(raw['ReturnedBits'])
        drbg = DRBG(raw['EntropyInput'] + raw['Nonce'] + raw['PersonalizationString'])
        drbg.reseed(raw['EntropyInputPR'] + raw['AdditionalInput'])
        # First generate call's output is discarded per the PR test procedure.
        drbg.generate(out_len)
        drbg.reseed(raw['EntropyInputPR2'] + raw['AdditionalInput2'])
        produced = drbg.generate(out_len)
        if produced != raw['ReturnedBits']:
            print('FAILED TEST:')
            pprint.pprint(vector)
            print('\nGot:', binascii.hexlify(produced).decode('ascii'))
            return
    print('Passed all %s tests.' % len(tests))
def main():
    """Parse the NIST response file and run only the SHA-512 suites."""
    suites = parse_rsp('HMAC_DRBG_PR.rsp')
    # NOTE customize this code
    selected = []
    for suite in suites:
        if suite['Algorithm'] == 'SHA-512':
            selected.extend(suite['Tests'])
    run_tests(selected)
if __name__ == '__main__':
    main()
import pprint
import sys
from hmac_drbg import *
def parse_entry(line):
key, val = line.split('=')
key = key.strip()
val = val.strip()
if val == 'True':
val = True
elif val == 'False':
val = False
elif val.isdigit():
val = int(val)
return key, val
def parse_rsp(rsp_file):
test_suites = []
suite = {}
test = {}
with open(rsp_file, 'r') as f:
while True:
line = f.readline()
if line == '':
break
if line == '\n' or line == '\r\n':
continue
if line.startswith('#'):
continue
line = line.strip()
if line.startswith('['):
e = line[1:-1]
if not '=' in e:
if suite:
test_suites.append(suite)
suite = {'Algorithm': e, 'Tests': []}
test = {}
else:
key, val = parse_entry(e)
suite[key] = val
continue
if line.startswith('COUNT'):
if test:
suite['Tests'].append(test)
test = {}
continue
key, val = parse_entry(line)
if key in test:
key = key + '2'
test[key] = val
return test_suites
# generate test cases for go-drbg
def dump_go(tests):
pr_fields = ['EntropyInput', 'Nonce', 'PersonalizationString', 'AdditionalInput', 'EntropyInputPR', 'AdditionalInput2', 'EntropyInputPR2', 'ReturnedBits']
print('package hmac\n')
print('var HmacSha512PrTests = []map[string]string{')
for t in tests:
print('\t{')
for k in pr_fields:
print('\t\t"{}": "{}",'.format(k, t[k]))
print('\t},')
print('}')
def run_tests(tests):
for test in tests:
t = {k: binascii.unhexlify(v) for k, v in test.items()}
l = len(t['ReturnedBits'])
drbg = DRBG(t['EntropyInput'] + t['Nonce'] + t['PersonalizationString'])
drbg.reseed(t['EntropyInputPR'] + t['AdditionalInput'])
drbg.generate(l)
drbg.reseed(t['EntropyInputPR2'] + t['AdditionalInput2'])
result = drbg.generate(l)
if result != t['ReturnedBits']:
print('FAILED TEST:')
pprint.pprint(test)
print('\nGot:', binascii.hexlify(result).decode('ascii'))
return
print('Passed all %s tests.' % len(tests))
def main():
test_suites = parse_rsp('HMAC_DRBG_PR.rsp')
# NOTE customize this code
tests = []
for t in test_suites:
if t['Algorithm'] == 'SHA-512':
tests += t['Tests']
run_tests(tests)
if __name__ == '__main__':
main() | 0.208743 | 0.182171 |
import random
from maze import Direction
def binary_tree(grid):
    """Carve a maze by linking every cell to a random north/east neighbor."""
    for cell in grid.each_cell():
        candidates = [
            cell.get_neighbor(direction)
            for direction in (Direction.NORTH, Direction.EAST)
            if cell.get_neighbor(direction)
        ]
        if candidates:
            cell.link(random.choice(candidates))
def sidewinder(grid):
    """Carve a maze row by row: extend an eastward run, or close it out by
    linking a random member of the run to its northern neighbor."""
    for row in grid.each_row():
        run = []
        for cell in row:
            run.append(cell)
            east_blocked = cell.get_neighbor(Direction.EAST) is None
            north_blocked = cell.get_neighbor(Direction.NORTH) is None
            close_out = east_blocked or (not north_blocked and random.choice([True, False]))
            if close_out:
                chosen = random.choice(run)
                northern = chosen.get_neighbor(Direction.NORTH)
                if northern:
                    chosen.link(northern)
                run = []
            else:
                cell.link(cell.get_neighbor(Direction.EAST))
def aldous_broder(grid):
    """Uniform random walk: link into each cell the first time it is reached,
    until every cell has been visited."""
    current = grid.random_cell()
    remaining = grid.size() - 1
    while remaining:
        step = random.choice(current.neighbors())
        if not step.links():
            current.link(step)
            remaining -= 1
        current = step
def wilsons(grid):
    """Wilson's algorithm: repeat loop-erased random walks from an unvisited
    cell until the walk hits the visited region, then carve the walk."""
    unvisited = grid.each_cell()
    unvisited.remove(random.choice(unvisited))
    while unvisited:
        walk = [random.choice(unvisited)]
        cell = walk[0]
        while cell in unvisited:
            cell = random.choice(cell.neighbors())
            if cell in walk:
                # Loop erasure: cut the walk back to the first occurrence.
                walk = walk[:walk.index(cell) + 1]
            else:
                walk.append(cell)
        for here, there in zip(walk, walk[1:]):
            here.link(there)
            unvisited.remove(here)
def hunt_and_kill(grid):
    """Random walk into unvisited neighbors; when stuck, hunt for an
    unvisited cell adjacent to the visited region and restart from it."""
    current = grid.random_cell()
    while current:
        fresh = [n for n in current.neighbors() if not n.links()]
        if fresh:
            nxt = random.choice(fresh)
            current.link(nxt)
            current = nxt
            continue
        # Hunt mode: scan for an unvisited cell bordering a visited one.
        current = None
        for cell in grid.each_cell():
            linked = [n for n in cell.neighbors() if n.links()]
            if not cell.links() and linked:
                current = cell
                current.link(random.choice(linked))
                break
def recursive_backtracker(grid, start_at=None):
    """Depth-first carving with an explicit stack: always push a random
    unvisited neighbor, backtrack (pop) when none remain."""
    if not start_at:
        start_at = grid.random_cell()
    stack = [start_at]
    while stack:
        top = stack[-1]
        unlinked = [neighbor for neighbor in top.neighbors() if not neighbor.links()]
        if unlinked:
            chosen = random.choice(unlinked)
            top.link(chosen)
            stack.append(chosen)
        else:
            stack.pop()
from maze import Direction
def binary_tree(grid):
for cell in grid.each_cell():
neighbors = []
if cell.get_neighbor(Direction.NORTH):
neighbors.append(cell.get_neighbor(Direction.NORTH))
if cell.get_neighbor(Direction.EAST):
neighbors.append(cell.get_neighbor(Direction.EAST))
if neighbors:
cell.link(random.choice(neighbors))
def sidewinder(grid):
for row in grid.each_row():
run = []
for cell in row:
run.append(cell)
at_east_boundary = (cell.get_neighbor(Direction.EAST) is None)
at_northern_boundary = (cell.get_neighbor(Direction.NORTH) is None)
should_close_out = at_east_boundary or (not at_northern_boundary and random.choice([True, False]))
if should_close_out:
member = random.choice(run)
if member.get_neighbor(Direction.NORTH):
member.link(member.get_neighbor(Direction.NORTH))
run.clear()
else:
cell.link(cell.get_neighbor(Direction.EAST))
def aldous_broder(grid):
cell = grid.random_cell()
unvisited = grid.size() - 1
while unvisited > 0:
neighbor = random.choice(cell.neighbors())
if not neighbor.links():
cell.link(neighbor)
unvisited -= 1
cell = neighbor
def wilsons(grid):
unvisited = grid.each_cell()
first = random.choice(unvisited)
unvisited.remove(first)
while unvisited:
cell = random.choice(unvisited)
path = [cell]
while cell in unvisited:
cell = random.choice(cell.neighbors())
if cell in path:
position = path.index(cell)
path = path[0:position + 1]
else:
path.append(cell)
for index in range(0, len(path) - 1):
path[index].link(path[index+1])
unvisited.remove(path[index])
def hunt_and_kill(grid):
current = grid.random_cell()
while current:
unvisited_neighbors = [c for c in current.neighbors() if not c.links()]
if unvisited_neighbors:
neighbor = random.choice(unvisited_neighbors)
current.link(neighbor)
current = neighbor
else:
current = None
for cell in grid.each_cell():
visited_neighbors = [c for c in cell.neighbors() if c.links()]
if not cell.links() and visited_neighbors:
current = cell
neighbor = random.choice(visited_neighbors)
current.link(neighbor)
break
def recursive_backtracker(grid, start_at=None):
if not start_at:
start_at = grid.random_cell()
stack = [start_at]
while stack:
current = stack[-1]
neighbors = [cell for cell in current.neighbors() if not cell.links()]
if not neighbors:
stack.pop()
else:
neighbor = random.choice(neighbors)
current.link(neighbor)
stack.append(neighbor) | 0.374104 | 0.412471 |
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.CheckResultList import CheckResultList
class KoubeiQualityTestShieldResultSyncModel(object):
    """Payload model for the koubei quality-test shield result sync API.

    Every field is optional, backed by a private attribute and exposed via a
    property; ``to_alipay_dict``/``from_alipay_dict`` convert to and from the
    plain-dict wire representation.
    """

    # Wire-field names, in serialization order.
    _FIELDS = ('batch_no', 'check_result_list', 'order_id', 'out_biz_no',
               'partner_id', 'pay_style', 'shop_id')

    def __init__(self):
        self._batch_no = None
        self._check_result_list = None
        self._order_id = None
        self._out_biz_no = None
        self._partner_id = None
        self._pay_style = None
        self._shop_id = None

    @property
    def batch_no(self):
        return self._batch_no

    @batch_no.setter
    def batch_no(self, value):
        self._batch_no = value

    @property
    def check_result_list(self):
        return self._check_result_list

    @check_result_list.setter
    def check_result_list(self, value):
        # Only list input is accepted; elements are coerced to CheckResultList.
        if isinstance(value, list):
            coerced = []
            for item in value:
                if isinstance(item, CheckResultList):
                    coerced.append(item)
                else:
                    coerced.append(CheckResultList.from_alipay_dict(item))
            self._check_result_list = coerced

    @property
    def order_id(self):
        return self._order_id

    @order_id.setter
    def order_id(self, value):
        self._order_id = value

    @property
    def out_biz_no(self):
        return self._out_biz_no

    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value

    @property
    def partner_id(self):
        return self._partner_id

    @partner_id.setter
    def partner_id(self, value):
        self._partner_id = value

    @property
    def pay_style(self):
        return self._pay_style

    @pay_style.setter
    def pay_style(self, value):
        self._pay_style = value

    @property
    def shop_id(self):
        return self._shop_id

    @shop_id.setter
    def shop_id(self, value):
        self._shop_id = value

    def to_alipay_dict(self):
        """Serialize all truthy fields into a plain dict (wire form)."""
        # Preserve the original in-place conversion: after serialization,
        # self.check_result_list holds plain dicts instead of model objects.
        if self.check_result_list and isinstance(self.check_result_list, list):
            for i, element in enumerate(self.check_result_list):
                if hasattr(element, 'to_alipay_dict'):
                    self.check_result_list[i] = element.to_alipay_dict()
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[name] = value.to_alipay_dict()
                else:
                    params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a plain dict; empty/None input yields None."""
        if not d:
            return None
        o = KoubeiQualityTestShieldResultSyncModel()
        for name in KoubeiQualityTestShieldResultSyncModel._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.CheckResultList import CheckResultList
class KoubeiQualityTestShieldResultSyncModel(object):
def __init__(self):
self._batch_no = None
self._check_result_list = None
self._order_id = None
self._out_biz_no = None
self._partner_id = None
self._pay_style = None
self._shop_id = None
@property
def batch_no(self):
return self._batch_no
@batch_no.setter
def batch_no(self, value):
self._batch_no = value
@property
def check_result_list(self):
return self._check_result_list
@check_result_list.setter
def check_result_list(self, value):
if isinstance(value, list):
self._check_result_list = list()
for i in value:
if isinstance(i, CheckResultList):
self._check_result_list.append(i)
else:
self._check_result_list.append(CheckResultList.from_alipay_dict(i))
@property
def order_id(self):
return self._order_id
@order_id.setter
def order_id(self, value):
self._order_id = value
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def partner_id(self):
return self._partner_id
@partner_id.setter
def partner_id(self, value):
self._partner_id = value
@property
def pay_style(self):
return self._pay_style
@pay_style.setter
def pay_style(self, value):
self._pay_style = value
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
def to_alipay_dict(self):
params = dict()
if self.batch_no:
if hasattr(self.batch_no, 'to_alipay_dict'):
params['batch_no'] = self.batch_no.to_alipay_dict()
else:
params['batch_no'] = self.batch_no
if self.check_result_list:
if isinstance(self.check_result_list, list):
for i in range(0, len(self.check_result_list)):
element = self.check_result_list[i]
if hasattr(element, 'to_alipay_dict'):
self.check_result_list[i] = element.to_alipay_dict()
if hasattr(self.check_result_list, 'to_alipay_dict'):
params['check_result_list'] = self.check_result_list.to_alipay_dict()
else:
params['check_result_list'] = self.check_result_list
if self.order_id:
if hasattr(self.order_id, 'to_alipay_dict'):
params['order_id'] = self.order_id.to_alipay_dict()
else:
params['order_id'] = self.order_id
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.partner_id:
if hasattr(self.partner_id, 'to_alipay_dict'):
params['partner_id'] = self.partner_id.to_alipay_dict()
else:
params['partner_id'] = self.partner_id
if self.pay_style:
if hasattr(self.pay_style, 'to_alipay_dict'):
params['pay_style'] = self.pay_style.to_alipay_dict()
else:
params['pay_style'] = self.pay_style
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiQualityTestShieldResultSyncModel()
if 'batch_no' in d:
o.batch_no = d['batch_no']
if 'check_result_list' in d:
o.check_result_list = d['check_result_list']
if 'order_id' in d:
o.order_id = d['order_id']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'partner_id' in d:
o.partner_id = d['partner_id']
if 'pay_style' in d:
o.pay_style = d['pay_style']
if 'shop_id' in d:
o.shop_id = d['shop_id']
return o | 0.457864 | 0.063424 |
import unittest
import os
import glob
import researcher as rs
import numpy as np
from tests.tools import TEST_EXPERIMENT_PATH
class TestSavingExperiment(unittest.TestCase):
    """Tests that experiments (params + observations) are recorded to JSON
    files whose names embed a hash of the parameters."""

    def setUp(self):
        # Start every test with an empty experiment directory.
        files = glob.glob(TEST_EXPERIMENT_PATH + "*")
        for f in files:
            os.remove(f)

    def test_records_correctly(self):
        # Collector-based API: params + per-fold observations are written to
        # a file named "<title>_<params-hash>.json".
        params = {
            "title": "cool_experiment",
            "learning_rate": 0.003,
            "batch_size": 32,
            "alpha": 2e-9,
            "model": "rnn",
        }
        res = rs.ObservationCollector()
        for i in range(3):
            for j in range(1, 8):
                res.add_fold_observation(i, "rmse", 0.98 / j)
        rs.record_experiment_with_collector(params, TEST_EXPERIMENT_PATH, res)
        self.assertTrue(os.path.isfile(TEST_EXPERIMENT_PATH + "cool_experiment_d45dee5991986a5b8215706f5e904b3e.json"))

    def test_records_correctly_if_given_dict(self):
        # Same scenario, but passing the raw observations dict directly.
        params = {
            "title": "cool_experiment",
            "learning_rate": 0.003,
            "batch_size": 32,
            "alpha": 2e-9,
            "model": "rnn",
        }
        res = rs.ObservationCollector()
        for i in range(3):
            for j in range(1, 8):
                res.add_fold_observation(i, "rmse", 0.98 / j)
        rs.record_experiment(params, TEST_EXPERIMENT_PATH, observations=res.observations)
        self.assertTrue(os.path.isfile(TEST_EXPERIMENT_PATH + "cool_experiment_d45dee5991986a5b8215706f5e904b3e.json"))

    def test_records_numpy_integers(self):
        # np.int64 params must serialize; the hash matches the plain-int case,
        # so the file name is unchanged.
        params = {
            "title": "cool_experiment",
            "learning_rate": 0.003,
            "batch_size": np.int64(32),
            "alpha": 2e-9,
            "model": "rnn",
        }
        rs.record_experiment(params, TEST_EXPERIMENT_PATH, observations=None)
        self.assertTrue(os.path.isfile(TEST_EXPERIMENT_PATH + "cool_experiment_d45dee5991986a5b8215706f5e904b3e.json"))
def test_records_NANs_as_zero(self):
params = {
"title": "cool_experiment",
"learning_rate": 0.003,
"batch_size": np.int64(32),
"alpha": 2e-9,
"model": "rnn",
}
res = rs.ObservationCollector()
for i in range(3):
for j in range(1, 8):
res.add_fold_observation(i, "rmse", float('nan'))
rs.record_experiment(params, TEST_EXPERIMENT_PATH, observations=res.observations)
self.assertTrue(os.path.isfile(TEST_EXPERIMENT_PATH + "cool_experiment_d45dee5991986a5b8215706f5e904b3e.json"))
        e = rs.load_experiment(TEST_EXPERIMENT_PATH, "cool_experiment_d45dee5991986a5b8215706f5e904b3e.json")
import os
import glob
import researcher as rs
import numpy as np
from tests.tools import TEST_EXPERIMENT_PATH
class TestSavingExperiment(unittest.TestCase):
def setUp(self):
files = glob.glob(TEST_EXPERIMENT_PATH + "*")
for f in files:
os.remove(f)
def test_records_correctly(self):
params = {
"title": "cool_experiment",
"learning_rate": 0.003,
"batch_size": 32,
"alpha": 2e-9,
"model": "rnn",
}
res = rs.ObservationCollector()
for i in range(3):
for j in range(1, 8):
res.add_fold_observation(i, "rmse", 0.98 / j)
rs.record_experiment_with_collector(params, TEST_EXPERIMENT_PATH, res)
self.assertTrue(os.path.isfile(TEST_EXPERIMENT_PATH + "cool_experiment_d45dee5991986a5b8215706f5e904b3e.json"))
def test_records_correctly_if_given_dict(self):
params = {
"title": "cool_experiment",
"learning_rate": 0.003,
"batch_size": 32,
"alpha": 2e-9,
"model": "rnn",
}
res = rs.ObservationCollector()
for i in range(3):
for j in range(1, 8):
res.add_fold_observation(i, "rmse", 0.98 / j)
rs.record_experiment(params, TEST_EXPERIMENT_PATH, observations=res.observations)
self.assertTrue(os.path.isfile(TEST_EXPERIMENT_PATH + "cool_experiment_d45dee5991986a5b8215706f5e904b3e.json"))
def test_records_numpy_integers(self):
params = {
"title": "cool_experiment",
"learning_rate": 0.003,
"batch_size": np.int64(32),
"alpha": 2e-9,
"model": "rnn",
}
rs.record_experiment(params, TEST_EXPERIMENT_PATH, observations=None)
self.assertTrue(os.path.isfile(TEST_EXPERIMENT_PATH + "cool_experiment_d45dee5991986a5b8215706f5e904b3e.json"))
def test_records_NANs_as_zero(self):
params = {
"title": "cool_experiment",
"learning_rate": 0.003,
"batch_size": np.int64(32),
"alpha": 2e-9,
"model": "rnn",
}
res = rs.ObservationCollector()
for i in range(3):
for j in range(1, 8):
res.add_fold_observation(i, "rmse", float('nan'))
rs.record_experiment(params, TEST_EXPERIMENT_PATH, observations=res.observations)
self.assertTrue(os.path.isfile(TEST_EXPERIMENT_PATH + "cool_experiment_d45dee5991986a5b8215706f5e904b3e.json"))
e = rs.load_experiment(TEST_EXPERIMENT_PATH, "cool_experiment_d45dee5991986a5b8215706f5e904b3e.json") | 0.562657 | 0.52342 |
import pytest
import sys
from ray._private.test_utils import run_string_as_driver
@pytest.mark.parametrize("use_ray_client", [False, True])
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_working_dir_deploy_new_version(ray_start, tmp_dir, use_ray_client):
    """Redeploying with a new version picks up the updated working_dir.

    Two driver scripts run against the same detached Serve instance; the
    second one sees the rewritten "hello" file because its runtime_env
    snapshots the working directory at submission time.
    Doubled braces ({{...}}) survive .format() as literal braces.
    """
    with open("hello", "w") as f:
        f.write("world")
    driver1 = """
import ray
from ray import serve
job_config = ray.job_config.JobConfig(runtime_env={{"working_dir": "."}})
if {use_ray_client}:
    ray.util.connect("{client_addr}", namespace="serve", job_config=job_config)
else:
    ray.init(address="auto", namespace="serve", job_config=job_config)
serve.start(detached=True)
@serve.deployment(version="1")
class Test:
    def __call__(self, *args):
        return open("hello").read()
Test.deploy()
handle = Test.get_handle()
assert ray.get(handle.remote()) == "world"
""".format(
        use_ray_client=use_ray_client, client_addr=ray_start
    )
    run_string_as_driver(driver1)
    # Change the file contents, then deploy version "2" from a fresh driver.
    with open("hello", "w") as f:
        f.write("world2")
    driver2 = """
import ray
from ray import serve
job_config = ray.job_config.JobConfig(runtime_env={{"working_dir": "."}})
if {use_ray_client}:
    ray.util.connect("{client_addr}", namespace="serve", job_config=job_config)
else:
    ray.init(address="auto", namespace="serve", job_config=job_config)
serve.start(detached=True)
@serve.deployment(version="2")
class Test:
    def __call__(self, *args):
        return open("hello").read()
Test.deploy()
handle = Test.get_handle()
assert ray.get(handle.remote()) == "world2"
Test.delete()
""".format(
        use_ray_client=use_ray_client, client_addr=ray_start
    )
    run_string_as_driver(driver2)
@pytest.mark.parametrize("use_ray_client", [False, True])
@pytest.mark.skipif(
    sys.platform == "win32", reason="Runtime env unsupported on Windows"
)
def test_pip_no_working_dir(ray_start, use_ray_client):
    """A deployment can pin pip packages via runtime_env even when no
    working_dir is set; the replica must see the pinned requests version.
    Doubled braces ({{...}}) survive .format() as literal braces."""
    driver = """
import ray
from ray import serve
import requests
if {use_ray_client}:
    ray.util.connect("{client_addr}")
else:
    ray.init(address="auto")
serve.start()
@serve.deployment
def requests_version(request):
    return requests.__version__
requests_version.options(
    ray_actor_options={{
        "runtime_env": {{
            "pip": ["ray[serve]", "requests==2.25.1"]
        }}
    }}).deploy()
assert requests.get("http://127.0.0.1:8000/requests_version").text == "2.25.1"
""".format(
        use_ray_client=use_ray_client, client_addr=ray_start
    )
    run_string_as_driver(driver)
if __name__ == "__main__":
import sys
    sys.exit(pytest.main(["-sv", __file__]))
import sys
from ray._private.test_utils import run_string_as_driver
@pytest.mark.parametrize("use_ray_client", [False, True])
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_working_dir_deploy_new_version(ray_start, tmp_dir, use_ray_client):
with open("hello", "w") as f:
f.write("world")
driver1 = """
import ray
from ray import serve
job_config = ray.job_config.JobConfig(runtime_env={{"working_dir": "."}})
if {use_ray_client}:
ray.util.connect("{client_addr}", namespace="serve", job_config=job_config)
else:
ray.init(address="auto", namespace="serve", job_config=job_config)
serve.start(detached=True)
@serve.deployment(version="1")
class Test:
def __call__(self, *args):
return open("hello").read()
Test.deploy()
handle = Test.get_handle()
assert ray.get(handle.remote()) == "world"
""".format(
use_ray_client=use_ray_client, client_addr=ray_start
)
run_string_as_driver(driver1)
with open("hello", "w") as f:
f.write("world2")
driver2 = """
import ray
from ray import serve
job_config = ray.job_config.JobConfig(runtime_env={{"working_dir": "."}})
if {use_ray_client}:
ray.util.connect("{client_addr}", namespace="serve", job_config=job_config)
else:
ray.init(address="auto", namespace="serve", job_config=job_config)
serve.start(detached=True)
@serve.deployment(version="2")
class Test:
def __call__(self, *args):
return open("hello").read()
Test.deploy()
handle = Test.get_handle()
assert ray.get(handle.remote()) == "world2"
Test.delete()
""".format(
use_ray_client=use_ray_client, client_addr=ray_start
)
run_string_as_driver(driver2)
@pytest.mark.parametrize("use_ray_client", [False, True])
@pytest.mark.skipif(
sys.platform == "win32", reason="Runtime env unsupported on Windows"
)
def test_pip_no_working_dir(ray_start, use_ray_client):
driver = """
import ray
from ray import serve
import requests
if {use_ray_client}:
ray.util.connect("{client_addr}")
else:
ray.init(address="auto")
serve.start()
@serve.deployment
def requests_version(request):
return requests.__version__
requests_version.options(
ray_actor_options={{
"runtime_env": {{
"pip": ["ray[serve]", "requests==2.25.1"]
}}
}}).deploy()
assert requests.get("http://127.0.0.1:8000/requests_version").text == "2.25.1"
""".format(
use_ray_client=use_ray_client, client_addr=ray_start
)
run_string_as_driver(driver)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-sv", __file__])) | 0.368406 | 0.306034 |
import datetime
import dateutil.parser
import pytest
from openprocurement.auction.insider.constants import DUTCH
def test_end_stage(auction, logger, mocker):
    """next_stage(): a dutch_* stage switches the dutch value and records it
    in the audit; any other stage type must end the dutch phase instead."""
    auction.audit = {
        'timeline': {
            DUTCH: {
                'timeline': {}
            }
        }
    }
    # update_stage is mocked to return a fixed "run time" marker so it can be
    # asserted in the document and audit below.
    mock_update_stage = mocker.MagicMock()
    mock_update_stage.return_value = 'run_time_value'
    mocker.patch('openprocurement.auction.insider.mixins.utils.update_stage', mock_update_stage)
    mock_lock_bids = mocker.MagicMock()
    mock_update_auction_document = mocker.MagicMock()
    mocker.patch('openprocurement.auction.insider.mixins.utils.lock_bids', mock_lock_bids)
    mocker.patch('openprocurement.auction.insider.mixins.utils.update_auction_document', mock_update_auction_document)
    stage = {
        'amount': 500000.0,
        'start': '2017-12-12T00:00:30',
        'time': '',
        'type': 'dutch_0'
    }
    auction.auction_document = {
        'initial_value': 'initial_value',
        'current_stage': 1,
        'stages': [
            {'test_key': 'test_value'},
            {'test_key': 'test_value'}
        ]
    }
    auction.next_stage(stage)
    log_strings = logger.log_capture_string.getvalue().split('\n')
    mock_lock_bids.assert_called_once_with(auction)
    mock_update_auction_document.assert_called_once_with(auction)
    mock_update_stage.assert_called_once_with(auction)
    assert auction.auction_document['stages'][0]['passed'] is True
    assert log_strings[-3] == '---------------- SWITCH DUTCH VALUE ----------------'
    assert auction.auction_document['stages'][1]['time'] == 'run_time_value'
    assert auction.auction_document['current_phase'] == DUTCH
    assert auction.audit['timeline'][DUTCH]['timeline']['start'] == 'run_time_value'
    assert log_strings[-2] == 'Switched dutch phase value from initial_value to 500000.0'
    assert auction.audit['timeline'][DUTCH]['turn_1'] == {
        'amount': 500000.0,
        'time': 'run_time_value'
    }
    # Non-dutch stage type: next_stage must delegate to end_dutch().
    stage['type'] = 'not_dutch_type'
    mock_end_dutch = mocker.patch.object(auction, 'end_dutch', autospec=True)
    auction.auction_document['stages'][0]['passed'] = False
    auction.next_stage(stage)
    assert mock_lock_bids.call_count == 2
    assert mock_update_auction_document.call_count == 2
    assert mock_update_stage.call_count == 2
    assert mock_end_dutch.call_count == 1
    assert auction.auction_document['stages'][0]['passed'] is True
def test_approve_dutch_winner(auction, logger, mocker):
    """approve_dutch_winner(): tags the bid as dutch winner and records it in
    the audit and bid data; a non-dict bid is rejected and logged."""
    auction.audit = {
        'timeline': {
            DUTCH: {
                'bids': []
            }
        }
    }
    auction.auction_document = {
        'initial_value': 'initial_value',
        'current_stage': 1,
        'stages': [
            {'test_key': 'test_value'},
            {'test_key': 'test_value'}
        ]
    }
    bid = {'bidder_id': 'test_bidder_id'}
    result_bid = auction.approve_dutch_winner(bid)
    assert result_bid == {
        'bidder_id': 'test_bidder_id',
        'dutch_winner': True
    }
    assert len(auction.audit['timeline'][DUTCH]['bids']) == 1
    assert auction.audit['timeline'][DUTCH]['bids'][0] == result_bid
    assert auction._bids_data['test_bidder_id'][0] == result_bid
    # A plain string cannot take item assignment -> failure is logged,
    # False returned.
    result = auction.approve_dutch_winner('bid')
    log_strings = logger.log_capture_string.getvalue().split('\n')
    assert result is False
    assert log_strings[-2] == "Unable to post dutch winner. Error: 'str' object does not support item assignment"
def test_add_dutch_winner(auction, logger, mocker):
    """add_dutch_winner(): happy path stores the prepared result stage and
    ends the dutch phase; a stale stage or broken bidder mapping is caught
    and returned as the raised exception."""
    auction.audit = {
        'timeline': {
            DUTCH: {
                'bids': []
            }
        }
    }
    mock_update_auction_document = mocker.MagicMock()
    mocker.patch('openprocurement.auction.insider.mixins.utils.update_auction_document', mock_update_auction_document)
    auction.mapping['test_bidder_id'] = 'test_bid'
    auction.request_id = 'auction_request_id'
    auction.auction_document = {
        'initial_value': 'initial_value',
        'current_stage': 1,
        'stages': [
            {'test_key': 'test_value'},
            {'test_key': 'test_value'}
        ],
        'results': []
    }
    bid = {'bidder_id': 'test_bidder_id',
           'current_stage': 1}
    mock_prepare_results_stage = mocker.MagicMock()
    mock_prepare_results_stage.return_value = {
        'stage_results': 'result_from_prepare_results_stage'
    }
    mocker.patch('openprocurement.auction.insider.mixins.utils.prepare_results_stage', mock_prepare_results_stage)
    mock_end_dutch = mocker.patch.object(auction, 'end_dutch', autospec=True)
    spied_approve_dutch_winner = mocker.spy(auction, 'approve_dutch_winner')
    result = auction.add_dutch_winner(bid)
    log_strings = logger.log_capture_string.getvalue().split('\n')
    assert log_strings[-3] == '---------------- Adding dutch winner ----------------'
    spied_approve_dutch_winner.assert_called_once_with(bid)
    mock_prepare_results_stage.assert_called_once_with(
        **{
            'bidder_name': 'test_bid',
            'bidder_id': 'test_bidder_id',
            'dutch_winner': True
        }
    )
    assert auction.auction_document['stages'][auction.auction_document['current_stage']]['stage_results'] == \
        'result_from_prepare_results_stage'
    assert len(auction.auction_document['results']) == 1
    assert auction.auction_document['results'][0] == {'stage_results': 'result_from_prepare_results_stage'}
    assert log_strings[-2] == 'Approved dutch winner'
    assert mock_end_dutch.call_count == 1
    assert result is True
    # Stale stage: the bid references a stage that has already ended.
    auction.auction_document['current_stage'] = 2
    bid = {'bidder_id': 'test_bidder_id', 'current_stage': 1}
    result = auction.add_dutch_winner(bid)
    log_strings = logger.log_capture_string.getvalue().split('\n')
    assert isinstance(result, Exception)
    assert result.message == u"Your bid is not submitted since the previous step has already ended."
    assert log_strings[-3] == '---------------- Adding dutch winner ----------------'
    assert log_strings[-2] == 'Exception during initialization dutch winner. Error: Your bid is not submitted since the previous step has already ended.'
    # Broken mapping: attribute access on None surfaces as AttributeError.
    auction.mapping = None
    result = auction.add_dutch_winner(bid)
    log_strings = logger.log_capture_string.getvalue().split('\n')
    assert log_strings[-3] == '---------------- Adding dutch winner ----------------'
    assert log_strings[-2] == "Exception during initialization dutch winner. Error: 'NoneType' object has no attribute 'get'"
    assert isinstance(result, AttributeError)
def test_end_dutch(auction, logger, mocker):
    """end_dutch(): with no dutch bids the auction ends immediately; with at
    least one result it advances into the pre-sealedbid phase."""
    auction.audit = {
        'timeline': {
            DUTCH: {
                'timeline': {},
                'bids': []
            }
        }
    }
    auction.auction_document = {
        'initial_value': 'initial_value',
        'current_stage': 1,
        'stages': [
            {
                'test_key': 'test_value',
                'type': 'dutch_0'
            },
            {
                'test_key': 'test_value',
                'type': 'dutch_1'
            },
            {
                'test_key': 'test_value',
                'type': 'pre-sealedbid'
            }
        ],
        'results': []
    }
    mock_spawn = mocker.MagicMock()
    mocker.patch('openprocurement.auction.insider.mixins.spawn', mock_spawn)
    mock_end_auction = mocker.patch.object(auction, 'end_auction', autospec=True)
    result = auction.end_dutch()
    log_strings = logger.log_capture_string.getvalue().split('\n')
    assert log_strings[-3] == '---------------- End dutch phase ----------------'
    # The audit must get a parseable end timestamp for the dutch phase.
    assert isinstance(dateutil.parser.parse(auction.audit['timeline'][DUTCH]['timeline']['end']), datetime.datetime)
    assert len(auction.auction_document['stages'][1]) == 3
    assert auction.auction_document['stages'][1]['passed'] is True
    mock_spawn.assert_called_once_with(auction.clean_up_preplanned_jobs)
    assert log_strings[-2] == "No bids on dutch phase. End auction now."
    assert mock_end_auction.call_count == 1
    assert result is None
    # With a recorded result the auction moves on instead of ending.
    auction.auction_document['results'].append({'test_key': 'test_value'})
    auction.end_dutch()
    log_strings = logger.log_capture_string.getvalue().split('\n')
    assert log_strings[-2] == '---------------- End dutch phase ----------------'
    assert isinstance(dateutil.parser.parse(auction.audit['timeline'][DUTCH]['timeline']['end']), datetime.datetime)
    assert len(auction.auction_document['stages'][1]) == 3
    assert auction.auction_document['stages'][1]['passed'] is True
    assert mock_spawn.call_count == 2
    assert auction.auction_document['current_phase'] == 'pre-sealedbid'
    assert auction.auction_document['current_stage'] == 2
import dateutil.parser
import pytest
from openprocurement.auction.insider.constants import DUTCH
def test_end_stage(auction, logger, mocker):
    """next_stage(): switching to another dutch stage locks bids, updates the
    document/stage, records the turn in the audit; a non-dutch next stage
    additionally triggers end_dutch().

    NOTE(review): despite its name this test exercises auction.next_stage(),
    not an "end_stage" method — consider renaming.
    """
    auction.audit = {
        'timeline':
        {
            DUTCH: {
                'timeline': {}
            }
        }
    }
    # utils.update_stage returns the timestamp recorded everywhere below.
    mock_update_stage = mocker.MagicMock()
    mock_update_stage.return_value = 'run_time_value'
    mocker.patch('openprocurement.auction.insider.mixins.utils.update_stage', mock_update_stage)
    mock_lock_bids = mocker.MagicMock()
    mock_update_auction_document = mocker.MagicMock()
    mocker.patch('openprocurement.auction.insider.mixins.utils.lock_bids', mock_lock_bids)
    mocker.patch('openprocurement.auction.insider.mixins.utils.update_auction_document', mock_update_auction_document)
    stage = {
        'amount': 500000.0,
        'start': '2017-12-12T00:00:30',
        'time': '',
        'type': 'dutch_0'
    }
    auction.auction_document = {
        'initial_value': 'initial_value',
        'current_stage': 1,
        'stages': [
            {'test_key': 'test_value'},
            {'test_key': 'test_value'}
        ]
    }
    # Dutch-type stage: value switch is logged and audited.
    auction.next_stage(stage)
    log_strings = logger.log_capture_string.getvalue().split('\n')
    mock_lock_bids.assert_called_once_with(auction)
    mock_update_auction_document.assert_called_once_with(auction)
    mock_update_stage.assert_called_once_with(auction)
    assert auction.auction_document['stages'][0]['passed'] is True
    assert log_strings[-3] == '---------------- SWITCH DUTCH VALUE ----------------'
    assert auction.auction_document['stages'][1]['time'] == 'run_time_value'
    assert auction.auction_document['current_phase'] == DUTCH
    assert auction.audit['timeline'][DUTCH]['timeline']['start'] == 'run_time_value'
    assert log_strings[-2] == 'Switched dutch phase value from initial_value to 500000.0'
    assert auction.audit['timeline'][DUTCH]['turn_1'] == {
        'amount': 500000.0,
        'time': 'run_time_value'
    }
    # Non-dutch stage type: the dutch phase must be ended.
    stage['type'] = 'not_dutch_type'
    mock_end_dutch = mocker.patch.object(auction, 'end_dutch', autospec=True)
    auction.auction_document['stages'][0]['passed'] = False
    auction.next_stage(stage)
    assert mock_lock_bids.call_count == 2
    assert mock_update_auction_document.call_count == 2
    assert mock_update_stage.call_count == 2
    assert mock_end_dutch.call_count == 1
    assert auction.auction_document['stages'][0]['passed'] is True
def test_approve_dutch_winner(auction, logger, mocker):
    """approve_dutch_winner(): flags the bid as dutch winner and records it
    in the audit and internal bid data; a non-dict argument is rejected
    with a logged error and a False return."""
    auction.audit = {'timeline': {DUTCH: {'bids': []}}}
    auction.auction_document = {
        'initial_value': 'initial_value',
        'current_stage': 1,
        'stages': [
            {'test_key': 'test_value'},
            {'test_key': 'test_value'}
        ]
    }
    # Happy path: the bid gains the dutch_winner flag and is stored both in
    # the audit timeline and in the auction's internal bid data.
    approved = auction.approve_dutch_winner({'bidder_id': 'test_bidder_id'})
    assert approved == {
        'bidder_id': 'test_bidder_id',
        'dutch_winner': True
    }
    audit_bids = auction.audit['timeline'][DUTCH]['bids']
    assert len(audit_bids) == 1
    assert audit_bids[0] == approved
    assert auction._bids_data['test_bidder_id'][0] == approved
    # Failure path: a plain string cannot take item assignment.
    outcome = auction.approve_dutch_winner('bid')
    captured = logger.log_capture_string.getvalue().split('\n')
    assert outcome is False
    assert captured[-2] == "Unable to post dutch winner. Error: 'str' object does not support item assignment"
def test_add_dutch_winner(auction, logger, mocker):
    """add_dutch_winner(): a bid for the current stage is approved, written
    to the stage and results, and ends the dutch phase; a stale stage or a
    broken mapping makes the raised exception the return value."""
    auction.audit = {
        'timeline':
        {
            DUTCH: {
                'bids': []
            }
        }
    }
    mock_update_auction_document = mocker.MagicMock()
    mocker.patch('openprocurement.auction.insider.mixins.utils.update_auction_document', mock_update_auction_document)
    auction.mapping['test_bidder_id'] = 'test_bid'
    auction.request_id = 'auction_request_id'
    auction.auction_document = {
        'initial_value': 'initial_value',
        'current_stage': 1,
        'stages': [
            {'test_key': 'test_value'},
            {'test_key': 'test_value'}
        ],
        'results': []
    }
    bid = {'bidder_id': 'test_bidder_id',
           'current_stage': 1}
    mock_prepare_results_stage = mocker.MagicMock()
    mock_prepare_results_stage.return_value = {
        'stage_results': 'result_from_prepare_results_stage'
    }
    mocker.patch('openprocurement.auction.insider.mixins.utils.prepare_results_stage', mock_prepare_results_stage)
    mock_end_dutch = mocker.patch.object(auction, 'end_dutch', autospec=True)
    spied_approve_dutch_winner = mocker.spy(auction, 'approve_dutch_winner')
    # Happy path: bid's stage matches the document's current stage.
    result = auction.add_dutch_winner(bid)
    log_strings = logger.log_capture_string.getvalue().split('\n')
    assert log_strings[-3] == '---------------- Adding dutch winner ----------------'
    spied_approve_dutch_winner.assert_called_once_with(bid)
    mock_prepare_results_stage.assert_called_once_with(
        **{
            'bidder_name': 'test_bid',
            'bidder_id': 'test_bidder_id',
            'dutch_winner': True
        }
    )
    assert auction.auction_document['stages'][auction.auction_document['current_stage']]['stage_results'] == \
        'result_from_prepare_results_stage'
    assert len(auction.auction_document['results']) == 1
    assert auction.auction_document['results'][0] == {'stage_results': 'result_from_prepare_results_stage'}
    assert log_strings[-2] == 'Approved dutch winner'
    assert mock_end_dutch.call_count == 1
    assert result is True
    # Late bid: the auction has already moved past the bid's stage.
    auction.auction_document['current_stage'] = 2
    bid = {'bidder_id': 'test_bidder_id', 'current_stage': 1}
    result = auction.add_dutch_winner(bid)
    log_strings = logger.log_capture_string.getvalue().split('\n')
    assert isinstance(result, Exception)
    assert result.message == u"Your bid is not submitted since the previous step has already ended."
    assert log_strings[-3] == '---------------- Adding dutch winner ----------------'
    assert log_strings[-2] == 'Exception during initialization dutch winner. Error: Your bid is not submitted since the previous step has already ended.'
    # Broken mapping: the AttributeError is logged and returned.
    auction.mapping = None
    result = auction.add_dutch_winner(bid)
    log_strings = logger.log_capture_string.getvalue().split('\n')
    assert log_strings[-3] == '---------------- Adding dutch winner ----------------'
    assert log_strings[-2] == "Exception during initialization dutch winner. Error: 'NoneType' object has no attribute 'get'"
    assert isinstance(result, AttributeError)
def test_end_dutch(auction, logger, mocker):
    """end_dutch(): stamps the audit timeline and marks the current stage
    passed; with no results it ends the auction immediately, with a result
    it switches to the pre-sealedbid phase and advances the stage counter."""
    auction.audit = {
        'timeline':
        {
            DUTCH: {
                'timeline': {},
                'bids': []
            }
        }
    }
    auction.auction_document = {
        'initial_value': 'initial_value',
        'current_stage': 1,
        'stages': [
            {
                'test_key': 'test_value',
                'type': 'dutch_0'
            },
            {
                'test_key': 'test_value',
                'type': 'dutch_1'
            },
            {
                'test_key': 'test_value',
                'type': 'pre-sealedbid'
            }
        ],
        'results': []
    }
    # spawn is patched so no clean-up greenlet is actually scheduled.
    mock_spawn = mocker.MagicMock()
    mocker.patch('openprocurement.auction.insider.mixins.spawn', mock_spawn)
    mock_end_auction = mocker.patch.object(auction, 'end_auction', autospec=True)
    # First call: 'results' is empty -> the auction ends right away.
    result = auction.end_dutch()
    log_strings = logger.log_capture_string.getvalue().split('\n')
    assert log_strings[-3] == '---------------- End dutch phase ----------------'
    assert isinstance(dateutil.parser.parse(auction.audit['timeline'][DUTCH]['timeline']['end']), datetime.datetime)
    assert len(auction.auction_document['stages'][1]) == 3
    assert auction.auction_document['stages'][1]['passed'] is True
    mock_spawn.assert_called_once_with(auction.clean_up_preplanned_jobs)
    assert log_strings[-2] == "No bids on dutch phase. End auction now."
    assert mock_end_auction.call_count == 1
    assert result is None
    # Second call: a result exists -> phase switches to pre-sealedbid.
    auction.auction_document['results'].append({'test_key': 'test_value'})
    auction.end_dutch()
    log_strings = logger.log_capture_string.getvalue().split('\n')
    assert log_strings[-2] == '---------------- End dutch phase ----------------'
    assert isinstance(dateutil.parser.parse(auction.audit['timeline'][DUTCH]['timeline']['end']), datetime.datetime)
    assert len(auction.auction_document['stages'][1]) == 3
    assert auction.auction_document['stages'][1]['passed'] is True
    assert mock_spawn.call_count == 2
    assert auction.auction_document['current_phase'] == 'pre-sealedbid'
    # NOTE(review): the trailing "| 0.616705 | 0.453746" below is dataset-join
    # residue (quality columns), not part of the assertion — strip it.
    assert auction.auction_document['current_stage'] == 2 | 0.616705 | 0.453746
import logging
import sys
import traceback
import warnings
from pathlib import Path
class UltranestFilter(logging.Filter):
    """Suppress per-iteration progress records (messages starting with
    "iteration=") so they do not flood the log file."""

    def filter(self, record):
        message = record.getMessage()
        return not message.startswith("iteration=")
class StreamToLogger(object):
    """Minimal file-like object that forwards everything written to it,
    line by line, to a logger at a fixed level (used to capture
    stdout/stderr into the log file)."""

    def __init__(self, logger, level):
        self.logger = logger
        self.level = level
        self.linebuf = ''  # kept for stream-interface compatibility; unused

    def write(self, buf):
        # Emit one log record per non-empty line of the written text.
        stripped = buf.rstrip()
        for raw_line in stripped.splitlines():
            self.logger.log(self.level, raw_line.rstrip())

    def flush(self):
        # Nothing is buffered; present only so stream-flushing callers work.
        pass
def set_logger(srcid, model, stdout_to_log=True, fmt=None):
    """Route all logging for one fit to logs/<model>/fit_<srcid>.log.

    Replaces every handler on the root logger with a fresh FileHandler,
    optionally redirects stdout/stderr into the same logger, quiets the
    "sherpa" logger and keeps "ultranest" at INFO (its per-iteration
    records are dropped by UltranestFilter).

    Parameters
    ----------
    srcid : source identifier used in the log file name.
    model : model name; becomes the log subdirectory.
    stdout_to_log : when True, sys.stdout/sys.stderr are replaced by
        StreamToLogger wrappers so prints and tracebacks land in the file.
    fmt : optional logging.Formatter; a compact default is built if None.
    """
    log_file = Path("logs", model, f"fit_{srcid}.log")
    # Fix: FileHandler raises FileNotFoundError if the directory is missing,
    # so create logs/<model>/ up front.
    log_file.parent.mkdir(parents=True, exist_ok=True)
    filehandler = logging.FileHandler(log_file, "w")
    if fmt is None:
        #infofmt = "%(levelname)s:%(asctime)s: %(module)s:%(funcName)s: %(message)s"
        infofmt = "[%(name)s %(levelname)s]: %(message)s"
        fmt = logging.Formatter(infofmt, datefmt="%I:%M:%S")
    filehandler.setFormatter(fmt)
    filehandler.addFilter(UltranestFilter())
    # root logger - Good to get it only once.
    logger = logging.getLogger()
    # remove the existing file handlers left over from a previous call
    for hdlr in logger.handlers[:]:
        logger.removeHandler(hdlr)
    logger.addHandler(filehandler)
    logger.setLevel(logging.INFO)
    if stdout_to_log:
        sys.stdout = StreamToLogger(logger, logging.INFO)
        sys.stderr = StreamToLogger(logger, logging.ERROR)
    logger = logging.getLogger("sherpa")
    logger.setLevel(logging.ERROR)
    warnings.filterwarnings("ignore", message='displayed errorbars')
    logger = logging.getLogger("ultranest")
    logger.setLevel(logging.INFO)
def log_exception(exception):
    """Log an exception's formatted traceback and then the exception itself
    at ERROR level via the root logger."""
    logging.error(''.join(traceback.format_tb(exception.__traceback__)))
    # NOTE(review): the trailing "| logs.py | import logging" is dataset-join
    # residue fused onto this line — strip it when restoring the file.
    logging.error(exception) | logs.py | import logging
import sys
import traceback
import warnings
from pathlib import Path
class UltranestFilter(logging.Filter):
    """Drop log records whose message starts with "iteration=" (ultranest's
    per-iteration progress spam)."""
    def filter(self, record):
        # Returning False suppresses the record.
        return not record.getMessage().startswith("iteration=")
class StreamToLogger(object):
    """
    Fake file-like stream object that redirects writes to a logger instance.
    """
    def __init__(self, logger, level):
        self.logger = logger  # target logger for forwarded writes
        self.level = level    # level used for every forwarded line
        self.linebuf = ''     # unused; kept for stream-like interface
    def write(self, buf):
        # Forward each non-empty line of the written text as one record.
        for line in buf.rstrip().splitlines():
            self.logger.log(self.level, line.rstrip())
    def flush(self):
        # No buffering, so nothing to do; needed by stream-flushing callers.
        pass
def set_logger(srcid, model, stdout_to_log=True, fmt=None):
    """Attach a fresh FileHandler (logs/<model>/fit_<srcid>.log) to the root
    logger, optionally redirect stdout/stderr into it, and tune the
    sherpa/ultranest logger levels.

    NOTE(review): assumes logs/<model>/ already exists — FileHandler raises
    FileNotFoundError otherwise; confirm callers create the directory.
    """
    log_file = Path("logs", model, f"fit_{srcid}.log")
    filehandler = logging.FileHandler(log_file, "w")
    if fmt is None:
        #infofmt = "%(levelname)s:%(asctime)s: %(module)s:%(funcName)s: %(message)s"
        infofmt = "[%(name)s %(levelname)s]: %(message)s"
        fmt = logging.Formatter(infofmt, datefmt="%I:%M:%S")
    filehandler.setFormatter(fmt)
    filehandler.addFilter(UltranestFilter())
    # root logger - Good to get it only once.
    logger = logging.getLogger()
    # remove the existing file handlers
    for hdlr in logger.handlers[:]:
        logger.removeHandler(hdlr)
    logger.addHandler(filehandler)
    logger.setLevel(logging.INFO)
    if stdout_to_log:
        # Capture print() output and uncaught tracebacks in the same file.
        sys.stdout = StreamToLogger(logger, logging.INFO)
        sys.stderr = StreamToLogger(logger, logging.ERROR)
    # Quiet sherpa; keep ultranest at INFO (iteration spam is filtered above).
    logger = logging.getLogger("sherpa")
    logger.setLevel(logging.ERROR)
    warnings.filterwarnings("ignore", message='displayed errorbars')
    logger = logging.getLogger("ultranest")
    logger.setLevel(logging.INFO)
def log_exception(exception):
    """Log an exception's formatted traceback and then the exception itself
    at ERROR level via the root logger."""
    logging.error(''.join(traceback.format_tb(exception.__traceback__)))
    # NOTE(review): the trailing "| 0.380759 | 0.101902" is dataset-join
    # residue fused onto this line — strip it when restoring the file.
    logging.error(exception) | 0.380759 | 0.101902
import unittest
from mock import MagicMock, patch
from state_model.put_state_model import put_state_model
def get_mock_event():
    """Build the canned lambda event used by the tests: a session id plus a
    nested state model describing an existing, active preference."""
    existing_preference = {
        'is_present': True,
        'id': '12345',
        'status': 'active'
    }
    state_model = {
        'session_id': '12345',
        'get_preference_result': 'success',
        'existing_preference': existing_preference,
        'contact_centre': False,
        'expiry_time_key': '12345'
    }
    return {'session_id': '12345', 'state_model': state_model}
class TestPutStateModel(unittest.TestCase):
    """Unit tests for the put_state_model lambda: lambda_handler, handle and
    setup_lambda.

    NOTE(review): two method names carry typos ("WillPutSateModel",
    "WillRaiseAtributeErrpr"), and the latter test exercises handle(), not
    lambda_handler() — consider renaming.
    """
    @patch('state_model.put_state_model.put_state_model._kms_encrypt_dict')
    @patch('state_model.put_state_model.put_state_model.StrictRedisCluster')
    @patch('state_model.put_state_model.put_state_model.setup_lambda')
    def test__put_state_model__lambda_handler__WillPutSateModel__WhenCalledWithAnEventContainingAValidSessionId(self,
                                                                                                               mock_setup_lambda,
                                                                                                               mock_strict_redis_cluster,
                                                                                                               mock_kms_encrypt):
        # The handler stores the model in Redis and sets the key expiry.
        mock_redis = MagicMock()
        mock_strict_redis_cluster.return_value = mock_redis
        mock_setup_lambda.return_value = '12345'
        mock_state_model = get_mock_event()
        put_state_model.lambda_handler(mock_state_model, MagicMock)
        mock_redis.expireat.assert_called_with('12345', '12345')
    @patch('state_model.put_state_model.put_state_model.handle')
    def test__put_state_model__lambda_handler__WillRaiseAGivenException__WhenAnExceptionOccursThatIsThrownOrUnhandled(self,
                                                                                                                     mock_handle):
        # Unhandled exceptions from handle() propagate out of the handler.
        mock_handle.side_effect = RuntimeError('Example message')
        with self.assertRaises(RuntimeError):
            put_state_model.lambda_handler({}, MagicMock)
    @patch('state_model.put_state_model.put_state_model.put_state_model')
    @patch('state_model.put_state_model.put_state_model.setup_lambda')
    def test__put_state_model__handle__WillCallPutStateModel__WhenCalledWithAnEventContainingAValidStateModel(self,
                                                                                                             mock_setup_lambda,
                                                                                                             mock_put_state_model):
        # handle() forwards the session id and the event's state model.
        mock_setup_lambda.return_value = '12345'
        mock_state_model = get_mock_event()
        put_state_model.handle(mock_state_model, MagicMock)
        mock_put_state_model.assert_called_with(
            '12345',
            {
                'session_id': '12345',
                'get_preference_result': 'success',
                'existing_preference': {
                    'is_present': True,
                    'id': '12345',
                    'status': 'active'
                },
                'contact_centre': False,
                'expiry_time_key': '12345'
            })
    @patch('state_model.put_state_model.put_state_model.setup_lambda')
    def test__put_state_model__lambda_handler__WillRaiseAtributeErrpr__WhenCalledWithAnEventContainingNoStateModel(self,
                                                                                                                  mock_setup_lambda):
        # An event without a 'state_model' key raises AttributeError.
        mock_setup_lambda.return_value = '12345'
        with self.assertRaises(AttributeError):
            put_state_model.handle({'session_id': '12345'}, MagicMock)
    @patch('state_model.put_state_model.put_state_model.configure_logger')
    def test__put_state_model__setup_lambda__WillReturnSessionId__WhenEventContainsAValidSessionId(
            self,
            mock_configure_logger):
        # setup_lambda returns the event's session id when present.
        actual = put_state_model.setup_lambda({'session_id': '12345'}, {})
        self.assertEqual(actual, '12345')
    @patch('state_model.put_state_model.put_state_model.configure_logger')
    def test__put_state_model__setup_lambda__WillRaiseAttributeError__WhenEventContainsNoSessionIdField(
            self,
            mock_configure_logger):
        # setup_lambda raises when the session id field is absent.
        with self.assertRaises(AttributeError):
            put_state_model.setup_lambda({}, {})
    @patch('state_model.put_state_model.put_state_model.configure_logger')
    def test__put_state_model__setup_lambda__WillRaiseAttributeError__WhenEventContainsAnEmptySessionId(
            self,
            mock_configure_logger):
        # setup_lambda raises when the session id is an empty string.
        # NOTE(review): the trailing "| <path> | import unittest" below is
        # dataset-join residue, not Python — strip it when restoring.
        with self.assertRaises(AttributeError):
            put_state_model.setup_lambda({'session_id': ''}, {}) | unit_tests/state_model/test_put_state_model.py | import unittest
from mock import MagicMock, patch
from state_model.put_state_model import put_state_model
def get_mock_event():
    """Return the canned lambda event used by the tests: a session id plus a
    nested state model describing an existing, active preference."""
    return {
        'session_id': '12345',
        'state_model': {
            'session_id': '12345',
            'get_preference_result': 'success',
            'existing_preference': {
                'is_present': True,
                'id': '12345',
                'status': 'active'
            },
            'contact_centre': False,
            'expiry_time_key': '12345'
        }
    }
class TestPutStateModel(unittest.TestCase):
    """Unit tests for the put_state_model lambda entry points
    (lambda_handler, handle, setup_lambda).

    Fixed test method names: "WillPutSateModel" -> "WillPutStateModel",
    and the mis-named "lambda_handler__WillRaiseAtributeErrpr" test (it
    exercises handle()) -> "handle__WillRaiseAttributeError".
    """

    @patch('state_model.put_state_model.put_state_model._kms_encrypt_dict')
    @patch('state_model.put_state_model.put_state_model.StrictRedisCluster')
    @patch('state_model.put_state_model.put_state_model.setup_lambda')
    def test__put_state_model__lambda_handler__WillPutStateModel__WhenCalledWithAnEventContainingAValidSessionId(self,
                                                                                                                mock_setup_lambda,
                                                                                                                mock_strict_redis_cluster,
                                                                                                                mock_kms_encrypt):
        """The handler stores the state model in Redis and sets the expiry."""
        mock_redis = MagicMock()
        mock_strict_redis_cluster.return_value = mock_redis
        mock_setup_lambda.return_value = '12345'
        mock_state_model = get_mock_event()
        put_state_model.lambda_handler(mock_state_model, MagicMock)
        mock_redis.expireat.assert_called_with('12345', '12345')

    @patch('state_model.put_state_model.put_state_model.handle')
    def test__put_state_model__lambda_handler__WillRaiseAGivenException__WhenAnExceptionOccursThatIsThrownOrUnhandled(self,
                                                                                                                     mock_handle):
        """Unhandled exceptions from handle() propagate out of the handler."""
        mock_handle.side_effect = RuntimeError('Example message')
        with self.assertRaises(RuntimeError):
            put_state_model.lambda_handler({}, MagicMock)

    @patch('state_model.put_state_model.put_state_model.put_state_model')
    @patch('state_model.put_state_model.put_state_model.setup_lambda')
    def test__put_state_model__handle__WillCallPutStateModel__WhenCalledWithAnEventContainingAValidStateModel(self,
                                                                                                             mock_setup_lambda,
                                                                                                             mock_put_state_model):
        """handle() forwards the session id and the event's state model."""
        mock_setup_lambda.return_value = '12345'
        mock_state_model = get_mock_event()
        put_state_model.handle(mock_state_model, MagicMock)
        mock_put_state_model.assert_called_with(
            '12345',
            {
                'session_id': '12345',
                'get_preference_result': 'success',
                'existing_preference': {
                    'is_present': True,
                    'id': '12345',
                    'status': 'active'
                },
                'contact_centre': False,
                'expiry_time_key': '12345'
            })

    @patch('state_model.put_state_model.put_state_model.setup_lambda')
    def test__put_state_model__handle__WillRaiseAttributeError__WhenCalledWithAnEventContainingNoStateModel(self,
                                                                                                            mock_setup_lambda):
        """handle() raises AttributeError when 'state_model' is missing."""
        mock_setup_lambda.return_value = '12345'
        with self.assertRaises(AttributeError):
            put_state_model.handle({'session_id': '12345'}, MagicMock)

    @patch('state_model.put_state_model.put_state_model.configure_logger')
    def test__put_state_model__setup_lambda__WillReturnSessionId__WhenEventContainsAValidSessionId(
            self,
            mock_configure_logger):
        """setup_lambda returns the event's session id when present."""
        actual = put_state_model.setup_lambda({'session_id': '12345'}, {})
        self.assertEqual(actual, '12345')

    @patch('state_model.put_state_model.put_state_model.configure_logger')
    def test__put_state_model__setup_lambda__WillRaiseAttributeError__WhenEventContainsNoSessionIdField(
            self,
            mock_configure_logger):
        """setup_lambda raises when the session id field is absent."""
        with self.assertRaises(AttributeError):
            put_state_model.setup_lambda({}, {})

    @patch('state_model.put_state_model.put_state_model.configure_logger')
    def test__put_state_model__setup_lambda__WillRaiseAttributeError__WhenEventContainsAnEmptySessionId(
            self,
            mock_configure_logger):
        """setup_lambda raises when the session id is an empty string."""
        with self.assertRaises(AttributeError):
            put_state_model.setup_lambda({'session_id': ''}, {})
import os
import shutil
import subprocess
DEST="/home/ubuntu/cleverhans/examples/nips17_adversarial_competition"
META_DIR = "/home/ubuntu/adversarial_attack/metafiles"
CONFIG_DIR = "config.csv"
class bcolors:
    """ANSI escape sequences used to colorize the script's console output."""
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # bright blue
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
# organize the files based on config.csv: the first three non-comment lines
# list the attack, targeted-attack and defense submissions to deploy.
# NOTE(review): the open() handle is never closed — prefer `with open(...)`.
all_content = open(CONFIG_DIR).readlines()
all_content = [x.strip() for x in all_content if x[0] != "#"]
attacks = all_content[0].split(",")
attacks_target = all_content[1].split(",")
defenses = all_content[2].split(",")
# removing existing folders
for e_folder in ["sample_attacks", "sample_defenses", "sample_targeted_attacks"]:
    folder_dir = os.path.join(DEST, e_folder)
    try:
        shutil.rmtree(folder_dir)
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # narrowing to OSError would be safer.
    except:
        print(bcolors.WARNING + "Folder" + folder_dir + " have already been removed." + bcolors.ENDC)
# copy the whole folders into the destination
for e_folder in ["sample_attacks", "sample_defenses", "sample_targeted_attacks"]:
    folder_dir = os.path.join(DEST, e_folder)
    os.makedirs(folder_dir)
    for e_subfolder in os.listdir(e_folder):
        orig_folder = os.path.join(e_folder, e_subfolder)
        dest_folder = os.path.join(folder_dir, e_subfolder)
        if os.path.isfile(orig_folder):
            print(bcolors.OKBLUE + "Copy file:" + bcolors.ENDC + orig_folder + " to destination folder:" + dest_folder)
            shutil.copy2(orig_folder, dest_folder)
        elif e_subfolder in attacks + attacks_target + defenses:
            # Only submissions listed in config.csv are copied.
            print(bcolors.OKBLUE + "Copy folder:" + bcolors.ENDC + orig_folder + " to destination folder:" + dest_folder)
            shutil.copytree(orig_folder, dest_folder)
# copy model and meta files into directory
for efile in os.listdir(META_DIR):
    if efile.startswith("meta"):
        continue
    efile_dir = os.path.join(META_DIR, efile)
    for e_folder in ["sample_attacks", "sample_targeted_attacks"]:
        for e_subfolder in os.listdir(os.path.join(DEST, e_folder)):
            # NOTE(review): this tests the bare name against the CWD, unlike
            # the later loops which build the full destination path first —
            # likely should check os.path.join(DEST, e_folder, e_subfolder).
            if not os.path.isfile(e_subfolder) :
                dest_sub_dir = os.path.join(DEST, e_folder, e_subfolder)
                shutil.copy2(efile_dir, dest_sub_dir)
folder_dict = {"sample_attacks": "attack", "sample_targeted_attacks": "target", "sample_defenses": "defense"}
# drop the kind-specific metadata.json into every submission folder
for e_folder in folder_dict.keys():
    for e_subfolder in os.listdir(os.path.join(DEST, e_folder)):
        e_subpath = os.path.join(DEST, e_folder, e_subfolder)
        if not os.path.isfile(e_subpath) :
            dest_dir = os.path.join(e_subpath, "metadata.json")
            efile_dir = os.path.join(META_DIR, "metadata_" + folder_dict[e_folder] + ".json")
            shutil.copyfile(efile_dir, dest_dir)
# and change file permissions so the run scripts are executable
for e_folder in ["sample_attacks", "sample_targeted_attacks", "sample_defenses"]:
    for e_subfolder in os.listdir(os.path.join(DEST, e_folder)):
        dest_sub_dir = os.path.join(DEST, e_folder, e_subfolder)
        if not os.path.isfile(dest_sub_dir) :
            for mod_file in os.listdir(dest_sub_dir):
                if mod_file in ["run_defense.sh", "run_attack.sh"]:
                    mod_dir = os.path.join(dest_sub_dir, mod_file)
                    # the 0o octal literal is only supported by python 3
                    print(bcolors.OKBLUE + "Change file mode for:" + bcolors.ENDC + mod_dir)
                    os.chmod(mod_dir, 0o777)
# run the defense and attack
# NOTE(review): the trailing "| copy_files.py | import os" below is
# dataset-join residue, not Python — strip it when restoring the file.
subprocess.call(['/home/ubuntu/cleverhans/examples/nips17_adversarial_competition/run_attacks_and_defenses.sh']) | copy_files.py | import os
import shutil
import subprocess
DEST="/home/ubuntu/cleverhans/examples/nips17_adversarial_competition"
META_DIR = "/home/ubuntu/adversarial_attack/metafiles"
CONFIG_DIR = "config.csv"
class bcolors:
    """ANSI terminal escape codes for colorized progress messages."""
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # bright blue
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
# Organize the files based on config.csv: the first three non-comment lines
# list the attack, targeted-attack and defense submissions to deploy.
# Fix: read the config through a `with` block so the handle is closed.
with open(CONFIG_DIR) as config_file:
    all_content = config_file.readlines()
all_content = [x.strip() for x in all_content if x[0] != "#"]
attacks = all_content[0].split(",")
attacks_target = all_content[1].split(",")
defenses = all_content[2].split(",")
# removing existing folders
for e_folder in ["sample_attacks", "sample_defenses", "sample_targeted_attacks"]:
    folder_dir = os.path.join(DEST, e_folder)
    try:
        shutil.rmtree(folder_dir)
    # Fix: was a bare `except:` (also swallowed KeyboardInterrupt/SystemExit);
    # only filesystem errors such as a missing path are expected here.
    except OSError:
        print(bcolors.WARNING + "Folder" + folder_dir + " have already been removed." + bcolors.ENDC)
# copy the whole folders into the destination
for e_folder in ["sample_attacks", "sample_defenses", "sample_targeted_attacks"]:
    folder_dir = os.path.join(DEST, e_folder)
    os.makedirs(folder_dir)
    for e_subfolder in os.listdir(e_folder):
        orig_folder = os.path.join(e_folder, e_subfolder)
        dest_folder = os.path.join(folder_dir, e_subfolder)
        if os.path.isfile(orig_folder):
            print(bcolors.OKBLUE + "Copy file:" + bcolors.ENDC + orig_folder + " to destination folder:" + dest_folder)
            shutil.copy2(orig_folder, dest_folder)
        elif e_subfolder in attacks + attacks_target + defenses:
            # Only submissions listed in config.csv are copied.
            print(bcolors.OKBLUE + "Copy folder:" + bcolors.ENDC + orig_folder + " to destination folder:" + dest_folder)
            shutil.copytree(orig_folder, dest_folder)
# copy model and meta files into each attack directory
for efile in os.listdir(META_DIR):
    if efile.startswith("meta"):
        continue
    efile_dir = os.path.join(META_DIR, efile)
    for e_folder in ["sample_attacks", "sample_targeted_attacks"]:
        for e_subfolder in os.listdir(os.path.join(DEST, e_folder)):
            dest_sub_dir = os.path.join(DEST, e_folder, e_subfolder)
            # Fix: check the full destination path (the original tested the
            # bare name against the CWD, unlike the loops below).
            if not os.path.isfile(dest_sub_dir):
                shutil.copy2(efile_dir, dest_sub_dir)
# drop the kind-specific metadata.json into every submission folder
folder_dict = {"sample_attacks": "attack", "sample_targeted_attacks": "target", "sample_defenses": "defense"}
for e_folder in folder_dict.keys():
    for e_subfolder in os.listdir(os.path.join(DEST, e_folder)):
        e_subpath = os.path.join(DEST, e_folder, e_subfolder)
        if not os.path.isfile(e_subpath):
            dest_dir = os.path.join(e_subpath, "metadata.json")
            efile_dir = os.path.join(META_DIR, "metadata_" + folder_dict[e_folder] + ".json")
            shutil.copyfile(efile_dir, dest_dir)
# and change file permissions so the run scripts are executable
for e_folder in ["sample_attacks", "sample_targeted_attacks", "sample_defenses"]:
    for e_subfolder in os.listdir(os.path.join(DEST, e_folder)):
        dest_sub_dir = os.path.join(DEST, e_folder, e_subfolder)
        if not os.path.isfile(dest_sub_dir):
            for mod_file in os.listdir(dest_sub_dir):
                if mod_file in ["run_defense.sh", "run_attack.sh"]:
                    mod_dir = os.path.join(dest_sub_dir, mod_file)
                    # the 0o octal literal is only supported by python 3
                    print(bcolors.OKBLUE + "Change file mode for:" + bcolors.ENDC + mod_dir)
                    os.chmod(mod_dir, 0o777)
# run the defense and attack
subprocess.call(['/home/ubuntu/cleverhans/examples/nips17_adversarial_competition/run_attacks_and_defenses.sh'])
import utilities
import rasterio
import numpy as np
import datetime
from scipy import stats
import sys
sys.path.append('../')
import constants_and_names as cn
import universal_util as uu
def create_continent_ecozone_tiles(tile_id):
print "Processing:", tile_id
# Start time
start = datetime.datetime.now()
ymax, xmin, ymin, xmax = utilities.coords(tile_id)
print "Extent of", tile_id, "-- ymax:", ymax, "; ymin:", ymin, "; xmax", xmax, "; xmin:", xmin
print "Rasterizing ecozone to extent of biomass tile {}".format(tile_id)
cont_eco_raw = "{0}_{1}".format(tile_id, cn.pattern_cont_eco_raw)
# This makes rasters that are made of 1024 x 1024 pixel windows instead of 40000 x 1 pixel windows
# to improve assigning pixels without continent-ecozone codes to a continent-ecozone code.
# This way, pixels without continent-ecozone are assigned a code based on what's in a window nearby, rather
# than a window that spans the entire 10x10 degree tile.
utilities.rasterize('fao_ecozones_fra_2000_continents_assigned_dissolved_FINAL_20180906.shp',
cont_eco_raw, xmin, ymin, xmax, ymax, '.00025', 'Int16', 'gainEcoCon', '0')
# Opens continent-ecozone tile.
# Everything from here down is used to assign pixels without continent ecozone codes to a continent-ecozone in the 1024x1024 windows.
with rasterio.open('{}.tif'.format(cont_eco_raw)) as cont_eco_raw_src:
# Grabs metadata about the tif, like its location/projection/cellsize
kwargs = cont_eco_raw_src.meta
# Grabs the windows of the tile (stripes) to iterate over the entire tif without running out of memory
windows = cont_eco_raw_src.block_windows(1)
# Updates kwargs for the output dataset.
# Need to update data type to float 32 so that it can handle fractional gain rates
kwargs.update(
driver='GTiff',
count=1,
compress='lzw',
nodata=0
)
# Opens the output tile, giving it the arguments of the input tiles
with rasterio.open('{0}_{1}.tif'.format(tile_id, cn.pattern_cont_eco_processed), 'w', **kwargs) as dst:
# Iterates across the windows (1024 x 1024 pixel boxes) of the input tile.
for idx, window in windows:
# Creates windows for each input raster
cont_eco_raw = cont_eco_raw_src.read(1, window=window)
# Turns the 2D array into a 1D array that is n x n long.
# This makes to easier to remove 0s and find the mode of the remaining continent-ecozone codes
cont_eco_raw_flat = cont_eco_raw.flatten()
# Removes all zeros from the array, leaving just pixels with continent-ecozone codes
non_zeros = np.delete(cont_eco_raw_flat, np.where(cont_eco_raw_flat == 0))
# If there were only pixels without continent-ecozone codes in the array, the mode is assigned 0
if non_zeros.size < 1:
# print " Window is all 0s"
mode = 0
# If there were pixels with continent-ecozone codes, the mode is the most common code among those in the window
else:
mode = stats.mode(non_zeros)[0]
# print " Window is not all 0s. Mode is", mode
cont_eco_processed = cont_eco_raw
# Assigns all pixels without a continent-ecozone code in that window to that most common code
cont_eco_processed[cont_eco_processed == 0] = mode
# Writes the output window to the output.
# Although the windows for the input tiles are 1024 x 1024 pixels,
# the windows for these output files are 40000 x 1 pixels, like all the other tiles in this model,
# so they should work fine with all the other tiles.
dst.write_band(1, cont_eco_processed, window=window)
# Prints information about the tile that was just processed
uu.end_of_fx_summary(start, tile_id, cn.pattern_annual_gain_AGB_mangrove) | gain/continent_ecozone_tiles.py |
import utilities
import rasterio
import numpy as np
import datetime
from scipy import stats
import sys
sys.path.append('../')
import constants_and_names as cn
import universal_util as uu
def create_continent_ecozone_tiles(tile_id):
print "Processing:", tile_id
# Start time
start = datetime.datetime.now()
ymax, xmin, ymin, xmax = utilities.coords(tile_id)
print "Extent of", tile_id, "-- ymax:", ymax, "; ymin:", ymin, "; xmax", xmax, "; xmin:", xmin
print "Rasterizing ecozone to extent of biomass tile {}".format(tile_id)
cont_eco_raw = "{0}_{1}".format(tile_id, cn.pattern_cont_eco_raw)
# This makes rasters that are made of 1024 x 1024 pixel windows instead of 40000 x 1 pixel windows
# to improve assigning pixels without continent-ecozone codes to a continent-ecozone code.
# This way, pixels without continent-ecozone are assigned a code based on what's in a window nearby, rather
# than a window that spans the entire 10x10 degree tile.
utilities.rasterize('fao_ecozones_fra_2000_continents_assigned_dissolved_FINAL_20180906.shp',
cont_eco_raw, xmin, ymin, xmax, ymax, '.00025', 'Int16', 'gainEcoCon', '0')
# Opens continent-ecozone tile.
# Everything from here down is used to assign pixels without continent ecozone codes to a continent-ecozone in the 1024x1024 windows.
with rasterio.open('{}.tif'.format(cont_eco_raw)) as cont_eco_raw_src:
# Grabs metadata about the tif, like its location/projection/cellsize
kwargs = cont_eco_raw_src.meta
# Grabs the windows of the tile (stripes) to iterate over the entire tif without running out of memory
windows = cont_eco_raw_src.block_windows(1)
# Updates kwargs for the output dataset.
# Need to update data type to float 32 so that it can handle fractional gain rates
kwargs.update(
driver='GTiff',
count=1,
compress='lzw',
nodata=0
)
# Opens the output tile, giving it the arguments of the input tiles
with rasterio.open('{0}_{1}.tif'.format(tile_id, cn.pattern_cont_eco_processed), 'w', **kwargs) as dst:
# Iterates across the windows (1024 x 1024 pixel boxes) of the input tile.
for idx, window in windows:
# Creates windows for each input raster
cont_eco_raw = cont_eco_raw_src.read(1, window=window)
# Turns the 2D array into a 1D array that is n x n long.
# This makes to easier to remove 0s and find the mode of the remaining continent-ecozone codes
cont_eco_raw_flat = cont_eco_raw.flatten()
# Removes all zeros from the array, leaving just pixels with continent-ecozone codes
non_zeros = np.delete(cont_eco_raw_flat, np.where(cont_eco_raw_flat == 0))
# If there were only pixels without continent-ecozone codes in the array, the mode is assigned 0
if non_zeros.size < 1:
# print " Window is all 0s"
mode = 0
# If there were pixels with continent-ecozone codes, the mode is the most common code among those in the window
else:
mode = stats.mode(non_zeros)[0]
# print " Window is not all 0s. Mode is", mode
cont_eco_processed = cont_eco_raw
# Assigns all pixels without a continent-ecozone code in that window to that most common code
cont_eco_processed[cont_eco_processed == 0] = mode
# Writes the output window to the output.
# Although the windows for the input tiles are 1024 x 1024 pixels,
# the windows for these output files are 40000 x 1 pixels, like all the other tiles in this model,
# so they should work fine with all the other tiles.
dst.write_band(1, cont_eco_processed, window=window)
# Prints information about the tile that was just processed
uu.end_of_fx_summary(start, tile_id, cn.pattern_annual_gain_AGB_mangrove) | 0.396769 | 0.340102 |
import pathlib as path
import numpy as np
from tslearn.metrics import dtw, dtw_path
from tqdm import tqdm
from modules.barycenter import sdtw_barycenter
from modules.barycenter import gfsdtw_barycenter
from auxiliary.dataset import load_ucr
import time
import fsdtw
import itertools
TIMESTAMP = time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))
def exp_fun(ctx, name):
    """Run the barycenter experiment for one UCR dataset over 10 seeds and
    return (name, per-parameter mean score, per-parameter std)."""
    print(f"calculating {name}")
    X_tr, y_tr, X_te, y_te = load_ucr("data/ucr2015", name)
    # PATCH only for original sdtw implementation: add a trailing channel axis.
    X_tr = X_tr.reshape(*X_tr.shape, 1)
    # END PATCH
    per_seed = [exp_1seed(ctx, X_tr, y_tr, seed)
                for seed in tqdm(range(10), disable=True)]
    scores = np.array(per_seed)
    return name, scores.mean(axis=0), scores.std(axis=0)
def exp_1seed(ctx, X_tr, y_tr, seed=0):
    """Evaluate every (radius, gamma, q) setting on one random class sample.

    Picks one class at random (seeded), samples up to ``n`` of its series,
    computes a barycenter per parameter setting, and scores it as the mean
    squared DTW distance to the sampled series.

    Returns a 1-D array with one score per entry of ``settings['params']``.
    Raises Exception for an unknown ``settings['method']``.
    """
    settings = ctx['settings']
    n = 10
    # Pick n time series at random from the same class.
    rng = np.random.RandomState(seed)
    classes = np.unique(y_tr)
    k = rng.randint(len(classes))
    X = X_tr[y_tr == classes[k]]
    X = X[rng.permutation(len(X))[:n]]
    # Euclidean mean of the sample is the usual barycenter initialisation.
    barycenter_init = sum(X) / len(X)
    result = []
    for r, gamma, q in settings['params']:
        print(f"seed: {seed}, r {r}, gamma: {gamma}, q {q}")
        dtw_score = 0
        Z = None
        if settings['method'] == "softdtw":
            Z = sdtw_barycenter(X, barycenter_init, gamma=gamma, max_iter=settings['max_iter'])
        elif settings['method'] == "gfsdtw":
            Z = gfsdtw_barycenter(X, barycenter_init, gamma=gamma, q=q, radius=r, max_iter=settings['max_iter'])
        else:
            # Fixed typo in the original error message ("metohd").
            raise Exception(f'method `{settings["method"]}` not found')
        # Score: mean squared DTW distance from each series to the barycenter.
        for x in X:
            dtw_score += (dtw(x.squeeze(), Z))**2
        result.append(dtw_score / len(X))
    return np.array(result)
def get_params():
    """Build the (radius, gamma, q) hyper-parameter grid, radius-major order."""
    radii = [1]
    gammas = [0.05, 0.1, 0.2, 0.5, 1]
    qs = [10, 20, 300, 400, 600, 1000]
    return [(radius, gamma, q)
            for radius in radii
            for gamma in gammas
            for q in qs]
# Experiment configuration shared by every run in this script.
SETTINGS = {
    "method": "gfsdtw",  # barycenter algorithm: "softdtw" or "gfsdtw"
    "version": fsdtw.__version__,  # record the fsdtw version alongside results
    "max_iter": 100,  # barycenter optimisation iterations
    "params": get_params()  # (radius, gamma, q) grid evaluated per seed
}
if __name__ == "__main__":
    ucr_dir = path.Path("data/ucr2015")
    ctx = {"settings": SETTINGS}
    # Iterate datasets alphabetically; the loop currently stops after the
    # first dataset (see the `break` at the end of the loop body).
    for data_name in sorted(ucr_dir.iterdir()):
        print(data_name.name)
        name, r_mean, r_std = exp_fun(ctx, data_name.name)
        # Interleave mean and std per parameter setting into one flat row.
        r = []
        for i in range(r_mean.shape[0]):
            r.append(r_mean[i])
            r.append(r_std[i])
        print(name, *r)
break | barycenter.py | import pathlib as path
import numpy as np
from tslearn.metrics import dtw, dtw_path
from tqdm import tqdm
from modules.barycenter import sdtw_barycenter
from modules.barycenter import gfsdtw_barycenter
from auxiliary.dataset import load_ucr
import time
import fsdtw
import itertools
TIMESTAMP = time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))
def exp_fun(ctx, name):
print(f"calculating {name}")
X_tr, y_tr, X_te, y_te = load_ucr("data/ucr2015", name)
# PATCH only for original sdtw implementation
X_tr = X_tr.reshape(*X_tr.shape, 1)
# END PATCH
result = []
for seed in tqdm(range(10), disable=True):
r = exp_1seed(ctx, X_tr, y_tr, seed)
result.append(r)
result = np.array(result)
return name, result.mean(axis=0), result.std(axis=0)
def exp_1seed(ctx, X_tr, y_tr, seed=0):
settings = ctx['settings']
n = 10
# Pick n time series at random from the same class.
rng = np.random.RandomState(seed)
classes = np.unique(y_tr)
k = rng.randint(len(classes))
X = X_tr[y_tr == classes[k]]
X = X[rng.permutation(len(X))[:n]]
barycenter_init = sum(X) / len(X)
result = []
for r, gamma, q in settings['params']: # gamma, q in zip((1, 0.1, 0.01, 0.001, 0.0001, 0.00001), (20, 50, 100, 200, 500, 1000)):
# gamma = settings['gamma']
# q = settings['q']
print(f"seed: {seed}, r {r}, gamma: {gamma}, q {q}")
dtw_score = 0
Z = None
if settings['method'] == "softdtw":
Z = sdtw_barycenter(X, barycenter_init, gamma=gamma, max_iter=settings['max_iter'])
elif settings['method'] == "gfsdtw":
Z = gfsdtw_barycenter(X, barycenter_init, gamma=gamma, q=q, radius=r, max_iter=settings['max_iter'])
else:
raise Exception(f'metohd `{settings["method"]}` not found')
for x in X:
dtw_score += (dtw(x.squeeze(), Z))**2
result.append(dtw_score / len(X))
# print('finish one', time.strftime('%H:%M:%S', time.localtime(time.time())))
return np.array(result)
def get_params():
r = [1]
gamma = [0.05, 0.1, 0.2, 0.5, 1]
q = [10, 20, 300, 400, 600, 1000]
# r = [1]
# gamma = [0.1]
# q = [100]
params = list(itertools.product(r, gamma, q))
return params
SETTINGS = {
"method": "gfsdtw", # "fsdtw"
"version": fsdtw.__version__,
"max_iter": 100,
"params": get_params()
}
if __name__ == "__main__":
ucr_dir = path.Path("data/ucr2015")
ctx = {"settings": SETTINGS}
for data_name in sorted(ucr_dir.iterdir()):
print(data_name.name)
name, r_mean, r_std = exp_fun(ctx, data_name.name)
r = []
for i in range(r_mean.shape[0]):
r.append(r_mean[i])
r.append(r_std[i])
print(name, *r)
break | 0.293101 | 0.29005 |
from typing import List
from enum import Enum
import operator
class State(Enum):
    """A seat-map cell, rendered with the puzzle's input characters."""
    FLOOR = 1
    EMPTY = 2
    OCCUPIED = 3

    def __repr__(self):
        # Identity comparison is the canonical way to test enum members.
        if self is State.FLOOR:
            return '.'
        if self is State.OCCUPIED:
            return '#'
        return 'L'
class Direction(Enum):
    """The eight compass directions as (dx, dy) offset vectors."""
    UP = (0, 1)
    DOWN = (0, -1)
    LEFT = (-1, 0)
    RIGHT = (1, 0)
    UP_LEFT = (-1, 1)
    UP_RIGHT = (1, 1)
    DOWN_LEFT = (-1, -1)
    DOWN_RIGHT = (1, -1)
def from_char(char) -> State:
    """Translate an input character to its State; raises KeyError otherwise."""
    return {
        '.': State.FLOOR,
        'L': State.EMPTY,
        '#': State.OCCUPIED,
    }[char]
def parse_input(filename: str) -> List[List[State]]:
    """Read the seat-layout file into a row-major grid of States."""
    with open(filename, 'r') as f:
        return [[from_char(char) for char in line.rstrip('\n')]
                for line in f.readlines()]
def find_state(rows: List[List[State]], coord: (int, int), part2: bool) -> State:
    """Return the next state of the cell at *coord* (an (x, y) pair).

    Part 1 looks at the eight adjacent cells; part 2 looks along each of the
    eight directions for the first non-floor seat.
    """
    # NOTE(review): coord is (x, y) — callers pass (x, y) and the grid is
    # indexed rows[y][x] below — so the local name `row` actually holds the
    # x/column index and `column` holds y. The logic is self-consistent, but
    # the names are inverted.
    row, column = coord
    current_state = rows[coord[1]][coord[0]]
    # In-bounds and not the cell itself. Closes over the OUTER row/column
    # (the queried cell), not the shadowing parameters of the generators below.
    def valid(i: int, j: int, row_length: int, col_length: int) -> bool:
        return not (i==row and j==column) and i >= 0 and j >= 0 and i < row_length and j < col_length
    # Part-1 neighbourhood: the up-to-eight directly adjacent coordinates.
    def coord_generator(row: int, column: int, row_length: int, col_length: int):
        for i in range(row - 1, row + 2):
            for j in range(column - 1, column + 2):
                if valid(i, j, row_length, col_length):
                    yield (i, j)
    # Part-2 neighbourhood: walk each direction, yielding the first
    # non-floor cell seen (if any) before hitting the grid edge.
    def vector_coord_gen(row: int, column: int, row_length: int, col_length: int):
        for d in Direction:
            current_coord = (row, column)
            next_coord = tuple(map(operator.add, d.value, current_coord))
            while valid(next_coord[0], next_coord[1], row_length, col_length):
                if(rows[next_coord[1]][next_coord[0]] != State.FLOOR):
                    yield (next_coord[0], next_coord[1])
                    break
                current_coord = next_coord
                next_coord = tuple(map(operator.add, d.value, current_coord))
    # Floor never changes.
    if current_state == State.FLOOR:
        return State.FLOOR
    else:
        gen = coord_generator(row, column, len(rows[0]), len(rows)) if not part2 else \
            vector_coord_gen(row, column, len(rows[0]), len(rows))
        occupies = 0
        for x, y in gen:
            position = rows[y][x]
            if position == State.OCCUPIED:
                occupies += 1
        # Puzzle rules: empty + no occupied neighbours -> occupied;
        # occupied + too many occupied neighbours (4 / 5) -> empty.
        if occupies == 0:
            return State.OCCUPIED
        elif not part2 and occupies >= 4:
            return State.EMPTY
        elif part2 and occupies >= 5:
            return State.EMPTY
        else:
            return current_state
def run_rules(plane: List[List[State]], part2: bool) -> List[List[State]]:
    """Apply one simultaneous rules step, returning a brand-new grid."""
    next_grid = []
    for y, row in enumerate(plane):
        next_grid.append([find_state(plane, (x, y), part2)
                          for x, _ in enumerate(row)])
    return next_grid
def get_occupied_seats(part2: bool) -> int:
    """Run the seating rules from input.txt until the occupied count is
    stable between steps; return that final occupied-seat count."""
    rows = parse_input('input.txt')
    prev_count = 0
    while True:
        rows = run_rules(rows, part2)
        count = sum(1 for row in rows for seat in row if seat == State.OCCUPIED)
        # Stop as soon as a step leaves the occupied count unchanged.
        if count == prev_count:
            return count
        prev_count = count
print(f"Part 1: {get_occupied_seats(False)}")
print(f"Part 2: {get_occupied_seats(True)}") | Chris/Day11/hodges_day11.py | from typing import List
from enum import Enum
import operator
class State(Enum):
FLOOR = 1
EMPTY = 2
OCCUPIED = 3
def __repr__(self):
if self.value == self.FLOOR.value:
return '.'
elif self.value == self.OCCUPIED.value:
return '#'
else:
return 'L'
class Direction(Enum):
UP = (0, 1)
DOWN = (0, -1)
LEFT = (-1, 0)
RIGHT = (1, 0)
UP_LEFT = (-1, 1)
UP_RIGHT = (1, 1)
DOWN_LEFT = (-1, -1)
DOWN_RIGHT = (1, -1)
def from_char(char) -> State:
mapping = {
'.': State.FLOOR,
'L': State.EMPTY,
'#': State.OCCUPIED
}
return mapping[char]
def parse_input(filename: str) -> List[List[State]]:
with open(filename, 'r') as f:
lines = f.readlines()
return_list = []
for row in lines:
return_list.append([from_char(char) for char in row.rstrip('\n')])
return return_list
def find_state(rows: List[List[State]], coord: (int, int), part2: bool) -> State:
row, column = coord
current_state = rows[coord[1]][coord[0]]
def valid(i: int, j: int, row_length: int, col_length: int) -> bool:
return not (i==row and j==column) and i >= 0 and j >= 0 and i < row_length and j < col_length
def coord_generator(row: int, column: int, row_length: int, col_length: int):
for i in range(row - 1, row + 2):
for j in range(column - 1, column + 2):
if valid(i, j, row_length, col_length):
yield (i, j)
def vector_coord_gen(row: int, column: int, row_length: int, col_length: int):
for d in Direction:
current_coord = (row, column)
next_coord = tuple(map(operator.add, d.value, current_coord))
while valid(next_coord[0], next_coord[1], row_length, col_length):
if(rows[next_coord[1]][next_coord[0]] != State.FLOOR):
yield (next_coord[0], next_coord[1])
break
current_coord = next_coord
next_coord = tuple(map(operator.add, d.value, current_coord))
if current_state == State.FLOOR:
return State.FLOOR
else:
gen = coord_generator(row, column, len(rows[0]), len(rows)) if not part2 else \
vector_coord_gen(row, column, len(rows[0]), len(rows))
occupies = 0
for x, y in gen:
position = rows[y][x]
if position == State.OCCUPIED:
occupies += 1
if occupies == 0:
return State.OCCUPIED
elif not part2 and occupies >= 4:
return State.EMPTY
elif part2 and occupies >= 5:
return State.EMPTY
else:
return current_state
def run_rules(plane: List[List[State]], part2: bool) -> List[List[State]]:
return [[find_state(plane, (x,y), part2) for x, _ in enumerate(row)] for y, row in enumerate(plane)]
def get_occupied_seats(part2: bool) -> int:
rows = parse_input('input.txt')
changed = True
sum_total = current_sum = 0
while changed:
sum_total = current_sum
rows = run_rules(rows, part2)
current_sum = sum([1 if (item == State.OCCUPIED) else 0 for row in rows for item in row])
if(current_sum == sum_total):
changed = False
return current_sum
print(f"Part 1: {get_occupied_seats(False)}")
print(f"Part 2: {get_occupied_seats(True)}") | 0.577019 | 0.460471 |
import re
import shlex
from subprocess import PIPE, Popen, TimeoutExpired
class PlayerException(Exception):
    """Base error for player-process failures (exit codes, parsing, timeouts)."""
    pass
class PlayerCmdException(PlayerException):
    """Raised when the engine answers a GTP command with an error ('?') response."""
    pass
class Player(object):
"""
Player is a simple interface to a game playing process which is communicated
with using the GTP protocol. Player contains very minimal logic outside of
nicely handling interaction with the player process.
"""
    def __init__(self, invocation):
        """Spawn the engine process from a shell-style *invocation* string,
        keeping text-mode pipes to its stdin/stdout for GTP traffic."""
        args = shlex.split(invocation)
        self._process = Popen(args, stdin=PIPE, stdout=PIPE,
                              universal_newlines=True)
        self._stdout = self._process.stdout
        self._stdin = self._process.stdin
def _write(self, command):
self._stdin.write(command)
self._stdin.write('\n')
self._stdin.flush()
def _read(self):
response = self._stdout.readline()
next_line = self._stdout.readline()
while next_line != '\n':
response += next_line
next_line = self._stdout.readline()
return response
def _cmd(self, command):
self._write(command)
response = self._read()
error_occurred = response.startswith('?')
response = response[2:].rstrip()
if error_occurred:
raise PlayerCmdException("Error issuing command: '{}'. Response "
"was: '{}'".format(command, response))
return response
    def exit(self):
        """Ask the engine to quit and wait up to 10s for the process to end.

        Raises PlayerException on a non-zero exit code or a wait timeout.
        """
        self._cmd('q')
        try:
            rc = self._process.wait(timeout=10)
            if rc != 0:
                raise PlayerException('{} exited with non-zero error code {}'
                                      .format(self._process.pid, rc))
        except TimeoutExpired:
            raise PlayerException('Timed out wating for {} to exit.'
                                  .format(self._process.pid))
    def _set_size(self, size):
        # GTP: set the board size.
        self._cmd('size {}'.format(size))
    def _set_time_limit(self, time_limit):
        # GTP: set the per-move time limit.
        self._cmd('set_time {}'.format(time_limit))
    def _gen_move(self):
        # GTP: ask the engine to generate and play its own move.
        return self._cmd('genmove')
    def _play_move(self, move):
        # GTP: place the given move on the engine's board.
        self._cmd('play {}'.format(move))
    def _name(self):
        # GTP: query the engine's name.
        return self._cmd('name')
    def _clear_board(self):
        # GTP: reset the board to an empty position.
        self._cmd('clear_board')
    def _final_score(self):
        # GTP: query the final score string (empty while the game is ongoing).
        return self._cmd('final_score')
    def _board(self):
        # GTP: ask the engine for a textual rendering of the board.
        return self._cmd('showboard')
    def _player_to_move(self):
        # GTP: query which colour moves next.
        return self._cmd('player_to_move')
def configure(self, size=None, time_limit=None):
if size is not None:
self._set_size(size)
if time_limit is not None:
self._set_time_limit(time_limit)
def play(self, move=None):
if move is not None:
self._play_move(move)
return
return self._gen_move()
    def clear(self):
        """Reset the engine's board to an empty position."""
        self._clear_board()
def game_finished(self):
# An empty string indicates the game is ongoing.
score_string = self._final_score()
return bool(score_string)
def final_score(self):
score_string = self._final_score()
if score_string == '0':
return ('0', '0')
regex = r'(?P<winner>.*)\+(?P<score>.*)'
result = re.match(regex, score_string)
if result is None:
raise PlayerException('Could not parse win string: '
'{}'.format(score_string))
return (result.group('winner'), result.group('score'))
    def board(self):
        """Return the engine's textual rendering of the current board."""
        return self._board()
    def player_to_move(self):
        """Return which colour the engine says moves next."""
        return self._player_to_move()
def __str__(self):
return '{}-{}'.format(self._name(), self._process.pid) | old/tournament/player.py | import re
import shlex
from subprocess import PIPE, Popen, TimeoutExpired
class PlayerException(Exception):
pass
class PlayerCmdException(PlayerException):
pass
class Player(object):
"""
Player is a simple interface to a game playing process which is communicated
with using the GTP protocol. Player contains very minimal logic outside of
nicely handling interaction with the player process.
"""
def __init__(self, invocation):
args = shlex.split(invocation)
self._process = Popen(args, stdin=PIPE, stdout=PIPE,
universal_newlines=True)
self._stdout = self._process.stdout
self._stdin = self._process.stdin
def _write(self, command):
self._stdin.write(command)
self._stdin.write('\n')
self._stdin.flush()
def _read(self):
response = self._stdout.readline()
next_line = self._stdout.readline()
while next_line != '\n':
response += next_line
next_line = self._stdout.readline()
return response
def _cmd(self, command):
self._write(command)
response = self._read()
error_occurred = response.startswith('?')
response = response[2:].rstrip()
if error_occurred:
raise PlayerCmdException("Error issuing command: '{}'. Response "
"was: '{}'".format(command, response))
return response
def exit(self):
self._cmd('q')
try:
rc = self._process.wait(timeout=10)
if rc != 0:
raise PlayerException('{} exited with non-zero error code {}'
.format(self._process.pid, rc))
except TimeoutExpired:
raise PlayerException('Timed out wating for {} to exit.'
.format(self._process.pid))
def _set_size(self, size):
self._cmd('size {}'.format(size))
def _set_time_limit(self, time_limit):
self._cmd('set_time {}'.format(time_limit))
def _gen_move(self):
return self._cmd('genmove')
def _play_move(self, move):
self._cmd('play {}'.format(move))
def _name(self):
return self._cmd('name')
def _clear_board(self):
self._cmd('clear_board')
def _final_score(self):
return self._cmd('final_score')
def _board(self):
return self._cmd('showboard')
def _player_to_move(self):
return self._cmd('player_to_move')
def configure(self, size=None, time_limit=None):
if size is not None:
self._set_size(size)
if time_limit is not None:
self._set_time_limit(time_limit)
def play(self, move=None):
if move is not None:
self._play_move(move)
return
return self._gen_move()
def clear(self):
self._clear_board()
def game_finished(self):
# An empty string indicates the game is ongoing.
score_string = self._final_score()
return bool(score_string)
def final_score(self):
score_string = self._final_score()
if score_string == '0':
return ('0', '0')
regex = r'(?P<winner>.*)\+(?P<score>.*)'
result = re.match(regex, score_string)
if result is None:
raise PlayerException('Could not parse win string: '
'{}'.format(score_string))
return (result.group('winner'), result.group('score'))
def board(self):
return self._board()
def player_to_move(self):
return self._player_to_move()
def __str__(self):
return '{}-{}'.format(self._name(), self._process.pid) | 0.563498 | 0.14253 |
import uuid
import six
from datetime import timedelta, datetime
import json
import adal
import dateutil.parser
import requests
from Kqlmagic.my_aad_helper import _MyAadHelper, ConnKeysKCSB
from Kqlmagic.kql_client import KqlQueryResponse, KqlError
from Kqlmagic.constants import Constants, ConnStrKeys
from Kqlmagic.version import VERSION
class Kusto_Client(object):
"""
Kusto client wrapper for Python.
KustoClient works with both 2.x and 3.x flavors of Python. All primitive types are supported.
KustoClient takes care of ADAL authentication, parsing response and giving you typed result set,
and offers familiar Python DB API.
Test are run using nose.
Examples
--------
To use KustoClient, you can choose betwen two ways of authentication.
For the first option, you'll need to have your own AAD application and know your client credentials (client_id and client_secret).
>>> kusto_cluster = 'https://help.kusto.windows.net'
>>> kusto_client = KustoClient(kusto_cluster, client_id, client_secret='your_app_secret')
For the second option, you can use KustoClient's client id and authenticate using your username and password.
>>> kusto_cluster = 'https://help.kusto.windows.net'
>>> client_id = 'e07cf1fb-c6a6-4668-b21a-f74731afa19a'
>>> kusto_client = KustoClient(kusto_cluster, client_id, username='your_username', password='<PASSWORD>')"""
_DEFAULT_CLIENTID = "db662dc1-0cfe-4e1c-a843-19a68e65be58" # kusto client app, (didn't find app name ?)
# _DEFAULT_CLIENTID = "8430759c-5626-4577-b151-d0755f5355d8" # kusto client app, don't know app name
_MGMT_ENDPOINT_VERSION = "v1"
_QUERY_ENDPOINT_VERSION = "v2"
_MGMT_ENDPOINT_TEMPLATE = "{0}/{1}/rest/mgmt"
_QUERY_ENDPOINT_TEMPLATE = "{0}/{1}/rest/query"
_DATA_SOURCE_TEMPLATE = "https://{0}.kusto.windows.net"
_WEB_CLIENT_VERSION = VERSION
    def __init__(self, conn_kv:dict):
        """
        Kusto Client constructor.

        Parameters
        ----------
        conn_kv : dict
            Connection key/values (see ConnStrKeys). Must contain CLUSTER —
            either a bare cluster name or a full URL. When ANONYMOUS is
            absent, an AAD helper is created for authentication; otherwise
            requests are sent without an Authorization header.
        """
        cluster_name = conn_kv[ConnStrKeys.CLUSTER]
        # A value containing "://" is taken as a complete data-source URL;
        # otherwise the bare name is expanded via the kusto.windows.net template.
        data_source = cluster_name if cluster_name.find("://") >= 0 else self._DATA_SOURCE_TEMPLATE.format(cluster_name)
        self._mgmt_endpoint = self._MGMT_ENDPOINT_TEMPLATE.format(data_source, self._MGMT_ENDPOINT_VERSION)
        self._query_endpoint = self._QUERY_ENDPOINT_TEMPLATE.format(data_source, self._QUERY_ENDPOINT_VERSION)
        # None in anonymous mode — execute() checks this before adding auth headers.
        self._aad_helper = _MyAadHelper(ConnKeysKCSB(conn_kv, data_source), self._DEFAULT_CLIENTID) if conn_kv.get(ConnStrKeys.ANONYMOUS) is None else None
    def execute(self, kusto_database, kusto_query, accept_partial_results=False, **options):
        """ Execute a simple query or management command
        Parameters
        ----------
        kusto_database : str
            Database against query will be executed.
        kusto_query : str
            Query to be executed.
        accept_partial_results : bool
            Optional parameter. If query fails, but we receive some results, we consider results as partial.
            If this is True, results are returned to client, even if there are exceptions.
            If this is False, exception is raised. Default is False.
        options["timeout"] : float, optional
            Optional parameter. Network timeout in seconds. Default is no timeout.
        """
        # Management commands start with "." and go to the v1 mgmt endpoint;
        # plain queries use the v2 query endpoint.
        if kusto_query.startswith("."):
            endpoint_version = self._MGMT_ENDPOINT_VERSION
            endpoint = self._mgmt_endpoint
        else:
            endpoint_version = self._QUERY_ENDPOINT_VERSION
            endpoint = self._query_endpoint
        request_payload = {
            "db": kusto_database,
            "csl": kusto_query,
        }
        request_headers = {
            "Accept": "application/json",
            "Accept-Encoding": "gzip,deflate",
            "Content-Type": "application/json; charset=utf-8",
            "x-ms-client-version": "{0}.Python.Client:{1}".format(Constants.MAGIC_CLASS_NAME, self._WEB_CLIENT_VERSION),
            # Fresh request id per call, for server-side tracing.
            "x-ms-client-request-id": "{0}.execute;{1}".format(Constants.MAGIC_CLASS_NAME, str(uuid.uuid4())),
        }
        # Skipped entirely in anonymous mode (no _aad_helper).
        if self._aad_helper is not None:
            request_headers["Authorization"] = self._aad_helper.acquire_token(**options)
            request_headers["Fed"] = "True"
        response = requests.post(endpoint, headers=request_headers, json=request_payload, timeout=options.get("timeout"))
        if response.status_code != requests.codes.ok:  # pylint: disable=E1101
            raise KqlError([response.text], response)
        kql_response = KqlQueryResponse(response.json(), endpoint_version)
        # Partial results: surface exceptions unless the caller opted in.
        if kql_response.has_exceptions() and not accept_partial_results:
            raise KqlError(kql_response.get_exceptions(), response, kql_response)
return kql_response | azure/Kqlmagic/kusto_client.py |
import uuid
import six
from datetime import timedelta, datetime
import json
import adal
import dateutil.parser
import requests
from Kqlmagic.my_aad_helper import _MyAadHelper, ConnKeysKCSB
from Kqlmagic.kql_client import KqlQueryResponse, KqlError
from Kqlmagic.constants import Constants, ConnStrKeys
from Kqlmagic.version import VERSION
class Kusto_Client(object):
"""
Kusto client wrapper for Python.
KustoClient works with both 2.x and 3.x flavors of Python. All primitive types are supported.
KustoClient takes care of ADAL authentication, parsing response and giving you typed result set,
and offers familiar Python DB API.
Test are run using nose.
Examples
--------
To use KustoClient, you can choose betwen two ways of authentication.
For the first option, you'll need to have your own AAD application and know your client credentials (client_id and client_secret).
>>> kusto_cluster = 'https://help.kusto.windows.net'
>>> kusto_client = KustoClient(kusto_cluster, client_id, client_secret='your_app_secret')
For the second option, you can use KustoClient's client id and authenticate using your username and password.
>>> kusto_cluster = 'https://help.kusto.windows.net'
>>> client_id = 'e07cf1fb-c6a6-4668-b21a-f74731afa19a'
>>> kusto_client = KustoClient(kusto_cluster, client_id, username='your_username', password='<PASSWORD>')"""
_DEFAULT_CLIENTID = "db662dc1-0cfe-4e1c-a843-19a68e65be58" # kusto client app, (didn't find app name ?)
# _DEFAULT_CLIENTID = "8430759c-5626-4577-b151-d0755f5355d8" # kusto client app, don't know app name
_MGMT_ENDPOINT_VERSION = "v1"
_QUERY_ENDPOINT_VERSION = "v2"
_MGMT_ENDPOINT_TEMPLATE = "{0}/{1}/rest/mgmt"
_QUERY_ENDPOINT_TEMPLATE = "{0}/{1}/rest/query"
_DATA_SOURCE_TEMPLATE = "https://{0}.kusto.windows.net"
_WEB_CLIENT_VERSION = VERSION
def __init__(self, conn_kv:dict):
"""
Kusto Client constructor.
Parameters
----------
kusto_cluster : str
Kusto cluster endpoint. Example: https://help.kusto.windows.net
client_id : str
The AAD application ID of the application making the request to Kusto
client_secret : str
The AAD application key of the application making the request to Kusto.
if this is given, then username/password should not be.
username : str
The username of the user making the request to Kusto.
if this is given, then password must follow and the client_secret should not be given.
password : str
The password matching the username of the user making the request to Kusto
authority : 'microsoft.com', optional
In case your tenant is not microsoft please use this param.
"""
cluster_name = conn_kv[ConnStrKeys.CLUSTER]
data_source = cluster_name if cluster_name.find("://") >= 0 else self._DATA_SOURCE_TEMPLATE.format(cluster_name)
self._mgmt_endpoint = self._MGMT_ENDPOINT_TEMPLATE.format(data_source, self._MGMT_ENDPOINT_VERSION)
self._query_endpoint = self._QUERY_ENDPOINT_TEMPLATE.format(data_source, self._QUERY_ENDPOINT_VERSION)
self._aad_helper = _MyAadHelper(ConnKeysKCSB(conn_kv, data_source), self._DEFAULT_CLIENTID) if conn_kv.get(ConnStrKeys.ANONYMOUS) is None else None
def execute(self, kusto_database, kusto_query, accept_partial_results=False, **options):
""" Execute a simple query or management command
Parameters
----------
kusto_database : str
Database against query will be executed.
query : str
Query to be executed
accept_partial_results : bool
Optional parameter. If query fails, but we receive some results, we consider results as partial.
If this is True, results are returned to client, even if there are exceptions.
If this is False, exception is raised. Default is False.
options["timeout"] : float, optional
Optional parameter. Network timeout in seconds. Default is no timeout.
"""
if kusto_query.startswith("."):
endpoint_version = self._MGMT_ENDPOINT_VERSION
endpoint = self._mgmt_endpoint
else:
endpoint_version = self._QUERY_ENDPOINT_VERSION
endpoint = self._query_endpoint
request_payload = {
"db": kusto_database,
"csl": kusto_query,
}
request_headers = {
"Accept": "application/json",
"Accept-Encoding": "gzip,deflate",
"Content-Type": "application/json; charset=utf-8",
"x-ms-client-version": "{0}.Python.Client:{1}".format(Constants.MAGIC_CLASS_NAME, self._WEB_CLIENT_VERSION),
"x-ms-client-request-id": "{0}.execute;{1}".format(Constants.MAGIC_CLASS_NAME, str(uuid.uuid4())),
}
if self._aad_helper is not None:
request_headers["Authorization"] = self._aad_helper.acquire_token(**options)
request_headers["Fed"] = "True"
response = requests.post(endpoint, headers=request_headers, json=request_payload, timeout=options.get("timeout"))
if response.status_code != requests.codes.ok: # pylint: disable=E1101
raise KqlError([response.text], response)
kql_response = KqlQueryResponse(response.json(), endpoint_version)
if kql_response.has_exceptions() and not accept_partial_results:
raise KqlError(kql_response.get_exceptions(), response, kql_response)
return kql_response | 0.72331 | 0.145844 |
import requests, phue, time, asyncio, bottom, rgbxy
from config import config, load
from unpack import rfc2812_handler
def get_ip():
    """Discover the local Hue Bridge via the meethue cloud discovery service.

    Returns the bridge's internal IP address as a string, or None when
    discovery fails or no bridge is registered on this network.
    """
    print('Looking for the Hue Bridge')
    # A timeout keeps startup from hanging forever on a bad network;
    # the original call had none.
    r = requests.get('https://discovery.meethue.com', timeout=10)
    if r.status_code != 200:
        return None
    data = r.json()
    if not data:
        return None
    return data[0]['internalipaddress']
def bridge_connect():
    """Connect and register with the Hue Bridge; return a phue.Bridge.

    Blocks until the user presses the bridge's link button. Exits the
    program when no bridge can be discovered on the network.
    """
    hue_ip = get_ip()
    if not hue_ip:
        print('Unable to locate a Hue Bridge. Make sure you are on the same network!')
        exit()
    press_message_displayed = False
    print(f'Connecting to Hue Bridge: {hue_ip}')
    while True:
        try:
            b = phue.Bridge(hue_ip)
            b.connect()
            b.get_api()
            break
        except phue.PhueRegistrationException:
            # Link button not pressed yet: prompt once, then keep polling.
            if not press_message_displayed:
                print('Press the button on the Hue Bridge to allow access')
                press_message_displayed = True
            time.sleep(1)
        # Removed a dead `b = None` initialiser and a no-op bare
        # `except: raise` clause — any other exception still propagates.
    print('Connected to the Hue Bridge')
    return b
# IRC client for Twitch chat over TLS.
bot = bottom.Client(
    host='irc.chat.twitch.tv',
    port=6697,
    ssl=True,
)
# Custom raw handler from unpack.py replaces bottom's default RFC2812
# parsing — presumably to also handle Twitch IRCv3 tags; verify in unpack.py.
bot.raw_handlers = [rfc2812_handler(bot)]
@bot.on('CLIENT_CONNECT')
async def connect(**kwargs):
    """On connect: log in anonymously, request Twitch capabilities, join the
    configured channels, and lazily connect to the Hue Bridge once."""
    # Anonymous Twitch login: justinfan* nicks need no real OAuth token.
    bot.send('PASS', password='<PASSWORD>')
    bot.send('NICK', nick='justinfan32429')
    # Wait for end-of-MOTD (or its absence) before issuing more commands.
    # NOTE(review): passing bare coroutines and `loop=` to asyncio.wait is
    # deprecated/removed on newer Python versions — confirm target runtime.
    done, pending = await asyncio.wait(
        [bot.wait("RPL_ENDOFMOTD"),
        bot.wait("ERR_NOMOTD")],
        loop=bot.loop,
        return_when=asyncio.FIRST_COMPLETED
    )
    # Twitch IRCv3 capabilities: message tags, extra commands, join/part events.
    bot.send_raw('CAP REQ :twitch.tv/tags')
    bot.send_raw('CAP REQ :twitch.tv/commands')
    bot.send_raw('CAP REQ :twitch.tv/membership')
    for c in config['channels']:
        print(f'Joining {c}')
        bot.send('JOIN', channel=f'#{c}')
    # Connect to the bridge only once, even across IRC reconnects.
    if not hasattr(bot, 'bridge'):
        bot.bridge = bridge_connect()
@bot.on('PING')
def keepalive(message, **kwargs):
    # IRC keepalive: echo the payload back as PONG or the server drops us.
    bot.send('PONG', message=message)
@bot.on('USERNOTICE')
async def usernotice(**kwargs):
    """Fire the sub light for any subscription-style USERNOTICE event."""
    # Twitch msg-id values covering subs, resubs, and (anonymous) gift subs.
    if kwargs['msg-id'] in ('sub', 'resub', 'subgift',
        'anonsubgift', 'giftpaidupgrade', 'submysterygift',
        'anonsubmysterygift', 'extendsub'):
        run_sub_light()
@bot.on('PRIVMSG')
async def message(message, **kwargs):
    """Let moderators and the broadcaster trigger a test run via chat."""
    if message not in ('!testsub', '!subtest'):
        return
    badges = kwargs['badges']
    if 'moderator' in badges or 'broadcaster' in badges:
        run_sub_light()
def run_sub_light():
    """Flash all configured lights through the configured colour sequence,
    then restore each light's previous on/bri/xy/ct state."""
    print('Running sub light')
    light_names = []
    # Expand configured room (group) names into their member light ids.
    if config['rooms']:
        for r in config['rooms']:
            group = bot.bridge.get_group(r)
            if group:
                light_names.extend([int(i) for i in group['lights']])
            else:
                print(f'Unknown group {r}')
    if config['lights']:
        light_names.extend(config['lights'])
    # Snapshot the full state of every target light for later restore.
    lights = []
    for l in light_names:
        lights.append(bot.bridge.get_light(l))
    light_names = [l['name'] for l in lights]
    try:
        converter = rgbxy.Converter()
        # Step through each configured colour, holding for `interval` seconds.
        for c in config['colors']:
            d = {
                'on': True,
            }
            if c.get('color'):
                d['xy'] = converter.hex_to_xy(c['color'].strip('#'))
            if c.get('bri'):
                d['bri'] = int(c['bri'])
            if c.get('ct'):
                d['ct'] = int(c['ct'])
            bot.bridge.set_light(light_names, d)
            time.sleep(float(config['interval']))
    finally:
        # Reset the lights to their prev state. Only on/bri/xy/ct are kept —
        # presumably because the bridge rejects read-only state keys; verify.
        for l in lights:
            for k in list(l['state'].keys()):
                if not k in ['on', 'bri', 'xy', 'ct']:
                    del l['state'][k]
            bot.bridge.set_light(l['name'], l['state'])
if __name__ == '__main__':
    # Populate `config` from disk before connecting anywhere.
    load()
    bot.loop.create_task(bot.connect())
bot.loop.run_forever() | twitchhue/app.py | import requests, phue, time, asyncio, bottom, rgbxy
from config import config, load
from unpack import rfc2812_handler
def get_ip():
print('Looking for the Hue Bridge')
r = requests.get('https://discovery.meethue.com')
if r.status_code == 200:
data = r.json()
if not data:
return
return data[0]['internalipaddress']
else:
return
def bridge_connect():
b = None
hue_ip = get_ip()
if not hue_ip:
print('Unable to locate a Hue Bridge. Make sure you are on the same network!')
exit()
press_message_displayed = False
print(f'Connecting to Hue Bridge: {hue_ip}')
while True:
try:
b = phue.Bridge(hue_ip)
b.connect()
b.get_api()
break
except phue.PhueRegistrationException:
if not press_message_displayed:
print('Press the button on the Hue Bridge to allow access')
press_message_displayed = True
time.sleep(1)
except:
raise
print('Connected to the Hue Bridge')
return b
bot = bottom.Client(
host='irc.chat.twitch.tv',
port=6697,
ssl=True,
)
bot.raw_handlers = [rfc2812_handler(bot)]
@bot.on('CLIENT_CONNECT')
async def connect(**kwargs):
bot.send('PASS', password='<PASSWORD>')
bot.send('NICK', nick='justinfan32429')
done, pending = await asyncio.wait(
[bot.wait("RPL_ENDOFMOTD"),
bot.wait("ERR_NOMOTD")],
loop=bot.loop,
return_when=asyncio.FIRST_COMPLETED
)
bot.send_raw('CAP REQ :twitch.tv/tags')
bot.send_raw('CAP REQ :twitch.tv/commands')
bot.send_raw('CAP REQ :twitch.tv/membership')
for c in config['channels']:
print(f'Joining {c}')
bot.send('JOIN', channel=f'#{c}')
if not hasattr(bot, 'bridge'):
bot.bridge = bridge_connect()
@bot.on('PING')
def keepalive(message, **kwargs):
bot.send('PONG', message=message)
@bot.on('USERNOTICE')
async def usernotice(**kwargs):
if kwargs['msg-id'] in ('sub', 'resub', 'subgift',
'anonsubgift', 'giftpaidupgrade', 'submysterygift',
'anonsubmysterygift', 'extendsub'):
run_sub_light()
@bot.on('PRIVMSG')
async def message(message, **kwargs):
if message in ['!testsub', '!subtest']:
if 'moderator' in kwargs['badges'] or 'broadcaster' in kwargs['badges']:
run_sub_light()
def run_sub_light():
print('Running sub light')
light_names = []
if config['rooms']:
for r in config['rooms']:
group = bot.bridge.get_group(r)
if group:
light_names.extend([int(i) for i in group['lights']])
else:
print(f'Unknown group {r}')
if config['lights']:
light_names.extend(config['lights'])
lights = []
for l in light_names:
lights.append(bot.bridge.get_light(l))
light_names = [l['name'] for l in lights]
try:
converter = rgbxy.Converter()
for c in config['colors']:
d = {
'on': True,
}
if c.get('color'):
d['xy'] = converter.hex_to_xy(c['color'].strip('#'))
if c.get('bri'):
d['bri'] = int(c['bri'])
if c.get('ct'):
d['ct'] = int(c['ct'])
bot.bridge.set_light(light_names, d)
time.sleep(float(config['interval']))
finally:
# Reset the lights to their prev state
for l in lights:
for k in list(l['state'].keys()):
if not k in ['on', 'bri', 'xy', 'ct']:
del l['state'][k]
bot.bridge.set_light(l['name'], l['state'])
if __name__ == '__main__':
load()
bot.loop.create_task(bot.connect())
bot.loop.run_forever() | 0.277473 | 0.101411 |
import logging
from typing import Dict
from inspect import iscoroutine
from thrift.protocol.TBinaryProtocol import TBinaryProtocol
from aiohttp.web import Application, Request, Response, run_app
from aiohttp.web_exceptions import HTTPNotFound, HTTPInternalServerError
from .platform.thrift import serialize, deserialize, get_call_args, ThriftService
logger = logging.getLogger(__name__)
class AsyncNexusServer(object):
def __init__(self, services_map: list, address: tuple, protocol_cls=TBinaryProtocol):
"""Initialize AsyncNexusServer
:param services_map: A list of (thrift_service, api_handler) two-tuples.
:param address: A (host, port) tuple.
:param protocol_cls: Thrift protocol class, default is `TBinaryProtocol`.
"""
self.services_map: Dict[str, ThriftService] = {}
for service_module, handler in services_map:
service = ThriftService(service_module, handler)
self.services_map[service.name] = service
self.address = address
self.protocol_cls = protocol_cls
self._app = Application()
self._app.router.add_post('/{service}/{rpc}', self._handle_request)
def _has_service(self, service_name: str) -> bool:
return service_name in self.services_map
@staticmethod
async def _process(rpc_impl, call_args):
ret = rpc_impl(*call_args)
if iscoroutine(ret):
return await ret
return ret
    async def _handle_request(self, request: Request):
        """Decode a POSTed Thrift call, invoke the handler, and reply.

        Unknown service or rpc -> 404. Exceptions declared in the rpc's
        thrift result struct are serialized back to the caller; any other
        exception is logged and answered with 500.
        """
        service_name = request.match_info['service']
        rpc_name = request.match_info['rpc']
        if not self._has_service(service_name):
            raise HTTPNotFound(body=b'')
        service = self.services_map[service_name]
        if not service.has_rpc(rpc_name):
            raise HTTPNotFound(body=b'')
        rpc_impl = getattr(service.handler, rpc_name)
        rpc_args, rpc_result = service.get_rpc_args_and_result_object(rpc_name)
        # Fill the args struct in place from the raw request body.
        deserialize(rpc_args, await request.read(), self.protocol_cls)
        call_args = get_call_args(rpc_args)
        try:
            rpc_result.success = await self._process(rpc_impl, call_args)
        except Exception as e:
            # Map the raised exception onto a declared result field
            # (field name at index 2, exception class at index 3[0] --
            # assumed layout of generated thrift_spec; verify).
            for result_field_info in rpc_result.thrift_spec:
                if result_field_info is None:
                    continue
                exc_name = result_field_info[2]
                if exc_name == 'success':
                    continue
                exc_class = result_field_info[3][0]
                if isinstance(e, exc_class):
                    setattr(rpc_result, exc_name, e)
                    break
            else:
                # for/else: no declared exception matched the raised one.
                logger.exception('NexusServiceError: Unrecognized Exception')
                raise HTTPInternalServerError(body=b'') from e
        return Response(body=serialize(rpc_result, self.protocol_cls))
def run(self):
run_app(self._app, host=self.address[0], port=self.address[1]) | nexus/server.py |
import logging
from typing import Dict
from inspect import iscoroutine
from thrift.protocol.TBinaryProtocol import TBinaryProtocol
from aiohttp.web import Application, Request, Response, run_app
from aiohttp.web_exceptions import HTTPNotFound, HTTPInternalServerError
from .platform.thrift import serialize, deserialize, get_call_args, ThriftService
logger = logging.getLogger(__name__)
class AsyncNexusServer(object):
    """Async HTTP server exposing Thrift services at POST /{service}/{rpc}."""
    def __init__(self, services_map: list, address: tuple, protocol_cls=TBinaryProtocol):
        """Initialize AsyncNexusServer
        :param services_map: A list of (thrift_service, api_handler) two-tuples.
        :param address: A (host, port) tuple.
        :param protocol_cls: Thrift protocol class, default is `TBinaryProtocol`.
        """
        self.services_map: Dict[str, ThriftService] = {}
        for service_module, handler in services_map:
            service = ThriftService(service_module, handler)
            self.services_map[service.name] = service
        self.address = address
        self.protocol_cls = protocol_cls
        self._app = Application()
        # Single catch-all route; service/rpc are resolved per request.
        self._app.router.add_post('/{service}/{rpc}', self._handle_request)
    def _has_service(self, service_name: str) -> bool:
        # True when a service with this name was registered.
        return service_name in self.services_map
    @staticmethod
    async def _process(rpc_impl, call_args):
        # Supports both plain callables and coroutine functions.
        ret = rpc_impl(*call_args)
        if iscoroutine(ret):
            return await ret
        return ret
    async def _handle_request(self, request: Request):
        """Decode a Thrift call, run the handler, and serialize the reply."""
        service_name = request.match_info['service']
        rpc_name = request.match_info['rpc']
        if not self._has_service(service_name):
            raise HTTPNotFound(body=b'')
        service = self.services_map[service_name]
        if not service.has_rpc(rpc_name):
            raise HTTPNotFound(body=b'')
        rpc_impl = getattr(service.handler, rpc_name)
        rpc_args, rpc_result = service.get_rpc_args_and_result_object(rpc_name)
        deserialize(rpc_args, await request.read(), self.protocol_cls)
        call_args = get_call_args(rpc_args)
        try:
            rpc_result.success = await self._process(rpc_impl, call_args)
        except Exception as e:
            # Map the exception onto a field declared in the rpc's thrift
            # result struct (name at [2], class at [3][0] -- assumed layout
            # of generated thrift_spec; verify).
            for result_field_info in rpc_result.thrift_spec:
                if result_field_info is None:
                    continue
                exc_name = result_field_info[2]
                if exc_name == 'success':
                    continue
                exc_class = result_field_info[3][0]
                if isinstance(e, exc_class):
                    setattr(rpc_result, exc_name, e)
                    break
            else:
                # No declared exception matched: log and answer 500.
                logger.exception('NexusServiceError: Unrecognized Exception')
                raise HTTPInternalServerError(body=b'') from e
        return Response(body=serialize(rpc_result, self.protocol_cls))
def run(self):
run_app(self._app, host=self.address[0], port=self.address[1]) | 0.827026 | 0.05199 |
import unittest
import time
import json
from decimal import Decimal
import context
from arithmetictrainer.core import get_number
from arithmetictrainer.core import get_number_array
from arithmetictrainer.core import Arithmetictrainer
from arithmetictrainer.core import arithmetictrainerFromJson
class GetNumberTest(unittest.TestCase):
    """Tests for the get_number / get_number_array helpers."""
    def test_get_number(self):
        # A degenerate range (min == max) and a negative number of decimal
        # places are both rejected with ValueError.
        self.assertRaises(ValueError, get_number, 2, 2, 1)
        self.assertRaises(ValueError, get_number, 2, 4, -1)
        for i in range(100):
            # subTest pinpoints the failing iteration; assertGreaterEqual /
            # assertLessEqual report the offending value on failure, which
            # plain assertTrue(num >= ...) does not.
            with self.subTest(iteration=i):
                num = get_number(-100, 100, 1)
                self.assertNotEqual(num, Decimal('0'))
                self.assertGreaterEqual(num, Decimal(-100))
                self.assertLessEqual(num, Decimal(100))
    def test_get_number_array(self):
        # Requesting zero / one numbers yields arrays of exactly that length.
        self.assertEqual(len(get_number_array(0, -100, 100, 1)), 0)
        self.assertEqual(len(get_number_array(1, -100, 100, 1)), 1)
class ArithmetictrainerTest(unittest.TestCase):
    """Behavioural tests for the Arithmetictrainer class."""
    @staticmethod
    def _make_config():
        """Build the single '+' exercise config shared by these tests."""
        return [{
            'operator': '+',
            'variable_num': 2,
            'variable_min': -100,
            'variable_max': 100,
            'variable_decimal_points': 1,
            'result_decimal_points': 1,
        }]
    @staticmethod
    def _make_state():
        """Build a fresh initial state (zero answers, started now)."""
        return {
            'started_at': time.time(),
            'num_correct_answers': 0,
            'num_incorrect_answers': 0,
        }
    def setUp(self):
        self.trainer = Arithmetictrainer(self._make_config(),
                                         state=self._make_state())
    def test__init__(self):
        config = self._make_config()
        state = self._make_state()
        a = Arithmetictrainer(config, state=state)
        # assertEqual reports a diff on failure, unlike assertTrue(x == y).
        self.assertEqual(a.getConfig(), config)
        self.assertEqual(a.getState(), state)
    def test_answer(self):
        task = self.trainer.getTask()
        correct_answer = task['correct_answer']
        # An off-by-one value is guaranteed to be a wrong answer.
        wrong_answer = str(Decimal(correct_answer) + 1)
        self.assertEqual(0, self.trainer.getState()['num_correct_answers'])
        self.assertFalse(self.trainer.answer(wrong_answer))
        self.assertEqual(0, self.trainer.getState()['num_correct_answers'])
        self.assertTrue(self.trainer.answer(correct_answer))
        self.assertEqual(1, self.trainer.getState()['num_correct_answers'])
class ArithmetictrainerJsonTest(unittest.TestCase):
    """Round-trip (encode -> decode) test for trainer JSON serialization."""
    def setUp(self):
        config = [{
            'operator': '+',
            'variable_num': 2,
            'variable_min': -100,
            'variable_max': 100,
            'variable_decimal_points': 1,
            'result_decimal_points': 1,
        }]
        state = {
            'started_at': time.time(),
            'num_correct_answers': 0,
            'num_incorrect_answers': 0,
        }
        self.trainer = Arithmetictrainer(config, state=state)
    def test_decode_encode(self):
        j = json.dumps(self.trainer.toJsonSerializable())
        decoded_trainer = arithmetictrainerFromJson(j)
        # assertEqual (instead of assertTrue(a == b)) shows both operands
        # on failure; equality relies on Arithmetictrainer.__eq__.
        self.assertEqual(self.trainer, decoded_trainer)
if __name__ == '__main__':
unittest.main() | tests/test_core.py | import unittest
import time
import json
from decimal import Decimal
import context
from arithmetictrainer.core import get_number
from arithmetictrainer.core import get_number_array
from arithmetictrainer.core import Arithmetictrainer
from arithmetictrainer.core import arithmetictrainerFromJson
class GetNumberTest(unittest.TestCase):
    """Tests for the get_number / get_number_array helpers."""
    def test_get_number(self):
        # Degenerate range and negative decimal places must raise.
        self.assertRaises(ValueError, get_number, 2, 2, 1)
        self.assertRaises(ValueError, get_number, 2, 4, -1)
        for i in range(100):
            num = get_number(-100, 100, 1)
            # Never zero, and always within the requested bounds.
            self.assertNotEqual(num, Decimal('0'))
            self.assertTrue(num >= Decimal(-100))
            self.assertTrue(num <= Decimal(100))
    def test_get_number_array(self):
        num = get_number_array(0, -100, 100, 1)
        self.assertEqual(len(num), 0)
        num = get_number_array(1, -100, 100, 1)
        self.assertEqual(len(num), 1)
class ArithmetictrainerTest(unittest.TestCase):
    """Behavioural tests for Arithmetictrainer."""
    def setUp(self):
        # Single '+' exercise over [-100, 100] with one decimal place.
        config = [{
            'operator': '+',
            'variable_num': 2,
            'variable_min': -100,
            'variable_max': 100,
            'variable_decimal_points': 1,
            'result_decimal_points': 1,
        }]
        state = {
            'started_at': time.time(),
            'num_correct_answers': 0,
            'num_incorrect_answers': 0,
        }
        self.trainer = Arithmetictrainer(config, state=state)
    def test__init__(self):
        config = [{
            'operator': '+',
            'variable_num': 2,
            'variable_min': -100,
            'variable_max': 100,
            'variable_decimal_points': 1,
            'result_decimal_points': 1,
        }]
        state = {
            'started_at': time.time(),
            'num_correct_answers': 0,
            'num_incorrect_answers': 0,
        }
        a = Arithmetictrainer(config, state=state)
        # The constructor must keep the provided config/state untouched.
        self.assertTrue(a.getConfig() == config)
        self.assertTrue(a.getState() == state)
    def test_answer(self):
        task = self.trainer.getTask()
        correct_answer = task['correct_answer']
        # Off-by-one from the right answer is guaranteed wrong.
        wrong_answer = str(Decimal(correct_answer) + 1)
        self.assertEqual(0, self.trainer.getState()['num_correct_answers'])
        self.assertFalse(self.trainer.answer(wrong_answer))
        self.assertEqual(0, self.trainer.getState()['num_correct_answers'])
        self.assertTrue(self.trainer.answer(correct_answer))
        self.assertEqual(1, self.trainer.getState()['num_correct_answers'])
class ArithmetictrainerJsonTest(unittest.TestCase):
    """Round-trip JSON (encode -> decode) test for the trainer."""
    def setUp(self):
        config = [{
            'operator': '+',
            'variable_num': 2,
            'variable_min': -100,
            'variable_max': 100,
            'variable_decimal_points': 1,
            'result_decimal_points': 1,
        }]
        state = {
            'started_at': time.time(),
            'num_correct_answers': 0,
            'num_incorrect_answers': 0,
        }
        self.trainer = Arithmetictrainer(config, state=state)
    def test_decode_encode(self):
        j = json.dumps(self.trainer.toJsonSerializable())
        decoded_trainer = arithmetictrainerFromJson(j)
        # Relies on Arithmetictrainer.__eq__ for the deep comparison.
        self.assertTrue(self.trainer == decoded_trainer)
if __name__ == '__main__':
unittest.main() | 0.457379 | 0.492554 |
from dotenv import load_dotenv # pip install python-dotenv
from geopy import distance
from googleplaces import GooglePlaces, types, lang
import json
import os
import pgeocode
import requests as req
load_dotenv()
GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
google_places = GooglePlaces(GOOGLE_API_KEY)
def Closest_Hospitals(Latitude, Longitude, Nearby_Hospital_Results, top_n=3):
    """Return the top_n nearest hospitals, closest first.

    :param Latitude: the user's latitude.
    :param Longitude: the user's longitude.
    :param Nearby_Hospital_Results: entries of the form "Name lat lng"
        (space separated, names underscored), as built by Nearby_Hospitals().
    :param top_n: how many results to return (default 3, matching the
        original behaviour, so existing callers are unaffected).
    :return: list of "distance_km Name lat lng" strings.
    """
    User_Coords = (Latitude, Longitude)
    ranked = []
    # Fix: the previous version iterated range(len(...) - 1), silently
    # dropping the last candidate hospital; every entry is considered now.
    for entry in Nearby_Hospital_Results:
        fields = entry.split(' ')
        Hospital_Coords = (fields[1], fields[2])
        km = distance.distance(Hospital_Coords, User_Coords).km
        ranked.append((km, str(entry)))
    ranked.sort()  # tuples sort by distance (first element) ascending
    # Slicing (instead of indexing positions 0..2) no longer raises
    # IndexError when fewer than top_n hospitals were found.
    return [str(km) + ' ' + entry for km, entry in ranked[:top_n]]
def Nearby_Hospitals(Latitude, Longitude, Radius): # Returns an array of all health center/hospitals within a certain radius
    """Query Google Places around a point and return the closest hospitals.

    Radius is passed straight to the Places API (units presumed metres --
    verify). Results are formatted as "Name lat lng" strings.
    """
    Nearby_Hospital_Results = []
    print("Lat: " + str(Latitude) + " Lng: " + str(Longitude) + " Radius: " + str(Radius))
    query_result = google_places.nearby_search(lat_lng={'lat': Latitude, 'lng': Longitude}, radius = Radius, types = [types.TYPE_HOSPITAL])
    # Google requires showing HTML attributions when they are present.
    if query_result.has_attributions:
        print (query_result.html_attributions)
    for place in query_result.places:
        # Keep only results that look like hospitals/health centres by name.
        if ("Health Centre" in place.name) or ("Hospital" in place.name):
            name = str(place.name).replace(' ', '_')
            Nearby_Hospital_Results.append(name + ' ' + str(place.geo_location['lat']) + ' ' + str(place.geo_location['lng']))
    return Closest_Hospitals(Latitude, Longitude, Nearby_Hospital_Results)
def Area_Code_to_Coordinates(area_code):
    """Resolve a Canadian postal code to coordinates and find hospitals."""
    nomi = pgeocode.Nominatim('ca')
    area_code_data = nomi.query_postal_code(area_code.lower())
    Latitude = area_code_data.get('latitude')
    Longitude = area_code_data.get('longitude')
    print("Lat: " + str(Latitude) + " Long: " + str(Longitude) + "\n")
    # Search within a 20000-unit radius (presumed metres -- verify).
    Three_Closest_Hospitals = Nearby_Hospitals(float(Latitude), float(Longitude), 20000)
return Three_Closest_Hospitals | Hospital_Finder_V1.py | from dotenv import load_dotenv # pip install python-dotenv
from geopy import distance
from googleplaces import GooglePlaces, types, lang
import json
import os
import pgeocode
import requests as req
load_dotenv()
GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
google_places = GooglePlaces(GOOGLE_API_KEY)
def Closest_Hospitals(Latitude, Longitude, Nearby_Hospital_Results):
    """Rank candidate hospitals by distance and return the three closest."""
    # NOTE(review): defined but never used -- sort() below orders by the
    # first element (distance), not by name.
    def Take_Second(elem):
        return elem[1]
    User_Coords = (Latitude, Longitude)
    # NOTE(review): range(len(...) - 1) skips the last candidate hospital.
    Nearby_Hospital_Result_w_Distance = [['$' for x in range(2)] for y in range(len(Nearby_Hospital_Results)-1)]
    for x in range(len(Nearby_Hospital_Results)-1):
        # Entries look like "Name lat lng" (underscored names).
        current = Nearby_Hospital_Results[x].split(' ')
        Hospital_Coords = (current[1], current[2])
        Nearby_Hospital_Result_w_Distance[x][0] = distance.distance(Hospital_Coords, User_Coords).km
        Nearby_Hospital_Result_w_Distance[x][1] = str(Nearby_Hospital_Results[x])
    Nearby_Hospital_Result_w_Distance.sort()
    Three_Closest_Hospitals = []
    # NOTE(review): raises IndexError when fewer than 3 hospitals remain.
    for x in range(3):
        Three_Closest_Hospitals.append(str(Nearby_Hospital_Result_w_Distance[x][0]) + ' ' + Nearby_Hospital_Result_w_Distance[x][1])
    return Three_Closest_Hospitals
def Nearby_Hospitals(Latitude, Longitude, Radius): # Returns an array of all health center/hospitals within a certain radius
    """Query Google Places for hospitals around a point, return 3 closest."""
    Nearby_Hospital_Results = []
    print("Lat: " + str(Latitude) + " Lng: " + str(Longitude) + " Radius: " + str(Radius))
    query_result = google_places.nearby_search(lat_lng={'lat': Latitude, 'lng': Longitude}, radius = Radius, types = [types.TYPE_HOSPITAL])
    # Google requires showing HTML attributions when present.
    if query_result.has_attributions:
        print (query_result.html_attributions)
    for place in query_result.places:
        # Keep only results that look like hospitals/health centres by name.
        if ("Health Centre" in place.name) or ("Hospital" in place.name):
            name = str(place.name).replace(' ', '_')
            Nearby_Hospital_Results.append(name + ' ' + str(place.geo_location['lat']) + ' ' + str(place.geo_location['lng']))
    return Closest_Hospitals(Latitude, Longitude, Nearby_Hospital_Results)
def Area_Code_to_Coordinates(area_code):
    """Resolve a Canadian postal code to coordinates and find hospitals."""
    nomi = pgeocode.Nominatim('ca')
    area_code_data = nomi.query_postal_code(area_code.lower())
    Latitude = area_code_data.get('latitude')
    Longitude = area_code_data.get('longitude')
    print("Lat: " + str(Latitude) + " Long: " + str(Longitude) + "\n")
    # Search within a 20000-unit radius (presumed metres -- verify).
    Three_Closest_Hospitals = Nearby_Hospitals(float(Latitude), float(Longitude), 20000)
return Three_Closest_Hospitals | 0.480966 | 0.230833 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import torchvision.ops as ops
from models.resnet import resnet50_backbone
from models.modules import Flatten, FeatureBranch, CNNEncoder, FeatureBranch2, CNNEncoderGroupNorm, CNNEncoderGroupNorm2, CNNEncoderGroupNorm3, FeatureBlockGroupNorm
class Fixed(nn.Module):
    """Trivial baseline: predict a constant 1.0 for every element."""
    def __init__(self, args):
        super().__init__()
    def forward(self, batch):
        template = batch["labels"][0]
        ones = torch.ones_like(template, device=batch["labels"].device)
        return ones.float()
class IoU(nn.Module):
    """Baseline: predict positive wherever the mask IoU exceeds zero."""
    def __init__(self, args):
        super().__init__()
    def forward(self, batch):
        ious = batch["mask_ious"][0]
        positive = ious > 0
        return positive.float()
class UnionLSTMHO(nn.Module):
    """Two-stream (RGB + optical flow) hand/object sequence classifier.

    Per-frame union-crop features are fused with an encoding of the four
    hand/object mask layouts, then a bidirectional LSTM with learnable
    initial states scores every frame; forward() returns per-frame
    sigmoid probabilities.
    """
    def __init__(self, args, lrelu=False):
        super().__init__()
        # Appearance stream over the RGB union crop.
        self.rgb_encoder = nn.Sequential(
            FeatureBlockGroupNorm(3, 32, 32, no_pool=True, lrelu=lrelu), # 1x
            FeatureBlockGroupNorm(32, 64, 64, lrelu=lrelu), # 1x->2x
            FeatureBlockGroupNorm(64, 128, 128, no_pool=True, lrelu=lrelu), # 2x
        )
        # Motion stream over the optical-flow union crop.
        self.flow_encoder = nn.Sequential(
            FeatureBlockGroupNorm(3, 32, 32, lrelu=lrelu), # 1->2x
            FeatureBlockGroupNorm(32, 128, 128, no_pool=True, lrelu=lrelu), # 2x
        )
        # Joint encoder over channel-concatenated streams -> 256-d vector.
        self.fusion_encoder = nn.Sequential(
            FeatureBlockGroupNorm(256, 256, 256, lrelu=lrelu), # 2->4x
            FeatureBlockGroupNorm(256, 256, 256, lrelu=lrelu), # 4->8x
            nn.AdaptiveAvgPool2d((1, 1)),
            Flatten()
        )
        # Encodes the 4 stacked masks (hand/obj/other-hand/other-bbox)
        # into a 64-d spatial layout descriptor.
        self.spatial_module = nn.Sequential(
            nn.Conv2d(4, 96, kernel_size=5, padding=2, stride=2),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Conv2d(96, 128, kernel_size=5, padding=2, stride=2),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Conv2d(128, 64, kernel_size=8)
        )
        self.fc1 = nn.Sequential(
            nn.Linear(256 + 64, 128),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
        )
        self.lstm = nn.LSTM(128, 64, num_layers=args.nb_layers, bidirectional=True)
        self.fc2 = nn.Sequential(
            nn.Linear(128, 64),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Linear(64, 32),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Linear(32, 1)
        )
        # Learnable LSTM initial hidden/cell states (layers * 2 directions).
        h_0 = torch.zeros((args.nb_layers * 2, 1, 64), dtype=torch.float32)
        c_0 = torch.zeros((args.nb_layers * 2, 1, 64), dtype=torch.float32)
        self.h_0 = nn.Parameter(h_0, requires_grad=True)
        self.c_0 = nn.Parameter(c_0, requires_grad=True)
    def forward(self, batch):
        # [0] drops the leading batch axis -- assumes batch size 1 with
        # frames as the first remaining axis; TODO confirm against loader.
        h_rgb = self.rgb_encoder(batch["union_imgs"][0])
        h_flow = self.flow_encoder(batch["union_flows"][0])
        hs = self.fusion_encoder(torch.cat((h_rgb, h_flow), dim=1))
        dual_masks = torch.stack((
            batch["hand_masks"][0],
            batch["obj_bbox_masks"][0],
            batch["other_hand_masks"][0],
            batch["other_bbox_masks"][0]
        ), dim=1)
        h_spa = torch.flatten(self.spatial_module(dual_masks), 1)
        hs = torch.cat((hs, h_spa), dim=1)
        hs = self.fc1(hs)
        # Frames act as the LSTM time axis; unsqueeze adds a batch dim of 1.
        hs, _ = self.lstm(hs.unsqueeze(1), (self.h_0, self.c_0))
        hs = self.fc2(hs.squeeze(1))
        return torch.sigmoid(hs)[..., 0]
class UnionLSTMHORGB(nn.Module):
    """RGB-only variant of UnionLSTMHO: appearance + masks + BiLSTM scorer."""
    def __init__(self, args, lrelu=False):
        super().__init__()
        # Single appearance stream over the RGB union crop.
        self.rgb_encoder = nn.Sequential(
            FeatureBlockGroupNorm(3, 32, 32, lrelu=lrelu), # 1->2x
            FeatureBlockGroupNorm(32, 64, 64, no_pool=True, lrelu=lrelu), # 2->4x
            FeatureBlockGroupNorm(64, 128, 128, lrelu=lrelu), # 1->2x
            FeatureBlockGroupNorm(128, 128, 128, no_pool=True, lrelu=lrelu), # 2->4x
        )
        self.fusion_encoder = nn.Sequential(
            FeatureBlockGroupNorm(128, 256, 256, lrelu=lrelu), # 1->2x
            FeatureBlockGroupNorm(256, 256, 256, lrelu=lrelu), # 1->2x
            nn.AdaptiveAvgPool2d((1, 1)),
            Flatten()
        )
        # 4 stacked masks -> 64-d spatial layout descriptor.
        self.spatial_module = nn.Sequential(
            nn.Conv2d(4, 96, kernel_size=5, padding=2, stride=2),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Conv2d(96, 128, kernel_size=5, padding=2, stride=2),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Conv2d(128, 64, kernel_size=8)
        )
        self.fc1 = nn.Sequential(
            nn.Linear(256 + 64, 128),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
        )
        self.lstm = nn.LSTM(128, 64, num_layers=args.nb_layers, bidirectional=True)
        self.fc2 = nn.Sequential(
            nn.Linear(128, 64),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Linear(64, 32),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Linear(32, 1)
        )
        # Learnable LSTM initial hidden/cell states.
        h_0 = torch.zeros((args.nb_layers * 2, 1, 64), dtype=torch.float32)
        c_0 = torch.zeros((args.nb_layers * 2, 1, 64), dtype=torch.float32)
        self.h_0 = nn.Parameter(h_0, requires_grad=True)
        self.c_0 = nn.Parameter(c_0, requires_grad=True)
    def forward(self, batch):
        # [0] drops the leading batch axis (assumes batch size 1 -- TODO confirm).
        h_rgb = self.rgb_encoder(batch["union_imgs"][0])
        hs = self.fusion_encoder(h_rgb)
        dual_masks = torch.stack((
            batch["hand_masks"][0],
            batch["obj_bbox_masks"][0],
            batch["other_hand_masks"][0],
            batch["other_bbox_masks"][0]
        ), dim=1)
        h_spa = torch.flatten(self.spatial_module(dual_masks), 1)
        hs = torch.cat((hs, h_spa), dim=1)
        hs = self.fc1(hs)
        hs, _ = self.lstm(hs.unsqueeze(1), (self.h_0, self.c_0))
        hs = self.fc2(hs.squeeze(1))
        return torch.sigmoid(hs)[..., 0]
class UnionLSTMHOFlow(nn.Module):
    """Flow-only variant of UnionLSTMHO: motion + masks + BiLSTM scorer."""
    def __init__(self, args, lrelu=False):
        super().__init__()
        # Single motion stream over the optical-flow union crop.
        self.flow_encoder = nn.Sequential(
            FeatureBlockGroupNorm(3, 32, 32, lrelu=lrelu), # 1->2x
            FeatureBlockGroupNorm(32, 128, 128, no_pool=True, lrelu=lrelu), # 2->4x
        )
        self.fusion_encoder = nn.Sequential(
            FeatureBlockGroupNorm(128, 256, 256, lrelu=lrelu), # 1->2x
            FeatureBlockGroupNorm(256, 256, 256, lrelu=lrelu), # 1->2x
            nn.AdaptiveAvgPool2d((1, 1)),
            Flatten()
        )
        # 4 stacked masks -> 64-d spatial layout descriptor.
        self.spatial_module = nn.Sequential(
            nn.Conv2d(4, 96, kernel_size=5, padding=2, stride=2),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Conv2d(96, 128, kernel_size=5, padding=2, stride=2),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Conv2d(128, 64, kernel_size=8)
        )
        self.fc1 = nn.Sequential(
            nn.Linear(256 + 64, 128),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
        )
        self.lstm = nn.LSTM(128, 64, num_layers=args.nb_layers, bidirectional=True)
        self.fc2 = nn.Sequential(
            nn.Linear(128, 64),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Linear(64, 32),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Linear(32, 1)
        )
        # Learnable LSTM initial hidden/cell states.
        h_0 = torch.zeros((args.nb_layers * 2, 1, 64), dtype=torch.float32)
        c_0 = torch.zeros((args.nb_layers * 2, 1, 64), dtype=torch.float32)
        self.h_0 = nn.Parameter(h_0, requires_grad=True)
        self.c_0 = nn.Parameter(c_0, requires_grad=True)
    def forward(self, batch):
        # [0] drops the leading batch axis (assumes batch size 1 -- TODO confirm).
        h_flow = self.flow_encoder(batch["union_flows"][0])
        hs = self.fusion_encoder(h_flow)
        dual_masks = torch.stack((
            batch["hand_masks"][0],
            batch["obj_bbox_masks"][0],
            batch["other_hand_masks"][0],
            batch["other_bbox_masks"][0]
        ), dim=1)
        h_spa = torch.flatten(self.spatial_module(dual_masks), 1)
        hs = torch.cat((hs, h_spa), dim=1)
        hs = self.fc1(hs)
        hs, _ = self.lstm(hs.unsqueeze(1), (self.h_0, self.c_0))
        hs = self.fc2(hs.squeeze(1))
return torch.sigmoid(hs)[..., 0] | models/baseline.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import torchvision.ops as ops
from models.resnet import resnet50_backbone
from models.modules import Flatten, FeatureBranch, CNNEncoder, FeatureBranch2, CNNEncoderGroupNorm, CNNEncoderGroupNorm2, CNNEncoderGroupNorm3, FeatureBlockGroupNorm
class Fixed(nn.Module):
    """Trivial baseline that predicts a constant 1.0 for every element."""
    def __init__(self, args):
        super().__init__()
    def forward(self, batch):
        # Same shape/device as the first label sequence in the batch.
        return torch.ones_like(batch["labels"][0], device=batch["labels"].device).float()
class IoU(nn.Module):
    """Baseline that predicts positive wherever the mask IoU is > 0."""
    def __init__(self, args):
        super().__init__()
    def forward(self, batch):
        # Boolean comparison -> float {0.0, 1.0} predictions.
        return (batch["mask_ious"][0] > 0).float()
class UnionLSTMHO(nn.Module):
    """Two-stream (RGB + optical flow) hand/object sequence classifier.

    Frame features are fused with a 4-mask layout encoding and scored by
    a bidirectional LSTM; forward() returns per-frame sigmoid scores.
    """
    def __init__(self, args, lrelu=False):
        super().__init__()
        self.rgb_encoder = nn.Sequential(
            FeatureBlockGroupNorm(3, 32, 32, no_pool=True, lrelu=lrelu), # 1x
            FeatureBlockGroupNorm(32, 64, 64, lrelu=lrelu), # 1x->2x
            FeatureBlockGroupNorm(64, 128, 128, no_pool=True, lrelu=lrelu), # 2x
        )
        self.flow_encoder = nn.Sequential(
            FeatureBlockGroupNorm(3, 32, 32, lrelu=lrelu), # 1->2x
            FeatureBlockGroupNorm(32, 128, 128, no_pool=True, lrelu=lrelu), # 2x
        )
        self.fusion_encoder = nn.Sequential(
            FeatureBlockGroupNorm(256, 256, 256, lrelu=lrelu), # 2->4x
            FeatureBlockGroupNorm(256, 256, 256, lrelu=lrelu), # 4->8x
            nn.AdaptiveAvgPool2d((1, 1)),
            Flatten()
        )
        self.spatial_module = nn.Sequential(
            nn.Conv2d(4, 96, kernel_size=5, padding=2, stride=2),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Conv2d(96, 128, kernel_size=5, padding=2, stride=2),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Conv2d(128, 64, kernel_size=8)
        )
        self.fc1 = nn.Sequential(
            nn.Linear(256 + 64, 128),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
        )
        self.lstm = nn.LSTM(128, 64, num_layers=args.nb_layers, bidirectional=True)
        self.fc2 = nn.Sequential(
            nn.Linear(128, 64),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Linear(64, 32),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Linear(32, 1)
        )
        # Learnable LSTM initial hidden/cell states.
        h_0 = torch.zeros((args.nb_layers * 2, 1, 64), dtype=torch.float32)
        c_0 = torch.zeros((args.nb_layers * 2, 1, 64), dtype=torch.float32)
        self.h_0 = nn.Parameter(h_0, requires_grad=True)
        self.c_0 = nn.Parameter(c_0, requires_grad=True)
    def forward(self, batch):
        # [0] drops the leading batch axis (assumes batch size 1 -- TODO confirm).
        h_rgb = self.rgb_encoder(batch["union_imgs"][0])
        h_flow = self.flow_encoder(batch["union_flows"][0])
        hs = self.fusion_encoder(torch.cat((h_rgb, h_flow), dim=1))
        dual_masks = torch.stack((
            batch["hand_masks"][0],
            batch["obj_bbox_masks"][0],
            batch["other_hand_masks"][0],
            batch["other_bbox_masks"][0]
        ), dim=1)
        h_spa = torch.flatten(self.spatial_module(dual_masks), 1)
        hs = torch.cat((hs, h_spa), dim=1)
        hs = self.fc1(hs)
        hs, _ = self.lstm(hs.unsqueeze(1), (self.h_0, self.c_0))
        hs = self.fc2(hs.squeeze(1))
        return torch.sigmoid(hs)[..., 0]
class UnionLSTMHORGB(nn.Module):
    """RGB-only variant of UnionLSTMHO (no optical-flow stream)."""
    def __init__(self, args, lrelu=False):
        super().__init__()
        self.rgb_encoder = nn.Sequential(
            FeatureBlockGroupNorm(3, 32, 32, lrelu=lrelu), # 1->2x
            FeatureBlockGroupNorm(32, 64, 64, no_pool=True, lrelu=lrelu), # 2->4x
            FeatureBlockGroupNorm(64, 128, 128, lrelu=lrelu), # 1->2x
            FeatureBlockGroupNorm(128, 128, 128, no_pool=True, lrelu=lrelu), # 2->4x
        )
        self.fusion_encoder = nn.Sequential(
            FeatureBlockGroupNorm(128, 256, 256, lrelu=lrelu), # 1->2x
            FeatureBlockGroupNorm(256, 256, 256, lrelu=lrelu), # 1->2x
            nn.AdaptiveAvgPool2d((1, 1)),
            Flatten()
        )
        self.spatial_module = nn.Sequential(
            nn.Conv2d(4, 96, kernel_size=5, padding=2, stride=2),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Conv2d(96, 128, kernel_size=5, padding=2, stride=2),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Conv2d(128, 64, kernel_size=8)
        )
        self.fc1 = nn.Sequential(
            nn.Linear(256 + 64, 128),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
        )
        self.lstm = nn.LSTM(128, 64, num_layers=args.nb_layers, bidirectional=True)
        self.fc2 = nn.Sequential(
            nn.Linear(128, 64),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Linear(64, 32),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Linear(32, 1)
        )
        # Learnable LSTM initial hidden/cell states.
        h_0 = torch.zeros((args.nb_layers * 2, 1, 64), dtype=torch.float32)
        c_0 = torch.zeros((args.nb_layers * 2, 1, 64), dtype=torch.float32)
        self.h_0 = nn.Parameter(h_0, requires_grad=True)
        self.c_0 = nn.Parameter(c_0, requires_grad=True)
    def forward(self, batch):
        # [0] drops the leading batch axis (assumes batch size 1 -- TODO confirm).
        h_rgb = self.rgb_encoder(batch["union_imgs"][0])
        hs = self.fusion_encoder(h_rgb)
        dual_masks = torch.stack((
            batch["hand_masks"][0],
            batch["obj_bbox_masks"][0],
            batch["other_hand_masks"][0],
            batch["other_bbox_masks"][0]
        ), dim=1)
        h_spa = torch.flatten(self.spatial_module(dual_masks), 1)
        hs = torch.cat((hs, h_spa), dim=1)
        hs = self.fc1(hs)
        hs, _ = self.lstm(hs.unsqueeze(1), (self.h_0, self.c_0))
        hs = self.fc2(hs.squeeze(1))
        return torch.sigmoid(hs)[..., 0]
class UnionLSTMHOFlow(nn.Module):
    """Optical-flow-only variant of UnionLSTMHO (no RGB stream)."""
    def __init__(self, args, lrelu=False):
        super().__init__()
        self.flow_encoder = nn.Sequential(
            FeatureBlockGroupNorm(3, 32, 32, lrelu=lrelu), # 1->2x
            FeatureBlockGroupNorm(32, 128, 128, no_pool=True, lrelu=lrelu), # 2->4x
        )
        self.fusion_encoder = nn.Sequential(
            FeatureBlockGroupNorm(128, 256, 256, lrelu=lrelu), # 1->2x
            FeatureBlockGroupNorm(256, 256, 256, lrelu=lrelu), # 1->2x
            nn.AdaptiveAvgPool2d((1, 1)),
            Flatten()
        )
        self.spatial_module = nn.Sequential(
            nn.Conv2d(4, 96, kernel_size=5, padding=2, stride=2),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Conv2d(96, 128, kernel_size=5, padding=2, stride=2),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Conv2d(128, 64, kernel_size=8)
        )
        self.fc1 = nn.Sequential(
            nn.Linear(256 + 64, 128),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
        )
        self.lstm = nn.LSTM(128, 64, num_layers=args.nb_layers, bidirectional=True)
        self.fc2 = nn.Sequential(
            nn.Linear(128, 64),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Linear(64, 32),
            nn.LeakyReLU() if lrelu else nn.ReLU(),
            nn.Linear(32, 1)
        )
        # Learnable LSTM initial hidden/cell states.
        h_0 = torch.zeros((args.nb_layers * 2, 1, 64), dtype=torch.float32)
        c_0 = torch.zeros((args.nb_layers * 2, 1, 64), dtype=torch.float32)
        self.h_0 = nn.Parameter(h_0, requires_grad=True)
        self.c_0 = nn.Parameter(c_0, requires_grad=True)
    def forward(self, batch):
        # [0] drops the leading batch axis (assumes batch size 1 -- TODO confirm).
        h_flow = self.flow_encoder(batch["union_flows"][0])
        hs = self.fusion_encoder(h_flow)
        dual_masks = torch.stack((
            batch["hand_masks"][0],
            batch["obj_bbox_masks"][0],
            batch["other_hand_masks"][0],
            batch["other_bbox_masks"][0]
        ), dim=1)
        h_spa = torch.flatten(self.spatial_module(dual_masks), 1)
        hs = torch.cat((hs, h_spa), dim=1)
        hs = self.fc1(hs)
        hs, _ = self.lstm(hs.unsqueeze(1), (self.h_0, self.c_0))
        hs = self.fc2(hs.squeeze(1))
return torch.sigmoid(hs)[..., 0] | 0.956156 | 0.357848 |
from sklearn.metrics import classification_report, accuracy_score, precision_recall_fscore_support
import math
def eval_singlemodel(ReasonerObj, eval_d, method, K=1):
    """Accumulate evaluation metrics for one method into eval_d.

    K == 1 scores the top-1 prediction of each ranking (accuracy plus
    macro/weighted P, R, F1); any other K delegates to eval_ranking()
    for ranking-quality metrics. Returns the updated eval_d.
    """
    if K != 1:
        # eval quality of the top-K ranking instead of top-1
        return eval_ranking(ReasonerObj, K, eval_d, method)
    # eval top-1 of each ranking; string labels to match y_true's format
    y_pred = ReasonerObj.predictions[:, 0, 0].astype('int').astype('str').tolist()
    y_true = ReasonerObj.labels
    global_acc = accuracy_score(y_true, y_pred)
    print(classification_report(y_true, y_pred, digits=4))
    print(global_acc)
    Pu, Ru, F1u, _ = precision_recall_fscore_support(y_true, y_pred, average='macro')
    Pw, Rw, F1w, _ = precision_recall_fscore_support(y_true, y_pred, average='weighted')
    metrics = [('accuracy', global_acc),
               ('Punweighted', Pu), ('Runweighted', Ru), ('F1unweighted', F1u),
               ('Pweighted', Pw), ('Rweighted', Rw), ('F1weighted', F1w)]
    for key, value in metrics:
        # setdefault replaces the try/except-KeyError append pattern
        eval_d[method].setdefault(key, []).append(value)
    return eval_d
def eval_ranking(ReasonerObj,K,eval_d,method):
    """
    Prints mean Precision@K, mean nDCG@K and hit ratio @ K
    """
    # Top-K predicted class ids per sample, as strings to match the labels.
    y_pred = ReasonerObj.predictions[:, :K, 0].astype('int').astype('str').tolist()
    y_true = ReasonerObj.labels
    precisions = []
    ndcgs = []
    hits = 0
    IDCG = 0. # Ideal DCG
    # Ideal DCG: every one of the K positions relevant, discount 1/log2(rank+1).
    for n in range(2, K + 2):
        IDCG += float(1 / math.log(n, 2))
    # NOTE(review): loop index z is unused; a plain zip would suffice.
    for z, (ranking, gt_label) in enumerate(zip(y_pred, y_true)):
        pred_rank = [1 if r == gt_label else 0 for r in ranking]
        # Discounted gains at each position where the ground truth appears.
        dis_scores = [float(1 / math.log(i + 2, 2)) for i, r in enumerate(ranking) if r == gt_label]
        no_hits = pred_rank.count(1)
        precisions.append(float(no_hits / K))
        if no_hits >= 1:
            hits += 1 # increment if at least one hit in the ranking
        nDCG = float(sum(dis_scores) / IDCG) # compute nDCG for ranking
        ndcgs.append(nDCG)
    print("Avg ranking Precision@%i: %f " % (K, float(sum(precisions) / len(precisions))))
    print("Avg Normalised DCG @%i: %f" % (K, float(sum(ndcgs) / len(precisions))))
    print("Hit ratio @%i: %f" % (K, float(hits / len(precisions))))
    # Append each aggregate to eval_d[method], creating lists on first use.
    for k,metr in [('meanP@K', float(sum(precisions) / len(precisions))), ('meannDCG@K', float(sum(ndcgs) / len(precisions))) \
        , ('hitratio', float(hits / len(precisions)))]:
        try: eval_d[method][k].append(metr)
        except KeyError:
            eval_d[method][k] = []
            eval_d[method][k].append(metr)
return eval_d | evalscript.py | from sklearn.metrics import classification_report, accuracy_score, precision_recall_fscore_support
import math
def eval_singlemodel(ReasonerObj,eval_d,method, K=1):
    """Accumulate evaluation metrics for one method into eval_d.

    K == 1 scores the top-1 prediction; any other K delegates to
    eval_ranking() for ranking-quality metrics. Returns eval_d.
    """
    if K==1:
        # eval top-1 of each ranking
        y_pred = ReasonerObj.predictions[:, 0, 0].astype('int').astype('str').tolist()
        y_true = ReasonerObj.labels
        global_acc = accuracy_score(y_true, y_pred)
        print(classification_report(y_true, y_pred, digits=4))
        print(global_acc)
        Pu,Ru, F1u, _ = precision_recall_fscore_support(y_true, y_pred, average='macro')
        Pw, Rw, F1w, _ = precision_recall_fscore_support(y_true, y_pred, average='weighted')
        # Append each metric to eval_d[method], creating lists on first use.
        for k,metr in [('accuracy',global_acc),('Punweighted',Pu),('Runweighted',Ru),('F1unweighted',F1u), ('Pweighted',Pw),('Rweighted',Rw),('F1weighted',F1w)]:
            try:eval_d[method][k].append(metr)
            except KeyError:
                eval_d[method][k] =[]
                eval_d[method][k].append(metr)
        return eval_d
    else:#eval quality of top-K ranking
        return eval_ranking(ReasonerObj, K, eval_d,method)
def eval_ranking(ReasonerObj,K,eval_d,method):
    """
    Prints mean Precision@K, mean nDCG@K and hit ratio @ K
    """
    # Top-K predicted class ids per sample, as strings to match the labels.
    y_pred = ReasonerObj.predictions[:, :K, 0].astype('int').astype('str').tolist()
    y_true = ReasonerObj.labels
    precisions = []
    ndcgs = []
    hits = 0
    IDCG = 0. # Ideal DCG
    # Ideal DCG: every one of the K positions relevant, discount 1/log2(rank+1).
    for n in range(2, K + 2):
        IDCG += float(1 / math.log(n, 2))
    # NOTE(review): loop index z is unused; a plain zip would suffice.
    for z, (ranking, gt_label) in enumerate(zip(y_pred, y_true)):
        pred_rank = [1 if r == gt_label else 0 for r in ranking]
        # Discounted gains at each position where the ground truth appears.
        dis_scores = [float(1 / math.log(i + 2, 2)) for i, r in enumerate(ranking) if r == gt_label]
        no_hits = pred_rank.count(1)
        precisions.append(float(no_hits / K))
        if no_hits >= 1:
            hits += 1 # increment if at least one hit in the ranking
        nDCG = float(sum(dis_scores) / IDCG) # compute nDCG for ranking
        ndcgs.append(nDCG)
    print("Avg ranking Precision@%i: %f " % (K, float(sum(precisions) / len(precisions))))
    print("Avg Normalised DCG @%i: %f" % (K, float(sum(ndcgs) / len(precisions))))
    print("Hit ratio @%i: %f" % (K, float(hits / len(precisions))))
    # Append each aggregate to eval_d[method], creating lists on first use.
    for k,metr in [('meanP@K', float(sum(precisions) / len(precisions))), ('meannDCG@K', float(sum(ndcgs) / len(precisions))) \
        , ('hitratio', float(hits / len(precisions)))]:
        try: eval_d[method][k].append(metr)
        except KeyError:
            eval_d[method][k] = []
            eval_d[method][k].append(metr)
return eval_d | 0.448426 | 0.297285 |
import functools
import logging
import numpy as np
from django.conf import settings
from django.db import models
from django.utils import timezone
logger = logging.getLogger(__name__)
class AccessLogMixin(models.Model):
    """Abstract base model which logs access of information.

    Each row records which user accessed the data and when; the timestamp
    is auto-filled at construction time when not explicitly provided.
    """
    # The user which accessed the data.
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             db_index=True,
                             on_delete=models.CASCADE)
    # Timestamp of the access.
    timestamp = models.DateTimeField(db_index=True)

    class Meta:
        abstract = True
        # NOTE(review): index_together is deprecated in recent Django
        # versions in favor of Meta.indexes; confirm target version.
        index_together = (('user', 'timestamp'), )

    def __init__(self, *args, **kwargs):
        super(AccessLogMixin, self).__init__(*args, **kwargs)
        # Auto-fill the access time so callers don't have to supply it.
        if self.timestamp is None:
            self.timestamp = timezone.now()

    @classmethod
    def by_user(cls, user, start_time=None, end_time=None):
        """Gets the time-sorted access logs for the given user.

        Args:
            user: The user to get the access log for.
            start_time: Optional. Inclusive start time.
            end_time: Optional. Exclusive end time.
        Returns:
            A queryset of access log objects for the given user sorted by
            timestamp.
        """
        query = cls.objects.filter(user_id=user.pk)
        if start_time:
            query = query.filter(timestamp__gte=start_time)
        if end_time:
            query = query.filter(timestamp__lt=end_time)
        return query.order_by('timestamp')

    @classmethod
    def last_for_user(cls, user, start_time=None, end_time=None):
        """Gets the last access log for the user.

        Args:
            user: The user to get the access log for.
            start_time: Optional. Inclusive start time.
            end_time: Optional. Exclusive end time.
        Returns:
            The last access log for the user, or None when there is none.
        """
        return cls.by_user(user, start_time, end_time).last()

    @classmethod
    def by_time_period(cls, user, time_periods):
        """Gets one time-sorted log queryset per given time period.

        Returns the full set of logs for each TimePeriod; overlapping
        TimePeriods may therefore yield duplicate logs across entries.

        Args:
            user: The user to get the access log for.
            time_periods: A list of TimePeriod objects.
        Returns:
            A list with one entry per TimePeriod, each holding all logs
            falling inside that period.
        """
        return [cls.by_user(user, p.start, p.end) for p in time_periods]

    @classmethod
    def rates(cls, user, time_periods, time_period_logs=None):
        """Gets the max and average gap between consecutive access logs.

        Args:
            user: The user to get the access log rates for.
            time_periods: A list of TimePeriod objects. Note: to avoid
                computing rates with duplicate logs, ensure that all
                time periods are non-overlapping.
            time_period_logs: Optional. A sequence of log sequences, one
                per TimePeriod. If None, obtained via by_time_period().
        Returns:
            A (max, avg) tuple in seconds, or (None, None) when no periods
            are given or any period is open-ended.
        """
        # Check that time periods were provided.
        if not time_periods:
            return (None, None)
        # Check that all time periods are closed (finite duration).
        for time_period in time_periods:
            if time_period.duration() is None:
                return (None, None)
        # If logs were not provided, obtain them.
        if not time_period_logs:
            time_period_logs = cls.by_time_period(user, time_periods)

        # Yields the gaps (seconds) between consecutive events inside each
        # period, including period start -> first log and last log -> end.
        def time_between_logs(time_periods, time_period_logs):
            for ix, period in enumerate(time_periods):
                prev_time = period.start
                for log in time_period_logs[ix]:
                    yield (log.timestamp - prev_time).total_seconds()
                    prev_time = log.timestamp
                yield (period.end - prev_time).total_seconds()

        # Single pass: track (max gap, sum of gaps, gap count). Each period
        # contributes at least one gap, so the count is positive here.
        (m, s, c) = functools.reduce(
            lambda r, d: (max(r[0], d), r[1] + d, r[2] + 1),
            time_between_logs(time_periods, time_period_logs), (0.0, 0.0, 0))
        return (m, s / c)
import functools
import logging
import numpy as np
from django.conf import settings
from django.db import models
from django.utils import timezone
logger = logging.getLogger(__name__)
class AccessLogMixin(models.Model):
    """Abstract base model which logs access of information.

    Each row records which user accessed the data and when; the timestamp
    is auto-filled at construction time when not explicitly provided.
    """
    # The user which accessed the data.
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             db_index=True,
                             on_delete=models.CASCADE)
    # Timestamp of the access.
    timestamp = models.DateTimeField(db_index=True)

    class Meta:
        abstract = True
        # NOTE(review): index_together is deprecated in recent Django
        # versions in favor of Meta.indexes; confirm target version.
        index_together = (('user', 'timestamp'), )

    def __init__(self, *args, **kwargs):
        super(AccessLogMixin, self).__init__(*args, **kwargs)
        # Auto-fill the access time so callers don't have to supply it.
        if self.timestamp is None:
            self.timestamp = timezone.now()

    @classmethod
    def by_user(cls, user, start_time=None, end_time=None):
        """Gets the time-sorted access logs for the given user.

        Args:
            user: The user to get the access log for.
            start_time: Optional. Inclusive start time.
            end_time: Optional. Exclusive end time.
        Returns:
            A queryset of access log objects for the given user sorted by
            timestamp.
        """
        query = cls.objects.filter(user_id=user.pk)
        if start_time:
            query = query.filter(timestamp__gte=start_time)
        if end_time:
            query = query.filter(timestamp__lt=end_time)
        return query.order_by('timestamp')

    @classmethod
    def last_for_user(cls, user, start_time=None, end_time=None):
        """Gets the last access log for the user.

        Args:
            user: The user to get the access log for.
            start_time: Optional. Inclusive start time.
            end_time: Optional. Exclusive end time.
        Returns:
            The last access log for the user, or None when there is none.
        """
        return cls.by_user(user, start_time, end_time).last()

    @classmethod
    def by_time_period(cls, user, time_periods):
        """Gets one time-sorted log queryset per given time period.

        Returns the full set of logs for each TimePeriod; overlapping
        TimePeriods may therefore yield duplicate logs across entries.

        Args:
            user: The user to get the access log for.
            time_periods: A list of TimePeriod objects.
        Returns:
            A list with one entry per TimePeriod, each holding all logs
            falling inside that period.
        """
        return [cls.by_user(user, p.start, p.end) for p in time_periods]

    @classmethod
    def rates(cls, user, time_periods, time_period_logs=None):
        """Gets the max and average gap between consecutive access logs.

        Args:
            user: The user to get the access log rates for.
            time_periods: A list of TimePeriod objects. Note: to avoid
                computing rates with duplicate logs, ensure that all
                time periods are non-overlapping.
            time_period_logs: Optional. A sequence of log sequences, one
                per TimePeriod. If None, obtained via by_time_period().
        Returns:
            A (max, avg) tuple in seconds, or (None, None) when no periods
            are given or any period is open-ended.
        """
        # Check that time periods were provided.
        if not time_periods:
            return (None, None)
        # Check that all time periods are closed (finite duration).
        for time_period in time_periods:
            if time_period.duration() is None:
                return (None, None)
        # If logs were not provided, obtain them.
        if not time_period_logs:
            time_period_logs = cls.by_time_period(user, time_periods)

        # Yields the gaps (seconds) between consecutive events inside each
        # period, including period start -> first log and last log -> end.
        def time_between_logs(time_periods, time_period_logs):
            for ix, period in enumerate(time_periods):
                prev_time = period.start
                for log in time_period_logs[ix]:
                    yield (log.timestamp - prev_time).total_seconds()
                    prev_time = log.timestamp
                yield (period.end - prev_time).total_seconds()

        # Single pass: track (max gap, sum of gaps, gap count). Each period
        # contributes at least one gap, so the count is positive here.
        (m, s, c) = functools.reduce(
            lambda r, d: (max(r[0], d), r[1] + d, r[2] + 1),
            time_between_logs(time_periods, time_period_logs), (0.0, 0.0, 0))
        return (m, s / c)
import torch
import torch.nn as nn
import pyro.distributions as dist
from pyro.nn import PyroModule
import tyxe
def test_iid():
    """IIDPrior replaces the Linear weight with an Independent Normal(0, 1) sample site."""
    l = PyroModule[nn.Linear](3, 2, bias=False)
    prior = tyxe.priors.IIDPrior(dist.Normal(0, 1))
    prior.apply_(l)
    p = l._pyro_samples["weight"]
    assert isinstance(p, dist.Independent)
    assert isinstance(p.base_dist, dist.Normal)
    assert p.base_dist.loc.allclose(torch.tensor(0.))
    assert p.base_dist.scale.allclose(torch.tensor(1.))
def test_layerwise_normal_kaiming():
    """Kaiming method: prior std is sqrt(2 / fan_in) for a 3-input layer."""
    l = PyroModule[nn.Linear](3, 2, bias=False)
    prior = tyxe.priors.LayerwiseNormalPrior(method="kaiming")
    prior.apply_(l)
    p = l._pyro_samples["weight"]
    assert p.base_dist.scale.allclose(torch.tensor((2 / 3.) ** 0.5))
def test_layerwise_normal_radford():
    """Radford method: prior std is 1 / sqrt(fan_in) for a 3-input layer."""
    l = PyroModule[nn.Linear](3, 2, bias=False)
    prior = tyxe.priors.LayerwiseNormalPrior(method="radford")
    prior.apply_(l)
    p = l._pyro_samples["weight"]
    assert p.base_dist.scale.allclose(torch.tensor(3 ** -0.5))
def test_layerwise_normal_xavier():
    """Xavier method: prior std matches the expected value for a 3->2 layer."""
    l = PyroModule[nn.Linear](3, 2, bias=False)
    prior = tyxe.priors.LayerwiseNormalPrior(method="xavier")
    prior.apply_(l)
    p = l._pyro_samples["weight"]
    assert p.base_dist.scale.allclose(torch.tensor(0.8 ** 0.5))
def test_expose_all():
    """With expose_all=True every weight and bias becomes a sample site."""
    net = PyroModule[nn.Sequential](PyroModule[nn.Linear](4, 3), PyroModule[nn.Linear](3, 2))
    tyxe.priors.IIDPrior(dist.Normal(0, 1), expose_all=True).apply_(net)
    assert "weight" in net[0]._pyro_samples
    assert "bias" in net[0]._pyro_samples
    assert "weight" in net[1]._pyro_samples
    assert "bias" in net[1]._pyro_samples
def test_hide_all():
    """With hide_all=True all parameters stay deterministic pyro params."""
    net = PyroModule[nn.Sequential](PyroModule[nn.Linear](4, 3), PyroModule[nn.Linear](3, 2))
    tyxe.priors.IIDPrior(dist.Normal(0, 1), expose_all=False, hide_all=True).apply_(net)
    assert "weight" in net[0]._pyro_params
    assert "bias" in net[0]._pyro_params
    assert "weight" in net[1]._pyro_params
    assert "bias" in net[1]._pyro_params
def test_expose_modules():
    """Only parameters of modules listed in expose_modules become sample sites."""
    net = nn.Sequential(nn.Linear(4, 3), nn.Linear(3, 2))
    prior = tyxe.priors.IIDPrior(dist.Normal(0, 1), expose_all=False, expose_modules=[net[0]])
    tyxe.util.to_pyro_module_(net)
    prior.apply_(net)
    assert "weight" in net[0]._pyro_samples
    assert "bias" in net[0]._pyro_samples
    assert "weight" in net[1]._pyro_params
    assert "bias" in net[1]._pyro_params
def test_hide_modules():
    """Parameters of modules listed in hide_modules stay deterministic; the rest are sampled."""
    net = nn.Sequential(nn.Linear(4, 3), nn.Linear(3, 2))
    prior = tyxe.priors.IIDPrior(dist.Normal(0, 1), expose_all=False, hide_modules=[net[0]])
    tyxe.util.to_pyro_module_(net)
    prior.apply_(net)
    assert "weight" in net[0]._pyro_params
    assert "bias" in net[0]._pyro_params
    assert "weight" in net[1]._pyro_samples
    assert "bias" in net[1]._pyro_samples
def test_expose_types():
    """Only parameters of modules whose type is in expose_module_types become sample sites."""
    net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.Linear(3, 2))
    prior = tyxe.priors.IIDPrior(dist.Normal(0, 1), expose_all=False, expose_module_types=(nn.Conv2d,))
    tyxe.util.to_pyro_module_(net)
    prior.apply_(net)
    assert "weight" in net[0]._pyro_samples
    assert "bias" in net[0]._pyro_samples
    assert "weight" in net[1]._pyro_params
    assert "bias" in net[1]._pyro_params
def test_hide_types():
    """Parameters of module types listed in hide_module_types stay deterministic."""
    net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.Linear(3, 2))
    prior = tyxe.priors.IIDPrior(dist.Normal(0, 1), expose_all=False, hide_module_types=(nn.Linear,))
    tyxe.util.to_pyro_module_(net)
    prior.apply_(net)
    assert "weight" in net[0]._pyro_samples
    assert "bias" in net[0]._pyro_samples
    assert "weight" in net[1]._pyro_params
    assert "bias" in net[1]._pyro_params
def test_expose_parameters():
    """Only parameters whose name is in expose_parameters become sample sites."""
    net = nn.Sequential(nn.Linear(4, 3), nn.Linear(3, 2))
    prior = tyxe.priors.IIDPrior(dist.Normal(0, 1), expose_all=False, expose_parameters=["weight"])
    tyxe.util.to_pyro_module_(net)
    prior.apply_(net)
    assert "weight" in net[0]._pyro_samples
    assert "bias" in net[0]._pyro_params
    assert "weight" in net[1]._pyro_samples
    assert "bias" in net[1]._pyro_params
def test_hide_parameters():
    """Parameters whose name is in hide_parameters stay deterministic; others are sampled."""
    net = nn.Sequential(nn.Linear(4, 3), nn.Linear(3, 2))
    prior = tyxe.priors.IIDPrior(dist.Normal(0, 1), expose_all=False, hide_parameters=["weight"])
    tyxe.util.to_pyro_module_(net)
    prior.apply_(net)
    assert "weight" in net[0]._pyro_params
    assert "bias" in net[0]._pyro_samples
    assert "weight" in net[1]._pyro_params
    assert "bias" in net[1]._pyro_samples
def test_expose():
    """Only parameters whose full dotted name is in expose become sample sites."""
    net = nn.Sequential(nn.Linear(4, 3), nn.Linear(3, 2))
    prior = tyxe.priors.IIDPrior(dist.Normal(0, 1), expose_all=False, expose=["0.weight", "1.weight"])
    tyxe.util.to_pyro_module_(net)
    prior.apply_(net)
    assert "weight" in net[0]._pyro_samples
    assert "bias" in net[0]._pyro_params
    assert "weight" in net[1]._pyro_samples
    assert "bias" in net[1]._pyro_params
def test_hide():
    """Parameters whose full dotted name is in hide stay deterministic; others are sampled."""
    net = nn.Sequential(nn.Linear(4, 3), nn.Linear(3, 2))
    prior = tyxe.priors.IIDPrior(dist.Normal(0, 1), expose_all=False, hide=["0.weight", "1.weight"])
    tyxe.util.to_pyro_module_(net)
    prior.apply_(net)
    assert "weight" in net[0]._pyro_params
    assert "bias" in net[0]._pyro_samples
    assert "weight" in net[1]._pyro_params
    assert "bias" in net[1]._pyro_samples
import torch.nn as nn
import pyro.distributions as dist
from pyro.nn import PyroModule
import tyxe
def test_iid():
    """IIDPrior replaces the Linear weight with an Independent Normal(0, 1) sample site."""
    l = PyroModule[nn.Linear](3, 2, bias=False)
    prior = tyxe.priors.IIDPrior(dist.Normal(0, 1))
    prior.apply_(l)
    p = l._pyro_samples["weight"]
    assert isinstance(p, dist.Independent)
    assert isinstance(p.base_dist, dist.Normal)
    assert p.base_dist.loc.allclose(torch.tensor(0.))
    assert p.base_dist.scale.allclose(torch.tensor(1.))
def test_layerwise_normal_kaiming():
    """Kaiming method: prior std is sqrt(2 / fan_in) for a 3-input layer."""
    l = PyroModule[nn.Linear](3, 2, bias=False)
    prior = tyxe.priors.LayerwiseNormalPrior(method="kaiming")
    prior.apply_(l)
    p = l._pyro_samples["weight"]
    assert p.base_dist.scale.allclose(torch.tensor((2 / 3.) ** 0.5))
def test_layerwise_normal_radford():
    """Radford method: prior std is 1 / sqrt(fan_in) for a 3-input layer."""
    l = PyroModule[nn.Linear](3, 2, bias=False)
    prior = tyxe.priors.LayerwiseNormalPrior(method="radford")
    prior.apply_(l)
    p = l._pyro_samples["weight"]
    assert p.base_dist.scale.allclose(torch.tensor(3 ** -0.5))
def test_layerwise_normal_xavier():
    """Xavier method: prior std matches the expected value for a 3->2 layer."""
    l = PyroModule[nn.Linear](3, 2, bias=False)
    prior = tyxe.priors.LayerwiseNormalPrior(method="xavier")
    prior.apply_(l)
    p = l._pyro_samples["weight"]
    assert p.base_dist.scale.allclose(torch.tensor(0.8 ** 0.5))
def test_expose_all():
    """With expose_all=True every weight and bias becomes a sample site."""
    net = PyroModule[nn.Sequential](PyroModule[nn.Linear](4, 3), PyroModule[nn.Linear](3, 2))
    tyxe.priors.IIDPrior(dist.Normal(0, 1), expose_all=True).apply_(net)
    assert "weight" in net[0]._pyro_samples
    assert "bias" in net[0]._pyro_samples
    assert "weight" in net[1]._pyro_samples
    assert "bias" in net[1]._pyro_samples
def test_hide_all():
    """With hide_all=True all parameters stay deterministic pyro params."""
    net = PyroModule[nn.Sequential](PyroModule[nn.Linear](4, 3), PyroModule[nn.Linear](3, 2))
    tyxe.priors.IIDPrior(dist.Normal(0, 1), expose_all=False, hide_all=True).apply_(net)
    assert "weight" in net[0]._pyro_params
    assert "bias" in net[0]._pyro_params
    assert "weight" in net[1]._pyro_params
    assert "bias" in net[1]._pyro_params
def test_expose_modules():
    """Only parameters of modules listed in expose_modules become sample sites."""
    net = nn.Sequential(nn.Linear(4, 3), nn.Linear(3, 2))
    prior = tyxe.priors.IIDPrior(dist.Normal(0, 1), expose_all=False, expose_modules=[net[0]])
    tyxe.util.to_pyro_module_(net)
    prior.apply_(net)
    assert "weight" in net[0]._pyro_samples
    assert "bias" in net[0]._pyro_samples
    assert "weight" in net[1]._pyro_params
    assert "bias" in net[1]._pyro_params
def test_hide_modules():
    """Parameters of modules listed in hide_modules stay deterministic; the rest are sampled."""
    net = nn.Sequential(nn.Linear(4, 3), nn.Linear(3, 2))
    prior = tyxe.priors.IIDPrior(dist.Normal(0, 1), expose_all=False, hide_modules=[net[0]])
    tyxe.util.to_pyro_module_(net)
    prior.apply_(net)
    assert "weight" in net[0]._pyro_params
    assert "bias" in net[0]._pyro_params
    assert "weight" in net[1]._pyro_samples
    assert "bias" in net[1]._pyro_samples
def test_expose_types():
    """Only parameters of modules whose type is in expose_module_types become sample sites."""
    net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.Linear(3, 2))
    prior = tyxe.priors.IIDPrior(dist.Normal(0, 1), expose_all=False, expose_module_types=(nn.Conv2d,))
    tyxe.util.to_pyro_module_(net)
    prior.apply_(net)
    assert "weight" in net[0]._pyro_samples
    assert "bias" in net[0]._pyro_samples
    assert "weight" in net[1]._pyro_params
    assert "bias" in net[1]._pyro_params
def test_hide_types():
    """Parameters of module types listed in hide_module_types stay deterministic."""
    net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.Linear(3, 2))
    prior = tyxe.priors.IIDPrior(dist.Normal(0, 1), expose_all=False, hide_module_types=(nn.Linear,))
    tyxe.util.to_pyro_module_(net)
    prior.apply_(net)
    assert "weight" in net[0]._pyro_samples
    assert "bias" in net[0]._pyro_samples
    assert "weight" in net[1]._pyro_params
    assert "bias" in net[1]._pyro_params
def test_expose_parameters():
    """Only parameters whose name is in expose_parameters become sample sites."""
    net = nn.Sequential(nn.Linear(4, 3), nn.Linear(3, 2))
    prior = tyxe.priors.IIDPrior(dist.Normal(0, 1), expose_all=False, expose_parameters=["weight"])
    tyxe.util.to_pyro_module_(net)
    prior.apply_(net)
    assert "weight" in net[0]._pyro_samples
    assert "bias" in net[0]._pyro_params
    assert "weight" in net[1]._pyro_samples
    assert "bias" in net[1]._pyro_params
def test_hide_parameters():
    """Parameters whose name is in hide_parameters stay deterministic; others are sampled."""
    net = nn.Sequential(nn.Linear(4, 3), nn.Linear(3, 2))
    prior = tyxe.priors.IIDPrior(dist.Normal(0, 1), expose_all=False, hide_parameters=["weight"])
    tyxe.util.to_pyro_module_(net)
    prior.apply_(net)
    assert "weight" in net[0]._pyro_params
    assert "bias" in net[0]._pyro_samples
    assert "weight" in net[1]._pyro_params
    assert "bias" in net[1]._pyro_samples
def test_expose():
    """Only parameters whose full dotted name is in expose become sample sites."""
    net = nn.Sequential(nn.Linear(4, 3), nn.Linear(3, 2))
    prior = tyxe.priors.IIDPrior(dist.Normal(0, 1), expose_all=False, expose=["0.weight", "1.weight"])
    tyxe.util.to_pyro_module_(net)
    prior.apply_(net)
    assert "weight" in net[0]._pyro_samples
    assert "bias" in net[0]._pyro_params
    assert "weight" in net[1]._pyro_samples
    assert "bias" in net[1]._pyro_params
def test_hide():
    """Parameters whose full dotted name is in hide stay deterministic; others are sampled."""
    net = nn.Sequential(nn.Linear(4, 3), nn.Linear(3, 2))
    prior = tyxe.priors.IIDPrior(dist.Normal(0, 1), expose_all=False, hide=["0.weight", "1.weight"])
    tyxe.util.to_pyro_module_(net)
    prior.apply_(net)
    assert "weight" in net[0]._pyro_params
    assert "bias" in net[0]._pyro_samples
    assert "weight" in net[1]._pyro_params
    assert "bias" in net[1]._pyro_samples
import csv
ff_analytics_data = 'week_2_data/Sunday_Evening_Game/ffa_customrankings2018-2.csv'
yahoo_analytics_data = 'week_2_data/Sunday_Evening_Game/Yahoo_DF_player_export.csv'
positions_we_care_about = ['QB','TE','RB','WR','DST']
output_file_ffa = 'week_2_data/Sunday_Evening_Game/cleaned_ffa_customrankings2018-2.csv'
output_file_yahoo = 'week_2_data/Sunday_Evening_Game/cleaned_Yahoo_DF_player_export.csv'
conversion_key_dict = {}
conversion_key_dict['Saints'] = 'New Orleans Saints'
conversion_key_dict['Steelers'] = 'Pittsburgh Steelers'
conversion_key_dict['Patriots'] = 'New England Patriots'
conversion_key_dict['<NAME>'] = 'Todd Gurley II'
conversion_key_dict['Buccaneers'] = 'Tampa Bay Buccaneers'
conversion_key_dict['Eagles'] = 'Philadelphia Eagles'
conversion_key_dict['Falcons'] = 'Atlanta Falcons'
conversion_key_dict['Browns'] = 'Cleveland Browns'
conversion_key_dict['Chargers'] = 'Los Angeles Chargers'
conversion_key_dict['Raiders'] = 'Oakland Raiders'
conversion_key_dict['Bills'] = 'Buffalo Bills'
conversion_key_dict['Giants'] = 'New York Giants'
conversion_key_dict['<NAME>'] = 'Marvin Jones Jr.'
conversion_key_dict['Lions'] = 'Detroit Lions'
conversion_key_dict['Panthers'] = 'Carolina Panthers'
conversion_key_dict['49ers'] = 'San Francisco 49ers'
conversion_key_dict['Odell Beckham'] = 'Odell Beckham Jr.'
conversion_key_dict['Dolphins'] = 'Miami Dolphins'
conversion_key_dict['Redskins'] = 'Washington Redskins'
conversion_key_dict['Cardinals'] = 'Arizona Cardinals'
conversion_key_dict['Texans'] = 'Houston Texans'
conversion_key_dict['Melvin Gordon'] = 'Melvin Gordon III'
conversion_key_dict['Titans'] = 'Tennessee Titans'
conversion_key_dict['Jaguars'] = 'Jacksonville Jaguars'
conversion_key_dict['<NAME>'] = '<NAME> V'
conversion_key_dict['Rams'] = 'Los Angeles Rams'
conversion_key_dict['Colts'] = 'Indianapolis Colts'
conversion_key_dict['<NAME>'] = '<NAME> Jr.'
conversion_key_dict['Jets'] = 'New York Jets'
conversion_key_dict['<NAME>'] = '<NAME> Jr.'
conversion_key_dict['Chiefs'] = 'Kansas City Chiefs'
conversion_key_dict['Broncos'] = 'Denver Broncos'
conversion_key_dict['Packers'] = 'Green Bay Packers'
conversion_key_dict['Vikings'] = 'Minnesota Vikings'
conversion_key_dict['Cowboys'] = 'Dallas Cowboys'
players_this_week = []
with open(ff_analytics_data,'rb') as csvfile:
reader = csv.reader(csvfile)
with open(output_file_ffa,'w') as csv_out:
writer = csv.writer(csv_out)
skip_first_row = 0
for row in reader:
if skip_first_row == 0:
writer.writerow(row)
skip_first_row += 1
pass
else:
if row[3] in positions_we_care_about:
if int(row[11]) < 1000:
if row[1] in conversion_key_dict.keys():
row[1] = conversion_key_dict[row[1]]
writer.writerow(row)
players_this_week.append(row[1])
else:
pass
print players_this_week
with open(yahoo_analytics_data,'rb') as csvfile:
reader = csv.reader(csvfile)
with open(output_file_yahoo,'w') as csv_out:
writer = csv.writer(csv_out)
skip_first_row = 0
for row in reader:
if skip_first_row == 0:
writer.writerow(row)
skip_first_row += 1
pass
else:
name = row[1] + ' ' + row[2]
if name in players_this_week:
writer.writerow(row)
else:
print name, row[3] | clean_ffa_data.py | import csv
ff_analytics_data = 'week_2_data/Sunday_Evening_Game/ffa_customrankings2018-2.csv'
yahoo_analytics_data = 'week_2_data/Sunday_Evening_Game/Yahoo_DF_player_export.csv'
positions_we_care_about = ['QB','TE','RB','WR','DST']
output_file_ffa = 'week_2_data/Sunday_Evening_Game/cleaned_ffa_customrankings2018-2.csv'
output_file_yahoo = 'week_2_data/Sunday_Evening_Game/cleaned_Yahoo_DF_player_export.csv'
conversion_key_dict = {}
conversion_key_dict['Saints'] = 'New Orleans Saints'
conversion_key_dict['Steelers'] = 'Pittsburgh Steelers'
conversion_key_dict['Patriots'] = 'New England Patriots'
conversion_key_dict['<NAME>'] = 'Todd Gurley II'
conversion_key_dict['Buccaneers'] = 'Tampa Bay Buccaneers'
conversion_key_dict['Eagles'] = 'Philadelphia Eagles'
conversion_key_dict['Falcons'] = 'Atlanta Falcons'
conversion_key_dict['Browns'] = 'Cleveland Browns'
conversion_key_dict['Chargers'] = 'Los Angeles Chargers'
conversion_key_dict['Raiders'] = 'Oakland Raiders'
conversion_key_dict['Bills'] = 'Buffalo Bills'
conversion_key_dict['Giants'] = 'New York Giants'
conversion_key_dict['<NAME>'] = 'Marvin Jones Jr.'
conversion_key_dict['Lions'] = 'Detroit Lions'
conversion_key_dict['Panthers'] = 'Carolina Panthers'
conversion_key_dict['49ers'] = 'San Francisco 49ers'
conversion_key_dict['Odell Beckham'] = 'Odell Beckham Jr.'
conversion_key_dict['Dolphins'] = 'Miami Dolphins'
conversion_key_dict['Redskins'] = 'Washington Redskins'
conversion_key_dict['Cardinals'] = 'Arizona Cardinals'
conversion_key_dict['Texans'] = 'Houston Texans'
conversion_key_dict['Melvin Gordon'] = 'Melvin Gordon III'
conversion_key_dict['Titans'] = 'Tennessee Titans'
conversion_key_dict['Jaguars'] = 'Jacksonville Jaguars'
conversion_key_dict['<NAME>'] = '<NAME> V'
conversion_key_dict['Rams'] = 'Los Angeles Rams'
conversion_key_dict['Colts'] = 'Indianapolis Colts'
conversion_key_dict['<NAME>'] = '<NAME> Jr.'
conversion_key_dict['Jets'] = 'New York Jets'
conversion_key_dict['<NAME>'] = '<NAME> Jr.'
conversion_key_dict['Chiefs'] = 'Kansas City Chiefs'
conversion_key_dict['Broncos'] = 'Denver Broncos'
conversion_key_dict['Packers'] = 'Green Bay Packers'
conversion_key_dict['Vikings'] = 'Minnesota Vikings'
conversion_key_dict['Cowboys'] = 'Dallas Cowboys'
players_this_week = []
with open(ff_analytics_data,'rb') as csvfile:
reader = csv.reader(csvfile)
with open(output_file_ffa,'w') as csv_out:
writer = csv.writer(csv_out)
skip_first_row = 0
for row in reader:
if skip_first_row == 0:
writer.writerow(row)
skip_first_row += 1
pass
else:
if row[3] in positions_we_care_about:
if int(row[11]) < 1000:
if row[1] in conversion_key_dict.keys():
row[1] = conversion_key_dict[row[1]]
writer.writerow(row)
players_this_week.append(row[1])
else:
pass
print players_this_week
with open(yahoo_analytics_data,'rb') as csvfile:
reader = csv.reader(csvfile)
with open(output_file_yahoo,'w') as csv_out:
writer = csv.writer(csv_out)
skip_first_row = 0
for row in reader:
if skip_first_row == 0:
writer.writerow(row)
skip_first_row += 1
pass
else:
name = row[1] + ' ' + row[2]
if name in players_this_week:
writer.writerow(row)
else:
print name, row[3] | 0.080709 | 0.141875 |
import uuid
from django.conf import settings
from django.db import models
from django.contrib.auth.models import User, AbstractUser
from django.contrib.postgres.fields import ArrayField
from django.utils.translation import gettext_lazy
from django.dispatch import receiver
from django.db.models.signals import pre_save
from django.core.exceptions import ValidationError
#NOTE: django gives each model an auto generated id field: id = models.AutoField(primary_key=True, **options)
#NOTE: Django admin panels use __str__ to generate labels, so explicitly defining them is important
#NOTE: Django model class can have a "Meta" subclass to fill out additional metadata. More info here: https://docs.djangoproject.com/en/3.1/ref/models/options/
#NOTE: As per the docs, model fields should be lower case, separated by underscores
class Author(AbstractUser):
    """
    Models information about a user (extends Django's AbstractUser).
    """
    # Used to uniquely identify an author on our server. Will be part of related URLs
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Automatically derived from the HOST_NAME field in settings.py
    host = models.CharField(max_length=100, default=settings.HOST_NAME, editable=False)
    # URL that points to the REST api endpoint for this author - also used as the "id" in the protocol
    url = models.CharField(max_length=200, editable=False)
    # URL to the user's github. Editable by the user.
    github = models.CharField(max_length=200, blank=True)
    # Whether or not this account is allowed to log-in (default driven by settings.py)
    is_active = models.BooleanField(default=settings.NEW_ACCOUNTS_AUTO_APPROVED)
    # Authors this author follows, linked through the Followers through-table
    # (asymmetric: A following B does not imply B following A).
    followers = models.ManyToManyField('self', through='Followers',symmetrical=False,related_name='followed_by')
    # Whether or not this account should be treated as a friendly server and get elevated permissions
    is_server = models.BooleanField(default=False)
    def __str__(self):
        return self.username
    # Overwrite the default save function so that we can generate our URL
    # (only on first save; id already exists thanks to the uuid4 default)
    def save(self, *args, **kwargs):
        if not self.url:
            self.url = "{}://{}/author/{}/".format(settings.SCHEME, settings.HOST_NAME, self.id)
        super(Author, self).save(*args, **kwargs)
class PostCategory(models.Model):
    """
    Models a category (tag) that a post can belong to; shared across posts.
    """
    # Unique names prevents duplicate entries from appearing in the database
    # prefer to re-use existing categories where possible
    name = models.CharField(max_length=50, unique=True)
    def __str__(self):
        return self.name
    class Meta:
        # Helpful for Django Admin
        verbose_name = "Post Category"
        verbose_name_plural = "Post Categories"
class Post(models.Model):
    """
    Models a post created by an author
    """
    # Used to define valid visibility strings
    VISIBILITY_CHOICES = [
        ("PUBLIC", "Public"),
        ("FRIENDS", "Friends"),
    ]
    # Used to define valid content-type strings for posts (text or image based)
    CONTENT_TYPE_CHOICES = [
        ("text/plain", "Plain Text"),
        ("text/markdown", "Markdown"),
        ("application/base64", "Base64 Encoding"),
        ("image/png;base64", "PNG"),
        ("image/jpeg;base64", "JPEG"),
    ]
    # Uniquely identifies a post on our server. Will be part of the related URLs
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # The title of the post. Set by the author.
    title = models.CharField(max_length=200)
    # URL that points to the REST api endpoint for this post - also used as the "id" in the protocol
    url = models.CharField(max_length=200, editable=False)
    # Short description of the post
    description = models.CharField(max_length=200)
    # The content type of the post. Must be one of a few specific types.
    content_type = models.CharField(max_length=20, choices=CONTENT_TYPE_CHOICES, default="text/plain")
    # The content associated with this post. If the post is an image, should be base64 encoded text.
    content = models.TextField(blank=True, default="")
    # The author of this post
    author = models.ForeignKey(Author, on_delete=models.CASCADE)
    # The categories this post has been tagged with
    categories = models.ManyToManyField(PostCategory, blank=True)
    # The time that the post was originally published
    published = models.DateTimeField(auto_now_add=True)
    # Privacy settings for the post
    visibility = models.CharField(max_length=10, choices=VISIBILITY_CHOICES, default="PUBLIC")
    # Whether or not this post should show up in feeds, or is only accessible via URL
    unlisted = models.BooleanField(default=False)
    def __str__(self):
        return self.title
    # Overwrite the default save function so that we can generate our URL
    # (only on first save; the URL embeds both the author id and the post id)
    def save(self, *args, **kwargs):
        if not self.url:
            self.url = "{}://{}/author/{}/posts/{}/".format(settings.SCHEME, settings.HOST_NAME, self.author.id, self.id)
        super(Post, self).save(*args, **kwargs)
class Comment(models.Model):
    """
    Models a comment on a post
    """
    # Used to define valid content-type strings for comments (text based)
    CONTENT_TYPE_CHOICES = [
        ("text/plain", "Plain Text"),
        ("text/markdown", "Markdown"),
    ]
    # Uniquely identifies a comment on our server. Will be part of the related URLs
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # The post this comment is attached to
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    # URL id of the author of this comment (not to be confused with the author of the post)
    author_url = models.CharField(max_length=200, blank=True, default="")
    # Backup author JSON (will go stale)
    author_json = models.TextField(default="")
    # The text content of the comment
    comment = models.TextField()
    # The content type of the comment. Must be one of a few specific types.
    content_type = models.CharField(max_length=20, choices=CONTENT_TYPE_CHOICES, default="text/plain")
    # The time that the comment was originally published
    published = models.DateTimeField(auto_now_add=True)
    # URL that points to the REST api endpoint for this comment - also used as the "id" in the protocol
    url = models.CharField(max_length=200, editable=False)
    # Overwrite the default save function so that we can generate our URL
    # (only on first save; the URL nests under the parent post's author and id)
    def save(self, *args, **kwargs):
        if not self.url:
            self.url = "{}://{}/author/{}/posts/{}/comments/{}/".format(settings.SCHEME, settings.HOST_NAME, self.post.author.id, self.post.id, self.id)
        super(Comment, self).save(*args, **kwargs)
class ObjectLike(models.Model):
    """
    Models a liked object (a "like" by some author on an object URL)
    """
    # URL of the author who liked the object
    author_url = models.CharField(max_length=200)
    # JSON of the author who liked the object (will go stale)
    author_json = models.TextField(default="")
    # URL of the object being liked
    object_url = models.CharField(max_length=200)
    class Meta:
        # An author can like a given object at most once
        constraints = [
            models.UniqueConstraint(fields=["author_url", "object_url"], name="unique_like")
        ]
        verbose_name = "Liked Object"
        verbose_name_plural = "Liked Objects"
class Followers(models.Model):
    """ Through-table for Author.followers: one row per local follow edge """
    #the author sending the follow request
    #reverse relationship author.following get all the people the author is following
    author_from = models.ForeignKey(Author, related_name='following', on_delete=models.CASCADE)
    #the author that is being followed
    #reverse relationship author.followee get all their followers (all the people currently following the user)
    author_to = models.ForeignKey(Author, related_name='followee', on_delete=models.CASCADE, default=None)
    # prohibit following same person twice
    class Meta:
        constraints = [
            models.UniqueConstraint(fields=['author_from','author_to'], name="unique_follow")
        ]
        # Helpful for Django Admin
        verbose_name = "Followers"
        verbose_name_plural = "Followers"
class ForeignServer(models.Model):
    """
    Models a fetch-content relationship with a foreign server
    """
    # A name by which to more easily identify the server
    name = models.CharField(max_length=100)
    # Whether or not to try and connect to this server
    is_active = models.BooleanField(default=True)
    # Host name - used to check URLs for matches
    host_name = models.CharField(max_length=100, blank=True)
    # The url to get all of the authors on the server (leave blank if unsupported)
    authors_url = models.CharField(max_length=200, blank=True)
    # The key to look at for the JSON list of authors
    authors_json_key = models.CharField(max_length=25, blank=True)
    # The url to get all of the posts on the server (leave blank if unsupported)
    posts_url = models.CharField(max_length=200, blank=True)
    # The key to look at for the JSON list of posts
    posts_json_key = models.CharField(max_length=25, blank=True)
    # The username credentials for connecting to the server with basic auth (leave blank if unsupported)
    username = models.CharField(max_length=100, blank=True)
    # The password credentials for connecting to the server with basic auth (leave blank if unsupported)
    # NOTE(review): stored in plaintext; confirm this is acceptable for server-to-server basic auth
    password = models.CharField(max_length=25, blank=True)
    class Meta:
        verbose_name = "Foreign Server"
        verbose_name_plural = "Foreign Servers"
# Prohibit self-following at save time (runs on every Followers save,
# including ones created outside of forms).
@receiver(pre_save, sender=Followers)
def check_self_following(sender, instance, **kwargs):
    """pre_save hook: reject a Followers row whose two ends are the same author."""
    if instance.author_from == instance.author_to:
        raise ValidationError('ERROR!!, you cannot follow yourself ')
class InboxItem(models.Model):
    """
    An item in an Author's inbox.
    `author` is the id of the user that you wish to share this item with.
    `json_str` contains a JSON string. That means an InboxItem can contain
    a post, like, or follow.
    `link` is a complete permalink to whatever you're sharing (optional)
    """
    # The recipient whose inbox this item lands in.
    author = models.ForeignKey(Author, on_delete=models.CASCADE) # the recipient
    # Optional permalink to the shared object.
    link = models.TextField(default="")
    # Raw JSON payload (post/like/follow); parsed by consumers, not here.
    json_str = models.TextField(default="")
class RemoteFollow(models.Model):
    """Records that a local author follows an author on a remote node (by URL)."""
    # The local author sending the follow request.
    local_author_from = models.ForeignKey(Author, related_name='remote_following', on_delete=models.CASCADE)
    # URL id of the remote author that is being followed.
    remote_author_to = models.CharField(max_length=200, editable=False)
    class Meta:
        # Make the relationship unique (no duplicate remote follows).
        constraints = [
            models.UniqueConstraint(fields=['local_author_from','remote_author_to'], name="remote_follow")
        ]
class RemoteFollowers(models.Model):
    """Records that a remote author (identified by URL) follows a local author."""
    # URL id of the remote author sending the follow request.
    remote_author_from = models.CharField(max_length=200, editable=False)
    # The local author that is being followed.
    local_author_to = models.ForeignKey(Author, related_name='remote_followers', on_delete=models.CASCADE)
    class Meta:
        # Make the relationship unique (no duplicate remote followers).
        constraints = [
            models.UniqueConstraint(fields=['remote_author_from','local_author_to'], name="remote_followers")
        ]
        # Helpful for Django Admin.
        verbose_name = "Remote Follower"
verbose_name_plural = "Remote Followers" | mysite/SocialApp/models.py | import uuid
from django.conf import settings
from django.db import models
from django.contrib.auth.models import User, AbstractUser
from django.contrib.postgres.fields import ArrayField
from django.utils.translation import gettext_lazy
from django.dispatch import receiver
from django.db.models.signals import pre_save
from django.core.exceptions import ValidationError
#NOTE: django gives each model an auto generated id field: id = models.AutoField(primary_key=True, **options)
#NOTE: Django admin panels use __str__ to generate labels, so explicitly defining them is important
#NOTE: Django model class can have a "Meta" subclass to fill out additional metadata. More info here: https://docs.djangoproject.com/en/3.1/ref/models/options/
#NOTE: As per the docs, model fields should be lower case, separated by underscores
class Author(AbstractUser):
    """
    Models information about a user.

    Extends Django's AbstractUser, so username/password/email handling
    comes from the auth framework.
    """
    # Used to uniquely identify an author on our server. Will be part of related URLs
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Automatically derived from the HOST_NAME field in settings.py
    host = models.CharField(max_length=100, default=settings.HOST_NAME, editable=False)
    # URL that points to the REST api endpoint for this author - also used as the "id" in the protocol
    url = models.CharField(max_length=200, editable=False)
    # URL to the user's github. Editable by the user.
    github = models.CharField(max_length=200, blank=True)
    # Whether or not this account is allowed to log-in (default driven by settings.py)
    is_active = models.BooleanField(default=settings.NEW_ACCOUNTS_AUTO_APPROVED)
    # Asymmetric self-referencing M2M over the Followers through-table; see
    # Followers for which FK is which side of the relationship.
    followers = models.ManyToManyField('self', through='Followers',symmetrical=False,related_name='followed_by')
    # Whether or not this account should be treated as a friendly server and get elevated permissions
    is_server = models.BooleanField(default=False)
    def __str__(self):
        # Django admin labels come from __str__.
        return self.username
    # Overwrite the default save function so that we can generate our URL
    def save(self, *args, **kwargs):
        # Only generate once; subsequent saves keep the URL stable.
        if not self.url:
            self.url = "{}://{}/author/{}/".format(settings.SCHEME, settings.HOST_NAME, self.id)
        super(Author, self).save(*args, **kwargs)
class PostCategory(models.Model):
    """
    Models a category that a post can belong to.
    """
    # Unique names prevent duplicate entries from appearing in the database;
    # prefer to re-use existing categories where possible.
    name = models.CharField(max_length=50, unique=True)
    def __str__(self):
        return self.name
    class Meta:
        # Helpful for Django Admin
        verbose_name = "Post Category"
        verbose_name_plural = "Post Categories"
class Post(models.Model):
    """
    Models a post created by an author.

    Content is either text (plain/markdown) or a base64-encoded image;
    `content_type` distinguishes the variants.
    """
    # Used to define valid visibility strings
    VISIBILITY_CHOICES = [
        ("PUBLIC", "Public"),
        ("FRIENDS", "Friends"),
    ]
    # Used to define valid content-type strings for posts (text or image based)
    CONTENT_TYPE_CHOICES = [
        ("text/plain", "Plain Text"),
        ("text/markdown", "Markdown"),
        ("application/base64", "Base64 Encoding"),
        ("image/png;base64", "PNG"),
        ("image/jpeg;base64", "JPEG"),
    ]
    # Uniquely identifies a post on our server. Will be part of the related URLs
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # The title of the post. Set by the author.
    title = models.CharField(max_length=200)
    # URL that points to the REST api endpoint for this post - also used as the "id" in the protocol
    url = models.CharField(max_length=200, editable=False)
    # Short description of the post
    description = models.CharField(max_length=200)
    # The content type of the post. Must be one of a few specific types.
    content_type = models.CharField(max_length=20, choices=CONTENT_TYPE_CHOICES, default="text/plain")
    # The content associated with this post. If the post is an image, should be base64 encoded text.
    content = models.TextField(blank=True, default="")
    # The author of this post
    author = models.ForeignKey(Author, on_delete=models.CASCADE)
    # The categories this post has been tagged with
    categories = models.ManyToManyField(PostCategory, blank=True)
    # The time that the post was originally published
    published = models.DateTimeField(auto_now_add=True)
    # Privacy settings for the post
    visibility = models.CharField(max_length=10, choices=VISIBILITY_CHOICES, default="PUBLIC")
    # Whether or not this post should show up in feeds, or is only accessible via URL
    unlisted = models.BooleanField(default=False)
    def __str__(self):
        # Django admin labels come from __str__.
        return self.title
    # Overwrite the default save function so that we can generate our URL
    def save(self, *args, **kwargs):
        # Only generate once; subsequent saves keep the URL stable.
        if not self.url:
            self.url = "{}://{}/author/{}/posts/{}/".format(settings.SCHEME, settings.HOST_NAME, self.author.id, self.id)
        super(Post, self).save(*args, **kwargs)
class Comment(models.Model):
    """
    Models a comment on a post.

    The comment author is stored by URL (with a cached JSON snapshot) so
    comments from remote authors can be represented too.
    """
    # Used to define valid content-type strings for comments (text based)
    CONTENT_TYPE_CHOICES = [
        ("text/plain", "Plain Text"),
        ("text/markdown", "Markdown"),
    ]
    # Uniquely identifies a comment on our server. Will be part of the related URLs
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # The post this comment is attached to
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    # The author of this comment (not to be confused with the author of the post)
    author_url = models.CharField(max_length=200, blank=True, default="")
    # Backup author JSON (will go stale)
    author_json = models.TextField(default="")
    # The text content of the comment
    comment = models.TextField()
    # The content type of the comment. Must be one of a few specific types.
    content_type = models.CharField(max_length=20, choices=CONTENT_TYPE_CHOICES, default="text/plain")
    # The time that the comment was originally published
    published = models.DateTimeField(auto_now_add=True)
    # URL that points to the REST api endpoint for this comment - also used as the "id" in the protocol
    url = models.CharField(max_length=200, editable=False)
    # Overwrite the default save function so that we can generate our URL
    def save(self, *args, **kwargs):
        # Only generate once; subsequent saves keep the URL stable.
        if not self.url:
            self.url = "{}://{}/author/{}/posts/{}/comments/{}/".format(settings.SCHEME, settings.HOST_NAME, self.post.author.id, self.post.id, self.id)
        super(Comment, self).save(*args, **kwargs)
class ObjectLike(models.Model):
    """
    Models a liked object (author and liked object are both keyed by URL).
    """
    # URL of the author who liked the object
    author_url = models.CharField(max_length=200)
    # JSON of the author who liked the object (will go stale)
    author_json = models.TextField(default="")
    # URL of the object being liked
    object_url = models.CharField(max_length=200)
    class Meta:
        # At most one like per (author, object) pair.
        constraints = [
            models.UniqueConstraint(fields=["author_url", "object_url"], name="unique_like")
        ]
        verbose_name = "Liked Object"
        verbose_name_plural = "Liked Objects"
class Followers(models.Model):
    """Through-model recording that `author_from` follows `author_to`."""
    # The author sending the follow request.
    # Reverse relation author.following: all Followers rows where this
    # author is the one doing the following.
    author_from = models.ForeignKey(Author, related_name='following', on_delete=models.CASCADE)
    # The author that is being followed.
    # Reverse relation author.followee: all Followers rows where this
    # author is being followed (i.e. their followers).
    author_to = models.ForeignKey(Author, related_name='followee', on_delete=models.CASCADE, default=None)
    class Meta:
        # Prohibit following the same person twice.
        constraints = [
            models.UniqueConstraint(fields=['author_from','author_to'], name="unique_follow")
        ]
        # Helpful for Django Admin
        verbose_name = "Followers"
        verbose_name_plural = "Followers"
class ForeignServer(models.Model):
    """
    Models a fetch-content relationship with a foreign server.

    Each row describes how to pull authors/posts from one remote node,
    including the basic-auth credentials to use.
    """
    # A name by which to more easily identify the server
    name = models.CharField(max_length=100)
    # Whether or not to try and connect to this server
    is_active = models.BooleanField(default=True)
    # Host name - used to check URLs for matches
    host_name = models.CharField(max_length=100, blank=True)
    # The url to get all of the authors on the server (leave blank if unsupported)
    authors_url = models.CharField(max_length=200, blank=True)
    # The key to look at for the JSON list of authors
    authors_json_key = models.CharField(max_length=25, blank=True)
    # The url to get all of the posts on the server (leave blank if unsupported)
    posts_url = models.CharField(max_length=200, blank=True)
    # The key to look at for the JSON list of posts
    posts_json_key = models.CharField(max_length=25, blank=True)
    # The username credentials for connecting to the server with basic auth (leave blank if unsupported)
    username = models.CharField(max_length=100, blank=True)
    # The password credentials for connecting to the server with basic auth (leave blank if unsupported)
    # NOTE(review): stored in plain text and capped at 25 chars — confirm this
    # is acceptable for these outbound-only credentials.
    password = models.CharField(max_length=25, blank=True)
    class Meta:
        verbose_name = "Foreign Server"
        verbose_name_plural = "Foreign Servers"
# Prohibit self-following at save time (runs on every Followers save,
# including ones created outside of forms).
@receiver(pre_save, sender=Followers)
def check_self_following(sender, instance, **kwargs):
    """pre_save hook: reject a Followers row whose two ends are the same author."""
    if instance.author_from == instance.author_to:
        raise ValidationError('ERROR!!, you cannot follow yourself ')
class InboxItem(models.Model):
    """
    An item in an Author's inbox.
    `author` is the id of the user that you wish to share this item with.
    `json_str` contains a JSON string. That means an InboxItem can contain
    a post, like, or follow.
    `link` is a complete permalink to whatever you're sharing (optional)
    """
    # The recipient whose inbox this item lands in.
    author = models.ForeignKey(Author, on_delete=models.CASCADE) # the recipient
    # Optional permalink to the shared object.
    link = models.TextField(default="")
    # Raw JSON payload (post/like/follow); parsed by consumers, not here.
    json_str = models.TextField(default="")
class RemoteFollow(models.Model):
    """Records that a local author follows an author on a remote node (by URL)."""
    # The local author sending the follow request.
    local_author_from = models.ForeignKey(Author, related_name='remote_following', on_delete=models.CASCADE)
    # URL id of the remote author that is being followed.
    remote_author_to = models.CharField(max_length=200, editable=False)
    class Meta:
        # Make the relationship unique (no duplicate remote follows).
        constraints = [
            models.UniqueConstraint(fields=['local_author_from','remote_author_to'], name="remote_follow")
        ]
class RemoteFollowers(models.Model):
    """Records that a remote author (identified by URL) follows a local author."""
    # URL id of the remote author sending the follow request.
    remote_author_from = models.CharField(max_length=200, editable=False)
    # The local author that is being followed.
    local_author_to = models.ForeignKey(Author, related_name='remote_followers', on_delete=models.CASCADE)
    class Meta:
        # Make the relationship unique (no duplicate remote followers).
        constraints = [
            models.UniqueConstraint(fields=['remote_author_from','local_author_to'], name="remote_followers")
        ]
        # Helpful for Django Admin.
        verbose_name = "Remote Follower"
        verbose_name_plural = "Remote Followers"
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Conv2DTranspose, concatenate, BatchNormalization, Activation, add
from tensorflow.keras.models import Model, model_from_json
from tensorflow.keras.optimizers import Adam
def conv2d_bn(x, filters, num_row, num_col, padding='same', strides=(1, 1), activation='relu', name=None):
    """Two stacked (Conv2D -> BatchNorm -> Activation) stages.

    :param x: input tensor.
    :param filters: number of filters for both convolutions.
    :param num_row, num_col: kernel size.
    :param strides: strides for the FIRST convolution only.
    :param name: optional base name for the block's output layer.
    :return: output tensor of the second activation.
    """
    x = Conv2D(filters, (num_row, num_col), strides=strides, padding=padding,
               kernel_initializer="he_normal", use_bias=False)(x)
    x = BatchNormalization(axis=3, scale=False)(x)
    # The intermediate activation gets a derived name: the original gave
    # both Activation layers the same `name`, and duplicate layer names
    # raise an error in Keras whenever `name` is not None.
    x = Activation(activation, name=None if name is None else name + '_1')(x)
    # Only the first conv applies `strides`; the original passed `strides`
    # to both convs, which would compound the downsampling for any
    # non-default stride (all calls in this file use the default (1, 1),
    # so behavior is unchanged for them).
    x = Conv2D(filters, (num_row, num_col), padding=padding,
               kernel_initializer="he_normal", use_bias=False)(x)
    x = BatchNormalization(axis=3, scale=False)(x)
    x = Activation(activation, name=name)(x)
    return x
def UNet(input_filters, height, width, n_channels):
    """
    Build a classic 4-level U-Net for single-channel (sigmoid) segmentation.

    :param input_filters: filters in the first encoder block; doubled at
        each downsampling level (bottleneck has input_filters*16).
    :param height: input height — should be divisible by 16 (four 2x2
        pools) so the upsampled tensors line up with the skip connections.
    :param width: input width — same divisibility requirement.
    :param n_channels: number of channels in the input image.
    :return: an uncompiled Model producing a 1-channel probability map.
    """
    inputs = Input((height, width, n_channels))
    filters = input_filters
    # --- Encoder: conv block then 2x2 max-pool, doubling filters each level ---
    block1 = conv2d_bn(inputs, filters, 3, 3, activation='relu', padding='same')
    pool1 = MaxPooling2D(pool_size=(2, 2))(block1)
    block2 = conv2d_bn(pool1, filters*2, 3, 3, activation='relu', padding='same')
    pool2 = MaxPooling2D(pool_size=(2, 2))(block2)
    block3 = conv2d_bn(pool2, filters*4, 3, 3, activation='relu', padding='same')
    pool3 = MaxPooling2D(pool_size=(2, 2))(block3)
    block4 = conv2d_bn(pool3, filters*8, 3, 3, activation='relu', padding='same')
    pool4 = MaxPooling2D(pool_size=(2, 2))(block4)
    # --- Bottleneck ---
    block5 = conv2d_bn(pool4, filters*16, 3, 3, activation='relu', padding='same')
    # --- Decoder: 2x2 transposed-conv upsampling + skip concatenation ---
    up6 = concatenate([Conv2DTranspose(
        filters*8, (2, 2), strides=(2, 2), padding='same')(block5), block4], axis=3)
    block6 = conv2d_bn(up6, filters*8, 3, 3, activation='relu', padding='same')
    up7 = concatenate([Conv2DTranspose(
        filters*4, (2, 2), strides=(2, 2), padding='same')(block6), block3], axis=3)
    block7 = conv2d_bn(up7, filters*4, 3, 3, activation='relu', padding='same')
    up8 = concatenate([Conv2DTranspose(
        filters*2, (2, 2), strides=(2, 2), padding='same')(block7), block2], axis=3)
    block8 = conv2d_bn(up8, filters*2, 3, 3, activation='relu', padding='same')
    up9 = concatenate([Conv2DTranspose(filters, (2, 2), strides=(
        2, 2), padding='same')(block8), block1], axis=3)
    block9 = conv2d_bn(up9, filters, 3, 3, activation='relu', padding='same')
    # 1x1 conv down to a single-channel sigmoid output.
    conv10 = Conv2D(1, (1, 1), padding="same", activation="sigmoid")(block9)
    model = Model(inputs=[inputs], outputs=[conv10])
    return model
def main():
    """Smoke test: build a 256x256 RGB U-Net and print its layer summary."""
    # Define the model
    model = UNet(32, 256, 256, 3)
    # summary() prints itself and returns None, so the original
    # print(model.summary()) emitted a stray trailing "None".
    model.summary()
if __name__ == '__main__':
main() | archs/unet.py | from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Conv2DTranspose, concatenate, BatchNormalization, Activation, add
from tensorflow.keras.models import Model, model_from_json
from tensorflow.keras.optimizers import Adam
def conv2d_bn(x, filters, num_row, num_col, padding='same', strides=(1, 1), activation='relu', name=None):
    """Two stacked (Conv2D -> BatchNorm -> Activation) stages.

    :param x: input tensor.
    :param filters: number of filters for both convolutions.
    :param num_row, num_col: kernel size.
    :param strides: strides for the FIRST convolution only.
    :param name: optional base name for the block's output layer.
    :return: output tensor of the second activation.
    """
    x = Conv2D(filters, (num_row, num_col), strides=strides, padding=padding,
               kernel_initializer="he_normal", use_bias=False)(x)
    x = BatchNormalization(axis=3, scale=False)(x)
    # The intermediate activation gets a derived name: the original gave
    # both Activation layers the same `name`, and duplicate layer names
    # raise an error in Keras whenever `name` is not None.
    x = Activation(activation, name=None if name is None else name + '_1')(x)
    # Only the first conv applies `strides`; the original passed `strides`
    # to both convs, which would compound the downsampling for any
    # non-default stride (all calls in this file use the default (1, 1),
    # so behavior is unchanged for them).
    x = Conv2D(filters, (num_row, num_col), padding=padding,
               kernel_initializer="he_normal", use_bias=False)(x)
    x = BatchNormalization(axis=3, scale=False)(x)
    x = Activation(activation, name=name)(x)
    return x
def UNet(input_filters, height, width, n_channels):
    """
    Build a classic 4-level U-Net for single-channel (sigmoid) segmentation.

    :param input_filters: filters in the first encoder block; doubled at
        each downsampling level (bottleneck has input_filters*16).
    :param height: input height — should be divisible by 16 (four 2x2
        pools) so the upsampled tensors line up with the skip connections.
    :param width: input width — same divisibility requirement.
    :param n_channels: number of channels in the input image.
    :return: an uncompiled Model producing a 1-channel probability map.
    """
    inputs = Input((height, width, n_channels))
    filters = input_filters
    # --- Encoder: conv block then 2x2 max-pool, doubling filters each level ---
    block1 = conv2d_bn(inputs, filters, 3, 3, activation='relu', padding='same')
    pool1 = MaxPooling2D(pool_size=(2, 2))(block1)
    block2 = conv2d_bn(pool1, filters*2, 3, 3, activation='relu', padding='same')
    pool2 = MaxPooling2D(pool_size=(2, 2))(block2)
    block3 = conv2d_bn(pool2, filters*4, 3, 3, activation='relu', padding='same')
    pool3 = MaxPooling2D(pool_size=(2, 2))(block3)
    block4 = conv2d_bn(pool3, filters*8, 3, 3, activation='relu', padding='same')
    pool4 = MaxPooling2D(pool_size=(2, 2))(block4)
    # --- Bottleneck ---
    block5 = conv2d_bn(pool4, filters*16, 3, 3, activation='relu', padding='same')
    # --- Decoder: 2x2 transposed-conv upsampling + skip concatenation ---
    up6 = concatenate([Conv2DTranspose(
        filters*8, (2, 2), strides=(2, 2), padding='same')(block5), block4], axis=3)
    block6 = conv2d_bn(up6, filters*8, 3, 3, activation='relu', padding='same')
    up7 = concatenate([Conv2DTranspose(
        filters*4, (2, 2), strides=(2, 2), padding='same')(block6), block3], axis=3)
    block7 = conv2d_bn(up7, filters*4, 3, 3, activation='relu', padding='same')
    up8 = concatenate([Conv2DTranspose(
        filters*2, (2, 2), strides=(2, 2), padding='same')(block7), block2], axis=3)
    block8 = conv2d_bn(up8, filters*2, 3, 3, activation='relu', padding='same')
    up9 = concatenate([Conv2DTranspose(filters, (2, 2), strides=(
        2, 2), padding='same')(block8), block1], axis=3)
    block9 = conv2d_bn(up9, filters, 3, 3, activation='relu', padding='same')
    # 1x1 conv down to a single-channel sigmoid output.
    conv10 = Conv2D(1, (1, 1), padding="same", activation="sigmoid")(block9)
    model = Model(inputs=[inputs], outputs=[conv10])
    return model
def main():
    """Smoke test: build a 256x256 RGB U-Net and print its layer summary."""
    # Define the model
    model = UNet(32, 256, 256, 3)
    # summary() prints itself and returns None, so the original
    # print(model.summary()) emitted a stray trailing "None".
    model.summary()
if __name__ == '__main__':
main() | 0.929007 | 0.768972 |
from math import sqrt, pow
from utils.nodefinder import node_finder
import time
# This is for simulating vehicle movement
# Velocity in m/s
CAR_VELOCITY = 13
# Simulation tick rate (ticks per second); the run() loop sleeps 1/TICK_RATE.
TICK_RATE = 1
# NOTE(review): unexplained scale factor from m/s to map units — confirm source.
CONVERSION_FACTOR = 1.542
# Distance (map units) a simulated car covers each tick.
DISTANCE_PER_TICK = CAR_VELOCITY*CONVERSION_FACTOR/TICK_RATE
# Re-entrancy guard: True while move_all_cars() is mid-update.
RUNNING_STATE = False
# Id of the real telemetry-driven car; it is not simulated by move_car().
OUR_SMART_CAR = "Car 1"
def move_car(car):
    """
    Advance one car by a single simulation tick.

    The smart car's position comes from real telemetry, so it is delegated
    to move_user_car(). Simulated cars consume points from car.coordinates
    until roughly DISTANCE_PER_TICK units have been covered, dropping
    reached destinations and any passengers whose destination was passed.
    """
    if car.id == OUR_SMART_CAR:
        move_user_car(car)
    else:
        distance = 0
        while distance < DISTANCE_PER_TICK:
            # Fewer than two points left: route exhausted, trip is over.
            if len(car.coordinates) < 2:
                if len(car.passengers) > 0:
                    car.passengers = []
                break
            point = car.coordinates.pop(0)
            if point == car.destinations[0] or car.coordinates[0] == car.destinations[0]:
                car.destinations.pop(0)
            # Rebuild the list instead of popping while iterating: the
            # original pop(index(p)) inside the for-loop skipped the
            # element immediately after each removal.
            car.passengers = [p for p in car.passengers if p.destination != point]
            distance += sqrt(pow(car.coordinates[0].x - point.x, 2)+pow(car.coordinates[0].y - point.y, 2))
        car.location = car.coordinates[0]
def move_all_cars(carpool):
    """Advance every car in `carpool` by one tick, guarded by RUNNING_STATE."""
    lst = carpool.cars
    global RUNNING_STATE
    # Flag prevents run() from starting an overlapping update pass.
    RUNNING_STATE = True
    for car in lst:
        # Only cars that still have somewhere to go are moved.
        if len(car.destinations) > 0:
            # NOTE(review): this compares against carpool.OUR_SMART_CAR while
            # move_car() uses the module-level OUR_SMART_CAR — confirm the two
            # values stay in sync.
            if car.id == carpool.OUR_SMART_CAR:
                car = move_user_car(car)
            else:
                move_car(car)
    RUNNING_STATE = False
def move_user_car(car):
    """
    The user car is moved by the telemetry data sent by the smartcar.
    The only thing that needs to be done here is popping deprecated route points.
    :param car: the user car (mutated in place)
    :return: the car, for callers that rebind it
    """
    while len(car.visited) > 0:
        if len(car.coordinates) < 2:
            # Route exhausted: trip over, everyone is dropped off.
            car.passengers = []
            return car
        point = car.coordinates[0]
        # NOTE(review): if visited[0] never matches coordinates[0] this loop
        # spins forever — assumes telemetry reports points in route order.
        if car.visited[0] == point:
            car.visited.pop(0)
            car.coordinates.pop(0)
            if point == car.destinations[0]:
                # Bug fix: pop the matched FIRST destination. The original
                # called pop() (i.e. pop(-1)) and discarded the *last*
                # destination, unlike move_car() which pops index 0.
                car.destinations.pop(0)
            # Rebuild rather than popping while iterating, which skipped
            # the element immediately after each removal.
            car.passengers = [p for p in car.passengers if p.destination != point]
    return car
def run(carpool):
    """Simulation loop: repeatedly advance all cars, one pass per tick."""
    while True:
        # Skip a pass if a previous update is still flagged as in progress.
        if not RUNNING_STATE:
            move_all_cars(carpool)
time.sleep(1/TICK_RATE) | server/utils/simulator/car_mover.py | from math import sqrt, pow
from utils.nodefinder import node_finder
import time
# This is for simulating vehicle movement
# Velocity in m/s
CAR_VELOCITY = 13
# Simulation tick rate (ticks per second); the run() loop sleeps 1/TICK_RATE.
TICK_RATE = 1
# NOTE(review): unexplained scale factor from m/s to map units — confirm source.
CONVERSION_FACTOR = 1.542
# Distance (map units) a simulated car covers each tick.
DISTANCE_PER_TICK = CAR_VELOCITY*CONVERSION_FACTOR/TICK_RATE
# Re-entrancy guard: True while move_all_cars() is mid-update.
RUNNING_STATE = False
# Id of the real telemetry-driven car; it is not simulated by move_car().
OUR_SMART_CAR = "Car 1"
def move_car(car):
    """
    Advance one car by a single simulation tick.

    The smart car's position comes from real telemetry, so it is delegated
    to move_user_car(). Simulated cars consume points from car.coordinates
    until roughly DISTANCE_PER_TICK units have been covered, dropping
    reached destinations and any passengers whose destination was passed.
    """
    if car.id == OUR_SMART_CAR:
        move_user_car(car)
    else:
        distance = 0
        while distance < DISTANCE_PER_TICK:
            # Fewer than two points left: route exhausted, trip is over.
            if len(car.coordinates) < 2:
                if len(car.passengers) > 0:
                    car.passengers = []
                break
            point = car.coordinates.pop(0)
            if point == car.destinations[0] or car.coordinates[0] == car.destinations[0]:
                car.destinations.pop(0)
            # Rebuild the list instead of popping while iterating: the
            # original pop(index(p)) inside the for-loop skipped the
            # element immediately after each removal.
            car.passengers = [p for p in car.passengers if p.destination != point]
            distance += sqrt(pow(car.coordinates[0].x - point.x, 2)+pow(car.coordinates[0].y - point.y, 2))
        car.location = car.coordinates[0]
def move_all_cars(carpool):
    """Advance every car in `carpool` by one tick, guarded by RUNNING_STATE."""
    lst = carpool.cars
    global RUNNING_STATE
    # Flag prevents run() from starting an overlapping update pass.
    RUNNING_STATE = True
    for car in lst:
        # Only cars that still have somewhere to go are moved.
        if len(car.destinations) > 0:
            # NOTE(review): this compares against carpool.OUR_SMART_CAR while
            # move_car() uses the module-level OUR_SMART_CAR — confirm the two
            # values stay in sync.
            if car.id == carpool.OUR_SMART_CAR:
                car = move_user_car(car)
            else:
                move_car(car)
    RUNNING_STATE = False
def move_user_car(car):
    """
    The user car is moved by the telemetry data sent by the smartcar.
    The only thing that needs to be done here is popping deprecated route points.
    :param car: the user car (mutated in place)
    :return: the car, for callers that rebind it
    """
    while len(car.visited) > 0:
        if len(car.coordinates) < 2:
            # Route exhausted: trip over, everyone is dropped off.
            car.passengers = []
            return car
        point = car.coordinates[0]
        # NOTE(review): if visited[0] never matches coordinates[0] this loop
        # spins forever — assumes telemetry reports points in route order.
        if car.visited[0] == point:
            car.visited.pop(0)
            car.coordinates.pop(0)
            if point == car.destinations[0]:
                # Bug fix: pop the matched FIRST destination. The original
                # called pop() (i.e. pop(-1)) and discarded the *last*
                # destination, unlike move_car() which pops index 0.
                car.destinations.pop(0)
            # Rebuild rather than popping while iterating, which skipped
            # the element immediately after each removal.
            car.passengers = [p for p in car.passengers if p.destination != point]
    return car
def run(carpool):
    """Simulation loop: repeatedly advance all cars, one pass per tick."""
    while True:
        # Skip a pass if a previous update is still flagged as in progress.
        if not RUNNING_STATE:
            move_all_cars(carpool)
time.sleep(1/TICK_RATE) | 0.359589 | 0.289475 |
# Base64-encoded X-RH-IDENTITY header for a fully entitled test identity.
# The "<KEY>" fragments are redacted pieces of the encoded payload; the
# adjacent string literals are implicitly concatenated into one value.
AUTH_HEADER = {
    "X-RH-IDENTITY": "<KEY>"
    "<KEY>"
    "<KEY>"
    "<KEY>"
    "<KEY>"
    "<KEY>"
    "<KEY>"
    "X2VudGl0bGVkIjp0cnVlfX19Cg=="
}
AUTH_HEADER_NO_ENTITLEMENTS = {
"X-RH-IDENTITY": "<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>
}
AUTH_HEADER_SMART_MGMT_FALSE = {
"X-RH-IDENTITY": "<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"dmUiOnRydWUsImlzX2ludGVybmFsIjp0cnVlLCJp"
"c19vcmdfYWRtaW4iOmZhbHNlLC<KEY>"
"<KEY>"
"<KEY>l"
"<KEY>WV<KEY>uYWdlbWVu"
"dCI6eyJpc19lbnRpdGxlZCI6IGZhbHNlfX19Cg=="
}
# this can't happen in real life, adding test anyway
AUTH_HEADER_NO_ACCT_BUT_HAS_ENTS = {
"X-RH-IDENTITY": "<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"fX0K"
}
"""
decoded AUTH_HEADER_NO_ACCT (newlines added for readability):
{
"identity": {
"internal": {
"org_id": "9999"
},
"type": "User",
"user": {
"email": "<EMAIL>",
"first_name": "No",
"is_active": true,
"is_internal": true,
"is_org_admin": false,
"last_name": "Number",
"locale": "en_US",
"username": "nonumber"
}
}
}
"""
AUTH_HEADER_NO_ACCT = {
"X-RH-IDENTITY": "<KEY>"
"<KEY>"
"9udW1iZXJAZXhhbXBsZS5jb20iLCJmaXJzdF9uYW1lIjo"
"iTm8iLCJsYXN0X25hbWUiOiJOdW1iZXIiLCJpc19hY3Rp"
"<KEY>ZG1pbiI6ZmFsc2UsImlzX"
"<KEY>"
"<KEY>
}
# Canned baseline-service response: two minimal baselines for account
# 1212729, each with a single fact. Used to stub the fetch in tests.
FETCH_BASELINES_RESULT = [
    {
        "id": "ff35596c-f98e-11e9-aea9-98fa9b07d419",
        "account": "1212729",
        "display_name": "baseline1",
        "fact_count": 1,
        "created": "2019-10-17T16:23:34.238952Z",
        "updated": "2019-10-17T16:25:34.041645Z",
        "baseline_facts": [{"name": "fqdn", "value": "test.example1.com"}],
    },
    {
        "id": "89df6310-f98e-11e9-8a65-98fa9b07d419",
        "account": "1212729",
        "display_name": "baseline2",
        "fact_count": 1,
        "created": "2019-10-17T16:23:34.238952Z",
        "updated": "2019-10-17T16:25:34.041645Z",
        "baseline_facts": [{"name": "arch", "value": "golden"}],
    },
]
FETCH_SYSTEMS_WITH_PROFILES_CAPTURED_DATE_RESULT = [
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fa",
"created": "2019-01-31T13:00:00.100010Z",
"display_name": None,
"fqdn": "fake_system_99.example.com",
"id": "fc1e497a-28ae-11e9-afd9-c85b761454fa",
"insights_id": "01791a58-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "fdf8:f53e:61e4::18"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"captured_date": "2020-03-30T18:42:23+00:00",
"salutation": "hello",
"fqdn": "hostname_two",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
"cpu_flags": ["maryland"],
"system_memory_bytes": 640,
"yum_repos": [{"name": "yummy", "enabled": False}, {"no_name": "bleh"}],
"network_interfaces": [
{
"name": "eth99",
"mtu": 3,
"ipv4_addresses": ["172.16.58.3"],
"ipv6_addresses": ["00:00:02"],
},
{"no_name": "foo"},
],
"system_profile_exists": True,
"id": "fc1e497a-28ae-11e9-afd9-c85b761454fa",
},
"tags": [],
"updated": "2019-01-31T14:00:00.500000Z",
},
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb",
"created": "2018-01-31T13:00:00.100010Z",
"display_name": "hello",
"fqdn": "fake_system_99.example.com",
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"insights_id": "00000000-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "fdf8:f53e:61e4::18"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"captured_date": "2020-03-30T18:42:23+00:00",
"salutation": "hi",
"fqdn": "hostname_one",
"system_profile_exists": True,
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
"network_interfaces": [
{
"name": "eth99",
"mtu": 3,
"ipv4_addresses": ["172.16.58.3"],
"ipv6_addresses": ["00:00:01"],
},
{"no_name": "foo"},
],
},
"tags": [],
"updated": "2018-01-31T14:00:00.500000Z",
},
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb",
"created": "2018-01-31T13:00:00.100010Z",
"display_name": None,
"fqdn": "hostname_one",
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"insights_id": "00000000-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "fdf8:f53e:61e4::18"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"captured_date": "2020-03-30T18:42:23+00:00",
"salutation": "hi",
"fqdn": "hostname_one",
"system_profile_exists": False,
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
"network_interfaces": [
{
"name": "eth99",
"mtu": 3,
"ipv4_addresses": ["172.16.58.3"],
"ipv6_addresses": ["00:00:01"],
},
{"no_name": "foo"},
],
},
"tags": [],
"updated": "2018-01-31T14:00:00.500000Z",
},
]
FETCH_SYSTEMS_WITH_PROFILES_RESULT = [
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fa",
"created": "2019-01-31T13:00:00.100010Z",
"display_name": None,
"fqdn": "fake_system_99.example.com",
"id": "fc1e497a-28ae-11e9-afd9-c85b761454fa",
"insights_id": "01791a58-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "fdf8:f53e:61e4::18"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"salutation": "hello",
"fqdn": "hostname_two",
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
"cpu_flags": ["maryland"],
"system_memory_bytes": 640,
"yum_repos": [{"name": "yummy", "enabled": False}, {"no_name": "bleh"}],
"network_interfaces": [
{
"name": "eth99",
"mtu": 3,
"ipv4_addresses": ["172.16.58.3"],
"ipv6_addresses": ["00:00:02"],
},
{"no_name": "foo"},
],
"enabled_services": ["insights_client"],
"system_profile_exists": True,
"id": "fc1e497a-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
},
"tags": [],
"updated": "2019-01-31T14:00:00.500000Z",
},
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb",
"created": "2018-01-31T13:00:00.100010Z",
"display_name": "hello",
"fqdn": "fake_system_99.example.com",
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"insights_id": "00000000-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "fdf8:f53e:61e4::18"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"salutation": "hi",
"fqdn": "hostname_one",
"system_profile_exists": True,
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
"network_interfaces": [
{
"name": "eth99",
"mtu": 3,
"ipv4_addresses": ["172.16.58.3"],
"ipv6_addresses": ["00:00:01"],
},
{"no_name": "foo"},
],
},
"tags": [],
"updated": "2018-01-31T14:00:00.500000Z",
},
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb",
"created": "2018-01-31T13:00:00.100010Z",
"display_name": None,
"fqdn": "hostname_one",
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"insights_id": "00000000-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "fdf8:f53e:61e4::18"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"salutation": "hi",
"fqdn": "hostname_one",
"system_profile_exists": False,
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
"network_interfaces": [
{
"name": "eth99",
"mtu": 3,
"ipv4_addresses": ["172.16.58.3"],
"ipv6_addresses": ["00:00:01"],
},
{"no_name": "foo"},
],
},
"tags": [],
"updated": "2018-01-31T14:00:00.500000Z",
},
]
FETCH_SYSTEM_PROFILES_INV_SVC = """
{
"count": 1,
"total": 1,
"page": 1,
"per_page": 50,
"results": [
{
"id": "243926fa-262f-11e9-a632-c85b761454fa",
"system_profile": {
"arch": "x86_64",
"bios_vendor": "SeaBIOS",
"bios_version": "?-20180531_142017-buildhw-08.phx2.fedoraproject.org-1.fc28",
"cores_per_socket": 1,
"cpu_flags": [ "fpu", "vme" ],
"enabled_services": ["auditd", "chronyd", "crond" ],
"infrastructure_type": "virtual",
"infrastructure_vendor": "kvm",
"installed_packages": ["0:bash-4.4.19-7.el8", "0:chrony-3.3-3.el8",
"0:dnf-4.0.9.2-4.el8", "1:NetworkManager-1.14.0-14.el8"],
"installed_services": [ "arp-ethers", "auditd", "autovt@", "chronyd", "cpupower"],
"kernel_modules": [ "kvm", "pcspkr", "joydev", "xfs"],
"last_boot_time": "2019-03-25T19:32:18",
"network_interfaces": [
{
"ipv4_addresses": ["127.0.0.1"],
"ipv6_addresses": ["::1"],
"mac_address": "00:00:00:00:00:00",
"mtu": 65536,
"name": "lo",
"state": "UNKNOWN",
"type": "loopback"
},
{
"ipv4_addresses": ["192.168.0.1"],
"ipv6_addresses": ["fe80::5054:ff::0001"],
"mac_address": "52:54:00:00:00:00",
"mtu": 1500,
"name": "eth0",
"state": "UP",
"type": "ether"
}
],
"number_of_cpus": 2,
"number_of_sockets": 2,
"os_kernel_version": "4.18.0",
"running_processes": [ "watchdog/1", "systemd-logind", "md", "ksmd", "sshd" ],
"system_memory_bytes": 1917988864,
"yum_repos": [
{
"base_url": "https://cdn.example.com/content/freedos/1.0/i386/os",
"enabled": true,
"gpgcheck": true,
"name": "freedos 1.0 repo i386"
},
{
"base_url": "https://cdn.example.com/content/freedos/1.0/z80/os",
"enabled": false,
"gpgcheck": true,
"name": "freedos 1.0 repo z80"
}
]
}
}
],
"total": 1
}
"""
FETCH_SYSTEMS_WITH_PROFILES_SAME_FACTS_RESULT = [
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fa",
"created": "2019-01-31T13:00:00.100010Z",
"display_name": None,
"system_profile": {
"salutation": "howdy",
"system_profile_exists": True,
"id": "fc1e497a-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
},
"fqdn": "fake_system_99.example.com",
"id": "fc1e497a-28ae-11e9-afd9-c85b761454fa",
"insights_id": "01791a58-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "fdf8:f53e:61e4::18"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"tags": [],
"updated": "2019-01-31T14:00:00.500000Z",
},
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb",
"created": "2018-01-31T13:00:00.100010Z",
"display_name": None,
"system_profile": {
"salutation": "howdy",
"system_profile_exists": True,
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
},
"fqdn": "fake_system_99.example.com",
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"insights_id": "00000000-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "fdf8:f53e:61e4::18"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"tags": [],
"updated": "2018-01-31T14:00:00.500000Z",
},
]
FETCH_SYSTEM_TAGS = """
{
"total": 1,
"count": 1,
"page": 1,
"per_page": 50,
"results": {
"ec67f65c-2bc8-4ce8-82e2-6a27cada8d31": [
{
"namespace": "insights-client",
"key": "group",
"value": "XmygroupX"
}
]
}
}
"""
FETCH_SYSTEMS_INV_SVC = """
{
"count": 2,
"total": 2,
"page": 1,
"per_page": 50,
"results": [
{
"account": "1234567",
"bios_uuid": "dc43976c263411e9bcf0c85b761454fa",
"created": "2018-12-01T12:00:00.000000Z",
"display_name": "system1.example.com",
"fqdn": "system.example.com",
"id": "243926fa-262f-11e9-a632-c85b761454fa",
"insights_id": "TEST-ID00-0000-0000",
"ip_addresses": [
"10.0.0.1",
"10.0.0.2"
],
"mac_addresses": [
"c2:00:d0:c8:00:01"
],
"subscription_manager_id": "1234FAKE1234",
"tags": [],
"updated": "2018-12-31T12:00:00.000000Z",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z"
},
{
"account": "1234567",
"bios_uuid": "ec43976c263411e9bcf0c85b761454fa",
"created": "2018-12-01T12:00:00.000000Z",
"display_name": "system2.example.com",
"fqdn": "system2.example.com",
"id": "264fb5b2-262f-11e9-9b12-c85b761454fa",
"insights_id": "TEST-ID22-2222-2222",
"ip_addresses": [
"10.0.0.3",
"10.0.0.4"
],
"mac_addresses": [
"ec2:00:d0:c8:00:01"
],
"subscription_manager_id": "2222FAKE2222",
"tags": [],
"updated": "2018-12-31T12:00:00.000000Z",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z"
}
]}"""
SYSTEM_NOT_FOUND_TEMPLATE = """
{
"count": 0,
"page": 1,
"per_page": 50,
"results": [],
"total": 0
}
""" | tests/fixtures.py | AUTH_HEADER = {
"X-RH-IDENTITY": "<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"X2VudGl0bGVkIjp0cnVlfX19Cg=="
}
AUTH_HEADER_NO_ENTITLEMENTS = {
"X-RH-IDENTITY": "<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>
}
AUTH_HEADER_SMART_MGMT_FALSE = {
"X-RH-IDENTITY": "<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"dmUiOnRydWUsImlzX2ludGVybmFsIjp0cnVlLCJp"
"c19vcmdfYWRtaW4iOmZhbHNlLC<KEY>"
"<KEY>"
"<KEY>l"
"<KEY>WV<KEY>uYWdlbWVu"
"dCI6eyJpc19lbnRpdGxlZCI6IGZhbHNlfX19Cg=="
}
# this can't happen in real life, adding test anyway
AUTH_HEADER_NO_ACCT_BUT_HAS_ENTS = {
"X-RH-IDENTITY": "<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"fX0K"
}
"""
decoded AUTH_HEADER_NO_ACCT (newlines added for readablity):
{
"identity": {
"internal": {
"org_id": "9999"
},
"type": "User",
"user": {
"email": "<EMAIL>",
"first_name": "No",
"is_active": true,
"is_internal": true,
"is_org_admin": false,
"last_name": "Number",
"locale": "en_US",
"username": "nonumber"
}
}
}
"""
AUTH_HEADER_NO_ACCT = {
"X-RH-IDENTITY": "<KEY>"
"<KEY>"
"9udW1iZXJAZXhhbXBsZS5jb20iLCJmaXJzdF9uYW1lIjo"
"iTm8iLCJsYXN0X25hbWUiOiJOdW1iZXIiLCJpc19hY3Rp"
"<KEY>ZG1pbiI6ZmFsc2UsImlzX"
"<KEY>"
"<KEY>
}
FETCH_BASELINES_RESULT = [
{
"id": "ff35596c-f98e-11e9-aea9-98fa9b07d419",
"account": "1212729",
"display_name": "baseline1",
"fact_count": 1,
"created": "2019-10-17T16:23:34.238952Z",
"updated": "2019-10-17T16:25:34.041645Z",
"baseline_facts": [{"name": "fqdn", "value": "test.example1.com"}],
},
{
"id": "89df6310-f98e-11e9-8a65-98fa9b07d419",
"account": "1212729",
"display_name": "baseline2",
"fact_count": 1,
"created": "2019-10-17T16:23:34.238952Z",
"updated": "2019-10-17T16:25:34.041645Z",
"baseline_facts": [{"name": "arch", "value": "golden"}],
},
]
FETCH_SYSTEMS_WITH_PROFILES_CAPTURED_DATE_RESULT = [
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fa",
"created": "2019-01-31T13:00:00.100010Z",
"display_name": None,
"fqdn": "fake_system_99.example.com",
"id": "fc1e497a-28ae-11e9-afd9-c85b761454fa",
"insights_id": "01791a58-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "fdf8:f53e:61e4::18"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"captured_date": "2020-03-30T18:42:23+00:00",
"salutation": "hello",
"fqdn": "hostname_two",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
"cpu_flags": ["maryland"],
"system_memory_bytes": 640,
"yum_repos": [{"name": "yummy", "enabled": False}, {"no_name": "bleh"}],
"network_interfaces": [
{
"name": "eth99",
"mtu": 3,
"ipv4_addresses": ["172.16.58.3"],
"ipv6_addresses": ["00:00:02"],
},
{"no_name": "foo"},
],
"system_profile_exists": True,
"id": "fc1e497a-28ae-11e9-afd9-c85b761454fa",
},
"tags": [],
"updated": "2019-01-31T14:00:00.500000Z",
},
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb",
"created": "2018-01-31T13:00:00.100010Z",
"display_name": "hello",
"fqdn": "fake_system_99.example.com",
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"insights_id": "00000000-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "fdf8:f53e:61e4::18"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"captured_date": "2020-03-30T18:42:23+00:00",
"salutation": "hi",
"fqdn": "hostname_one",
"system_profile_exists": True,
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
"network_interfaces": [
{
"name": "eth99",
"mtu": 3,
"ipv4_addresses": ["172.16.58.3"],
"ipv6_addresses": ["00:00:01"],
},
{"no_name": "foo"},
],
},
"tags": [],
"updated": "2018-01-31T14:00:00.500000Z",
},
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb",
"created": "2018-01-31T13:00:00.100010Z",
"display_name": None,
"fqdn": "hostname_one",
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"insights_id": "00000000-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "fdf8:f53e:61e4::18"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"captured_date": "2020-03-30T18:42:23+00:00",
"salutation": "hi",
"fqdn": "hostname_one",
"system_profile_exists": False,
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
"network_interfaces": [
{
"name": "eth99",
"mtu": 3,
"ipv4_addresses": ["172.16.58.3"],
"ipv6_addresses": ["00:00:01"],
},
{"no_name": "foo"},
],
},
"tags": [],
"updated": "2018-01-31T14:00:00.500000Z",
},
]
FETCH_SYSTEMS_WITH_PROFILES_RESULT = [
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fa",
"created": "2019-01-31T13:00:00.100010Z",
"display_name": None,
"fqdn": "fake_system_99.example.com",
"id": "fc1e497a-28ae-11e9-afd9-c85b761454fa",
"insights_id": "01791a58-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "fdf8:f53e:61e4::18"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"salutation": "hello",
"fqdn": "hostname_two",
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
"cpu_flags": ["maryland"],
"system_memory_bytes": 640,
"yum_repos": [{"name": "yummy", "enabled": False}, {"no_name": "bleh"}],
"network_interfaces": [
{
"name": "eth99",
"mtu": 3,
"ipv4_addresses": ["172.16.58.3"],
"ipv6_addresses": ["00:00:02"],
},
{"no_name": "foo"},
],
"enabled_services": ["insights_client"],
"system_profile_exists": True,
"id": "fc1e497a-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
},
"tags": [],
"updated": "2019-01-31T14:00:00.500000Z",
},
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb",
"created": "2018-01-31T13:00:00.100010Z",
"display_name": "hello",
"fqdn": "fake_system_99.example.com",
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"insights_id": "00000000-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "fdf8:f53e:61e4::18"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"salutation": "hi",
"fqdn": "hostname_one",
"system_profile_exists": True,
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
"network_interfaces": [
{
"name": "eth99",
"mtu": 3,
"ipv4_addresses": ["172.16.58.3"],
"ipv6_addresses": ["00:00:01"],
},
{"no_name": "foo"},
],
},
"tags": [],
"updated": "2018-01-31T14:00:00.500000Z",
},
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb",
"created": "2018-01-31T13:00:00.100010Z",
"display_name": None,
"fqdn": "hostname_one",
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"insights_id": "00000000-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "fdf8:f53e:61e4::18"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"salutation": "hi",
"fqdn": "hostname_one",
"system_profile_exists": False,
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
"network_interfaces": [
{
"name": "eth99",
"mtu": 3,
"ipv4_addresses": ["172.16.58.3"],
"ipv6_addresses": ["00:00:01"],
},
{"no_name": "foo"},
],
},
"tags": [],
"updated": "2018-01-31T14:00:00.500000Z",
},
]
FETCH_SYSTEM_PROFILES_INV_SVC = """
{
"count": 1,
"total": 1,
"page": 1,
"per_page": 50,
"results": [
{
"id": "243926fa-262f-11e9-a632-c85b761454fa",
"system_profile": {
"arch": "x86_64",
"bios_vendor": "SeaBIOS",
"bios_version": "?-20180531_142017-buildhw-08.phx2.fedoraproject.org-1.fc28",
"cores_per_socket": 1,
"cpu_flags": [ "fpu", "vme" ],
"enabled_services": ["auditd", "chronyd", "crond" ],
"infrastructure_type": "virtual",
"infrastructure_vendor": "kvm",
"installed_packages": ["0:bash-4.4.19-7.el8", "0:chrony-3.3-3.el8",
"0:dnf-4.0.9.2-4.el8", "1:NetworkManager-1.14.0-14.el8"],
"installed_services": [ "arp-ethers", "auditd", "autovt@", "chronyd", "cpupower"],
"kernel_modules": [ "kvm", "pcspkr", "joydev", "xfs"],
"last_boot_time": "2019-03-25T19:32:18",
"network_interfaces": [
{
"ipv4_addresses": ["127.0.0.1"],
"ipv6_addresses": ["::1"],
"mac_address": "00:00:00:00:00:00",
"mtu": 65536,
"name": "lo",
"state": "UNKNOWN",
"type": "loopback"
},
{
"ipv4_addresses": ["192.168.0.1"],
"ipv6_addresses": ["fe80::5054:ff::0001"],
"mac_address": "52:54:00:00:00:00",
"mtu": 1500,
"name": "eth0",
"state": "UP",
"type": "ether"
}
],
"number_of_cpus": 2,
"number_of_sockets": 2,
"os_kernel_version": "4.18.0",
"running_processes": [ "watchdog/1", "systemd-logind", "md", "ksmd", "sshd" ],
"system_memory_bytes": 1917988864,
"yum_repos": [
{
"base_url": "https://cdn.example.com/content/freedos/1.0/i386/os",
"enabled": true,
"gpgcheck": true,
"name": "freedos 1.0 repo i386"
},
{
"base_url": "https://cdn.example.com/content/freedos/1.0/z80/os",
"enabled": false,
"gpgcheck": true,
"name": "freedos 1.0 repo z80"
}
]
}
}
],
"total": 1
}
"""
FETCH_SYSTEMS_WITH_PROFILES_SAME_FACTS_RESULT = [
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fa",
"created": "2019-01-31T13:00:00.100010Z",
"display_name": None,
"system_profile": {
"salutation": "howdy",
"system_profile_exists": True,
"id": "fc1e497a-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
},
"fqdn": "fake_system_99.example.com",
"id": "fc1e497a-28ae-11e9-afd9-c85b761454fa",
"insights_id": "01791a58-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "fdf8:f53e:61e4::18"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"tags": [],
"updated": "2019-01-31T14:00:00.500000Z",
},
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb",
"created": "2018-01-31T13:00:00.100010Z",
"display_name": None,
"system_profile": {
"salutation": "howdy",
"system_profile_exists": True,
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
},
"fqdn": "fake_system_99.example.com",
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"insights_id": "00000000-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "fdf8:f53e:61e4::18"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"tags": [],
"updated": "2018-01-31T14:00:00.500000Z",
},
]
FETCH_SYSTEM_TAGS = """
{
"total": 1,
"count": 1,
"page": 1,
"per_page": 50,
"results": {
"ec67f65c-2bc8-4ce8-82e2-6a27cada8d31": [
{
"namespace": "insights-client",
"key": "group",
"value": "XmygroupX"
}
]
}
}
"""
FETCH_SYSTEMS_INV_SVC = """
{
"count": 2,
"total": 2,
"page": 1,
"per_page": 50,
"results": [
{
"account": "1234567",
"bios_uuid": "dc43976c263411e9bcf0c85b761454fa",
"created": "2018-12-01T12:00:00.000000Z",
"display_name": "system1.example.com",
"fqdn": "system.example.com",
"id": "243926fa-262f-11e9-a632-c85b761454fa",
"insights_id": "TEST-ID00-0000-0000",
"ip_addresses": [
"10.0.0.1",
"10.0.0.2"
],
"mac_addresses": [
"c2:00:d0:c8:00:01"
],
"subscription_manager_id": "1234FAKE1234",
"tags": [],
"updated": "2018-12-31T12:00:00.000000Z",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z"
},
{
"account": "1234567",
"bios_uuid": "ec43976c263411e9bcf0c85b761454fa",
"created": "2018-12-01T12:00:00.000000Z",
"display_name": "system2.example.com",
"fqdn": "system2.example.com",
"id": "264fb5b2-262f-11e9-9b12-c85b761454fa",
"insights_id": "TEST-ID22-2222-2222",
"ip_addresses": [
"10.0.0.3",
"10.0.0.4"
],
"mac_addresses": [
"ec2:00:d0:c8:00:01"
],
"subscription_manager_id": "2222FAKE2222",
"tags": [],
"updated": "2018-12-31T12:00:00.000000Z",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z"
}
]}"""
SYSTEM_NOT_FOUND_TEMPLATE = """
{
"count": 0,
"page": 1,
"per_page": 50,
"results": [],
"total": 0
}
""" | 0.329715 | 0.248831 |
from .helpers import *
import random
#DIRT = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"dirt.png")
#ROCK = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"rock.png")
#GRASS = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"grass.png")
#WATER = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"water.png")
#OBSTACLE = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"obstacle.png")
BORDER1 = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"border1.png")
BORDER2 = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"border2.png")
BORDER3 = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"border3.png")
BORDER4 = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"border4.png")
GRASS1 = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"grass1.png")
GRASS2 = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"grass2.png")
GRASS3 = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"grass3.png")
WATER1 = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"water1.png")
WATER2 = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"water2.png")
BORDERS_EX = (BORDER1, BORDER2)
BORDERS_IN = (BORDER3, BORDER4)
GRASS = (GRASS1, GRASS2, GRASS3)
WATER = (WATER1, WATER2)
def borderEx():
return BORDERS_EX[random.randrange(0, len(BORDERS_EX))]
def borderIn():
return BORDERS_IN[random.randrange(0, len(BORDERS_IN))]
def grass():
return GRASS[random.randrange(0, len(GRASS))]
def water():
return WATER[random.randrange(0, len(WATER))]
TILE_WIDTH = 64
TILE_HEIGHT = 64
MAP_WIDTH = 12
MAP_HEIGHT = 12
class Tile():
def __init__(self):
#12x12 map
self.tileMap = [[borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx()],
[borderEx(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderEx()],
[borderEx(),borderIn(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),borderIn(),borderEx()],
[borderEx(),borderIn(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),borderIn(),borderEx()],
[borderEx(),borderIn(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),borderIn(),borderEx()],
[WATER1,WATER2,WATER1,WATER2,WATER1,WATER2,WATER1,WATER2,WATER1,WATER2,WATER1,WATER2],
[WATER2,WATER1,WATER2,WATER1,WATER2,WATER1,WATER2,WATER1,WATER2,WATER1,WATER2,WATER1],
[borderEx(),borderIn(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),borderIn(),borderEx()],
[borderEx(),borderIn(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),borderIn(),borderEx()],
[borderEx(),borderIn(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),borderIn(),borderEx()],
[borderEx(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderEx()],
[borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx()]]
self.tileMap_rect = [[tile.get_rect() for tile in row] for row in self.tileMap]
def draw(self, screen):
for i in range(0,MAP_HEIGHT):
for j in range(0,MAP_WIDTH):
#centerx and centery + 32 because it's the center, not a corner
self.tileMap_rect[j][i].centerx = 64*i+32
self.tileMap_rect[j][i].centery = 64*j+32
screen.blit(self.tileMap[j][i], self.tileMap_rect[j][i]) | src/tile.py | from .helpers import *
import random
#DIRT = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"dirt.png")
#ROCK = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"rock.png")
#GRASS = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"grass.png")
#WATER = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"water.png")
#OBSTACLE = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"obstacle.png")
BORDER1 = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"border1.png")
BORDER2 = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"border2.png")
BORDER3 = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"border3.png")
BORDER4 = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"border4.png")
GRASS1 = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"grass1.png")
GRASS2 = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"grass2.png")
GRASS3 = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"grass3.png")
WATER1 = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"water1.png")
WATER2 = load_image("assets"+os.sep+"img"+os.sep+"tiles"+os.sep+"water2.png")
BORDERS_EX = (BORDER1, BORDER2)
BORDERS_IN = (BORDER3, BORDER4)
GRASS = (GRASS1, GRASS2, GRASS3)
WATER = (WATER1, WATER2)
def borderEx():
return BORDERS_EX[random.randrange(0, len(BORDERS_EX))]
def borderIn():
return BORDERS_IN[random.randrange(0, len(BORDERS_IN))]
def grass():
return GRASS[random.randrange(0, len(GRASS))]
def water():
return WATER[random.randrange(0, len(WATER))]
TILE_WIDTH = 64
TILE_HEIGHT = 64
MAP_WIDTH = 12
MAP_HEIGHT = 12
class Tile():
def __init__(self):
#12x12 map
self.tileMap = [[borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx()],
[borderEx(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderEx()],
[borderEx(),borderIn(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),borderIn(),borderEx()],
[borderEx(),borderIn(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),borderIn(),borderEx()],
[borderEx(),borderIn(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),borderIn(),borderEx()],
[WATER1,WATER2,WATER1,WATER2,WATER1,WATER2,WATER1,WATER2,WATER1,WATER2,WATER1,WATER2],
[WATER2,WATER1,WATER2,WATER1,WATER2,WATER1,WATER2,WATER1,WATER2,WATER1,WATER2,WATER1],
[borderEx(),borderIn(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),borderIn(),borderEx()],
[borderEx(),borderIn(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),borderIn(),borderEx()],
[borderEx(),borderIn(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),grass(),borderIn(),borderEx()],
[borderEx(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderIn(),borderEx()],
[borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx(),borderEx()]]
self.tileMap_rect = [[tile.get_rect() for tile in row] for row in self.tileMap]
def draw(self, screen):
for i in range(0,MAP_HEIGHT):
for j in range(0,MAP_WIDTH):
#centerx and centery + 32 because it's the center, not a corner
self.tileMap_rect[j][i].centerx = 64*i+32
self.tileMap_rect[j][i].centery = 64*j+32
screen.blit(self.tileMap[j][i], self.tileMap_rect[j][i]) | 0.085013 | 0.097993 |
def root(context, missing=missing, environment=environment):
resolve = context.resolve_or_missing
undefined = environment.undefined
if 0:
yield None
l_0_adquery = resolve("adquery")
l_0_entity_def_id = resolve("entity_def_id")
l_0_prj_prefix = resolve("prj_prefix")
l_0_verb_name = resolve("verb_name")
l_0_entity_ids = l_0_process_descriptor_id = l_0_process_descriptor_ref_id = missing
t_1 = environment.filters["suggest"]
pass
l_0_entity_ids = context.call(
(undefined(name="adquery") if l_0_adquery is missing else l_0_adquery),
"SELECT KEYNAME FROM CCADMIN_IDMAP WHERE KEYSET ='ED'",
)
context.vars["entity_ids"] = l_0_entity_ids
context.exported_vars.add("entity_ids")
yield to_string(
t_1(
(
undefined(name="entity_def_id")
if l_0_entity_def_id is missing
else l_0_entity_def_id
),
(
undefined(name="entity_ids")
if l_0_entity_ids is missing
else l_0_entity_ids
),
)
)
yield "\n"
l_0_process_descriptor_id = (
context.call(
(
undefined(name="prj_prefix")
if l_0_prj_prefix is missing
else l_0_prj_prefix
)
)
+ context.call(
environment.getattr(
(
undefined(name="entity_def_id")
if l_0_entity_def_id is missing
else l_0_entity_def_id
),
"capitalize",
)
)
) + context.call(
environment.getattr(
(
undefined(name="verb_name")
if l_0_verb_name is missing
else l_0_verb_name
),
"capitalize",
)
)
context.vars["process_descriptor_id"] = l_0_process_descriptor_id
context.exported_vars.add("process_descriptor_id")
template = environment.get_template("add_process_descriptor.sql", "rewire_verb.sql")
for event in template.root_render_func(
template.new_context(
context.get_all(),
True,
{
"process_descriptor_id": l_0_process_descriptor_id,
"process_descriptor_ref_id": l_0_process_descriptor_ref_id,
"entity_ids": l_0_entity_ids,
},
)
):
yield event
l_0_process_descriptor_ref_id = (
undefined(name="process_descriptor_id")
if l_0_process_descriptor_id is missing
else l_0_process_descriptor_id
)
context.vars["process_descriptor_ref_id"] = l_0_process_descriptor_ref_id
context.exported_vars.add("process_descriptor_ref_id")
template = environment.get_template(
"add_process_descriptor_ref.sql", "rewire_verb.sql"
)
for event in template.root_render_func(
template.new_context(
context.get_all(),
True,
{
"process_descriptor_id": l_0_process_descriptor_id,
"process_descriptor_ref_id": l_0_process_descriptor_ref_id,
"entity_ids": l_0_entity_ids,
},
)
):
yield event
yield "\n\nUPDATE EVA_VERB \nSET (PROCESS_DESC_REF_ID) = (@PDR.%s)\nWHERE ENTITY_DEF_ID = @ED.%s AND NAME ='%s';" % (
(
undefined(name="process_descriptor_ref_id")
if l_0_process_descriptor_ref_id is missing
else l_0_process_descriptor_ref_id
),
(
undefined(name="entity_def_id")
if l_0_entity_def_id is missing
else l_0_entity_def_id
),
(undefined(name="verb_name") if l_0_verb_name is missing else l_0_verb_name),
) | sql_gen/test/playground/rewire_verb_compiled.py | def root(context, missing=missing, environment=environment):
resolve = context.resolve_or_missing
undefined = environment.undefined
if 0:
yield None
l_0_adquery = resolve("adquery")
l_0_entity_def_id = resolve("entity_def_id")
l_0_prj_prefix = resolve("prj_prefix")
l_0_verb_name = resolve("verb_name")
l_0_entity_ids = l_0_process_descriptor_id = l_0_process_descriptor_ref_id = missing
t_1 = environment.filters["suggest"]
pass
l_0_entity_ids = context.call(
(undefined(name="adquery") if l_0_adquery is missing else l_0_adquery),
"SELECT KEYNAME FROM CCADMIN_IDMAP WHERE KEYSET ='ED'",
)
context.vars["entity_ids"] = l_0_entity_ids
context.exported_vars.add("entity_ids")
yield to_string(
t_1(
(
undefined(name="entity_def_id")
if l_0_entity_def_id is missing
else l_0_entity_def_id
),
(
undefined(name="entity_ids")
if l_0_entity_ids is missing
else l_0_entity_ids
),
)
)
yield "\n"
l_0_process_descriptor_id = (
context.call(
(
undefined(name="prj_prefix")
if l_0_prj_prefix is missing
else l_0_prj_prefix
)
)
+ context.call(
environment.getattr(
(
undefined(name="entity_def_id")
if l_0_entity_def_id is missing
else l_0_entity_def_id
),
"capitalize",
)
)
) + context.call(
environment.getattr(
(
undefined(name="verb_name")
if l_0_verb_name is missing
else l_0_verb_name
),
"capitalize",
)
)
context.vars["process_descriptor_id"] = l_0_process_descriptor_id
context.exported_vars.add("process_descriptor_id")
template = environment.get_template("add_process_descriptor.sql", "rewire_verb.sql")
for event in template.root_render_func(
template.new_context(
context.get_all(),
True,
{
"process_descriptor_id": l_0_process_descriptor_id,
"process_descriptor_ref_id": l_0_process_descriptor_ref_id,
"entity_ids": l_0_entity_ids,
},
)
):
yield event
l_0_process_descriptor_ref_id = (
undefined(name="process_descriptor_id")
if l_0_process_descriptor_id is missing
else l_0_process_descriptor_id
)
context.vars["process_descriptor_ref_id"] = l_0_process_descriptor_ref_id
context.exported_vars.add("process_descriptor_ref_id")
template = environment.get_template(
"add_process_descriptor_ref.sql", "rewire_verb.sql"
)
for event in template.root_render_func(
template.new_context(
context.get_all(),
True,
{
"process_descriptor_id": l_0_process_descriptor_id,
"process_descriptor_ref_id": l_0_process_descriptor_ref_id,
"entity_ids": l_0_entity_ids,
},
)
):
yield event
yield "\n\nUPDATE EVA_VERB \nSET (PROCESS_DESC_REF_ID) = (@PDR.%s)\nWHERE ENTITY_DEF_ID = @ED.%s AND NAME ='%s';" % (
(
undefined(name="process_descriptor_ref_id")
if l_0_process_descriptor_ref_id is missing
else l_0_process_descriptor_ref_id
),
(
undefined(name="entity_def_id")
if l_0_entity_def_id is missing
else l_0_entity_def_id
),
(undefined(name="verb_name") if l_0_verb_name is missing else l_0_verb_name),
) | 0.171512 | 0.151372 |
import matplotlib.pyplot as plt
'''
Takes a list of lists, representing a (n x m) grayscale image, and flips it over the vertical axis
n := rows
m := columns
'''
def flipImage(image):
start = 0
start = 0
end = len(image) - 1
# Run loop to switch image[start] and image[end] rows until start++ and end--
# pass each other (when n = even) or they equal each other (when n = odd)
while (start < end):
tempList = image[start]
image[start] = image[end]
image[end] = tempList
start += 1
end -= 1
'''
Calculate the average intensity and average symmetry for digits 1 and 5
linelist := 16x16 grayscale image (represented as a list of lists)
digitListX := list of calculated average intensity of the digits
digitListY := list of calculated average symmetry of the digits
'''
def intensityAndSymmetry(linelist, digitListX, digitListY):
digitList = []
intensity = 0
tempList = []
for i in range(1, len(linelist)):
tempList.append(float(linelist[i]))
# Add to intensity
intensity += float(linelist[i])
# Add row of 16 grayscale values to the overall list
if (len(tempList) == 16):
digitList.append(tempList)
tempList = []
# Calculate the average intensity as the average
averageIntensity = intensity / len(linelist)
# Save the average intensity as an x-coordinate value
digitListX.append(averageIntensity)
# Make a copy of the grayscale values for the original image
digitCopy = digitList.copy()
# Flip the image over horizontal axis
flipImage(digitList)
# Calculate asymmetry as the absolute difference between an image and its flipped version
# and symmetry being the negation of asymmetry
asymmetryValue = 0
for i in range(len(digitList)):
for j in range(len(digitList[i])):
asymmetryValue += abs(digitCopy[i][j] - digitList[i][j])
averageAsymmetry = asymmetryValue / len(digitList)
# Save the average symmetry as an y-coordinate value
digitListY.append(-averageAsymmetry)
if __name__ == "__main__":
'''
Open data file that contains digit data about 1s and 5s with the first value in each line being
the digit value, and the 256 values following that be the 16x16 grayscale image values
(on a scale from -1 to 1, -1 being dark pixels and 1 being light pixels)
'''
input = open("Only1sAnd5sTraining.txt", "r")
# input = open("Only1sAnd5sTest.txt", "r")
# Average intensity (as a list of x-coordinates) of digit 1
oneX = []
# Average symmetry (as a list of y-coordinates) of digit 1
oneY = []
# Average intensity (as a list of x-coordinates) of digit 5
fiveX = []
# Average symmetry (as a list of y-coordinates) of digit 5
fiveY = []
# Loop through the file for every digit data
for line in input:
linelist = line.strip().split(" ")
# Calculate the average intensity and average symmetry value for a handwritten digit 5
if (int(float(linelist[0])) == 5):
intensityAndSymmetry(linelist, fiveX, fiveY)
# Calculate the average intensity and average symmetry value for a handwritten digit 1
if (int(float(linelist[0])) == 1):
intensityAndSymmetry(linelist, oneX, oneY)
input.close()
# Plot the values as a scatterplot, with the x-axis being the average intensity, and the
# y-axis being the average symmetry value, and see the classification separation
# between the 1s and the 5s
plt.xlabel("Average Intensity")
plt.ylabel("Average Symmetry")
plt.title("Digit 1 and 5 Comparison")
plt.scatter(oneX, oneY, s=20, color="blue", marker="o", label="digit 1")
plt.scatter(fiveX, fiveY, s=20, color="red", marker="x", label="digit 5")
plt.legend(loc="upper right")
plt.show() | Handwritten Digit Classification/compare1And5.py | import matplotlib.pyplot as plt
'''
Takes a list of lists, representing a (n x m) grayscale image, and flips it
over the horizontal axis (reverses the order of its rows)
n := rows
m := columns
'''
def flipImage(image):
    """Reverse the rows of *image* in place (mirror over the horizontal axis).

    Fixes: the original carried a duplicated ``start = 0`` initialiser, and
    its description claimed a vertical-axis flip although the two-pointer
    row swap it performed is exactly ``list.reverse()``.
    """
    image.reverse()
'''
Calculate the average intensity and average symmetry for digits 1 and 5
linelist := 16x16 grayscale image (represented as a list of lists)
digitListX := list of calculated average intensity of the digits
digitListY := list of calculated average symmetry of the digits
'''
def intensityAndSymmetry(linelist, digitListX, digitListY):
    """Compute average intensity and (negated) asymmetry for one digit sample.

    linelist   -- list of strings: the digit label followed by the grayscale values
    digitListX -- output list; the average intensity is appended as an x-coordinate
    digitListY -- output list; the negated average asymmetry is appended as a y-coordinate
    """
    # Parse the grayscale values (everything after the leading label).
    values = [float(v) for v in linelist[1:]]
    # Group into complete rows of 16; a trailing partial row is dropped from
    # the image but still contributes to the intensity sum, exactly as the
    # original accumulation did.
    rows = [values[i:i + 16] for i in range(0, len(values) - 15, 16)]
    # Average intensity divides by the full line length (label included),
    # matching the original computation.
    digitListX.append(sum(values) / len(linelist))
    # Asymmetry: element-wise absolute difference between the image and its
    # vertically flipped copy (rows reversed), averaged over the row count;
    # symmetry is recorded as its negation.
    flipped = rows[::-1]
    asymmetry = sum(
        abs(a - b)
        for row, mirror in zip(rows, flipped)
        for a, b in zip(row, mirror)
    )
    digitListY.append(-(asymmetry / len(rows)))
if __name__ == "__main__":
    '''
    Open data file that contains digit data about 1s and 5s with the first value in each line being
    the digit value, and the 256 values following that be the 16x16 grayscale image values
    (on a scale from -1 to 1, -1 being dark pixels and 1 being light pixels)
    '''
    # NOTE(review): 'input' shadows the builtin; file must exist in the cwd.
    input = open("Only1sAnd5sTraining.txt", "r")
    # input = open("Only1sAnd5sTest.txt", "r")
    # Average intensity (as a list of x-coordinates) of digit 1
    oneX = []
    # Average symmetry (as a list of y-coordinates) of digit 1
    oneY = []
    # Average intensity (as a list of x-coordinates) of digit 5
    fiveX = []
    # Average symmetry (as a list of y-coordinates) of digit 5
    fiveY = []
    # Loop through the file for every digit data
    for line in input:
        linelist = line.strip().split(" ")
        # Calculate the average intensity and average symmetry value for a handwritten digit 5
        if (int(float(linelist[0])) == 5):
            intensityAndSymmetry(linelist, fiveX, fiveY)
        # Calculate the average intensity and average symmetry value for a handwritten digit 1
        if (int(float(linelist[0])) == 1):
            intensityAndSymmetry(linelist, oneX, oneY)
    input.close()
    # Plot the values as a scatterplot, with the x-axis being the average intensity, and the
    # y-axis being the average symmetry value, and see the classification separation
    # between the 1s and the 5s
    plt.xlabel("Average Intensity")
    plt.ylabel("Average Symmetry")
    plt.title("Digit 1 and 5 Comparison")
    plt.scatter(oneX, oneY, s=20, color="blue", marker="o", label="digit 1")
    plt.scatter(fiveX, fiveY, s=20, color="red", marker="x", label="digit 5")
    plt.legend(loc="upper right")
plt.show() | 0.579757 | 0.74382 |
from django.db import models
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import PermissionsMixin
class UserManager(BaseUserManager):
    """Manager for the email-login user model; email replaces the username."""
    def _create_user(self, email, password, **extra_fields):
        """
        Creates and saves a User with the given email and password.
        """
        if not email:
            raise ValueError('The given email must be set')
        # Normalise the domain part of the address before storing.
        email = self.normalize_email(email)
        user = self.model(email=email, **extra_fields)
        # Hashes the password; never stored in plain text.
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_user(self, email, password=None, **extra_fields):
        """Create a regular account; active by default, never a superuser."""
        extra_fields.setdefault('is_superuser', False)
        extra_fields.setdefault('is_active', True)
        return self._create_user(email, password, **extra_fields)
    def create_superuser(self, email, password, **extra_fields):
        """Create a superuser; defaults is_superuser/is_admin/is_active to True."""
        extra_fields.setdefault('is_superuser', True)
        extra_fields.setdefault('is_admin', True)
        extra_fields.setdefault('is_active', True)
        # Guard against a caller explicitly passing is_superuser=False.
        if extra_fields.get('is_superuser') is not True:
            raise ValueError('Superuser must have is_superuser=True.')
        return self._create_user(email, password, **extra_fields)
class CoreUser(AbstractBaseUser, PermissionsMixin):
    """Email-login user account, optionally linked to an api.Employee record."""
    # Optional link to the employee profile; the link is nulled (SET_NULL)
    # when the employee record is deleted.  NOTE(review): a field named
    # 'user' on the user model with related_name='user' reads confusingly —
    # confirm the intended naming.
    user = models.OneToOneField(
        'api.Employee',
        null=True,
        on_delete=models.SET_NULL,
        related_name='user',
    )
    # Unique login identifier (see USERNAME_FIELD below).
    email = models.EmailField(max_length=100, unique=True)
    first_name = models.CharField(max_length=191, blank=True, null=True)
    last_name = models.CharField(max_length=191, blank=True, null=True)
    is_superuser = models.BooleanField(default=False)
    # Drives Django admin access via the is_staff property below.
    is_admin = models.BooleanField(default=False)
    # Accounts start inactive until explicitly activated.
    is_active = models.BooleanField(default=False)
    USERNAME_FIELD = 'email'
    objects = UserManager()
    class Meta:
        verbose_name = 'Пользователь'
        verbose_name_plural = 'Пользователи'
    def __str__(self):
        # "first.last" display form; yields "None.None" when names are unset.
        return '{}.{}'.format(self.first_name, self.last_name)
    def get_short_name(self):
        """Return the user's first name (Django convention)."""
        return self.first_name
@property
def is_staff(self):
return self.is_admin | backend/authentication/models.py | from django.db import models
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import PermissionsMixin
class UserManager(BaseUserManager):
def _create_user(self, email, password, **extra_fields):
"""
Creates and saves a User with the given email and password.
"""
if not email:
raise ValueError('The given email must be set')
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
extra_fields.setdefault('is_superuser', False)
extra_fields.setdefault('is_active', True)
return self._create_user(email, password, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
extra_fields.setdefault('is_superuser', True)
extra_fields.setdefault('is_admin', True)
extra_fields.setdefault('is_active', True)
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(email, password, **extra_fields)
class CoreUser(AbstractBaseUser, PermissionsMixin):
user = models.OneToOneField(
'api.Employee',
null=True,
on_delete=models.SET_NULL,
related_name='user',
)
email = models.EmailField(max_length=100, unique=True)
first_name = models.CharField(max_length=191, blank=True, null=True)
last_name = models.CharField(max_length=191, blank=True, null=True)
is_superuser = models.BooleanField(default=False)
is_admin = models.BooleanField(default=False)
is_active = models.BooleanField(default=False)
USERNAME_FIELD = 'email'
objects = UserManager()
class Meta:
verbose_name = 'Пользователь'
verbose_name_plural = 'Пользователи'
def __str__(self):
return '{}.{}'.format(self.first_name, self.last_name)
def get_short_name(self):
return self.first_name
@property
def is_staff(self):
return self.is_admin | 0.372505 | 0.075756 |
import shapely.geometry
import shapely.geos
import esridump
GEO_URLS = {
'tracts': {
2000: 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2020/tigerWMS_Census2000/MapServer/8',
2010: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/14',
2011: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/14',
2012: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/14',
2013: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2013/MapServer/8',
2014: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2014/MapServer/8',
2015: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2015/MapServer/8',
2016: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2016/MapServer/8',
2017: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2017/MapServer/8',
2018: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2018/MapServer/8',
2019: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2019/MapServer/8',
2020: 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2020/tigerWMS_Census2020/MapServer/6',
},
'block groups': {
2000: 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2020/tigerWMS_Census2000/MapServer/10',
2010: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/16',
2011: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/16',
2012: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/16',
2013: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2013/MapServer/10',
2014: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2014/MapServer/10',
2015: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2015/MapServer/10',
2016: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2016/MapServer/10',
2017: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2017/MapServer/10',
2018: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2017/MapServer/10',
2019: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2018/MapServer/10',
2020: 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2020/tigerWMS_Census2020/MapServer/8',
},
'blocks': {
2000: 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2020/tigerWMS_Census2000/MapServer/12',
2010: 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2020/tigerWMS_Census2010/MapServer/14',
2020: 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2020/tigerWMS_Census2020/MapServer/10',
},
'incorporated places': {
2000: 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2020/tigerWMS_Census2000/MapServer/26',
2010: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/34',
2011: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/34',
2012: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/34',
2013: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2013/MapServer/26',
2014: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2014/MapServer/26',
2015: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2015/MapServer/26',
2016: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2016/MapServer/26',
2017: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2017/MapServer/26',
2018: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2017/MapServer/28',
2019: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2017/MapServer/28',
2020: 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2020/tigerWMS_Census2020/MapServer/26',
}
}
class AreaFilter(object):
    """Iterate Census geographies from an Esri MapServer layer that overlap a
    GeoJSON geometry, yielding (feature, overlap_proportion) pairs."""
    def __init__(self, geojson_geometry, sub_geography_url):
        # Target geometry; its bounding box seeds the server-side envelope query.
        self.geo = shapely.geometry.shape(geojson_geometry)
        geo_query_args = {'geometry': ','.join(str(x) for x in self.geo.bounds),
                          'geometryType': 'esriGeometryEnvelope',
                          'spatialRel': 'esriSpatialRelEnvelopeIntersects',
                          'inSR': '4326',
                          'geometryPrecision': 9,
                          'orderByFields': 'STATE,COUNTY,TRACT,OID'}
        self.area_dumper = esridump.EsriDumper(sub_geography_url,
                                               extra_query_args=geo_query_args)
    def __iter__(self):
        for area in self.area_dumper:
            area_geo = shapely.geometry.shape(area['geometry'])
            # The server filter matched bounding boxes only; verify a true
            # geometric intersection here.
            if self.geo.intersects(area_geo):
                try:
                    intersection = self.geo.intersection(area_geo)
                except shapely.geos.TopologicalError:
                    # buffer(0) repairs invalid geometries before retrying.
                    # NOTE(review): shapely.geos.TopologicalError exists in
                    # shapely 1.x only — confirm the pinned shapely version.
                    intersection = self.geo.buffer(0).intersection(area_geo.buffer(0))
                intersection_proportion = intersection.area / area_geo.area
                # Ignore slivers covering under 1% of the sub-geography.
                if intersection_proportion > 0.01:
yield area, intersection_proportion | census_area/core.py | import shapely.geometry
import shapely.geos
import esridump
GEO_URLS = {
'tracts': {
2000: 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2020/tigerWMS_Census2000/MapServer/8',
2010: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/14',
2011: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/14',
2012: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/14',
2013: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2013/MapServer/8',
2014: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2014/MapServer/8',
2015: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2015/MapServer/8',
2016: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2016/MapServer/8',
2017: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2017/MapServer/8',
2018: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2018/MapServer/8',
2019: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2019/MapServer/8',
2020: 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2020/tigerWMS_Census2020/MapServer/6',
},
'block groups': {
2000: 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2020/tigerWMS_Census2000/MapServer/10',
2010: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/16',
2011: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/16',
2012: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/16',
2013: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2013/MapServer/10',
2014: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2014/MapServer/10',
2015: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2015/MapServer/10',
2016: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2016/MapServer/10',
2017: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2017/MapServer/10',
2018: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2017/MapServer/10',
2019: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2018/MapServer/10',
2020: 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2020/tigerWMS_Census2020/MapServer/8',
},
'blocks': {
2000: 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2020/tigerWMS_Census2000/MapServer/12',
2010: 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2020/tigerWMS_Census2010/MapServer/14',
2020: 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2020/tigerWMS_Census2020/MapServer/10',
},
'incorporated places': {
2000: 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2020/tigerWMS_Census2000/MapServer/26',
2010: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/34',
2011: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/34',
2012: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_Census2010/MapServer/34',
2013: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2013/MapServer/26',
2014: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2014/MapServer/26',
2015: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2015/MapServer/26',
2016: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2016/MapServer/26',
2017: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2017/MapServer/26',
2018: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2017/MapServer/28',
2019: 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/tigerWMS_ACS2017/MapServer/28',
2020: 'https://tigerweb.geo.census.gov/arcgis/rest/services/Census2020/tigerWMS_Census2020/MapServer/26',
}
}
class AreaFilter(object):
def __init__(self, geojson_geometry, sub_geography_url):
self.geo = shapely.geometry.shape(geojson_geometry)
geo_query_args = {'geometry': ','.join(str(x) for x in self.geo.bounds),
'geometryType': 'esriGeometryEnvelope',
'spatialRel': 'esriSpatialRelEnvelopeIntersects',
'inSR': '4326',
'geometryPrecision': 9,
'orderByFields': 'STATE,COUNTY,TRACT,OID'}
self.area_dumper = esridump.EsriDumper(sub_geography_url,
extra_query_args=geo_query_args)
def __iter__(self):
for area in self.area_dumper:
area_geo = shapely.geometry.shape(area['geometry'])
if self.geo.intersects(area_geo):
try:
intersection = self.geo.intersection(area_geo)
except shapely.geos.TopologicalError:
intersection = self.geo.buffer(0).intersection(area_geo.buffer(0))
intersection_proportion = intersection.area / area_geo.area
if intersection_proportion > 0.01:
yield area, intersection_proportion | 0.560974 | 0.362631 |
from __future__ import annotations
from typing import Dict, Optional, Union # noqa: F401
import datetime
from pathlib import Path
from os import environ
import dataclasses
import myfitnesspal
from pprint import pprint
from dotenv import load_dotenv
import json
import more_itertools as mit
from typing_extensions import TypedDict
# Load MyFitnessPal credentials from fp.env in the working directory.
cwd = Path('.')
load_dotenv(dotenv_path=cwd / 'fp.env', verbose=True, encoding="UTF-8")
username = environ.get('fp_username')
password = environ.get('fp_password')
# Security fix: the original printed the plaintext password to stdout.
# Confirm only whether the credentials were loaded.
print(username, '(password set)' if password else '(password MISSING)')
class DailyTD(TypedDict):
    """JSON-serialisable snapshot of one day's totals (see DailyData.as_dict)."""
    calories: int
    protein: int
    weight: Optional[float]
@dataclasses.dataclass
class DailyData():
    """One day's nutrition totals plus an optional weigh-in.

    weight is None on days with no recorded measurement.
    """
    calories: int
    protein: int
    weight: Optional[float] = None

    def pprint(self) -> None:
        """Print the day's totals.

        Bug fix: the original always formatted ``self.weight`` with ``.1f``,
        raising TypeError on days without a weight measurement; the weight
        line is now omitted when no weigh-in exists.
        """
        lines = [f"        Calories ={self.calories: .0f} Cal",
                 f"        Protein ={self.protein: .0f} g"]
        if self.weight is not None:
            lines.append(f"        Weight ={self.weight: .1f} Kg")
        print("\n" + "\n".join(lines))

    def as_dict(self) -> DailyTD:
        """Return the dataclass fields as a DailyTD-shaped dict."""
        d = dataclasses.asdict(self)
        return DailyTD(calories=d['calories'], protein=d['protein'], weight=d['weight'])
# Log in and pull data from the MyFitnessPal API (network I/O).
client = myfitnesspal.Client(username, password)
aug2020 = datetime.date(2020, 8, 1)
today = datetime.date.today()
# Length of the reporting window [aug2020, today] as a timedelta.
dates = today - aug2020
# Date-ordered mapping of date -> weight measurement.
weights = client.get_measurements('Weight', aug2020, today)
start_weight = mit.first(weights.values())  # list(weights.values())[0]
current_weight = mit.last(weights.values())  # weights.popitem()[1] # list(weights.values())[-1]
data: Dict[datetime.date, DailyData] = {}
total_protein = 0
total_cals = 0
food_logs = 0
# One API round-trip per day in the window.
for day in range(dates.days + 1):
    date = aug2020 + datetime.timedelta(days=day)
    fp_day = client.get_date(date)
    meals = fp_day.totals
    if meals:
        dd = DailyData(meals['calories'], meals['protein'])
        total_protein += meals['protein']
        total_cals += meals['calories']
        food_logs += 1
    else:
        # Nothing logged that day; record zeros so the day still appears.
        dd = DailyData(0, 0)
    # Attach the weigh-in for this date if one exists.
    for x in weights:
        if x == date:
            dd.weight = weights[x]
    data.update({date: dd})
# NOTE(review): ZeroDivisionError if no day in the window has food logged.
average_protein = total_protein / food_logs
average_cals = total_cals / food_logs
weight_lost = current_weight - start_weight
# NOTE(review): 'weight' here holds the weight *change*, not a weight.
overall_data = DailyData(int(average_cals), int(average_protein), weight_lost)
serialised_data = {k.isoformat(): v.as_dict() for k, v in data.items()}
with open("fp.json", mode="w") as file:
    json.dump(serialised_data, file, indent=8)
pprint(data)
overall_data.pprint() | fp/fit_pal.py | from __future__ import annotations
from typing import Dict, Optional, Union # noqa: F401
import datetime
from pathlib import Path
from os import environ
import dataclasses
import myfitnesspal
from pprint import pprint
from dotenv import load_dotenv
import json
import more_itertools as mit
from typing_extensions import TypedDict
cwd = Path('.')
load_dotenv(dotenv_path=cwd / 'fp.env', verbose=True, encoding="UTF-8")
username = environ.get('fp_username')
password = environ.get('fp_password')
print(username, password)
class DailyTD(TypedDict):
calories: int
protein: int
weight: Optional[float]
@ dataclasses.dataclass
class DailyData():
calories: int
protein: int
weight: Optional[float] = None
def pprint(self) -> None:
print(f"""
Calories ={self.calories: .0f} Cal
Protein ={self.protein: .0f} g
Weight ={self.weight: .1f} Kg""")
def as_dict(self) -> DailyTD:
d = dataclasses.asdict(self)
return DailyTD(calories=d['calories'], protein=d['protein'], weight=d['weight'])
client = myfitnesspal.Client(username, password)
aug2020 = datetime.date(2020, 8, 1)
today = datetime.date.today()
dates = today - aug2020
weights = client.get_measurements('Weight', aug2020, today)
start_weight = mit.first(weights.values()) # list(weights.values())[0]
current_weight = mit.last(weights.values()) # weights.popitem()[1] # list(weights.values())[-1]
data: Dict[datetime.date, DailyData] = {}
total_protein = 0
total_cals = 0
food_logs = 0
for day in range(dates.days + 1):
date = aug2020 + datetime.timedelta(days=day)
fp_day = client.get_date(date)
meals = fp_day.totals
if meals:
dd = DailyData(meals['calories'], meals['protein'])
total_protein += meals['protein']
total_cals += meals['calories']
food_logs += 1
else:
dd = DailyData(0, 0)
for x in weights:
if x == date:
dd.weight = weights[x]
data.update({date: dd})
average_protein = total_protein / food_logs
average_cals = total_cals / food_logs
weight_lost = current_weight - start_weight
overall_data = DailyData(int(average_cals), int(average_protein), weight_lost)
serialised_data = {k.isoformat(): v.as_dict() for k, v in data.items()}
with open("fp.json", mode="w") as file:
json.dump(serialised_data, file, indent=8)
pprint(data)
overall_data.pprint() | 0.668664 | 0.110976 |
import cv2
import logging
import pytesseract
import math
import random
import numpy as np
from PIL import Image, ImageDraw
from hanashi.model.rectangle import Rectangle
from hanashi.model.ufarray import UFarray
from hanashi.model.quadtree import Quadtree
from hanashi.model.contour_tree import Tree, Node
logger = logging.getLogger("CCL")
logger.setLevel(logging.INFO)
logging.basicConfig(format='[%(asctime)-15s %(levelname)s] [%(name)s] %(message)s')
def crop_size(verts):
    """
    Calculates the sides of the
    bounding box for a series of points
    :param verts: non-empty iterable of (x, y) pairs
    :return: (x_max, x_min, y_max, y_min)
    :rtype: (int, int, int, int)
    """
    # Transpose the point list into parallel x and y sequences, then take
    # the extremes of each.
    xs, ys = zip(*verts)
    return max(xs), min(xs), max(ys), min(ys)
def show(img):
    """Display *img* in an OpenCV window and block until any key is pressed."""
    cv2.imshow('image', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def cv2_connected_components(img1, min_size=50, max_size=100000):
    """Label connected components and keep mid-sized bounding boxes.

    :param img1: binarised image with dark foreground (inverted below)
    :param min_size: minimum bounding-box area for a component to be kept
    :param max_size: maximum bounding-box area for a component to be kept
    :return: (label count, label image, list of kept bounding Rectangles)
    """
    # OpenCV labels white regions, so invert to make the ink the foreground.
    img1 = cv2.bitwise_not(img1)
    labelnum, labels, stats, centroids = cv2.connectedComponentsWithStats(img1)
    rectangles = []
    # Label 0 is the background; skip it.
    for label in range(1, labelnum):
        x, y, w, h, size = stats[label]
        rect = Rectangle(x,y,w,h)
        # Filter on bounding-box area, not the pixel count in 'size'.
        if min_size < rect.area() < max_size:
            rectangles.append(rect)
    return labelnum, labels, rectangles
def find_bubbles(labelnum, labels, rectangles, img, show_image=False):
    """Build contour containment trees and return mid-sized contours as
    speech-bubble candidates.

    :param labelnum: number of connected-component labels (incl. background)
    :param labels: label image from cv2_connected_components (mutated here)
    :param rectangles: component bounding boxes (only drawn when show_image)
    :param img: the thresholded page image
    :param show_image: when True, render candidates and boxes for debugging
    :return: list of (Node, tree-level) candidate bubbles
    """
    height, width = img.shape
    colors = dict()
    colors[0] = 0
    # Recolour each label with a random grey value (debug visualisation only).
    for label in range(1, labelnum):
        labels[labels == label] = random.randint(0, 255)
    labels = labels.astype(np.uint8)
    img1 = cv2.bitwise_not(img)
    # NOTE(review): 3-value unpack matches the OpenCV 3.x findContours
    # signature; OpenCV 4.x returns only (contours, hierarchy).
    _, contours, hierarchy = cv2.findContours(
        img1, cv2.cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    n = 0
    area = float(labels.shape[0] * labels.shape[1])
    # Contours are binned by size relative to the page: candidate bubbles
    # cover between 1/1000 and 1/10 of the page area.
    medium_tree = Tree()
    large_tree = Tree()
    for elem in hierarchy[0]:
        contour = contours[n]
        x, y, w, h = cv2.boundingRect(contour)
        bounding_box = Rectangle(x, y, w, h)
        box_area = bounding_box.area()
        # elem[3] is the parent contour index in the hierarchy record.
        node = Node(elem[3], n, contour, bounding_box)
        if box_area > area / 10:
            large_tree.add(node)
        elif box_area > area / 1000:
            medium_tree.add(node)
        n += 1
    possible_bubbles = [(node, level)
                        for node, level in
                        medium_tree.level_order_traversal()]
    img2 = np.zeros((height, width, 3), np.uint8)
    if show_image:
        for node, level1 in medium_tree.level_order_traversal():
            if node.n != -1:
                cv2.drawContours(img2, contours, node.n,
                                 (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)), -1)
        for rect in rectangles:
            print("Drawing ", rect)
            img2 = cv2.rectangle(img2, (rect.x, rect.y), (rect.r_bot.x, rect.r_bot.y), (255, 255, 0), 1)
            """
            font = cv2.FONT_HERSHEY_SIMPLEX
            bottomLeftCornerOfText = (rect.l_bot.x, rect.l_bot.y)
            fontScale = 0.4
            fontColor = (255, 255, 255)
            cv2.putText(img2, str(rect.area()),
                        bottomLeftCornerOfText,
                        font,
                        fontScale,
                        fontColor)
            """
        show(img2)
    return possible_bubbles
def adaptive_segmentation(cv2_img):
    """Segment a page by scanning threshold levels upward from its mean.

    Thresholds the page at increasing levels (step 5) starting from the mean
    brightness, stopping once a higher level no longer reduces the component
    count, then keeps only components that fall inside candidate bubbles.

    :param cv2_img: grayscale cv2 image
    :return: (rectangles inside bubbles, list of (bubble, level) candidates)

    Bug fixes: the original returned a 3-tuple ending with the thresholded
    image while its caller unpacks exactly two values (ValueError at runtime),
    and it left labelnum/labels/rectangles/thresholded unbound when the scan
    range was empty (mean >= 230).
    """
    resize_factor = 1
    # No-op at factor 1; kept as the hook for future multi-scale tuning.
    cv2_img = cv2.resize(cv2_img, (0, 0), fx=resize_factor, fy=resize_factor, interpolation=cv2.INTER_LINEAR)
    min_size = 10 * resize_factor
    max_size = 100000 * resize_factor
    prev_len = float("inf")
    mean = int(cv2.mean(cv2_img)[0])
    height, width = cv2_img.shape
    # Always run at least one level so the loop variables are defined.
    levels = list(range(mean, 230, 5)) or [mean]
    for level in levels:
        thresholded = threshold_image(cv2_img.copy(), level)
        labelnum, labels, rectangles = cv2_connected_components(thresholded, min_size, max_size)
        n_rectangles = len(rectangles)
        # Stop as soon as raising the threshold stops reducing the count.
        if n_rectangles < prev_len:
            prev_len = n_rectangles
        else:
            break
    possible_bubbles = find_bubbles(labelnum, labels, rectangles, thresholded)
    rectangles = overlaps(rectangles, possible_bubbles, width, height)
    return rectangles, possible_bubbles
def iterate_rectangles(rectangles1, rectangles2, width, height):
    """Yield (rect, candidate) pairs for spatially nearby rectangles.

    rectangles2 is indexed into a quadtree covering the (width x height)
    page, so each rectangle in rectangles1 is only paired with candidates
    retrieved from its own region instead of the full cross product.
    """
    quadtree = Quadtree(0, Rectangle(0, 0, width, height))
    for rect in rectangles2:
        if rect:
            quadtree.insert(rect)
    for rect in rectangles1:
        neighbours = []
        quadtree.retrieve(neighbours, rect)
        for rect1 in neighbours:
            yield rect, rect1
def merge_bubbles(bubbles, width, height):
    """Merge overlapping bubble candidates.

    NOTE(review): unimplemented stub — always returns None; the call site in
    adaptive_segmentation is commented out.
    """
    pass
def overlaps(rectangles, possible_bubbles, width, height):
    """Keep only the rectangles contained in some candidate bubble box.

    'rect in bubble' relies on Rectangle's containment operator.
    """
    bubbles = [bubble[0].box for bubble in possible_bubbles]
    rectangles2 = [rect for rect, bubble in
                   iterate_rectangles(rectangles, bubbles, width, height)
                   if rect in bubble]
    return rectangles2
def threshold_image(img, level):
    """
    Apply threshold level to the supplied image
    :param img: grayscale cv2 image
    :param level: cutoff; pixels above it become 255, the rest become 0
    :return: the resulting image
    :rtype: cv2 image
    """
    # 5x5 Gaussian blur suppresses speckle noise before binarisation.
    blur = cv2.GaussianBlur(img, (5, 5), 0)
    _, dst = cv2.threshold(blur, level, 255, cv2.THRESH_BINARY)
    return dst
def adaptive_threshold(image):
    """
    Calculate a threshold level from the image's mean brightness.

    Means outside the mid band [100, 230] are raised by 20% to compensate
    for unusually dark or bright pages.

    :param image: the cv2 image (any numpy array of pixel values)
    :return: threshold level
    :rtype: float
    """
    mean = np.mean(image)
    # Bug fix: the original condition ``100 > mean > 230`` can never be true
    # (nothing is simultaneously below 100 and above 230); the intent was to
    # adjust means that fall *outside* the 100..230 band.
    if mean < 100 or mean > 230:
        mean = mean + mean * 0.2
    return mean
def third_pass(rectangles):
    """Keep rectangles that have a rectangle in an adjacent probe region.

    Square probe regions are placed above, below, left and right of each
    rectangle; the rectangle is kept as a possible letter when any rectangle
    (including itself — note the intersects(rec) test) hits a probe.

    NOTE(review): probes use rec.height for *both* sides, and left_rec alone
    is shifted up by rec.height — confirm whether rec.width was intended.
    """
    possible_letters = list()
    for rec in rectangles:
        top_rec = Rectangle(rec.x, rec.y - rec.height,
                            rec.height, rec.height)
        bottom_rec = Rectangle(rec.x, rec.y + rec.height,
                               rec.height, rec.height)
        right_rec = Rectangle(rec.x + rec.width, rec.y,
                              rec.height, rec.height)
        left_rec = Rectangle(rec.x - rec.width, rec.y - rec.height,
                             rec.height, rec.height)
        for rect2 in rectangles:
            if (rect2.intersects(top_rec) or
                    rect2.intersects(right_rec) or
                    rect2.intersects(bottom_rec) or
                    rect2.intersects(left_rec) or
                    rect2.intersects(rec)):
                if rec not in possible_letters:
                    possible_letters.append(rec)
    logger.info("found " + str(len(possible_letters)) + " possible letters")
    return possible_letters
def is_first_letter(rect, quadtree, draw = None):
    """Return True when nothing overlaps the square region left of *rect* —
    i.e. the rectangle is likely the first letter of a text line.

    :param draw: optional PIL ImageDraw used to visualise the probe region
    """
    left_rec = Rectangle(rect.x - rect.height,
                         rect.y, rect.height, rect.height)
    if draw:
        left_rec.draw(draw, outline="yellow")
    neighbours = []
    quadtree.retrieve(neighbours, rect)
    for rect1 in neighbours:
        if rect1 is not rect and rect1.overlaps_with(left_rec):
            return False
    return True
def remove_overlaps(rectangles, width, height):
    """Remove rectangles that are 100% overlapped by another rectangle.

    Mutates and returns the input list.
    NOTE(review): exact ``== 100`` comparison; partial overlaps are kept.
    """
    remove = []
    i = 0
    for rect, rect1 in iterate_rectangles(rectangles, rectangles, width, height):
        percentage = rect.overlap_percentage(rect1)
        if percentage == 100:
            remove.append(rect1)
            i += 1
    # Iterate over a copy so removal does not skip elements.
    for rect in rectangles[:]:
        if rect in remove:
            rectangles.remove(rect)
    logger.info("Removed " + str(i))
    return rectangles
def get_lines(rectangles, width, height):
    """
    Finds all rectangles that are aligned with each other
    and have similar height and groups them
    :param rectangles: candidate letter bounding boxes
    :type: Rectangle
    :return: one bounding Rectangle per detected line
    :rtype: list[Rectangle]
    """
    n = 0
    lines = dict()
    quadtree = Quadtree(0, Rectangle(0,0,width, height))
    for rect in rectangles:
        quadtree.insert(rect)
    for rect in rectangles:
        # Only start a line at a rectangle with no letter to its left.
        is_first = is_first_letter(rect, quadtree)
        if is_first:
            last_rect = rect
            lines[n] = list()
            lines[n].append(last_rect)
            neighbours = []
            quadtree.retrieve(neighbours, rect)
            # Walk candidates left-to-right, chaining letters that are
            # aligned, adjacent to the previous letter, and of similar height.
            for rect1 in sorted(neighbours, key=lambda rec: rec.x):
                right_last_rect = Rectangle(last_rect.x + last_rect.width,
                                            last_rect.y, last_rect.height * 1.5,
                                            last_rect.height)
                # sqrt(x**2) is just the absolute height difference.
                if rect is not rect1 and \
                        rect.inline(rect1) and \
                        right_last_rect.intersects(rect1) and \
                        math.sqrt(pow(rect.height-rect1.height, 2)) < 0.5*rect.height:
                    last_rect = rect1
                    lines[n].append(last_rect)
            n += 1
    result = []
    for key in lines:
        line = line_bounding_box(lines[key])
        result.append(line)
    return result
def line_bounding_box(line):
    """Return the Rectangle that tightly encloses every rectangle in *line*."""
    # Extremes of all four edges across the line's member rectangles.
    leftmost = min(rect.l_top.x for rect in line)
    topmost = min(rect.l_top.y for rect in line)
    rightmost = max(rect.r_bot.x for rect in line)
    bottommost = max(rect.r_bot.y for rect in line)
    return Rectangle(leftmost, topmost,
                     (rightmost - leftmost), (bottommost - topmost))
def group_lines(lines):
    """
    Groups lines together
    :param lines: line bounding boxes (as produced by get_lines)
    :type lines: list[Rectangle]
    :return: vertically adjacent, similar-height lines grouped by union-find root
    :rtype: dict[int, list[Rectangle]]
    """
    bounding_boxes = dict()
    uf_arr = UFarray()
    n = 0
    # Assign each line a sequential index for the union-find structure.
    for line in lines:
        bounding_boxes[n] = line
        n += 1
    groups = dict()
    uf_arr.setLabel(len(bounding_boxes))
    for n in bounding_boxes:
        rect = bounding_boxes[n]
        # Probe regions directly below and above the line.
        # NOTE(review): 'top_rect' is offset downward (+height) and
        # 'bottom_rect' upward — the names look swapped; behaviour is
        # unaffected since both probes are tested.
        top_rect = Rectangle(rect.x, rect.y + rect.height,
                             rect.width, rect.height)
        bottom_rect = Rectangle(rect.x, rect.y - rect.height,
                                rect.width, rect.height)
        for k in bounding_boxes:
            rect1 = bounding_boxes[k]
            if rect is not rect1:
                # Union lines that touch vertically and differ in height by
                # less than 30%.
                if (rect1.intersects(bottom_rect) or
                        rect1.intersects(top_rect)) and \
                        abs(rect.height - rect1.height) < 0.3 * rect.height:
                    uf_arr.setLabel(max(n, k))
                    uf_arr.union(n, k)
    uf_arr.flatten()
    # Collect the members of each union-find root into its group.
    for n in bounding_boxes:
        index = uf_arr.find(n)
        line_list = groups.get(index, list())
        line_list.append(bounding_boxes[n])
        groups[index] = line_list
    return groups
def crop_size_rectangles(rectangles):
    """Return (x_max, x_min, y_max, y_min) covering all rectangles' corners.

    Extremes are seeded with (0, inf, 0, inf), so an empty input returns
    those seed values unchanged.
    """
    x_max, x_min = 0, float("inf")
    y_max, y_min = 0, float("inf")
    for rect in rectangles:
        x_max = max(x_max, rect.r_bot.x)
        x_min = min(x_min, rect.l_top.x)
        y_max = max(y_max, rect.r_bot.y)
        y_min = min(y_min, rect.l_top.y)
    return x_max, x_min, y_max, y_min
def mask_groups(img, groups, possible_bubbles):
    """
    Returns list of masked images
    :param img: image to mask
    :type img: Image
    :param groups: group of rectangles to use as masks
    :param possible_bubbles: (node, level) candidates from find_bubbles
    :return: list of masked images and their
    top left corner position on the original image
    :rtype: list[int, int, Image, Rectangle, list[Rectangles], list[Rectangle]]
    """
    masks = []
    width, height = img.size
    used_bubbles = []
    for label in groups:
        lines = groups[label]
        (x_max, x_min, y_max, y_min) = crop_size_rectangles(groups[label])
        bounding_box = Rectangle(x_min, y_min, x_max - x_min, y_max - y_min)
        line_length = len(lines)
        # Single-line groups are skipped as likely noise.
        if line_length <= 1:
            continue
        """
        if line_length > 1 or \
            (line_length == 1 and
            lines[0].width/lines[0].height > 2 and
            lines[0].width * lines[0].height > 500):
        """
        # Find the deepest-nested unused bubble that contains the group.
        # NOTE(review): bubble 0 is always skipped and the scan *breaks*
        # (rather than continues) on an already-used bubble — confirm intent.
        highest_level = 0
        index = -1
        for i, bubble in enumerate(possible_bubbles):
            if i != 0 and \
                    bounding_box in bubble[0].box:
                if i in used_bubbles:
                    break
                if highest_level < bubble[1]:
                    highest_level=bubble[1]
                    index = i
        if index > 0 and index not in used_bubbles:
            used_bubbles.append(index)
            bubble = possible_bubbles[index][0]
            # A black bubble contour on a white page acts as the paste mask.
            cv2_img = np.full((height, width), 255, dtype=np.uint8)
            cv2.drawContours(cv2_img, [bubble.contour], 0, 0, -1)
            masked_img = Image.fromarray(cv2_img)
            draw = ImageDraw.Draw(masked_img)
            for rect in lines:
                rect.draw(draw, fill=True)
            temp_img = img.copy()
            temp_img.paste(masked_img, mask=masked_img)
            # Crop the result down to the bubble's own bounding box.
            bounding_box = bubble.box
            temp_img = temp_img.crop((bounding_box.x, bounding_box.y,
                                      bounding_box.x + bounding_box.width,
                                      bounding_box.y + bounding_box.height))
            masks.append((bounding_box.x, bounding_box.y, temp_img, bounding_box, lines, bubble))
            continue
    return masks
def mask_img(img, masks):
"""
:param img:
:param masks:
:return:
"""
line_masks = list()
for rect in masks:
img2 = Image.new("L", img.size, color="white")
rect.draw(ImageDraw.Draw(img2), fill=True)
img3 = img.copy()
img3.paste(img2, mask=img2)
img3 = img3.crop((rect.l_top.x, rect.l_top.y, rect.r_bot.x, rect.r_bot.y))
line_masks.append((rect.l_top.x, rect.l_top.y, img3))
return line_masks
def compare_with_original(filename, masks):
original = Image.open(filename)
#original = original.resize([int(2 * s) for s in original.size], Image.ANTIALIAS)
img3 = Image.new("RGB", (original.size[0] * 2, original.size[1]), color="white")
img3.paste(original, box=(original.width, 0))
for mask in masks:
img3.paste(mask[2], box=(mask[0], mask[1]))
#img3 = img3.resize([int(0.5 * s) for s in img3.size], Image.ANTIALIAS)
return img3
def apply_masks(original, masks):
masked = np.zeros(original.shape, dtype=np.uint8)
for mask in masks:
cv2.drawContours(masked,[mask[5].contour],0, 255, -1)
for line in mask[4]:
cv2.rectangle(masked,
(line.l_top.x, line.l_top.y),
(line.r_bot.x, line.r_bot.y),
255, thickness=cv2.FILLED)
fg_masked = cv2.bitwise_or(original, original, mask=masked)
masked = cv2.bitwise_not(masked)
bk = np.full(original.shape, 255, dtype=np.uint8)
bk_masked = cv2.bitwise_and(bk, bk, mask=masked)
final = cv2.bitwise_or(fg_masked, bk_masked)
return final
def compare_image(img1, img2):
img3 = cv2.subtract(img1, img2)
img1 = cv2.bitwise_not(img1)
total = cv2.countNonZero(img1)
non_zero = cv2.countNonZero(img3)
return float(non_zero)/total*100
def process(filename):
"""
Process an page of a manga and return a list of images that contain text
:param filename:
:return: list of (Image objects, (x,y)position on the original image)
:rtype: list
"""
img = Image.open(filename)
cv2_img = cv2.imread(filename,0)
width, height = img.size
rectangles, possible_bubbles = adaptive_segmentation(cv2_img)
#rectangles = remove_overlaps(rectangles, width, height)
logger.debug("Getting Lines")
lines = get_lines(rectangles, width, height)
groups = group_lines(lines)
logger.debug("Applying mask")
masks = mask_groups(img, groups, possible_bubbles)
return masks, lines, rectangles
def extract_text(masks):
resize_factor = 2.5
if not masks:
return "", []
width = max([int(mask[2].size[0]*resize_factor) for mask in masks])
height = sum([int(mask[2].size[1]*resize_factor) for mask in masks])
image = Image.new("RGB", (width, height))
height = 0
masks1 = []
text = []
for mask in masks:
resized = mask[2].resize([int(n*resize_factor) for n in mask[2].size], Image.ANTIALIAS)
s = (pytesseract.image_to_string(resized)).strip()
if s != "":
masks1.append(mask)
text.append(s)
"""
box = masked_img.size
x = int(width/2 - box[0]/2)
y = height
image.paste(masked_img, box=(x,y))
height += box[1]
"""
return text, masks1
if __name__ == "__main__":
filename = "/media/filippo/HDD1/pythonProjects/Github-Hanashi/Hanashi/Hanashi/tests/resources/onepunch.jpg"
img = cv2.imread(filename,0)
pass | hanashi/processor/page_processor.py | import cv2
import logging
import pytesseract
import math
import random
import numpy as np
from PIL import Image, ImageDraw
from hanashi.model.rectangle import Rectangle
from hanashi.model.ufarray import UFarray
from hanashi.model.quadtree import Quadtree
from hanashi.model.contour_tree import Tree, Node
logger = logging.getLogger("CCL")
logger.setLevel(logging.INFO)
logging.basicConfig(format='[%(asctime)-15s %(levelname)s] [%(name)s] %(message)s')
def crop_size(verts):
"""
Calculates the sides of the
bounding box for a series of points
:param verts:
:return:
:rtype: (int, int, int, int)
"""
x_list = [v[0] for v in verts]
y_list = [v[1] for v in verts]
x_max = max(x_list)
x_min = min(x_list)
y_max = max(y_list)
y_min = min(y_list)
return x_max, x_min, y_max, y_min
def show(img):
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def cv2_connected_components(img1, min_size=50, max_size=100000):
img1 = cv2.bitwise_not(img1)
labelnum, labels, stats, centroids = cv2.connectedComponentsWithStats(img1)
rectangles = []
for label in range(1, labelnum):
x, y, w, h, size = stats[label]
rect = Rectangle(x,y,w,h)
if min_size < rect.area() < max_size:
rectangles.append(rect)
return labelnum, labels, rectangles
def find_bubbles(labelnum, labels, rectangles, img, show_image=False):
height, width = img.shape
colors = dict()
colors[0] = 0
for label in range(1, labelnum):
labels[labels == label] = random.randint(0, 255)
labels = labels.astype(np.uint8)
img1 = cv2.bitwise_not(img)
_, contours, hierarchy = cv2.findContours(
img1, cv2.cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
n = 0
area = float(labels.shape[0] * labels.shape[1])
medium_tree = Tree()
large_tree = Tree()
for elem in hierarchy[0]:
contour = contours[n]
x, y, w, h = cv2.boundingRect(contour)
bounding_box = Rectangle(x, y, w, h)
box_area = bounding_box.area()
node = Node(elem[3], n, contour, bounding_box)
if box_area > area / 10:
large_tree.add(node)
elif box_area > area / 1000:
medium_tree.add(node)
n += 1
possible_bubbles = [(node, level)
for node, level in
medium_tree.level_order_traversal()]
img2 = np.zeros((height, width, 3), np.uint8)
if show_image:
for node, level1 in medium_tree.level_order_traversal():
if node.n != -1:
cv2.drawContours(img2, contours, node.n,
(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)), -1)
for rect in rectangles:
print("Drawing ", rect)
img2 = cv2.rectangle(img2, (rect.x, rect.y), (rect.r_bot.x, rect.r_bot.y), (255, 255, 0), 1)
"""
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = (rect.l_bot.x, rect.l_bot.y)
fontScale = 0.4
fontColor = (255, 255, 255)
cv2.putText(img2, str(rect.area()),
bottomLeftCornerOfText,
font,
fontScale,
fontColor)
"""
show(img2)
return possible_bubbles
def adaptive_segmentation(cv2_img):
segmentation_levels = []
resize_factor = 1
cv2_img = cv2.resize(cv2_img, (0, 0), fx=resize_factor, fy=resize_factor, interpolation=cv2.INTER_LINEAR)
min_size = 10
max_size = 100000
resize_factor *= resize_factor
min_size *= resize_factor
max_size *= resize_factor
prev_len = float("inf")
mean = int(cv2.mean(cv2_img)[0])
height, width = cv2_img.shape
for level in range(mean, 230, 5):
thresholded = threshold_image(cv2_img.copy(), level)
labelnum, labels, rectangles = cv2_connected_components(thresholded, min_size, max_size)
#possible_bubbles = merge_bubbles(possible_bubbles)
segmentation_levels.append((level, rectangles))
n_rectangles = len(rectangles)
if n_rectangles < prev_len:
prev_len = n_rectangles
else:
break
possible_bubbles = find_bubbles(labelnum, labels, rectangles, thresholded)
rectangles = overlaps(rectangles, possible_bubbles, width, height)
return rectangles, possible_bubbles, thresholded
def iterate_rectangles(rectangles1, rectangles2, width, height):
quadtree = Quadtree(0, Rectangle(0, 0, width, height))
for rect in rectangles2:
if rect:
quadtree.insert(rect)
for rect in rectangles1:
neighbours = []
quadtree.retrieve(neighbours, rect)
for rect1 in neighbours:
yield rect, rect1
def merge_bubbles(bubbles, width, height):
pass
def overlaps(rectangles, possible_bubbles, width, height):
bubbles = [bubble[0].box for bubble in possible_bubbles]
rectangles2 = [rect for rect, bubble in
iterate_rectangles(rectangles, bubbles, width, height)
if rect in bubble]
return rectangles2
def threshold_image(img, level):
"""
Apply threshold level to the supplied image
:param img:
:param level:
:return: the resulting image
:rtype: cv2 image
"""
blur = cv2.GaussianBlur(img, (5, 5), 0)
_, dst = cv2.threshold(blur, level, 255, cv2.THRESH_BINARY)
return dst
def adaptive_threshold(image):
"""
Calculate the threshold
:param image: the cv2 image
:return: threshold level
:rtype: int
"""
mean = np.mean(image)
if 100 > mean > 230:
mean = mean + mean * 0.2
return mean
def third_pass(rectangles):
possible_letters = list()
for rec in rectangles:
top_rec = Rectangle(rec.x, rec.y - rec.height,
rec.height, rec.height)
bottom_rec = Rectangle(rec.x, rec.y + rec.height,
rec.height, rec.height)
right_rec = Rectangle(rec.x + rec.width, rec.y,
rec.height, rec.height)
left_rec = Rectangle(rec.x - rec.width, rec.y - rec.height,
rec.height, rec.height)
for rect2 in rectangles:
if (rect2.intersects(top_rec) or
rect2.intersects(right_rec) or
rect2.intersects(bottom_rec) or
rect2.intersects(left_rec) or
rect2.intersects(rec)):
if rec not in possible_letters:
possible_letters.append(rec)
logger.info("found " + str(len(possible_letters)) + " possible letters")
return possible_letters
def is_first_letter(rect, quadtree, draw = None):
left_rec = Rectangle(rect.x - rect.height,
rect.y, rect.height, rect.height)
if draw:
left_rec.draw(draw, outline="yellow")
neighbours = []
quadtree.retrieve(neighbours, rect)
for rect1 in neighbours:
if rect1 is not rect and rect1.overlaps_with(left_rec):
return False
return True
def remove_overlaps(rectangles, width, height):
remove = []
i = 0
for rect, rect1 in iterate_rectangles(rectangles, rectangles, width, height):
percentage = rect.overlap_percentage(rect1)
if percentage == 100:
remove.append(rect1)
i += 1
for rect in rectangles[:]:
if rect in remove:
rectangles.remove(rect)
logger.info("Removed " + str(i))
return rectangles
def get_lines(rectangles, width, height):
"""
Finds all rectangles that are aligned with each other
and have similar height and groups them
:param rectangles:
:type: Rectangle
:return: groups of rectangles
:rtype: dict[int, list]
"""
n = 0
lines = dict()
quadtree = Quadtree(0, Rectangle(0,0,width, height))
for rect in rectangles:
quadtree.insert(rect)
for rect in rectangles:
is_first = is_first_letter(rect, quadtree)
if is_first:
last_rect = rect
lines[n] = list()
lines[n].append(last_rect)
neighbours = []
quadtree.retrieve(neighbours, rect)
for rect1 in sorted(neighbours, key=lambda rec: rec.x):
right_last_rect = Rectangle(last_rect.x + last_rect.width,
last_rect.y, last_rect.height * 1.5,
last_rect.height)
if rect is not rect1 and \
rect.inline(rect1) and \
right_last_rect.intersects(rect1) and \
math.sqrt(pow(rect.height-rect1.height, 2)) < 0.5*rect.height:
last_rect = rect1
lines[n].append(last_rect)
n += 1
result = []
for key in lines:
line = line_bounding_box(lines[key])
result.append(line)
return result
def line_bounding_box(line):
left = min([v.l_top.x for v in line])
right = max([v.r_bot.x for v in line])
top = min([v.l_top.y for v in line])
bottom = max([v.r_bot.y for v in line])
return Rectangle(left, top, (right - left), (bottom - top))
def group_lines(lines):
"""
Groups lines together
:param lines: dictionary that contains all the
black pixels in a line
:type lines: dict[int, list]
:return:
:rtype: dict[int, list[Rectangle]]
"""
bounding_boxes = dict()
uf_arr = UFarray()
n = 0
for line in lines:
bounding_boxes[n] = line
n += 1
groups = dict()
uf_arr.setLabel(len(bounding_boxes))
for n in bounding_boxes:
rect = bounding_boxes[n]
top_rect = Rectangle(rect.x, rect.y + rect.height,
rect.width, rect.height)
bottom_rect = Rectangle(rect.x, rect.y - rect.height,
rect.width, rect.height)
for k in bounding_boxes:
rect1 = bounding_boxes[k]
if rect is not rect1:
if (rect1.intersects(bottom_rect) or
rect1.intersects(top_rect)) and \
abs(rect.height - rect1.height) < 0.3 * rect.height:
uf_arr.setLabel(max(n, k))
uf_arr.union(n, k)
uf_arr.flatten()
for n in bounding_boxes:
index = uf_arr.find(n)
line_list = groups.get(index, list())
line_list.append(bounding_boxes[n])
groups[index] = line_list
return groups
def crop_size_rectangles(rectangles):
(x_max, x_min, y_max, y_min) = (0, float("inf"), 0, float("inf"))
for rect in rectangles:
x_max = max(rect.r_bot.x, x_max)
x_min = min(rect.l_top.x, x_min)
y_max = max(rect.r_bot.y, y_max)
y_min = min(rect.l_top.y, y_min)
return x_max, x_min, y_max, y_min
def mask_groups(img, groups, possible_bubbles):
"""
Returns list of masked images
:param img: image to mask
:type img: Image
:param groups: group of rectangles to use as masks
:return: list of masked images and their
top left corner position on the original image
:rtype: list[int, int, Image, Rectangle, list[Rectangles], list[Rectangle]]
"""
masks = []
width, height = img.size
used_bubbles = []
for label in groups:
lines = groups[label]
(x_max, x_min, y_max, y_min) = crop_size_rectangles(groups[label])
bounding_box = Rectangle(x_min, y_min, x_max - x_min, y_max - y_min)
line_length = len(lines)
if line_length <= 1:
continue
"""
if line_length > 1 or \
(line_length == 1 and
lines[0].width/lines[0].height > 2 and
lines[0].width * lines[0].height > 500):
"""
highest_level = 0
index = -1
for i, bubble in enumerate(possible_bubbles):
if i != 0 and \
bounding_box in bubble[0].box:
if i in used_bubbles:
break
if highest_level < bubble[1]:
highest_level=bubble[1]
index = i
if index > 0 and index not in used_bubbles:
used_bubbles.append(index)
bubble = possible_bubbles[index][0]
cv2_img = np.full((height, width), 255, dtype=np.uint8)
cv2.drawContours(cv2_img, [bubble.contour], 0, 0, -1)
masked_img = Image.fromarray(cv2_img)
draw = ImageDraw.Draw(masked_img)
for rect in lines:
rect.draw(draw, fill=True)
temp_img = img.copy()
temp_img.paste(masked_img, mask=masked_img)
bounding_box = bubble.box
temp_img = temp_img.crop((bounding_box.x, bounding_box.y,
bounding_box.x + bounding_box.width,
bounding_box.y + bounding_box.height))
masks.append((bounding_box.x, bounding_box.y, temp_img, bounding_box, lines, bubble))
continue
return masks
def mask_img(img, masks):
"""
:param img:
:param masks:
:return:
"""
line_masks = list()
for rect in masks:
img2 = Image.new("L", img.size, color="white")
rect.draw(ImageDraw.Draw(img2), fill=True)
img3 = img.copy()
img3.paste(img2, mask=img2)
img3 = img3.crop((rect.l_top.x, rect.l_top.y, rect.r_bot.x, rect.r_bot.y))
line_masks.append((rect.l_top.x, rect.l_top.y, img3))
return line_masks
def compare_with_original(filename, masks):
original = Image.open(filename)
#original = original.resize([int(2 * s) for s in original.size], Image.ANTIALIAS)
img3 = Image.new("RGB", (original.size[0] * 2, original.size[1]), color="white")
img3.paste(original, box=(original.width, 0))
for mask in masks:
img3.paste(mask[2], box=(mask[0], mask[1]))
#img3 = img3.resize([int(0.5 * s) for s in img3.size], Image.ANTIALIAS)
return img3
def apply_masks(original, masks):
masked = np.zeros(original.shape, dtype=np.uint8)
for mask in masks:
cv2.drawContours(masked,[mask[5].contour],0, 255, -1)
for line in mask[4]:
cv2.rectangle(masked,
(line.l_top.x, line.l_top.y),
(line.r_bot.x, line.r_bot.y),
255, thickness=cv2.FILLED)
fg_masked = cv2.bitwise_or(original, original, mask=masked)
masked = cv2.bitwise_not(masked)
bk = np.full(original.shape, 255, dtype=np.uint8)
bk_masked = cv2.bitwise_and(bk, bk, mask=masked)
final = cv2.bitwise_or(fg_masked, bk_masked)
return final
def compare_image(img1, img2):
img3 = cv2.subtract(img1, img2)
img1 = cv2.bitwise_not(img1)
total = cv2.countNonZero(img1)
non_zero = cv2.countNonZero(img3)
return float(non_zero)/total*100
def process(filename):
"""
Process an page of a manga and return a list of images that contain text
:param filename:
:return: list of (Image objects, (x,y)position on the original image)
:rtype: list
"""
img = Image.open(filename)
cv2_img = cv2.imread(filename,0)
width, height = img.size
rectangles, possible_bubbles = adaptive_segmentation(cv2_img)
#rectangles = remove_overlaps(rectangles, width, height)
logger.debug("Getting Lines")
lines = get_lines(rectangles, width, height)
groups = group_lines(lines)
logger.debug("Applying mask")
masks = mask_groups(img, groups, possible_bubbles)
return masks, lines, rectangles
def extract_text(masks):
resize_factor = 2.5
if not masks:
return "", []
width = max([int(mask[2].size[0]*resize_factor) for mask in masks])
height = sum([int(mask[2].size[1]*resize_factor) for mask in masks])
image = Image.new("RGB", (width, height))
height = 0
masks1 = []
text = []
for mask in masks:
resized = mask[2].resize([int(n*resize_factor) for n in mask[2].size], Image.ANTIALIAS)
s = (pytesseract.image_to_string(resized)).strip()
if s != "":
masks1.append(mask)
text.append(s)
"""
box = masked_img.size
x = int(width/2 - box[0]/2)
y = height
image.paste(masked_img, box=(x,y))
height += box[1]
"""
return text, masks1
if __name__ == "__main__":
filename = "/media/filippo/HDD1/pythonProjects/Github-Hanashi/Hanashi/Hanashi/tests/resources/onepunch.jpg"
img = cv2.imread(filename,0)
pass | 0.625324 | 0.470189 |
import logging
from pyhap.const import CATEGORY_ALARM_SYSTEM
from pyhap.loader import get_loader
from openpeerpower.components.alarm_control_panel import DOMAIN
from openpeerpower.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
SUPPORT_ALARM_TRIGGER,
)
from openpeerpower.const import (
ATTR_CODE,
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_DISARM,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
from openpeerpower.core import callback
from .accessories import TYPES, HomeAccessory
from .const import (
CHAR_CURRENT_SECURITY_STATE,
CHAR_TARGET_SECURITY_STATE,
SERV_SECURITY_SYSTEM,
)
_LOGGER = logging.getLogger(__name__)
OPP_TO_HOMEKIT = {
STATE_ALARM_ARMED_HOME: 0,
STATE_ALARM_ARMED_AWAY: 1,
STATE_ALARM_ARMED_NIGHT: 2,
STATE_ALARM_DISARMED: 3,
STATE_ALARM_TRIGGERED: 4,
}
OPP_TO_HOMEKIT_SERVICES = {
SERVICE_ALARM_ARM_HOME: 0,
SERVICE_ALARM_ARM_AWAY: 1,
SERVICE_ALARM_ARM_NIGHT: 2,
SERVICE_ALARM_DISARM: 3,
}
HOMEKIT_TO_OPP = {c: s for s, c in OPP_TO_HOMEKIT.items()}
STATE_TO_SERVICE = {
STATE_ALARM_ARMED_AWAY: SERVICE_ALARM_ARM_AWAY,
STATE_ALARM_ARMED_HOME: SERVICE_ALARM_ARM_HOME,
STATE_ALARM_ARMED_NIGHT: SERVICE_ALARM_ARM_NIGHT,
STATE_ALARM_DISARMED: SERVICE_ALARM_DISARM,
}
@TYPES.register("SecuritySystem")
class SecuritySystem(HomeAccessory):
"""Generate an SecuritySystem accessory for an alarm control panel."""
def __init__(self, *args):
"""Initialize a SecuritySystem accessory object."""
super().__init__(*args, category=CATEGORY_ALARM_SYSTEM)
state = self.opp.states.get(self.entity_id)
self._alarm_code = self.config.get(ATTR_CODE)
supported_states = state.attributes.get(
ATTR_SUPPORTED_FEATURES,
(
SUPPORT_ALARM_ARM_HOME
| SUPPORT_ALARM_ARM_AWAY
| SUPPORT_ALARM_ARM_NIGHT
| SUPPORT_ALARM_TRIGGER
),
)
loader = get_loader()
default_current_states = loader.get_char(
"SecuritySystemCurrentState"
).properties.get("ValidValues")
default_target_services = loader.get_char(
"SecuritySystemTargetState"
).properties.get("ValidValues")
current_supported_states = [
OPP_TO_HOMEKIT[STATE_ALARM_DISARMED],
OPP_TO_HOMEKIT[STATE_ALARM_TRIGGERED],
]
target_supported_services = [OPP_TO_HOMEKIT_SERVICES[SERVICE_ALARM_DISARM]]
if supported_states & SUPPORT_ALARM_ARM_HOME:
current_supported_states.append(OPP_TO_HOMEKIT[STATE_ALARM_ARMED_HOME])
target_supported_services.append(
OPP_TO_HOMEKIT_SERVICES[SERVICE_ALARM_ARM_HOME]
)
if supported_states & SUPPORT_ALARM_ARM_AWAY:
current_supported_states.append(OPP_TO_HOMEKIT[STATE_ALARM_ARMED_AWAY])
target_supported_services.append(
OPP_TO_HOMEKIT_SERVICES[SERVICE_ALARM_ARM_AWAY]
)
if supported_states & SUPPORT_ALARM_ARM_NIGHT:
current_supported_states.append(OPP_TO_HOMEKIT[STATE_ALARM_ARMED_NIGHT])
target_supported_services.append(
OPP_TO_HOMEKIT_SERVICES[SERVICE_ALARM_ARM_NIGHT]
)
new_current_states = {
key: val
for key, val in default_current_states.items()
if val in current_supported_states
}
new_target_services = {
key: val
for key, val in default_target_services.items()
if val in target_supported_services
}
serv_alarm = self.add_preload_service(SERV_SECURITY_SYSTEM)
self.char_current_state = serv_alarm.configure_char(
CHAR_CURRENT_SECURITY_STATE,
value=OPP_TO_HOMEKIT[STATE_ALARM_DISARMED],
valid_values=new_current_states,
)
self.char_target_state = serv_alarm.configure_char(
CHAR_TARGET_SECURITY_STATE,
value=OPP_TO_HOMEKIT_SERVICES[SERVICE_ALARM_DISARM],
valid_values=new_target_services,
setter_callback=self.set_security_state,
)
# Set the state so it is in sync on initial
# GET to avoid an event storm after homekit startup
self.async_update_state(state)
def set_security_state(self, value):
"""Move security state to value if call came from HomeKit."""
_LOGGER.debug("%s: Set security state to %d", self.entity_id, value)
opp_value = HOMEKIT_TO_OPP[value]
service = STATE_TO_SERVICE[opp_value]
params = {ATTR_ENTITY_ID: self.entity_id}
if self._alarm_code:
params[ATTR_CODE] = self._alarm_code
self.async_call_service(DOMAIN, service, params)
@callback
def async_update_state(self, new_state):
"""Update security state after state changed."""
opp_state = new_state.state
if opp_state in OPP_TO_HOMEKIT:
current_security_state = OPP_TO_HOMEKIT[opp_state]
if self.char_current_state.value != current_security_state:
self.char_current_state.set_value(current_security_state)
_LOGGER.debug(
"%s: Updated current state to %s (%d)",
self.entity_id,
opp_state,
current_security_state,
)
# SecuritySystemTargetState does not support triggered
if (
opp_state != STATE_ALARM_TRIGGERED
and self.char_target_state.value != current_security_state
):
self.char_target_state.set_value(current_security_state) | openpeerpower/components/homekit/type_security_systems.py | import logging
from pyhap.const import CATEGORY_ALARM_SYSTEM
from pyhap.loader import get_loader
from openpeerpower.components.alarm_control_panel import DOMAIN
from openpeerpower.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
SUPPORT_ALARM_TRIGGER,
)
from openpeerpower.const import (
ATTR_CODE,
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_DISARM,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
from openpeerpower.core import callback
from .accessories import TYPES, HomeAccessory
from .const import (
CHAR_CURRENT_SECURITY_STATE,
CHAR_TARGET_SECURITY_STATE,
SERV_SECURITY_SYSTEM,
)
_LOGGER = logging.getLogger(__name__)
OPP_TO_HOMEKIT = {
STATE_ALARM_ARMED_HOME: 0,
STATE_ALARM_ARMED_AWAY: 1,
STATE_ALARM_ARMED_NIGHT: 2,
STATE_ALARM_DISARMED: 3,
STATE_ALARM_TRIGGERED: 4,
}
OPP_TO_HOMEKIT_SERVICES = {
SERVICE_ALARM_ARM_HOME: 0,
SERVICE_ALARM_ARM_AWAY: 1,
SERVICE_ALARM_ARM_NIGHT: 2,
SERVICE_ALARM_DISARM: 3,
}
HOMEKIT_TO_OPP = {c: s for s, c in OPP_TO_HOMEKIT.items()}
STATE_TO_SERVICE = {
STATE_ALARM_ARMED_AWAY: SERVICE_ALARM_ARM_AWAY,
STATE_ALARM_ARMED_HOME: SERVICE_ALARM_ARM_HOME,
STATE_ALARM_ARMED_NIGHT: SERVICE_ALARM_ARM_NIGHT,
STATE_ALARM_DISARMED: SERVICE_ALARM_DISARM,
}
@TYPES.register("SecuritySystem")
class SecuritySystem(HomeAccessory):
"""Generate an SecuritySystem accessory for an alarm control panel."""
def __init__(self, *args):
"""Initialize a SecuritySystem accessory object."""
super().__init__(*args, category=CATEGORY_ALARM_SYSTEM)
state = self.opp.states.get(self.entity_id)
self._alarm_code = self.config.get(ATTR_CODE)
supported_states = state.attributes.get(
ATTR_SUPPORTED_FEATURES,
(
SUPPORT_ALARM_ARM_HOME
| SUPPORT_ALARM_ARM_AWAY
| SUPPORT_ALARM_ARM_NIGHT
| SUPPORT_ALARM_TRIGGER
),
)
loader = get_loader()
default_current_states = loader.get_char(
"SecuritySystemCurrentState"
).properties.get("ValidValues")
default_target_services = loader.get_char(
"SecuritySystemTargetState"
).properties.get("ValidValues")
current_supported_states = [
OPP_TO_HOMEKIT[STATE_ALARM_DISARMED],
OPP_TO_HOMEKIT[STATE_ALARM_TRIGGERED],
]
target_supported_services = [OPP_TO_HOMEKIT_SERVICES[SERVICE_ALARM_DISARM]]
if supported_states & SUPPORT_ALARM_ARM_HOME:
current_supported_states.append(OPP_TO_HOMEKIT[STATE_ALARM_ARMED_HOME])
target_supported_services.append(
OPP_TO_HOMEKIT_SERVICES[SERVICE_ALARM_ARM_HOME]
)
if supported_states & SUPPORT_ALARM_ARM_AWAY:
current_supported_states.append(OPP_TO_HOMEKIT[STATE_ALARM_ARMED_AWAY])
target_supported_services.append(
OPP_TO_HOMEKIT_SERVICES[SERVICE_ALARM_ARM_AWAY]
)
if supported_states & SUPPORT_ALARM_ARM_NIGHT:
current_supported_states.append(OPP_TO_HOMEKIT[STATE_ALARM_ARMED_NIGHT])
target_supported_services.append(
OPP_TO_HOMEKIT_SERVICES[SERVICE_ALARM_ARM_NIGHT]
)
new_current_states = {
key: val
for key, val in default_current_states.items()
if val in current_supported_states
}
new_target_services = {
key: val
for key, val in default_target_services.items()
if val in target_supported_services
}
serv_alarm = self.add_preload_service(SERV_SECURITY_SYSTEM)
self.char_current_state = serv_alarm.configure_char(
CHAR_CURRENT_SECURITY_STATE,
value=OPP_TO_HOMEKIT[STATE_ALARM_DISARMED],
valid_values=new_current_states,
)
self.char_target_state = serv_alarm.configure_char(
CHAR_TARGET_SECURITY_STATE,
value=OPP_TO_HOMEKIT_SERVICES[SERVICE_ALARM_DISARM],
valid_values=new_target_services,
setter_callback=self.set_security_state,
)
# Set the state so it is in sync on initial
# GET to avoid an event storm after homekit startup
self.async_update_state(state)
def set_security_state(self, value):
"""Move security state to value if call came from HomeKit."""
_LOGGER.debug("%s: Set security state to %d", self.entity_id, value)
opp_value = HOMEKIT_TO_OPP[value]
service = STATE_TO_SERVICE[opp_value]
params = {ATTR_ENTITY_ID: self.entity_id}
if self._alarm_code:
params[ATTR_CODE] = self._alarm_code
self.async_call_service(DOMAIN, service, params)
@callback
def async_update_state(self, new_state):
"""Update security state after state changed."""
opp_state = new_state.state
if opp_state in OPP_TO_HOMEKIT:
current_security_state = OPP_TO_HOMEKIT[opp_state]
if self.char_current_state.value != current_security_state:
self.char_current_state.set_value(current_security_state)
_LOGGER.debug(
"%s: Updated current state to %s (%d)",
self.entity_id,
opp_state,
current_security_state,
)
# SecuritySystemTargetState does not support triggered
if (
opp_state != STATE_ALARM_TRIGGERED
and self.char_target_state.value != current_security_state
):
self.char_target_state.set_value(current_security_state) | 0.536556 | 0.097262 |
from typing import Any, List
import networkx as nx
from interface import Interface, implements
from .builders.graph_builders import GraphBuilderInterface, TextGCNGraphBuilder
from .model.document import Document
from .model.graph_matrix import GraphMatrix
from .nlp.pieplines import ProcessingPipeline, ProcessingPipelineInterface
from .nlp.processors import Lemmatizer
from .presenters.graph_presenter import GraphPresenter
from .readers.reading_controller import ReadingController
class GBTRInterface(Interface):
"""Main module."""
def get_graph(
self,
source: Any
) -> List[GraphMatrix]:
"""Transform given documents corpus to graph representation.
Parameters
----------
source: any
Data source in one of supported types.
Currently supported types:
- list of dictionaries {"text" : str, "label" : str}.
Returns
-------
List[GraphMatrix]
List of prepared graphs.
If method implements whole corpus representation as one graph
then one element list is returned.
"""
class GBTR(implements(GBTRInterface)):
def __init__(
self,
reading_controller: ReadingController,
nlp_pipeline: ProcessingPipelineInterface,
graph_builder: GraphBuilderInterface
):
self._data: List[Document] = None
self._reading_controller = reading_controller
self._graph_builder = graph_builder
def get_graph(
self,
source: Any
) -> List[GraphMatrix]:
self._data = self._reading_controller.read_data(source)
# TODO
# consider parallel processing
for document in self._data:
document.text = self.nlp_pipeline.process(document.text)
return self._graph_builder.get_graph(self._data)
class TextGCN:
"""Implementation of graph representation for TextGCN."""
def __call__(
self,
source: Any
) -> nx.Graph:
"""Returns TextGCN based grapg representation for given corpus.
Parameters
----------
source: any
Data source in one of supported types.
Currently supported types:
- list of dictionaries {"text" : str, "label" : str}.
Returns
-------
nx.Graph
Graph representation as Networkx Graph object.
"""
gbtr = GBTR(
reading_controller=ReadingController(),
nlp_pipeline=ProcessingPipeline([
# TODO
Lemmatizer()
]),
graph_builder=TextGCNGraphBuilder()
)
graph_matrix = gbtr.get_graph(source)[0]
graph_presenter = GraphPresenter()
return graph_presenter.to_nx(graph_matrix) | gbtr/gbtr.py | from typing import Any, List
import networkx as nx
from interface import Interface, implements
from .builders.graph_builders import GraphBuilderInterface, TextGCNGraphBuilder
from .model.document import Document
from .model.graph_matrix import GraphMatrix
from .nlp.pieplines import ProcessingPipeline, ProcessingPipelineInterface
from .nlp.processors import Lemmatizer
from .presenters.graph_presenter import GraphPresenter
from .readers.reading_controller import ReadingController
class GBTRInterface(Interface):
"""Main module."""
def get_graph(
self,
source: Any
) -> List[GraphMatrix]:
"""Transform given documents corpus to graph representation.
Parameters
----------
source: any
Data source in one of supported types.
Currently supported types:
- list of dictionaries {"text" : str, "label" : str}.
Returns
-------
List[GraphMatrix]
List of prepared graphs.
If method implements whole corpus representation as one graph
then one element list is returned.
"""
class GBTR(implements(GBTRInterface)):
def __init__(
self,
reading_controller: ReadingController,
nlp_pipeline: ProcessingPipelineInterface,
graph_builder: GraphBuilderInterface
):
self._data: List[Document] = None
self._reading_controller = reading_controller
self._graph_builder = graph_builder
def get_graph(
self,
source: Any
) -> List[GraphMatrix]:
self._data = self._reading_controller.read_data(source)
# TODO
# consider parallel processing
for document in self._data:
document.text = self.nlp_pipeline.process(document.text)
return self._graph_builder.get_graph(self._data)
class TextGCN:
"""Implementation of graph representation for TextGCN."""
def __call__(
self,
source: Any
) -> nx.Graph:
"""Returns TextGCN based grapg representation for given corpus.
Parameters
----------
source: any
Data source in one of supported types.
Currently supported types:
- list of dictionaries {"text" : str, "label" : str}.
Returns
-------
nx.Graph
Graph representation as Networkx Graph object.
"""
gbtr = GBTR(
reading_controller=ReadingController(),
nlp_pipeline=ProcessingPipeline([
# TODO
Lemmatizer()
]),
graph_builder=TextGCNGraphBuilder()
)
graph_matrix = gbtr.get_graph(source)[0]
graph_presenter = GraphPresenter()
return graph_presenter.to_nx(graph_matrix) | 0.830903 | 0.202522 |
from __future__ import division
from pyomo.environ import (ConcreteModel, Constraint, NonNegativeReals,
Objective, Param, RangeSet, Set, Suffix, Var,
minimize)
from pyomo.gdp import Disjunct, Disjunction
def build_model():
"""Build the model."""
m = ConcreteModel()
m.streams = Set(initialize=['H1', 'H2', 'C1', 'C2'])
m.hot_streams = Set(within=m.streams, initialize=['H1', 'H2'])
m.cold_streams = Set(within=m.streams, initialize=['C1', 'C2'])
num_stages = 2
m.stages = RangeSet(num_stages)
m.stages_plus_one = RangeSet(num_stages + 1)
m.inlet_T = Param(
m.streams, doc="Inlet temperature of stream [K]",
initialize={'H1': 443,
'H2': 423,
'C1': 293,
'C2': 353})
m.outlet_T = Param(
m.streams, doc="Outlet temperature of stream [K]",
initialize={'H1': 333,
'H2': 303,
'C1': 408,
'C2': 413})
m.cold_util_outlet_T = Param(default=313)
m.hot_util_outlet_T = Param(default=450)
# m.bigM_process_heat = Param(
# m.hot_streams, m.cold_streams, m.stages,
# doc="Big-M value for process match existence.",
# default=10000)
# m.bigM_cold_utility = Param(m.hot_streams, default=10000)
# m.bigM_hot_utility = Param(m.cold_streams, default=10000)
m.heat_exchanged = Var(
m.hot_streams, m.cold_streams, m.stages,
domain=NonNegativeReals,
doc="Heat exchanged from hot stream to cold stream in stage",
initialize=1, bounds=(0, 5000))
m.FCp = Param(m.streams, doc="Flow times heat capacity of stream",
initialize={'H1': 30,
'H2': 15,
'C1': 20,
'C2': 40})
m.utility_needed = Var(
m.streams,
doc="Hot or cold utility needed to bring a stream "
"to its required exit temperature.",
domain=NonNegativeReals, initialize=1, bounds=(0, 5000))
m.T = Var(m.streams, m.stages_plus_one,
doc="Temperature of stream at hot end of stage",
bounds=(293, 450))
m.bigM_T_approach = Param(default=500)
m.BigM = Suffix(direction=Suffix.LOCAL)
m.cost_cold_util = Param(default=20)
m.cost_hot_util = Param(default=80)
m.exchanger_fixed_cost = Param(
m.hot_streams, m.cold_streams, default=0)
m.utility_exchanger_unit_cost = Param(
m.streams, default=0)
m.area_cost_coefficient = Param(
m.hot_streams, m.cold_streams, default=1000)
m.utility_area_cost_coefficient = Param(
m.streams, initialize={
strm: (1000 if strm in m.hot_streams else 1200)
for strm in m.streams},
doc="1200 for heaters. 1000 for all other exchangers.")
m.area_cost_exponent = Param(default=0.6)
m.U = Param(m.hot_streams, m.cold_streams, default=0.8)
m.utility_U = Param(
m.streams, initialize={
strm: (0.8 if strm in m.hot_streams else 1.2)
for strm in m.streams},
doc="1.2 for heaters. 0.8 for everything else.")
m.cold_util_T_in = Param(default=293)
m.utility_area_cost_exponent = Param(m.streams, default=0.6)
m.hot_util_T_in = Param(default=450)
m.exchanger_approach_T = Var(
m.hot_streams, m.cold_streams, m.stages_plus_one,
doc="Temperature approach for exchanger between "
"hot and cold stream at a stage.",
bounds=(0.1, 500))
m.utility_approach_T = Var(
m.streams, doc="Temperature approach for utility exchangers",
bounds=(0.1, 500))
@m.Constraint(m.streams)
def overall_stream_heat_balance(m, strm):
if strm in m.hot_streams:
return (m.inlet_T[strm] - m.outlet_T[strm]) * m.FCp[strm] == (
sum(m.heat_exchanged[strm, cold, stg]
for cold in m.cold_streams for stg in m.stages)
+ m.utility_needed[strm])
if strm in m.cold_streams:
return (m.outlet_T[strm] - m.inlet_T[strm]) * m.FCp[strm] == (
sum(m.heat_exchanged[hot, strm, stg]
for hot in m.hot_streams for stg in m.stages)
+ m.utility_needed[strm])
@m.Constraint(m.stages, m.streams)
def stage_heat_balance(m, stg, strm):
if strm in m.hot_streams:
return (m.T[strm, stg] - m.T[strm, stg + 1]) * m.FCp[strm] == sum(
m.heat_exchanged[strm, cold, stg] for cold in m.cold_streams)
if strm in m.cold_streams:
return (m.T[strm, stg] - m.T[strm, stg + 1]) * m.FCp[strm] == sum(
m.heat_exchanged[hot, strm, stg] for hot in m.hot_streams)
@m.Constraint(m.streams)
def inlet_temperature_assignment(m, strm):
return m.inlet_T[strm] == (m.T[strm, 1] if strm in m.hot_streams else
m.T[strm, num_stages + 1])
@m.Constraint(m.stages, m.streams)
def stagewise_temperature_feasibility(m, stg, strm):
return m.T[strm, stg] >= m.T[strm, stg + 1]
@m.Constraint(m.hot_streams)
def hot_stream_exit_temperature_feasibility(m, strm):
return m.outlet_T[strm] <= m.T[strm, num_stages + 1]
@m.Constraint(m.cold_streams)
def cold_stream_exit_temperature_feasibility(m, strm):
return m.outlet_T[strm] >= m.T[strm, 1]
@m.Constraint(m.hot_streams)
def cold_utility_load(m, strm):
return ((m.T[strm, num_stages + 1] - m.outlet_T[strm])
* m.FCp[strm]) == m.utility_needed[strm]
@m.Constraint(m.cold_streams)
def hot_utility_load(m, strm):
return ((m.outlet_T[strm] - m.T[strm, 1])
* m.FCp[strm]) == m.utility_needed[strm]
m.utility_cost = Var(
m.streams, doc="Annual utility cost", domain=NonNegativeReals,
bounds=(0, 100000))
m.match_exchanger_fixed_cost = Var(
m.stages, m.hot_streams, m.cold_streams,
doc="Fixed cost for an exchanger between a hot and cold stream.",
domain=NonNegativeReals, bounds=(0, 5000))
m.utility_exchanger_fixed_cost = Var(
m.streams,
doc="Fixed cost for the utility exchanger.",
domain=NonNegativeReals, bounds=(0, 5000))
m.match_exchanger_area = Var(
m.stages, m.hot_streams, m.cold_streams,
doc="Exchanger area for a match between a hot and cold stream.",
domain=NonNegativeReals, bounds=(0, 500))
m.match_exchanger_area_cost = Var(
m.stages, m.hot_streams, m.cold_streams,
doc="Capital cost contribution from exchanger area.",
domain=NonNegativeReals, bounds=(0, 100000))
m.utility_exchanger_area = Var(
m.streams,
doc="Exchanger area for the hot or cold utility for a stream.",
domain=NonNegativeReals, bounds=(0, 500))
m.utility_exchanger_area_cost = Var(
m.streams,
doc="Capital cost contribution from utility exchanger area.",
domain=NonNegativeReals, bounds=(0, 100000))
def _match_exists(disj, hot, cold, stg):
# disj.conventional = Disjunct()
# disj.modular = Disjunct(m.module_sizes)
disj.match_exchanger_area_cost = Constraint(
expr=m.match_exchanger_area_cost[stg, hot, cold] * 1E-3 >=
m.area_cost_coefficient[hot, cold] * 1E-3 *
m.match_exchanger_area[stg, hot, cold] ** m.area_cost_exponent)
m.BigM[disj.match_exchanger_area_cost] = 100
disj.match_exchanger_area = Constraint(
expr=m.match_exchanger_area[stg, hot, cold] * (
m.U[hot, cold] * (
m.exchanger_approach_T[hot, cold, stg] *
m.exchanger_approach_T[hot, cold, stg + 1] *
(m.exchanger_approach_T[hot, cold, stg] +
m.exchanger_approach_T[hot, cold, stg + 1]) / 2
) ** (1 / 3)) >=
m.heat_exchanged[hot, cold, stg])
m.BigM[disj.match_exchanger_area] = 5000
disj.match_exchanger_fixed_cost = Constraint(
expr=m.match_exchanger_fixed_cost[stg, hot, cold] ==
m.exchanger_fixed_cost[hot, cold])
disj.stage_hot_approach_temperature = Constraint(
expr=m.exchanger_approach_T[hot, cold, stg] <=
m.T[hot, stg] - m.T[cold, stg])
disj.stage_cold_approach_temperature = Constraint(
expr=m.exchanger_approach_T[hot, cold, stg + 1] <=
m.T[hot, stg + 1] - m.T[cold, stg + 1])
pass
def _match_absent(disj, hot, cold, stg):
disj.no_match_exchanger_cost = Constraint(
expr=m.match_exchanger_area_cost[stg, hot, cold] == 0)
disj.no_match_exchanger_area = Constraint(
expr=m.match_exchanger_area[stg, hot, cold] == 0)
disj.no_match_exchanger_fixed_cost = Constraint(
expr=m.match_exchanger_fixed_cost[stg, hot, cold] == 0)
disj.no_heat_exchange = Constraint(
expr=m.heat_exchanged[hot, cold, stg] == 0)
pass
m.match_exists = Disjunct(
m.hot_streams, m.cold_streams, m.stages,
doc="Disjunct for the presence of an exchanger between a "
"hot stream and a cold stream at a stage.", rule=_match_exists)
m.match_absent = Disjunct(
m.hot_streams, m.cold_streams, m.stages,
doc="Disjunct for the absence of an exchanger between a "
"hot stream and a cold stream at a stage.", rule=_match_absent)
def _match_exists_or_absent(m, hot, cold, stg):
return [m.match_exists[hot, cold, stg], m.match_absent[hot, cold, stg]]
m.match_exists_or_absent = Disjunction(
m.hot_streams, m.cold_streams, m.stages,
doc="Disjunction between presence or absence of an exchanger between "
"a hot stream and a cold stream at a stage.",
rule=_match_exists_or_absent)
def _utility_exists(disj, strm):
disj.utility_exchanger_area_cost = Constraint(
expr=m.utility_exchanger_area_cost[strm] * 1E-3 >=
m.utility_area_cost_coefficient[strm] * 1E-3 *
m.utility_exchanger_area[strm]
** m.utility_area_cost_exponent[strm])
m.BigM[disj.utility_exchanger_area_cost] = 100
# temperature difference between utility and process stream at process
# stream outlet
outlet_T_diff = ((m.outlet_T[strm] - m.cold_util_T_in)
if strm in m.hot_streams else
(m.hot_util_T_in - m.outlet_T[strm]))
disj.utility_exchanger_area = Constraint(
expr=m.utility_exchanger_area[strm] * (
m.utility_U[strm] * (
(m.utility_approach_T[strm] *
outlet_T_diff) *
(m.utility_approach_T[strm] +
outlet_T_diff)
/ 2
) ** (1 / 3)) >=
m.utility_needed[strm])
m.BigM[disj.utility_exchanger_area] = 5000
disj.utility_exchanger_fixed_cost = Constraint(
expr=m.utility_exchanger_fixed_cost[strm] ==
m.utility_exchanger_unit_cost[strm])
disj.utility_cost = Constraint(
expr=m.utility_cost[strm] == m.utility_needed[strm] * (
m.cost_cold_util if strm in m.hot_streams
else m.cost_hot_util))
disj.utility_approach_temperature = Constraint(
expr=m.utility_approach_T[strm] <= (
(m.T[strm, num_stages + 1] - m.cold_util_outlet_T)
if strm in m.hot_streams else
(m.hot_util_outlet_T - m.T[strm, 1])))
disj.minimum_utility_approach_temperature = Constraint(
expr=m.utility_approach_T[strm] >= 0.1)
pass
def _utility_absent(disj, strm):
disj.no_area_cost = Constraint(
expr=m.utility_exchanger_area_cost[strm] == 0)
disj.no_area = Constraint(expr=m.utility_exchanger_area[strm] == 0)
disj.no_fixed_cost = Constraint(
expr=m.utility_exchanger_fixed_cost[strm] == 0)
disj.no_utility_cost = Constraint(expr=m.utility_cost[strm] == 0)
disj.no_utility = Constraint(expr=m.utility_needed[strm] == 0)
pass
m.utility_exists = Disjunct(
m.streams, doc="Disjunct for the presence of a utility exchanger "
"for a stream.",
rule=_utility_exists)
m.utility_absent = Disjunct(
m.streams, doc="Disjunct for the absence of a utility exchanger "
"for a stream.",
rule=_utility_absent)
def _utility_exists_or_absent(m, strm):
return [m.utility_exists[strm], m.utility_absent[strm]]
m.utility_exists_or_absent = Disjunction(
m.streams,
doc="Disjunction between presence or absence of a utility exchanger "
"for a stream.",
rule=_utility_exists_or_absent)
m.total_cost = Objective(
expr=sum(m.utility_cost[strm] for strm in m.streams)
+ sum(m.match_exchanger_fixed_cost[stg, hot, cold]
for stg in m.stages
for hot in m.hot_streams
for cold in m.cold_streams)
+ sum(m.utility_exchanger_fixed_cost[strm]
for strm in m.streams)
+ sum(m.match_exchanger_area_cost[stg, hot, cold]
for stg in m.stages
for hot in m.hot_streams
for cold in m.cold_streams)
+ sum(m.utility_exchanger_area_cost[strm]
for strm in m.streams),
sense=minimize
)
return m
if __name__ == "__main__":
m = build_model()
from pyomo.environ import SolverFactory, TransformationFactory
# m.display()
# TransformationFactory('core.relax_integrality').apply_to(m)
# result = SolverFactory('ipopt').solve(
# m, tee=True, options={'halt_on_ampl_error': 'no'})
# result = SolverFactory('gams').solve(m, tee=True, solver='conopt')
TransformationFactory('gdp.bigm').apply_to(m)
result = SolverFactory('gams').solve(
m, tee=True, solver='baron',
add_options=['OPTION optcr = 0.01;'],
keepfiles=False)
# result = SolverFactory('gams').solve(m, tee=True, solver='dicopt',
# add_options=['OPTION NLP = ipopt;'])
print(result)
m.utility_cost.display()
m.utility_exchanger_area.display()
m.match_exchanger_area.display() | instances/heat_exchangers/yee_gdp.py | from __future__ import division
from pyomo.environ import (ConcreteModel, Constraint, NonNegativeReals,
Objective, Param, RangeSet, Set, Suffix, Var,
minimize)
from pyomo.gdp import Disjunct, Disjunction
def build_model():
"""Build the model."""
m = ConcreteModel()
m.streams = Set(initialize=['H1', 'H2', 'C1', 'C2'])
m.hot_streams = Set(within=m.streams, initialize=['H1', 'H2'])
m.cold_streams = Set(within=m.streams, initialize=['C1', 'C2'])
num_stages = 2
m.stages = RangeSet(num_stages)
m.stages_plus_one = RangeSet(num_stages + 1)
m.inlet_T = Param(
m.streams, doc="Inlet temperature of stream [K]",
initialize={'H1': 443,
'H2': 423,
'C1': 293,
'C2': 353})
m.outlet_T = Param(
m.streams, doc="Outlet temperature of stream [K]",
initialize={'H1': 333,
'H2': 303,
'C1': 408,
'C2': 413})
m.cold_util_outlet_T = Param(default=313)
m.hot_util_outlet_T = Param(default=450)
# m.bigM_process_heat = Param(
# m.hot_streams, m.cold_streams, m.stages,
# doc="Big-M value for process match existence.",
# default=10000)
# m.bigM_cold_utility = Param(m.hot_streams, default=10000)
# m.bigM_hot_utility = Param(m.cold_streams, default=10000)
m.heat_exchanged = Var(
m.hot_streams, m.cold_streams, m.stages,
domain=NonNegativeReals,
doc="Heat exchanged from hot stream to cold stream in stage",
initialize=1, bounds=(0, 5000))
m.FCp = Param(m.streams, doc="Flow times heat capacity of stream",
initialize={'H1': 30,
'H2': 15,
'C1': 20,
'C2': 40})
m.utility_needed = Var(
m.streams,
doc="Hot or cold utility needed to bring a stream "
"to its required exit temperature.",
domain=NonNegativeReals, initialize=1, bounds=(0, 5000))
m.T = Var(m.streams, m.stages_plus_one,
doc="Temperature of stream at hot end of stage",
bounds=(293, 450))
m.bigM_T_approach = Param(default=500)
m.BigM = Suffix(direction=Suffix.LOCAL)
m.cost_cold_util = Param(default=20)
m.cost_hot_util = Param(default=80)
m.exchanger_fixed_cost = Param(
m.hot_streams, m.cold_streams, default=0)
m.utility_exchanger_unit_cost = Param(
m.streams, default=0)
m.area_cost_coefficient = Param(
m.hot_streams, m.cold_streams, default=1000)
m.utility_area_cost_coefficient = Param(
m.streams, initialize={
strm: (1000 if strm in m.hot_streams else 1200)
for strm in m.streams},
doc="1200 for heaters. 1000 for all other exchangers.")
m.area_cost_exponent = Param(default=0.6)
m.U = Param(m.hot_streams, m.cold_streams, default=0.8)
m.utility_U = Param(
m.streams, initialize={
strm: (0.8 if strm in m.hot_streams else 1.2)
for strm in m.streams},
doc="1.2 for heaters. 0.8 for everything else.")
m.cold_util_T_in = Param(default=293)
m.utility_area_cost_exponent = Param(m.streams, default=0.6)
m.hot_util_T_in = Param(default=450)
m.exchanger_approach_T = Var(
m.hot_streams, m.cold_streams, m.stages_plus_one,
doc="Temperature approach for exchanger between "
"hot and cold stream at a stage.",
bounds=(0.1, 500))
m.utility_approach_T = Var(
m.streams, doc="Temperature approach for utility exchangers",
bounds=(0.1, 500))
@m.Constraint(m.streams)
def overall_stream_heat_balance(m, strm):
if strm in m.hot_streams:
return (m.inlet_T[strm] - m.outlet_T[strm]) * m.FCp[strm] == (
sum(m.heat_exchanged[strm, cold, stg]
for cold in m.cold_streams for stg in m.stages)
+ m.utility_needed[strm])
if strm in m.cold_streams:
return (m.outlet_T[strm] - m.inlet_T[strm]) * m.FCp[strm] == (
sum(m.heat_exchanged[hot, strm, stg]
for hot in m.hot_streams for stg in m.stages)
+ m.utility_needed[strm])
@m.Constraint(m.stages, m.streams)
def stage_heat_balance(m, stg, strm):
if strm in m.hot_streams:
return (m.T[strm, stg] - m.T[strm, stg + 1]) * m.FCp[strm] == sum(
m.heat_exchanged[strm, cold, stg] for cold in m.cold_streams)
if strm in m.cold_streams:
return (m.T[strm, stg] - m.T[strm, stg + 1]) * m.FCp[strm] == sum(
m.heat_exchanged[hot, strm, stg] for hot in m.hot_streams)
@m.Constraint(m.streams)
def inlet_temperature_assignment(m, strm):
return m.inlet_T[strm] == (m.T[strm, 1] if strm in m.hot_streams else
m.T[strm, num_stages + 1])
@m.Constraint(m.stages, m.streams)
def stagewise_temperature_feasibility(m, stg, strm):
return m.T[strm, stg] >= m.T[strm, stg + 1]
@m.Constraint(m.hot_streams)
def hot_stream_exit_temperature_feasibility(m, strm):
return m.outlet_T[strm] <= m.T[strm, num_stages + 1]
@m.Constraint(m.cold_streams)
def cold_stream_exit_temperature_feasibility(m, strm):
return m.outlet_T[strm] >= m.T[strm, 1]
@m.Constraint(m.hot_streams)
def cold_utility_load(m, strm):
return ((m.T[strm, num_stages + 1] - m.outlet_T[strm])
* m.FCp[strm]) == m.utility_needed[strm]
@m.Constraint(m.cold_streams)
def hot_utility_load(m, strm):
return ((m.outlet_T[strm] - m.T[strm, 1])
* m.FCp[strm]) == m.utility_needed[strm]
m.utility_cost = Var(
m.streams, doc="Annual utility cost", domain=NonNegativeReals,
bounds=(0, 100000))
m.match_exchanger_fixed_cost = Var(
m.stages, m.hot_streams, m.cold_streams,
doc="Fixed cost for an exchanger between a hot and cold stream.",
domain=NonNegativeReals, bounds=(0, 5000))
m.utility_exchanger_fixed_cost = Var(
m.streams,
doc="Fixed cost for the utility exchanger.",
domain=NonNegativeReals, bounds=(0, 5000))
m.match_exchanger_area = Var(
m.stages, m.hot_streams, m.cold_streams,
doc="Exchanger area for a match between a hot and cold stream.",
domain=NonNegativeReals, bounds=(0, 500))
m.match_exchanger_area_cost = Var(
m.stages, m.hot_streams, m.cold_streams,
doc="Capital cost contribution from exchanger area.",
domain=NonNegativeReals, bounds=(0, 100000))
m.utility_exchanger_area = Var(
m.streams,
doc="Exchanger area for the hot or cold utility for a stream.",
domain=NonNegativeReals, bounds=(0, 500))
m.utility_exchanger_area_cost = Var(
m.streams,
doc="Capital cost contribution from utility exchanger area.",
domain=NonNegativeReals, bounds=(0, 100000))
def _match_exists(disj, hot, cold, stg):
# disj.conventional = Disjunct()
# disj.modular = Disjunct(m.module_sizes)
disj.match_exchanger_area_cost = Constraint(
expr=m.match_exchanger_area_cost[stg, hot, cold] * 1E-3 >=
m.area_cost_coefficient[hot, cold] * 1E-3 *
m.match_exchanger_area[stg, hot, cold] ** m.area_cost_exponent)
m.BigM[disj.match_exchanger_area_cost] = 100
disj.match_exchanger_area = Constraint(
expr=m.match_exchanger_area[stg, hot, cold] * (
m.U[hot, cold] * (
m.exchanger_approach_T[hot, cold, stg] *
m.exchanger_approach_T[hot, cold, stg + 1] *
(m.exchanger_approach_T[hot, cold, stg] +
m.exchanger_approach_T[hot, cold, stg + 1]) / 2
) ** (1 / 3)) >=
m.heat_exchanged[hot, cold, stg])
m.BigM[disj.match_exchanger_area] = 5000
disj.match_exchanger_fixed_cost = Constraint(
expr=m.match_exchanger_fixed_cost[stg, hot, cold] ==
m.exchanger_fixed_cost[hot, cold])
disj.stage_hot_approach_temperature = Constraint(
expr=m.exchanger_approach_T[hot, cold, stg] <=
m.T[hot, stg] - m.T[cold, stg])
disj.stage_cold_approach_temperature = Constraint(
expr=m.exchanger_approach_T[hot, cold, stg + 1] <=
m.T[hot, stg + 1] - m.T[cold, stg + 1])
pass
def _match_absent(disj, hot, cold, stg):
disj.no_match_exchanger_cost = Constraint(
expr=m.match_exchanger_area_cost[stg, hot, cold] == 0)
disj.no_match_exchanger_area = Constraint(
expr=m.match_exchanger_area[stg, hot, cold] == 0)
disj.no_match_exchanger_fixed_cost = Constraint(
expr=m.match_exchanger_fixed_cost[stg, hot, cold] == 0)
disj.no_heat_exchange = Constraint(
expr=m.heat_exchanged[hot, cold, stg] == 0)
pass
m.match_exists = Disjunct(
m.hot_streams, m.cold_streams, m.stages,
doc="Disjunct for the presence of an exchanger between a "
"hot stream and a cold stream at a stage.", rule=_match_exists)
m.match_absent = Disjunct(
m.hot_streams, m.cold_streams, m.stages,
doc="Disjunct for the absence of an exchanger between a "
"hot stream and a cold stream at a stage.", rule=_match_absent)
def _match_exists_or_absent(m, hot, cold, stg):
return [m.match_exists[hot, cold, stg], m.match_absent[hot, cold, stg]]
m.match_exists_or_absent = Disjunction(
m.hot_streams, m.cold_streams, m.stages,
doc="Disjunction between presence or absence of an exchanger between "
"a hot stream and a cold stream at a stage.",
rule=_match_exists_or_absent)
def _utility_exists(disj, strm):
disj.utility_exchanger_area_cost = Constraint(
expr=m.utility_exchanger_area_cost[strm] * 1E-3 >=
m.utility_area_cost_coefficient[strm] * 1E-3 *
m.utility_exchanger_area[strm]
** m.utility_area_cost_exponent[strm])
m.BigM[disj.utility_exchanger_area_cost] = 100
# temperature difference between utility and process stream at process
# stream outlet
outlet_T_diff = ((m.outlet_T[strm] - m.cold_util_T_in)
if strm in m.hot_streams else
(m.hot_util_T_in - m.outlet_T[strm]))
disj.utility_exchanger_area = Constraint(
expr=m.utility_exchanger_area[strm] * (
m.utility_U[strm] * (
(m.utility_approach_T[strm] *
outlet_T_diff) *
(m.utility_approach_T[strm] +
outlet_T_diff)
/ 2
) ** (1 / 3)) >=
m.utility_needed[strm])
m.BigM[disj.utility_exchanger_area] = 5000
disj.utility_exchanger_fixed_cost = Constraint(
expr=m.utility_exchanger_fixed_cost[strm] ==
m.utility_exchanger_unit_cost[strm])
disj.utility_cost = Constraint(
expr=m.utility_cost[strm] == m.utility_needed[strm] * (
m.cost_cold_util if strm in m.hot_streams
else m.cost_hot_util))
disj.utility_approach_temperature = Constraint(
expr=m.utility_approach_T[strm] <= (
(m.T[strm, num_stages + 1] - m.cold_util_outlet_T)
if strm in m.hot_streams else
(m.hot_util_outlet_T - m.T[strm, 1])))
disj.minimum_utility_approach_temperature = Constraint(
expr=m.utility_approach_T[strm] >= 0.1)
pass
def _utility_absent(disj, strm):
disj.no_area_cost = Constraint(
expr=m.utility_exchanger_area_cost[strm] == 0)
disj.no_area = Constraint(expr=m.utility_exchanger_area[strm] == 0)
disj.no_fixed_cost = Constraint(
expr=m.utility_exchanger_fixed_cost[strm] == 0)
disj.no_utility_cost = Constraint(expr=m.utility_cost[strm] == 0)
disj.no_utility = Constraint(expr=m.utility_needed[strm] == 0)
pass
m.utility_exists = Disjunct(
m.streams, doc="Disjunct for the presence of a utility exchanger "
"for a stream.",
rule=_utility_exists)
m.utility_absent = Disjunct(
m.streams, doc="Disjunct for the absence of a utility exchanger "
"for a stream.",
rule=_utility_absent)
def _utility_exists_or_absent(m, strm):
return [m.utility_exists[strm], m.utility_absent[strm]]
m.utility_exists_or_absent = Disjunction(
m.streams,
doc="Disjunction between presence or absence of a utility exchanger "
"for a stream.",
rule=_utility_exists_or_absent)
m.total_cost = Objective(
expr=sum(m.utility_cost[strm] for strm in m.streams)
+ sum(m.match_exchanger_fixed_cost[stg, hot, cold]
for stg in m.stages
for hot in m.hot_streams
for cold in m.cold_streams)
+ sum(m.utility_exchanger_fixed_cost[strm]
for strm in m.streams)
+ sum(m.match_exchanger_area_cost[stg, hot, cold]
for stg in m.stages
for hot in m.hot_streams
for cold in m.cold_streams)
+ sum(m.utility_exchanger_area_cost[strm]
for strm in m.streams),
sense=minimize
)
return m
if __name__ == "__main__":
m = build_model()
from pyomo.environ import SolverFactory, TransformationFactory
# m.display()
# TransformationFactory('core.relax_integrality').apply_to(m)
# result = SolverFactory('ipopt').solve(
# m, tee=True, options={'halt_on_ampl_error': 'no'})
# result = SolverFactory('gams').solve(m, tee=True, solver='conopt')
TransformationFactory('gdp.bigm').apply_to(m)
result = SolverFactory('gams').solve(
m, tee=True, solver='baron',
add_options=['OPTION optcr = 0.01;'],
keepfiles=False)
# result = SolverFactory('gams').solve(m, tee=True, solver='dicopt',
# add_options=['OPTION NLP = ipopt;'])
print(result)
m.utility_cost.display()
m.utility_exchanger_area.display()
m.match_exchanger_area.display() | 0.652906 | 0.27814 |
from flask import Response, request, jsonify
from flask_restful import Resource
from models.media import Song, Podcast, Audiobook #models created in media.py
from api.errors import invalid_request #handling 400 error
"""This class creates a dictionary to map the oaudiFileType with their respective database model"""
class MediaTypeDatabase():
def __init__(self):
self.audiofile = {
'song': Song,
'podcast': Podcast,
'audiobook': Audiobook
}
def database(self):
return self.audiofile
"""This class handles the create of media file"""
class MediaFileAPI(Resource):
def __init__(self):
self.database = MediaTypeDatabase().database()
def post(self) -> Response:
audioFileType = request.get_json()['audioFileType']
audioFileMetadata = request.get_json()['audioFileMetadata']
db = self.database[audioFileType]
if audioFileType == 'song':
try:
post_data = db(**audioFileMetadata).save()
result = {'id', str(post_data.id)}
return Response(status=200)
except Exception as error:
if error.__class__.__name__ == 'ValidationError':
return invalid_request()
else:
return Response(status=500)
elif audioFileType == 'podcast':
try:
post_data = db(**audioFileMetadata).save()
result = {'id', str(post_data.id)}
return Response(status=200)
except Exception as error:
if error.__class__.__name__ == 'ValidationError':
return invalid_request()
else:
return Response(status=500)
elif audioFileType == 'audiobook':
try:
post_data = db(**audioFileMetadata).save()
result = {'id', str(post_data.id)}
return Response(status=200)
except Exception as error:
if error.__class__.__name__ == 'ValidationError':
return invalid_request()
else:
return Response(status=500)
else:
return invalid_request()
"""This class handles the get, update and delete of media file"""
class MediaFilesAPI(Resource):
def __init__(self):
self.database = MediaTypeDatabase().database()
def get(self, audioFileType: str, audioFileID: str = None) -> Response:
try:
db = self.database[audioFileType]
if audioFileID is not None:
data = db.objects.get(id=audioFileID)
response = jsonify({'data': data})
response.status_code = 200
return response
else:
data = db.objects()
response = jsonify({'data': data})
response.status_code = 200
return response
except:
return Response(status=500)
def put(self, audioFileType: str , audioFileID: str) -> Response:
db = self.database[audioFileType]
audioFileMetadata = request.get_json()['audioFileMetadata']
try:
data = db.objects(id=audioFileID).update(**audioFileMetadata)
response = jsonify(({'result': 'audio file updated'}))
response.status_code = 200
return response
except Exception as error:
if error.__class__.__name__ == 'ValidationError':
return invalid_request()
else:
return Response(status=500)
def delete(self, audioFileType: str = None, audioFileID: str = None) -> Response:
db = self.database[audioFileType]
try:
data = db.objects(id=audioFileID).delete()
response = jsonify({'result': 'audio file deleted'})
response.status_code = 200
return response
except:
return invalid_request() | api/mediafile.py |
from flask import Response, request, jsonify
from flask_restful import Resource
from models.media import Song, Podcast, Audiobook #models created in media.py
from api.errors import invalid_request #handling 400 error
"""This class creates a dictionary to map the oaudiFileType with their respective database model"""
class MediaTypeDatabase():
def __init__(self):
self.audiofile = {
'song': Song,
'podcast': Podcast,
'audiobook': Audiobook
}
def database(self):
return self.audiofile
"""This class handles the create of media file"""
class MediaFileAPI(Resource):
def __init__(self):
self.database = MediaTypeDatabase().database()
def post(self) -> Response:
audioFileType = request.get_json()['audioFileType']
audioFileMetadata = request.get_json()['audioFileMetadata']
db = self.database[audioFileType]
if audioFileType == 'song':
try:
post_data = db(**audioFileMetadata).save()
result = {'id', str(post_data.id)}
return Response(status=200)
except Exception as error:
if error.__class__.__name__ == 'ValidationError':
return invalid_request()
else:
return Response(status=500)
elif audioFileType == 'podcast':
try:
post_data = db(**audioFileMetadata).save()
result = {'id', str(post_data.id)}
return Response(status=200)
except Exception as error:
if error.__class__.__name__ == 'ValidationError':
return invalid_request()
else:
return Response(status=500)
elif audioFileType == 'audiobook':
try:
post_data = db(**audioFileMetadata).save()
result = {'id', str(post_data.id)}
return Response(status=200)
except Exception as error:
if error.__class__.__name__ == 'ValidationError':
return invalid_request()
else:
return Response(status=500)
else:
return invalid_request()
"""This class handles the get, update and delete of media file"""
class MediaFilesAPI(Resource):
def __init__(self):
self.database = MediaTypeDatabase().database()
def get(self, audioFileType: str, audioFileID: str = None) -> Response:
try:
db = self.database[audioFileType]
if audioFileID is not None:
data = db.objects.get(id=audioFileID)
response = jsonify({'data': data})
response.status_code = 200
return response
else:
data = db.objects()
response = jsonify({'data': data})
response.status_code = 200
return response
except:
return Response(status=500)
def put(self, audioFileType: str , audioFileID: str) -> Response:
db = self.database[audioFileType]
audioFileMetadata = request.get_json()['audioFileMetadata']
try:
data = db.objects(id=audioFileID).update(**audioFileMetadata)
response = jsonify(({'result': 'audio file updated'}))
response.status_code = 200
return response
except Exception as error:
if error.__class__.__name__ == 'ValidationError':
return invalid_request()
else:
return Response(status=500)
def delete(self, audioFileType: str = None, audioFileID: str = None) -> Response:
db = self.database[audioFileType]
try:
data = db.objects(id=audioFileID).delete()
response = jsonify({'result': 'audio file deleted'})
response.status_code = 200
return response
except:
return invalid_request() | 0.405096 | 0.085633 |
import logging
# external packages
# local imports
from mountcontrol.connection import Connection
from mountcontrol.convert import valueToFloat
from mountcontrol.convert import valueToInt
class Setting(object):
"""
The class Setting inherits all information and handling of setting
attributes of the connected mount and provides the abstracted interface
to a 10 micron mount.
>>> setting = Setting(host='')
"""
__all__ = ['Setting',
]
log = logging.getLogger(__name__)
def __init__(self,
host=None,
):
self.host = host
self._slewRate = None
self._slewRateMin = None
self._slewRateMax = None
self._timeToFlip = None
self._meridianLimitTrack = None
self._meridianLimitSlew = None
self._refractionTemp = None
self._refractionPress = None
self._telescopeTempDEC = None
self._statusRefraction = None
self._statusUnattendedFlip = None
self._statusDualAxisTracking = None
self._horizonLimitHigh = None
self._horizonLimitLow = None
self._wakeOnLan = None
self._UTCValid = None
self._UTCExpire = None
self._gpsSynced = None
self._typeConnection = None
self._addressLanMAC = None
self._addressWirelessMAC = None
self._weatherStatus = None
self._weatherPressure = None
self._weatherTemperature = None
self._weatherHumidity = None
self._weatherDewPoint = None
self._trackingRate = None
self._webInterfaceStat = None
@property
def slewRate(self):
return self._slewRate
@slewRate.setter
def slewRate(self, value):
self._slewRate = valueToFloat(value)
@property
def slewRateMin(self):
return self._slewRateMin
@slewRateMin.setter
def slewRateMin(self, value):
self._slewRateMin = valueToFloat(value)
@property
def slewRateMax(self):
return self._slewRateMax
@slewRateMax.setter
def slewRateMax(self, value):
self._slewRateMax = valueToFloat(value)
@property
def timeToFlip(self):
return self._timeToFlip
@timeToFlip.setter
def timeToFlip(self, value):
self._timeToFlip = valueToFloat(value)
@property
def meridianLimitTrack(self):
return self._meridianLimitTrack
@meridianLimitTrack.setter
def meridianLimitTrack(self, value):
self._meridianLimitTrack = valueToFloat(value)
@property
def meridianLimitSlew(self):
return self._meridianLimitSlew
@meridianLimitSlew.setter
def meridianLimitSlew(self, value):
self._meridianLimitSlew = valueToFloat(value)
def timeToMeridian(self):
if self._timeToFlip is not None and self._meridianLimitTrack is not None:
return int(self._timeToFlip - self._meridianLimitTrack * 4)
else:
return None
@property
def refractionTemp(self):
    # refraction correction temperature in Celsius (see setRefractionTemp)
    return self._refractionTemp

@refractionTemp.setter
def refractionTemp(self, value):
    self._refractionTemp = valueToFloat(value)

@property
def refractionPress(self):
    # refraction correction pressure in hPa (see setRefractionPress)
    return self._refractionPress

@refractionPress.setter
def refractionPress(self, value):
    self._refractionPress = valueToFloat(value)

@property
def telescopeTempDEC(self):
    # temperature reading of the DEC motor/telescope sensor (:GTMP1#)
    return self._telescopeTempDEC

@telescopeTempDEC.setter
def telescopeTempDEC(self, value):
    self._telescopeTempDEC = valueToFloat(value)

@property
def statusRefraction(self):
    # refraction correction enabled flag
    return self._statusRefraction

@statusRefraction.setter
def statusRefraction(self, value):
    # parseSetting passes a pre-computed comparison result; bool() keeps
    # any truthy input as True
    self._statusRefraction = bool(value)

@property
def statusUnattendedFlip(self):
    # unattended flip enabled flag
    return self._statusUnattendedFlip

@statusUnattendedFlip.setter
def statusUnattendedFlip(self, value):
    self._statusUnattendedFlip = bool(value)

@property
def statusDualAxisTracking(self):
    # dual axis tracking enabled flag
    return self._statusDualAxisTracking

@statusDualAxisTracking.setter
def statusDualAxisTracking(self, value):
    self._statusDualAxisTracking = bool(value)

@property
def horizonLimitHigh(self):
    # high horizon limit in degrees (see setHorizonLimitHigh)
    return self._horizonLimitHigh

@horizonLimitHigh.setter
def horizonLimitHigh(self, value):
    self._horizonLimitHigh = valueToFloat(value)

@property
def horizonLimitLow(self):
    # low horizon limit in degrees (see setHorizonLimitLow)
    return self._horizonLimitLow

@horizonLimitLow.setter
def horizonLimitLow(self, value):
    self._horizonLimitLow = valueToFloat(value)

@property
def UTCValid(self):
    # True when the mount reports its UTC leap-second data as valid
    return self._UTCValid

@UTCValid.setter
def UTCValid(self, value):
    self._UTCValid = bool(value)

@property
def UTCExpire(self):
    # expiry date string of the UTC data, or None when never set
    return self._UTCExpire

@UTCExpire.setter
def UTCExpire(self, value):
    # only plain strings are accepted; anything else resets to None
    if isinstance(value, str):
        self._UTCExpire = value
    else:
        self._UTCExpire = None
@property
def typeConnection(self):
    # connection type code reported by the mount (0..3), or None
    return self._typeConnection

@typeConnection.setter
def typeConnection(self, value):
    """
    Store the connection type code; unparsable or out-of-range (not 0..3)
    input is stored as None.
    :param value: raw response value, coerced via valueToInt
    """
    # The previous version assigned the backing attribute twice on the
    # None path via an if/elif fall-through; this single-assignment form
    # produces the same result and matches the weatherStatus setter.
    value = valueToInt(value)
    if value is not None and not 0 <= value <= 3:
        value = None
    self._typeConnection = value
@property
def gpsSynced(self):
    # True once the mount reports a GPS time sync
    return self._gpsSynced

@gpsSynced.setter
def gpsSynced(self, value):
    self._gpsSynced = bool(value)

@property
def addressLanMAC(self):
    # wired MAC address, normalized to upper case with ':' separators
    return self._addressLanMAC

@addressLanMAC.setter
def addressLanMAC(self, value):
    # NOTE(review): assumes value is a str; a None response would raise
    # AttributeError here -- confirm parseSetting always delivers a string
    self._addressLanMAC = value.upper().replace('.', ':')

@property
def addressWirelessMAC(self):
    # wireless MAC address, normalized like addressLanMAC
    return self._addressWirelessMAC

@addressWirelessMAC.setter
def addressWirelessMAC(self, value):
    # same str assumption as addressLanMAC
    self._addressWirelessMAC = value.upper().replace('.', ':')
@property
def wakeOnLan(self):
    # human readable wake-on-LAN state: 'None', 'OFF', 'ON' or None
    return self._wakeOnLan

@wakeOnLan.setter
def wakeOnLan(self, value):
    # Map the raw protocol character to its display string; anything
    # unexpected (including non-string input) resets the state to None.
    mapping = {'N': 'None', '0': 'OFF', '1': 'ON'}
    self._wakeOnLan = mapping.get(value) if isinstance(value, str) else None
@property
def weatherStatus(self):
    # weather function status code (0..2) from the mount, or None
    return self._weatherStatus

@weatherStatus.setter
def weatherStatus(self, value):
    # Coerce to int first; unparsable or out-of-range values are stored
    # as None.
    value = valueToInt(value)
    if value is not None and not 0 <= value <= 2:
        value = None
    self._weatherStatus = value
@property
def weatherPressure(self):
    # pressure from the mount's weather data in hPa (presumably -- see
    # setRefractionPress units; confirm)
    return self._weatherPressure

@weatherPressure.setter
def weatherPressure(self, value):
    self._weatherPressure = valueToFloat(value)

@property
def weatherTemperature(self):
    # temperature from the mount's weather data
    return self._weatherTemperature

@weatherTemperature.setter
def weatherTemperature(self, value):
    self._weatherTemperature = valueToFloat(value)

@property
def weatherHumidity(self):
    # relative humidity from the mount's weather data
    return self._weatherHumidity

@weatherHumidity.setter
def weatherHumidity(self, value):
    self._weatherHumidity = valueToFloat(value)

@property
def weatherDewPoint(self):
    # dew point from the mount's weather data
    return self._weatherDewPoint

@weatherDewPoint.setter
def weatherDewPoint(self, value):
    self._weatherDewPoint = valueToFloat(value)

@property
def trackingRate(self):
    # current tracking rate value (compared against fixed constants in
    # checkRateLunar/Sidereal/Solar)
    return self._trackingRate

@trackingRate.setter
def trackingRate(self, value):
    self._trackingRate = valueToFloat(value)
@property
def webInterfaceStat(self):
    # True/False once polled, None while unknown
    return self._webInterfaceStat

@webInterfaceStat.setter
def webInterfaceStat(self, value):
    # The mount answers with a numeric flag; keep None for unparsable
    # input, otherwise collapse to a boolean.
    value = valueToFloat(value)
    self._webInterfaceStat = None if value is None else bool(value)
def parseSetting(self, response, numberOfChunks):
    """
    Parsing the polling med command.
    :param response: data load from mount (list of response chunks)
    :param numberOfChunks: expected number of chunks
    :return: success: True if ok, False if not
    """
    # The chunk count must match what the command string in pollSetting()
    # produced, otherwise the positional decoding below would misalign.
    if len(response) != numberOfChunks:
        self.log.warning('wrong number of chunks')
        return False
    # Indices mirror the command order assembled in pollSetting().
    self.slewRate = response[0]
    self.slewRateMin = response[1]
    self.slewRateMax = response[2]
    self.timeToFlip = response[3]
    self.meridianLimitTrack = response[4]
    self.meridianLimitSlew = response[5]
    self.refractionTemp = response[6]
    self.refractionPress = response[7]
    self.telescopeTempDEC = response[8]
    # Chunk 9 is a packed field: three one-char boolean flags followed by
    # the high horizon limit digits (presumably several commands answer
    # without a separator here -- confirm against the protocol docs).
    self.statusRefraction = (response[9][0] == '1')
    self.statusUnattendedFlip = (response[9][1] == '1')
    self.statusDualAxisTracking = (response[9][2] == '1')
    self.horizonLimitHigh = response[9][3:6]
    self.horizonLimitLow = response[10][0:3]
    # UTC leap-second data arrives as '<valid flag>,<expiry date>'.
    valid, expirationDate = response[11].split(',')
    self.UTCValid = (valid == 'V')
    self.UTCExpire = expirationDate
    self.typeConnection = response[12]
    self.gpsSynced = (response[13] == '1')
    self.addressLanMAC = response[14]
    self.wakeOnLan = response[15]
    self.weatherStatus = response[16]
    # Weather chunks are '<value>,<rest>' pairs; only the value is kept.
    self.weatherPressure = response[17].split(',')[0]
    self.weatherTemperature = response[18].split(',')[0]
    self.weatherHumidity = response[19].split(',')[0]
    self.weatherDewPoint = response[20].split(',')[0]
    self.trackingRate = response[21]
    self.webInterfaceStat = response[22]
    return True
def pollSetting(self):
    """
    Poll the full settings block from the mount and parse it.

    The mount needs active polling, so one combined command string
    requests every settings value in a single round trip.
    :return: success: True if ok, False if not
    """
    commandString = (
        ':U2#:GMs#:GMsa#:GMsb#:Gmte#:Glmt#:Glms#:GRTMP#:GRPRS#:GTMP1#'
        ':GREF#:Guaf#:Gdat#:Gh#:Go#:GDUTV#:GINQ#:gtg#:GMAC#:GWOL#'
        ':WSG#:WSP#:WST#:WSH#:WSD#:GT#:NTGweb#'
    )
    conn = Connection(self.host)
    suc, response, numberOfChunks = conn.communicate(commandString)
    if not suc:
        return False
    return self.parseSetting(response, numberOfChunks)
def setSlewRate(self, value):
    """
    Send the maximum slew rate to the mount.

    :param value: max slew rate in degrees per second, accepted range 2..15
    :return: success
    """
    if value is None:
        return False
    if not isinstance(value, (int, float)):
        return False
    if value < 2 or value > 15:
        return False
    command = f':Sw{value:02.0f}#:RMs{value:02.0f}#'
    suc, response, _ = Connection(self.host).communicate(command)
    if not suc:
        return False
    return response[0] == '1'
def setSlewSpeedMax(self):
    """
    Switch slewing to the mount's maximum rate.

    :return: success
    """
    suc, _, _ = Connection(self.host).communicate(':RS#')
    return suc
def setSlewSpeedHigh(self):
    """
    Switch slewing to the high tier via the centering rate.

    The speed tiers are implemented through centering rates because
    changing the slew speed directly would also alter the max slew
    speed setup.
    :return: success
    """
    suc, _, _ = Connection(self.host).communicate(':RC2#:RC#')
    return suc
def setSlewSpeedMed(self):
    """
    Switch slewing to the medium tier via the centering rate.

    Implemented through a centering rate (see setSlewSpeedHigh) so the
    max slew speed setup stays untouched.
    :return: success
    """
    rate = 255
    command = f':Rc{rate:02.0f}#:RC#'
    suc, _, _ = Connection(self.host).communicate(command)
    return suc
def setSlewSpeedLow(self):
    """
    Switch slewing to the low tier via the centering rate.

    Implemented through a centering rate (see setSlewSpeedHigh) so the
    max slew speed setup stays untouched.
    :return: success
    """
    rate = 128
    command = f':Rc{rate:02.0f}#:RC#'
    suc, _, _ = Connection(self.host).communicate(command)
    return suc
def setRefractionParam(self, temperature=None, pressure=None):
    """
    Send temperature and pressure for refraction correction in one call.

    Limits are -40..+75 Celsius and 500..1300 hPa; the mount itself
    documents no hard limit.
    :param temperature: float, Celsius
    :param pressure: float, hPa
    :return: success
    """
    if temperature is None or pressure is None:
        return False
    if temperature < -40 or temperature > 75:
        return False
    if pressure < 500 or pressure > 1300:
        return False
    command = f':SRPRS{pressure:06.1f}#:SRTMP{temperature:+06.1f}#'
    suc, response, _ = Connection(self.host).communicate(command)
    if not suc:
        return False
    # both sub-commands must acknowledge
    return response[0] == '11'
def setRefractionTemp(self, value):
    """
    Send the refraction correction temperature to the mount.

    The accepted range is -40..+75 Celsius; the mount itself documents
    no hard limit.
    :param value: float, temperature correction in Celsius
    :return: success
    """
    if value is None:
        return False
    if value < -40 or value > 75:
        return False
    conn = Connection(self.host)
    # f-string for consistency with the other command builders in this
    # class (was str.format); produces the identical command text
    commandString = f':SRTMP{value:+06.1f}#'
    suc, response, numberOfChunks = conn.communicate(commandString)
    if not suc:
        return False
    return response[0] == '1'
def setRefractionPress(self, value):
    """
    Send the refraction correction pressure to the mount.

    The accepted range is 500..1300 hPa; no limit is given by the mount.
    The bounds only matter above roughly 5000 m altitude.
    :param value: float, pressure correction in hPa
    :return: success
    """
    if value is None:
        return False
    if value < 500 or value > 1300:
        return False
    conn = Connection(self.host)
    # f-string for consistency with the other command builders in this
    # class (was str.format); produces the identical command text
    commandString = f':SRPRS{value:06.1f}#'
    suc, response, numberOfChunks = conn.communicate(commandString)
    if not suc:
        return False
    return response[0] == '1'
def setRefraction(self, status):
    """
    Enable or disable refraction correction on the mount.

    :param status: truthy enables, falsy disables
    :return: success
    """
    flag = 1 if status else 0
    suc, response, _ = Connection(self.host).communicate(f':SREF{flag:1d}#')
    if not suc:
        return False
    return response[0] == '1'
def setUnattendedFlip(self, status):
    """
    Enable or disable the unattended flip.

    The mount returns no payload for this command, so only the transport
    result is reported.
    :param status: truthy enables, falsy disables
    :return: success
    """
    flag = 1 if status else 0
    suc, _, _ = Connection(self.host).communicate(f':Suaf{flag:1d}#')
    return suc
def setDualAxisTracking(self, status):
    """
    Enable or disable dual axis tracking on the mount.

    :param status: truthy enables, falsy disables
    :return: success
    """
    flag = 1 if status else 0
    suc, response, _ = Connection(self.host).communicate(f':Sdat{flag:1d}#')
    if not suc:
        return False
    return response[0] == '1'
def setWOL(self, status):
    """
    Enable or disable wake-on-LAN on the mount.

    :param status: truthy enables, falsy disables
    :return: success
    """
    flag = 1 if status else 0
    suc, response, _ = Connection(self.host).communicate(f':SWOL{flag:1d}#')
    if not suc:
        return False
    return response[0] == '1'
def setMeridianLimitTrack(self, value):
    """
    Set the meridian track (flip) limit on the mount.

    The accepted range is 1..30 degrees; the value is truncated to an
    integer before sending.
    :param value: degrees (float or int)
    :return: success
    """
    if value < 1 or value > 30:
        return False
    limit = int(value)
    suc, response, _ = Connection(self.host).communicate(f':Slmt{limit:02d}#')
    if not suc:
        return False
    return response[0] == '1'
def setMeridianLimitSlew(self, value):
    """
    setMeridianLimitSlew sends the command for setting the slew flip limit
    to the mount. The code enforces 0..30 degrees.
    NOTE(review): an earlier docstring claimed -20..20 degrees, which does
    not match the range checks below -- confirm the intended range against
    the mount protocol.
    :param value: float / int for degrees
    :return: success
    """
    if value < 0:
        return False
    elif value > 30:
        return False
    conn = Connection(self.host)
    # the protocol takes whole degrees only
    value = int(value)
    commandString = f':Slms{value:02d}#'
    suc, response, numberOfChunks = conn.communicate(commandString)
    if not suc:
        return False
    if response[0] != '1':
        return False
    return True
def setHorizonLimitHigh(self, value):
    """
    Set the high horizon limit on the mount.

    The accepted range is 0..90 degrees; the value is truncated to an
    integer before sending.
    :param value: degrees (float or int)
    :return: success
    """
    if value < 0 or value > 90:
        return False
    limit = int(value)
    suc, response, _ = Connection(self.host).communicate(f':Sh+{limit:02d}#')
    if not suc:
        return False
    return response[0] == '1'
def setHorizonLimitLow(self, value):
    """
    Set the low horizon limit on the mount.

    The accepted range is -5..+45 degrees; the value is truncated to an
    integer and sent with an explicit sign.
    :param value: degrees (float or int)
    :return: success
    """
    if value < -5 or value > 45:
        return False
    limit = int(value)
    suc, response, _ = Connection(self.host).communicate(f':So{limit:+02d}#')
    if not suc:
        return False
    return response[0] == '1'
def setDirectWeatherUpdateType(self, value):
    """
    Select how the mount updates refraction data from the weather station.

    0: do not update the refraction model data
    1: update only while the mount is not tracking
    2: update continuously, with a 15s smoothing filter
    :param value: int 0..2
    :return: success
    """
    if value < 0 or value > 2:
        return False
    mode = int(value)
    suc, response, _ = Connection(self.host).communicate(f':WSS{mode:1d}#')
    if not suc:
        return False
    return response[0] == '1'
def checkRateLunar(self):
    """
    Report whether the polled tracking rate equals the lunar rate (62.4).
    :return: bool
    """
    return self._trackingRate == 62.4
def checkRateSidereal(self):
    """
    Report whether the polled tracking rate equals the sidereal rate (60.2).
    :return: bool
    """
    return self._trackingRate == 60.2
def checkRateSolar(self):
    """
    Report whether the polled tracking rate equals the solar rate (60.3).
    :return: bool
    """
    return self._trackingRate == 60.3
def setLunarTracking(self):
    """
    Select the lunar tracking rate on the mount.
    :return: success
    """
    suc, _, _ = Connection(self.host).communicate(':RT0#')
    return suc
def setSiderealTracking(self):
    """
    Select the sidereal tracking rate on the mount.
    :return: success
    """
    suc, _, _ = Connection(self.host).communicate(':RT2#')
    return suc
def setSolarTracking(self):
    """
    Select the solar tracking rate on the mount.
    :return: success
    """
    suc, _, _ = Connection(self.host).communicate(':RT1#')
    return suc
def setWebInterface(self, status):
    """
    Enable or disable the mount's web interface.

    :param status: truthy enables, falsy disables
    :return: success
    """
    # Fixed: the final return line had non-Python table residue fused onto
    # it ("| mw4/mountcontrol/setting.py | import logging"), which broke
    # the file; the logic itself is unchanged.
    conn = Connection(self.host)
    commandString = ':NTSweb{0:1d}#'.format(1 if status else 0)
    suc, response, numberOfChunks = conn.communicate(commandString)
    if not suc:
        return False
    if response[0] != '1':
        return False
    return True
# external packages
# local imports
from mountcontrol.connection import Connection
from mountcontrol.convert import valueToFloat
from mountcontrol.convert import valueToInt
class Setting(object):
"""
The class Setting inherits all information and handling of setting
attributes of the connected mount and provides the abstracted interface
to a 10 micron mount.
>>> setting = Setting(host='')
"""
__all__ = ['Setting',
]
log = logging.getLogger(__name__)
def __init__(self,
host=None,
):
self.host = host
self._slewRate = None
self._slewRateMin = None
self._slewRateMax = None
self._timeToFlip = None
self._meridianLimitTrack = None
self._meridianLimitSlew = None
self._refractionTemp = None
self._refractionPress = None
self._telescopeTempDEC = None
self._statusRefraction = None
self._statusUnattendedFlip = None
self._statusDualAxisTracking = None
self._horizonLimitHigh = None
self._horizonLimitLow = None
self._wakeOnLan = None
self._UTCValid = None
self._UTCExpire = None
self._gpsSynced = None
self._typeConnection = None
self._addressLanMAC = None
self._addressWirelessMAC = None
self._weatherStatus = None
self._weatherPressure = None
self._weatherTemperature = None
self._weatherHumidity = None
self._weatherDewPoint = None
self._trackingRate = None
self._webInterfaceStat = None
@property
def slewRate(self):
return self._slewRate
@slewRate.setter
def slewRate(self, value):
self._slewRate = valueToFloat(value)
@property
def slewRateMin(self):
return self._slewRateMin
@slewRateMin.setter
def slewRateMin(self, value):
self._slewRateMin = valueToFloat(value)
@property
def slewRateMax(self):
return self._slewRateMax
@slewRateMax.setter
def slewRateMax(self, value):
self._slewRateMax = valueToFloat(value)
@property
def timeToFlip(self):
return self._timeToFlip
@timeToFlip.setter
def timeToFlip(self, value):
self._timeToFlip = valueToFloat(value)
@property
def meridianLimitTrack(self):
return self._meridianLimitTrack
@meridianLimitTrack.setter
def meridianLimitTrack(self, value):
self._meridianLimitTrack = valueToFloat(value)
@property
def meridianLimitSlew(self):
return self._meridianLimitSlew
@meridianLimitSlew.setter
def meridianLimitSlew(self, value):
self._meridianLimitSlew = valueToFloat(value)
def timeToMeridian(self):
if self._timeToFlip is not None and self._meridianLimitTrack is not None:
return int(self._timeToFlip - self._meridianLimitTrack * 4)
else:
return None
@property
def refractionTemp(self):
return self._refractionTemp
@refractionTemp.setter
def refractionTemp(self, value):
self._refractionTemp = valueToFloat(value)
@property
def refractionPress(self):
return self._refractionPress
@refractionPress.setter
def refractionPress(self, value):
self._refractionPress = valueToFloat(value)
@property
def telescopeTempDEC(self):
return self._telescopeTempDEC
@telescopeTempDEC.setter
def telescopeTempDEC(self, value):
self._telescopeTempDEC = valueToFloat(value)
@property
def statusRefraction(self):
return self._statusRefraction
@statusRefraction.setter
def statusRefraction(self, value):
self._statusRefraction = bool(value)
@property
def statusUnattendedFlip(self):
return self._statusUnattendedFlip
@statusUnattendedFlip.setter
def statusUnattendedFlip(self, value):
self._statusUnattendedFlip = bool(value)
@property
def statusDualAxisTracking(self):
return self._statusDualAxisTracking
@statusDualAxisTracking.setter
def statusDualAxisTracking(self, value):
self._statusDualAxisTracking = bool(value)
@property
def horizonLimitHigh(self):
return self._horizonLimitHigh
@horizonLimitHigh.setter
def horizonLimitHigh(self, value):
self._horizonLimitHigh = valueToFloat(value)
@property
def horizonLimitLow(self):
return self._horizonLimitLow
@horizonLimitLow.setter
def horizonLimitLow(self, value):
self._horizonLimitLow = valueToFloat(value)
@property
def UTCValid(self):
return self._UTCValid
@UTCValid.setter
def UTCValid(self, value):
self._UTCValid = bool(value)
@property
def UTCExpire(self):
return self._UTCExpire
@UTCExpire.setter
def UTCExpire(self, value):
if isinstance(value, str):
self._UTCExpire = value
else:
self._UTCExpire = None
@property
def typeConnection(self):
return self._typeConnection
@typeConnection.setter
def typeConnection(self, value):
value = valueToInt(value)
if value is None:
self._typeConnection = value
elif not 0 <= value <= 3:
value = None
self._typeConnection = value
@property
def gpsSynced(self):
return self._gpsSynced
@gpsSynced.setter
def gpsSynced(self, value):
self._gpsSynced = bool(value)
@property
def addressLanMAC(self):
return self._addressLanMAC
@addressLanMAC.setter
def addressLanMAC(self, value):
self._addressLanMAC = value.upper().replace('.', ':')
@property
def addressWirelessMAC(self):
return self._addressWirelessMAC
@addressWirelessMAC.setter
def addressWirelessMAC(self, value):
self._addressWirelessMAC = value.upper().replace('.', ':')
@property
def wakeOnLan(self):
return self._wakeOnLan
@wakeOnLan.setter
def wakeOnLan(self, value):
if value == 'N':
self._wakeOnLan = 'None'
elif value == '0':
self._wakeOnLan = 'OFF'
elif value == '1':
self._wakeOnLan = 'ON'
else:
self._wakeOnLan = None
@property
def weatherStatus(self):
return self._weatherStatus
@weatherStatus.setter
def weatherStatus(self, value):
value = valueToInt(value)
if value is None:
self._weatherStatus = value
elif 0 <= value <= 2:
self._weatherStatus = value
else:
self._weatherStatus = None
@property
def weatherPressure(self):
return self._weatherPressure
@weatherPressure.setter
def weatherPressure(self, value):
self._weatherPressure = valueToFloat(value)
@property
def weatherTemperature(self):
return self._weatherTemperature
@weatherTemperature.setter
def weatherTemperature(self, value):
self._weatherTemperature = valueToFloat(value)
@property
def weatherHumidity(self):
return self._weatherHumidity
@weatherHumidity.setter
def weatherHumidity(self, value):
self._weatherHumidity = valueToFloat(value)
@property
def weatherDewPoint(self):
return self._weatherDewPoint
@weatherDewPoint.setter
def weatherDewPoint(self, value):
self._weatherDewPoint = valueToFloat(value)
@property
def trackingRate(self):
return self._trackingRate
@trackingRate.setter
def trackingRate(self, value):
self._trackingRate = valueToFloat(value)
@property
def webInterfaceStat(self):
return self._webInterfaceStat
@webInterfaceStat.setter
def webInterfaceStat(self, value):
value = valueToFloat(value)
if value is None:
self._webInterfaceStat = None
else:
self._webInterfaceStat = bool(value)
def parseSetting(self, response, numberOfChunks):
"""
Parsing the polling med command.
:param response: data load from mount
:param numberOfChunks:
:return: success: True if ok, False if not
"""
if len(response) != numberOfChunks:
self.log.warning('wrong number of chunks')
return False
self.slewRate = response[0]
self.slewRateMin = response[1]
self.slewRateMax = response[2]
self.timeToFlip = response[3]
self.meridianLimitTrack = response[4]
self.meridianLimitSlew = response[5]
self.refractionTemp = response[6]
self.refractionPress = response[7]
self.telescopeTempDEC = response[8]
self.statusRefraction = (response[9][0] == '1')
self.statusUnattendedFlip = (response[9][1] == '1')
self.statusDualAxisTracking = (response[9][2] == '1')
self.horizonLimitHigh = response[9][3:6]
self.horizonLimitLow = response[10][0:3]
valid, expirationDate = response[11].split(',')
self.UTCValid = (valid == 'V')
self.UTCExpire = expirationDate
self.typeConnection = response[12]
self.gpsSynced = (response[13] == '1')
self.addressLanMAC = response[14]
self.wakeOnLan = response[15]
self.weatherStatus = response[16]
self.weatherPressure = response[17].split(',')[0]
self.weatherTemperature = response[18].split(',')[0]
self.weatherHumidity = response[19].split(',')[0]
self.weatherDewPoint = response[20].split(',')[0]
self.trackingRate = response[21]
self.webInterfaceStat = response[22]
return True
def pollSetting(self):
"""
Sending the polling med command. As the mount need polling the data, I
send a set of commands to get the data back to be able to process and
store it.
:return: success: True if ok, False if not
"""
conn = Connection(self.host)
cs1 = ':U2#:GMs#:GMsa#:GMsb#:Gmte#:Glmt#:Glms#:GRTMP#:GRPRS#:GTMP1#'
cs2 = ':GREF#:Guaf#:Gdat#:Gh#:Go#:GDUTV#:GINQ#:gtg#:GMAC#:GWOL#'
cs3 = ':WSG#:WSP#:WST#:WSH#:WSD#:GT#:NTGweb#'
commandString = cs1 + cs2 + cs3
suc, response, numberOfChunks = conn.communicate(commandString)
if not suc:
return False
suc = self.parseSetting(response, numberOfChunks)
return suc
def setSlewRate(self, value):
"""
setSlewRate sends the command for setting the max slew rate to the mount.
:param value: float for max slew rate in degrees per second
:return: success
"""
if value is None:
return False
if not isinstance(value, (int, float)):
return False
if value < 2:
return False
elif value > 15:
return False
conn = Connection(self.host)
commandString = f':Sw{value:02.0f}#:RMs{value:02.0f}#'
suc, response, numberOfChunks = conn.communicate(commandString)
if not suc:
return False
if response[0] != '1':
return False
return True
def setSlewSpeedMax(self):
"""
setSlewSpeedMax set the slewing speed to max
:return: success
"""
conn = Connection(self.host)
commandString = ':RS#'
suc, response, numberOfChunks = conn.communicate(commandString)
return suc
def setSlewSpeedHigh(self):
"""
setSlewSpeedHigh set the slewing speed to centering rate. the different
speeds are set through setting different centering rates, because setting
different slew speeds leads to a scenario, that we get a different setup
in max slew speed as well.
:return: success
"""
conn = Connection(self.host)
commandString = ':RC2#:RC#'
suc, response, numberOfChunks = conn.communicate(commandString)
return suc
def setSlewSpeedMed(self):
"""
setSlewSpeedMed set the slewing speed to centering rate. the different
speeds are set through setting different centering rates, because setting
different slew speeds leads to a scenario, that we get a different setup
in max slew speed as well.
:return: success
"""
conn = Connection(self.host)
centerSpeed = 255
commandString = f':Rc{centerSpeed:02.0f}#:RC#'
suc, response, numberOfChunks = conn.communicate(commandString)
return suc
def setSlewSpeedLow(self):
"""
setSlewSpeedLow set the slewing speed to centering rate. the different
speeds are set through setting different centering rates, because setting
different slew speeds leads to a scenario, that we get a different setup
in max slew speed as well.
:return: success
"""
conn = Connection(self.host)
centerSpeed = 128
commandString = f':Rc{centerSpeed:02.0f}#:RC#'
suc, response, numberOfChunks = conn.communicate(commandString)
return suc
def setRefractionParam(self, temperature=None, pressure=None):
"""
setRefractionParam sends the command for setting the temperature and
pressure to the mount. the limits are set to -40 to +75 for temp and 500
to 1300 hPa for pressure, but there is not real documented limit.
:param temperature: float for temperature correction in Celsius
:param pressure: float for pressure correction in hPa
:return: success
"""
if temperature is None:
return False
if pressure is None:
return False
if temperature < -40:
return False
elif temperature > 75:
return False
if pressure < 500:
return False
elif pressure > 1300:
return False
conn = Connection(self.host)
commandString = f':SRPRS{pressure:06.1f}#:SRTMP{temperature:+06.1f}#'
suc, response, numberOfChunks = conn.communicate(commandString)
if not suc:
return False
if response[0] != '11':
return False
return True
def setRefractionTemp(self, value):
"""
setRefractionTemp sends the command for setting the temperature to the
mount. the limit is set to -40 to +75, but there is not real documented
limit.
:param value: float for temperature correction in Celsius
:return: success
"""
if value is None:
return False
if value < -40:
return False
elif value > 75:
return False
conn = Connection(self.host)
commandString = ':SRTMP{0:+06.1f}#'.format(value)
suc, response, numberOfChunks = conn.communicate(commandString)
if not suc:
return False
if response[0] != '1':
return False
return True
def setRefractionPress(self, value):
"""
setRefractionPress sends the command for setting the pressure to the
mount. the limit is set from 500 to 1300 hPa. no limit give from the
mount. limits here are relevant over 5000m height
:param value: float for pressure correction
:return: success
"""
if value is None:
return False
if value < 500:
return False
elif value > 1300:
return False
conn = Connection(self.host)
commandString = ':SRPRS{0:06.1f}#'.format(value)
suc, response, numberOfChunks = conn.communicate(commandString)
if not suc:
return False
if response[0] != '1':
return False
return True
def setRefraction(self, status):
"""
setRefraction sends the command to the mount.
:param status: bool for enable or disable refraction correction
:return: success
"""
conn = Connection(self.host)
commandString = ':SREF{0:1d}#'.format(1 if status else 0)
suc, response, numberOfChunks = conn.communicate(commandString)
if not suc:
return False
if response[0] != '1':
return False
return True
def setUnattendedFlip(self, status):
"""
setUnattendedFlip sends the command to the mount. the command returns nothing.
:param status: bool for enable or disable unattended flip
:return: success
"""
conn = Connection(self.host)
commandString = ':Suaf{0:1d}#'.format(1 if status else 0)
suc, response, numberOfChunks = conn.communicate(commandString)
return suc
def setDualAxisTracking(self, status):
"""
setDualAxisTracking sends the command to the mount.
:param status: bool for enable or disable dual tracking
:return: success
"""
conn = Connection(self.host)
commandString = ':Sdat{0:1d}#'.format(1 if status else 0)
suc, response, numberOfChunks = conn.communicate(commandString)
if not suc:
return False
if response[0] != '1':
return False
return True
def setWOL(self, status):
"""
setWOL sends the command to the mount.
:param status: bool for enable or disable WOL
:return: success
"""
conn = Connection(self.host)
commandString = ':SWOL{0:1d}#'.format(1 if status else 0)
suc, response, numberOfChunks = conn.communicate(commandString)
if not suc:
return False
if response[0] != '1':
return False
return True
def setMeridianLimitTrack(self, value):
"""
setMeridianLimitTrack sends the command for setting flip limit to the mount.
the limit is set from 1 to 30 degrees
:param value: float for degrees
:return: success
"""
if value < 1:
return False
elif value > 30:
return False
conn = Connection(self.host)
value = int(value)
commandString = f':Slmt{value:02d}#'
suc, response, numberOfChunks = conn.communicate(commandString)
if not suc:
return False
if response[0] != '1':
return False
return True
def setMeridianLimitSlew(self, value):
"""
setMeridianLimitSlew sends the command for setting flip limit to the mount.
the limit is set to -20 to 20 degrees
:param value: float / int for degrees
:return: success
"""
if value < 0:
return False
elif value > 30:
return False
conn = Connection(self.host)
value = int(value)
commandString = f':Slms{value:02d}#'
suc, response, numberOfChunks = conn.communicate(commandString)
if not suc:
return False
if response[0] != '1':
return False
return True
def setHorizonLimitHigh(self, value):
"""
setHorizonLimitHigh sends the command for setting the limit to the mount.
the limit is set from 0 to 90 degrees
:param value: float / int for degrees
:return: success
"""
if value < 0:
return False
elif value > 90:
return False
conn = Connection(self.host)
value = int(value)
commandString = f':Sh+{value:02d}#'
suc, response, numberOfChunks = conn.communicate(commandString)
if not suc:
return False
if response[0] != '1':
return False
return True
def setHorizonLimitLow(self, value):
"""
setHorizonLimitLow sends the command for setting the limit to the mount. the limit
has to be between -5 and +45 degrees
:param value: float / int for degrees
:return: success
"""
if value < -5:
return False
elif value > 45:
return False
conn = Connection(self.host)
value = int(value)
commandString = f':So{value:+02d}#'
suc, response, numberOfChunks = conn.communicate(commandString)
if not suc:
return False
if response[0] != '1':
return False
return True
def setDirectWeatherUpdateType(self, value):
"""
setDirectWeatherUpdateType sends the command for setting the operating mode for
updating the refraction data from weather station.
0 do not update the refraction model data
1 update only while the mount is not tracking
2 update continuously, with a 15s smoothing filter
:param value: int
:return: success
"""
if value < 0:
return False
elif value > 2:
return False
value = int(value)
conn = Connection(self.host)
commandString = f':WSS{value:1d}#'
suc, response, numberOfChunks = conn.communicate(commandString)
if not suc:
return False
if response[0] != '1':
return False
return True
def checkRateLunar(self):
"""
:return:
"""
if self._trackingRate == 62.4:
return True
else:
return False
def checkRateSidereal(self):
"""
:return:
"""
if self._trackingRate == 60.2:
return True
else:
return False
def checkRateSolar(self):
"""
:return:
"""
if self._trackingRate == 60.3:
return True
else:
return False
def setLunarTracking(self):
    """
    Switch the mount to the lunar tracking rate.

    :return: success
    """
    connection = Connection(self.host)
    suc, _, _ = connection.communicate(':RT0#')
    return suc
def setSiderealTracking(self):
    """
    Switch the mount to the sidereal tracking rate.

    :return: success
    """
    connection = Connection(self.host)
    suc, _, _ = connection.communicate(':RT2#')
    return suc
def setSolarTracking(self):
    """
    Switch the mount to the solar tracking rate.

    :return: success
    """
    connection = Connection(self.host)
    suc, _, _ = connection.communicate(':RT1#')
    return suc
def setWebInterface(self, status):
    """
    Enable or disable the mount's built-in web interface.

    :param status: truthy enables, falsy disables
    :return: success
    """
    flag = 1 if status else 0
    conn = Connection(self.host)
    suc, response, numberOfChunks = conn.communicate(':NTSweb{0:1d}#'.format(flag))
    if not suc:
        return False
    return response[0] == '1'
import os
import random
import phonenumbers as pn
from flask import Flask, Response, request
from authy.api import AuthyApiClient
from twilio.rest import Client
from twilio.twiml.messaging_response import MessagingResponse
# Authy client configured from the environment; raises KeyError when unset.
authy_api = AuthyApiClient(os.environ["PUSH_DEMO_AUTHY_API_KEY"])
# Twilio phone number used as the SMS sender.
TWILIO_NUMBER = os.environ["PUSH_DEMO_FROM"]
client = Client()  # Twilio REST client; reads its credentials from the environment
app = Flask(__name__)
def _push(phone, text):
    """
    Send an Authy OneTouch push notification for *phone*.

    :param phone: a parsed ``phonenumbers`` number object
    :param text: body of the SMS that triggered the push
    :return: a human-readable status message to text back.
        NOTE(review): when the user-status lookup fails (``status.ok()``
        is False) the function falls through and implicitly returns
        None -- confirm whether that is intended.
    """
    country_code = phone.country_code
    number = phone.national_number
    # Registers (or fetches) an Authy user; the e-mail is a fixed placeholder.
    user = authy_api.users.create(
        '<EMAIL>',
        number,
        country_code)
    status = authy_api.users.status(user.id)
    if status.ok():
        devices = status.content['status'].get('devices')
        # No Authy App installed, send user link to download
        if not devices:
            message = "Download the Authy App to receive your notification: https://authy.com/download/"
            return message
        logo = {
            'res': 'default',
            'url': 'https://github.com/robinske/sms-push-demo/blob/master/wave.png?raw=true'
        }
        # NOTE(review): no '+' prefix is added here -- presumably Twilio
        # accepts the bare digits; verify against the callback lookup key.
        phone_number = str(country_code) + str(number)
        details = {
            'Account Number': str(user.id),
            'Phone Number': phone_number
        }
        usernames = ['Opalescent Tree Shark', 'Perfect Sunflower', 'Rainbow Infused Space Unicorn', 'Beautiful Rule-breaking Moth']
        details['Username'] = random.choice(usernames)
        message = "You said: {}".format(text)
        response = authy_api.one_touch.send_request(
            user.id,
            message,
            seconds_to_expire=1200,
            details=details,
            logos=[logo])
        if response.ok():
            message = "Check your Authy app for a push notification!"
            # Add note about downloading if first time texting this number
            prev_messages = client.messages.list(from_=TWILIO_NUMBER, to=phone_number)
            if not prev_messages:
                message = message + " If you need to download the app, visit https://authy.com/download/"
            return message
        else:
            return "There was an error sending the request: {}".format(response.errors())
@app.route("/callback", methods=["GET", "POST"])
def callback():
status = request.args.get("status")
message = "The request was {}".format(status)
to = request.args.get("approval_request[transaction][details][Phone Number]")
resp = client.messages.create(body=message, from_=TWILIO_NUMBER, to=to)
return resp.sid
@app.route("/push", methods=["GET", "POST"])
def push():
from_ = request.values.get("From")
text = request.values.get("Body")
message = _push(phone=pn.parse(from_), text=text)
resp = MessagingResponse()
resp.message(message)
return str(resp) | push.py | import os
import random
import phonenumbers as pn
from flask import Flask, Response, request
from authy.api import AuthyApiClient
from twilio.rest import Client
from twilio.twiml.messaging_response import MessagingResponse
# Authy client configured from the environment; raises KeyError when unset.
authy_api = AuthyApiClient(os.environ["PUSH_DEMO_AUTHY_API_KEY"])
# Twilio phone number used as the SMS sender.
TWILIO_NUMBER = os.environ["PUSH_DEMO_FROM"]
client = Client()  # Twilio REST client; reads its credentials from the environment
app = Flask(__name__)
def _push(phone, text):
    """
    Send an Authy OneTouch push notification for *phone*.

    :param phone: a parsed ``phonenumbers`` number object
    :param text: body of the SMS that triggered the push
    :return: a human-readable status message to text back.
        NOTE(review): when the user-status lookup fails (``status.ok()``
        is False) the function falls through and implicitly returns
        None -- confirm whether that is intended.
    """
    country_code = phone.country_code
    number = phone.national_number
    # Registers (or fetches) an Authy user; the e-mail is a fixed placeholder.
    user = authy_api.users.create(
        '<EMAIL>',
        number,
        country_code)
    status = authy_api.users.status(user.id)
    if status.ok():
        devices = status.content['status'].get('devices')
        # No Authy App installed, send user link to download
        if not devices:
            message = "Download the Authy App to receive your notification: https://authy.com/download/"
            return message
        logo = {
            'res': 'default',
            'url': 'https://github.com/robinske/sms-push-demo/blob/master/wave.png?raw=true'
        }
        # NOTE(review): no '+' prefix is added here -- presumably Twilio
        # accepts the bare digits; verify against the callback lookup key.
        phone_number = str(country_code) + str(number)
        details = {
            'Account Number': str(user.id),
            'Phone Number': phone_number
        }
        usernames = ['Opalescent Tree Shark', 'Perfect Sunflower', 'Rainbow Infused Space Unicorn', 'Beautiful Rule-breaking Moth']
        details['Username'] = random.choice(usernames)
        message = "You said: {}".format(text)
        response = authy_api.one_touch.send_request(
            user.id,
            message,
            seconds_to_expire=1200,
            details=details,
            logos=[logo])
        if response.ok():
            message = "Check your Authy app for a push notification!"
            # Add note about downloading if first time texting this number
            prev_messages = client.messages.list(from_=TWILIO_NUMBER, to=phone_number)
            if not prev_messages:
                message = message + " If you need to download the app, visit https://authy.com/download/"
            return message
        else:
            return "There was an error sending the request: {}".format(response.errors())
@app.route("/callback", methods=["GET", "POST"])
def callback():
status = request.args.get("status")
message = "The request was {}".format(status)
to = request.args.get("approval_request[transaction][details][Phone Number]")
resp = client.messages.create(body=message, from_=TWILIO_NUMBER, to=to)
return resp.sid
@app.route("/push", methods=["GET", "POST"])
def push():
from_ = request.values.get("From")
text = request.values.get("Body")
message = _push(phone=pn.parse(from_), text=text)
resp = MessagingResponse()
resp.message(message)
return str(resp) | 0.478041 | 0.08196 |
# Copyright 2019 <NAME>
# Copyright 2020 <NAME>
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Subsampling layer definition."""
import numpy as np
import torch
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
def _context_concat(seq, context_size=0):
""" seq is of size length x feat_dim.
output is of size length x (feat_dim*(1+2*context_size)).
"""
if context_size == 0:
return seq
output = []
length = seq.size(0)
# Left concatenation.
for j in range(context_size):
tmp = torch.cat([seq[0:1, :].repeat([j + 1, 1]), seq[0:(length - j - 1), :]], dim=0)
output.append(tmp)
# Add original inputs.
output.append(seq)
# Right concatenation.
for j in range(context_size):
tmp = torch.cat([seq[(j + 1):length, :], seq[length-1:length, :].repeat([j + 1, 1])], dim=0)
output.append(tmp)
return torch.cat(output, dim=1)
def _context_concat_numpy(seq, context_size=0):
""" seq is of size length x feat_dim.
output is of size length x (feat_dim*(1+2*context_size)).
"""
if context_size == 0:
return seq
output = []
length = seq.shape[0]
# Left concatenation.
for j in range(context_size):
tmp = np.concatenate([np.repeat(seq[np.newaxis, 0, :], j + 1, axis=0), seq[0:(length - j - 1), :]], 0)
output.append(tmp)
# Add original inputs.
output.append(seq)
# Right concatenation.
for j in range(context_size):
tmp = np.concatenate([seq[(j + 1):length, :], np.repeat(seq[np.newaxis, length - 1, :], j + 1, axis=0)], 0)
output.append(tmp)
return np.concatenate(output, 1)
class Conv2dSubsampling(torch.nn.Module):
    """Convolutional 2D subsampling (to 1/4 length).

    Two stride-2, kernel-3, no-padding convolutions reduce both the time
    and feature axes by roughly a factor of 4, followed by a linear
    projection and positional encoding into the model dimension.

    :param int idim: input dim
    :param int odim: output dim
    :param float dropout_rate: dropout rate
    """
    def __init__(self, idim, odim, dropout_rate):
        """Construct an Conv2dSubsampling object."""
        super(Conv2dSubsampling, self).__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 3, 2),
            torch.nn.ReLU()
        )
        # ((idim - 1) // 2 - 1) // 2 is the feature-axis size after the two
        # stride-2, kernel-3, no-padding convolutions above.
        self.out = torch.nn.Sequential(
            torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim),
            PositionalEncoding(odim, dropout_rate)
        )
    def forward(self, x, x_mask):
        """Subsample x.

        :param torch.Tensor x: input tensor (batch, time, feat)
        :param torch.Tensor x_mask: input mask, or None
        :return: subsampled x and mask
        :rtype Tuple[torch.Tensor, torch.Tensor]
        """
        x = x.unsqueeze(1)  # (b, c, t, f)
        x = self.conv(x)
        b, c, t, f = x.size()
        x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))
        if x_mask is None:
            return x, None
        if x_mask.size(1)==1:
            # (b, 1, t) padding mask: subsample the last (time) axis twice.
            return x, x_mask[:, :, :-2:2][:, :, :-2:2]
        else:
            # Weiran: if the mask is full, both time dimensions need to be subsampled.
            return x, x_mask[:, :-2:2, :-2:2][:, :-2:2, :-2:2]
# Copyright 2020 <NAME>
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Subsampling layer definition."""
import numpy as np
import torch
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
def _context_concat(seq, context_size=0):
""" seq is of size length x feat_dim.
output is of size length x (feat_dim*(1+2*context_size)).
"""
if context_size == 0:
return seq
output = []
length = seq.size(0)
# Left concatenation.
for j in range(context_size):
tmp = torch.cat([seq[0:1, :].repeat([j + 1, 1]), seq[0:(length - j - 1), :]], dim=0)
output.append(tmp)
# Add original inputs.
output.append(seq)
# Right concatenation.
for j in range(context_size):
tmp = torch.cat([seq[(j + 1):length, :], seq[length-1:length, :].repeat([j + 1, 1])], dim=0)
output.append(tmp)
return torch.cat(output, dim=1)
def _context_concat_numpy(seq, context_size=0):
""" seq is of size length x feat_dim.
output is of size length x (feat_dim*(1+2*context_size)).
"""
if context_size == 0:
return seq
output = []
length = seq.shape[0]
# Left concatenation.
for j in range(context_size):
tmp = np.concatenate([np.repeat(seq[np.newaxis, 0, :], j + 1, axis=0), seq[0:(length - j - 1), :]], 0)
output.append(tmp)
# Add original inputs.
output.append(seq)
# Right concatenation.
for j in range(context_size):
tmp = np.concatenate([seq[(j + 1):length, :], np.repeat(seq[np.newaxis, length - 1, :], j + 1, axis=0)], 0)
output.append(tmp)
return np.concatenate(output, 1)
class Conv2dSubsampling(torch.nn.Module):
    """Convolutional 2D subsampling (to 1/4 length).

    Two stride-2, kernel-3, no-padding convolutions reduce both the time
    and feature axes by roughly a factor of 4, followed by a linear
    projection and positional encoding into the model dimension.

    :param int idim: input dim
    :param int odim: output dim
    :param float dropout_rate: dropout rate
    """
    def __init__(self, idim, odim, dropout_rate):
        """Construct an Conv2dSubsampling object."""
        super(Conv2dSubsampling, self).__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 3, 2),
            torch.nn.ReLU()
        )
        # ((idim - 1) // 2 - 1) // 2 is the feature-axis size after the two
        # stride-2, kernel-3, no-padding convolutions above.
        self.out = torch.nn.Sequential(
            torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim),
            PositionalEncoding(odim, dropout_rate)
        )
    def forward(self, x, x_mask):
        """Subsample x.

        :param torch.Tensor x: input tensor (batch, time, feat)
        :param torch.Tensor x_mask: input mask, or None
        :return: subsampled x and mask
        :rtype Tuple[torch.Tensor, torch.Tensor]
        """
        x = x.unsqueeze(1)  # (b, c, t, f)
        x = self.conv(x)
        b, c, t, f = x.size()
        x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))
        if x_mask is None:
            return x, None
        if x_mask.size(1)==1:
            # (b, 1, t) padding mask: subsample the last (time) axis twice.
            return x, x_mask[:, :, :-2:2][:, :, :-2:2]
        else:
            # Weiran: if the mask is full, both time dimensions need to be subsampled.
            return x, x_mask[:, :-2:2, :-2:2][:, :-2:2, :-2:2]
# Multi-channel APK packaging script.
# NOTE: written for Python 2 (print statements); run with python2.
import zipfile
import shutil
import os
import sys
# source apk (release build produced by gradle)
source_release_apk = 'app-google-release.apk'
# app name, used for log output only
app_name = 'app'
# empty file written into each apk as the channel marker
src_empty_file = 'channel/czt.txt'
# create the empty marker file (created if it does not exist)
f = open(src_empty_file, 'w')
f.close()
# python3: os.listdir() would do; os.listdir('.') kept for Python 2 compatibility
for file in os.listdir('.'):
    if os.path.isfile(file):
        extension = os.path.splitext(file)[1][1:]
        # NOTE(review): `extension in 'apk'` is a substring test, so it also
        # matches extensions 'a', 'p', 'k', 'ap', 'pk' -- `extension == 'apk'`
        # was probably intended.
        if extension in 'apk':
            os.remove(file)
# output directory for the generated apks; recreated from scratch each run
output_dir = 'outputs_apk/'
if os.path.exists(output_dir):
    shutil.rmtree(output_dir)
if not os.path.exists("../build/outputs/apk") or not os.path.isfile("../build/outputs/apk/"+source_release_apk):
    print 'Please build the release apk at first. \n \tTips: gradle clean assembleRelease'
    sys.exit(1)
shutil.copyfile("../build/outputs/apk/"+source_release_apk, source_release_apk)
# collect all source apk packages found in the current directory
src_apks = []
# python3: os.listdir() would do; os.listdir('.') kept for Python 2 compatibility
for file in os.listdir('.'):
    if os.path.isfile(file):
        extension = os.path.splitext(file)[1][1:]
        if extension in 'apk':
            src_apks.append(file)
# read the channel list, one channel name per line
channel_file = 'channel/channel.txt'
f = open(channel_file)
lines = f.readlines()
f.close()
line_count = 0
for channel_name in lines:
    line_count += 1
    print "channel: "+channel_name,
print ""
print "channel list size: "+str(line_count)
print "---------build multi channel task-----------"
for src_apk in src_apks:
    # file name (with extension)
    src_apk_file_name = os.path.basename(src_apk)
    # split the file name from its extension
    temp_list = os.path.splitext(src_apk_file_name)
    # name without extension
    src_apk_name = (temp_list[0].split('-'))[0]
    # extension including the dot, e.g. ".apk"
    src_apk_extension = temp_list[1]
    # create the output directory when missing
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    # create one apk per channel
    for line in lines:
        # strip the trailing newline read from the channel file
        target_channel = line.strip()
        # path of the per-channel apk
        target_apk = output_dir + src_apk_name + "-" + target_channel + "-release"+src_apk_extension
        # copy the source apk to the new per-channel apk
        shutil.copy(src_apk, target_apk)
        # open the new apk as a zip archive in append mode
        zipped = zipfile.ZipFile(target_apk, 'a', zipfile.ZIP_DEFLATED)
        # channel marker entry inside META-INF
        empty_channel_file = "META-INF/cztchannel_{channel}".format(channel = target_channel)
        # write the (empty) channel marker into the apk
        zipped.write(src_empty_file, empty_channel_file)
        print "build successful: "+app_name+"/python/"+target_apk
        # close the zip stream
        zipped.close()
print "work done."
print "you can run the install.sh script(\"./install.sh\") to install apk"
# Multi-channel APK packaging script (duplicate copy).
# NOTE: written for Python 2 (print statements); run with python2.
# NOTE(review): `import zipfile` is missing from this copy (lost at the
# chunk boundary) although zipfile.ZipFile is used below.
import shutil
import os
import sys
# source apk (release build produced by gradle)
source_release_apk = 'app-google-release.apk'
# app name, used for log output only
app_name = 'app'
# empty file written into each apk as the channel marker
src_empty_file = 'channel/czt.txt'
# create the empty marker file (created if it does not exist)
f = open(src_empty_file, 'w')
f.close()
# python3: os.listdir() would do; os.listdir('.') kept for Python 2 compatibility
for file in os.listdir('.'):
    if os.path.isfile(file):
        extension = os.path.splitext(file)[1][1:]
        # NOTE(review): `extension in 'apk'` is a substring test, so it also
        # matches extensions 'a', 'p', 'k', 'ap', 'pk' -- `extension == 'apk'`
        # was probably intended.
        if extension in 'apk':
            os.remove(file)
# output directory for the generated apks; recreated from scratch each run
output_dir = 'outputs_apk/'
if os.path.exists(output_dir):
    shutil.rmtree(output_dir)
if not os.path.exists("../build/outputs/apk") or not os.path.isfile("../build/outputs/apk/"+source_release_apk):
    print 'Please build the release apk at first. \n \tTips: gradle clean assembleRelease'
    sys.exit(1)
shutil.copyfile("../build/outputs/apk/"+source_release_apk, source_release_apk)
# collect all source apk packages found in the current directory
src_apks = []
# python3: os.listdir() would do; os.listdir('.') kept for Python 2 compatibility
for file in os.listdir('.'):
    if os.path.isfile(file):
        extension = os.path.splitext(file)[1][1:]
        if extension in 'apk':
            src_apks.append(file)
# read the channel list, one channel name per line
channel_file = 'channel/channel.txt'
f = open(channel_file)
lines = f.readlines()
f.close()
line_count = 0
for channel_name in lines:
    line_count += 1
    print "channel: "+channel_name,
print ""
print "channel list size: "+str(line_count)
print "---------build multi channel task-----------"
for src_apk in src_apks:
    # file name (with extension)
    src_apk_file_name = os.path.basename(src_apk)
    # split the file name from its extension
    temp_list = os.path.splitext(src_apk_file_name)
    # name without extension
    src_apk_name = (temp_list[0].split('-'))[0]
    # extension including the dot, e.g. ".apk"
    src_apk_extension = temp_list[1]
    # create the output directory when missing
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    # create one apk per channel
    for line in lines:
        # strip the trailing newline read from the channel file
        target_channel = line.strip()
        # path of the per-channel apk
        target_apk = output_dir + src_apk_name + "-" + target_channel + "-release"+src_apk_extension
        # copy the source apk to the new per-channel apk
        shutil.copy(src_apk, target_apk)
        # open the new apk as a zip archive in append mode
        zipped = zipfile.ZipFile(target_apk, 'a', zipfile.ZIP_DEFLATED)
        # channel marker entry inside META-INF
        empty_channel_file = "META-INF/cztchannel_{channel}".format(channel = target_channel)
        # write the (empty) channel marker into the apk
        zipped.write(src_empty_file, empty_channel_file)
        print "build successful: "+app_name+"/python/"+target_apk
        # close the zip stream
        zipped.close()
print "work done."
print "you can run the install.sh script(\"./install.sh\") to install apk"
from unittest import TestCase
from unittest.mock import patch, MagicMock, Mock
from ait.commons.util.command.download import CmdDownload
def mock_transfer(_, fs):
    """Test stand-in for a transfer: mark every file successful and complete."""
    for item in fs:
        item.successful = True
        item.complete = True
class TestDownload(TestCase):
    """Unit tests for CmdDownload against a fully mocked AWS session."""
    def setUp(self) -> None:
        # Build a fake AWS facade: session -> resource -> bucket, so that
        # CmdDownload never touches the network.
        self.aws_mock = MagicMock()
        self.client = MagicMock()
        self.client.put_object = Mock()
        bucket_policy = MagicMock()
        bucket_policy.policy = None
        bucket = Mock()
        bucket.upload_file = Mock()
        bucket.objects = Mock()
        self.bucket = bucket
        self.upload_file = bucket.upload_file
        # download_file is auto-created by Mock; kept for call-count asserts.
        self.download_file = bucket.download_file
        resource = MagicMock()
        resource.BucketPolicy = Mock(return_value=bucket_policy)
        resource.Bucket = Mock(return_value=bucket)
        session = MagicMock()
        session.client = Mock(return_value=self.client)
        session.resource = Mock(return_value=resource)
        self.aws_mock.is_user = False
        self.aws_mock.common_session = session
        self.aws_mock.bucket_name = 'bucket-name'
        self.aws_mock.new_session.return_value = session
    @patch('ait.commons.util.command.download.get_selected_area')
    def test_download_no_upload_area_selected(self, get_selected_area):
        # given
        get_selected_area.return_value = None
        args = MagicMock()
        # when
        success, msg = CmdDownload(self.aws_mock, args).run()
        # then
        self.assertFalse(success)
        self.assertEqual(msg, 'No area selected')
    @patch('ait.commons.util.command.download.get_selected_area')
    @patch('ait.commons.util.command.download.os')
    @patch('ait.commons.util.command.download.TransferProgress')
    def test_download_all_files_from_selected_upload_area(self, transfer_progress, os, get_selected_area):
        # given
        get_selected_area.return_value = 'selected'
        def mock_transfer_progress(f):
            f.successful = True
            f.complete = True
        transfer_progress.side_effect = mock_transfer_progress
        # First listed object is the area prefix itself and must be skipped.
        obj = Mock()
        obj.key = 'selected'
        obj2 = Mock()
        obj2.key = 'filename'
        obj2.size = 2
        obj3 = Mock()
        obj3.key = 'filename2'
        obj3.size = 2
        self.bucket.objects.filter.return_value = [obj, obj2, obj3]
        os.getcwd.return_value = 'cwd'
        args = MagicMock()
        args.a = True  # -a: download everything in the area
        # when
        cmd = CmdDownload(self.aws_mock, args)
        success, msg = cmd.run()
        # then
        self.assertTrue(success)
        downloaded_files = [f.key for f in cmd.files]
        self.assertEqual(downloaded_files, ['filename', 'filename2'])
        self.assertEqual(self.download_file.call_count, 2, 'should download all files')
    @patch('ait.commons.util.command.download.get_selected_area')
    @patch('ait.commons.util.command.download.os')
    @patch('ait.commons.util.command.download.TransferProgress')
    def test_download_file_from_selected_upload_area(self, transfer_progress, os, get_selected_area):
        # given
        get_selected_area.return_value = 'selected/'
        def mock_transfer_progress(f):
            f.successful = True
            f.complete = True
        transfer_progress.side_effect = mock_transfer_progress
        obj = Mock()
        obj.key = 'selected/'
        obj2 = Mock()
        obj2.key = 'filename'
        obj2.size = 2
        obj3 = Mock()
        obj3.key = 'filename2'
        obj3.size = 2
        self.bucket.objects.filter.return_value = [obj, obj2, obj3]
        os.getcwd.return_value = 'cwd'
        args = MagicMock()
        args.a = False
        args.f = ['filename']  # -f: download only the named file(s)
        # when
        cmd = CmdDownload(self.aws_mock, args)
        success, msg = cmd.run()
        # then
        self.assertTrue(success)
        downloaded_files = [f.key for f in cmd.files]
        self.assertEqual(downloaded_files, ['selected/filename'])
        self.assertEqual(self.download_file.call_count, 1, 'should download file')
    @patch('ait.commons.util.command.download.get_selected_area')
    @patch('ait.commons.util.command.download.os')
    @patch('ait.commons.util.command.download.TransferProgress')
    def test_download_empty_file_from_selected_upload_area(self, transfer_progress, os, get_selected_area):
        # given
        get_selected_area.return_value = 'selected'
        def mock_transfer_progress(f):
            f.successful = True
            f.complete = True
        transfer_progress.side_effect = mock_transfer_progress
        obj = Mock()
        obj.key = 'selected'
        obj2 = Mock()
        obj2.key = 'filename'
        obj2.size = 0  # zero-byte object must still be downloaded
        self.bucket.objects.filter.return_value = [obj, obj2]
        os.getcwd.return_value = 'cwd'
        args = MagicMock()
        args.a = True
        # when
        cmd = CmdDownload(self.aws_mock, args)
        success, msg = cmd.run()
        # then
        self.assertTrue(success)
        downloaded_files = [f.key for f in cmd.files]
        self.assertEqual(downloaded_files, ['filename'])
        self.download_file.assert_called_once()
from unittest.mock import patch, MagicMock, Mock
from ait.commons.util.command.download import CmdDownload
def mock_transfer(_, fs):
    """Test stand-in for a transfer: mark every file successful and complete."""
    for item in fs:
        item.successful = True
        item.complete = True
class TestDownload(TestCase):
    """Unit tests for CmdDownload against a fully mocked AWS session."""
    def setUp(self) -> None:
        # Build a fake AWS facade: session -> resource -> bucket, so that
        # CmdDownload never touches the network.
        self.aws_mock = MagicMock()
        self.client = MagicMock()
        self.client.put_object = Mock()
        bucket_policy = MagicMock()
        bucket_policy.policy = None
        bucket = Mock()
        bucket.upload_file = Mock()
        bucket.objects = Mock()
        self.bucket = bucket
        self.upload_file = bucket.upload_file
        # download_file is auto-created by Mock; kept for call-count asserts.
        self.download_file = bucket.download_file
        resource = MagicMock()
        resource.BucketPolicy = Mock(return_value=bucket_policy)
        resource.Bucket = Mock(return_value=bucket)
        session = MagicMock()
        session.client = Mock(return_value=self.client)
        session.resource = Mock(return_value=resource)
        self.aws_mock.is_user = False
        self.aws_mock.common_session = session
        self.aws_mock.bucket_name = 'bucket-name'
        self.aws_mock.new_session.return_value = session
    @patch('ait.commons.util.command.download.get_selected_area')
    def test_download_no_upload_area_selected(self, get_selected_area):
        # given
        get_selected_area.return_value = None
        args = MagicMock()
        # when
        success, msg = CmdDownload(self.aws_mock, args).run()
        # then
        self.assertFalse(success)
        self.assertEqual(msg, 'No area selected')
    @patch('ait.commons.util.command.download.get_selected_area')
    @patch('ait.commons.util.command.download.os')
    @patch('ait.commons.util.command.download.TransferProgress')
    def test_download_all_files_from_selected_upload_area(self, transfer_progress, os, get_selected_area):
        # given
        get_selected_area.return_value = 'selected'
        def mock_transfer_progress(f):
            f.successful = True
            f.complete = True
        transfer_progress.side_effect = mock_transfer_progress
        # First listed object is the area prefix itself and must be skipped.
        obj = Mock()
        obj.key = 'selected'
        obj2 = Mock()
        obj2.key = 'filename'
        obj2.size = 2
        obj3 = Mock()
        obj3.key = 'filename2'
        obj3.size = 2
        self.bucket.objects.filter.return_value = [obj, obj2, obj3]
        os.getcwd.return_value = 'cwd'
        args = MagicMock()
        args.a = True  # -a: download everything in the area
        # when
        cmd = CmdDownload(self.aws_mock, args)
        success, msg = cmd.run()
        # then
        self.assertTrue(success)
        downloaded_files = [f.key for f in cmd.files]
        self.assertEqual(downloaded_files, ['filename', 'filename2'])
        self.assertEqual(self.download_file.call_count, 2, 'should download all files')
    @patch('ait.commons.util.command.download.get_selected_area')
    @patch('ait.commons.util.command.download.os')
    @patch('ait.commons.util.command.download.TransferProgress')
    def test_download_file_from_selected_upload_area(self, transfer_progress, os, get_selected_area):
        # given
        get_selected_area.return_value = 'selected/'
        def mock_transfer_progress(f):
            f.successful = True
            f.complete = True
        transfer_progress.side_effect = mock_transfer_progress
        obj = Mock()
        obj.key = 'selected/'
        obj2 = Mock()
        obj2.key = 'filename'
        obj2.size = 2
        obj3 = Mock()
        obj3.key = 'filename2'
        obj3.size = 2
        self.bucket.objects.filter.return_value = [obj, obj2, obj3]
        os.getcwd.return_value = 'cwd'
        args = MagicMock()
        args.a = False
        args.f = ['filename']  # -f: download only the named file(s)
        # when
        cmd = CmdDownload(self.aws_mock, args)
        success, msg = cmd.run()
        # then
        self.assertTrue(success)
        downloaded_files = [f.key for f in cmd.files]
        self.assertEqual(downloaded_files, ['selected/filename'])
        self.assertEqual(self.download_file.call_count, 1, 'should download file')
    @patch('ait.commons.util.command.download.get_selected_area')
    @patch('ait.commons.util.command.download.os')
    @patch('ait.commons.util.command.download.TransferProgress')
    def test_download_empty_file_from_selected_upload_area(self, transfer_progress, os, get_selected_area):
        # given
        get_selected_area.return_value = 'selected'
        def mock_transfer_progress(f):
            f.successful = True
            f.complete = True
        transfer_progress.side_effect = mock_transfer_progress
        obj = Mock()
        obj.key = 'selected'
        obj2 = Mock()
        obj2.key = 'filename'
        obj2.size = 0  # zero-byte object must still be downloaded
        self.bucket.objects.filter.return_value = [obj, obj2]
        os.getcwd.return_value = 'cwd'
        args = MagicMock()
        args.a = True
        # when
        cmd = CmdDownload(self.aws_mock, args)
        success, msg = cmd.run()
        # then
        self.assertTrue(success)
        downloaded_files = [f.key for f in cmd.files]
        self.assertEqual(downloaded_files, ['filename'])
        self.download_file.assert_called_once()
from . import api
from flask import jsonify,g
from datetime import datetime
from flask_restful import Resource, abort, reqparse, Api
from app.models import db, URLMapping,Permission,User
from .authentication import auth
from utils import transform
from app.decorators import confirmed_required
@api.route("/user/<int:id>/urlmaps/", methods=['GET'])
@auth.login_required
@confirmed_required
def get_urlmaps_by_userID(id):
    """
    Return all URL mappings owned by user *id* (moderator or owner only).
    """
    # BUGFIX: the route decorator must be outermost (listed first).  In the
    # original order Flask registered the bare, unwrapped function, so the
    # login/confirmation checks were never executed on incoming requests.
    u = User.query.get_or_404(id)
    if (g.current_user.can(Permission.MODERATE_COMMENTS)) or (g.current_user.id == id):
        urlmaps = u.urlmaps.all()
        return jsonify([urlmap.to_json() for urlmap in urlmaps]), 200
    else:
        return jsonify({"msg": "权限不够"}), 403
# Wrap the blueprint in a flask_restful Api; NOTE this rebinds the name
# `api` from the blueprint to the Api object for the rest of the module.
api = Api(api, prefix="/urlmap")
parser = reqparse.RequestParser()
# `parser` declares the POST request arguments.
parser.add_argument('long_url', type=str, required=True, help="原始长url")
parser.add_argument('custom_short_code', type=str, help="用户自定义短码,可选参数")
# `parser_copy` declares the PUT request arguments.
parser_copy = parser.copy()
parser_copy.remove_argument("custom_short_code")
parser_copy.replace_argument('long_url', type=str,
                             required=False, help="需要更改成的目标长url")
parser_copy.add_argument("password",type=str,required=False,help="需要设置的密码")
parser_copy.add_argument('lock',type=bool,required=False,help="上锁和取消锁")
class URLMapHandlerClass(Resource):
    """REST resource for a single URL mapping (CRUD on /urlmap/<id>/)."""
    @auth.login_required
    @confirmed_required
    def get(self, id):
        # Any authenticated, confirmed user may read a mapping.
        url_map = URLMapping.query.get_or_404(id)
        return url_map.to_json(), 200
    @auth.login_required
    @confirmed_required
    def post(self, id):
        # NOTE(review): the path id is unused -- creation only uses the
        # request body; confirm whether POST should live on this route.
        args = parser.parse_args(strict=True)
        short_code = args["custom_short_code"]
        long_url = args["long_url"]
        urlmap = URLMapping.query.filter_by(long_url=long_url).first()
        if urlmap:  # long_url already exists; any custom short code is ignored
            return urlmap.to_json(), 200
        else:  # long_url does not exist yet
            if short_code:  # user supplied a custom short code
                urlmap = URLMapping.query.filter_by(short_code=short_code).first()
                if urlmap:  # the short code is already taken
                    return {"msg": "short_code {} already exist".format(short_code)}, 202
                else:  # the short code is free
                    um = URLMapping(long_url=long_url, short_code=short_code,
                                    item_type="user-defined", id_used=False,
                                    user_id=g.current_user.id)
                    db.session.add(um)
                    db.session.commit()
                    return um.to_json(), 200
            else:  # no custom short code: derive one from a row id
                # Reuse a row whose id has not yet been turned into a code.
                custom_um = URLMapping.query.filter_by(id_used=False).first()
                if custom_um:
                    real_short_code = transform(custom_um.id)
                    um = URLMapping(long_url=long_url, short_code=real_short_code,
                                    id_used=False, user_id=g.current_user.id)
                    custom_um.id_used = True
                    db.session.add_all([um, custom_um])
                    db.session.commit()
                    return um.to_json(), 200
                else:
                    # Insert first to obtain an id, then derive the code from it.
                    um = URLMapping(long_url=long_url, short_code="placeholder", id_used=True,
                                    user_id=g.current_user.id)
                    db.session.add(um)
                    db.session.commit()
                    um.short_code = transform(um.id)
                    db.session.add(um)
                    db.session.commit()
                    return um.to_json(), 200
    @auth.login_required
    @confirmed_required
    def delete(self, id):
        um = URLMapping.query.get_or_404(id)
        # Only an administrator or the owner may delete.
        if (g.current_user.is_administrator()) or (g.current_user.id == um.user_id):
            db.session.delete(um)
            db.session.commit()
            return {"msg": 'urlmapping deleted'}, 200
        else:
            return {"msg": "你无权删除该资源"}, 403
    @auth.login_required
    @confirmed_required
    def put(self, id):
        um = URLMapping.query.get_or_404(id)
        # Moderators or the owner may update target url / password / lock.
        if (g.current_user.can(Permission.MODERATE_COMMENTS)) or (g.current_user.id == um.user_id):
            args = parser_copy.parse_args(strict=True)
            long_url = args['long_url']
            password=args['password']
            lock=args['lock']
            if long_url is not None:
                if URLMapping.query.filter_by(long_url=long_url).first() is not None:
                    return {"msg": "更新的目标url已经存在"}, 202
                um.long_url = long_url
            if password is not None:
                um.password=password
            if lock is not None:
                um.is_locked=lock
            um.update_time=datetime.utcnow()
            db.session.add(um)
            db.session.commit()
            return {"msg": "URLMapping updated"}, 200
        else:
            return {"msg": "你无权更改该资源"}, 403
# Register the handler at /urlmap/<id>/ (prefix set on the Api above).
api.add_resource(URLMapHandlerClass, '/<int:id>/', endpoint="URLmap")
from flask import jsonify,g
from datetime import datetime
from flask_restful import Resource, abort, reqparse, Api
from app.models import db, URLMapping,Permission,User
from .authentication import auth
from utils import transform
from app.decorators import confirmed_required
@api.route("/user/<int:id>/urlmaps/", methods=['GET'])
@auth.login_required
@confirmed_required
def get_urlmaps_by_userID(id):
    """
    Return all URL mappings owned by user *id* (moderator or owner only).
    """
    # BUGFIX: the route decorator must be outermost (listed first).  In the
    # original order Flask registered the bare, unwrapped function, so the
    # login/confirmation checks were never executed on incoming requests.
    u = User.query.get_or_404(id)
    if (g.current_user.can(Permission.MODERATE_COMMENTS)) or (g.current_user.id == id):
        urlmaps = u.urlmaps.all()
        return jsonify([urlmap.to_json() for urlmap in urlmaps]), 200
    else:
        return jsonify({"msg": "权限不够"}), 403
# Wrap the blueprint in a flask_restful Api; NOTE this rebinds the name
# `api` from the blueprint to the Api object for the rest of the module.
api = Api(api, prefix="/urlmap")
parser = reqparse.RequestParser()
# `parser` declares the POST request arguments.
parser.add_argument('long_url', type=str, required=True, help="原始长url")
parser.add_argument('custom_short_code', type=str, help="用户自定义短码,可选参数")
# `parser_copy` declares the PUT request arguments.
parser_copy = parser.copy()
parser_copy.remove_argument("custom_short_code")
parser_copy.replace_argument('long_url', type=str,
                             required=False, help="需要更改成的目标长url")
parser_copy.add_argument("password",type=str,required=False,help="需要设置的密码")
parser_copy.add_argument('lock',type=bool,required=False,help="上锁和取消锁")
class URLMapHandlerClass(Resource):
    """REST resource for a single URL mapping (CRUD on /urlmap/<id>/)."""
    @auth.login_required
    @confirmed_required
    def get(self, id):
        # Any authenticated, confirmed user may read a mapping.
        url_map = URLMapping.query.get_or_404(id)
        return url_map.to_json(), 200
    @auth.login_required
    @confirmed_required
    def post(self, id):
        # NOTE(review): the path id is unused -- creation only uses the
        # request body; confirm whether POST should live on this route.
        args = parser.parse_args(strict=True)
        short_code = args["custom_short_code"]
        long_url = args["long_url"]
        urlmap = URLMapping.query.filter_by(long_url=long_url).first()
        if urlmap:  # long_url already exists; any custom short code is ignored
            return urlmap.to_json(), 200
        else:  # long_url does not exist yet
            if short_code:  # user supplied a custom short code
                urlmap = URLMapping.query.filter_by(short_code=short_code).first()
                if urlmap:  # the short code is already taken
                    return {"msg": "short_code {} already exist".format(short_code)}, 202
                else:  # the short code is free
                    um = URLMapping(long_url=long_url, short_code=short_code,
                                    item_type="user-defined", id_used=False,
                                    user_id=g.current_user.id)
                    db.session.add(um)
                    db.session.commit()
                    return um.to_json(), 200
            else:  # no custom short code: derive one from a row id
                # Reuse a row whose id has not yet been turned into a code.
                custom_um = URLMapping.query.filter_by(id_used=False).first()
                if custom_um:
                    real_short_code = transform(custom_um.id)
                    um = URLMapping(long_url=long_url, short_code=real_short_code,
                                    id_used=False, user_id=g.current_user.id)
                    custom_um.id_used = True
                    db.session.add_all([um, custom_um])
                    db.session.commit()
                    return um.to_json(), 200
                else:
                    # Insert first to obtain an id, then derive the code from it.
                    um = URLMapping(long_url=long_url, short_code="placeholder", id_used=True,
                                    user_id=g.current_user.id)
                    db.session.add(um)
                    db.session.commit()
                    um.short_code = transform(um.id)
                    db.session.add(um)
                    db.session.commit()
                    return um.to_json(), 200
    @auth.login_required
    @confirmed_required
    def delete(self, id):
        um = URLMapping.query.get_or_404(id)
        # Only an administrator or the owner may delete.
        if (g.current_user.is_administrator()) or (g.current_user.id == um.user_id):
            db.session.delete(um)
            db.session.commit()
            return {"msg": 'urlmapping deleted'}, 200
        else:
            return {"msg": "你无权删除该资源"}, 403
    @auth.login_required
    @confirmed_required
    def put(self, id):
        um = URLMapping.query.get_or_404(id)
        # Moderators or the owner may update target url / password / lock.
        if (g.current_user.can(Permission.MODERATE_COMMENTS)) or (g.current_user.id == um.user_id):
            args = parser_copy.parse_args(strict=True)
            long_url = args['long_url']
            password=args['password']
            lock=args['lock']
            if long_url is not None:
                if URLMapping.query.filter_by(long_url=long_url).first() is not None:
                    return {"msg": "更新的目标url已经存在"}, 202
                um.long_url = long_url
            if password is not None:
                um.password=password
            if lock is not None:
                um.is_locked=lock
            um.update_time=datetime.utcnow()
            db.session.add(um)
            db.session.commit()
            return {"msg": "URLMapping updated"}, 200
        else:
            return {"msg": "你无权更改该资源"}, 403
# Register the handler at /urlmap/<id>/ (prefix set on the Api above).
api.add_resource(URLMapHandlerClass, '/<int:id>/', endpoint="URLmap")
from mpu import MPU
import math
import time
# variables
rad2deg = 57.2957786  # radians -> degrees (180 / pi)
# device address
device_address = 0X68  # default MPU-6050 I2C address
mpu6050 = MPU(device_address)
# Config values are raw register bitmasks -- presumably selecting gyro and
# accelerometer full-scale ranges; confirm against the MPU-6050 datasheet.
mpu6050.initialize(gyro_config=int('00001000',2), smplrt_div_value = 1, general_config=int('00000110', 2), accelerometer_config=int('00011000',2))
#gyro related variables
gyro_to_angle_dt = 65.5  # gyro LSB per deg/s at the configured range
accl_config_const = 2048  # accel LSB per g -- NOTE(review): unused in this section
dt = 0.05 # sampling period in seconds -- NOTE(review): the original comment said 10 ms but the value is 50 ms. Changing the sampling time will affect the output values. DO NOT DO IT!!!!!
print("calibrating gyroscope and accelerometer")
# Offsets accumulated while the sensor is assumed to be at rest.
gyro_x_offset = 0
gyro_y_offset = 0
gyro_z_offset = 0
# NOTE(review): the accelerometer offsets below are declared but never
# accumulated in the calibration loop -- only the gyro is averaged.
accl_x_offset = 0
accl_y_offset = 0
accl_z_offset = 0
samples = 100
for i in range(samples):
    gyro_x_offset += mpu6050.get_gyro_x()
    gyro_y_offset += mpu6050.get_gyro_y()
    gyro_z_offset += mpu6050.get_gyro_z()
    time.sleep(0.001)
gyro_x_offset /= samples
gyro_y_offset /= samples
gyro_z_offset /= samples
# Single accelerometer reading used for the initial tilt offsets.
accl_x = mpu6050.get_accl_x()
accl_y= mpu6050.get_accl_y()
accl_z = mpu6050.get_accl_z()
accl_angle_y_offset = round(math.atan2(accl_x,accl_z) * rad2deg, 2) #calculated pitch
accl_angle_x_offset = round(math.atan(-accl_y/math.sqrt((accl_x**2)+(accl_z**2))) * rad2deg, 2) #Calculated roll
print('gyroscope offsets x, y, z ', gyro_x_offset, gyro_y_offset, gyro_z_offset)
# Integrated / filtered angle state, updated in the main loop below.
prev_gyro_angle_x = 0
prev_gyro_angle_y =0
gyro_angle_x = 0
gyro_angle_y = 0
gyro_angle_x_change = 0
gyro_angle_y_change = 0
prev_accl_angle_x = 0
prev_accl_angle_y = 0
accl_angle_x = 0
accl_angle_y = 0
accl_angle_x_change = 0
accl_angle_y_change = 0
trust_accl_angle_x = trust_accl_angle_y = False
angle_x = 0
angle_y = 0
accl_trust_factor = 2
prev_time = time.time()
while True:
# Angle calculation from gyroscope
gyro_x = mpu6050.get_gyro_x() - gyro_x_offset
gyro_y = mpu6050.get_gyro_y() - gyro_y_offset
gyro_z = mpu6050.get_gyro_z() - gyro_z_offset
gyro_angle_x_dt = int(gyro_x / gyro_to_angle_dt)
gyro_angle_y_dt = int(gyro_y / gyro_to_angle_dt)
gyro_angle_z_dt = int(gyro_z / gyro_to_angle_dt)
prev_gyro_angle_x = gyro_angle_x
prev_gyro_angle_y = gyro_angle_y
gyro_angle_x = round(gyro_angle_x + (gyro_angle_x_dt * dt),2)
gyro_angle_y = round(gyro_angle_y + (gyro_angle_y_dt * dt),2)
#print(gyro_angle_x, gyro_angle_y)
#Angle calculation from accelerometer
accl_x = mpu6050.get_accl_x()
accl_y = mpu6050.get_accl_y()
accl_z = mpu6050.get_accl_z()
prev_accl_angle_x = accl_angle_x
prev_accl_angle_y = accl_angle_y
accl_angle_y = round(math.atan2(accl_x,accl_z) * rad2deg, 2) #calculated pitch
accl_angle_x = round(math.atan(-accl_y/math.sqrt((accl_x**2)+(accl_z**2))) * rad2deg, 2) #Calculated roll
accl_angle_x -= accl_angle_x_offset
accl_angle_y -= accl_angle_y_offset
#print(gyro_angle_x, gyro_angle_y)
#print(gyro_angle_x, accl_angle_x, gyro_angle_y, accl_angle_y)
#Calculate change in angles
gyro_angle_x_change = abs(prev_gyro_angle_x - gyro_angle_x)
gyro_angle_y_change = abs(prev_gyro_angle_y - gyro_angle_y)
accl_angle_x_change = abs(prev_accl_angle_x - accl_angle_x)
accl_angle_y_change = abs(prev_accl_angle_y - accl_angle_y)
trust_accl_angle_x = trust_accl_angle_y = False
angle_x = gyro_angle_x
angle_y = gyro_angle_y
if int(gyro_angle_x_change):
if abs(gyro_angle_x_change - accl_angle_x_change) < accl_trust_factor:
if abs(gyro_angle_x - accl_angle_x) < accl_trust_factor:
#print("X uses accl -- motion")
trust_accl_angle_x = True
#angle_x = (0.6 * gyro_angle_x) +(0.4 * accl_angle_x)
#gyro_angle_x = angle_x
else:
#print("X -- accl moving fast")
pass
else:
#No change in gyro values
if not int(accl_angle_x_change):
if abs(gyro_angle_x - accl_angle_x) < 10:
#print("X uses accl -- stable")
trust_accl_angle_x = True
else:
#print("X -- accl alone moving")
pass
if int(gyro_angle_y_change):
if abs(gyro_angle_y_change - accl_angle_y_change) < accl_trust_factor:
if abs(gyro_angle_y - accl_angle_y) < accl_trust_factor:
#print("Y uses accl -- motion")
trust_accl_angle_y = True
#angle_y = (0.6 * gyro_angle_y) +(0.4 * accl_angle_y)
#gyro_angle_y = angle_y
else:
#print("y -- accl moving fast")
pass
else:
#No change in gyro values
if not int(accl_angle_y_change):
if abs(gyro_angle_y - accl_angle_y) < 10:
#print("Y uses accl -- stable")
trust_accl_angle_y = True
else:
#print("Y -- accl alone moving")
pass
if(trust_accl_angle_x and trust_accl_angle_y):
angle_x = (0.6 * gyro_angle_x) +(0.4 * accl_angle_x)
gyro_angle_x = angle_x
angle_y = (0.6 * gyro_angle_y) +(0.4 * accl_angle_y)
gyro_angle_y = angle_y
'''if(trust_accl_angle_y):
angle_y = (0.6 * gyro_angle_y) +(0.4 * accl_angle_y)
gyro_angle_y = angle_y '''
angle_x = round(angle_x,2)
angle_y = round(angle_y,2)
print(angle_x, angle_y)
#Set gyro angles to original angles to assist integration
#gyro_angle_x = angle_x
#gyro_angle_y = angle_y
#complementary filter
#compl_angle_x = (0.996 * gyro_angle_x) +(0.004 * accl_angle_x)
#compl_angle_y = (0.996 * gyro_angle_y) +(0.004 * accl_angle_y)
#angle_x = (angle_x * 0.75) + (compl_angle_x * 0.25)
#angle_y = (angle_y * 0.75) + (compl_angle_y * 0.25)
#print(round(angle_x, 2), round(angle_y, 2))
#print(round(gyro_angle_x, 2), round(gyro_angle_y, 2))
#print("before loop" , time.time() - p_time)
#time.sleep(dt)
while (time.time() - prev_time) < dt:
#print("untul")
time.sleep(0.001)
pass
#print(time.time() - prev_time)
prev_time = time.time() | inclinometer_alpha.py |
from mpu import MPU
import math
import time
# variables
rad2deg = 57.2957786
# device address
device_address = 0X68
mpu6050 = MPU(device_address)
mpu6050.initialize(gyro_config=int('00001000',2), smplrt_div_value = 1, general_config=int('00000110', 2), accelerometer_config=int('00011000',2))
#gyro related variables
gyro_to_angle_dt = 65.5
accl_config_const = 2048
dt = 0.05 # 10 ms -- Changing the sampling time will affect the output values. DO NOT DO IT!!!!!
print("calibrating gyroscope and accelerometer")
gyro_x_offset = 0
gyro_y_offset = 0
gyro_z_offset = 0
accl_x_offset = 0
accl_y_offset = 0
accl_z_offset = 0
samples = 100
for i in range(samples):
gyro_x_offset += mpu6050.get_gyro_x()
gyro_y_offset += mpu6050.get_gyro_y()
gyro_z_offset += mpu6050.get_gyro_z()
time.sleep(0.001)
gyro_x_offset /= samples
gyro_y_offset /= samples
gyro_z_offset /= samples
accl_x = mpu6050.get_accl_x()
accl_y= mpu6050.get_accl_y()
accl_z = mpu6050.get_accl_z()
accl_angle_y_offset = round(math.atan2(accl_x,accl_z) * rad2deg, 2) #calculated pitch
accl_angle_x_offset = round(math.atan(-accl_y/math.sqrt((accl_x**2)+(accl_z**2))) * rad2deg, 2) #Calculated roll
print('gyroscope offsets x, y, z ', gyro_x_offset, gyro_y_offset, gyro_z_offset)
prev_gyro_angle_x = 0
prev_gyro_angle_y =0
gyro_angle_x = 0
gyro_angle_y = 0
gyro_angle_x_change = 0
gyro_angle_y_change = 0
prev_accl_angle_x = 0
prev_accl_angle_y = 0
accl_angle_x = 0
accl_angle_y = 0
accl_angle_x_change = 0
accl_angle_y_change = 0
trust_accl_angle_x = trust_accl_angle_y = False
angle_x = 0
angle_y = 0
accl_trust_factor = 2
prev_time = time.time()
while True:
# Angle calculation from gyroscope
gyro_x = mpu6050.get_gyro_x() - gyro_x_offset
gyro_y = mpu6050.get_gyro_y() - gyro_y_offset
gyro_z = mpu6050.get_gyro_z() - gyro_z_offset
gyro_angle_x_dt = int(gyro_x / gyro_to_angle_dt)
gyro_angle_y_dt = int(gyro_y / gyro_to_angle_dt)
gyro_angle_z_dt = int(gyro_z / gyro_to_angle_dt)
prev_gyro_angle_x = gyro_angle_x
prev_gyro_angle_y = gyro_angle_y
gyro_angle_x = round(gyro_angle_x + (gyro_angle_x_dt * dt),2)
gyro_angle_y = round(gyro_angle_y + (gyro_angle_y_dt * dt),2)
#print(gyro_angle_x, gyro_angle_y)
#Angle calculation from accelerometer
accl_x = mpu6050.get_accl_x()
accl_y = mpu6050.get_accl_y()
accl_z = mpu6050.get_accl_z()
prev_accl_angle_x = accl_angle_x
prev_accl_angle_y = accl_angle_y
accl_angle_y = round(math.atan2(accl_x,accl_z) * rad2deg, 2) #calculated pitch
accl_angle_x = round(math.atan(-accl_y/math.sqrt((accl_x**2)+(accl_z**2))) * rad2deg, 2) #Calculated roll
accl_angle_x -= accl_angle_x_offset
accl_angle_y -= accl_angle_y_offset
#print(gyro_angle_x, gyro_angle_y)
#print(gyro_angle_x, accl_angle_x, gyro_angle_y, accl_angle_y)
#Calculate change in angles
gyro_angle_x_change = abs(prev_gyro_angle_x - gyro_angle_x)
gyro_angle_y_change = abs(prev_gyro_angle_y - gyro_angle_y)
accl_angle_x_change = abs(prev_accl_angle_x - accl_angle_x)
accl_angle_y_change = abs(prev_accl_angle_y - accl_angle_y)
trust_accl_angle_x = trust_accl_angle_y = False
angle_x = gyro_angle_x
angle_y = gyro_angle_y
if int(gyro_angle_x_change):
if abs(gyro_angle_x_change - accl_angle_x_change) < accl_trust_factor:
if abs(gyro_angle_x - accl_angle_x) < accl_trust_factor:
#print("X uses accl -- motion")
trust_accl_angle_x = True
#angle_x = (0.6 * gyro_angle_x) +(0.4 * accl_angle_x)
#gyro_angle_x = angle_x
else:
#print("X -- accl moving fast")
pass
else:
#No change in gyro values
if not int(accl_angle_x_change):
if abs(gyro_angle_x - accl_angle_x) < 10:
#print("X uses accl -- stable")
trust_accl_angle_x = True
else:
#print("X -- accl alone moving")
pass
if int(gyro_angle_y_change):
if abs(gyro_angle_y_change - accl_angle_y_change) < accl_trust_factor:
if abs(gyro_angle_y - accl_angle_y) < accl_trust_factor:
#print("Y uses accl -- motion")
trust_accl_angle_y = True
#angle_y = (0.6 * gyro_angle_y) +(0.4 * accl_angle_y)
#gyro_angle_y = angle_y
else:
#print("y -- accl moving fast")
pass
else:
#No change in gyro values
if not int(accl_angle_y_change):
if abs(gyro_angle_y - accl_angle_y) < 10:
#print("Y uses accl -- stable")
trust_accl_angle_y = True
else:
#print("Y -- accl alone moving")
pass
if(trust_accl_angle_x and trust_accl_angle_y):
angle_x = (0.6 * gyro_angle_x) +(0.4 * accl_angle_x)
gyro_angle_x = angle_x
angle_y = (0.6 * gyro_angle_y) +(0.4 * accl_angle_y)
gyro_angle_y = angle_y
'''if(trust_accl_angle_y):
angle_y = (0.6 * gyro_angle_y) +(0.4 * accl_angle_y)
gyro_angle_y = angle_y '''
angle_x = round(angle_x,2)
angle_y = round(angle_y,2)
print(angle_x, angle_y)
#Set gyro angles to original angles to assist integration
#gyro_angle_x = angle_x
#gyro_angle_y = angle_y
#complementary filter
#compl_angle_x = (0.996 * gyro_angle_x) +(0.004 * accl_angle_x)
#compl_angle_y = (0.996 * gyro_angle_y) +(0.004 * accl_angle_y)
#angle_x = (angle_x * 0.75) + (compl_angle_x * 0.25)
#angle_y = (angle_y * 0.75) + (compl_angle_y * 0.25)
#print(round(angle_x, 2), round(angle_y, 2))
#print(round(gyro_angle_x, 2), round(gyro_angle_y, 2))
#print("before loop" , time.time() - p_time)
#time.sleep(dt)
while (time.time() - prev_time) < dt:
#print("untul")
time.sleep(0.001)
pass
#print(time.time() - prev_time)
prev_time = time.time() | 0.122714 | 0.165998 |
from datetime import datetime
from flask_restplus import Resource, reqparse
from flask import current_app
from ...permit.models.permit import Permit
from ..models.permit_amendment import PermitAmendment
from ..models.permit_amendment_document import PermitAmendmentDocument
from app.extensions import api
from ....utils.access_decorators import requires_role_mine_view, requires_role_mine_create, requires_role_mine_admin
from ....utils.resources_mixins import UserMixin, ErrorMixin
class PermitAmendmentResource(Resource, UserMixin, ErrorMixin):
parser = reqparse.RequestParser()
parser.add_argument(
'received_date',
location='json',
type=lambda x: datetime.strptime(x, '%Y-%m-%d') if x else None,
store_missing=False)
parser.add_argument(
'issue_date',
location='json',
type=lambda x: datetime.strptime(x, '%Y-%m-%d') if x else None,
store_missing=False)
parser.add_argument(
'authorization_end_date',
location='json',
type=lambda x: datetime.strptime(x, '%Y-%m-%d') if x else None,
store_missing=False)
parser.add_argument(
'permit_amendment_type_code', type=str, location='json', store_missing=False)
parser.add_argument(
'permit_amendment_status_code', type=str, location='json', store_missing=False)
parser.add_argument('description', type=str, location='json', store_missing=False)
parser.add_argument('uploadedFiles', type=list, location='json', store_missing=False)
@api.doc(params={
'permit_amendment_guid': 'Permit amendment guid.',
'permit_guid': 'Permit GUID'
})
@requires_role_mine_view
def get(self, permit_guid=None, permit_amendment_guid=None):
if permit_amendment_guid:
permit_amendment = PermitAmendment.find_by_permit_amendment_guid(permit_amendment_guid)
if permit_amendment:
return permit_amendment.json()
if permit_guid:
permit = Permit.find_by_permit_guid(permit_guid)
if permit:
permit_amendments = PermitAmendment.find_by_permit_id(permit.permit_id)
if permit_amendments:
return [x.json() for x in permit_amendments]
return self.create_error_payload(404, 'Permit amendment(s) not found'), 404
@api.doc(params={
'permit_amendment_guid': 'Permit amendment guid.',
'permit_guid': 'Permit GUID'
})
@requires_role_mine_create
def post(self, permit_guid=None, permit_amendment_guid=None):
if not permit_guid:
return self.create_error_payload(400, 'Permit_guid must be provided'), 400
if permit_amendment_guid:
return self.create_error_payload(400, 'unexpected permit_amendement_id'), 400
permit = Permit.find_by_permit_guid(permit_guid)
if not permit:
return self.create_error_payload(404, 'permit does not exist'), 404
data = self.parser.parse_args()
current_app.logger.info(f'creating permit_amendment with >> {data}')
received_date = data.get('received_date')
issue_date = data.get('issue_date')
authorization_end_date = data.get('authorization_end_date')
permit_amendment_type_code = data.get('permit_amendment_type_code', 'AMD')
description = data.get('description')
uploadedFiles = data.get('uploadedFiles', [])
try:
new_pa = PermitAmendment.create(
permit,
received_date,
issue_date,
authorization_end_date,
permit_amendment_type_code,
self.get_create_update_dict(),
description=description,
save=True)
for newFile in uploadedFiles:
new_pa_doc = PermitAmendmentDocument(
document_name=newFile['fileName'],
document_manager_guid=newFile['document_manager_guid'],
mine_guid=permit.mine_guid,
**self.get_create_update_dict(),
)
new_pa.documents.append(new_pa_doc)
new_pa.save()
except Exception as e:
return self.create_error_payload(500, 'Error: {}'.format(e)), 500
return new_pa.json()
@api.doc(params={
'permit_amendment_guid': 'Permit amendment guid.',
'permit_guid': 'Permit GUID'
})
@requires_role_mine_create
def put(self, permit_guid=None, permit_amendment_guid=None):
if not permit_amendment_guid:
return self.create_error_payload(400, 'permit_amendment_id must be provided'), 400
pa = PermitAmendment.find_by_permit_amendment_guid(permit_amendment_guid)
if not pa:
return self.create_error_payload(404, 'permit amendment not found'), 404
data = self.parser.parse_args()
current_app.logger.info(f'updating {pa} with >> {data}')
try:
if 'received_date' in data:
pa.received_date = data.get('received_date')
if 'issue_date' in data:
pa.issue_date = data.get('issue_date')
if 'authorization_end_date' in data:
pa.authorization_end_date = data.get('authorization_end_date')
if 'permit_amendment_status_code' in data:
pa.permit_amendment_status_code = data.get('permit_amendment_status_code')
if 'permit_amendment_type_code' in data:
pa.permit_amendment_type_code = data.get('permit_amendment_type_code')
if 'description' in data:
pa.description = data.get('description')
for newFile in data.get('uploadedFiles', []):
new_pa_doc = PermitAmendmentDocument(
document_name=newFile['fileName'],
document_manager_guid=newFile['document_manager_guid'],
mine_guid=pa.permit.mine_guid,
**self.get_create_update_dict(),
)
pa.documents.append(new_pa_doc)
pa.save()
except Exception as e:
current_app.logger.error(f'PermitAmendmentResource.Put: Error >> {e}')
return self.create_error_payload(500, f'Error: {e}'), 500
return pa.json()
@api.doc(params={
'permit_amendment_guid': 'Permit amendment guid.',
'permit_guid': 'Permit GUID'
})
@requires_role_mine_admin
def delete(self, permit_guid=None, permit_amendment_guid=None):
if not permit_amendment_guid:
return self.create_error_payload(400, 'permit_amendment_id must be provided'), 400
pa = PermitAmendment.find_by_permit_amendment_guid(permit_amendment_guid)
if not pa:
return self.create_error_payload(404, 'permit amendment not found'), 404
pa.deleted_ind = True
try:
pa.save()
except Exception as e:
return self.create_error_payload(500, 'Error: {}'.format(e)), 500
return ('', 204) | python-backend/app/api/permits/permit_amendment/resources/permit_amendment.py | from datetime import datetime
from flask_restplus import Resource, reqparse
from flask import current_app
from ...permit.models.permit import Permit
from ..models.permit_amendment import PermitAmendment
from ..models.permit_amendment_document import PermitAmendmentDocument
from app.extensions import api
from ....utils.access_decorators import requires_role_mine_view, requires_role_mine_create, requires_role_mine_admin
from ....utils.resources_mixins import UserMixin, ErrorMixin
class PermitAmendmentResource(Resource, UserMixin, ErrorMixin):
parser = reqparse.RequestParser()
parser.add_argument(
'received_date',
location='json',
type=lambda x: datetime.strptime(x, '%Y-%m-%d') if x else None,
store_missing=False)
parser.add_argument(
'issue_date',
location='json',
type=lambda x: datetime.strptime(x, '%Y-%m-%d') if x else None,
store_missing=False)
parser.add_argument(
'authorization_end_date',
location='json',
type=lambda x: datetime.strptime(x, '%Y-%m-%d') if x else None,
store_missing=False)
parser.add_argument(
'permit_amendment_type_code', type=str, location='json', store_missing=False)
parser.add_argument(
'permit_amendment_status_code', type=str, location='json', store_missing=False)
parser.add_argument('description', type=str, location='json', store_missing=False)
parser.add_argument('uploadedFiles', type=list, location='json', store_missing=False)
@api.doc(params={
'permit_amendment_guid': 'Permit amendment guid.',
'permit_guid': 'Permit GUID'
})
@requires_role_mine_view
def get(self, permit_guid=None, permit_amendment_guid=None):
if permit_amendment_guid:
permit_amendment = PermitAmendment.find_by_permit_amendment_guid(permit_amendment_guid)
if permit_amendment:
return permit_amendment.json()
if permit_guid:
permit = Permit.find_by_permit_guid(permit_guid)
if permit:
permit_amendments = PermitAmendment.find_by_permit_id(permit.permit_id)
if permit_amendments:
return [x.json() for x in permit_amendments]
return self.create_error_payload(404, 'Permit amendment(s) not found'), 404
@api.doc(params={
'permit_amendment_guid': 'Permit amendment guid.',
'permit_guid': 'Permit GUID'
})
@requires_role_mine_create
def post(self, permit_guid=None, permit_amendment_guid=None):
if not permit_guid:
return self.create_error_payload(400, 'Permit_guid must be provided'), 400
if permit_amendment_guid:
return self.create_error_payload(400, 'unexpected permit_amendement_id'), 400
permit = Permit.find_by_permit_guid(permit_guid)
if not permit:
return self.create_error_payload(404, 'permit does not exist'), 404
data = self.parser.parse_args()
current_app.logger.info(f'creating permit_amendment with >> {data}')
received_date = data.get('received_date')
issue_date = data.get('issue_date')
authorization_end_date = data.get('authorization_end_date')
permit_amendment_type_code = data.get('permit_amendment_type_code', 'AMD')
description = data.get('description')
uploadedFiles = data.get('uploadedFiles', [])
try:
new_pa = PermitAmendment.create(
permit,
received_date,
issue_date,
authorization_end_date,
permit_amendment_type_code,
self.get_create_update_dict(),
description=description,
save=True)
for newFile in uploadedFiles:
new_pa_doc = PermitAmendmentDocument(
document_name=newFile['fileName'],
document_manager_guid=newFile['document_manager_guid'],
mine_guid=permit.mine_guid,
**self.get_create_update_dict(),
)
new_pa.documents.append(new_pa_doc)
new_pa.save()
except Exception as e:
return self.create_error_payload(500, 'Error: {}'.format(e)), 500
return new_pa.json()
@api.doc(params={
'permit_amendment_guid': 'Permit amendment guid.',
'permit_guid': 'Permit GUID'
})
@requires_role_mine_create
def put(self, permit_guid=None, permit_amendment_guid=None):
if not permit_amendment_guid:
return self.create_error_payload(400, 'permit_amendment_id must be provided'), 400
pa = PermitAmendment.find_by_permit_amendment_guid(permit_amendment_guid)
if not pa:
return self.create_error_payload(404, 'permit amendment not found'), 404
data = self.parser.parse_args()
current_app.logger.info(f'updating {pa} with >> {data}')
try:
if 'received_date' in data:
pa.received_date = data.get('received_date')
if 'issue_date' in data:
pa.issue_date = data.get('issue_date')
if 'authorization_end_date' in data:
pa.authorization_end_date = data.get('authorization_end_date')
if 'permit_amendment_status_code' in data:
pa.permit_amendment_status_code = data.get('permit_amendment_status_code')
if 'permit_amendment_type_code' in data:
pa.permit_amendment_type_code = data.get('permit_amendment_type_code')
if 'description' in data:
pa.description = data.get('description')
for newFile in data.get('uploadedFiles', []):
new_pa_doc = PermitAmendmentDocument(
document_name=newFile['fileName'],
document_manager_guid=newFile['document_manager_guid'],
mine_guid=pa.permit.mine_guid,
**self.get_create_update_dict(),
)
pa.documents.append(new_pa_doc)
pa.save()
except Exception as e:
current_app.logger.error(f'PermitAmendmentResource.Put: Error >> {e}')
return self.create_error_payload(500, f'Error: {e}'), 500
return pa.json()
@api.doc(params={
'permit_amendment_guid': 'Permit amendment guid.',
'permit_guid': 'Permit GUID'
})
@requires_role_mine_admin
def delete(self, permit_guid=None, permit_amendment_guid=None):
if not permit_amendment_guid:
return self.create_error_payload(400, 'permit_amendment_id must be provided'), 400
pa = PermitAmendment.find_by_permit_amendment_guid(permit_amendment_guid)
if not pa:
return self.create_error_payload(404, 'permit amendment not found'), 404
pa.deleted_ind = True
try:
pa.save()
except Exception as e:
return self.create_error_payload(500, 'Error: {}'.format(e)), 500
return ('', 204) | 0.469763 | 0.082623 |
# modified script based on https://github.com/rmchurch/synthetic_blobs
print('executing xgc blob synthesizer...')
import os
import h5py
import numpy as np
from scipy.integrate import odeint
from scipy.interpolate import LinearNDInterpolator
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.tri import Triangulation,LinearTriInterpolator
def interpolate_fieldLineFollow(Lstart, phiEnd, Binterp, dphi=None):
    """Trace a magnetic field line from Lstart = (R, Z, phi) to toroidal angle phiEnd.

    Integrates the field-line equations dR/dphi = R*BR/Bphi and
    dZ/dphi = R*BZ/Bphi with scipy's odeint. `Binterp(R, Z)` must return a
    sequence (BR, BZ, Bphi). When `dphi` is given the phi grid uses that
    spacing; otherwise 1000 evenly spaced points span [phi_start, phiEnd].

    Returns an (Npts, 3) array of (R, Z, phi) samples along the field line.
    """
    phi_start = Lstart[2]

    def rhs(pos, _phi, field):
        # Right-hand side of the field-line ODE system in (R, Z).
        bvec = field(pos[0], pos[1])
        return [pos[0] * bvec[0] / bvec[2], pos[0] * bvec[1] / bvec[2]]

    if dphi is None:
        phi_grid = np.linspace(phi_start, phiEnd, 1000)
    else:
        n_steps = int(np.abs((phiEnd - phi_start) / dphi)) + 1
        phi_grid = phi_start + np.sign(phiEnd - phi_start) * np.arange(n_steps) * dphi

    trajectory = odeint(rhs, Lstart[0:2], phi_grid, args=(Binterp,))
    return np.hstack((trajectory, phi_grid[:, np.newaxis]))
class syntheticBlobs():
    """Generate synthetic plasma-blob density perturbations on an XGC mesh.

    A blob is Gaussian in the (R, Z) plane, elongated along the magnetic
    field line through its center, and sampled onto the discrete XGC
    toroidal planes.
    """

    def __init__(self, RZ, psin, tri, Bgrid, sml_nphi):
        """
        RZ [Nnode, 2]: mesh node (R, Z) coordinates; node 0 is used as the
            magnetic axis -- TODO confirm against the mesh convention.
        psin [Nnode]: normalized poloidal flux at each node.
        tri [Ntri, 3]: triangle connectivity of the mesh.
        Bgrid [Nnode, 3]: (BR, BZ, Bphi) at each node.
        sml_nphi: number of XGC toroidal planes.
        """
        self.RZ = RZ
        self.R0, self.Z0 = RZ[0, :]
        self.psin = psin
        self.tri = tri
        self.triObj = Triangulation(RZ[:, 0], RZ[:, 1], tri)
        self.theta = self.calc_theta(RZ[:, 0], RZ[:, 1])
        # Map (R, Z) -> psin on the triangulated mesh.
        self.fRZ2psin = LinearTriInterpolator(self.triObj, self.psin)
        # B-field interpolator; inf outside the mesh hull so stray
        # evaluations are obvious.
        self.Binterp = LinearNDInterpolator(RZ, Bgrid, fill_value=np.inf)
        self.sml_nphi = sml_nphi

    def psintheta2RZ(self, psin, theta):
        # NOTE(review): self.fpsintheta2R / self.fpsintheta2Z are never
        # constructed (their setup was disabled upstream), so calling this
        # raises AttributeError; kept for interface compatibility.
        # (Fixed the attribute-name typo `fpsinitheta2R`.)
        return self.fpsintheta2R(psin, theta), self.fpsintheta2Z(psin, theta)

    def RZ2psin(self, R, Z):
        """Interpolate normalized poloidal flux at (R, Z)."""
        return self.fRZ2psin(R, Z)

    def calc_theta(self, R, Z):
        """Calculate poloidal angle, with 0 deg at LFS midplane"""
        return np.arctan2(Z - self.Z0, R - self.R0)

    def generate(self, xcenter, ntor, Lpol, Lrad, dnOvern, use_RZ=True):
        """ Generate a blob from the input characteristics
        xcenter [3]: Blob center coordinates (R, Z, phi) when use_RZ,
            otherwise (psin, theta, phi) (not implemented)
        ntor [1]: Toroidal mode number of the blob
        Lpol [1]: Blob diameter in poloidal direction
        Lrad [1]: Blob diameter in radial direction
        dnOvern [1]: Scalar of the magnitude of the blob at center, dn/n
        use_RZ [bool]: input xcenter with (R,Z,phi) or (psin,theta,phi)

        Returns dnXGC [sml_nphi, Nnode]: dn/n on each toroidal plane.
        """
        if use_RZ:
            R1 = xcenter[0]; Z1 = xcenter[1]; phi1 = xcenter[2]
            psin1 = self.fRZ2psin(R1, Z1)   # currently unused downstream
            theta1 = self.calc_theta(R1, Z1)  # currently unused downstream
        else:
            raise ValueError("use_RZ==False not implemented yet")
        # Force phi1 onto the quantized XGC toroidal-plane grid.
        dphi = 2 * np.pi / self.sml_nphi
        phis = np.arange(self.sml_nphi) * dphi
        phiInd = int(np.round(phi1 / dphi) % self.sml_nphi)
        phi1 = phis[phiInd]
        # Half toroidal extent of the blob: ntor = 2*pi/lambda_tor, with
        # lambda_tor the toroidal wavelength, scaled by R1/R0.
        dphiEnd = 2 * np.pi / ntor * R1 / self.R0 / 2.
        Lstart = np.array([R1, Z1, phi1])
        # Generate the field-line path through the blob center in both
        # toroidal directions.
        # (bug fix: use self.Binterp, not the module-level Binterp global)
        LoutFwd = interpolate_fieldLineFollow(Lstart, phi1 + dphiEnd, self.Binterp, dphi=dphi)
        LoutBwd = interpolate_fieldLineFollow(Lstart, phi1 - dphiEnd, self.Binterp, dphi=dphi)
        # Remove the duplicated center point before concatenating.
        Lout = np.concatenate((LoutBwd[1:, :][::-1, :], LoutFwd))
        phioutInds = (np.round(Lout[:, 2] / dphi) % self.sml_nphi).astype(int)
        # Parallel envelope: half sine along the field-line arc.
        tmp = np.sin(np.arange(Lout.shape[0]) / (Lout.shape[0] - 1) * np.pi)
        dn_par = np.zeros((self.sml_nphi,))
        dn_par[phioutInds] = tmp
        # Blob geometry at the center plane.
        Bfield1 = self.Binterp(R1, Z1)
        Bpol1 = np.sqrt(np.sum(Bfield1[0:2]**2.))
        B1 = np.sqrt(np.sum(Bfield1**2.))
        alpha1 = np.arccos(Bfield1[0] / Bpol1)
        # (bug fix: use self.RZ, not the module-level RZ global)
        dnXGC = np.zeros((self.sml_nphi, self.RZ.shape[0]))
        R = self.RZ[:, 0]; Z = self.RZ[:, 1]
        # Loop through toroidal planes and evaluate the tilted Gaussian on
        # the XGC (R, Z) mesh.
        for p, phip in enumerate(phioutInds):
            Rp = Lout[p, 0]; Zp = Lout[p, 1]
            # Adjust blob size radially/poloidally for flux expansion.
            Bfieldp = self.Binterp(Lout[p, 0], Lout[p, 1])
            Bpolp = np.sqrt(np.sum(Bfieldp[0:2]**2.))
            Bp = np.sqrt(np.sum(Bfieldp**2.))
            # NOTE(review): Lradp/Lpolp are computed but NOT used in the
            # Gaussian below (it uses the unscaled Lrad/Lpol) -- possibly
            # an upstream oversight; behavior preserved.
            Lradp = Lrad * (Rp * Bpolp) / (R1 * Bpol1)
            Lpolp = Lpol * B1 / Bp * Lrad / Lradp
            # Tilt the Gaussian by the local change in field-line pitch.
            alphap = np.arccos(Bfieldp[0] / Bpolp)
            alpha = alpha1 - alphap
            dnXGC[phip, :] = dnOvern * dn_par[phip] * np.exp(
                -(((R - Rp) * np.cos(alpha) + (Z - Zp) * np.sin(alpha)) / Lrad)**2
                - (((R - Rp) * np.sin(alpha) - (Z - Zp) * np.cos(alpha)) / Lpol)**2)
        return dnXGC
# --- load XGC mesh, B-field, and equilibrium data ---
# Fail fast with a clear message if the data path is not configured
# (previously this crashed with an opaque TypeError on None + str).
data_root = os.getenv('FTK_XGC_TEST_DATA_PATH')
if data_root is None:
    raise RuntimeError('FTK_XGC_TEST_DATA_PATH environment variable is not set')
fileDir = data_root + "/"
print('xgc_file_dir=', fileDir)
fileBfield = fileDir + 'xgc.bfield.h5'
fb = h5py.File(fileBfield, 'r')
Bgrid = fb['node_data[0]/values'][:]   # (BR, BZ, Bphi) at each mesh node
fileMesh = fileDir + 'xgc.mesh.h5'
fm = h5py.File(fileMesh, 'r')
RZ = fm['coordinates/values'][:]       # mesh node (R, Z) coordinates
# psi_x normalizes the poloidal flux. Prefer the equilibrium file; fall
# back to parsing units.m (you may have to hardcode the value instead).
try:
    fileEq = fileDir + 'xgc.equil.h5'
    feq = h5py.File(fileEq, 'r')
    psi_x = feq['eq_psi_x'][...]
except (OSError, KeyError):
    # h5py raises OSError for a missing/unreadable file and KeyError for a
    # missing dataset; the original bare `except:` hid unrelated errors.
    with open(fileDir + 'units.m') as units_file:
        for line in units_file:
            if 'psi_x' in line:
                # NOTE(review): assumes the value is the second
                # whitespace-separated token on the psi_x line -- verify
                # against the actual units.m format.
                psi_x = float(line.split()[1])
psin = fm['psi'][:] / psi_x
tri = fm['cell_set[0]/node_connect_list'][...]
triObj = Triangulation(RZ[:, 0], RZ[:, 1], tri)
file3d = fileDir + 'xgc.3d.00001.h5'
f3d = h5py.File(file3d, 'r')
sml_nphi = f3d['nphi'][0]   # number of toroidal planes
sml_iphi = f3d['iphi'][0]
# Setup Bfield interpolator (could use higher order interpolation scheme).
# Kept at module level because syntheticBlobs.generate may reference it.
Binterp = LinearNDInterpolator(RZ, Bgrid, fill_value=np.inf)
blob_generator = syntheticBlobs(RZ, psin, tri, Bgrid, sml_nphi)
# Generate a blob drifting in Z over 5 timesteps; write one HDF5 file each.
for timestep in range(0, 5):
    xcenter = np.array([2.26, timestep * 0.01, 0])  # (R, Z, phi)
    ntor = 5            # toroidal mode number
    Lpol = 0.01         # poloidal blob diameter
    Lrad = Lpol / 1.5   # radial blob diameter
    dnOvernMag = 0.1    # peak dn/n at the blob center
    dnOvernXGC = blob_generator.generate(xcenter, ntor, Lpol, Lrad, dnOvernMag)
    print("synthesizing xgc timestep", timestep)
    file_output = 'xgc.synthetic.%04d.h5' % timestep
    fo = h5py.File(file_output, 'w')
    fo['/dnOvernXGC'] = dnOvernXGC.transpose()
    fo['/nphi'] = sml_nphi
    fo['/iphi'] = sml_iphi
    fo.close()
# modified script based on https://github.com/rmchurch/synthetic_blobs
print('executing xgc blob synthesizer...')
import os
import h5py
import numpy as np
from scipy.integrate import odeint
from scipy.interpolate import LinearNDInterpolator
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.tri import Triangulation,LinearTriInterpolator
def interpolate_fieldLineFollow(Lstart, phiEnd, Binterp, dphi=None):
    """Trace a magnetic field line from Lstart = (R, Z, phi) to toroidal angle phiEnd.

    Integrates the field-line equations dR/dphi = R*BR/Bphi and
    dZ/dphi = R*BZ/Bphi with scipy's odeint. `Binterp(R, Z)` must return a
    sequence (BR, BZ, Bphi). When `dphi` is given the phi grid uses that
    spacing; otherwise 1000 evenly spaced points span [phi_start, phiEnd].

    Returns an (Npts, 3) array of (R, Z, phi) samples along the field line.
    """
    phi_start = Lstart[2]

    def rhs(pos, _phi, field):
        # Right-hand side of the field-line ODE system in (R, Z).
        bvec = field(pos[0], pos[1])
        return [pos[0] * bvec[0] / bvec[2], pos[0] * bvec[1] / bvec[2]]

    if dphi is None:
        phi_grid = np.linspace(phi_start, phiEnd, 1000)
    else:
        n_steps = int(np.abs((phiEnd - phi_start) / dphi)) + 1
        phi_grid = phi_start + np.sign(phiEnd - phi_start) * np.arange(n_steps) * dphi

    trajectory = odeint(rhs, Lstart[0:2], phi_grid, args=(Binterp,))
    return np.hstack((trajectory, phi_grid[:, np.newaxis]))
class syntheticBlobs():
    """Generate synthetic plasma-blob density perturbations on an XGC mesh.

    A blob is Gaussian in the (R, Z) plane, elongated along the magnetic
    field line through its center, and sampled onto the discrete XGC
    toroidal planes.
    """

    def __init__(self, RZ, psin, tri, Bgrid, sml_nphi):
        """
        RZ [Nnode, 2]: mesh node (R, Z) coordinates; node 0 is used as the
            magnetic axis -- TODO confirm against the mesh convention.
        psin [Nnode]: normalized poloidal flux at each node.
        tri [Ntri, 3]: triangle connectivity of the mesh.
        Bgrid [Nnode, 3]: (BR, BZ, Bphi) at each node.
        sml_nphi: number of XGC toroidal planes.
        """
        self.RZ = RZ
        self.R0, self.Z0 = RZ[0, :]
        self.psin = psin
        self.tri = tri
        self.triObj = Triangulation(RZ[:, 0], RZ[:, 1], tri)
        self.theta = self.calc_theta(RZ[:, 0], RZ[:, 1])
        # Map (R, Z) -> psin on the triangulated mesh.
        self.fRZ2psin = LinearTriInterpolator(self.triObj, self.psin)
        # B-field interpolator; inf outside the mesh hull so stray
        # evaluations are obvious.
        self.Binterp = LinearNDInterpolator(RZ, Bgrid, fill_value=np.inf)
        self.sml_nphi = sml_nphi

    def psintheta2RZ(self, psin, theta):
        # NOTE(review): self.fpsintheta2R / self.fpsintheta2Z are never
        # constructed (their setup was disabled upstream), so calling this
        # raises AttributeError; kept for interface compatibility.
        # (Fixed the attribute-name typo `fpsinitheta2R`.)
        return self.fpsintheta2R(psin, theta), self.fpsintheta2Z(psin, theta)

    def RZ2psin(self, R, Z):
        """Interpolate normalized poloidal flux at (R, Z)."""
        return self.fRZ2psin(R, Z)

    def calc_theta(self, R, Z):
        """Calculate poloidal angle, with 0 deg at LFS midplane"""
        return np.arctan2(Z - self.Z0, R - self.R0)

    def generate(self, xcenter, ntor, Lpol, Lrad, dnOvern, use_RZ=True):
        """ Generate a blob from the input characteristics
        xcenter [3]: Blob center coordinates (R, Z, phi) when use_RZ,
            otherwise (psin, theta, phi) (not implemented)
        ntor [1]: Toroidal mode number of the blob
        Lpol [1]: Blob diameter in poloidal direction
        Lrad [1]: Blob diameter in radial direction
        dnOvern [1]: Scalar of the magnitude of the blob at center, dn/n
        use_RZ [bool]: input xcenter with (R,Z,phi) or (psin,theta,phi)

        Returns dnXGC [sml_nphi, Nnode]: dn/n on each toroidal plane.
        """
        if use_RZ:
            R1 = xcenter[0]; Z1 = xcenter[1]; phi1 = xcenter[2]
            psin1 = self.fRZ2psin(R1, Z1)   # currently unused downstream
            theta1 = self.calc_theta(R1, Z1)  # currently unused downstream
        else:
            raise ValueError("use_RZ==False not implemented yet")
        # Force phi1 onto the quantized XGC toroidal-plane grid.
        dphi = 2 * np.pi / self.sml_nphi
        phis = np.arange(self.sml_nphi) * dphi
        phiInd = int(np.round(phi1 / dphi) % self.sml_nphi)
        phi1 = phis[phiInd]
        # Half toroidal extent of the blob: ntor = 2*pi/lambda_tor, with
        # lambda_tor the toroidal wavelength, scaled by R1/R0.
        dphiEnd = 2 * np.pi / ntor * R1 / self.R0 / 2.
        Lstart = np.array([R1, Z1, phi1])
        # Generate the field-line path through the blob center in both
        # toroidal directions.
        # (bug fix: use self.Binterp, not the module-level Binterp global)
        LoutFwd = interpolate_fieldLineFollow(Lstart, phi1 + dphiEnd, self.Binterp, dphi=dphi)
        LoutBwd = interpolate_fieldLineFollow(Lstart, phi1 - dphiEnd, self.Binterp, dphi=dphi)
        # Remove the duplicated center point before concatenating.
        Lout = np.concatenate((LoutBwd[1:, :][::-1, :], LoutFwd))
        phioutInds = (np.round(Lout[:, 2] / dphi) % self.sml_nphi).astype(int)
        # Parallel envelope: half sine along the field-line arc.
        tmp = np.sin(np.arange(Lout.shape[0]) / (Lout.shape[0] - 1) * np.pi)
        dn_par = np.zeros((self.sml_nphi,))
        dn_par[phioutInds] = tmp
        # Blob geometry at the center plane.
        Bfield1 = self.Binterp(R1, Z1)
        Bpol1 = np.sqrt(np.sum(Bfield1[0:2]**2.))
        B1 = np.sqrt(np.sum(Bfield1**2.))
        alpha1 = np.arccos(Bfield1[0] / Bpol1)
        # (bug fix: use self.RZ, not the module-level RZ global)
        dnXGC = np.zeros((self.sml_nphi, self.RZ.shape[0]))
        R = self.RZ[:, 0]; Z = self.RZ[:, 1]
        # Loop through toroidal planes and evaluate the tilted Gaussian on
        # the XGC (R, Z) mesh.
        for p, phip in enumerate(phioutInds):
            Rp = Lout[p, 0]; Zp = Lout[p, 1]
            # Adjust blob size radially/poloidally for flux expansion.
            Bfieldp = self.Binterp(Lout[p, 0], Lout[p, 1])
            Bpolp = np.sqrt(np.sum(Bfieldp[0:2]**2.))
            Bp = np.sqrt(np.sum(Bfieldp**2.))
            # NOTE(review): Lradp/Lpolp are computed but NOT used in the
            # Gaussian below (it uses the unscaled Lrad/Lpol) -- possibly
            # an upstream oversight; behavior preserved.
            Lradp = Lrad * (Rp * Bpolp) / (R1 * Bpol1)
            Lpolp = Lpol * B1 / Bp * Lrad / Lradp
            # Tilt the Gaussian by the local change in field-line pitch.
            alphap = np.arccos(Bfieldp[0] / Bpolp)
            alpha = alpha1 - alphap
            dnXGC[phip, :] = dnOvern * dn_par[phip] * np.exp(
                -(((R - Rp) * np.cos(alpha) + (Z - Zp) * np.sin(alpha)) / Lrad)**2
                - (((R - Rp) * np.sin(alpha) - (Z - Zp) * np.cos(alpha)) / Lpol)**2)
        return dnXGC
# --- Driver script: load XGC test data, build a blob generator, and write
# --- synthetic dn/n snapshots for 5 timesteps.
# NOTE(review): os.getenv returns None when the variable is unset, which would
# raise TypeError on the concatenation below — consider failing with a clearer
# message.
fileDir = os.getenv('FTK_XGC_TEST_DATA_PATH') + "/"
print('xgc_file_dir=', fileDir)
# Magnetic field on the mesh nodes.
fileBfield = fileDir + 'xgc.bfield.h5'
fb = h5py.File(fileBfield,'r')
Bgrid = fb['node_data[0]/values'][:]
# Mesh node coordinates (R, Z).
fileMesh = fileDir + 'xgc.mesh.h5'
fm = h5py.File(fileMesh,'r')
RZ = fm['coordinates/values'][:]
#you may have to replace this with a hardcoded value from units.m
try:
    fileEq = fileDir + 'xgc.equil.h5'
    feq = h5py.File(fileEq,'r')
    psi_x = feq['eq_psi_x'][...]
# NOTE(review): bare except — any failure (missing file, missing key, typo)
# silently falls back to parsing units.m; narrowing to (OSError, KeyError)
# would be safer.
except:
    feq = open(fileDir+'units.m')
    for line in feq:
        if 'psi_x' in line:
            psi_x = float(line.split()[1])
# Normalized flux on each node and the triangulation of the mesh.
psin = fm['psi'][:]/psi_x
tri= fm['cell_set[0]/node_connect_list'][...]
triObj = Triangulation(RZ[:,0],RZ[:,1],tri)
# Toroidal plane counts from an existing 3D snapshot.
file3d = fileDir + 'xgc.3d.00001.h5'
f3d = h5py.File(file3d,'r')
sml_nphi = f3d['nphi'][0]
sml_iphi = f3d['iphi'][0]
#setup Bfield interpolator (could use higher order interpolation scheme)
Binterp = LinearNDInterpolator(RZ, Bgrid, fill_value = np.inf)
blob_generator = syntheticBlobs(RZ,psin,tri,Bgrid,sml_nphi)
#now generate some blobs
#xcenter = np.array([0.95,0,0]) #(psin,theta,phi)
#xcenter = np.array([2.26,0,0]) #(R,Z,phi)
# One blob per timestep, drifting vertically by 0.01 per step.
for timestep in range (0, 5):
    xcenter = np.array([2.26, timestep * 0.01, 0]) #(R,Z,phi)
    ntor = 5
    Lpol = 0.01
    Lrad = Lpol/1.5
    dnOvernMag = 0.1
    dnOvernXGC = blob_generator.generate(xcenter,ntor,Lpol,Lrad,dnOvernMag)
    print("synthesizing xgc timestep", timestep) #dnOvernXGC.shape)
    # Write the snapshot with the same nphi/iphi metadata as the source data.
    file_output = 'xgc.synthetic.%04d.h5' % timestep
    fo = h5py.File(file_output,'w')
    fo['/dnOvernXGC'] = dnOvernXGC.transpose()
    fo['/nphi'] = sml_nphi
    fo['/iphi'] = sml_iphi
    fo.close()
#!/usr/bin/env python
# coding: utf-8
# In[51]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import norm
from scipy.stats import stats
import matplotlib.backends.backend_pdf
import math
import random
from matplotlib import pyplot as plt
import numpy as np
import random
import math
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import sys
from pyecharts.charts import Bar
from pyecharts import options as opts
from pyecharts.globals import ThemeType
from pyecharts.charts import Bar
from pyecharts import options as opts
import dataframe_image as dfi
from jupyterthemes import get_themes
import jupyterthemes as jt
from jupyterthemes.stylefx import set_nb_theme
from IPython.core.display import display, HTML
import time
get_ipython().run_line_magic('matplotlib', 'inline')
sns.set()
# In[52]:
#Load the dataset with the calculated differences Y[t], commit the first value because difference is NaN and print the head()
def file(fileinput):
    """Resolve *fileinput* to a CSV path and load it into a DataFrame.

    A name that does not already contain ".csv" is treated as a bare dataset
    name and resolved to "data/<name>.csv"; otherwise it is used verbatim.
    Returns the pandas DataFrame read from that file.
    """
    # Idiom fix: use the `not in` operator instead of `not ... in ...`.
    if ".csv" not in fileinput:
        fileinput = "data/" + fileinput + ".csv"
    df = pd.read_csv(fileinput, skiprows=0)
    return df
# In[53]:
def main():
    """Interactively load a CSV time series and save four diagnostic plots.

    Prompts for a file name, then writes timestamped PNGs of the raw series,
    a 14-sample rolling mean, the first difference, and the autocorrelation
    into the timeseries_analysis/ directory.
    """
    user_input = str(input("Please enter the name of the .csv file you want to view: "))
    df = file(user_input)
    df.head()  # NOTE(review): return value discarded — no effect outside a notebook
    #Rename the columns
    df.columns = ['date', 'value']
    df.head()
    # Parse dates and use them as the index so plots get a time axis.
    df.date = pd.to_datetime(df.date)
    df.set_index('date', inplace=True)
    df.head()
    # --- raw series plot ---
    plt.figure()
    df[['value']].plot(figsize = (20,10), linewidth = 5, fontsize = 20)
    plt.xlabel('Date', fontsize = 30)
    plt.ylabel('Load Value', fontsize = 30)
    plt.title('Load Value Time Series', fontsize = 40)
    plt.legend(loc=2, prop={'size': 20})
    plt.savefig('timeseries_analysis/time_series_data' + time.strftime("%Y-%m-%d %H%M%S") + '.png')
    # --- 14-sample rolling-mean smoothing ---
    plt.figure()
    print("Smoothing")
    values = df[['value']]
    values.rolling(14).mean().plot(figsize = (20,10), linewidth = 5, fontsize = 20)
    plt.xlabel('Date', fontsize = 30)
    plt.ylabel('Load Value', fontsize = 30)
    plt.title('Smoothed out Time Series', fontsize = 40)
    plt.legend(loc=2, prop={'size': 20})
    plt.savefig('timeseries_analysis/smoothed_data' + time.strftime("%Y-%m-%d %H%M%S") + '.png')
    # --- first difference ---
    plt.figure()
    values.diff().plot(figsize = (20,10), linewidth = 5, fontsize = 20)
    plt.xlabel('Date', fontsize = 30)
    plt.ylabel('Load Value', fontsize = 30)
    plt.title('Differenced Time Series', fontsize = 40)
    plt.legend(loc=2, prop={'size': 20})
    plt.savefig('timeseries_analysis/differencing_data' + time.strftime("%Y-%m-%d %H%M%S") + '.png')
    # --- autocorrelation ---
    plt.figure()
    values = df['value']
    pd.plotting.autocorrelation_plot(values)
    plt.savefig('timeseries_analysis/autocorrelation' + time.strftime("%Y-%m-%d %H%M%S") + '.png')
    df.corr()  # NOTE(review): result discarded — compute only if it is meant to be shown
    return
# In[54]:
# Entry point: run the interactive analysis only when executed as a script.
if __name__ == "__main__":
    # Typo fix in the user-facing greeting: "Analyis" -> "Analysis".
    print("Welcome to our Time Series Analysis Tool!")
    main()
# In[ ]:
#!/usr/bin/env python
# coding: utf-8
# In[51]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import norm
from scipy.stats import stats
import matplotlib.backends.backend_pdf
import math
import random
from matplotlib import pyplot as plt
import numpy as np
import random
import math
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import sys
from pyecharts.charts import Bar
from pyecharts import options as opts
from pyecharts.globals import ThemeType
from pyecharts.charts import Bar
from pyecharts import options as opts
import dataframe_image as dfi
from jupyterthemes import get_themes
import jupyterthemes as jt
from jupyterthemes.stylefx import set_nb_theme
from IPython.core.display import display, HTML
import time
get_ipython().run_line_magic('matplotlib', 'inline')
sns.set()
# In[52]:
#Load the dataset with the calculated differences Y[t], commit the first value because difference is NaN and print the head()
def file(fileinput):
    """Load a CSV into a DataFrame, resolving bare names to data/<name>.csv."""
    path = fileinput if ".csv" in fileinput else "data/" + fileinput + ".csv"
    return pd.read_csv(path, skiprows=0)
# In[53]:
def main():
user_input = str(input("Please enter the name of the .csv file you want to view: "))
df = file(user_input)
df.head()
#Rename the columns
df.columns = ['date', 'value']
df.head()
df.date = pd.to_datetime(df.date)
df.set_index('date', inplace=True)
df.head()
plt.figure()
df[['value']].plot(figsize = (20,10), linewidth = 5, fontsize = 20)
plt.xlabel('Date', fontsize = 30)
plt.ylabel('Load Value', fontsize = 30)
plt.title('Load Value Time Series', fontsize = 40)
plt.legend(loc=2, prop={'size': 20})
plt.savefig('timeseries_analysis/time_series_data' + time.strftime("%Y-%m-%d %H%M%S") + '.png')
plt.figure()
print("Smoothing")
values = df[['value']]
values.rolling(14).mean().plot(figsize = (20,10), linewidth = 5, fontsize = 20)
plt.xlabel('Date', fontsize = 30)
plt.ylabel('Load Value', fontsize = 30)
plt.title('Smoothed out Time Series', fontsize = 40)
plt.legend(loc=2, prop={'size': 20})
plt.savefig('timeseries_analysis/smoothed_data' + time.strftime("%Y-%m-%d %H%M%S") + '.png')
plt.figure()
values.diff().plot(figsize = (20,10), linewidth = 5, fontsize = 20)
plt.xlabel('Date', fontsize = 30)
plt.ylabel('Load Value', fontsize = 30)
plt.title('Differenced Time Series', fontsize = 40)
plt.legend(loc=2, prop={'size': 20})
plt.savefig('timeseries_analysis/differencing_data' + time.strftime("%Y-%m-%d %H%M%S") + '.png')
plt.figure()
values = df['value']
pd.plotting.autocorrelation_plot(values)
plt.savefig('timeseries_analysis/autocorrelation' + time.strftime("%Y-%m-%d %H%M%S") + '.png')
df.corr()
return
# In[54]:
if __name__ == "__main__":
print("Welcome to our Time Series Analyis Tool!")
main()
# In[ ]:
from django.contrib.auth.models import User
from django.test import TestCase, Client
from django.urls import reverse
from django.utils import timezone
from oauth.models import UserProfile
class KonnektURLsTestCase(TestCase):
    """URL/template tests for the konnekt app's index and search views."""

    def setUp(self):
        # BUG FIX: setUp was decorated with @classmethod and took `cls`.
        # unittest invokes setUp on the instance before each test, so the
        # decorator silently turned per-test fixtures into class-level state.
        # A plain instance setUp is the correct hook. (Django's TestCase also
        # provides self.client automatically; kept explicit for parity.)
        self.client = Client()
        self.user = User.objects.create(username='test_user', first_name='test')
        self.user_profile = UserProfile.objects.create(user=self.user, roll='B00CS000', dob=timezone.now(),
                                                       phone='1234567890', branch='CSE')

    def test_konnekt_url_without_logged_in_case_1(self):
        """url without logged in user case: index page"""
        # url without login --> redirect to login page
        response = self.client.get(reverse('konnekt:index'), follow=True)
        self.assertRedirects(response, reverse('login') + "?next=" + reverse('konnekt:index'))

    def test_konnekt_url_without_looged_in_case_2(self):
        """url without logged in user case: search page"""
        response = self.client.get(reverse('konnekt:search'), follow=True)
        self.assertRedirects(response, reverse('login') + "?next=" + reverse('konnekt:search'))

    def test_konnekt_index_url_with_logged_in(self):
        """index url with logged in user"""
        self.client.force_login(self.user)
        # index url --> index page used
        response = self.client.get(reverse('konnekt:index'), follow=True)
        self.assertTemplateUsed(response, 'konnekt/index.html')
        self.client.logout()

    def test_konnekt_search_url_with_logged_in_case_1(self):
        """search url with logged in user and query=None"""
        self.client.force_login(self.user)
        response = self.client.get(reverse('konnekt:search'), follow=True)
        self.assertTemplateUsed(response, 'konnekt/search.html')
        self.client.logout()

    def test_konnekt_search_url_with_logged_in_case_2(self):
        """search url with logged in user and with query"""
        self.client.force_login(self.user)
        response = self.client.get(reverse('konnekt:search') + '?q=test', follow=True)
        self.assertTemplateUsed(response, 'konnekt/search.html')
        self.client.logout()
class KonnektQueryTestCase(TestCase):
    """Unit tests for UserProfile.objects.search()."""

    def setUp(self):
        # BUG FIX: was a @classmethod taking `cls`; setUp is an instance hook
        # and should create fresh per-test fixtures on `self`.
        self.user = User.objects.create(username='test_user', first_name='test')
        self.user_profile = UserProfile.objects.create(user=self.user, roll='B00CS000', dob=timezone.now(),
                                                       phone='1234567890', branch='CSE')

    def test_konnekt_query_case_1(self):
        """case: query None"""
        self.assertEqual(UserProfile.objects.search(None).count(), 0)

    def test_konnekt_query_case_2(self):
        """case: query term < 3"""
        self.assertEqual(UserProfile.objects.search('tes').count(), 0)

    def test_konnekt_query_case_3(self):
        """case: query term >= 3"""
        self.assertEqual(UserProfile.objects.search('test').count(), 1)
from django.contrib.auth.models import User
from django.test import TestCase, Client
from django.urls import reverse
from django.utils import timezone
from oauth.models import UserProfile
class KonnektURLsTestCase(TestCase):
    """URL/template tests for the konnekt app's index and search views."""

    def setUp(self):
        # BUG FIX: setUp was decorated with @classmethod and took `cls`.
        # unittest invokes setUp on the instance before each test, so the
        # decorator silently turned per-test fixtures into class-level state.
        # A plain instance setUp is the correct hook.
        self.client = Client()
        self.user = User.objects.create(username='test_user', first_name='test')
        self.user_profile = UserProfile.objects.create(user=self.user, roll='B00CS000', dob=timezone.now(),
                                                       phone='1234567890', branch='CSE')

    def test_konnekt_url_without_logged_in_case_1(self):
        """url without logged in user case: index page"""
        # url without login --> redirect to login page
        response = self.client.get(reverse('konnekt:index'), follow=True)
        self.assertRedirects(response, reverse('login') + "?next=" + reverse('konnekt:index'))

    def test_konnekt_url_without_looged_in_case_2(self):
        """url without logged in user case: search page"""
        response = self.client.get(reverse('konnekt:search'), follow=True)
        self.assertRedirects(response, reverse('login') + "?next=" + reverse('konnekt:search'))

    def test_konnekt_index_url_with_logged_in(self):
        """index url with logged in user"""
        self.client.force_login(self.user)
        # index url --> index page used
        response = self.client.get(reverse('konnekt:index'), follow=True)
        self.assertTemplateUsed(response, 'konnekt/index.html')
        self.client.logout()

    def test_konnekt_search_url_with_logged_in_case_1(self):
        """search url with logged in user and query=None"""
        self.client.force_login(self.user)
        response = self.client.get(reverse('konnekt:search'), follow=True)
        self.assertTemplateUsed(response, 'konnekt/search.html')
        self.client.logout()

    def test_konnekt_search_url_with_logged_in_case_2(self):
        """search url with logged in user and with query"""
        self.client.force_login(self.user)
        response = self.client.get(reverse('konnekt:search') + '?q=test', follow=True)
        self.assertTemplateUsed(response, 'konnekt/search.html')
        self.client.logout()
class KonnektQueryTestCase(TestCase):
    """Unit tests for UserProfile.objects.search()."""

    def setUp(self):
        # BUG FIX: was a @classmethod taking `cls`; setUp is an instance hook
        # and should create fresh per-test fixtures on `self`.
        self.user = User.objects.create(username='test_user', first_name='test')
        self.user_profile = UserProfile.objects.create(user=self.user, roll='B00CS000', dob=timezone.now(),
                                                       phone='1234567890', branch='CSE')

    def test_konnekt_query_case_1(self):
        """case: query None"""
        self.assertEqual(UserProfile.objects.search(None).count(), 0)

    def test_konnekt_query_case_2(self):
        """case: query term < 3"""
        self.assertEqual(UserProfile.objects.search('tes').count(), 0)

    def test_konnekt_query_case_3(self):
        """case: query term >= 3"""
        self.assertEqual(UserProfile.objects.search('test').count(), 1)
import numpy
from .dc_motor import *
class LineFollowerBot:
    """Differential-drive line-follower robot simulated through a pybullet client.

    Wraps a URDF model with two wheel joints (indices 0 and 1) driven by a pair
    of DCMotor models; exposes throttle, torque/velocity control and pose/camera
    queries.
    """

    def __init__(self, pb_client, model_file_name, config, starting_position):
        """Load the URDF at starting_position and prepare both wheel joints.

        starting_position: (xyz, orientation-like) pair; only the first element
        of the second item is used, as the initial yaw.
        config: object providing dt, supply_voltage, nominal_voltage,
        no_load_speed and stall_torque.
        """
        self.pb_client = pb_client
        orientation = self._to_quaternion(starting_position[1][0], 0.0, 0.0)
        self.bot_model = self.pb_client.loadURDF(model_file_name, basePosition = starting_position[0], baseOrientation = orientation)
        self.dt = config.dt
        self.supply_voltage = config.supply_voltage
        self.speed_max = config.no_load_speed
        # One independent motor model per wheel.
        self.left_motor = DCMotor(config.nominal_voltage, config.no_load_speed, config.stall_torque)
        self.right_motor = DCMotor(config.nominal_voltage, config.no_load_speed, config.stall_torque)
        self.left_velocity = 0
        self.right_velocity = 0
        # Joint indices in the URDF: 0 = left wheel, 1 = right wheel.
        self.left_wheel_joint = 0
        self.right_wheel_joint = 1
        # Disable the default velocity motors (force=0) so torque control works.
        self.pb_client.setJointMotorControl2(bodyIndex=self.bot_model, jointIndex=self.left_wheel_joint,
                                             controlMode=self.pb_client.VELOCITY_CONTROL, force=0)
        self.pb_client.setJointMotorControl2(bodyIndex=self.bot_model, jointIndex=self.right_wheel_joint,
                                             controlMode=self.pb_client.VELOCITY_CONTROL, force=0)
        self.time = 0.0

    def set_throttle(self, left_power, right_power):
        """Apply normalized throttle in [-1, 1] per wheel for one step of dt.

        Power is scaled to supply voltage, run through the DC motor models at
        the current wheel speeds, and applied as joint torques.
        """
        left_power = numpy.clip(left_power, -1.0, 1.0)
        right_power = numpy.clip(right_power, -1.0, 1.0)
        left_voltage = self.supply_voltage*left_power
        right_voltage = self.supply_voltage*right_power
        self.left_velocity, self.right_velocity = self.get_wheel_velocity()
        left_torque = self.left_motor.step(left_voltage, self.left_velocity)
        right_torque = self.right_motor.step(right_voltage, self.right_velocity)
        self.set_wheel_torque(left_torque, right_torque)
        self.time+= self.dt

    def get_wheel_position(self):
        """Return the (left, right) wheel joint positions."""
        l_pos, l_vel, l_react, l_torque = self.pb_client.getJointState(self.bot_model, self.left_wheel_joint)
        r_pos, r_vel, r_react, r_torque = self.pb_client.getJointState(self.bot_model, self.right_wheel_joint)
        return l_pos, r_pos

    def get_wheel_torque(self):
        """Return the (left, right) applied joint motor torques."""
        l_pos, l_vel, l_react, l_torque = self.pb_client.getJointState(self.bot_model, self.left_wheel_joint)
        r_pos, r_vel, r_react, r_torque = self.pb_client.getJointState(self.bot_model, self.right_wheel_joint)
        return l_torque, r_torque

    def get_wheel_velocity(self):
        """Return the (left, right) wheel speeds converted from rad/s to RPM."""
        l_pos, l_vel, l_react, l_torque = self.pb_client.getJointState(self.bot_model, self.left_wheel_joint)
        r_pos, r_vel, r_react, r_torque = self.pb_client.getJointState(self.bot_model, self.right_wheel_joint)
        # 60 / (2*pi) converts angular velocity [rad/s] to revolutions per minute.
        return l_vel*60.0/(2.0*numpy.pi), r_vel*60.0/(2.0*numpy.pi)

    def get_position(self):
        """Return the base pose as (x, y, z, pitch, roll, yaw).

        NOTE(review): pybullet's getEulerFromQuaternion documents its result as
        (roll, pitch, yaw); the unpack order here names them (pitch, roll, yaw)
        — confirm which labeling callers rely on.
        """
        position, orientation = self.pb_client.getBasePositionAndOrientation(self.bot_model)
        x, y, z = position
        orientation = self.pb_client.getEulerFromQuaternion(orientation)
        pitch, roll, yaw = orientation
        return x, y, z, pitch, roll, yaw

    def set_wheel_torque(self, left_torque, right_torque):
        """Directly command joint torques on both wheels."""
        self.pb_client.setJointMotorControl2(self.bot_model,
                                             jointIndex=self.left_wheel_joint,
                                             controlMode=self.pb_client.TORQUE_CONTROL,
                                             force=left_torque)
        self.pb_client.setJointMotorControl2(self.bot_model,
                                             jointIndex=self.right_wheel_joint,
                                             controlMode=self.pb_client.TORQUE_CONTROL,
                                             force=right_torque)

    def set_wheel_velocity(self, left_velocity, right_velocity):
        """Directly command joint target velocities on both wheels."""
        self.pb_client.setJointMotorControl2(self.bot_model,
                                             jointIndex = self.left_wheel_joint,
                                             controlMode= self.pb_client.VELOCITY_CONTROL,
                                             targetVelocity = left_velocity)
        self.pb_client.setJointMotorControl2(self.bot_model,
                                             jointIndex = self.right_wheel_joint,
                                             controlMode= self.pb_client.VELOCITY_CONTROL,
                                             targetVelocity = right_velocity)

    def _to_quaternion(self, yaw, pitch, roll):
        """Convert Euler angles [rad] to an (x, y, z, w) quaternion.

        NOTE(review): assumed to follow the standard yaw-pitch-roll composition;
        verify the convention matches pybullet's if pitch/roll ever become
        nonzero (only yaw is used in __init__).
        """
        cy = numpy.cos(yaw * 0.5)
        sy = numpy.sin(yaw * 0.5)
        cp = numpy.cos(pitch * 0.5)
        sp = numpy.sin(pitch * 0.5)
        cr = numpy.cos(roll * 0.5)
        sr = numpy.sin(roll * 0.5)
        x = cy * cp * sr - sy * sp * cr
        y = sy * cp * sr + cy * sp * cr
        z = sy * cp * cr - cy * sp * sr
        w = cy * cp * cr + sy * sp * sr
        return x, y, z, w

    def get_image(self, yaw, pitch, roll, distance, target_x, target_y, target_z, width = 512, height = 512, fov = 120):
        """Render an RGB image (H, W, 3) from a camera orbiting the target point."""
        vm = self.pb_client.computeViewMatrixFromYawPitchRoll([target_x, target_y, target_z], distance, yaw, pitch, roll, 2)
        pm = self.pb_client.computeProjectionMatrixFOV(fov=fov,
                                                       aspect=width / height,
                                                       nearVal=0.0001,
                                                       farVal=10.1)
        # NOTE(review): `deth` is a typo for the depth buffer; it is unused here.
        w, h, rgb, deth, seg = self.pb_client.getCameraImage(width=width,
                                                             height=height,
                                                             viewMatrix=vm,
                                                             projectionMatrix=pm,
                                                             renderer=self.pb_client.ER_BULLET_HARDWARE_OPENGL)
        # Drop the alpha channel.
        rgb = numpy.array(rgb)
        rgb = rgb[:, :, :3]
        return rgb
import numpy
from .dc_motor import *
class LineFollowerBot:
def __init__(self, pb_client, model_file_name, config, starting_position):
self.pb_client = pb_client
orientation = self._to_quaternion(starting_position[1][0], 0.0, 0.0)
self.bot_model = self.pb_client.loadURDF(model_file_name, basePosition = starting_position[0], baseOrientation = orientation)
self.dt = config.dt
self.supply_voltage = config.supply_voltage
self.speed_max = config.no_load_speed
self.left_motor = DCMotor(config.nominal_voltage, config.no_load_speed, config.stall_torque)
self.right_motor = DCMotor(config.nominal_voltage, config.no_load_speed, config.stall_torque)
self.left_velocity = 0
self.right_velocity = 0
self.left_wheel_joint = 0
self.right_wheel_joint = 1
self.pb_client.setJointMotorControl2(bodyIndex=self.bot_model, jointIndex=self.left_wheel_joint,
controlMode=self.pb_client.VELOCITY_CONTROL, force=0)
self.pb_client.setJointMotorControl2(bodyIndex=self.bot_model, jointIndex=self.right_wheel_joint,
controlMode=self.pb_client.VELOCITY_CONTROL, force=0)
self.time = 0.0
def set_throttle(self, left_power, right_power):
left_power = numpy.clip(left_power, -1.0, 1.0)
right_power = numpy.clip(right_power, -1.0, 1.0)
left_voltage = self.supply_voltage*left_power
right_voltage = self.supply_voltage*right_power
self.left_velocity, self.right_velocity = self.get_wheel_velocity()
left_torque = self.left_motor.step(left_voltage, self.left_velocity)
right_torque = self.right_motor.step(right_voltage, self.right_velocity)
self.set_wheel_torque(left_torque, right_torque)
self.time+= self.dt
def get_wheel_position(self):
l_pos, l_vel, l_react, l_torque = self.pb_client.getJointState(self.bot_model, self.left_wheel_joint)
r_pos, r_vel, r_react, r_torque = self.pb_client.getJointState(self.bot_model, self.right_wheel_joint)
return l_pos, r_pos
def get_wheel_torque(self):
l_pos, l_vel, l_react, l_torque = self.pb_client.getJointState(self.bot_model, self.left_wheel_joint)
r_pos, r_vel, r_react, r_torque = self.pb_client.getJointState(self.bot_model, self.right_wheel_joint)
return l_torque, r_torque
def get_wheel_velocity(self):
l_pos, l_vel, l_react, l_torque = self.pb_client.getJointState(self.bot_model, self.left_wheel_joint)
r_pos, r_vel, r_react, r_torque = self.pb_client.getJointState(self.bot_model, self.right_wheel_joint)
return l_vel*60.0/(2.0*numpy.pi), r_vel*60.0/(2.0*numpy.pi)
def get_position(self):
position, orientation = self.pb_client.getBasePositionAndOrientation(self.bot_model)
x, y, z = position
orientation = self.pb_client.getEulerFromQuaternion(orientation)
pitch, roll, yaw = orientation
return x, y, z, pitch, roll, yaw
def set_wheel_torque(self, left_torque, right_torque):
self.pb_client.setJointMotorControl2(self.bot_model,
jointIndex=self.left_wheel_joint,
controlMode=self.pb_client.TORQUE_CONTROL,
force=left_torque)
self.pb_client.setJointMotorControl2(self.bot_model,
jointIndex=self.right_wheel_joint,
controlMode=self.pb_client.TORQUE_CONTROL,
force=right_torque)
def set_wheel_velocity(self, left_velocity, right_velocity):
self.pb_client.setJointMotorControl2(self.bot_model,
jointIndex = self.left_wheel_joint,
controlMode= self.pb_client.VELOCITY_CONTROL,
targetVelocity = left_velocity)
self.pb_client.setJointMotorControl2(self.bot_model,
jointIndex = self.right_wheel_joint,
controlMode= self.pb_client.VELOCITY_CONTROL,
targetVelocity = right_velocity)
def _to_quaternion(self, yaw, pitch, roll):
cy = numpy.cos(yaw * 0.5)
sy = numpy.sin(yaw * 0.5)
cp = numpy.cos(pitch * 0.5)
sp = numpy.sin(pitch * 0.5)
cr = numpy.cos(roll * 0.5)
sr = numpy.sin(roll * 0.5)
x = cy * cp * sr - sy * sp * cr
y = sy * cp * sr + cy * sp * cr
z = sy * cp * cr - cy * sp * sr
w = cy * cp * cr + sy * sp * sr
return x, y, z, w
def get_image(self, yaw, pitch, roll, distance, target_x, target_y, target_z, width = 512, height = 512, fov = 120):
vm = self.pb_client.computeViewMatrixFromYawPitchRoll([target_x, target_y, target_z], distance, yaw, pitch, roll, 2)
pm = self.pb_client.computeProjectionMatrixFOV(fov=fov,
aspect=width / height,
nearVal=0.0001,
farVal=10.1)
w, h, rgb, deth, seg = self.pb_client.getCameraImage(width=width,
height=height,
viewMatrix=vm,
projectionMatrix=pm,
renderer=self.pb_client.ER_BULLET_HARDWARE_OPENGL)
rgb = numpy.array(rgb)
rgb = rgb[:, :, :3]
        return rgb
import logging
import requests
import pytest
from app.auth import Authentication, _read_auth_info_from_file, _AuthInfo
class MockedResponse:
    """Minimal stand-in for requests.Response used by the mocking fixtures."""

    # Sentinel outside the valid HTTP range so accidental use is obvious.
    status_code = 600

    def __repr__(self):
        return f'<Response [{self.status_code}]>'
@pytest.fixture
def auth_service():
    """Provide a fresh Authentication instance for each test."""
    yield Authentication()
@pytest.fixture
def patch_file_open_for_auth_info(monkeypatch):
    """Monkeypatch builtins.open so any file read yields fixed auth-info JSON.

    The fake `open` intentionally shadows the builtin name: it is a context
    manager whose read() returns a canned credentials document, so code under
    test can "read" its config file without touching the filesystem.
    """
    class open:
        def __init__(self, *args, **kwargs):
            pass
        def __enter__(self, *args, **kwargs):
            return self
        def __exit__(self, *args, **kwargs):
            return self
        def read(self):
            return """ {
            "url": "test",
            "username": "hello",
            "password": "<PASSWORD>",
            "token_name": "token"
            } """
    monkeypatch.setattr('builtins.open', open)
@pytest.fixture
def mock_get_requests(monkeypatch):
    """Monkeypatch requests.get to return a bare MockedResponse (status 600)."""
    # Fix: removed the unused `old_get = requests.get` binding — monkeypatch
    # restores the original attribute itself on teardown.
    def mock_get(uri, *args, **kwargs):
        return MockedResponse()
    monkeypatch.setattr(requests, 'get', mock_get)
@pytest.fixture
def mock_post_authentication_valid(monkeypatch):
    """Monkeypatch requests.post to simulate a successful login (200 + token)."""
    class MockedAuthResponse(MockedResponse):
        status_code = 200
        def json(self):
            # Shape mirrors the token payload the auth endpoint returns.
            return {
                'token': 'token'
            }
    def mocked_post(*args, **kwargs):
        return MockedAuthResponse()
    monkeypatch.setattr(requests, 'post', mocked_post)
@pytest.fixture
def mock_post_authentication_invalid(monkeypatch):
    """Monkeypatch requests.post to simulate rejected credentials (401)."""
    class MockedUnAuthResponse(MockedResponse):
        status_code = 401
    def mocked_post(*args, **kwargs):
        return MockedUnAuthResponse()
    monkeypatch.setattr(requests, 'post', mocked_post)
def test_read_auth_info_from_file(patch_file_open_for_auth_info):
    """The patched file contents are parsed into the expected _AuthInfo fields."""
    auth_info = _read_auth_info_from_file('test file')
    assert auth_info.url == 'test'
    assert auth_info.username == 'hello'
    assert auth_info.password == '<PASSWORD>'
    assert auth_info.token_name == 'token'
def test_get_auth_token_valid(mock_post_authentication_valid,
        patch_file_open_for_auth_info, auth_service):
    """A 200 login response stores the returned token on the service."""
    status = auth_service.get_auth_token()
    assert status == 200
    assert auth_service.auth_token == 'token'
def test_get_auth_token_invalid(mock_post_authentication_invalid,
        patch_file_open_for_auth_info, auth_service):
    """A 401 login response leaves the service without a token."""
    status = auth_service.get_auth_token()
    assert status != 200
    assert auth_service.auth_token is None
def test_rebounce_on_401_valid(mock_post_authentication_valid,
        patch_file_open_for_auth_info, auth_service):
    """A 401 from the wrapped call triggers re-authentication and stores the token."""
    # Fix: the return value was bound to an unused `status` variable.
    auth_service.rebounce_on_401(lambda *args, **kwargs: 401)
    assert auth_service.auth_token == 'token'
def test_rebounce_on_401_invalid_creds(mock_post_authentication_invalid,
        patch_file_open_for_auth_info, auth_service):
    """With bad credentials the re-auth attempt raises and no token is stored."""
    with pytest.raises(Exception):
        auth_service.rebounce_on_401(lambda *args, **kwargs: 401)
    # BUG FIX: the old version asserted on `status` after the `with` block, but
    # `status` is never bound when rebounce_on_401 raises, so the assertions
    # either died with NameError or were skipped entirely. Only the token
    # state is meaningful to check after the expected exception.
    assert auth_service.auth_token is None
def test_get_bearer_header(auth_service):
    """get_bearer_header wraps the stored token in an Authorization header."""
    test_token = 'test-token'
    auth_service.auth_token = test_token
    assert auth_service.get_bearer_header() == {'Authorization':'Bearer %s' % test_token}
def test_fixture(auth_service):
    """Sanity check: a fresh Authentication fixture starts without a token."""
    assert auth_service.auth_token is None
def test_merge_kwargs_with_headers(auth_service):
    """_merge_kwargs_with_headers keeps caller kwargs and injects the bearer header."""
    # Table-driven: each entry pairs input kwargs with the expected merged dict.
    table = [
        {
            'vars': {'a': 'b', 'c': 'd'},
            'expect': {
                'a': 'b', 'c': 'd',
                'headers': {
                    'Authorization': 'Bearer %s' % auth_service.auth_token
                }
            }
        },
    ]
    for test in table:
        assert auth_service._merge_kwargs_with_headers(**test['vars']) == test['expect']
import logging
import requests
import pytest
from app.auth import Authentication, _read_auth_info_from_file, _AuthInfo
class MockedResponse:
status_code = 600
def __repr__(self):
return '<Response [%s]>' % self.status_code
@pytest.fixture
def auth_service():
yield Authentication()
@pytest.fixture
def patch_file_open_for_auth_info(monkeypatch):
class open:
def __init__(self, *args, **kwargs):
pass
def __enter__(self, *args, **kwargs):
return self
def __exit__(self, *args, **kwargs):
return self
def read(self):
return """ {
"url": "test",
"username": "hello",
"password": "<PASSWORD>",
"token_name": "token"
} """
monkeypatch.setattr('builtins.open', open)
@pytest.fixture
def mock_get_requests(monkeypatch):
old_get = requests.get
def mock_get(uri, *args, **kwargs):
return MockedResponse()
monkeypatch.setattr(requests, 'get', mock_get)
@pytest.fixture
def mock_post_authentication_valid(monkeypatch):
class MockedAuthResponse(MockedResponse):
status_code = 200
def json(self):
return {
'token': 'token'
}
def mocked_post(*args, **kwargs):
return MockedAuthResponse()
monkeypatch.setattr(requests, 'post', mocked_post)
@pytest.fixture
def mock_post_authentication_invalid(monkeypatch):
class MockedUnAuthResponse(MockedResponse):
status_code = 401
def mocked_post(*args, **kwargs):
return MockedUnAuthResponse()
monkeypatch.setattr(requests, 'post', mocked_post)
def test_read_auth_info_from_file(patch_file_open_for_auth_info):
auth_info = _read_auth_info_from_file('test file')
assert auth_info.url == 'test'
assert auth_info.username == 'hello'
assert auth_info.password == '<PASSWORD>'
assert auth_info.token_name == 'token'
def test_get_auth_token_valid(mock_post_authentication_valid,
patch_file_open_for_auth_info, auth_service):
status = auth_service.get_auth_token()
assert status == 200
assert auth_service.auth_token == 'token'
def test_get_auth_token_invalid(mock_post_authentication_invalid,
patch_file_open_for_auth_info, auth_service):
status = auth_service.get_auth_token()
assert status != 200
assert auth_service.auth_token is None
def test_rebounce_on_401_valid(mock_post_authentication_valid,
patch_file_open_for_auth_info, auth_service):
status = auth_service.rebounce_on_401(lambda *args, **kwargs: 401)
assert auth_service.auth_token == 'token'
def test_rebounce_on_401_invalid_creds(mock_post_authentication_invalid,
patch_file_open_for_auth_info, auth_service):
with pytest.raises(Exception):
status = auth_service.rebounce_on_401(lambda *args, **kwargs: 401)
assert status == 401
assert auth_service.auth_token is None
def test_get_bearer_header(auth_service):
test_token = 'test-token'
auth_service.auth_token = test_token
assert auth_service.get_bearer_header() == {'Authorization':'Bearer %s' % test_token}
def test_fixture(auth_service):
assert auth_service.auth_token is None
def test_merge_kwargs_with_headers(auth_service):
table = [
{
'vars': {'a': 'b', 'c': 'd'},
'expect': {
'a': 'b', 'c': 'd',
'headers': {
'Authorization': 'Bearer %s' % auth_service.auth_token
}
}
},
]
for test in table:
        assert auth_service._merge_kwargs_with_headers(**test['vars']) == test['expect']
def test_login_process(test_client, login):
    """
    Login flow test: a successful login should redirect to the
    Index page with an alert of category 'success'.
    """
    response = login
    assert response.status_code == 200
    assert b'Index page' in response.data
    assert b'success' in response.data
def test_login_invalid_data(test_client):
    """
    Login test with three invalid-data variants: wrong username, wrong
    password, and wrong username AND password; in every case the user must
    be sent back to the Sign in page with an alert of category 'danger'.
    """
    invalid_username = test_client.post('users/process-login',
                                        data=dict(username='<EMAIL>',
                                                  password='<PASSWORD>'),
                                        follow_redirects=True)
    assert invalid_username.status_code == 200
    assert b'Sign in' in invalid_username.data
    assert b'danger' in invalid_username.data
    invalid_password = test_client.post('users/process-login',
                                        data=dict(username='<EMAIL>',
                                                  password='<PASSWORD>'),
                                        follow_redirects=True)
    assert invalid_password.status_code == 200
    assert b'Sign in' in invalid_password.data
    assert b'danger' in invalid_password.data
    invalid_username_and_password = test_client.post('users/process-login',
                                                     data=dict(username='<EMAIL>',
                                                               password='<PASSWORD>'),
                                                     follow_redirects=True)
    assert invalid_username_and_password.status_code == 200
    assert b'Sign in' in invalid_username_and_password.data
    assert b'danger' in invalid_username_and_password.data
def test_logout(test_client, login):
    """
    Logout test for an authenticated user: should redirect to the
    Index page with an alert of category 'success'.
    """
    response = test_client.get('users/logout', follow_redirects=True)
    assert response.status_code == 200
    assert b'Index page' in response.data
    assert b'success' in response.data
def test_logout_without_auth(test_client):
    """
    Logout test WITHOUT an authenticated user: should redirect to the
    Index page with an alert of category 'danger'.
    """
    response = test_client.get('users/logout', follow_redirects=True)
    assert response.status_code == 200
    assert b'Index page' in response.data
    assert b'danger' in response.data
def test_reg_process(test_client):
    """
    Registration flow test: on success the user should be redirected to the
    login page with an alert of category 'success'.
    """
    response = test_client.post('users/register',
                                data=dict(username='<EMAIL>',
                                          fio='Test testing test',
                                          password='<PASSWORD>',
                                          password2='<PASSWORD>',
                                          company='T.E.S.T',
                                          position='Manager',
                                          date_of_birth='10.01.1984',
                                          phone_number='+70000000000'),
                                follow_redirects=True)
    assert response.status_code == 200
    assert b'Sign in' in response.data
    assert b'success' in response.data
def test_reg_process_invalid_email(test_client):
    """
    Registration test with an invalid E-mail field: the e-mail of an already
    existing account is reused, so the registration page should be shown
    again with an alert of category 'danger'.
    """
    response = test_client.post('users/register',
                                data=dict(username='<EMAIL>',
                                          fio='Test testing test',
                                          password='<PASSWORD>',
                                          password2='<PASSWORD>',
                                          company='T.E.S.T',
                                          position='Manager',
                                          date_of_birth='10.01.1984',
                                          phone_number='+70000000000'))
    assert response.status_code == 200
    assert b'Sign up' in response.data
    assert b'danger' in response.data
def test_reg_process_invalid_fio(test_client):
    """
    Registration test with an invalid full-name (FIO) field: the full name of
    an already existing account is reused, so the registration page should be
    shown again with an alert of category 'danger'.
    """
    response = test_client.post('users/register',
                                data=dict(username='<EMAIL>',
                                          fio='Test testing',
                                          password='<PASSWORD>',
                                          password2='<PASSWORD>',
                                          company='T.E.S.T',
                                          position='Manager',
                                          date_of_birth='10.01.1984',
                                          phone_number='+70000000000'))
    assert response.status_code == 200
    assert b'Sign up' in response.data
    assert b'danger' in response.data
def test_reg_missing_data(test_client):
"""
Тест процесса регистрации, с несколькими незаполненными полями:
должен быть повторный редирект на страницу регистрации,
категрия алерта - danger
"""
response = test_client.post('users/register',
data=dict(username='<EMAIL>',
fio='Test testing test',
password='<PASSWORD>',
company='T.E.S.T',
phone_number='+70000000000'),
follow_redirects=True)
assert response.status_code == 200
assert b'Sign up' in response.data
assert b'danger' in response.data | webapp/tests/test_login_register.py | def test_login_process(test_client, login):
"""
Тест процесса авторизации: должен быть редирект
на Index page, категория алерта - success
"""
response = login
assert response.status_code == 200
assert b'Index page' in response.data
assert b'success' in response.data
def test_login_invalid_data(test_client):
"""
Тест процесса логина с 3 вариантами неправильных данных:
неправильный логин, неправильный пароль, неправильный логин И
пароль;во всех случаях должен возвращаться редирект
на Sign in страницу, категория алерта - 'danger'
"""
invalid_username = test_client.post('users/process-login',
data=dict(username='<EMAIL>',
password='<PASSWORD>'),
follow_redirects=True)
assert invalid_username.status_code == 200
assert b'Sign in' in invalid_username.data
assert b'danger' in invalid_username.data
invalid_password = test_client.post('users/process-login',
data=dict(username='<EMAIL>',
password='<PASSWORD>'),
follow_redirects=True)
assert invalid_password.status_code == 200
assert b'Sign in' in invalid_password.data
assert b'danger' in invalid_password.data
invalid_username_and_password = test_client.post('users/process-login',
data=dict(username='<EMAIL>',
password='<PASSWORD>'),
follow_redirects=True)
assert invalid_username_and_password.status_code == 200
assert b'Sign in' in invalid_username_and_password.data
assert b'danger' in invalid_username_and_password.data
def test_logout(test_client, login):
"""
Тест процесса выхода из учетной записи:
должен быть редирект на Index page,
категория алерта - success
"""
response = test_client.get('users/logout', follow_redirects=True)
assert response.status_code == 200
assert b'Index page' in response.data
assert b'success' in response.data
def test_logout_without_auth(test_client):
"""
Тест процесса выхода из учетной записи
БЕЗ авторизованного пользователя, должен быть редирект
на Index page, категория алерта - danger
"""
response = test_client.get('users/logout', follow_redirects=True)
assert response.status_code == 200
assert b'Index page' in response.data
assert b'danger' in response.data
def test_reg_process(test_client):
"""
Тест процесса регистрации, если успешно - должен быть редирект на страницу
логина, категория алерта - success
"""
response = test_client.post('users/register',
data=dict(username='<EMAIL>',
fio='Test testing test',
password='<PASSWORD>',
password2='<PASSWORD>',
company='T.E.S.T',
position='Manager',
date_of_birth='10.01.1984',
phone_number='+70000000000'),
follow_redirects=True)
assert response.status_code == 200
assert b'Sign in' in response.data
assert b'success' in response.data
def test_reg_process_invalid_email(test_client):
"""
Тест процесса регистрации, с ошибкой в поле E-mail:
использован e-mail уже существующего аккаунта, должен
быть повторный редирект на страницу регистрации,
категрия алерта - danger
"""
response = test_client.post('users/register',
data=dict(username='<EMAIL>',
fio='Test testing test',
password='<PASSWORD>',
password2='<PASSWORD>',
company='T.E.S.T',
position='Manager',
date_of_birth='10.01.1984',
phone_number='+70000000000'))
assert response.status_code == 200
assert b'Sign up' in response.data
assert b'danger' in response.data
def test_reg_process_invalid_fio(test_client):
"""
Тест процесса регистрации, с ошибкой в поле Ф.И.О:
использовано ФИО уже существующего аккаунта, должен
быть повторный редирект на страницу регистрации,
категрия алерта - danger
"""
response = test_client.post('users/register',
data=dict(username='<EMAIL>',
fio='Test testing',
password='<PASSWORD>',
password2='<PASSWORD>',
company='T.E.S.T',
position='Manager',
date_of_birth='10.01.1984',
phone_number='+70000000000'))
assert response.status_code == 200
assert b'Sign up' in response.data
assert b'danger' in response.data
def test_reg_missing_data(test_client):
"""
Тест процесса регистрации, с несколькими незаполненными полями:
должен быть повторный редирект на страницу регистрации,
категрия алерта - danger
"""
response = test_client.post('users/register',
data=dict(username='<EMAIL>',
fio='Test testing test',
password='<PASSWORD>',
company='T.E.S.T',
phone_number='+70000000000'),
follow_redirects=True)
assert response.status_code == 200
assert b'Sign up' in response.data
assert b'danger' in response.data | 0.351534 | 0.572902 |
import string
import numpy as np
import tensorflow as tf
from official.nlp.modeling import layers
_CHR_IDX = string.ascii_lowercase
# This function is directly copied from the tf.keras.layers.MultiHeadAttention
# implementation.
def _build_attention_equation(rank, attn_axes):
"""Builds einsum equations for the attention computation.
Query, key, value inputs after projection are expected to have the shape as:
`(bs, <non-attention dims>, <attention dims>, num_heads, channels)`.
`bs` and `<non-attention dims>` are treated as `<batch dims>`.
The attention operations can be generalized:
(1) Query-key dot product:
`(<batch dims>, <query attention dims>, num_heads, channels), (<batch dims>,
<key attention dims>, num_heads, channels) -> (<batch dims>,
num_heads, <query attention dims>, <key attention dims>)`
(2) Combination:
`(<batch dims>, num_heads, <query attention dims>, <key attention dims>),
(<batch dims>, <value attention dims>, num_heads, channels) -> (<batch dims>,
<query attention dims>, num_heads, channels)`
Args:
rank: Rank of query, key, value tensors.
attn_axes: List/tuple of axes, `[-1, rank)`,
that attention will be applied to.
Returns:
Einsum equations.
"""
target_notation = _CHR_IDX[:rank]
# `batch_dims` includes the head dim.
batch_dims = tuple(np.delete(range(rank), attn_axes + (rank - 1,)))
letter_offset = rank
source_notation = ''
for i in range(rank):
if i in batch_dims or i == rank - 1:
source_notation += target_notation[i]
else:
source_notation += _CHR_IDX[letter_offset]
letter_offset += 1
product_notation = ''.join([target_notation[i] for i in batch_dims] +
[target_notation[i] for i in attn_axes] +
[source_notation[i] for i in attn_axes])
dot_product_equation = '%s,%s->%s' % (source_notation, target_notation,
product_notation)
attn_scores_rank = len(product_notation)
combine_equation = '%s,%s->%s' % (product_notation, source_notation,
target_notation)
return dot_product_equation, combine_equation, attn_scores_rank
@tf.keras.utils.register_keras_serializable(package='Text')
class EdgeTPUSoftmax(tf.keras.layers.Softmax):
"""EdgeTPU/Quantization friendly implementation for the SoftMax.
When export quant model, use -120 mask value.
When export float model and run inference with bf16 on device, use -10000.
"""
def __init__(self,
mask_value: int = -120,
**kwargs):
self._mask_value = mask_value
super(EdgeTPUSoftmax, self).__init__(**kwargs)
def get_config(self):
config = {
'mask_value': self._mask_value
}
base_config = super(EdgeTPUSoftmax, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, mask=None):
if mask is not None:
adder = (1.0 - tf.cast(mask, inputs.dtype)) * self._mask_value
inputs += adder
if isinstance(self.axis, (tuple, list)):
if len(self.axis) > 1:
return tf.exp(inputs - tf.reduce_logsumexp(
inputs, axis=self.axis, keepdims=True))
else:
return tf.keras.backend.softmax(inputs, axis=self.axis[0])
return tf.keras.backend.softmax(inputs, axis=self.axis)
@tf.keras.utils.register_keras_serializable(package='Text')
class EdgeTPUMultiHeadAttention(tf.keras.layers.MultiHeadAttention):
"""Quantization friendly implementation for the MultiHeadAttention."""
def _build_attention(self, rank):
"""Builds multi-head dot-product attention computations.
This function builds attributes necessary for `_compute_attention` to
costomize attention computation to replace the default dot-product
attention.
Args:
rank: the rank of query, key, value tensors.
"""
if self._attention_axes is None:
self._attention_axes = tuple(range(1, rank - 2))
else:
self._attention_axes = tuple(self._attention_axes)
self._dot_product_equation, self._combine_equation, attn_scores_rank = (
_build_attention_equation(
rank, attn_axes=self._attention_axes))
norm_axes = tuple(
range(attn_scores_rank - len(self._attention_axes), attn_scores_rank))
self._softmax = EdgeTPUSoftmax(axis=norm_axes)
self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout)
class EdgetpuMobileBertTransformer(layers.MobileBertTransformer):
"""Quantization friendly MobileBertTransformer.
Inherits from the MobileBertTransformer but use our customized MHA.
"""
def __init__(self, **kwargs):
super(EdgetpuMobileBertTransformer, self).__init__(**kwargs)
attention_head_size = int(
self.intra_bottleneck_size / self.num_attention_heads)
attention_layer = EdgeTPUMultiHeadAttention(
num_heads=self.num_attention_heads,
key_dim=attention_head_size,
value_dim=attention_head_size,
dropout=self.attention_probs_dropout_prob,
output_shape=self.intra_bottleneck_size,
kernel_initializer=self.initializer,
name='attention')
layer_norm = self.block_layers['attention'][1]
self.block_layers['attention'] = [attention_layer, layer_norm] | official/projects/edgetpu/nlp/modeling/edgetpu_layers.py | import string
import numpy as np
import tensorflow as tf
from official.nlp.modeling import layers
_CHR_IDX = string.ascii_lowercase
# This function is directly copied from the tf.keras.layers.MultiHeadAttention
# implementation.
def _build_attention_equation(rank, attn_axes):
"""Builds einsum equations for the attention computation.
Query, key, value inputs after projection are expected to have the shape as:
`(bs, <non-attention dims>, <attention dims>, num_heads, channels)`.
`bs` and `<non-attention dims>` are treated as `<batch dims>`.
The attention operations can be generalized:
(1) Query-key dot product:
`(<batch dims>, <query attention dims>, num_heads, channels), (<batch dims>,
<key attention dims>, num_heads, channels) -> (<batch dims>,
num_heads, <query attention dims>, <key attention dims>)`
(2) Combination:
`(<batch dims>, num_heads, <query attention dims>, <key attention dims>),
(<batch dims>, <value attention dims>, num_heads, channels) -> (<batch dims>,
<query attention dims>, num_heads, channels)`
Args:
rank: Rank of query, key, value tensors.
attn_axes: List/tuple of axes, `[-1, rank)`,
that attention will be applied to.
Returns:
Einsum equations.
"""
target_notation = _CHR_IDX[:rank]
# `batch_dims` includes the head dim.
batch_dims = tuple(np.delete(range(rank), attn_axes + (rank - 1,)))
letter_offset = rank
source_notation = ''
for i in range(rank):
if i in batch_dims or i == rank - 1:
source_notation += target_notation[i]
else:
source_notation += _CHR_IDX[letter_offset]
letter_offset += 1
product_notation = ''.join([target_notation[i] for i in batch_dims] +
[target_notation[i] for i in attn_axes] +
[source_notation[i] for i in attn_axes])
dot_product_equation = '%s,%s->%s' % (source_notation, target_notation,
product_notation)
attn_scores_rank = len(product_notation)
combine_equation = '%s,%s->%s' % (product_notation, source_notation,
target_notation)
return dot_product_equation, combine_equation, attn_scores_rank
@tf.keras.utils.register_keras_serializable(package='Text')
class EdgeTPUSoftmax(tf.keras.layers.Softmax):
"""EdgeTPU/Quantization friendly implementation for the SoftMax.
When export quant model, use -120 mask value.
When export float model and run inference with bf16 on device, use -10000.
"""
def __init__(self,
mask_value: int = -120,
**kwargs):
self._mask_value = mask_value
super(EdgeTPUSoftmax, self).__init__(**kwargs)
def get_config(self):
config = {
'mask_value': self._mask_value
}
base_config = super(EdgeTPUSoftmax, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, mask=None):
if mask is not None:
adder = (1.0 - tf.cast(mask, inputs.dtype)) * self._mask_value
inputs += adder
if isinstance(self.axis, (tuple, list)):
if len(self.axis) > 1:
return tf.exp(inputs - tf.reduce_logsumexp(
inputs, axis=self.axis, keepdims=True))
else:
return tf.keras.backend.softmax(inputs, axis=self.axis[0])
return tf.keras.backend.softmax(inputs, axis=self.axis)
@tf.keras.utils.register_keras_serializable(package='Text')
class EdgeTPUMultiHeadAttention(tf.keras.layers.MultiHeadAttention):
"""Quantization friendly implementation for the MultiHeadAttention."""
def _build_attention(self, rank):
"""Builds multi-head dot-product attention computations.
This function builds attributes necessary for `_compute_attention` to
costomize attention computation to replace the default dot-product
attention.
Args:
rank: the rank of query, key, value tensors.
"""
if self._attention_axes is None:
self._attention_axes = tuple(range(1, rank - 2))
else:
self._attention_axes = tuple(self._attention_axes)
self._dot_product_equation, self._combine_equation, attn_scores_rank = (
_build_attention_equation(
rank, attn_axes=self._attention_axes))
norm_axes = tuple(
range(attn_scores_rank - len(self._attention_axes), attn_scores_rank))
self._softmax = EdgeTPUSoftmax(axis=norm_axes)
self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout)
class EdgetpuMobileBertTransformer(layers.MobileBertTransformer):
"""Quantization friendly MobileBertTransformer.
Inherits from the MobileBertTransformer but use our customized MHA.
"""
def __init__(self, **kwargs):
super(EdgetpuMobileBertTransformer, self).__init__(**kwargs)
attention_head_size = int(
self.intra_bottleneck_size / self.num_attention_heads)
attention_layer = EdgeTPUMultiHeadAttention(
num_heads=self.num_attention_heads,
key_dim=attention_head_size,
value_dim=attention_head_size,
dropout=self.attention_probs_dropout_prob,
output_shape=self.intra_bottleneck_size,
kernel_initializer=self.initializer,
name='attention')
layer_norm = self.block_layers['attention'][1]
self.block_layers['attention'] = [attention_layer, layer_norm] | 0.941122 | 0.588475 |
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from gramdp_count import gramdp_count
from gramdp_mean import gramdp_mean
from gramdp_sum import gramdp_sum
from gramdp_var import gramdp_var
import matplotlib.gridspec as gs
from matplotlib.lines import Line2D
'''
query : string : 'count', 'sum', 'mean', 'std', 'var'
desired_privacy : string : 'very_high', 'high', 'moderate', 'low', 'very_low'
'''
dataset_path = 'adult.csv'
column = 'age'
query = 'var'
df = pd.read_csv(dataset_path)
array = df[column]
Desired_privacy = ['very_high', 'high', 'moderate', 'low', 'very_low']
dp_results_list =[]
std_se_results_list =[]
percent_results_list =[]
true_results_list =[]
MSE_results = []
scaled_error_results = []
percentage_error_results = []
eps_list = []
itr = []
for desired_privacy in Desired_privacy:
print('Calculating results for {d} privacy.'.format(d=desired_privacy))
for i in range(1000, 11000, 500):
iterations =i
itr.append(i)
dp_result = eval('gramdp_'+ query)(array=array, desired_privacy=desired_privacy, iterations=iterations)
eps_list.append(dp_result[0])
dp_results_list.append(dp_result[1])
std_se_results_list.append(dp_result[2])
percent_results_list.append(dp_result[3])
MSE_results.append(dp_result[4])
scaled_error_results.append(dp_result[5])
percentage_error_results.append(dp_result[6])
if query == 'count':
true_results_list.append(len(array))
else:
true_results_list.append(eval('np.{q}'.format(q=query))(array))
gs1 = gs.GridSpec(nrows=2, ncols=2)
figure = plt.gcf()
ax1 = plt.subplot(gs1[0,0])
ax1.plot(eps_list, dp_results_list, color='xkcd:orangish red')
ax1.set_ylabel('Average DP', size=19)
ax2 = plt.subplot(gs1[0,1])
ax2.plot(eps_list, MSE_results, color='xkcd:orangish red')
ax2.set_ylabel('Mean Squared Error (MSE)', size=19)
ax3 = plt.subplot(gs1[1,0])
ax3.plot(eps_list, scaled_error_results, color='xkcd:orangish red')
ax3.set_ylabel('Mean Scaled Error', size=19)
ax4 = plt.subplot(gs1[1,1])
ax4.plot(eps_list, percent_results_list, color='xkcd:orangish red')
ax4.set_ylabel('Root Mean Squared \n Percentage Error (RMSPE) [%]', size=19)
ax1.set_xticks([])
ax2.set_xticks([])
ax3.set_xlabel('Epsilon', size=21)
ax4.set_xlabel('Epsilon', size=21)
plt.subplots_adjust(hspace=0.06)
if query == 'count':
figure.suptitle('Count Query', fontsize=25)
if query == 'sum':
figure.suptitle('Sum Query', fontsize=25)
if query == 'mean':
figure.suptitle('Mean Query', fontsize=25)
if query == 'var':
figure.suptitle('Variance Query', fontsize=25)
plt.show() | GRAM_DP/gramdp_Analysis.py | import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from gramdp_count import gramdp_count
from gramdp_mean import gramdp_mean
from gramdp_sum import gramdp_sum
from gramdp_var import gramdp_var
import matplotlib.gridspec as gs
from matplotlib.lines import Line2D
'''
query : string : 'count', 'sum', 'mean', 'std', 'var'
desired_privacy : string : 'very_high', 'high', 'moderate', 'low', 'very_low'
'''
dataset_path = 'adult.csv'
column = 'age'
query = 'var'
df = pd.read_csv(dataset_path)
array = df[column]
Desired_privacy = ['very_high', 'high', 'moderate', 'low', 'very_low']
dp_results_list =[]
std_se_results_list =[]
percent_results_list =[]
true_results_list =[]
MSE_results = []
scaled_error_results = []
percentage_error_results = []
eps_list = []
itr = []
for desired_privacy in Desired_privacy:
print('Calculating results for {d} privacy.'.format(d=desired_privacy))
for i in range(1000, 11000, 500):
iterations =i
itr.append(i)
dp_result = eval('gramdp_'+ query)(array=array, desired_privacy=desired_privacy, iterations=iterations)
eps_list.append(dp_result[0])
dp_results_list.append(dp_result[1])
std_se_results_list.append(dp_result[2])
percent_results_list.append(dp_result[3])
MSE_results.append(dp_result[4])
scaled_error_results.append(dp_result[5])
percentage_error_results.append(dp_result[6])
if query == 'count':
true_results_list.append(len(array))
else:
true_results_list.append(eval('np.{q}'.format(q=query))(array))
gs1 = gs.GridSpec(nrows=2, ncols=2)
figure = plt.gcf()
ax1 = plt.subplot(gs1[0,0])
ax1.plot(eps_list, dp_results_list, color='xkcd:orangish red')
ax1.set_ylabel('Average DP', size=19)
ax2 = plt.subplot(gs1[0,1])
ax2.plot(eps_list, MSE_results, color='xkcd:orangish red')
ax2.set_ylabel('Mean Squared Error (MSE)', size=19)
ax3 = plt.subplot(gs1[1,0])
ax3.plot(eps_list, scaled_error_results, color='xkcd:orangish red')
ax3.set_ylabel('Mean Scaled Error', size=19)
ax4 = plt.subplot(gs1[1,1])
ax4.plot(eps_list, percent_results_list, color='xkcd:orangish red')
ax4.set_ylabel('Root Mean Squared \n Percentage Error (RMSPE) [%]', size=19)
ax1.set_xticks([])
ax2.set_xticks([])
ax3.set_xlabel('Epsilon', size=21)
ax4.set_xlabel('Epsilon', size=21)
plt.subplots_adjust(hspace=0.06)
if query == 'count':
figure.suptitle('Count Query', fontsize=25)
if query == 'sum':
figure.suptitle('Sum Query', fontsize=25)
if query == 'mean':
figure.suptitle('Mean Query', fontsize=25)
if query == 'var':
figure.suptitle('Variance Query', fontsize=25)
plt.show() | 0.318167 | 0.258985 |
# Author(s): <NAME> (hychen) <<EMAIL>>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import unittest
from boliau import cmdlib
class RegisterArgumentsTestCase(unittest.TestCase):
def setUp(self):
self.cmd = cmdlib.Command()
def test_empty_setting(self):
self.assertRaises(ValueError, self.cmd.register_arguments, [])
self.assertRaises(ValueError, self.cmd.register_arguments, None)
def test_regist_argconf(self):
conf = [
(['id'], None),
(['-d', '--desc'], None)
]
self.cmd.register_arguments(conf)
def test_regist_both(self):
conf = [
(['-s', '--scripts'], {'nargs': '+'})
]
self.cmd.register_arguments(conf)
self.cmd.argv = ['-s', 'a', 'b']
self.assertEquals(['a', 'b'],
self.cmd.parse_argv().scripts)
class ExecuteCommandTestCase(unittest.TestCase):
def setUp(self):
self.cmd = cmdlib.Command()
def test_no_action(self):
self.assertRaises(ValueError, self.cmd.call)
def test_action_has_wrong_type(self):
self.assertRaises(TypeError, self.cmd.action, None)
def test_sum(self):
self.cmd.register_arguments([(['num'], {'nargs': '+'})])
self.cmd.argv = ['1', '2', '3']
self.cmd.action = lambda num : sum(map(int, num))
self.assertEquals(6, self.cmd.call(self.cmd.parse_argv()))
def test_as_command(self):
newcmd = cmdlib.as_command(lambda num : sum(map(int, num)),
[(['num'], {'nargs': '+'})])
newcmd.argv = ['1', '2', '3']
self.assertEquals(6, newcmd.call(newcmd.parse_argv())) | test/test_cmdlib.py |
# Author(s): <NAME> (hychen) <<EMAIL>>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import unittest
from boliau import cmdlib
class RegisterArgumentsTestCase(unittest.TestCase):
def setUp(self):
self.cmd = cmdlib.Command()
def test_empty_setting(self):
self.assertRaises(ValueError, self.cmd.register_arguments, [])
self.assertRaises(ValueError, self.cmd.register_arguments, None)
def test_regist_argconf(self):
conf = [
(['id'], None),
(['-d', '--desc'], None)
]
self.cmd.register_arguments(conf)
def test_regist_both(self):
conf = [
(['-s', '--scripts'], {'nargs': '+'})
]
self.cmd.register_arguments(conf)
self.cmd.argv = ['-s', 'a', 'b']
self.assertEquals(['a', 'b'],
self.cmd.parse_argv().scripts)
class ExecuteCommandTestCase(unittest.TestCase):
def setUp(self):
self.cmd = cmdlib.Command()
def test_no_action(self):
self.assertRaises(ValueError, self.cmd.call)
def test_action_has_wrong_type(self):
self.assertRaises(TypeError, self.cmd.action, None)
def test_sum(self):
self.cmd.register_arguments([(['num'], {'nargs': '+'})])
self.cmd.argv = ['1', '2', '3']
self.cmd.action = lambda num : sum(map(int, num))
self.assertEquals(6, self.cmd.call(self.cmd.parse_argv()))
def test_as_command(self):
newcmd = cmdlib.as_command(lambda num : sum(map(int, num)),
[(['num'], {'nargs': '+'})])
newcmd.argv = ['1', '2', '3']
self.assertEquals(6, newcmd.call(newcmd.parse_argv())) | 0.55929 | 0.336168 |
# Main python libraries
import sys
import os
# PIP3 imports
try:
import yaml
from sqlalchemy import create_engine
import pymysql
except ImportError:
import pip
_PACKAGES = ['PyYAML', 'sqlalchemy', 'pymysql']
for _PACKAGE in _PACKAGES:
pip.main(['install', '--user', _PACKAGE])
print(
'New Python packages installed. Please run this script again to '
'complete the Infoset-NG installation.')
# Must exit abnormally as the script didn't complete
sys.exit(2)
# Try to create a working PYTHONPATH
_MAINT_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
_ROOT_DIRECTORY = os.path.abspath(
os.path.join(_MAINT_DIRECTORY, os.pardir))
if _ROOT_DIRECTORY.endswith('/infoset-ng') is True:
sys.path.append(_ROOT_DIRECTORY)
else:
print(
'Infoset-NG is not installed in a "infoset-ng/" directory. '
'Please fix.')
sys.exit(2)
# Infoset libraries
from infoset.utils import log
from infoset.utils import configuration
from infoset.utils import general
from infoset.db.db_orm import BASE, Agent, Department, Device, Billcode
from infoset.db.db_orm import Configuration, DeviceAgent, Datapoint, AgentName
from infoset.db import URL
from infoset.db import db_configuration
from infoset.db import db_billcode
from infoset.db import db_department
from infoset.db import db_device
from infoset.db import db_agent
from infoset.db import db_agentname
from infoset.db import db_deviceagent
from infoset.db import db_datapoint
from infoset.db import db
from maintenance import shared
class _DatabaseSetup(object):
"""Class to setup database."""
def __init__(self):
"""Function for intializing the class.
Args:
None
Returns:
None
"""
# Initialize key variables
self.reserved = '_SYSTEM_RESERVED_'
self.config = configuration.Config()
def _insert_datapoint(self):
"""Insert first datapoint in the database.
Args:
None
Returns:
None
"""
# Insert
if db_datapoint.idx_datapoint_exists(1) is False:
record = Datapoint(
id_datapoint=general.encode(self.reserved),
agent_label=general.encode(self.reserved),
agent_source=general.encode(self.reserved)
)
database = db.Database()
database.add(record, 1047)
def _insert_department(self):
"""Insert first department in the database.
Args:
None
Returns:
None
"""
# Insert
if db_department.idx_department_exists(1) is False:
record = Department(
code=general.encode(self.reserved),
name=general.encode(self.reserved))
database = db.Database()
database.add(record, 1102)
def _insert_billcode(self):
"""Insert first billcode in the database.
Args:
None
Returns:
None
"""
# Insert
if db_billcode.idx_billcode_exists(1) is False:
record = Billcode(
code=general.encode(self.reserved),
name=general.encode(self.reserved))
database = db.Database()
database.add(record, 1104)
def _insert_agent_device(self):
"""Insert first agent and device in the database.
Args:
None
Returns:
None
"""
# Initialize key variables
idx_agentname = 1
idx_agent = 1
idx_device = 1
# Add agent name
if db_agentname.idx_agentname_exists(idx_agentname) is False:
# Generate a name add a record in the database
record = AgentName(
name=general.encode(self.reserved))
database = db.Database()
database.add(record, 1019)
# Add agent
if db_agent.idx_agent_exists(idx_agent) is False:
# Generate an Agent ID and add a record in the database
record = Agent(id_agent=general.encode(self.reserved))
database = db.Database()
database.add(record, 1109)
# Add device
if db_device.idx_device_exists(idx_device) is False:
record = Device(
description=general.encode(self.reserved),
devicename=general.encode(self.reserved)
)
database = db.Database()
database.add(record, 1106)
# Add to Agent / Device table
if db_deviceagent.device_agent_exists(idx_device, idx_agent) is False:
record = DeviceAgent(idx_device=idx_device, idx_agent=idx_agent)
database = db.Database()
database.add(record, 1107)
def _insert_config(self):
"""Insert first config in the database.
Args:
None
Returns:
None
"""
# Initialize key variables
key_values = [('version', '0.0.0.0')]
# Cycle through all the key value pairs
for item in key_values:
key = item[0]
value = item[1]
# Check if value exists and insert if not
if db_configuration.config_key_exists(key) is False:
record = Configuration(
config_key=general.encode(key),
config_value=general.encode(value))
database = db.Database()
database.add(record, 1108)
def run(self):
"""Setup database.
Args:
None
Returns:
None
"""
# Initialize key variables
use_mysql = True
pool_size = 25
max_overflow = 25
config = self.config
# Create DB connection pool
if use_mysql is True:
# Add MySQL to the pool
engine = create_engine(
URL, echo=True,
encoding='utf8',
max_overflow=max_overflow,
pool_size=pool_size, pool_recycle=3600)
# Try to create the database
shared.print_ok('Attempting to create database tables')
try:
sql_string = (
'ALTER DATABASE %s CHARACTER SET utf8mb4 '
'COLLATE utf8mb4_general_ci') % (config.db_name())
engine.execute(sql_string)
except:
log_message = (
'Cannot connect to database %s. '
'Verify database server is started. '
'Verify database is created. '
'Verify that the configured database authentication '
'is correct.') % (config.db_name())
log.log2die(1136, log_message)
# Apply schemas
shared.print_ok('Applying Schemas.')
BASE.metadata.create_all(engine)
# Insert database entries
self._insert_agent_device()
self._insert_billcode()
self._insert_department()
self._insert_datapoint()
self._insert_config()
def run():
"""Setup infoset-ng.
Args:
None
Returns:
None
"""
# Run server setup
_DatabaseSetup().run()
# All done
shared.print_ok('Database installation successful.')
if __name__ == '__main__':
# Run setup
run() | maintenance/database.py | # Main python libraries
import sys
import os
# PIP3 imports
try:
import yaml
from sqlalchemy import create_engine
import pymysql
except ImportError:
import pip
_PACKAGES = ['PyYAML', 'sqlalchemy', 'pymysql']
for _PACKAGE in _PACKAGES:
pip.main(['install', '--user', _PACKAGE])
print(
'New Python packages installed. Please run this script again to '
'complete the Infoset-NG installation.')
# Must exit abnormally as the script didn't complete
sys.exit(2)
# Try to create a working PYTHONPATH
_MAINT_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
_ROOT_DIRECTORY = os.path.abspath(
os.path.join(_MAINT_DIRECTORY, os.pardir))
if _ROOT_DIRECTORY.endswith('/infoset-ng') is True:
sys.path.append(_ROOT_DIRECTORY)
else:
print(
'Infoset-NG is not installed in a "infoset-ng/" directory. '
'Please fix.')
sys.exit(2)
# Infoset libraries
from infoset.utils import log
from infoset.utils import configuration
from infoset.utils import general
from infoset.db.db_orm import BASE, Agent, Department, Device, Billcode
from infoset.db.db_orm import Configuration, DeviceAgent, Datapoint, AgentName
from infoset.db import URL
from infoset.db import db_configuration
from infoset.db import db_billcode
from infoset.db import db_department
from infoset.db import db_device
from infoset.db import db_agent
from infoset.db import db_agentname
from infoset.db import db_deviceagent
from infoset.db import db_datapoint
from infoset.db import db
from maintenance import shared
class _DatabaseSetup(object):
"""Class to setup database."""
def __init__(self):
"""Function for intializing the class.
Args:
None
Returns:
None
"""
# Initialize key variables
self.reserved = '_SYSTEM_RESERVED_'
self.config = configuration.Config()
def _insert_datapoint(self):
"""Insert first datapoint in the database.
Args:
None
Returns:
None
"""
# Insert
if db_datapoint.idx_datapoint_exists(1) is False:
record = Datapoint(
id_datapoint=general.encode(self.reserved),
agent_label=general.encode(self.reserved),
agent_source=general.encode(self.reserved)
)
database = db.Database()
database.add(record, 1047)
def _insert_department(self):
"""Insert first department in the database.
Args:
None
Returns:
None
"""
# Insert
if db_department.idx_department_exists(1) is False:
record = Department(
code=general.encode(self.reserved),
name=general.encode(self.reserved))
database = db.Database()
database.add(record, 1102)
def _insert_billcode(self):
"""Insert first billcode in the database.
Args:
None
Returns:
None
"""
# Insert
if db_billcode.idx_billcode_exists(1) is False:
record = Billcode(
code=general.encode(self.reserved),
name=general.encode(self.reserved))
database = db.Database()
database.add(record, 1104)
def _insert_agent_device(self):
"""Insert first agent and device in the database.
Args:
None
Returns:
None
"""
# Initialize key variables
idx_agentname = 1
idx_agent = 1
idx_device = 1
# Add agent name
if db_agentname.idx_agentname_exists(idx_agentname) is False:
# Generate a name add a record in the database
record = AgentName(
name=general.encode(self.reserved))
database = db.Database()
database.add(record, 1019)
# Add agent
if db_agent.idx_agent_exists(idx_agent) is False:
# Generate an Agent ID and add a record in the database
record = Agent(id_agent=general.encode(self.reserved))
database = db.Database()
database.add(record, 1109)
# Add device
if db_device.idx_device_exists(idx_device) is False:
record = Device(
description=general.encode(self.reserved),
devicename=general.encode(self.reserved)
)
database = db.Database()
database.add(record, 1106)
# Add to Agent / Device table
if db_deviceagent.device_agent_exists(idx_device, idx_agent) is False:
record = DeviceAgent(idx_device=idx_device, idx_agent=idx_agent)
database = db.Database()
database.add(record, 1107)
def _insert_config(self):
"""Insert first config in the database.
Args:
None
Returns:
None
"""
# Initialize key variables
key_values = [('version', '0.0.0.0')]
# Cycle through all the key value pairs
for item in key_values:
key = item[0]
value = item[1]
# Check if value exists and insert if not
if db_configuration.config_key_exists(key) is False:
record = Configuration(
config_key=general.encode(key),
config_value=general.encode(value))
database = db.Database()
database.add(record, 1108)
def run(self):
"""Setup database.
Args:
None
Returns:
None
"""
# Initialize key variables
use_mysql = True
pool_size = 25
max_overflow = 25
config = self.config
# Create DB connection pool
if use_mysql is True:
# Add MySQL to the pool
engine = create_engine(
URL, echo=True,
encoding='utf8',
max_overflow=max_overflow,
pool_size=pool_size, pool_recycle=3600)
# Try to create the database
shared.print_ok('Attempting to create database tables')
try:
sql_string = (
'ALTER DATABASE %s CHARACTER SET utf8mb4 '
'COLLATE utf8mb4_general_ci') % (config.db_name())
engine.execute(sql_string)
except:
log_message = (
'Cannot connect to database %s. '
'Verify database server is started. '
'Verify database is created. '
'Verify that the configured database authentication '
'is correct.') % (config.db_name())
log.log2die(1136, log_message)
# Apply schemas
shared.print_ok('Applying Schemas.')
BASE.metadata.create_all(engine)
# Insert database entries
self._insert_agent_device()
self._insert_billcode()
self._insert_department()
self._insert_datapoint()
self._insert_config()
def run():
"""Setup infoset-ng.
Args:
None
Returns:
None
"""
# Run server setup
_DatabaseSetup().run()
# All done
shared.print_ok('Database installation successful.')
if __name__ == '__main__':
# Run setup
run() | 0.492432 | 0.070081 |
from datetime import timedelta, datetime
import lcoreapi
from django.conf import settings
import logging
cluster_messages = settings.LAMBDAINST_CLUSTER_MESSAGES
lcore_settings = settings.LCORE
LCORE_BASE_URL = lcore_settings.get('BASE_URL')
LCORE_API_KEY = lcore_settings['API_KEY']
LCORE_API_SECRET = lcore_settings['API_SECRET']
LCORE_SOURCE_ADDR = lcore_settings.get('SOURCE_ADDRESS')
LCORE_INST_SECRET = lcore_settings['INST_SECRET']
LCORE_TIMEOUT = lcore_settings.get('TIMEOUT', 10)
# The default is to log the exception and only raise it if we cannot show
# the previous value or a default value instead.
LCORE_RAISE_ERRORS = bool(lcore_settings.get('RAISE_ERRORS', False))
LCORE_CACHE_TTL = lcore_settings.get('CACHE_TTL', 60)
if isinstance(LCORE_CACHE_TTL, int):
LCORE_CACHE_TTL = timedelta(seconds=LCORE_CACHE_TTL)
assert isinstance(LCORE_CACHE_TTL, timedelta)
VPN_AUTH_STORAGE = settings.VPN_AUTH_STORAGE
assert VPN_AUTH_STORAGE in ('core', 'inst')
core_api = lcoreapi.API(LCORE_API_KEY, LCORE_API_SECRET, LCORE_BASE_URL,
timeout=LCORE_TIMEOUT)
class APICache:
""" Cache data for a time, try to update and silence errors.
Outdated data is not a problem.
"""
def __init__(self, ttl=None, initial=None):
self.cache_date = datetime.fromtimestamp(0)
self.ttl = ttl or LCORE_CACHE_TTL
self.has_cached_value = initial is not None
self.cached = initial() if initial else None
def query(self, wrapped, *args, **kwargs):
try:
return wrapped(*args, **kwargs)
except lcoreapi.APIError:
logger = logging.getLogger('django.request')
logger.exception("core api error")
if LCORE_RAISE_ERRORS:
raise
if not self.has_cached_value:
# We only return a default value if we were given one.
# Prevents returning an unexpected None.
raise
# Return previous value
return self.cached
def __call__(self, wrapped):
def wrapper(*args, **kwargs):
if self.cache_date > (datetime.now() - self.ttl):
return self.cached
self.cached = self.query(wrapped, *args, **kwargs)
# New results *and* errors are cached
self.cache_date = datetime.now()
return self.cached
return wrapper
@APICache(initial=lambda: 0)
def current_active_sessions():
return core_api.get(core_api.info['current_instance'] + '/sessions', active=True)['total_count']
@APICache(initial=lambda: [])
def get_locations():
gateways = core_api.get('/gateways/', enabled=True)
locations = {}
for gw in gateways.list_iter():
cc = gw['cluster_name']
if cc not in locations:
locations[cc] = dict(
servers=0,
bandwidth=0,
hostname='gw.' + cc + '.204vpn.net',
country_code=cc,
message=cluster_messages.get(cc),
)
locations[cc]['servers'] += 1
locations[cc]['bandwidth'] += gw['bandwidth']
locations = sorted(locations.items(), key=lambda x: x[1]['country_code'])
return locations
@APICache(initial=lambda: [])
def get_gateway_exit_ips():
gateways = core_api.get('/gateways/', enabled=True)
ipv4_list = []
ipv6_list = []
for gw in gateways.list_iter():
ma = gw['main_addr']
if ma.get('ipv4'):
ipv4_list.append(ma['ipv4'])
if ma.get('ipv6'):
ipv6_list.append(ma['ipv6'])
# TODO: IPv6 support
return ipv4_list
def is_vpn_gateway(ip):
addresses = get_gateway_exit_ips()
return ip in addresses
def create_user(username, cleartext_password):
""" The password will be hashed and stored safely on the core,
so we have to send it clearly here.
"""
path = core_api.info['current_instance'] + '/users/'
core_api.post(path, data={
'username': username,
'password': <PASSWORD>,
'expiration_date': datetime(1, 1, 1).isoformat(), # Expired.
})
def update_user_expiration(user):
path = core_api.info['current_instance'] + '/users/' + user.username
try:
if not user.is_active:
core_api.patch(path, data={
'expiration_date': datetime(1, 1, 1).isoformat(), # Expired.
})
return
core_api.patch(path, data={
'expiration_date': user.vpnuser.expiration,
})
except lcoreapi.APIError:
# User can't do anything to this, we should just report it
logger = logging.getLogger('django.request')
logger.exception("core api error, missing user (exp update)")
def update_user_password(user, cleartext_password):
path = core_api.info['current_instance'] + '/users/' + user.username
try:
core_api.patch(path, data={
'password': <PASSWORD>,
})
except lcoreapi.APINotFoundError:
# This time we can try fix it!
create_user(user.username, cleartext_password)
except lcoreapi.APIError:
# and maybe fail.
logger = logging.getLogger('django.request')
logger.exception("core api error (password update)")
def delete_user(username):
path = core_api.info['current_instance'] + '/users/' + username
core_api.delete(path) | lambdainst/core.py | from datetime import timedelta, datetime
import lcoreapi
from django.conf import settings
import logging
cluster_messages = settings.LAMBDAINST_CLUSTER_MESSAGES
lcore_settings = settings.LCORE
LCORE_BASE_URL = lcore_settings.get('BASE_URL')
LCORE_API_KEY = lcore_settings['API_KEY']
LCORE_API_SECRET = lcore_settings['API_SECRET']
LCORE_SOURCE_ADDR = lcore_settings.get('SOURCE_ADDRESS')
LCORE_INST_SECRET = lcore_settings['INST_SECRET']
LCORE_TIMEOUT = lcore_settings.get('TIMEOUT', 10)
# The default is to log the exception and only raise it if we cannot show
# the previous value or a default value instead.
LCORE_RAISE_ERRORS = bool(lcore_settings.get('RAISE_ERRORS', False))
LCORE_CACHE_TTL = lcore_settings.get('CACHE_TTL', 60)
if isinstance(LCORE_CACHE_TTL, int):
LCORE_CACHE_TTL = timedelta(seconds=LCORE_CACHE_TTL)
assert isinstance(LCORE_CACHE_TTL, timedelta)
VPN_AUTH_STORAGE = settings.VPN_AUTH_STORAGE
assert VPN_AUTH_STORAGE in ('core', 'inst')
core_api = lcoreapi.API(LCORE_API_KEY, LCORE_API_SECRET, LCORE_BASE_URL,
timeout=LCORE_TIMEOUT)
class APICache:
""" Cache data for a time, try to update and silence errors.
Outdated data is not a problem.
"""
def __init__(self, ttl=None, initial=None):
self.cache_date = datetime.fromtimestamp(0)
self.ttl = ttl or LCORE_CACHE_TTL
self.has_cached_value = initial is not None
self.cached = initial() if initial else None
def query(self, wrapped, *args, **kwargs):
try:
return wrapped(*args, **kwargs)
except lcoreapi.APIError:
logger = logging.getLogger('django.request')
logger.exception("core api error")
if LCORE_RAISE_ERRORS:
raise
if not self.has_cached_value:
# We only return a default value if we were given one.
# Prevents returning an unexpected None.
raise
# Return previous value
return self.cached
def __call__(self, wrapped):
def wrapper(*args, **kwargs):
if self.cache_date > (datetime.now() - self.ttl):
return self.cached
self.cached = self.query(wrapped, *args, **kwargs)
# New results *and* errors are cached
self.cache_date = datetime.now()
return self.cached
return wrapper
@APICache(initial=lambda: 0)
def current_active_sessions():
return core_api.get(core_api.info['current_instance'] + '/sessions', active=True)['total_count']
@APICache(initial=lambda: [])
def get_locations():
gateways = core_api.get('/gateways/', enabled=True)
locations = {}
for gw in gateways.list_iter():
cc = gw['cluster_name']
if cc not in locations:
locations[cc] = dict(
servers=0,
bandwidth=0,
hostname='gw.' + cc + '.204vpn.net',
country_code=cc,
message=cluster_messages.get(cc),
)
locations[cc]['servers'] += 1
locations[cc]['bandwidth'] += gw['bandwidth']
locations = sorted(locations.items(), key=lambda x: x[1]['country_code'])
return locations
@APICache(initial=lambda: [])
def get_gateway_exit_ips():
gateways = core_api.get('/gateways/', enabled=True)
ipv4_list = []
ipv6_list = []
for gw in gateways.list_iter():
ma = gw['main_addr']
if ma.get('ipv4'):
ipv4_list.append(ma['ipv4'])
if ma.get('ipv6'):
ipv6_list.append(ma['ipv6'])
# TODO: IPv6 support
return ipv4_list
def is_vpn_gateway(ip):
addresses = get_gateway_exit_ips()
return ip in addresses
def create_user(username, cleartext_password):
""" The password will be hashed and stored safely on the core,
so we have to send it clearly here.
"""
path = core_api.info['current_instance'] + '/users/'
core_api.post(path, data={
'username': username,
'password': <PASSWORD>,
'expiration_date': datetime(1, 1, 1).isoformat(), # Expired.
})
def update_user_expiration(user):
path = core_api.info['current_instance'] + '/users/' + user.username
try:
if not user.is_active:
core_api.patch(path, data={
'expiration_date': datetime(1, 1, 1).isoformat(), # Expired.
})
return
core_api.patch(path, data={
'expiration_date': user.vpnuser.expiration,
})
except lcoreapi.APIError:
# User can't do anything to this, we should just report it
logger = logging.getLogger('django.request')
logger.exception("core api error, missing user (exp update)")
def update_user_password(user, cleartext_password):
path = core_api.info['current_instance'] + '/users/' + user.username
try:
core_api.patch(path, data={
'password': <PASSWORD>,
})
except lcoreapi.APINotFoundError:
# This time we can try fix it!
create_user(user.username, cleartext_password)
except lcoreapi.APIError:
# and maybe fail.
logger = logging.getLogger('django.request')
logger.exception("core api error (password update)")
def delete_user(username):
path = core_api.info['current_instance'] + '/users/' + username
core_api.delete(path) | 0.532182 | 0.090856 |
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class PointTransInfo(object):
def __init__(self):
self._op_time = None
self._point = None
self._remark = None
self._trans_no = None
self._trans_type = None
@property
def op_time(self):
return self._op_time
@op_time.setter
def op_time(self, value):
self._op_time = value
@property
def point(self):
return self._point
@point.setter
def point(self, value):
self._point = value
@property
def remark(self):
return self._remark
@remark.setter
def remark(self, value):
self._remark = value
@property
def trans_no(self):
return self._trans_no
@trans_no.setter
def trans_no(self, value):
self._trans_no = value
@property
def trans_type(self):
return self._trans_type
@trans_type.setter
def trans_type(self, value):
self._trans_type = value
def to_alipay_dict(self):
params = dict()
if self.op_time:
if hasattr(self.op_time, 'to_alipay_dict'):
params['op_time'] = self.op_time.to_alipay_dict()
else:
params['op_time'] = self.op_time
if self.point:
if hasattr(self.point, 'to_alipay_dict'):
params['point'] = self.point.to_alipay_dict()
else:
params['point'] = self.point
if self.remark:
if hasattr(self.remark, 'to_alipay_dict'):
params['remark'] = self.remark.to_alipay_dict()
else:
params['remark'] = self.remark
if self.trans_no:
if hasattr(self.trans_no, 'to_alipay_dict'):
params['trans_no'] = self.trans_no.to_alipay_dict()
else:
params['trans_no'] = self.trans_no
if self.trans_type:
if hasattr(self.trans_type, 'to_alipay_dict'):
params['trans_type'] = self.trans_type.to_alipay_dict()
else:
params['trans_type'] = self.trans_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = PointTransInfo()
if 'op_time' in d:
o.op_time = d['op_time']
if 'point' in d:
o.point = d['point']
if 'remark' in d:
o.remark = d['remark']
if 'trans_no' in d:
o.trans_no = d['trans_no']
if 'trans_type' in d:
o.trans_type = d['trans_type']
return o | alipay/aop/api/domain/PointTransInfo.py | import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class PointTransInfo(object):
def __init__(self):
self._op_time = None
self._point = None
self._remark = None
self._trans_no = None
self._trans_type = None
@property
def op_time(self):
return self._op_time
@op_time.setter
def op_time(self, value):
self._op_time = value
@property
def point(self):
return self._point
@point.setter
def point(self, value):
self._point = value
@property
def remark(self):
return self._remark
@remark.setter
def remark(self, value):
self._remark = value
@property
def trans_no(self):
return self._trans_no
@trans_no.setter
def trans_no(self, value):
self._trans_no = value
@property
def trans_type(self):
return self._trans_type
@trans_type.setter
def trans_type(self, value):
self._trans_type = value
def to_alipay_dict(self):
params = dict()
if self.op_time:
if hasattr(self.op_time, 'to_alipay_dict'):
params['op_time'] = self.op_time.to_alipay_dict()
else:
params['op_time'] = self.op_time
if self.point:
if hasattr(self.point, 'to_alipay_dict'):
params['point'] = self.point.to_alipay_dict()
else:
params['point'] = self.point
if self.remark:
if hasattr(self.remark, 'to_alipay_dict'):
params['remark'] = self.remark.to_alipay_dict()
else:
params['remark'] = self.remark
if self.trans_no:
if hasattr(self.trans_no, 'to_alipay_dict'):
params['trans_no'] = self.trans_no.to_alipay_dict()
else:
params['trans_no'] = self.trans_no
if self.trans_type:
if hasattr(self.trans_type, 'to_alipay_dict'):
params['trans_type'] = self.trans_type.to_alipay_dict()
else:
params['trans_type'] = self.trans_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = PointTransInfo()
if 'op_time' in d:
o.op_time = d['op_time']
if 'point' in d:
o.point = d['point']
if 'remark' in d:
o.remark = d['remark']
if 'trans_no' in d:
o.trans_no = d['trans_no']
if 'trans_type' in d:
o.trans_type = d['trans_type']
return o | 0.52683 | 0.176033 |
import os
import time
data_dir = "ic-data//extra"
label_file = 'ic-data//extra.label'
write_label = open(label_file, 'w+')
def file_list(data_dir):
filenames = []
for root, dirs, files in os.walk(data_dir):
for file in files:
if os.path.splitext(file)[1] == '.jpg':
filenames.append(os.path.splitext(file)[0])
return filenames
filenames = file_list(data_dir)
for filename in filenames:
name = int(filename)
print(name)
if name > 0 and name < 101:
write_content = str(name)+' '+str(7)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 295 and name < 396:
write_content = str(name)+' '+str(2)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 600 and name < 701:
write_content = str(name)+' '+str(6)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 1030 and name < 1131:
write_content = str(name)+' '+str(5)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 1334 and name < 1435:
write_content = str(name)+' '+str(10)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 1638 and name < 1739:
write_content = str(name)+' '+str(4)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 1946 and name < 2047:
write_content = str(name)+' '+str(1)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 2214 and name < 2365:
write_content = str(name)+' '+str(8)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 2691 and name < 2792:
write_content = str(name)+' '+str(9)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 3121 and name < 3222:
write_content = str(name)+' '+str(3)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 63350 and name < 66708:
write_content = str(name)+' '+str(11)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 72196:
write_content = str(name)+' '+str(12)
print(write_content)
write_label.writelines(write_content + '\n')
write_label.close() | extra_label.py | import os
import time
data_dir = "ic-data//extra"
label_file = 'ic-data//extra.label'
write_label = open(label_file, 'w+')
def file_list(data_dir):
filenames = []
for root, dirs, files in os.walk(data_dir):
for file in files:
if os.path.splitext(file)[1] == '.jpg':
filenames.append(os.path.splitext(file)[0])
return filenames
filenames = file_list(data_dir)
for filename in filenames:
name = int(filename)
print(name)
if name > 0 and name < 101:
write_content = str(name)+' '+str(7)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 295 and name < 396:
write_content = str(name)+' '+str(2)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 600 and name < 701:
write_content = str(name)+' '+str(6)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 1030 and name < 1131:
write_content = str(name)+' '+str(5)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 1334 and name < 1435:
write_content = str(name)+' '+str(10)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 1638 and name < 1739:
write_content = str(name)+' '+str(4)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 1946 and name < 2047:
write_content = str(name)+' '+str(1)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 2214 and name < 2365:
write_content = str(name)+' '+str(8)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 2691 and name < 2792:
write_content = str(name)+' '+str(9)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 3121 and name < 3222:
write_content = str(name)+' '+str(3)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 63350 and name < 66708:
write_content = str(name)+' '+str(11)
print(write_content)
write_label.writelines(write_content + '\n')
if name > 72196:
write_content = str(name)+' '+str(12)
print(write_content)
write_label.writelines(write_content + '\n')
write_label.close() | 0.094463 | 0.054727 |
import numpy as np
import tensorflow as tf
from .nes import NesModel
tf.compat.v1.disable_v2_behavior()
class NesRbModel(NesModel):
"""
Use forwardpass to determine random bases coordinates.
There is the subtle difference with standard NES in that all offspring are evaluated on the same mini-batch
"""
def on_create(self):
self._ipu_.get_session()
self._ipu_.configure()
self._image_data_.load()
self.offspring_seed = None
self._weights = []
infeed = self.prepare_data(
self.data["train"], self._image_data_.steps_per_epoch("train")
)
with self._ipu_.device():
self.train_op = self._ipu_.compile(
lambda: self._ipu_.loops_repeat(
n=self._image_data_.steps_per_epoch("train"),
body=lambda *args, **kwargs: self._build(*args, **kwargs),
inputs=[tf.constant(0, tf.float32), tf.constant(0, tf.float32)],
infeed_queue=infeed,
divide_by_n=True,
),
[],
)
self.network.summary()
# evaluation (placed on CPU)
self.eval_op = {
split: self._ipu_.loops_repeat(
n=self._image_data_.steps_per_epoch(split),
body=lambda *args, **kwargs: self._build(
*args, evaluate=True, **kwargs
),
inputs=[tf.constant(0, tf.float32), tf.constant(0, tf.float32)],
infeed_queue=self.prepare_data(
self.data[split],
self._image_data_.steps_per_epoch(split),
infeed=False,
),
divide_by_n=True,
mode="cpu",
)
for split in ["validation", "test"]
}
if self.config.ipu.enabled:
self.sess.run(infeed.initializer)
self.sess.run(tf.compat.v1.global_variables_initializer())
def _build(
self, total_loss, total_acc, t, image, label, lr, worker=None, evaluate=False
):
self.coordinates = tf.Variable(
lambda: tf.zeros(shape=[self.config.base_dimensions])
)
if evaluate:
if not getattr(self, "evaluation_network", None):
self.evaluation_network = self._image_network_.load(
name=self.config.network,
classes=self.dataset_info.features["label"].num_classes,
input_shape=self.dataset_info.features["image"].shape,
)
predictions = self.evaluation_network(image)
loss = tf.reduce_mean(
tf.keras.losses.categorical_crossentropy(label, predictions)
)
acc = tf.reduce_mean(
tf.keras.metrics.categorical_accuracy(label, predictions)
)
return tf.add(total_loss, loss), tf.add(total_acc, acc)
if self.config.reset_base_each_step:
base = t
else:
base = 0
# generate seeds
seeds = self.base_seeds_generator(base)
ta_seeds = tf.TensorArray(
dtype=tf.int32, size=self.config.base_dimensions, element_shape=[]
).unstack(seeds)
def offspring_loop(index, population_loss, population_acc):
self.offspring_seed = ta_seeds.read(index)
self._apply_layer_ops()
self.network = network = self._image_network_.load(
name=self.config.network,
classes=self.dataset_info.features["label"].num_classes,
input_shape=self.dataset_info.features["image"].shape,
)
predictions = network(image)
self._rollback_layer_ops()
loss_object = tf.keras.losses.CategoricalCrossentropy()
loss = loss_object(label, predictions)
acc = tf.reduce_mean(
tf.keras.metrics.categorical_accuracy(label, predictions)
)
write_op = tf.compat.v1.scatter_update(self.coordinates, index, loss)
with tf.control_dependencies([write_op]):
return (
tf.add(index, 1),
tf.add(population_loss, loss),
tf.add(population_acc, acc),
)
offspring, offspring_loss, offspring_acc = self._ipu_.loops_repeat(
n=self.config.base_dimensions,
body=offspring_loop,
inputs=[
tf.constant(0, tf.int32),
tf.constant(0, tf.float32),
tf.constant(0, tf.float32),
],
divide_by_n=True,
mode="tensorflow",
)
with tf.control_dependencies([offspring]):
values = tf.identity(self.coordinates)
if self.config.transformation == "norm":
zero = values - (
tf.ones(self.config.base_dimensions) * tf.reduce_min(values)
)
norm = tf.divide(zero, tf.reduce_max(zero))
transformed = (norm - 0.5) * -1 # shift and invert
elif self.config.transformation == "ranks":
argsort = tf.argsort(values, direction="DESCENDING")
ranks = tf.compat.v1.scatter_update(
self.coordinates,
argsort,
tf.cast(tf.range(tf.shape(values)[0]), dtype=tf.float32),
)
transformed = (
tf.divide(ranks, tf.cast(tf.shape(ranks)[0] - 1, dtype=tf.float32))
- 0.5
)
else:
transformed = tf.identity(values)
coordinates = transformed
update_ops = []
for (weight, state) in self._weights:
gradient = self._random_base_.product(
coordinates=coordinates,
seeds=seeds,
state=state,
shape=weight.shape,
)
step = lr * gradient
update_op = tf.keras.backend.update_add(weight, step)
update_ops.append(update_op)
update_op = tf.group(update_ops)
with tf.control_dependencies([update_op]):
return tf.add(total_loss, offspring_loss), tf.add(total_acc, offspring_acc)
def on_execute(self):
r = self.record
for epoch in range(1, int(self.config.epochs)):
loss, acc = self.sess.run(self.train_op)
self.evaluation_network.set_weights(self.network.get_weights())
r["val_loss"], r["val_acc"] = self.sess.run(self.eval_op["validation"])
r["val_acc"] *= 100
r["epoch"] = epoch
r["loss"] = loss
r["acc"] = acc * 100
r["steps"] = self._image_data_.steps_per_epoch("train")
r["images"] = self._image_data_.images_per_epoch("train")
r["images_total"] = r["images"] * epoch
r["images_per_second"] = r["images"] / self.record.timing()
coordinates = self.sess.run(self.coordinates)
r["coordinates"] = {
"mean": np.mean(coordinates),
"std": np.std(coordinates),
"min": np.min(coordinates),
"max": np.max(coordinates),
}
if self.config.stop_on_nan:
if np.isnan(r["val_loss"]) or (epoch > 10 and r["val_acc"] <= 15):
r.save(echo=True)
self.log.info(
"Training finished early due to NaNs or non-convergence"
)
return
r.save(echo=True)
self.evaluation_network.set_weights(self.network.get_weights())
test_loss, test_acc = self.sess.run(self.eval_op["test"])
self.storage.save_data(
"eval.json", {"test_acc": test_acc, "test_loss": test_loss}
) | models/rbd_nes.py |
import numpy as np
import tensorflow as tf
from .nes import NesModel
tf.compat.v1.disable_v2_behavior()
class NesRbModel(NesModel):
"""
Use forwardpass to determine random bases coordinates.
There is the subtle difference with standard NES in that all offspring are evaluated on the same mini-batch
"""
def on_create(self):
self._ipu_.get_session()
self._ipu_.configure()
self._image_data_.load()
self.offspring_seed = None
self._weights = []
infeed = self.prepare_data(
self.data["train"], self._image_data_.steps_per_epoch("train")
)
with self._ipu_.device():
self.train_op = self._ipu_.compile(
lambda: self._ipu_.loops_repeat(
n=self._image_data_.steps_per_epoch("train"),
body=lambda *args, **kwargs: self._build(*args, **kwargs),
inputs=[tf.constant(0, tf.float32), tf.constant(0, tf.float32)],
infeed_queue=infeed,
divide_by_n=True,
),
[],
)
self.network.summary()
# evaluation (placed on CPU)
self.eval_op = {
split: self._ipu_.loops_repeat(
n=self._image_data_.steps_per_epoch(split),
body=lambda *args, **kwargs: self._build(
*args, evaluate=True, **kwargs
),
inputs=[tf.constant(0, tf.float32), tf.constant(0, tf.float32)],
infeed_queue=self.prepare_data(
self.data[split],
self._image_data_.steps_per_epoch(split),
infeed=False,
),
divide_by_n=True,
mode="cpu",
)
for split in ["validation", "test"]
}
if self.config.ipu.enabled:
self.sess.run(infeed.initializer)
self.sess.run(tf.compat.v1.global_variables_initializer())
def _build(
self, total_loss, total_acc, t, image, label, lr, worker=None, evaluate=False
):
self.coordinates = tf.Variable(
lambda: tf.zeros(shape=[self.config.base_dimensions])
)
if evaluate:
if not getattr(self, "evaluation_network", None):
self.evaluation_network = self._image_network_.load(
name=self.config.network,
classes=self.dataset_info.features["label"].num_classes,
input_shape=self.dataset_info.features["image"].shape,
)
predictions = self.evaluation_network(image)
loss = tf.reduce_mean(
tf.keras.losses.categorical_crossentropy(label, predictions)
)
acc = tf.reduce_mean(
tf.keras.metrics.categorical_accuracy(label, predictions)
)
return tf.add(total_loss, loss), tf.add(total_acc, acc)
if self.config.reset_base_each_step:
base = t
else:
base = 0
# generate seeds
seeds = self.base_seeds_generator(base)
ta_seeds = tf.TensorArray(
dtype=tf.int32, size=self.config.base_dimensions, element_shape=[]
).unstack(seeds)
def offspring_loop(index, population_loss, population_acc):
self.offspring_seed = ta_seeds.read(index)
self._apply_layer_ops()
self.network = network = self._image_network_.load(
name=self.config.network,
classes=self.dataset_info.features["label"].num_classes,
input_shape=self.dataset_info.features["image"].shape,
)
predictions = network(image)
self._rollback_layer_ops()
loss_object = tf.keras.losses.CategoricalCrossentropy()
loss = loss_object(label, predictions)
acc = tf.reduce_mean(
tf.keras.metrics.categorical_accuracy(label, predictions)
)
write_op = tf.compat.v1.scatter_update(self.coordinates, index, loss)
with tf.control_dependencies([write_op]):
return (
tf.add(index, 1),
tf.add(population_loss, loss),
tf.add(population_acc, acc),
)
offspring, offspring_loss, offspring_acc = self._ipu_.loops_repeat(
n=self.config.base_dimensions,
body=offspring_loop,
inputs=[
tf.constant(0, tf.int32),
tf.constant(0, tf.float32),
tf.constant(0, tf.float32),
],
divide_by_n=True,
mode="tensorflow",
)
with tf.control_dependencies([offspring]):
values = tf.identity(self.coordinates)
if self.config.transformation == "norm":
zero = values - (
tf.ones(self.config.base_dimensions) * tf.reduce_min(values)
)
norm = tf.divide(zero, tf.reduce_max(zero))
transformed = (norm - 0.5) * -1 # shift and invert
elif self.config.transformation == "ranks":
argsort = tf.argsort(values, direction="DESCENDING")
ranks = tf.compat.v1.scatter_update(
self.coordinates,
argsort,
tf.cast(tf.range(tf.shape(values)[0]), dtype=tf.float32),
)
transformed = (
tf.divide(ranks, tf.cast(tf.shape(ranks)[0] - 1, dtype=tf.float32))
- 0.5
)
else:
transformed = tf.identity(values)
coordinates = transformed
update_ops = []
for (weight, state) in self._weights:
gradient = self._random_base_.product(
coordinates=coordinates,
seeds=seeds,
state=state,
shape=weight.shape,
)
step = lr * gradient
update_op = tf.keras.backend.update_add(weight, step)
update_ops.append(update_op)
update_op = tf.group(update_ops)
with tf.control_dependencies([update_op]):
return tf.add(total_loss, offspring_loss), tf.add(total_acc, offspring_acc)
def on_execute(self):
r = self.record
for epoch in range(1, int(self.config.epochs)):
loss, acc = self.sess.run(self.train_op)
self.evaluation_network.set_weights(self.network.get_weights())
r["val_loss"], r["val_acc"] = self.sess.run(self.eval_op["validation"])
r["val_acc"] *= 100
r["epoch"] = epoch
r["loss"] = loss
r["acc"] = acc * 100
r["steps"] = self._image_data_.steps_per_epoch("train")
r["images"] = self._image_data_.images_per_epoch("train")
r["images_total"] = r["images"] * epoch
r["images_per_second"] = r["images"] / self.record.timing()
coordinates = self.sess.run(self.coordinates)
r["coordinates"] = {
"mean": np.mean(coordinates),
"std": np.std(coordinates),
"min": np.min(coordinates),
"max": np.max(coordinates),
}
if self.config.stop_on_nan:
if np.isnan(r["val_loss"]) or (epoch > 10 and r["val_acc"] <= 15):
r.save(echo=True)
self.log.info(
"Training finished early due to NaNs or non-convergence"
)
return
r.save(echo=True)
self.evaluation_network.set_weights(self.network.get_weights())
test_loss, test_acc = self.sess.run(self.eval_op["test"])
self.storage.save_data(
"eval.json", {"test_acc": test_acc, "test_loss": test_loss}
) | 0.882896 | 0.332161 |
import tornado.httpserver
import tornado.web
from MysqlHelper import MysqlHelper
from lib.CCPRestSDK import REST
import json
import os
import glob
import time
from lib.sendMsg import Mail
import random
import hashlib
import sys
# Python 2 only: force the interpreter's default string encoding to UTF-8 so
# implicit str<->unicode conversions (Chinese filenames/messages below) do not
# raise UnicodeDecodeError.
# NOTE(review): sys.setdefaultencoding is a well-known Py2 hack (removed in
# Python 3); this module will need rework for a Python 3 port.
reload(sys)
sys.setdefaultencoding( "utf-8" )
# Staging directory for uploaded files: a "files" directory one level above
# this module; one sub-directory per session id is created under it.
upload_path = os.path.join(os.path.dirname(__file__), '../files') # temporary storage path for uploaded files
class IndexHandler(tornado.web.RequestHandler):
    """Serve the upload page and accept multipart file uploads.

    Each browser session is identified by a signed ``sessionid`` cookie.
    Uploaded files are staged under ``upload_path/<sessionid>/``; reloading
    the page purges any files left over from the session's previous run.
    """

    def get(self):
        """Render the index page, creating or recycling the session.

        First visit (no cookie): mint a session id and set the cookie.
        Returning visit: wipe the session's staged uploads and reports.
        """
        cookie = self.get_secure_cookie("sessionid")
        if not cookie:
            # Derive a session id from the clock plus some randomness.
            # NOTE(review): time + random.uniform hashed with md5 is not a
            # cryptographically strong token source; os.urandom would be
            # preferable, but the token format is kept unchanged here.
            random_value = time.time() + random.uniform(0, 100)
            session_id = hashlib.md5(str(random_value)).hexdigest()
            self.set_secure_cookie('sessionid', session_id)
        else:
            # Returning visitor: clear previously uploaded files and any
            # generated reports so the new run starts from a clean slate.
            session_id_path = upload_path + '/' + cookie
            report_path = './report/' + cookie
            self.del_file(session_id_path)
            self.del_file(report_path)
        self.render("index.html")

    def post(self):
        """Store the uploaded file part(s) into the session's staging dir.

        Responds with a JSON object: ``{"code": 0}`` on success, or
        ``{"code": 1}`` when no valid session cookie accompanies the request.
        """
        cookie = self.get_secure_cookie("sessionid")
        if not cookie:
            # get_secure_cookie returns None when the cookie is absent or
            # its signature is invalid; the original code crashed with a
            # TypeError when concatenating None into the path.
            self.write(json.dumps({"code": 1, "msg": "会话不存在, 请刷新页面"}))
            return
        session_id_path = upload_path + '/' + cookie
        if not os.path.exists(session_id_path):
            os.makedirs(session_id_path)
        # Every multipart part posted under the form field name 'file'.
        # Using .get avoids a KeyError (HTTP 500) when no file was selected.
        file_metas = self.request.files.get('file', [])
        for meta in file_metas:
            # basename strips any directory components a malicious client
            # could smuggle into the filename (path-traversal protection).
            filename = os.path.basename(meta['filename'])
            filepath = os.path.join(session_id_path, filename)
            with open(filepath, 'wb') as up:
                up.write(meta['body'])
        self.write(json.dumps({"code": 0, "msg": "返回成功"}))

    def del_file(self, path):
        """Recursively delete every file below *path*.

        Empty sub-directories are intentionally left in place, matching the
        original behaviour (only files are removed).
        """
        if os.path.exists(path):
            for i in os.listdir(path):
                path_file = os.path.join(path, i)
                if os.path.isfile(path_file):
                    os.remove(path_file)
                else:
                    self.del_file(path_file)
class MergeHandler(tornado.web.RequestHandler):
def get(self):
cookie = self.get_secure_cookie("sessionid")
report_path = './report/' + cookie
report_csv_list = glob.glob(report_path +'/*.csv')
report_csv_list.sort()
# print(report_csv_list)
if len(report_csv_list):
download_file_path = report_csv_list[-1]
print('开始下载' + download_file_path)
download_file_name = os.path.basename(download_file_path)
self.set_header('Content-Type', 'application/octet-stream')
self.set_header('Content-Disposition', 'attachment; filename=' + download_file_name)
# 读取的模式需要根据实际情况进行修改
with open(download_file_path, 'rb') as f:
while True:
data = f.read(1024*1024)
if not data:
break
self.write(data)
self.finish()
else:
self.write(json.dumps({"code": 0, "msg": "当前没有可下载文件"}))
def post(self):
email = self.get_argument('email','')
print(email)
cookie = self.get_secure_cookie("sessionid")
session_id_path = upload_path + '/' + cookie
report_path = './report/' + cookie
csv_list = glob.glob(session_id_path +'/*.csv')
if len(csv_list):
print('共发现%s个CSV文件' % len(csv_list))
print('正在处理............')
now = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))
report_path = './report/' + cookie
if not os.path.exists(report_path):
os.makedirs(report_path)
file_name = report_path + '/' + now + r"_report.csv"
print(file_name)
for i in csv_list:
file = open(i, 'r').read()
with open(file_name, 'a') as f:
f.write(file)
print('合并完毕!')
if len(email):
mailto_list = [email]
if Mail.send_mail_part(mailto_list, "数据已合并完毕", "你好,数据已处理完毕,请查收。", file_name):
self.write(json.dumps({"code": 1, "msg": "正在发送至您的邮箱"}))
else:
self.write(json.dumps({"code": 0, "msg": "发送邮箱失败,请直接下载"}))
else:
self.write(json.dumps({"code": 1, "msg": "开始下载"}))
else:
print('没有可合并的文件! ')
self.write(json.dumps({"code": 0, "msg": "当前没有可合并的文件"})) | handlers/index.py |
import tornado.httpserver
import tornado.web
from MysqlHelper import MysqlHelper
from lib.CCPRestSDK import REST
import json
import os
import glob
import time
from lib.sendMsg import Mail
import random
import hashlib
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
upload_path = os.path.join(os.path.dirname(__file__), '../files') # 文件的暂存路径
class IndexHandler(tornado.web.RequestHandler):
def get(self):
cookie = self.get_secure_cookie("sessionid")
# print(cookie)
# 没有cookie 设置cookie
if not cookie:
random_value = time.time() + random.uniform(0, 100)
session_id = hashlib.md5(str(random_value)).hexdigest()
self.set_secure_cookie('sessionid', session_id)
else:
# 有cookie 找到这个文件夹, 删除所有文件
session_id_path = upload_path + '/' + cookie
report_path = './report/' + cookie
self.del_file(session_id_path)
self.del_file(report_path)
self.render("index.html")
def post(self):
cookie = self.get_secure_cookie("sessionid")
session_id_path = upload_path + '/' + cookie
if not os.path.exists(session_id_path):
os.makedirs(session_id_path)
file_metas = self.request.files['file'] # 提取表单中‘name’为‘file’的文件元数据
for meta in file_metas:
filename = meta['filename']
filepath = os.path.join(session_id_path, filename)
with open(filepath, 'wb') as up:
up.write(meta['body'])
self.write(json.dumps({"code": 0, "msg": "返回成功"}))
def del_file(self,path):
if os.path.exists(path):
for i in os.listdir(path):
path_file = os.path.join(path, i)
if os.path.isfile(path_file):
os.remove(path_file)
else:
self.del_file(path_file)
class MergeHandler(tornado.web.RequestHandler):
def get(self):
cookie = self.get_secure_cookie("sessionid")
report_path = './report/' + cookie
report_csv_list = glob.glob(report_path +'/*.csv')
report_csv_list.sort()
# print(report_csv_list)
if len(report_csv_list):
download_file_path = report_csv_list[-1]
print('开始下载' + download_file_path)
download_file_name = os.path.basename(download_file_path)
self.set_header('Content-Type', 'application/octet-stream')
self.set_header('Content-Disposition', 'attachment; filename=' + download_file_name)
# 读取的模式需要根据实际情况进行修改
with open(download_file_path, 'rb') as f:
while True:
data = f.read(1024*1024)
if not data:
break
self.write(data)
self.finish()
else:
self.write(json.dumps({"code": 0, "msg": "当前没有可下载文件"}))
def post(self):
email = self.get_argument('email','')
print(email)
cookie = self.get_secure_cookie("sessionid")
session_id_path = upload_path + '/' + cookie
report_path = './report/' + cookie
csv_list = glob.glob(session_id_path +'/*.csv')
if len(csv_list):
print('共发现%s个CSV文件' % len(csv_list))
print('正在处理............')
now = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))
report_path = './report/' + cookie
if not os.path.exists(report_path):
os.makedirs(report_path)
file_name = report_path + '/' + now + r"_report.csv"
print(file_name)
for i in csv_list:
file = open(i, 'r').read()
with open(file_name, 'a') as f:
f.write(file)
print('合并完毕!')
if len(email):
mailto_list = [email]
if Mail.send_mail_part(mailto_list, "数据已合并完毕", "你好,数据已处理完毕,请查收。", file_name):
self.write(json.dumps({"code": 1, "msg": "正在发送至您的邮箱"}))
else:
self.write(json.dumps({"code": 0, "msg": "发送邮箱失败,请直接下载"}))
else:
self.write(json.dumps({"code": 1, "msg": "开始下载"}))
else:
print('没有可合并的文件! ')
self.write(json.dumps({"code": 0, "msg": "当前没有可合并的文件"})) | 0.061565 | 0.070208 |
import enum
from .available_cook_mode import AvailableCookMode
from .erd_oven_cook_mode import ErdOvenCookMode
@enum.unique
class ErdAvailableCookMode(enum.Enum):
"""
Available cooking modes.
In the XMPP API, they are represented as an index into an array of bytes and a bitmask.
Thus these take the form (byte: int, mask: int, cook_mode: ErdOvenCookMode). See ErdAvailableCookMode.smali
in the Android app.
The App appears to be a very small subset of the actual modes available. In addition, based on some older
documentation, it doesn't even look right. However, it may be that the modes in the app are the only usable
ones, so we will just comment out all the other modes... TODO: further testing on which modes are actually
available.
"""
# From GE Maker Site
# BYTE1_BIT0_1 = AvailableCookMode(byte=1, mask=1, cook_mode=ErdOvenCookMode.BAKE_NOOPTION)
# BYTE1_BIT1_1 = AvailableCookMode(byte=1, mask=2, cook_mode=ErdOvenCookMode.BAKE_PROBE)
# BYTE1_BIT2_1 = AvailableCookMode(byte=1, mask=4, cook_mode=ErdOvenCookMode.BAKE_DELAYSTART)
# #BYTE1_BIT3_1 = AvailableCookMode(byte=1, mask=8, cook_mode=ErdOvenCookMode.BAKETIMED)
# BYTE1_BIT4_1 = AvailableCookMode(byte=1, mask=16, cook_mode=ErdOvenCookMode.BAKETIMED_WARM)
# BYTE1_BIT5_1 = AvailableCookMode(byte=1, mask=32, cook_mode=ErdOvenCookMode.BAKETIMED_TWOTEMP)
# BYTE1_BIT6_1 = AvailableCookMode(byte=1, mask=64, cook_mode=ErdOvenCookMode.BAKE_PROBE_DELAYSTART)
# #BYTE1_BIT7_1 = AvailableCookMode(byte=1, mask=128, cook_mode=ErdOvenCookMode.BAKETIMED_DELAYSTART)
# BYTE2_BIT0_1 = AvailableCookMode(byte=2, mask=1, cook_mode=ErdOvenCookMode.BAKETIMED_WARM_DELAYSTART)
# BYTE2_BIT1_1 = AvailableCookMode(byte=2, mask=2, cook_mode=ErdOvenCookMode.BAKETIMED_TWOTEMP_DELAYSTART)
# BYTE2_BIT2_1 = AvailableCookMode(byte=2, mask=4, cook_mode=ErdOvenCookMode.BAKE_SABBATH)
# BYTE2_BIT3_1 = AvailableCookMode(byte=2, mask=8, cook_mode=ErdOvenCookMode.BROIL_HIGH)
# BYTE2_BIT4_1 = AvailableCookMode(byte=2, mask=16, cook_mode=ErdOvenCookMode.BROIL_LOW)
# BYTE2_BIT5_1 = AvailableCookMode(byte=2, mask=32, cook_mode=ErdOvenCookMode.PROOF_NOOPTION)
# BYTE2_BIT6_1 = AvailableCookMode(byte=2, mask=64, cook_mode=ErdOvenCookMode.WARM_NOOPTION)
# BYTE2_BIT7_1 = AvailableCookMode(byte=2, mask=128, cook_mode=ErdOvenCookMode.WARM_PROBE)
# BYTE3_BIT0_1 = AvailableCookMode(byte=3, mask=1, cook_mode=ErdOvenCookMode.CONVBAKE_NOOPTION)
# BYTE3_BIT1_1 = AvailableCookMode(byte=3, mask=2, cook_mode=ErdOvenCookMode.CONVBAKE_PROBE)
# BYTE3_BIT2_1 = AvailableCookMode(byte=3, mask=4, cook_mode=ErdOvenCookMode.CONVBAKE_DELAYSTART)
# #BYTE3_BIT3_1 = AvailableCookMode(byte=3, mask=8, cook_mode=ErdOvenCookMode.CONVBAKETIMED)
# BYTE3_BIT4_1 = AvailableCookMode(byte=3, mask=16, cook_mode=ErdOvenCookMode.CONVBAKETIMED_WARM)
# BYTE3_BIT5_1 = AvailableCookMode(byte=3, mask=32, cook_mode=ErdOvenCookMode.CONVBAKETIMED_TWOTEMP)
# BYTE3_BIT6_1 = AvailableCookMode(byte=3, mask=64, cook_mode=ErdOvenCookMode.CONVBAKE_PROBE_DELAYSTART)
# #BYTE3_BIT7_1 = AvailableCookMode(byte=3, mask=128, cook_mode=ErdOvenCookMode.CONVBAKETIMED_DELAYSTART)
# BYTE4_BIT0_1 = AvailableCookMode(byte=4, mask=1, cook_mode=ErdOvenCookMode.CONVBAKETIMED_WARM_DELAYSTART)
# BYTE4_BIT1_1 = AvailableCookMode(byte=4, mask=2, cook_mode=ErdOvenCookMode.CONVBAKETIMED_TWOTEMP_DELAYSTART)
# BYTE4_BIT2_1 = AvailableCookMode(byte=4, mask=4, cook_mode=ErdOvenCookMode.BAKE_SABBATH)
# BYTE4_BIT3_1 = AvailableCookMode(byte=4, mask=8, cook_mode=ErdOvenCookMode.BROIL_HIGH)
# BYTE4_BIT4_1 = AvailableCookMode(byte=4, mask=16, cook_mode=ErdOvenCookMode.BROIL_LOW)
# BYTE4_BIT5_1 = AvailableCookMode(byte=4, mask=32, cook_mode=ErdOvenCookMode.PROOF_NOOPTION)
# BYTE4_BIT6_1 = AvailableCookMode(byte=4, mask=64, cook_mode=ErdOvenCookMode.WARM_NOOPTION)
# BYTE4_BIT7_1 = AvailableCookMode(byte=4, mask=128, cook_mode=ErdOvenCookMode.WARM_PROBE)
# BYTE5_BIT0_1 = AvailableCookMode(byte=5, mask=1, cook_mode=ErdOvenCookMode.CONVMULTIBAKE_NOOPTION)
# BYTE5_BIT1_1 = AvailableCookMode(byte=5, mask=2, cook_mode=ErdOvenCookMode.CONVMULTIBAKE_PROBE)
# BYTE5_BIT2_1 = AvailableCookMode(byte=5, mask=4, cook_mode=ErdOvenCookMode.CONVMULTIBAKE_DELAYSTART)
# #BYTE5_BIT3_1 = AvailableCookMode(byte=5, mask=8, cook_mode=ErdOvenCookMode.CONVMULTIBAKETIMED)
# BYTE5_BIT4_1 = AvailableCookMode(byte=5, mask=16, cook_mode=ErdOvenCookMode.CONVBAKETIMED_WARM)
# BYTE5_BIT5_1 = AvailableCookMode(byte=5, mask=32, cook_mode=ErdOvenCookMode.CONVBAKETIMED_TWOTEMP)
# BYTE5_BIT6_1 = AvailableCookMode(byte=5, mask=64, cook_mode=ErdOvenCookMode.CONVMULTIBAKE_PROBE_DELAYSTART)
# #BYTE5_BIT7_1 = AvailableCookMode(byte=5, mask=128, cook_mode=ErdOvenCookMode.CONVMULTIBAKETIMED_DELAYSTART)
# BYTE6_BIT0_1 = AvailableCookMode(byte=6, mask=1, cook_mode=ErdOvenCookMode.CONVMULTIBAKETIMED_WARM_DELAYSTART)
# BYTE6_BIT1_1 = AvailableCookMode(byte=6, mask=2, cook_mode=ErdOvenCookMode.CONVMULTIBAKETIMED_TWOTEMP_DELAYSTART)
# BYTE6_BIT2_1 = AvailableCookMode(byte=6, mask=4, cook_mode=ErdOvenCookMode.CONVROAST_NOOPTION)
# BYTE6_BIT3_1 = AvailableCookMode(byte=6, mask=8, cook_mode=ErdOvenCookMode.CONVROAST_PROBE)
# BYTE6_BIT4_1 = AvailableCookMode(byte=6, mask=16, cook_mode=ErdOvenCookMode.CONVROAST_DELAYSTART)
# #BYTE6_BIT5_1 = AvailableCookMode(byte=6, mask=32, cook_mode=ErdOvenCookMode.CONVROASTTIMED)
# BYTE6_BIT6_1 = AvailableCookMode(byte=6, mask=64, cook_mode=ErdOvenCookMode.CONVBAKETIMED_WARM)
# BYTE6_BIT7_1 = AvailableCookMode(byte=6, mask=128, cook_mode=ErdOvenCookMode.CONVBAKETIMED_TWOTEMP)
# BYTE7_BIT0_1 = AvailableCookMode(byte=7, mask=1, cook_mode=ErdOvenCookMode.CONVROAST_PROBE_DELAYSTART)
# #BYTE7_BIT1_1 = AvailableCookMode(byte=7, mask=2, cook_mode=ErdOvenCookMode.CONVROASTTIMED_DELAYSTART)
# BYTE7_BIT2_1 = AvailableCookMode(byte=7, mask=4, cook_mode=ErdOvenCookMode.CONVBROIL_LOW_NOOPTION)
# BYTE7_BIT3_1 = AvailableCookMode(byte=7, mask=8, cook_mode=ErdOvenCookMode.CONVBROIL_HIGH_NOOPTION)
# BYTE7_BIT4_1 = AvailableCookMode(byte=7, mask=16, cook_mode=ErdOvenCookMode.CONVBROILCRISP_NOOPTION)
# BYTE7_BIT5_1 = AvailableCookMode(byte=7, mask=32, cook_mode=ErdOvenCookMode.CONVBROILCRISP_PROBE)
# #BYTE7_BIT6_1 = AvailableCookMode(byte=7, mask=64, cook_mode=ErdOvenCookMode.SELFCLEAN)
# BYTE7_BIT7_1 = AvailableCookMode(byte=7, mask=128, cook_mode=ErdOvenCookMode.STEAMCLEAN)
# From SmartHQ App
OVEN_BAKE = AvailableCookMode(byte=9, mask=2, cook_mode=ErdOvenCookMode.BAKE_NOOPTION)
OVEN_CONVECTION_BAKE = AvailableCookMode(byte=7, mask=4, cook_mode=ErdOvenCookMode.CONVBAKE_NOOPTION)
OVEN_CONVECTION_MULTI_BAKE = AvailableCookMode(byte=6, mask=8, cook_mode=ErdOvenCookMode.CONVMULTIBAKE_NOOPTION)
OVEN_CONVECTION_ROAST = AvailableCookMode(byte=5, mask=16, cook_mode=ErdOvenCookMode.CONVROAST_NOOPTION)
OVEN_FROZEN_SNACKS = AvailableCookMode(byte=2, mask=1, cook_mode=ErdOvenCookMode.FROZEN_SNACKS)
OVEN_FROZEN_SNACKS_MULTI = AvailableCookMode(byte=2, mask=2, cook_mode=ErdOvenCookMode.FROZEN_SNACKS_MULTI)
OVEN_FROZEN_PIZZA = AvailableCookMode(byte=2, mask=4, cook_mode=ErdOvenCookMode.FROZEN_PIZZA)
OVEN_FROZEN_PIZZA_MULTI = AvailableCookMode(byte=2, mask=8, cook_mode=ErdOvenCookMode.FROZEN_PIZZA_MULTI)
OVEN_BAKED_GOODS = AvailableCookMode(byte=2, mask=16, cook_mode=ErdOvenCookMode.BAKED_GOODS) | gehomesdk/erd/values/oven/erd_available_cook_mode.py | import enum
from .available_cook_mode import AvailableCookMode
from .erd_oven_cook_mode import ErdOvenCookMode
@enum.unique
class ErdAvailableCookMode(enum.Enum):
"""
Available cooking modes.
In the XMPP API, they are represented as an index into an array of bytes and a bitmask.
Thus these take the form (byte: int, mask: int, cook_mode: ErdOvenCookMode). See ErdAvailableCookMode.smali
in the Android app.
The App appears to be a very small subset of the actual modes available. In addition, based on some older
documentation, it doesn't even look right. However, it may be that the modes in the app are the only usable
ones, so we will just comment out all the other modes... TODO: further testing on which modes are actually
available.
"""
# From GE Maker Site
# BYTE1_BIT0_1 = AvailableCookMode(byte=1, mask=1, cook_mode=ErdOvenCookMode.BAKE_NOOPTION)
# BYTE1_BIT1_1 = AvailableCookMode(byte=1, mask=2, cook_mode=ErdOvenCookMode.BAKE_PROBE)
# BYTE1_BIT2_1 = AvailableCookMode(byte=1, mask=4, cook_mode=ErdOvenCookMode.BAKE_DELAYSTART)
# #BYTE1_BIT3_1 = AvailableCookMode(byte=1, mask=8, cook_mode=ErdOvenCookMode.BAKETIMED)
# BYTE1_BIT4_1 = AvailableCookMode(byte=1, mask=16, cook_mode=ErdOvenCookMode.BAKETIMED_WARM)
# BYTE1_BIT5_1 = AvailableCookMode(byte=1, mask=32, cook_mode=ErdOvenCookMode.BAKETIMED_TWOTEMP)
# BYTE1_BIT6_1 = AvailableCookMode(byte=1, mask=64, cook_mode=ErdOvenCookMode.BAKE_PROBE_DELAYSTART)
# #BYTE1_BIT7_1 = AvailableCookMode(byte=1, mask=128, cook_mode=ErdOvenCookMode.BAKETIMED_DELAYSTART)
# BYTE2_BIT0_1 = AvailableCookMode(byte=2, mask=1, cook_mode=ErdOvenCookMode.BAKETIMED_WARM_DELAYSTART)
# BYTE2_BIT1_1 = AvailableCookMode(byte=2, mask=2, cook_mode=ErdOvenCookMode.BAKETIMED_TWOTEMP_DELAYSTART)
# BYTE2_BIT2_1 = AvailableCookMode(byte=2, mask=4, cook_mode=ErdOvenCookMode.BAKE_SABBATH)
# BYTE2_BIT3_1 = AvailableCookMode(byte=2, mask=8, cook_mode=ErdOvenCookMode.BROIL_HIGH)
# BYTE2_BIT4_1 = AvailableCookMode(byte=2, mask=16, cook_mode=ErdOvenCookMode.BROIL_LOW)
# BYTE2_BIT5_1 = AvailableCookMode(byte=2, mask=32, cook_mode=ErdOvenCookMode.PROOF_NOOPTION)
# BYTE2_BIT6_1 = AvailableCookMode(byte=2, mask=64, cook_mode=ErdOvenCookMode.WARM_NOOPTION)
# BYTE2_BIT7_1 = AvailableCookMode(byte=2, mask=128, cook_mode=ErdOvenCookMode.WARM_PROBE)
# BYTE3_BIT0_1 = AvailableCookMode(byte=3, mask=1, cook_mode=ErdOvenCookMode.CONVBAKE_NOOPTION)
# BYTE3_BIT1_1 = AvailableCookMode(byte=3, mask=2, cook_mode=ErdOvenCookMode.CONVBAKE_PROBE)
# BYTE3_BIT2_1 = AvailableCookMode(byte=3, mask=4, cook_mode=ErdOvenCookMode.CONVBAKE_DELAYSTART)
# #BYTE3_BIT3_1 = AvailableCookMode(byte=3, mask=8, cook_mode=ErdOvenCookMode.CONVBAKETIMED)
# BYTE3_BIT4_1 = AvailableCookMode(byte=3, mask=16, cook_mode=ErdOvenCookMode.CONVBAKETIMED_WARM)
# BYTE3_BIT5_1 = AvailableCookMode(byte=3, mask=32, cook_mode=ErdOvenCookMode.CONVBAKETIMED_TWOTEMP)
# BYTE3_BIT6_1 = AvailableCookMode(byte=3, mask=64, cook_mode=ErdOvenCookMode.CONVBAKE_PROBE_DELAYSTART)
# #BYTE3_BIT7_1 = AvailableCookMode(byte=3, mask=128, cook_mode=ErdOvenCookMode.CONVBAKETIMED_DELAYSTART)
# BYTE4_BIT0_1 = AvailableCookMode(byte=4, mask=1, cook_mode=ErdOvenCookMode.CONVBAKETIMED_WARM_DELAYSTART)
# BYTE4_BIT1_1 = AvailableCookMode(byte=4, mask=2, cook_mode=ErdOvenCookMode.CONVBAKETIMED_TWOTEMP_DELAYSTART)
# BYTE4_BIT2_1 = AvailableCookMode(byte=4, mask=4, cook_mode=ErdOvenCookMode.BAKE_SABBATH)
# BYTE4_BIT3_1 = AvailableCookMode(byte=4, mask=8, cook_mode=ErdOvenCookMode.BROIL_HIGH)
# BYTE4_BIT4_1 = AvailableCookMode(byte=4, mask=16, cook_mode=ErdOvenCookMode.BROIL_LOW)
# BYTE4_BIT5_1 = AvailableCookMode(byte=4, mask=32, cook_mode=ErdOvenCookMode.PROOF_NOOPTION)
# BYTE4_BIT6_1 = AvailableCookMode(byte=4, mask=64, cook_mode=ErdOvenCookMode.WARM_NOOPTION)
# BYTE4_BIT7_1 = AvailableCookMode(byte=4, mask=128, cook_mode=ErdOvenCookMode.WARM_PROBE)
# BYTE5_BIT0_1 = AvailableCookMode(byte=5, mask=1, cook_mode=ErdOvenCookMode.CONVMULTIBAKE_NOOPTION)
# BYTE5_BIT1_1 = AvailableCookMode(byte=5, mask=2, cook_mode=ErdOvenCookMode.CONVMULTIBAKE_PROBE)
# BYTE5_BIT2_1 = AvailableCookMode(byte=5, mask=4, cook_mode=ErdOvenCookMode.CONVMULTIBAKE_DELAYSTART)
# #BYTE5_BIT3_1 = AvailableCookMode(byte=5, mask=8, cook_mode=ErdOvenCookMode.CONVMULTIBAKETIMED)
# BYTE5_BIT4_1 = AvailableCookMode(byte=5, mask=16, cook_mode=ErdOvenCookMode.CONVBAKETIMED_WARM)
# BYTE5_BIT5_1 = AvailableCookMode(byte=5, mask=32, cook_mode=ErdOvenCookMode.CONVBAKETIMED_TWOTEMP)
# BYTE5_BIT6_1 = AvailableCookMode(byte=5, mask=64, cook_mode=ErdOvenCookMode.CONVMULTIBAKE_PROBE_DELAYSTART)
# #BYTE5_BIT7_1 = AvailableCookMode(byte=5, mask=128, cook_mode=ErdOvenCookMode.CONVMULTIBAKETIMED_DELAYSTART)
# BYTE6_BIT0_1 = AvailableCookMode(byte=6, mask=1, cook_mode=ErdOvenCookMode.CONVMULTIBAKETIMED_WARM_DELAYSTART)
# BYTE6_BIT1_1 = AvailableCookMode(byte=6, mask=2, cook_mode=ErdOvenCookMode.CONVMULTIBAKETIMED_TWOTEMP_DELAYSTART)
# BYTE6_BIT2_1 = AvailableCookMode(byte=6, mask=4, cook_mode=ErdOvenCookMode.CONVROAST_NOOPTION)
# BYTE6_BIT3_1 = AvailableCookMode(byte=6, mask=8, cook_mode=ErdOvenCookMode.CONVROAST_PROBE)
# BYTE6_BIT4_1 = AvailableCookMode(byte=6, mask=16, cook_mode=ErdOvenCookMode.CONVROAST_DELAYSTART)
# #BYTE6_BIT5_1 = AvailableCookMode(byte=6, mask=32, cook_mode=ErdOvenCookMode.CONVROASTTIMED)
# BYTE6_BIT6_1 = AvailableCookMode(byte=6, mask=64, cook_mode=ErdOvenCookMode.CONVBAKETIMED_WARM)
# BYTE6_BIT7_1 = AvailableCookMode(byte=6, mask=128, cook_mode=ErdOvenCookMode.CONVBAKETIMED_TWOTEMP)
# BYTE7_BIT0_1 = AvailableCookMode(byte=7, mask=1, cook_mode=ErdOvenCookMode.CONVROAST_PROBE_DELAYSTART)
# #BYTE7_BIT1_1 = AvailableCookMode(byte=7, mask=2, cook_mode=ErdOvenCookMode.CONVROASTTIMED_DELAYSTART)
# BYTE7_BIT2_1 = AvailableCookMode(byte=7, mask=4, cook_mode=ErdOvenCookMode.CONVBROIL_LOW_NOOPTION)
# BYTE7_BIT3_1 = AvailableCookMode(byte=7, mask=8, cook_mode=ErdOvenCookMode.CONVBROIL_HIGH_NOOPTION)
# BYTE7_BIT4_1 = AvailableCookMode(byte=7, mask=16, cook_mode=ErdOvenCookMode.CONVBROILCRISP_NOOPTION)
# BYTE7_BIT5_1 = AvailableCookMode(byte=7, mask=32, cook_mode=ErdOvenCookMode.CONVBROILCRISP_PROBE)
# #BYTE7_BIT6_1 = AvailableCookMode(byte=7, mask=64, cook_mode=ErdOvenCookMode.SELFCLEAN)
# BYTE7_BIT7_1 = AvailableCookMode(byte=7, mask=128, cook_mode=ErdOvenCookMode.STEAMCLEAN)
# From SmartHQ App
OVEN_BAKE = AvailableCookMode(byte=9, mask=2, cook_mode=ErdOvenCookMode.BAKE_NOOPTION)
OVEN_CONVECTION_BAKE = AvailableCookMode(byte=7, mask=4, cook_mode=ErdOvenCookMode.CONVBAKE_NOOPTION)
OVEN_CONVECTION_MULTI_BAKE = AvailableCookMode(byte=6, mask=8, cook_mode=ErdOvenCookMode.CONVMULTIBAKE_NOOPTION)
OVEN_CONVECTION_ROAST = AvailableCookMode(byte=5, mask=16, cook_mode=ErdOvenCookMode.CONVROAST_NOOPTION)
OVEN_FROZEN_SNACKS = AvailableCookMode(byte=2, mask=1, cook_mode=ErdOvenCookMode.FROZEN_SNACKS)
OVEN_FROZEN_SNACKS_MULTI = AvailableCookMode(byte=2, mask=2, cook_mode=ErdOvenCookMode.FROZEN_SNACKS_MULTI)
OVEN_FROZEN_PIZZA = AvailableCookMode(byte=2, mask=4, cook_mode=ErdOvenCookMode.FROZEN_PIZZA)
OVEN_FROZEN_PIZZA_MULTI = AvailableCookMode(byte=2, mask=8, cook_mode=ErdOvenCookMode.FROZEN_PIZZA_MULTI)
OVEN_BAKED_GOODS = AvailableCookMode(byte=2, mask=16, cook_mode=ErdOvenCookMode.BAKED_GOODS) | 0.313945 | 0.223748 |
import json
from abc import ABC
from types import ModuleType
from typing import Any, Dict, List, Optional, Tuple
from sqlalchemy.engine import Connection, default
from sqlalchemy.engine.url import URL
from sqlalchemy.sql import compiler
from sqlalchemy.types import DATE, TIMESTAMP, BigInteger, Boolean, Float, Integer, String
import sqlalchemy_kusto
def parse_bool_argument(value: str) -> bool:
if value in ("True", "true"):
return True
if value in ("False", "false"):
return False
raise ValueError(f"Expected boolean found {value}")
kql_to_sql_types = {
"bool": Boolean,
"boolean": Boolean,
"datetime": TIMESTAMP,
"date": DATE,
"dynamic": String,
"stringbuffer": String,
"guid": String,
"int": Integer,
"i32": Integer,
"i16": Integer,
"i8": Integer,
"r64": Float,
"r32": Float,
"long": BigInteger,
"i64": BigInteger,
"string": String,
"timespan": String,
"decimal": Float,
"real": Float,
}
class KustoBaseDialect(default.DefaultDialect, ABC):
driver = "rest"
type_compiler = compiler.GenericTypeCompiler
preparer = compiler.IdentifierPreparer
supports_alter = False
supports_pk_autoincrement = False
supports_default_values = True
supports_empty_insert = False
supports_unicode_statements = True
supports_unicode_binds = True
returns_unicode_strings = True
description_encoding = None
supports_native_boolean = True
supports_simple_order_by_label = True
_map_parse_connection_parameters: Dict[str, Any] = {
"msi": parse_bool_argument,
"azure_ad_client_id": str,
"azure_ad_client_secret": str,
"azure_ad_tenant_id": str,
"user_msi": str,
}
@classmethod
def dbapi(cls) -> ModuleType: # pylint: disable-msg=method-hidden
return sqlalchemy_kusto
def create_connect_args(self, url: URL) -> Tuple[List[Any], Dict[str, Any]]:
kwargs: Dict[str, Any] = {
"cluster": "https://" + url.host,
"database": url.database,
}
if url.query:
kwargs.update(url.query)
for name, parse_func in self._map_parse_connection_parameters.items():
if name in kwargs:
kwargs[name] = parse_func(url.query[name])
return [], kwargs
def get_schema_names(self, connection: Connection, **kwargs) -> List[str]:
result = connection.execute(".show databases | project DatabaseName")
return [row.DatabaseName for row in result]
def has_table(self, connection: Connection, table_name: str, schema: Optional[str] = None, **kwargs) -> bool:
return table_name in self.get_table_names(connection, schema)
def get_table_names(self, connection: Connection, schema: Optional[str] = None, **kwargs) -> List[str]:
# Schema is not used in Kusto cause database is written in the connection string
result = connection.execute(".show tables | project TableName")
return [row.TableName for row in result]
def get_columns(
self, connection: Connection, table_name: str, schema: Optional[str] = None, **kwargs
) -> List[Dict[str, Any]]:
table_search_query = f"""
.show tables
| where TableName == "{table_name}"
"""
table_search_result = connection.execute(table_search_query)
entity_type = "table" if table_search_result.rowcount == 1 else "materialized-view"
query = f".show {entity_type} {table_name} schema as json"
query_result = connection.execute(query)
rows = list(query_result)
entity_schema = json.loads(rows[0].Schema)
return [
{
"name": column["Name"],
"type": kql_to_sql_types[column["CslType"].lower()],
"nullable": True,
"default": "",
}
for column in entity_schema["OrderedColumns"]
]
def get_view_names(self, connection: Connection, schema: Optional[str] = None, **kwargs) -> List[str]:
result = connection.execute(".show materialized-views | project Name")
return [row.Name for row in result]
def get_pk_constraint(self, conn: Connection, table_name: str, schema: Optional[str] = None, **kw):
return {"constrained_columns": [], "name": None}
def get_foreign_keys(self, connection, table_name, schema=None, **kwargs):
return []
def get_check_constraints(self, connection: Connection, table_name: str, schema: Optional[str] = None, **kwargs):
return []
def get_table_comment(
self, connection: Connection, table_name, schema: Optional[str] = None, **kwargs
) -> Dict[str, Any]:
"""Not implemented"""
return {"text": ""}
def get_indexes(
self, connection: Connection, table_name: str, schema: Optional[str] = None, **kwargs
) -> List[Dict[str, Any]]:
return []
def get_unique_constraints(self, connection: Connection, table_name: str, schema: Optional[str] = None, **kwargs):
return []
def _check_unicode_returns(self, connection: Connection, additional_tests: List[Any] = None) -> bool:
return True
def _check_unicode_description(self, connection: Connection) -> bool:
return True
def do_ping(self, dbapi_connection: sqlalchemy_kusto.dbapi.Connection):
try:
query = ".show tables"
dbapi_connection.execute(query)
return True
except sqlalchemy_kusto.OperationalError:
return False
def do_rollback(self, dbapi_connection: sqlalchemy_kusto.dbapi.Connection):
pass
def get_temp_table_names(self, connection, schema=None, **kw):
pass
def get_sequence_names(self, connection, schema=None, **kw):
pass
def get_temp_view_names(self, connection, schema=None, **kw):
pass
def has_sequence(self, connection, sequence_name, schema=None, **kw):
pass
def _get_server_version_info(self, connection):
pass
def _get_default_schema_name(self, connection):
pass
def do_set_input_sizes(self, cursor, list_of_tuples, context):
pass
def do_begin_twophase(self, connection, xid):
pass
def do_prepare_twophase(self, connection, xid):
pass
def do_rollback_twophase(self, connection, xid, is_prepared=True, recover=False):
pass
def do_commit_twophase(self, connection, xid, is_prepared=True, recover=False):
pass
def do_recover_twophase(self, connection):
pass
def set_isolation_level(self, dbapi_conn, level):
pass
def get_isolation_level(self, dbapi_conn):
pass
def get_view_definition(self, connection: Connection, view_name: str, schema: Optional[str] = None, **kwargs):
pass
def get_primary_keys(self, connection, table_name, schema=None, **kw):
pass | sqlalchemy_kusto/dialect_base.py | import json
from abc import ABC
from types import ModuleType
from typing import Any, Dict, List, Optional, Tuple
from sqlalchemy.engine import Connection, default
from sqlalchemy.engine.url import URL
from sqlalchemy.sql import compiler
from sqlalchemy.types import DATE, TIMESTAMP, BigInteger, Boolean, Float, Integer, String
import sqlalchemy_kusto
def parse_bool_argument(value: str) -> bool:
if value in ("True", "true"):
return True
if value in ("False", "false"):
return False
raise ValueError(f"Expected boolean found {value}")
kql_to_sql_types = {
"bool": Boolean,
"boolean": Boolean,
"datetime": TIMESTAMP,
"date": DATE,
"dynamic": String,
"stringbuffer": String,
"guid": String,
"int": Integer,
"i32": Integer,
"i16": Integer,
"i8": Integer,
"r64": Float,
"r32": Float,
"long": BigInteger,
"i64": BigInteger,
"string": String,
"timespan": String,
"decimal": Float,
"real": Float,
}
class KustoBaseDialect(default.DefaultDialect, ABC):
driver = "rest"
type_compiler = compiler.GenericTypeCompiler
preparer = compiler.IdentifierPreparer
supports_alter = False
supports_pk_autoincrement = False
supports_default_values = True
supports_empty_insert = False
supports_unicode_statements = True
supports_unicode_binds = True
returns_unicode_strings = True
description_encoding = None
supports_native_boolean = True
supports_simple_order_by_label = True
_map_parse_connection_parameters: Dict[str, Any] = {
"msi": parse_bool_argument,
"azure_ad_client_id": str,
"azure_ad_client_secret": str,
"azure_ad_tenant_id": str,
"user_msi": str,
}
@classmethod
def dbapi(cls) -> ModuleType: # pylint: disable-msg=method-hidden
return sqlalchemy_kusto
def create_connect_args(self, url: URL) -> Tuple[List[Any], Dict[str, Any]]:
kwargs: Dict[str, Any] = {
"cluster": "https://" + url.host,
"database": url.database,
}
if url.query:
kwargs.update(url.query)
for name, parse_func in self._map_parse_connection_parameters.items():
if name in kwargs:
kwargs[name] = parse_func(url.query[name])
return [], kwargs
def get_schema_names(self, connection: Connection, **kwargs) -> List[str]:
result = connection.execute(".show databases | project DatabaseName")
return [row.DatabaseName for row in result]
def has_table(self, connection: Connection, table_name: str, schema: Optional[str] = None, **kwargs) -> bool:
return table_name in self.get_table_names(connection, schema)
def get_table_names(self, connection: Connection, schema: Optional[str] = None, **kwargs) -> List[str]:
# Schema is not used in Kusto cause database is written in the connection string
result = connection.execute(".show tables | project TableName")
return [row.TableName for row in result]
def get_columns(
self, connection: Connection, table_name: str, schema: Optional[str] = None, **kwargs
) -> List[Dict[str, Any]]:
table_search_query = f"""
.show tables
| where TableName == "{table_name}"
"""
table_search_result = connection.execute(table_search_query)
entity_type = "table" if table_search_result.rowcount == 1 else "materialized-view"
query = f".show {entity_type} {table_name} schema as json"
query_result = connection.execute(query)
rows = list(query_result)
entity_schema = json.loads(rows[0].Schema)
return [
{
"name": column["Name"],
"type": kql_to_sql_types[column["CslType"].lower()],
"nullable": True,
"default": "",
}
for column in entity_schema["OrderedColumns"]
]
def get_view_names(self, connection: Connection, schema: Optional[str] = None, **kwargs) -> List[str]:
result = connection.execute(".show materialized-views | project Name")
return [row.Name for row in result]
def get_pk_constraint(self, conn: Connection, table_name: str, schema: Optional[str] = None, **kw):
return {"constrained_columns": [], "name": None}
def get_foreign_keys(self, connection, table_name, schema=None, **kwargs):
return []
def get_check_constraints(self, connection: Connection, table_name: str, schema: Optional[str] = None, **kwargs):
return []
def get_table_comment(
self, connection: Connection, table_name, schema: Optional[str] = None, **kwargs
) -> Dict[str, Any]:
"""Not implemented"""
return {"text": ""}
def get_indexes(
self, connection: Connection, table_name: str, schema: Optional[str] = None, **kwargs
) -> List[Dict[str, Any]]:
return []
def get_unique_constraints(self, connection: Connection, table_name: str, schema: Optional[str] = None, **kwargs):
return []
def _check_unicode_returns(self, connection: Connection, additional_tests: List[Any] = None) -> bool:
return True
def _check_unicode_description(self, connection: Connection) -> bool:
return True
def do_ping(self, dbapi_connection: sqlalchemy_kusto.dbapi.Connection):
try:
query = ".show tables"
dbapi_connection.execute(query)
return True
except sqlalchemy_kusto.OperationalError:
return False
def do_rollback(self, dbapi_connection: sqlalchemy_kusto.dbapi.Connection):
pass
def get_temp_table_names(self, connection, schema=None, **kw):
pass
def get_sequence_names(self, connection, schema=None, **kw):
pass
def get_temp_view_names(self, connection, schema=None, **kw):
pass
def has_sequence(self, connection, sequence_name, schema=None, **kw):
pass
def _get_server_version_info(self, connection):
pass
def _get_default_schema_name(self, connection):
pass
def do_set_input_sizes(self, cursor, list_of_tuples, context):
pass
def do_begin_twophase(self, connection, xid):
pass
def do_prepare_twophase(self, connection, xid):
pass
def do_rollback_twophase(self, connection, xid, is_prepared=True, recover=False):
pass
def do_commit_twophase(self, connection, xid, is_prepared=True, recover=False):
pass
def do_recover_twophase(self, connection):
pass
def set_isolation_level(self, dbapi_conn, level):
pass
def get_isolation_level(self, dbapi_conn):
pass
def get_view_definition(self, connection: Connection, view_name: str, schema: Optional[str] = None, **kwargs):
pass
def get_primary_keys(self, connection, table_name, schema=None, **kw):
pass | 0.791096 | 0.231766 |
from functools import lru_cache
from erica.erica_legacy.elster_xml import elster_xml_generator
from erica.erica_legacy.elster_xml.xml_parsing.erica_xml_parsing import remove_declaration_and_namespace
from erica.erica_legacy.pyeric.pyeric_controller import PermitListingPyericProcessController
SPECIAL_TESTMERKER_IDNR = ['04452397687',
'02259674819',
'04452317681',
'09952417688',
'03352417692',
'03352419681',
'03352417981',
'03392417683',
'03352917681',
'03359417681']
NEW_REQUEST_ID_SINCE_LAST_CACHE_INVALIDATION = []
def reset_new_request_id_list():
global NEW_REQUEST_ID_SINCE_LAST_CACHE_INVALIDATION
NEW_REQUEST_ID_SINCE_LAST_CACHE_INVALIDATION = []
def add_new_request_id_to_cache_list(request_id):
global NEW_REQUEST_ID_SINCE_LAST_CACHE_INVALIDATION
NEW_REQUEST_ID_SINCE_LAST_CACHE_INVALIDATION.append(request_id)
def get_vast_list_from_xml(xml):
simple_xml = remove_declaration_and_namespace(xml)
return {antrag.find('.//AntragsID').text: antrag.find('.//DateninhaberIdNr').text for antrag in simple_xml.findall('.//Antrag')}
@lru_cache
def get_list_vast_requests(pyeric_controller):
xml = elster_xml_generator.generate_full_vast_list_xml()
result = pyeric_controller(xml=xml).get_eric_response()
vast_request_list = get_vast_list_from_xml(result.server_response)
reset_new_request_id_list()
return vast_request_list
def tax_id_number_is_test_id_number(tax_id_number):
return tax_id_number in SPECIAL_TESTMERKER_IDNR
def request_needs_testmerker(request_id):
if request_id in NEW_REQUEST_ID_SINCE_LAST_CACHE_INVALIDATION:
get_list_vast_requests.cache_clear()
return tax_id_number_is_test_id_number(get_list_vast_requests(PermitListingPyericProcessController).get(request_id)) | erica/erica_legacy/pyeric/check_elster_request_id.py | from functools import lru_cache
from erica.erica_legacy.elster_xml import elster_xml_generator
from erica.erica_legacy.elster_xml.xml_parsing.erica_xml_parsing import remove_declaration_and_namespace
from erica.erica_legacy.pyeric.pyeric_controller import PermitListingPyericProcessController
SPECIAL_TESTMERKER_IDNR = ['04452397687',
'02259674819',
'04452317681',
'09952417688',
'03352417692',
'03352419681',
'03352417981',
'03392417683',
'03352917681',
'03359417681']
NEW_REQUEST_ID_SINCE_LAST_CACHE_INVALIDATION = []
def reset_new_request_id_list():
global NEW_REQUEST_ID_SINCE_LAST_CACHE_INVALIDATION
NEW_REQUEST_ID_SINCE_LAST_CACHE_INVALIDATION = []
def add_new_request_id_to_cache_list(request_id):
global NEW_REQUEST_ID_SINCE_LAST_CACHE_INVALIDATION
NEW_REQUEST_ID_SINCE_LAST_CACHE_INVALIDATION.append(request_id)
def get_vast_list_from_xml(xml):
simple_xml = remove_declaration_and_namespace(xml)
return {antrag.find('.//AntragsID').text: antrag.find('.//DateninhaberIdNr').text for antrag in simple_xml.findall('.//Antrag')}
@lru_cache
def get_list_vast_requests(pyeric_controller):
xml = elster_xml_generator.generate_full_vast_list_xml()
result = pyeric_controller(xml=xml).get_eric_response()
vast_request_list = get_vast_list_from_xml(result.server_response)
reset_new_request_id_list()
return vast_request_list
def tax_id_number_is_test_id_number(tax_id_number):
return tax_id_number in SPECIAL_TESTMERKER_IDNR
def request_needs_testmerker(request_id):
if request_id in NEW_REQUEST_ID_SINCE_LAST_CACHE_INVALIDATION:
get_list_vast_requests.cache_clear()
return tax_id_number_is_test_id_number(get_list_vast_requests(PermitListingPyericProcessController).get(request_id)) | 0.304248 | 0.103115 |
from netforce.model import Model, fields, get_model
import time
class Project(Model):
_name = "project"
_string = "Project"
_audit_log = True
_fields = {
"name": fields.Char("Project Name", required=True, search=True),
"number": fields.Char("Project Number", search=True),
"contact_id": fields.Many2One("contact", "Customer", search=True),
"start_date": fields.Date("Start Date", required=True),
"end_date": fields.Date("End Date"),
"product_id": fields.Many2One("product", "Product"), # XXX: deprecated
"comments": fields.One2Many("message", "related_id", "Comments"),
"documents": fields.One2Many("document", "related_id", "Documents"),
"state": fields.Selection([["in_progress", "In Progress"], ["done", "Completed"], ["canceled", "Canceled"]], "Status", required=True),
"jobs": fields.One2Many("job", "project_id", "Jobs"),
"tasks": fields.One2Many("task", "project_id", "Tasks"),
"work_time": fields.One2Many("work.time", "job_id", "Work Time"),
"claims": fields.One2Many("product.claim", "project_id", "Claim Bills"),
"borrows": fields.One2Many("product.borrow", "project_id", "Borrow Requests"),
"description": fields.Text("Description"),
"track_id": fields.Many2One("account.track.categ","Actual Cost Tracking Code"),
"track_balance": fields.Decimal("Tracking Balance",function="_get_related",function_context={"path":"track_id.balance"}),
"sub_tracks": fields.One2Many("account.track.categ",None,"Actual Cost Sub-Tracking Codes",function="_get_related",function_context={"path":"track_id.sub_tracks"}),
"est_track_id": fields.Many2One("account.track.categ","Estimate Cost Tracking Code"),
"est_track_balance": fields.Decimal("Tracking Balance",function="_get_related",function_context={"path":"est_track_id.balance"}),
"est_sub_tracks": fields.One2Many("account.track.categ",None,"Est. Cost Sub-Tracking Codes",function="_get_related",function_context={"path":"est_track_id.sub_tracks"}),
"issues": fields.One2Many("issue","project_id","Issues"),
"resources": fields.Many2Many("service.resource","Resources"),
"milestones": fields.One2Many("project.milestone","project_id","Milestones"),
}
_order = "start_date"
_defaults = {
"start_date": lambda *a: time.strftime("%Y-%m-%d"),
"state": "in_progress",
}
def copy(self,ids,context={}):
obj=self.browse(ids[0])
vals={
"name": obj.name,
"number": obj.number,
"contact_id": obj.contact_id.id,
"start_date": obj.start_date,
"end_date": obj.end_date,
"description": description,
"resources": [("set",[r.id for r in obj.resources])],
}
new_proj_id=self.create(vals,context=context)
new_proj=self.browse(new_proj_id)
track=obj.track_id
if track:
vals={
"name": track.name, # XXX
"type": track.type,
"code": track.code, # XXX
}
new_track_id=get_model("account.track.categ").create(vals)
new_proj.write({"track_id":new_track_id})
for subtrack in track.sub_tracks:
vals={
"parent_id": new_track_id,
"name": subtrack.name,
"type": subtrack.type,
"code": subtrack.code,
}
get_model("account.track.categ").create(vals)
return {
"next": {
"name": "project",
"mode": "form",
"active_id": new_proj_id,
},
"flash": "New project copied from %s"%obj.name,
}
Project.register() | netforce_service/netforce_service/models/project.py |
from netforce.model import Model, fields, get_model
import time
class Project(Model):
_name = "project"
_string = "Project"
_audit_log = True
_fields = {
"name": fields.Char("Project Name", required=True, search=True),
"number": fields.Char("Project Number", search=True),
"contact_id": fields.Many2One("contact", "Customer", search=True),
"start_date": fields.Date("Start Date", required=True),
"end_date": fields.Date("End Date"),
"product_id": fields.Many2One("product", "Product"), # XXX: deprecated
"comments": fields.One2Many("message", "related_id", "Comments"),
"documents": fields.One2Many("document", "related_id", "Documents"),
"state": fields.Selection([["in_progress", "In Progress"], ["done", "Completed"], ["canceled", "Canceled"]], "Status", required=True),
"jobs": fields.One2Many("job", "project_id", "Jobs"),
"tasks": fields.One2Many("task", "project_id", "Tasks"),
"work_time": fields.One2Many("work.time", "job_id", "Work Time"),
"claims": fields.One2Many("product.claim", "project_id", "Claim Bills"),
"borrows": fields.One2Many("product.borrow", "project_id", "Borrow Requests"),
"description": fields.Text("Description"),
"track_id": fields.Many2One("account.track.categ","Actual Cost Tracking Code"),
"track_balance": fields.Decimal("Tracking Balance",function="_get_related",function_context={"path":"track_id.balance"}),
"sub_tracks": fields.One2Many("account.track.categ",None,"Actual Cost Sub-Tracking Codes",function="_get_related",function_context={"path":"track_id.sub_tracks"}),
"est_track_id": fields.Many2One("account.track.categ","Estimate Cost Tracking Code"),
"est_track_balance": fields.Decimal("Tracking Balance",function="_get_related",function_context={"path":"est_track_id.balance"}),
"est_sub_tracks": fields.One2Many("account.track.categ",None,"Est. Cost Sub-Tracking Codes",function="_get_related",function_context={"path":"est_track_id.sub_tracks"}),
"issues": fields.One2Many("issue","project_id","Issues"),
"resources": fields.Many2Many("service.resource","Resources"),
"milestones": fields.One2Many("project.milestone","project_id","Milestones"),
}
_order = "start_date"
_defaults = {
"start_date": lambda *a: time.strftime("%Y-%m-%d"),
"state": "in_progress",
}
def copy(self,ids,context={}):
obj=self.browse(ids[0])
vals={
"name": obj.name,
"number": obj.number,
"contact_id": obj.contact_id.id,
"start_date": obj.start_date,
"end_date": obj.end_date,
"description": description,
"resources": [("set",[r.id for r in obj.resources])],
}
new_proj_id=self.create(vals,context=context)
new_proj=self.browse(new_proj_id)
track=obj.track_id
if track:
vals={
"name": track.name, # XXX
"type": track.type,
"code": track.code, # XXX
}
new_track_id=get_model("account.track.categ").create(vals)
new_proj.write({"track_id":new_track_id})
for subtrack in track.sub_tracks:
vals={
"parent_id": new_track_id,
"name": subtrack.name,
"type": subtrack.type,
"code": subtrack.code,
}
get_model("account.track.categ").create(vals)
return {
"next": {
"name": "project",
"mode": "form",
"active_id": new_proj_id,
},
"flash": "New project copied from %s"%obj.name,
}
Project.register() | 0.546254 | 0.307085 |
from datetime import datetime
from json import loads
from typing import List
import requests as requests
from interrail.data import StopLocation, Trip
INTERRAIL_API_URI = "https://api.eurail.com"
LANG = "en"
def get_stop_locations(query: str) -> List[StopLocation]:
"""
Retrieves a list of StopLocations matching the query.
:param query: the search query
:return: the StopLocations returned by interrail
"""
param = {"input": query}
res = requests.get(INTERRAIL_API_URI + "/timetable/location.name", params=param)
data = loads(res.text)
stop_locations = data["stopLocationOrCoordLocation"]
stop_locations = list(
map(lambda x: StopLocation.from_dict(x["StopLocation"]), stop_locations)
)
return stop_locations
def get_stop_location(query: str) -> StopLocation:
"""
Retrieves the top StopLocation matching the query.
:param query: the search query
:return: the first StopLocations returned by interrail
"""
return get_stop_locations(query)[0]
def get_trips(
origin: StopLocation, dest: StopLocation, departure_time: datetime
) -> List[Trip]:
"""
Retrieves possible trips between two StopLocations.
:param origin: the start location of the trip
:param dest: the end location of the trip
:param departure_time: the minimum departure time
:return: the trips
"""
param = {
"lang": LANG,
"originId": origin.id,
"destId": dest.id,
"date": format(departure_time.date(), "%Y-%m-%d"),
"time": format(departure_time.time(), "%H:%M:%S"),
}
res = requests.get(INTERRAIL_API_URI + "/timetable/trip", params=param)
data = loads(res.text)
trips = data["Trip"]
trips = list(map(lambda x: Trip.from_dict(x), trips))
return trips
def get_trip(
origin: StopLocation, dest: StopLocation, departure_time: datetime
) -> Trip:
"""
Retrieves the first trip between two StopLocations
:param origin: the start location of the trip
:param dest: the end location of the trip
:param departure_time: the minimum departure time
:return: the first trip
"""
return get_trips(origin, dest, departure_time)[0] | interrail/api.py | from datetime import datetime
from json import loads
from typing import List
import requests as requests
from interrail.data import StopLocation, Trip
INTERRAIL_API_URI = "https://api.eurail.com"
LANG = "en"
def get_stop_locations(query: str) -> List[StopLocation]:
"""
Retrieves a list of StopLocations matching the query.
:param query: the search query
:return: the StopLocations returned by interrail
"""
param = {"input": query}
res = requests.get(INTERRAIL_API_URI + "/timetable/location.name", params=param)
data = loads(res.text)
stop_locations = data["stopLocationOrCoordLocation"]
stop_locations = list(
map(lambda x: StopLocation.from_dict(x["StopLocation"]), stop_locations)
)
return stop_locations
def get_stop_location(query: str) -> StopLocation:
"""
Retrieves the top StopLocation matching the query.
:param query: the search query
:return: the first StopLocations returned by interrail
"""
return get_stop_locations(query)[0]
def get_trips(
origin: StopLocation, dest: StopLocation, departure_time: datetime
) -> List[Trip]:
"""
Retrieves possible trips between two StopLocations.
:param origin: the start location of the trip
:param dest: the end location of the trip
:param departure_time: the minimum departure time
:return: the trips
"""
param = {
"lang": LANG,
"originId": origin.id,
"destId": dest.id,
"date": format(departure_time.date(), "%Y-%m-%d"),
"time": format(departure_time.time(), "%H:%M:%S"),
}
res = requests.get(INTERRAIL_API_URI + "/timetable/trip", params=param)
data = loads(res.text)
trips = data["Trip"]
trips = list(map(lambda x: Trip.from_dict(x), trips))
return trips
def get_trip(
origin: StopLocation, dest: StopLocation, departure_time: datetime
) -> Trip:
"""
Retrieves the first trip between two StopLocations
:param origin: the start location of the trip
:param dest: the end location of the trip
:param departure_time: the minimum departure time
:return: the first trip
"""
return get_trips(origin, dest, departure_time)[0] | 0.882833 | 0.342558 |
from asgiref.sync import async_to_sync
from channels.generic.websocket import JsonWebsocketConsumer
from django.contrib.auth.models import User
from django.core import serializers
from django.utils.html import escape
from chatchannels.models import ChatChannel, ChatMessage, chat_message_serialize
class ChatChannelConsumer(JsonWebsocketConsumer):
"""
private (__*) methods are for receiving from downstream (but the entry poiny
from downstream is receive_json.
g_* methods are for receiving from channel group
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.channel_id = None # Number of the channel in DB
self.channel_inst = None # Instance of a ChatChannel model
self.user = None # Instance of downstream User object
self.username = None # username of downstream user
self.channel_group_name = None # Channel-layer level group name
self.sync_group_send = None
self.sync_unique_send = None
def __isadmin(self):
"""
Must be a method because adding an admin can change this object's flag
"""
return self.channel_inst.admins.filter(username=self.username).exists()
def connect(self):
self.sync_group_send = async_to_sync(self.channel_layer.group_send)
self.sync_unique_send = async_to_sync(self.channel_layer.send)
self.channel_id = self.scope['url_route']['kwargs']['chat_channel_id']
try:
self.channel_inst = ChatChannel.objects.get(pk=self.channel_id)
except ChatChannel.DoesNotExist:
return self.close(code=404)
self.user = self.scope['user']
self.username = self.scope['user'].username
if not self.channel_inst.is_public \
and not self.__isadmin() \
and not self.channel_inst.allowed_participants.filter(pk=self.scope['user'].id).exists():
return self.close(code=403)
self.channel_group_name = 'ChatChannel_%s' % self.channel_id
# Join room group
async_to_sync(self.channel_layer.group_add)(
self.channel_group_name,
self.channel_name
)
self.accept()
self.group_send({
'type': 'g_entered',
'username': self.username,
'channel': self.channel_name
})
def disconnect(self, close_code):
# Leave room group
if self.channel_group_name is None:
return
self.group_send({
'type': 'g_exit',
'username': self.username
})
async_to_sync(self.channel_layer.group_discard)(
self.channel_group_name,
self.channel_name
)
def group_send(self, dictionary):
self.sync_group_send(self.channel_group_name, dictionary)
def unique_send(self, channel_name, dictionary):
self.sync_unique_send(channel_name, dictionary)
def __add_admin(self, content):
if not self.__isadmin():
return
try:
username = content['username']
except KeyError:
return
try:
related_user = User.objects.get(username=username)
except:
return
# No local-cache divergences
self.channel_inst.admins.add(related_user)
self.channel_inst.allowed_participants.add(related_user)
def __message(self, content):
try:
message = content['message']
except KeyError:
return
if not isinstance(message, str):
return
# Sanitization
message = escape(message)
content = None
msg_obj = ChatMessage(
content=message, author=self.user, chat_channel=self.channel_inst)
msg_obj.save()
self.channel_inst.chat_message_set.add(msg_obj)
self.channel_inst.save()
# Send message to room group
self.group_send({
'type': 'g_chat_message',
'message': chat_message_serialize(msg_obj)
})
def __rm_admin(self, content):
"""
:deprecated:
:param content: json from downstream
:return: None
"""
if not self.__isadmin():
return
try:
username = content['username']
except KeyError:
return
try:
user = User.objects.get(username=username)
except:
return
self.channel_inst.admins.remove(user)
def __allow(self, content):
"""
Adds a user to list of allowed participants. Is idempotent.
Only honored if issued by an admin
:param content: json from downstream
:return: None
"""
if not self.__isadmin():
return
try:
username = content['username']
except:
return
try:
user = User.objects.get(username=username)
except:
return
self.channel_inst.allowed_participants.add(user)
def __disallow(self, content):
"""
Removes user from allowed participants. Has no
effect if user is admin. Only honored if issuer is admin
:param content:
:return:
"""
if not self.__isadmin():
return
try:
username = content['username']
user = User.objects.get(username=username)
except:
return
if self.channel_inst.admins.filter(username=username).exists():
return
self.channel_inst.allowed_participants.remove(user)
self.group_send({
'type': 'g_disallow',
'username': username
})
def __publicize(self, content):
"""
Switches channel from public to private (vice-versa). Only honored
if issued by an admin
:param content: json containing new channel public-status
:return: None
"""
if not self.__isadmin():
return
try:
public_status = content['public']
except KeyError:
return
if not isinstance(public_status, bool):
return
self.channel_inst.is_public = public_status
self.channel_inst.save()
if public_status is False: # Broadcast to group for kicking users not allowed
self.group_send({
'type': 'g_privatized'
})
def __latest(self, content):
"""
Gets the latest 'limit' messages when 'offset' messages are skipped
"""
try:
limit = content['limit']
offset = content['offset']
except KeyError:
return
if not isinstance(offset, int) or not isinstance(limit, int):
return
if limit < 0 or offset < 0:
return
objs = ChatMessage.objects.filter(chat_channel=self.channel_inst).order_by(
'-timestamp')[offset:offset + limit]
self.send_json({
'type': 'latest',
'offset': offset,
'limit': limit,
'messages': list(chat_message_serialize(msg) for msg in objs)
})
def receive_json(self, event, **kwargs):
"""
Receives message directly from associated client
"""
try:
msg_type = event['type']
except KeyError:
return
if msg_type == 'add_admin':
self.__add_admin(event)
elif msg_type == 'message':
self.__message(event)
elif msg_type == 'rm_admin':
return # Deprecated
elif msg_type == 'allow':
self.__allow(event)
elif msg_type == 'disallow':
self.__disallow(event)
elif msg_type == 'publicize':
self.__publicize(event)
elif msg_type == 'latest':
self.__latest(event)
def g_disallow(self, event):
"""
Receives message broadcasted in channel group, removing itself
from connected clients if self.username is the target of the disallow
:param event: json containing username disallowed
:return: None
"""
if self.username == event['username']:
self.close(code=403)
def g_chat_message(self, event):
"""
Receives message from channel group and sends it downstream
:param event: json containing message
:return: None
"""
# Send message to WebSocket
self.send_json({
'type': 'message',
'message': event['message']
})
def g_privatized(self, event):
"""
Kicks user from the channel if it is set to 'private' and user
does not belong to 'allowed_participants'
:return: None
"""
if not self.channel_inst.allowed_participants.filter(username=self.username).exists():
return self.close(403)
def g_entered(self, event):
self.send_json({
'type': 'entered',
'username': event['username']
})
self.unique_send(event['channel'], {
'type': 'g_i_am_here',
'username': self.username
})
def g_i_am_here(self, event):
self.send_json({
'type': 'i_am_here',
'username': event['username']
})
def g_exit(self, event):
self.send_json({
'type': 'exit',
'username': event['username']
})
def g_channel_deleted(self, event):
"""
Sent (maybe not exclusively) from pre_delete signal of ChatChannel
"""
self.close() | chatchannels/consumers.py | from asgiref.sync import async_to_sync
from channels.generic.websocket import JsonWebsocketConsumer
from django.contrib.auth.models import User
from django.core import serializers
from django.utils.html import escape
from chatchannels.models import ChatChannel, ChatMessage, chat_message_serialize
class ChatChannelConsumer(JsonWebsocketConsumer):
"""
private (__*) methods are for receiving from downstream (but the entry poiny
from downstream is receive_json.
g_* methods are for receiving from channel group
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.channel_id = None # Number of the channel in DB
self.channel_inst = None # Instance of a ChatChannel model
self.user = None # Instance of downstream User object
self.username = None # username of downstream user
self.channel_group_name = None # Channel-layer level group name
self.sync_group_send = None
self.sync_unique_send = None
def __isadmin(self):
"""
Must be a method because adding an admin can change this object's flag
"""
return self.channel_inst.admins.filter(username=self.username).exists()
def connect(self):
self.sync_group_send = async_to_sync(self.channel_layer.group_send)
self.sync_unique_send = async_to_sync(self.channel_layer.send)
self.channel_id = self.scope['url_route']['kwargs']['chat_channel_id']
try:
self.channel_inst = ChatChannel.objects.get(pk=self.channel_id)
except ChatChannel.DoesNotExist:
return self.close(code=404)
self.user = self.scope['user']
self.username = self.scope['user'].username
if not self.channel_inst.is_public \
and not self.__isadmin() \
and not self.channel_inst.allowed_participants.filter(pk=self.scope['user'].id).exists():
return self.close(code=403)
self.channel_group_name = 'ChatChannel_%s' % self.channel_id
# Join room group
async_to_sync(self.channel_layer.group_add)(
self.channel_group_name,
self.channel_name
)
self.accept()
self.group_send({
'type': 'g_entered',
'username': self.username,
'channel': self.channel_name
})
def disconnect(self, close_code):
# Leave room group
if self.channel_group_name is None:
return
self.group_send({
'type': 'g_exit',
'username': self.username
})
async_to_sync(self.channel_layer.group_discard)(
self.channel_group_name,
self.channel_name
)
def group_send(self, dictionary):
self.sync_group_send(self.channel_group_name, dictionary)
def unique_send(self, channel_name, dictionary):
self.sync_unique_send(channel_name, dictionary)
def __add_admin(self, content):
if not self.__isadmin():
return
try:
username = content['username']
except KeyError:
return
try:
related_user = User.objects.get(username=username)
except:
return
# No local-cache divergences
self.channel_inst.admins.add(related_user)
self.channel_inst.allowed_participants.add(related_user)
def __message(self, content):
try:
message = content['message']
except KeyError:
return
if not isinstance(message, str):
return
# Sanitization
message = escape(message)
content = None
msg_obj = ChatMessage(
content=message, author=self.user, chat_channel=self.channel_inst)
msg_obj.save()
self.channel_inst.chat_message_set.add(msg_obj)
self.channel_inst.save()
# Send message to room group
self.group_send({
'type': 'g_chat_message',
'message': chat_message_serialize(msg_obj)
})
def __rm_admin(self, content):
"""
:deprecated:
:param content: json from downstream
:return: None
"""
if not self.__isadmin():
return
try:
username = content['username']
except KeyError:
return
try:
user = User.objects.get(username=username)
except:
return
self.channel_inst.admins.remove(user)
def __allow(self, content):
"""
Adds a user to list of allowed participants. Is idempotent.
Only honored if issued by an admin
:param content: json from downstream
:return: None
"""
if not self.__isadmin():
return
try:
username = content['username']
except:
return
try:
user = User.objects.get(username=username)
except:
return
self.channel_inst.allowed_participants.add(user)
def __disallow(self, content):
"""
Removes user from allowed participants. Has no
effect if user is admin. Only honored if issuer is admin
:param content:
:return:
"""
if not self.__isadmin():
return
try:
username = content['username']
user = User.objects.get(username=username)
except:
return
if self.channel_inst.admins.filter(username=username).exists():
return
self.channel_inst.allowed_participants.remove(user)
self.group_send({
'type': 'g_disallow',
'username': username
})
def __publicize(self, content):
"""
Switches channel from public to private (vice-versa). Only honored
if issued by an admin
:param content: json containing new channel public-status
:return: None
"""
if not self.__isadmin():
return
try:
public_status = content['public']
except KeyError:
return
if not isinstance(public_status, bool):
return
self.channel_inst.is_public = public_status
self.channel_inst.save()
if public_status is False: # Broadcast to group for kicking users not allowed
self.group_send({
'type': 'g_privatized'
})
def __latest(self, content):
"""
Gets the latest 'limit' messages when 'offset' messages are skipped
"""
try:
limit = content['limit']
offset = content['offset']
except KeyError:
return
if not isinstance(offset, int) or not isinstance(limit, int):
return
if limit < 0 or offset < 0:
return
objs = ChatMessage.objects.filter(chat_channel=self.channel_inst).order_by(
'-timestamp')[offset:offset + limit]
self.send_json({
'type': 'latest',
'offset': offset,
'limit': limit,
'messages': list(chat_message_serialize(msg) for msg in objs)
})
def receive_json(self, event, **kwargs):
"""
Receives message directly from associated client
"""
try:
msg_type = event['type']
except KeyError:
return
if msg_type == 'add_admin':
self.__add_admin(event)
elif msg_type == 'message':
self.__message(event)
elif msg_type == 'rm_admin':
return # Deprecated
elif msg_type == 'allow':
self.__allow(event)
elif msg_type == 'disallow':
self.__disallow(event)
elif msg_type == 'publicize':
self.__publicize(event)
elif msg_type == 'latest':
self.__latest(event)
def g_disallow(self, event):
"""
Receives message broadcasted in channel group, removing itself
from connected clients if self.username is the target of the disallow
:param event: json containing username disallowed
:return: None
"""
if self.username == event['username']:
self.close(code=403)
def g_chat_message(self, event):
"""
Receives message from channel group and sends it downstream
:param event: json containing message
:return: None
"""
# Send message to WebSocket
self.send_json({
'type': 'message',
'message': event['message']
})
def g_privatized(self, event):
"""
Kicks user from the channel if it is set to 'private' and user
does not belong to 'allowed_participants'
:return: None
"""
if not self.channel_inst.allowed_participants.filter(username=self.username).exists():
return self.close(403)
def g_entered(self, event):
self.send_json({
'type': 'entered',
'username': event['username']
})
self.unique_send(event['channel'], {
'type': 'g_i_am_here',
'username': self.username
})
def g_i_am_here(self, event):
self.send_json({
'type': 'i_am_here',
'username': event['username']
})
def g_exit(self, event):
self.send_json({
'type': 'exit',
'username': event['username']
})
def g_channel_deleted(self, event):
"""
Sent (maybe not exclusively) from pre_delete signal of ChatChannel
"""
self.close() | 0.540681 | 0.074635 |
import sys
import json
import argparse
import pytorch_pretrained_bert
sys.path.append('.')
from scripts.data_convert.text_proc import SpacyTextParser
from scripts.data_convert.convert_common import STOPWORD_FILE, BERT_TOK_OPT_HELP, BERT_TOK_OPT, \
FileWrapper, read_stop_words, add_retokenized_field
from scripts.config import TEXT_BERT_TOKENIZED_NAME, \
TEXT_FIELD_NAME, DOCID_FIELD, BERT_BASE_MODEL, \
TEXT_RAW_FIELD_NAME, TEXT_UNLEMM_FIELD_NAME, \
IMAP_PROC_CHUNK_QTY, REPORT_QTY, SPACY_MODEL
parser = argparse.ArgumentParser(description='Convert MSMARCO-adhoc queries.')
parser.add_argument('--input', metavar='input file', help='input file',
type=str, required=True)
parser.add_argument('--output', metavar='output file', help='output file',
type=str, required=True)
parser.add_argument('--min_query_token_qty', type=int, default=0,
metavar='min # of query tokens', help='ignore queries that have smaller # of tokens')
parser.add_argument('--' + BERT_TOK_OPT, action='store_true', help=BERT_TOK_OPT_HELP)
args = parser.parse_args()
print(args)
arg_vars = vars(args)
inp_file = FileWrapper(args.input)
out_file = FileWrapper(args.output, 'w')
min_query_tok_qty = args.min_query_token_qty
stop_words = read_stop_words(STOPWORD_FILE, lower_case=True)
print(stop_words)
nlp = SpacyTextParser(SPACY_MODEL, stop_words, keep_only_alpha_num=True, lower_case=True)
if arg_vars[BERT_TOK_OPT]:
print('BERT-tokenizing input into the field: ' + TEXT_BERT_TOKENIZED_NAME)
bert_tokenizer = pytorch_pretrained_bert.BertTokenizer.from_pretrained(BERT_BASE_MODEL)
# Input file is a TSV file
ln = 0
for line in inp_file:
ln += 1
line = line.strip()
if not line:
continue
fields = line.split('\t')
if len(fields) != 2:
print('Misformated line %d ignoring:' % ln)
print(line.replace('\t', '<field delimiter>'))
continue
did, query_orig = fields
query_lemmas, query_unlemm = nlp.proc_text(query_orig)
query_toks = query_lemmas.split()
if len(query_toks) >= min_query_tok_qty:
doc = {DOCID_FIELD: did,
TEXT_FIELD_NAME: query_lemmas,
TEXT_UNLEMM_FIELD_NAME: query_unlemm,
TEXT_RAW_FIELD_NAME: query_orig}
add_retokenized_field(doc, TEXT_RAW_FIELD_NAME, TEXT_BERT_TOKENIZED_NAME, bert_tokenizer)
doc_str = json.dumps(doc) + '\n'
out_file.write(doc_str)
if ln % REPORT_QTY == 0:
print('Processed %d queries' % ln)
print('Processed %d queries' % ln)
inp_file.close()
out_file.close() | scripts/data_convert/msmarco/convert_queries.py | import sys
# Convert MSMARCO-adhoc queries (TSV: <query id>\t<query text>) into one
# JSON entry per line with lemmatized, unlemmatized, raw, and (optionally)
# BERT-tokenized text fields.
import sys
import json
import argparse

import pytorch_pretrained_bert

sys.path.append('.')

from scripts.data_convert.text_proc import SpacyTextParser
from scripts.data_convert.convert_common import STOPWORD_FILE, BERT_TOK_OPT_HELP, BERT_TOK_OPT, \
    FileWrapper, read_stop_words, add_retokenized_field
from scripts.config import TEXT_BERT_TOKENIZED_NAME, \
    TEXT_FIELD_NAME, DOCID_FIELD, BERT_BASE_MODEL, \
    TEXT_RAW_FIELD_NAME, TEXT_UNLEMM_FIELD_NAME, \
    IMAP_PROC_CHUNK_QTY, REPORT_QTY, SPACY_MODEL

parser = argparse.ArgumentParser(description='Convert MSMARCO-adhoc queries.')
parser.add_argument('--input', metavar='input file', help='input file',
                    type=str, required=True)
parser.add_argument('--output', metavar='output file', help='output file',
                    type=str, required=True)
parser.add_argument('--min_query_token_qty', type=int, default=0,
                    metavar='min # of query tokens', help='ignore queries that have smaller # of tokens')
parser.add_argument('--' + BERT_TOK_OPT, action='store_true', help=BERT_TOK_OPT_HELP)

args = parser.parse_args()
print(args)
arg_vars = vars(args)

inp_file = FileWrapper(args.input)
out_file = FileWrapper(args.output, 'w')
min_query_tok_qty = args.min_query_token_qty

stop_words = read_stop_words(STOPWORD_FILE, lower_case=True)
print(stop_words)
nlp = SpacyTextParser(SPACY_MODEL, stop_words, keep_only_alpha_num=True, lower_case=True)

# BUG FIX: bert_tokenizer must be defined even when BERT tokenization is
# disabled; otherwise the unconditional add_retokenized_field() call below
# raises a NameError.  (Assumes add_retokenized_field treats a None
# tokenizer as a no-op, as at its other call sites -- TODO confirm.)
bert_tokenizer = None
if arg_vars[BERT_TOK_OPT]:
    print('BERT-tokenizing input into the field: ' + TEXT_BERT_TOKENIZED_NAME)
    bert_tokenizer = pytorch_pretrained_bert.BertTokenizer.from_pretrained(BERT_BASE_MODEL)

# Input file is a TSV file
ln = 0
for line in inp_file:
    ln += 1
    line = line.strip()
    if not line:
        continue
    fields = line.split('\t')
    if len(fields) != 2:
        print('Misformated line %d ignoring:' % ln)
        print(line.replace('\t', '<field delimiter>'))
        continue

    did, query_orig = fields
    query_lemmas, query_unlemm = nlp.proc_text(query_orig)

    query_toks = query_lemmas.split()
    # Skip queries that are too short after lemmatization/stopword removal.
    if len(query_toks) >= min_query_tok_qty:
        doc = {DOCID_FIELD: did,
               TEXT_FIELD_NAME: query_lemmas,
               TEXT_UNLEMM_FIELD_NAME: query_unlemm,
               TEXT_RAW_FIELD_NAME: query_orig}
        add_retokenized_field(doc, TEXT_RAW_FIELD_NAME, TEXT_BERT_TOKENIZED_NAME, bert_tokenizer)
        doc_str = json.dumps(doc) + '\n'
        out_file.write(doc_str)

    if ln % REPORT_QTY == 0:
        print('Processed %d queries' % ln)

print('Processed %d queries' % ln)

inp_file.close()
out_file.close()
"""Test BIDS meta data parser """
from os.path import join as opj
from simplejson import dumps
from datalad.distribution.dataset import Dataset
from datalad.metadata.parsers.datacite import MetadataParser
from nose.tools import assert_true, assert_false, assert_equal
from datalad.tests.utils import with_tree, with_tempfile
# Fixture: a minimal DataCite 2.2 XML record placed where the datacite
# parser looks for it (<dataset>/.datalad/meta.datacite.xml).
@with_tree(tree={'.datalad': {'meta.datacite.xml': """\
<?xml version="1.0" encoding="UTF-8"?>
<resource xmlns="http://datacite.org/schema/kernel-2.2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://datacite.org/schema/kernel-2.2 http://schema.datacite.org/meta/kernel-2.2/metadata.xsd">
<identifier identifierType="DOI">10.6080/K0QN64NG</identifier>
<creators>
<creator>
<creatorName>Last1, First1</creatorName>
</creator>
<creator>
<creatorName>Last2, First2</creatorName>
</creator>
</creators>
<titles>
<title>Main
title</title>
<title titleType="AlternativeTitle">CRCNS.org xxx-1</title>
</titles>
<publisher>CRCNS.org</publisher>
<publicationYear>2011</publicationYear>
<subjects>
<subject>Neuroscience</subject>
<subject>fMRI</subject>
</subjects>
<language>eng</language>
<resourceType resourceTypeGeneral="Dataset">Dataset/Neurophysiology</resourceType>
<sizes>
<size>10 GB</size>
</sizes>
<formats>
<format>application/matlab</format>
<format>NIFTY</format>
</formats>
<version>1.0</version>
<descriptions>
<description descriptionType="Other">
Some long
description.
</description>
</descriptions>
<relatedIdentifiers>
<relatedIdentifier relatedIdentifierType="DOI" relationType="IsDocumentedBy">10.1016/j.cub.2011.08.031</relatedIdentifier>
</relatedIdentifiers>
</resource>
"""}})
def test_get_metadata(path):
    """Parse the datacite fixture and compare the complete JSON-LD output.

    The multi-line <title> and <description> elements in the fixture check
    that the parser normalizes embedded newlines into single spaces.
    """
    ds = Dataset(path)
    meta = MetadataParser(ds).get_metadata('ID')
    # Compare the serialized form (sort_keys=True, indent=2) so a single
    # assertion pins down the full structure of the extracted metadata.
    assert_equal(
        dumps(meta, sort_keys=True, indent=2),
        """\
{
  "@context": {
    "@vocab": "http://schema.org/",
    "doap": "http://usefulinc.com/ns/doap#"
  },
  "@id": "ID",
  "author": [
    "Last1, First1",
    "Last2, First2"
  ],
  "citation": [
    "10.1016/j.cub.2011.08.031"
  ],
  "dcterms:conformsTo": "http://docs.datalad.org/metadata.html#v0-1",
  "description": "Some long description.",
  "doap:Version": "1.0",
  "doap:shortdesc": "Main title",
  "formats": [
    "application/matlab",
    "NIFTY"
  ],
  "keywords": [
    "Neuroscience",
    "fMRI"
  ],
  "name": "CRCNS.org xxx-1",
  "sameAs": "10.6080/K0QN64NG",
  "title": "Main title"
}""")
from os.path import join as opj
from simplejson import dumps
from datalad.distribution.dataset import Dataset
from datalad.metadata.parsers.datacite import MetadataParser
from nose.tools import assert_true, assert_false, assert_equal
from datalad.tests.utils import with_tree, with_tempfile
# Fixture: a minimal DataCite 2.2 XML record placed where the datacite
# parser looks for it (<dataset>/.datalad/meta.datacite.xml).
@with_tree(tree={'.datalad': {'meta.datacite.xml': """\
<?xml version="1.0" encoding="UTF-8"?>
<resource xmlns="http://datacite.org/schema/kernel-2.2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://datacite.org/schema/kernel-2.2 http://schema.datacite.org/meta/kernel-2.2/metadata.xsd">
<identifier identifierType="DOI">10.6080/K0QN64NG</identifier>
<creators>
<creator>
<creatorName>Last1, First1</creatorName>
</creator>
<creator>
<creatorName>Last2, First2</creatorName>
</creator>
</creators>
<titles>
<title>Main
title</title>
<title titleType="AlternativeTitle">CRCNS.org xxx-1</title>
</titles>
<publisher>CRCNS.org</publisher>
<publicationYear>2011</publicationYear>
<subjects>
<subject>Neuroscience</subject>
<subject>fMRI</subject>
</subjects>
<language>eng</language>
<resourceType resourceTypeGeneral="Dataset">Dataset/Neurophysiology</resourceType>
<sizes>
<size>10 GB</size>
</sizes>
<formats>
<format>application/matlab</format>
<format>NIFTY</format>
</formats>
<version>1.0</version>
<descriptions>
<description descriptionType="Other">
Some long
description.
</description>
</descriptions>
<relatedIdentifiers>
<relatedIdentifier relatedIdentifierType="DOI" relationType="IsDocumentedBy">10.1016/j.cub.2011.08.031</relatedIdentifier>
</relatedIdentifiers>
</resource>
"""}})
def test_get_metadata(path):
    """Parse the datacite fixture and compare the complete JSON-LD output.

    The multi-line <title> and <description> elements in the fixture check
    that the parser normalizes embedded newlines into single spaces.
    """
    ds = Dataset(path)
    meta = MetadataParser(ds).get_metadata('ID')
    # Compare the serialized form (sort_keys=True, indent=2) so a single
    # assertion pins down the full structure of the extracted metadata.
    assert_equal(
        dumps(meta, sort_keys=True, indent=2),
        """\
{
  "@context": {
    "@vocab": "http://schema.org/",
    "doap": "http://usefulinc.com/ns/doap#"
  },
  "@id": "ID",
  "author": [
    "Last1, First1",
    "Last2, First2"
  ],
  "citation": [
    "10.1016/j.cub.2011.08.031"
  ],
  "dcterms:conformsTo": "http://docs.datalad.org/metadata.html#v0-1",
  "description": "Some long description.",
  "doap:Version": "1.0",
  "doap:shortdesc": "Main title",
  "formats": [
    "application/matlab",
    "NIFTY"
  ],
  "keywords": [
    "Neuroscience",
    "fMRI"
  ],
  "name": "CRCNS.org xxx-1",
  "sameAs": "10.6080/K0QN64NG",
  "title": "Main title"
}""")
import unittest
from problem_2 import find_files
class Test_FindFiles(unittest.TestCase):
    """Unit tests for find_files() from problem_2.

    Fixture paths are relative, so these tests assume they are run from the
    project directory that contains ./fixtures/problem_2.
    """

    # NOTE(review): the method name omits "no" (should read
    # ..._when_no_path_given); renaming would change the public test id,
    # so it is only flagged here.
    def test_should_return_empty_list_when_path_given(self):
        """
        Test find_files() should return an empty list when no path is given.
        """
        self.assertListEqual([], find_files('', ''))
        self.assertListEqual([], find_files('', None))
        self.assertListEqual([], find_files('', False))
        self.assertListEqual([], find_files('', []))
        self.assertListEqual([], find_files('.c', ''))
        self.assertListEqual([], find_files('.h', ''))
        self.assertListEqual([], find_files('.py', ''))
        self.assertListEqual([], find_files('.md', ''))

    def test_should_return_all_files_when_no_suffix_given(self):
        """
        Test find_files() should return a list of all files when no suffix is given.
        """
        expected = [
            './fixtures/problem_2/testdir/t1.c',
            './fixtures/problem_2/testdir/t1.h',
            './fixtures/problem_2/testdir/subdir2/.gitkeep',
            './fixtures/problem_2/testdir/subdir1/a.c',
            './fixtures/problem_2/testdir/subdir1/a.h',
            './fixtures/problem_2/testdir/subdir3/subsubdir1/b.c',
            './fixtures/problem_2/testdir/subdir3/subsubdir1/b.h',
            './fixtures/problem_2/testdir/subdir4/.gitkeep',
            './fixtures/problem_2/testdir/subdir5/a.c',
            './fixtures/problem_2/testdir/subdir5/a.h',
        ]
        # Both sides are sorted because find_files() makes no ordering
        # guarantee; equality is order-insensitive here.
        expected.sort()
        actual = find_files('', './fixtures/problem_2')
        actual.sort()
        self.assertListEqual(expected, actual)
        actual = find_files('', './fixtures/problem_2/testdir')
        actual.sort()
        self.assertListEqual(expected, actual)

    def test_set_should_return_all_files_ending_in_c(self):
        """
        Test find_files() should return a list of all files that end with the .c extension.
        """
        expected = [
            './fixtures/problem_2/testdir/t1.c',
            './fixtures/problem_2/testdir/subdir1/a.c',
            './fixtures/problem_2/testdir/subdir3/subsubdir1/b.c',
            './fixtures/problem_2/testdir/subdir5/a.c',
        ]
        expected.sort()
        # Both the bare ('c') and dotted ('.c') suffix forms must match.
        actual = find_files('c', './fixtures/problem_2')
        actual.sort()
        self.assertListEqual(expected, actual)
        actual = find_files('.c', './fixtures/problem_2')
        actual.sort()
        self.assertListEqual(expected, actual)
        actual = find_files('.c', './fixtures/problem_2/testdir')
        actual.sort()
        self.assertListEqual(expected, actual)

    def test_set_should_return_all_files_ending_in_h(self):
        """
        Test find_files() should return a list of all files that end with the .h extension.
        """
        expected = [
            './fixtures/problem_2/testdir/t1.h',
            './fixtures/problem_2/testdir/subdir1/a.h',
            './fixtures/problem_2/testdir/subdir3/subsubdir1/b.h',
            './fixtures/problem_2/testdir/subdir5/a.h',
        ]
        expected.sort()
        actual = find_files('h', './fixtures/problem_2')
        actual.sort()
        self.assertListEqual(expected, actual)
        actual = find_files('.h', './fixtures/problem_2')
        actual.sort()
        self.assertListEqual(expected, actual)
        actual = find_files('.h', './fixtures/problem_2/testdir')
        actual.sort()
        self.assertListEqual(expected, actual)

    def test_should_return_all_files_ending_in_gitkeep(self):
        """
        Test find_files() should return all files ending in 'keep' / '.gitkeep'
        (dot-files must be matched by suffix too).
        """
        # The expected literal is already in sorted order, which is why no
        # expected.sort() call is needed in this test.
        expected = [
            './fixtures/problem_2/testdir/subdir2/.gitkeep',
            './fixtures/problem_2/testdir/subdir4/.gitkeep',
        ]
        actual = find_files('keep', './fixtures/problem_2')
        actual.sort()
        self.assertListEqual(expected, actual)
        actual = find_files('.gitkeep', './fixtures/problem_2/testdir')
        actual.sort()
        self.assertListEqual(expected, actual)

    def test_should_return_all_files_with_given_suffix(self):
        """
        Test find_files() should return a list of all files that end with the given suffix.
        """
        # Unlike the tests above, the actual result is NOT sorted here, so
        # this asserts the raw return order of find_files().
        # NOTE(review): relies on find_files() returning entries in this
        # order -- confirm there is an ordering guarantee.
        expected = [
            './problem_2.py',
            './tests_2.py',
        ]
        actual = find_files('_2.py', '.')
        self.assertListEqual(expected, actual)
        expected = [
            './explanation_2.md',
            './given_2.md',
        ]
        actual = find_files('_2.md', '.')
        self.assertListEqual(expected, actual)
# Script entry point: run the test suite when executed directly.
# (Fixed: dataset-export residue "| tests_2.py |" was fused onto the
# unittest.main() line, making it a syntax error.)
if __name__ == '__main__':
    unittest.main()
import unittest
from problem_2 import find_files
class Test_FindFiles(unittest.TestCase):
    """Unit tests for find_files() from problem_2.

    Fixture paths are relative, so these tests assume they are run from the
    project directory that contains ./fixtures/problem_2.
    """

    def _check_sorted(self, suffix, path, expected):
        # Order-insensitive comparison: find_files() makes no ordering
        # guarantee, so both sides are compared in sorted order.
        self.assertListEqual(sorted(expected), sorted(find_files(suffix, path)))

    def test_should_return_empty_list_when_path_given(self):
        """find_files() yields an empty list whenever the path is empty/falsy."""
        for empty_path in ('', None, False, []):
            self.assertListEqual([], find_files('', empty_path))
        for suffix in ('.c', '.h', '.py', '.md'):
            self.assertListEqual([], find_files(suffix, ''))

    def test_should_return_all_files_when_no_suffix_given(self):
        """An empty suffix matches every file under the given path."""
        everything = [
            './fixtures/problem_2/testdir/t1.c',
            './fixtures/problem_2/testdir/t1.h',
            './fixtures/problem_2/testdir/subdir2/.gitkeep',
            './fixtures/problem_2/testdir/subdir1/a.c',
            './fixtures/problem_2/testdir/subdir1/a.h',
            './fixtures/problem_2/testdir/subdir3/subsubdir1/b.c',
            './fixtures/problem_2/testdir/subdir3/subsubdir1/b.h',
            './fixtures/problem_2/testdir/subdir4/.gitkeep',
            './fixtures/problem_2/testdir/subdir5/a.c',
            './fixtures/problem_2/testdir/subdir5/a.h',
        ]
        for root in ('./fixtures/problem_2', './fixtures/problem_2/testdir'):
            self._check_sorted('', root, everything)

    def test_set_should_return_all_files_ending_in_c(self):
        """Both 'c' and '.c' suffix forms match every .c file."""
        c_files = [
            './fixtures/problem_2/testdir/t1.c',
            './fixtures/problem_2/testdir/subdir1/a.c',
            './fixtures/problem_2/testdir/subdir3/subsubdir1/b.c',
            './fixtures/problem_2/testdir/subdir5/a.c',
        ]
        self._check_sorted('c', './fixtures/problem_2', c_files)
        self._check_sorted('.c', './fixtures/problem_2', c_files)
        self._check_sorted('.c', './fixtures/problem_2/testdir', c_files)

    def test_set_should_return_all_files_ending_in_h(self):
        """Both 'h' and '.h' suffix forms match every .h file."""
        h_files = [
            './fixtures/problem_2/testdir/t1.h',
            './fixtures/problem_2/testdir/subdir1/a.h',
            './fixtures/problem_2/testdir/subdir3/subsubdir1/b.h',
            './fixtures/problem_2/testdir/subdir5/a.h',
        ]
        self._check_sorted('h', './fixtures/problem_2', h_files)
        self._check_sorted('.h', './fixtures/problem_2', h_files)
        self._check_sorted('.h', './fixtures/problem_2/testdir', h_files)

    def test_should_return_all_files_ending_in_gitkeep(self):
        """Dot-files such as .gitkeep are matched by suffix as well."""
        keep_files = [
            './fixtures/problem_2/testdir/subdir2/.gitkeep',
            './fixtures/problem_2/testdir/subdir4/.gitkeep',
        ]
        self._check_sorted('keep', './fixtures/problem_2', keep_files)
        self._check_sorted('.gitkeep', './fixtures/problem_2/testdir', keep_files)

    def test_should_return_all_files_with_given_suffix(self):
        """An arbitrary multi-character suffix selects only matching files."""
        # These two assertions deliberately check the raw (unsorted) return
        # order, exactly as the original test did.
        self.assertListEqual(['./problem_2.py', './tests_2.py'],
                             find_files('_2.py', '.'))
        self.assertListEqual(['./explanation_2.md', './given_2.md'],
                             find_files('_2.md', '.'))
# Script entry point: run the test suite when executed directly.
# (Fixed: dataset-export residue "| 0.659953 | 0.709651" was fused onto the
# unittest.main() line, making it a syntax error.)
if __name__ == '__main__':
    unittest.main()