177975
|
from __future__ import absolute_import, print_function, division
import warnings
import numpy as np
import astropy.units as u
__all__ = ["_get_x_in_wavenumbers", "_test_valid_x_range"]
def _get_x_in_wavenumbers(in_x):
"""
Convert input x to wavenumber given x has units.
    Otherwise, assume x is in wavenumbers and issue a warning to this effect.
Parameters
----------
in_x : astropy.quantity or simple floats
x values
Returns
-------
x : floats
input x values in wavenumbers w/o units
"""
    # handle the case where x is a scalar
in_x = np.atleast_1d(in_x)
# check if in_x is an astropy quantity, if not issue a warning
if not isinstance(in_x, u.Quantity):
warnings.warn(
"x has no units, assuming x units are inverse microns", UserWarning
)
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(in_x, 1.0 / u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
    # polynomial coefficients
return x_quant.value
def _test_valid_x_range(x, x_range, outname):
"""
Test if any of the x values are outside of the valid range
Parameters
----------
x : float array
wavenumbers in inverse microns
x_range: 2 floats
allowed min/max of x
outname: str
name of curve for error message
"""
if np.logical_or(np.any(x < x_range[0]), np.any(x > x_range[1])):
raise ValueError(
"Input x outside of range defined for "
+ outname
+ " ["
+ str(x_range[0])
+ " <= x <= "
+ str(x_range[1])
+ ", x has units 1/micron]"
)
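# A minimal smoke test (a sketch, not part of the original module): shows the
# unit conversion via u.spectral() and the no-units warning path.
if __name__ == "__main__":
    print(_get_x_in_wavenumbers(0.55 * u.micron))  # converted to ~1.818 1/micron
    print(_get_x_in_wavenumbers([1.0, 2.0]))  # warns, values assumed 1/micron
    _test_valid_x_range(np.array([1.0, 2.0]), [0.3, 10.0], "example_curve")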
|
177979
|
from pathlib import Path
from unittest import TestCase
from unittest.mock import patch, Mock
from requests.exceptions import HTTPError
from requests import Session
from test.test_account import TestAccount
from gphotos.LocalData import LocalData
import test.test_setup as ts
photos_root = Path("photos")
original_get = Session.get
call_count = 0
def patched_get(self, url, stream=True, timeout=20):
global call_count
call_count += 1
# succeed occasionally only
succeed = call_count % 10 == 0
if "discovery" in url or succeed:
return original_get(self, url, stream=stream, timeout=timeout)
else:
raise HTTPError(Mock(status=500), "ouch!")
class TestNetwork(TestCase):
@patch.object(Session, "get", patched_get)
def test_max_retries_hit(self):
s = ts.SetupDbAndCredentials()
args = ["--skip-albums"]
s.test_setup("test_max_retries_hit", args=args, trash_files=True, trash_db=True)
s.gp.start(s.parsed_args)
db = LocalData(s.root)
db.cur.execute("SELECT COUNT() FROM SyncFiles")
count = db.cur.fetchone()
self.assertEqual(TestAccount.total_count, count[0])
pat = str(photos_root / "*" / "*" / "*")
self.assertEqual(
9, len(sorted(s.root.glob(pat))), "mismatch on image file count"
)
|
178008
|
class color(object):
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
RESET_ALL = '\033[0m'
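# Usage sketch: prefix text with a color code and always reset afterwards.
if __name__ == '__main__':
    print(color.GREEN + 'ok' + color.RESET_ALL)
    print(color.YELLOW + 'warning' + color.RESET_ALL)
    print(color.RED + 'error' + color.RESET_ALL)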
|
178022
|
from secml.optim.optimizers.tests import COptimizerTestCases
from secml.array import CArray
from secml.optim.optimizers import COptimizerPGDLS
from secml.optim.constraints import CConstraintBox, CConstraintL1
class TestCOptimizerPGDLSDiscrete(COptimizerTestCases):
"""Unittests for COptimizerPGDLS in discrete space."""
def test_minimize_3h_camel(self):
"""Test for COptimizer.minimize() method on 3h-camel fun.
This function tests the optimization in discrete space,
with an integer eta and an integer starting point.
        The solution expected by this test is an integer vector.
"""
opt_params = {
'eta': 1, 'eta_min': 1, 'eps': 1e-12,
'bounds': CConstraintBox(lb=-1, ub=1)
}
self._test_minimize(COptimizerPGDLS, '3h-camel',
opt_params=opt_params,
label='discrete',
out_int=True)
def test_minimize_3h_camel_l1(self):
"""Test for COptimizer.minimize() method on 3h-camel fun.
This function tests the optimization in discrete space,
with a floating eta (l1 constraint) and an integer starting point.
The solution expected by this test is a float vector.
"""
opt_params = {
'eta': 0.5, 'eta_min': 0.5, 'eps': 1e-12,
'constr': CConstraintL1(radius=2),
'bounds': CConstraintBox(lb=-1, ub=1)
}
self._test_minimize(COptimizerPGDLS, '3h-camel',
opt_params=opt_params,
label='discrete-l1')
def test_minimize_beale(self):
"""Test for COptimizer.minimize() method on 3h-camel fun.
This function tests the optimization in discrete space,
with a floating eta (l1 constraint) and an integer starting point.
The solution expected by this test is a float vector.
"""
opt_params = {
'eta': 1e-6, 'eta_min': 1e-4, 'eps': 1e-12,
'constr': CConstraintL1(center=CArray([2, 0]), radius=2),
'bounds': CConstraintBox(lb=0, ub=4)
}
self._test_minimize(COptimizerPGDLS, 'beale',
opt_params=opt_params,
label='discrete-l1')
def test_minimize_quad2d_no_bound(self):
"""Test for COptimizer.minimize() method on a quadratic function in
a 2-dimensional space.
This function tests the optimization in discrete space,
with an integer eta, an integer starting point and without any bound.
The solution expected by this test is an integer vector.
"""
opt_params = {
'eta': 1, 'eta_min': 1, 'eps': 1e-12
}
# both the starting point and eta are integer,
# therefore we expect an integer solution
self._test_minimize(COptimizerPGDLS, 'quad-2',
opt_params=opt_params,
label='quad-2-discrete',
out_int=True)
def test_minimize_quad2d_bound(self):
"""Test for COptimizer.minimize() method on a quadratic function in
a 2-dimensional space.
This function tests the optimization in discrete space, with an
integer eta, an integer starting point and with a box constraint.
The solution expected by this test is an integer vector.
"""
opt_params = {
'eta': 1, 'eta_min': 1, 'eps': 1e-12,
'bounds': CConstraintBox(lb=-2, ub=3)
}
self._test_minimize(
COptimizerPGDLS, 'quad-2',
opt_params=opt_params,
label='quad-2-discrete-bounded',
out_int=True)
def test_minimize_quad100d_sparse(self):
"""Test for COptimizer.minimize() method on a quadratic function in
a 100-dimensional space.
This function tests the optimization in discrete space, with an
integer eta, an integer and sparse starting point with box constraint.
The solution expected by this test is an integer sparse vector.
"""
opt_params = {
'eta': 1, 'eta_min': 1, 'eps': 1e-12,
'bounds': CConstraintBox(lb=-2, ub=3)
}
self._test_minimize(
COptimizerPGDLS, 'quad-100-sparse',
opt_params=opt_params,
label='quad-100-sparse-discrete-bounded',
out_int=True)
def test_minimize_quad100d_l1_sparse(self):
"""Test for COptimizer.minimize() method on a quadratic function in
a 100-dimensional space.
This function tests the optimization in discrete space, with an
integer eta (l1 constraint), an integer sparse starting point
with box constraint.
The solution expected by this test is an integer sparse vector.
"""
opt_params = {
'eta': 1, 'eta_min': 1, 'eps': 1e-12,
'constr': CConstraintL1(radius=100),
'bounds': CConstraintBox(lb=-2, ub=3)
}
self._test_minimize(
COptimizerPGDLS, 'quad-100-sparse',
opt_params=opt_params,
label='quad-100-sparse-discrete-bounded-l1',
out_int=True)
def test_minimize_poly_2d_bounded(self):
"""Test for COptimizer.minimize() method on a polynomial function in
a 2-dimensional space.
This function tests the optimization in discrete space, with an
integer eta, an integer starting point with a box constraint.
The solution expected by this test is an integer vector.
"""
opt_params = {
'eta': 1, 'eta_min': 1, 'eps': 1e-12,
'bounds': CConstraintBox(lb=-1, ub=1)}
self._test_minimize(
COptimizerPGDLS, 'poly-2',
opt_params=opt_params,
label='poly-discrete-bounded',
out_int=True
)
def test_minimize_poly_100d_bounded(self):
"""Test for COptimizer.minimize() method on a polynomial function in
        a 100-dimensional space.
This function tests the optimization in discrete space, with an
integer eta, an integer starting point with a box constraint.
The solution of this problem is an integer vector (of zeros).
"""
opt_params = {
'eta': 1, 'eta_min': 1, 'eps': 1e-12,
'bounds': CConstraintBox(lb=-1, ub=1)
}
self._test_minimize(
COptimizerPGDLS, 'poly-100-int',
opt_params=opt_params,
label='poly-int-discrete-bounded',
out_int=True)
def test_minimize_poly_100d_bounded_sparse(self):
"""Test for COptimizer.minimize() method on a polynomial function in
a 100-dimensional space.
This function tests the optimization in discrete space, with an
integer eta, an integer and sparse starting point (zeros vector)
with a box constraint.
The solution expected by this test is an integer sparse vector (of zeros).
"""
opt_params = {
'eta': 1, 'eta_min': 1, 'eps': 1e-12,
'bounds': CConstraintBox(lb=-1, ub=1)
}
self._test_minimize(
COptimizerPGDLS, 'poly-100-int-sparse',
opt_params=opt_params,
label='poly-int-sparse-discrete-bounded',
out_int=True)
if __name__ == '__main__':
COptimizerTestCases.main()
|
178042
|
import sys
import os
# https://github.com/willmcgugan/rich
from rich.console import Console
from rich.theme import Theme
import pydbhub.dbhub as dbhub
if __name__ == '__main__':
custom_theme = Theme({
"info": "green",
"warning": "yellow",
"error": "bold red"
})
console = Console(theme=custom_theme)
    if sys.version_info < (3, 7):
console.print(
"[ERROR] Make sure you have Python 3.7+ installed, quitting.\n\n", style="error")
sys.exit(1)
# Create a new DBHub.io API object
db = dbhub.Dbhub(config_file=f"{os.path.join(os.path.dirname(__file__), '..', 'config.ini')}")
# Delete a remote database
dbName = "Join Testing.sqlite"
err = db.Delete(dbName)
if err is not None:
console.print(f"[ERROR] {err}", style="error")
else:
# Display a success message
console.print(f"Database '{dbName}' deleted", style="info")
|
178058
|
from typing import List
from uuid import UUID
from fastapi import APIRouter, Depends, HTTPException, Response, status
from app.api.dependencies import get_api_key, get_db
from app.db.database import MSSQLConnection
from app.schemas.security import SecurityLoginResponse, SecurityResponseBase
from app.services.exceptions import (
InternalDatabaseError,
InvalidAuthenticationKeyForRequest,
)
from app.services.security import SecurityService
router = APIRouter()
@router.post(
"/login",
response_model=SecurityLoginResponse,
responses={
status.HTTP_401_UNAUTHORIZED: {"description": "Invalid credentials."},
status.HTTP_403_FORBIDDEN: {
"description": "Invalid permissions or credentials."
},
},
)
async def login(
body: SecurityResponseBase, db: MSSQLConnection = Depends(get_db)
) -> SecurityLoginResponse:
"""
**Creates a new vaccine availability with the entity enclosed in the
request body.** On success, the new vaccine availability is returned in the
body of the response.
"""
try:
resp: SecurityLoginResponse = await SecurityService(db).Login(
body.name, body.password
)
except InvalidAuthenticationKeyForRequest as e:
raise HTTPException(status.HTTP_403_FORBIDDEN, e.message)
except InternalDatabaseError:
raise HTTPException(status.HTTP_500_INTERNAL_SERVER_ERROR)
return resp
|
178068
|
import unittest
from contextlib import contextmanager
from numba import njit
from numba.core import errors, cpu, utils, typing
from numba.core.descriptors import TargetDescriptor
from numba.core.dispatcher import TargetConfigurationStack
from numba.core.retarget import BasicRetarget
from numba.core.extending import overload
from numba.core.target_extension import (
dispatcher_registry,
CPUDispatcher,
CPU,
target_registry,
jit_registry,
)
# ------------ A custom target ------------
CUSTOM_TARGET = ".".join([__name__, "CustomCPU"])
class CustomCPU(CPU):
"""Extend from the CPU target
"""
pass
# Nested contexts to help with isolating bits of compilation
class _NestedContext(object):
_typing_context = None
_target_context = None
@contextmanager
def nested(self, typing_context, target_context):
old_nested = self._typing_context, self._target_context
try:
self._typing_context = typing_context
self._target_context = target_context
yield
finally:
self._typing_context, self._target_context = old_nested
# Implement a CustomCPU TargetDescriptor; this one borrows bits from the CPU
class CustomTargetDescr(TargetDescriptor):
options = cpu.CPUTargetOptions
_nested = _NestedContext()
@utils.cached_property
def _toplevel_target_context(self):
# Lazily-initialized top-level target context, for all threads
return cpu.CPUContext(self.typing_context, self._target_name)
@utils.cached_property
def _toplevel_typing_context(self):
# Lazily-initialized top-level typing context, for all threads
return typing.Context()
@property
def target_context(self):
"""
        The target context for CustomCPU targets.
"""
nested = self._nested._target_context
if nested is not None:
return nested
else:
return self._toplevel_target_context
@property
def typing_context(self):
"""
The typing context for CPU targets.
"""
nested = self._nested._typing_context
if nested is not None:
return nested
else:
return self._toplevel_typing_context
def nested_context(self, typing_context, target_context):
"""
A context manager temporarily replacing the contexts with the
given ones, for the current thread of execution.
"""
return self._nested.nested(typing_context, target_context)
custom_target = CustomTargetDescr(CUSTOM_TARGET)
class CustomCPUDispatcher(CPUDispatcher):
targetdescr = custom_target
target_registry[CUSTOM_TARGET] = CustomCPU
dispatcher_registry[target_registry[CUSTOM_TARGET]] = CustomCPUDispatcher
def custom_jit(*args, **kwargs):
assert 'target' not in kwargs
assert '_target' not in kwargs
return njit(*args, _target=CUSTOM_TARGET, **kwargs)
jit_registry[target_registry[CUSTOM_TARGET]] = custom_jit
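# Illustrative sketch: with the registries populated above, `custom_jit`
# behaves like `njit` but compiles for CUSTOM_TARGET. Compilation is lazy,
# so decorating alone does not trigger a compile.
@custom_jit
def _example_add_one(x):
    return x + 1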
# ------------ For switching target ------------
class CustomCPURetarget(BasicRetarget):
@property
def output_target(self):
return CUSTOM_TARGET
def compile_retarget(self, cpu_disp):
kernel = njit(_target=CUSTOM_TARGET)(cpu_disp.py_func)
return kernel
class TestRetargeting(unittest.TestCase):
def setUp(self):
# Generate fresh functions for each test method to avoid caching
@njit(_target="cpu")
def fixed_target(x):
"""
This has a fixed target to "cpu".
Cannot be used in CUSTOM_TARGET target.
"""
return x + 10
@njit
def flex_call_fixed(x):
"""
This has a flexible target, but uses a fixed target function.
Cannot be used in CUSTOM_TARGET target.
"""
return fixed_target(x) + 100
@njit
def flex_target(x):
"""
This has a flexible target.
Can be used in CUSTOM_TARGET target.
"""
return x + 1000
# Save these functions for use
self.functions = locals()
# Refresh the retarget function
self.retarget = CustomCPURetarget()
def switch_target(self):
return TargetConfigurationStack.switch_target(self.retarget)
@contextmanager
def check_retarget_error(self):
with self.assertRaises(errors.NumbaError) as raises:
yield
self.assertIn(f"{CUSTOM_TARGET} != cpu", str(raises.exception))
def check_non_empty_cache(self):
# Retargeting occurred. The cache must NOT be empty
stats = self.retarget.cache.stats()
# Because multiple function compilations are triggered, we don't know
# precisely how many cache hit/miss there are.
self.assertGreater(stats['hit'] + stats['miss'], 0)
def test_case0(self):
fixed_target = self.functions["fixed_target"]
flex_target = self.functions["flex_target"]
@njit
def foo(x):
x = fixed_target(x)
x = flex_target(x)
return x
r = foo(123)
self.assertEqual(r, 123 + 10 + 1000)
# No retargeting occurred. The cache must be empty
stats = self.retarget.cache.stats()
self.assertEqual(stats, dict(hit=0, miss=0))
def test_case1(self):
flex_target = self.functions["flex_target"]
@njit
def foo(x):
x = flex_target(x)
return x
with self.switch_target():
r = foo(123)
self.assertEqual(r, 123 + 1000)
self.check_non_empty_cache()
def test_case2(self):
"""
        The non-nested call into fixed_target should raise an error.
"""
fixed_target = self.functions["fixed_target"]
flex_target = self.functions["flex_target"]
@njit
def foo(x):
x = fixed_target(x)
x = flex_target(x)
return x
with self.check_retarget_error():
with self.switch_target():
foo(123)
def test_case3(self):
"""
        The nested call into fixed_target should raise an error.
"""
flex_target = self.functions["flex_target"]
flex_call_fixed = self.functions["flex_call_fixed"]
@njit
def foo(x):
x = flex_call_fixed(x) # calls fixed_target indirectly
x = flex_target(x)
return x
with self.check_retarget_error():
with self.switch_target():
foo(123)
def test_case4(self):
"""
        Same as case3 but flex_call_fixed() is invoked outside of CUSTOM_TARGET
target before the switch_target.
"""
flex_target = self.functions["flex_target"]
flex_call_fixed = self.functions["flex_call_fixed"]
r = flex_call_fixed(123)
self.assertEqual(r, 123 + 100 + 10)
@njit
def foo(x):
x = flex_call_fixed(x) # calls fixed_target indirectly
x = flex_target(x)
return x
with self.check_retarget_error():
with self.switch_target():
foo(123)
def test_case5(self):
"""
Tests overload resolution with target switching
"""
def overloaded_func(x):
pass
@overload(overloaded_func, target=CUSTOM_TARGET)
def ol_overloaded_func_custom_target(x):
def impl(x):
return 62830
return impl
@overload(overloaded_func, target='cpu')
def ol_overloaded_func_cpu(x):
def impl(x):
return 31415
return impl
@njit
def flex_resolve_overload(x):
return
@njit
def foo(x):
return x + overloaded_func(x)
r = foo(123)
self.assertEqual(r, 123 + 31415)
with self.switch_target():
r = foo(123)
self.assertEqual(r, 123 + 62830)
self.check_non_empty_cache()
|
178123
|
TEMPLATE = """
{name}
====
README
"""
def get_readme_template(name: str) -> str:
return TEMPLATE.format(name=name)
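# Usage sketch: render the template for a given project name.
if __name__ == "__main__":
    print(get_readme_template("example-project"))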
|
178133
|
import ctypes
import os
from ctypes import wintypes
from collections import namedtuple
from PySide2.QtWidgets import QApplication
def get_process_hwnds():
# https://stackoverflow.com/questions/37501191/how-to-get-windows-window-names-with-ctypes-in-python
user32 = ctypes.WinDLL('user32', use_last_error=True)
def check_zero(result, func, args):
if not result:
err = ctypes.get_last_error()
if err:
raise ctypes.WinError(err)
return args
if not hasattr(wintypes, 'LPDWORD'): # PY2
wintypes.LPDWORD = ctypes.POINTER(wintypes.DWORD)
WindowInfo = namedtuple('WindowInfo', 'title hwnd')
WNDENUMPROC = ctypes.WINFUNCTYPE(
wintypes.BOOL,
wintypes.HWND, # _In_ hWnd
wintypes.LPARAM,) # _In_ lParam
user32.EnumWindows.errcheck = check_zero
user32.EnumWindows.argtypes = (
WNDENUMPROC, # _In_ lpEnumFunc
wintypes.LPARAM,) # _In_ lParam
user32.IsWindowVisible.argtypes = (
wintypes.HWND,) # _In_ hWnd
user32.GetWindowThreadProcessId.restype = wintypes.DWORD
user32.GetWindowThreadProcessId.argtypes = (
wintypes.HWND, # _In_ hWnd
wintypes.LPDWORD,) # _Out_opt_ lpdwProcessId
user32.GetWindowTextLengthW.errcheck = check_zero
user32.GetWindowTextLengthW.argtypes = (
wintypes.HWND,) # _In_ hWnd
user32.GetWindowTextW.errcheck = check_zero
user32.GetWindowTextW.argtypes = (
wintypes.HWND, # _In_ hWnd
wintypes.LPWSTR, # _Out_ lpString
ctypes.c_int,) # _In_ nMaxCount
def list_windows():
'''Return a sorted list of visible windows.'''
result = []
@WNDENUMPROC
def enum_proc(hWnd, lParam):
if user32.IsWindowVisible(hWnd):
pid = wintypes.DWORD()
tid = user32.GetWindowThreadProcessId(
hWnd, ctypes.byref(pid))
length = user32.GetWindowTextLengthW(hWnd) + 1
title = ctypes.create_unicode_buffer(length)
user32.GetWindowTextW(hWnd, title, length)
current_pid = os.getpid()
if pid.value == current_pid:
result.append(WindowInfo(title.value, hWnd))
return True
user32.EnumWindows(enum_proc, 0)
return sorted(result)
return list_windows()
def get_window_z_order():
'''Returns windows in z-order (top first)'''
# https://stackoverflow.com/questions/6381198/get-window-z-order-with-python-windows-extensions
user32 = ctypes.windll.user32
lst = []
top = user32.GetTopWindow(None)
if not top:
return lst
lst.append(top)
    while True:
        # 2 == GW_HWNDNEXT: get the window below the given one in z-order
        nxt = user32.GetWindow(lst[-1], 2)
        if not nxt:
            break
        lst.append(nxt)
return lst
'''
def get_qt_window_order():
window_order = get_window_z_order()
app = QApplication.instance()
qt_windows = app.topLevelWidgets()
qt_window_ids = [window.winId() for window in qt_windows]
return qt_window_ids
# return order_window_list(qt_window_ids)
def order_window_list(list_of_windows=[]):
all_windows_ordered_list = get_window_z_order()
ordered_list = []
for window in all_windows_ordered_list:
if window in list_of_windows:
ordered_list.append(window)
return ordered_list
'''
# qt_order = get_qt_window_order()
all_windows = get_window_z_order()
process_windows = get_process_hwnds()
# blender_windows = [b_window.hwnd for b_window in process_windows if b_window.hwnd not in qt_order]
window_dict = {wind.hwnd: wind.title for wind in process_windows}
print("Sort order is:")
i = 0
for window in all_windows:
if window in window_dict:
print("\t", i, window_dict[window])
i += 1
|
178141
|
import os
import glob
from unet3d.data import write_data_to_file, open_data_file
from unet3d.generator import get_training_and_validation_generators
from unet3d.model import siam3dunet_model
from unet3d.model import testnet_model
from unet3d.training import load_old_model, train_model
from skimage.io import imsave, imread
config = dict()
config["image_shape"] = (64, 64, 16) # This determines what shape the images will be cropped/resampled to.
config["patch_shape"] = None # switch to None to train on the whole image
config["labels"] = (1,) # the label numbers on the input image
config["n_base_filters"] = 16
config["n_labels"] = len(config["labels"])
config["all_modalities"] = ["dwi", "t1", "t1c", "t2"]
config["training_modalities"] = ["t1"] #config["all_modalities"] # change this if you want to only use some of the modalities
#config["nb_channels"] = len(config["training_modalities"])
config["nb_channels"] = 1
mode = config["training_modalities"][0]
if "patch_shape" in config and config["patch_shape"] is not None:
config["input_shape"] = tuple([config["nb_channels"]] + list(config["patch_shape"]))
else:
config["input_shape"] = tuple([config["nb_channels"]] + list(config["image_shape"]))
config["truth_channel"] = config["nb_channels"]
config["deconvolution"] = True # if False, will use upsampling instead of deconvolution
config["batch_size"] = 239
config["validation_batch_size"] = 60
config["n_epochs"] = 1000 # cutoff the training after this many epochs
config["patience"] = 100 # learning rate will be reduced after this many epochs if the validation loss is not improving
config["early_stop"] = 1000 # training will be stopped after this many epochs without the validation loss improving
config["initial_learning_rate"] = 1e-2 #5e-4
config["learning_rate_drop"] = 0.5 # factor by which the learning rate will be reduced
config["validation_split"] = 0.8 # portion of the data that will be used for training
config["flip"] = True # augments the data by randomly flipping an axis during
config["permute"] = False # data shape must be a cube. Augments the data by permuting in various directions
config["distort"] = 0.5 # switch to None if you want no distortion
config["augment"] = config["flip"] or config["distort"]
config["validation_patch_overlap"] = 0 # if > 0, during training, validation patches will be overlapping
config["training_patch_start_offset"] = (16, 16, 16) # randomly offset the first patch index by up to this offset
config["skip_blank"] = True # if True, then patches without any target will be skipped
config["data_file0"] = os.path.abspath(f"siam_data0_{mode}.h5")
config["data_file1"] = os.path.abspath(f"siam_data1_{mode}.h5")
config["model_file"] = os.path.abspath(f"siam_model_{mode}.h5")
config["training_file"] = os.path.abspath(f"siam_training_ids_t1.pkl")
config["validation_file"] = os.path.abspath(f"siam_validation_ids_t1.pkl")
config["overwrite"] = False # If True, will previous files. If False, will use previously written files.
def fetch_training_data_files(return_subject_ids=False):
training_data_files0 = list()
subject_ids0 = list()
training_data_files1 = list()
subject_ids1 = list()
train0 = glob.glob(os.path.join(os.path.dirname(__file__), "twostage_data_300", "preprocessed", "pre", "*"))
train1 = glob.glob(os.path.join(os.path.dirname(__file__), "twostage_data_300", "preprocessed", "post", "*"))
#train0 = glob.glob(os.path.join(os.path.dirname(__file__), "twostage_data_300", "preprocessed", "pre", "*"))
#train1 = glob.glob(os.path.join(os.path.dirname(__file__), "twostage_data_300", "preprocessed", "post", "*"))
train0.sort(key = lambda x: x.split('/')[-1].split('-')[-1])
train1.sort(key = lambda x: x.split('/')[-1].split('-')[-1])
train0_index = [i.split('/')[-1].split('-')[-1] for i in train0]
train1_index = [i.split('/')[-1].split('-')[-1] for i in train1]
    assert train0_index == train1_index, 'train0 and train1 sort order not equal'
for subject_dir in train0:
subject_ids0.append(os.path.basename(subject_dir))
subject_files = list()
for modality in config["training_modalities"]:
subject_files.append(os.path.join(subject_dir, modality + ".nii.gz"))
subject_files.append(os.path.join(subject_dir, "truth.nii"))
training_data_files0.append(tuple(subject_files))
for subject_dir in train1:
subject_ids1.append(os.path.basename(subject_dir))
subject_files = list()
for modality in config["training_modalities"]:
subject_files.append(os.path.join(subject_dir, modality + ".nii.gz"))
subject_files.append(os.path.join(subject_dir, "truth.nii"))
training_data_files1.append(tuple(subject_files))
    assert len(subject_ids0) == len(subject_ids1), 'len of subject_ids0 and subject_ids1 are not equal'
training_data_files = [training_data_files0, training_data_files1]
subject_ids = [subject_ids0, subject_ids1]
if return_subject_ids:
return training_data_files, subject_ids
else:
return training_data_files
def main(overwrite=False):
# convert input images into an hdf5 file
if overwrite or not (os.path.exists(config["data_file0"]) and os.path.exists(config["data_file1"])):
training_files, subject_ids = fetch_training_data_files(return_subject_ids=True)
training_files0, training_files1 = training_files
subject_ids0, subject_ids1 = subject_ids
if not os.path.exists(config["data_file0"]):
write_data_to_file(training_files0, config["data_file0"], image_shape=config["image_shape"], subject_ids=subject_ids0)
if not os.path.exists(config["data_file1"]):
write_data_to_file(training_files1, config["data_file1"], image_shape=config["image_shape"], subject_ids=subject_ids1)
data_file_opened0 = open_data_file(config["data_file0"])
data_file_opened1 = open_data_file(config["data_file1"])
if not overwrite and os.path.exists(config["model_file"]):
model = load_old_model(config["model_file"])
else:
# instantiate new model
model = siam3dunet_model(input_shape=config["input_shape"], n_labels=config["n_labels"], initial_learning_rate=config["initial_learning_rate"], n_base_filters=config["n_base_filters"])
#model = testnet_model(input_shape=config["input_shape"], n_labels=config["n_labels"], initial_learning_rate=config["initial_learning_rate"], n_base_filters=config["n_base_filters"])
#if os.path.exists(config["model_file"]):
# model = load_weights(config["model_file"])
# get training and testing generators
train_generator, validation_generator, n_train_steps, n_validation_steps = get_training_and_validation_generators(
data_file_opened0,
data_file_opened1,
batch_size=config["batch_size"],
data_split=config["validation_split"],
overwrite=overwrite,
validation_keys_file=config["validation_file"],
training_keys_file=config["training_file"],
n_labels=config["n_labels"],
labels=config["labels"],
patch_shape=config["patch_shape"],
validation_batch_size=config["validation_batch_size"],
validation_patch_overlap=config["validation_patch_overlap"],
training_patch_start_offset=config["training_patch_start_offset"],
permute=config["permute"],
augment=config["augment"],
skip_blank=config["skip_blank"],
augment_flip=config["flip"],
augment_distortion_factor=config["distort"])
'''
train_data = []
train_label = []
for i in range(n_train_steps):
a, b = next(train_generator)
train_data.append(a)
train_label.append(b)
a0, a1 = a
for i in range(len(a0[0,0,0,0,:])):
a0_0 = a0[0,2,:,:,i]
if a0_0.min() == a0_0.max():
a0_0 = a0_0 - a0_0
else:
a0_0 = (a0_0-a0_0.min())/(a0_0.max()-a0_0.min())
#print (a0_0.shape)
#print (a0_0.max())
#print (a0_0.min())
imsave(f'vis_img/{i}.jpg', a0_0)
raise
'''
test_data, test_label = next(validation_generator)
test_g = (test_data, test_label)
train_data, train_label = next(train_generator)
train_g = (train_data, train_label)
if not overwrite and os.path.exists(config["model_file"]):
txt_file = open(f"output_log.txt","w")
#res = model.evaluate(test_data, test_label)
#print (res)
pre = model.predict(test_data)
#print ([i for i in pre[0]])
#print ([int(i) for i in test_label[0]])
for i in range(len(pre[0])):
txt_file.write(str(pre[0][i][0])+' '+str(test_label[0][i])+"\n")
pre_train = model.predict(train_data)
for i in range(len(pre_train[0])):
txt_file.write(str(pre_train[0][i][0])+' '+str(train_label[0][i])+"\n")
txt_file.close()
        # bare raise aborts here on purpose: only dump predictions, skip training
        raise
# run training
train_model(model=model,
model_file=config["model_file"],
training_generator=train_generator,
validation_generator=test_g,
steps_per_epoch=n_train_steps,
validation_steps=n_validation_steps,
initial_learning_rate=config["initial_learning_rate"],
learning_rate_drop=config["learning_rate_drop"],
learning_rate_patience=config["patience"],
early_stopping_patience=config["early_stop"],
n_epochs=config["n_epochs"])
'''
for i in range(len(train_label)):
#scores = model.evaluate(train_data[i], train_label[i], verbose=1)
scores = model.predict(train_data[i])
print (len(scores[0]))
'''
data_file_opened0.close()
data_file_opened1.close()
if __name__ == "__main__":
main(overwrite=config["overwrite"])
|
178176
|
import pdb
import sys
class FixedPdb(pdb.Pdb):
"""
Since we re-direct stdout and stderr in other parts of the
application, pdb can't interpret things like arrow keys
and auto-complete correctly.
This class fixes the issue by getting pdb to always use
the default stdout and stderr.
"""
def set_trace(self, *args, **kwargs):
self._use_default_stdout_stderr()
return super().set_trace(*args, **kwargs)
def do_continue(self, *args, **kwargs):
self._restore_stdout_stderr()
return super().do_continue(*args, **kwargs)
def _use_default_stdout_stderr(self):
self._prev_stdout = sys.stdout
self._prev_stderr = sys.stderr
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
def _restore_stdout_stderr(self):
sys.stdout = self._prev_stdout
sys.stderr = self._prev_stderr
def fix_pdb():
    if pdb.Pdb is not FixedPdb:
pdb.Pdb = FixedPdb
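# Usage sketch: call fix_pdb() once at startup; pdb.set_trace() looks up the
# Pdb class on the pdb module at call time, so later traces pick up FixedPdb:
#
#     fix_pdb()
#     import pdb; pdb.set_trace()  # now reads/writes the real terminal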
|
178201
|
from django import template
from ghoster import forms
from django.contrib import admin
from django.contrib.admin import helpers
import pprint
import copy
import re
register = template.Library()
def __flatten(fields):
"""Returns a list which is a single level of flattening of the
original list."""
flat = []
for field in fields:
if isinstance(field, (list, tuple)):
flat.extend(field)
else:
flat.append(field)
return flat
def __flatten_fieldsets(fieldsets):
"""Returns a list of field names from an admin fieldsets structure."""
flat = []
for name, opts in fieldsets:
flat.append(
(name, {'fields': __flatten(opts['fields'])})
)
return flat
def __get_meta_fieldsets(flat_fieldsets, model_admin):
markdown_field_name = model_admin.markdown_field
title_field_name = model_admin.title_field
meta_fieldsets = []
has_markdown = False
has_title = False
for name, opts in flat_fieldsets:
if not has_markdown:
if markdown_field_name in opts['fields']:
opts['fields'].remove(markdown_field_name)
has_markdown = True
if not has_title:
if title_field_name in opts['fields']:
opts['fields'].remove(title_field_name)
has_title = True
meta_fieldsets.append(
(name, {'fields': opts['fields']})
)
return meta_fieldsets
@register.simple_tag
def get_gh_media():
pass
# return forms.BaseMadiaWidget().media
@register.simple_tag(takes_context=True)
def get_gh_app_list(context):
"""
"""
return admin.site.get_app_list(context['request'])
@register.simple_tag()
def get_ghoster_form(adminform):
"""
    Split the original adminform into three parts:
    1. markdownfield: this field should use a <textarea> widget and will be rendered with markdown styling
    2. titlefield: this field will be placed in the top bar
    3. metafieldsets: the rest of the fields will be flattened from the fieldsets and placed in the right sidebar
"""
# TODO: add some try-catch handler
form = adminform.form
model_admin = adminform.model_admin
readonly_fields = adminform.readonly_fields
flat_fieldsets = __flatten_fieldsets(adminform.fieldsets)
prepopulated_fields = adminform.prepopulated_fields
if prepopulated_fields:
prepopulated_fields = prepopulated_fields[0]
else:
prepopulated_fields = {}
meta_fieldsets = __get_meta_fieldsets(flat_fieldsets, model_admin)
markdownfield = form[model_admin.markdown_field]
markdownfield.id = re.search(r'id=\"(\S+)\"', str(markdownfield)).group(1)
titlefield = form[model_admin.title_field]
titlefield.field.widget.attrs['class'] += ' form-control'
metaformsets = helpers.AdminForm(
form,
meta_fieldsets,
prepopulated_fields,
readonly_fields=readonly_fields,
model_admin=model_admin
)
# print('@@@@', form['bool_field'].field.widget)
# pprint.pprint(adminform.__dict__, width=1)
return {'markdown': markdownfield, 'title': titlefield, 'meta': metaformsets}
|
178211
|
import numpy as np
import tensorflow as tf
OUTPUT_PATH = "../events/"
def save():
input_node = tf.placeholder(shape=[None, 100, 100, 3], dtype=tf.float32)
net = tf.layers.conv2d(input_node, 32, (3, 3), strides=(2, 2), padding='same', name='conv_1')
net = tf.layers.conv2d(net, 32, (3, 3), strides=(1, 1), padding='same', name='conv_2')
net = tf.layers.conv2d(net, 64, (3, 3), strides=(2, 2), padding='same', name='conv_3')
tf.summary.FileWriter(OUTPUT_PATH, graph=tf.get_default_graph())
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
result = get_first_filter_value(sess)
saver.save(sess, "../ckpt/model.ckpt")
return result
def get_first_filter_value(sess):
    # Note: despite the name, this slices index 1 (not 0) of the conv_1
    # kernel; any fixed slice works for the save/load equality check.
    tensor = tf.get_default_graph().get_tensor_by_name("conv_1/kernel/read:0")
    return sess.run(tensor)[1, :, :, 1]
def load():
tf.reset_default_graph()
with tf.Session() as sess:
saver = tf.train.import_meta_graph('../ckpt/model.ckpt.meta')
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
saver.restore(sess, '../ckpt/model.ckpt')
result = get_first_filter_value(sess)
return result
if __name__ == '__main__':
save_value = save()
load_value = load()
print(save_value)
print(load_value)
assert np.alltrue(save_value == load_value)
|
178220
|
import requests
def get_all_data():
root = "https://raw.githubusercontent.com/patidarparas13/Sentiment-Analyzer-Tool/master/Datasets/"
data = requests.get(root + "imdb_labelled.txt").text.split("\n")
data += requests.get(root + "amazon_cells_labelled.txt").text.split("\n")
data += requests.get(root + "yelp_labelled.txt").text.split("\n")
return data
data = get_all_data()
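# Sketch of one way to split the rows. These UCI "sentiment labelled
# sentences" files are conventionally tab-separated "text<TAB>label" lines;
# that layout is an assumption here, so verify against the raw files.
def split_data(rows):
    texts, labels = [], []
    for row in rows:
        parts = row.rsplit("\t", 1)  # assumed single tab before the 0/1 label
        if len(parts) == 2:
            texts.append(parts[0].strip())
            labels.append(int(parts[1]))
    return texts, labels

texts, labels = split_data(data)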
|
178297
|
from credmark.cmf.model import Model
@Model.describe(
slug='contrib.neilz',
display_name='An example of a contrib model',
description="This model exists simply as an example of how and where to \
contribute a model to the Credmark framework",
version='1.0',
developer='neilz.eth',
output=dict
)
class MyModel(Model):
def run(self, input):
return {
"credmarkFounder": "Neil",
"message": "You are a modeler. Thank you modeler."
}
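# Usage note: contrib models like this are typically exercised through the
# framework's CLI, along the lines of `credmark-dev run contrib.neilz`; the
# exact invocation is an assumption and depends on the installed tooling.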
|
178302
|
from .dataclasses import Skill, Card
from .string_mgr import DictionaryAccess
from typing import Callable, Union, Optional
from collections import UserDict
from .skill_cs_enums import (
ST,
IMPLICIT_TARGET_SKILL_TYPES,
PERCENT_VALUE_SKILL_TYPES,
MIXED_VALUE_SKILL_TYPES,
)
VALUE_PERCENT = 1
VALUE_MIXED = 2
class SkillEffectDSLHelper(UserDict):
def __call__(self, skill_type_id):
def _(describer):
self.data[skill_type_id] = describer
return describer
return _
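# Usage sketch (the skill type id below is a placeholder, not a real ST
# member): the helper doubles as a registration decorator keyed by type id.
#
#     describer = SkillEffectDescriberContext()
#
#     @describer.skill_effect(SOME_SKILL_TYPE_ID)
#     def describe_score_up(value, **format_args):
#         return f"raises score by {value}"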
class SkillEffectDescriberContext(object):
def __init__(self):
self.finish = self.default_finish
self.birdseye = self.default_birdseye
self.trigger = self.default_trigger
self.target = self.default_target
self.combiner = self.default_combiner
self.skill_effect = SkillEffectDSLHelper()
@staticmethod
def mod_value(vs):
eff_d_type = 1
if vs.effect_type in MIXED_VALUE_SKILL_TYPES:
eff_d_type = vs.calc_type
elif vs.effect_type in PERCENT_VALUE_SKILL_TYPES:
eff_d_type = 2
if eff_d_type > 1:
vf = vs.effect_value / 100
vi = vs.effect_value // 100
if vf == vi:
vf = vi
return f"{vf}%"
return str(vs.effect_value)
def default_birdseye(self, effect1, effect2=None):
return ""
    def default_finish(self, skill: Skill.Effect, format_args: dict = None):
        return ""
    def default_trigger(self, skill: Skill, format_args: dict = None):
        return ""
def default_target(self, tt: Skill.TargetType, strings: DictionaryAccess, context: Card):
return ""
def default_combiner(self, trigger: str, effect: str):
return " ".join([trigger, effect])
def finish_clause(self, f: Callable[[Skill.Effect, dict], str]):
self.finish = f
return f
def birdseye_clause(self, f: Callable[[tuple, Optional[tuple]], str]):
self.birdseye = f
return f
def trigger_clause(self, f: Callable[[Skill, dict], str]):
self.trigger = f
return f
def target_clause(self, f: Callable[[Skill.TargetType, Card], str]):
self.target = f
return f
def final_combiner(self, f: Callable[[str, str], str]):
self.combiner = f
return f
def format_single_value(self, level_struct):
return self.mod_value(level_struct)
def format_target(
self,
tt: Skill,
strings: DictionaryAccess,
context: Card = None,
format_args: dict = None,
format_args_sec: dict = None
):
if format_args is None:
format_args = {"var": "", "let": "", "end": ""}
if format_args_sec is None:
format_args_sec = format_args
e1 = None
e2 = None
if tt.levels[0].effect_type not in IMPLICIT_TARGET_SKILL_TYPES:
e1 = self.target(tt.target, strings, context)
if (tt.levels_2 and tt.levels_2[0].effect_type not in IMPLICIT_TARGET_SKILL_TYPES
and tt.target_2.id != tt.target.id):
e2 = self.target(tt.target_2, strings, context)
if e1 and e2:
return self.display_dual_effect(e1, e2, format_args=format_args, format_args_sec=format_args_sec)
elif e1:
return e1
elif e2:
return e2
return ""
def find_formatter(self, effect_type):
desc = self.skill_effect.get(effect_type)
if not desc:
return None
if callable(desc):
return desc
return desc.format
def display_value(self, levels, at_level):
if at_level is not None:
value = self.birdseye(levels[at_level])
else:
value = self.birdseye(levels[0], levels[-1])
return value
def display_dual_effect(
self,
effect_1: str,
effect_2: str,
format_args: dict,
format_args_sec: dict
):
return (
f"{format_args['let']}①{format_args['end']} {effect_1} "
f"{format_args_sec['let']}②{format_args_sec['end']} {effect_2}"
)
def format_effect(
self,
skill: Skill,
level: int = None,
format_args: dict = None,
format_args_sec: dict = None,
):
if format_args is None:
format_args = {"var": "", "let": "", "end": ""}
if format_args_sec is None:
format_args_sec = format_args
formatter = self.find_formatter(skill.levels[0].effect_type)
if skill.levels_2:
formatter_sec = self.find_formatter(skill.levels_2[0].effect_type)
else:
formatter_sec = None
if formatter is None or (skill.levels_2 and formatter_sec is None):
return None
if len(skill.levels) == 1:
level = 0
value = self.display_value(skill.levels, level)
trigger = self.trigger(skill, format_args)
effect = " ".join(
(formatter(value=value, **format_args), self.finish(skill.levels[0], format_args))
)
if skill.levels_2:
value_2 = self.display_value(skill.levels_2, level)
effect_2 = " ".join(
(
formatter_sec(value=value_2, **format_args_sec),
self.finish(skill.levels_2[0], format_args_sec),
)
)
effect = self.display_dual_effect(effect, effect_2, format_args, format_args_sec)
return self.combiner(trigger, effect)
|
178305
|
from Voicelab.pipeline.Node import Node
from parselmouth.praat import call
from Voicelab.toolkits.Voicelab.VoicelabNode import VoicelabNode
from Voicelab.toolkits.Voicelab.MeasurePitchNode import measure_pitch
from scipy import stats
import statistics
class MeasureFormantPositionsNode(VoicelabNode):
def __init__(self, *args, **kwargs):
"""
Args:
*args:
**kwargs:
"""
super().__init__(*args, **kwargs)
self.args = {
# 'Method': 'formants_praat_manual'
}
        self.state = {
            "f1_mean_pf_list": [],
            "f2_mean_pf_list": [],
            "f3_mean_pf_list": [],
            "f4_mean_pf_list": [],
            "f1_median_pf_list": [],
            "f2_median_pf_list": [],
            "f3_median_pf_list": [],
            "f4_median_pf_list": [],
        }
# On each file we want to calculate the formants at the glottal pulses
def process(self):
voice: object = self.args["voice"]
# method = self.args['Method']
# pitch = self.args['Pitch']
formant_object = self.args["Formants"]
pitch_floor = self.args["Pitch Floor"]
pitch_ceiling = self.args["Pitch Ceiling"]
pitch = measure_pitch(
voice=voice, measure="cc", floor=pitch_floor, ceiling=pitch_ceiling
)
point_process = call(
[voice, pitch], "To PointProcess (cc)"
) # Create PointProcess object
num_points = call(point_process, "Get number of points")
f1_list = []
f2_list = []
f3_list = []
f4_list = []
measurement_times = []
for point in range(0, num_points):
point += 1
t = call(point_process, "Get time from index", point)
measurement_times.append(t)
f1 = call(formant_object, "Get value at time", 1, t, "Hertz", "Linear")
f2 = call(formant_object, "Get value at time", 2, t, "Hertz", "Linear")
f3 = call(formant_object, "Get value at time", 3, t, "Hertz", "Linear")
f4 = call(formant_object, "Get value at time", 4, t, "Hertz", "Linear")
f1_list.append(f1)
f2_list.append(f2)
f3_list.append(f3)
f4_list.append(f4)
f1_list = [f1 for f1 in f1_list if str(f1) != "nan"]
f2_list = [f2 for f2 in f2_list if str(f2) != "nan"]
f3_list = [f3 for f3 in f3_list if str(f3) != "nan"]
f4_list = [f4 for f4 in f4_list if str(f4) != "nan"]
# calculate mean & median formants across pulses
        if len(f1_list) > 0:
            f1_mean_pf = sum(f1_list) / len(f1_list)
            f1_median_pf = statistics.median(f1_list)
        else:
            f1_mean_pf = "N/A"
            f1_median_pf = "N/A"
if len(f2_list) > 0:
f2_mean_pf = sum(f2_list) / len(f2_list)
f2_median_pf = statistics.median(f2_list)
else:
f2_mean_pf = "N/A"
f2_median_pf = "N/A"
if len(f3_list) > 0:
f3_mean_pf = sum(f3_list) / len(f3_list)
f3_median_pf = statistics.median(f3_list)
else:
f3_mean_pf = "N/A"
f3_median_pf = "N/A"
if len(f4_list) > 0:
f4_mean_pf = sum(f4_list) / len(f4_list)
f4_median_pf = statistics.median(f4_list)
else:
f4_mean_pf = "N/A"
f4_median_pf = "N/A"
results = {}
# collect all means and median values, these will be needed at the end to calculate the formant positions
self.state["f1_mean_pf_list"].append(f1_mean_pf)
self.state["f2_mean_pf_list"].append(f2_mean_pf)
self.state["f3_mean_pf_list"].append(f3_mean_pf)
self.state["f4_mean_pf_list"].append(f4_mean_pf)
self.state["f1_median_pf_list"].append(f1_median_pf)
self.state["f2_median_pf_list"].append(f2_median_pf)
self.state["f3_median_pf_list"].append(f3_median_pf)
self.state["f4_median_pf_list"].append(f4_median_pf)
return results
# Once all of the files have been processed, we want to calculate the position across all of them
def end(self, results):
"""
Args:
results:
"""
f1_mean_pf_list = self.state["f1_mean_pf_list"]
f2_mean_pf_list = self.state["f2_mean_pf_list"]
f3_mean_pf_list = self.state["f3_mean_pf_list"]
f4_mean_pf_list = self.state["f4_mean_pf_list"]
f1_median_pf_list = self.state["f1_median_pf_list"]
f2_median_pf_list = self.state["f2_median_pf_list"]
f3_median_pf_list = self.state["f3_median_pf_list"]
f4_median_pf_list = self.state["f4_median_pf_list"]
formant_mean_lists = [
f1_mean_pf_list,
f2_mean_pf_list,
f3_mean_pf_list,
f4_mean_pf_list,
]
formant_median_lists = [
f1_median_pf_list,
f2_median_pf_list,
f3_median_pf_list,
f4_median_pf_list,
]
# append it to the results of all of them
        formant_positions = self.calculate_formant_position(
            formant_mean_lists, formant_median_lists
        )
for i, result in enumerate(results):
if isinstance(formant_positions, str):
results[i][self]["Formant Position"] = formant_positions
else:
results[i][self]["Formant Position"] = float(formant_positions[i])
return results
    # to calculate the formant position we need the formants at glottal pulses for each file we ran
def calculate_formant_position(self, formant_mean_lists, formant_median_lists):
"""
Args:
formant_mean_lists:
formant_median_lists:
"""
        if len(formant_mean_lists[0]) < 30:  # or len(formant_median_lists[0]) < 8:
return "Not enough samples, requires at least 30"
# Normality test for mean data
_, p_f1_mean = stats.normaltest(formant_mean_lists[0])
_, p_f2_mean = stats.normaltest(formant_mean_lists[1])
_, p_f3_mean = stats.normaltest(formant_mean_lists[2])
_, p_f4_mean = stats.normaltest(formant_mean_lists[3])
if p_f1_mean >= 0.5 or p_f2_mean >= 0.5 or p_f3_mean >= 0.5 or p_f4_mean >= 0.5:
return "formants not normally distributed"
else:
zf1_mean = stats.zscore(formant_mean_lists[0])
zf2_mean = stats.zscore(formant_mean_lists[1])
zf3_mean = stats.zscore(formant_mean_lists[2])
zf4_mean = stats.zscore(formant_mean_lists[3])
pf_mean = (zf1_mean + zf2_mean + zf3_mean + zf4_mean) / 4
return pf_mean
        # normality test for median data (note: unreachable as written, since
        # both branches above return; kept to mirror the mean-based path)
        _, p_f1_median = stats.normaltest(formant_median_lists[0])
        _, p_f2_median = stats.normaltest(formant_median_lists[1])
        _, p_f3_median = stats.normaltest(formant_median_lists[2])
        _, p_f4_median = stats.normaltest(formant_median_lists[3])
if (
p_f1_median >= 0.5
or p_f2_median >= 0.5
or p_f3_median >= 0.5
or p_f4_median >= 0.5
):
return "formants not normally distributed"
else:
            zf1_median = stats.zscore(formant_median_lists[0])
            zf2_median = stats.zscore(formant_median_lists[1])
            zf3_median = stats.zscore(formant_median_lists[2])
            zf4_median = stats.zscore(formant_median_lists[3])
pf_median = (zf1_median + zf2_median + zf3_median + zf4_median) / 4
return pf_median
|
178308
|
from unittest.mock import patch, ANY
import numpy as np
import cv2
from tasks import (
_download_image, ascii, candy, mosaic, the_scream,
udnie, celeba_distill, face_paint, paprika
)
IMAGE_URL = "image_url"
IMAGE_NAME = "image_name"
IMAGE_PATH = "image_path"
IMAGE_IPFS_URL = "pinata_hash_123"
METADATA_IPFS_URL = "pinata_hash_365"
PAYER = "payer"
TRANSFORMATION_NAME = "sketch"
TRANSFORMATION_NUMBER = 1
TOKEN_NAME = "<PASSWORD>"
def _setup_mocks(dir_mock, pinata_mock, download_mock, original_image):
dir_mock.local_file_path.return_value = IMAGE_PATH
pinata_mock().pin_image.return_value = IMAGE_IPFS_URL
pinata_mock().pin_metadata.return_value = METADATA_IPFS_URL
download_mock.return_value = original_image
def _assert_call_order_and_params(download_mock, dir_mock, cv2_mock, pinata_mock, requests_mock):
# 1. Download the image
download_mock.assert_called_with(IMAGE_URL)
# 2. Create a file path on the tmp file system
dir_mock.local_file_path.assert_called_with(f"{IMAGE_NAME}.jpg")
# 3. Store the image locally
cv2_mock.imwrite.assert_called_with(IMAGE_PATH, ANY)
# 4. Upload the image to pinata
pinata_mock().pin_image.assert_called_with(IMAGE_PATH)
# 5. Upload nft metadata to pinata
pinata_mock().pin_metadata.assert_called_with(IMAGE_IPFS_URL, TOKEN_NAME,
PAYER, TRANSFORMATION_NAME)
# 6. Remove image from the tmp file system
dir_mock.remove_file.assert_called_with(IMAGE_PATH)
# 7. Post nft
requests_mock.post.assert_called_with(
ANY,
json = {"payer": PAYER, "token_uri": METADATA_IPFS_URL}
)
@patch("tasks.requests")
@patch("tasks.cv2")
@patch("tasks.working_directory", return_value="image_path.jpg")
@patch("tasks.PinataClient")
@patch("tasks._download_image")
def test_ascii(download_mock, pinata_mock, dir_mock, cv2_mock, requests_mock):
original_image = cv2.imread("tests/original.jpeg")
_setup_mocks(dir_mock, pinata_mock, download_mock, original_image)
status = ascii(TRANSFORMATION_NAME, TRANSFORMATION_NUMBER, PAYER, IMAGE_URL, IMAGE_NAME)
_assert_call_order_and_params(download_mock, dir_mock, cv2_mock, pinata_mock, requests_mock)
@patch("tasks.requests")
@patch("tasks.cv2")
@patch("tasks.working_directory", return_value="image_path.jpg")
@patch("tasks.PinataClient")
@patch("tasks._download_image")
def test_candy(download_mock, pinata_mock, dir_mock, cv2_mock, requests_mock):
original_image = cv2.imread("tests/original.jpeg")
_setup_mocks(dir_mock, pinata_mock, download_mock, original_image)
status = candy(TRANSFORMATION_NAME, TRANSFORMATION_NUMBER, PAYER, IMAGE_URL, IMAGE_NAME)
_assert_call_order_and_params(download_mock, dir_mock, cv2_mock, pinata_mock, requests_mock)
@patch("tasks.requests")
@patch("tasks.cv2")
@patch("tasks.working_directory", return_value="image_path.jpg")
@patch("tasks.PinataClient")
@patch("tasks._download_image")
def test_mosaic(download_mock, pinata_mock, dir_mock, cv2_mock, requests_mock):
original_image = cv2.imread("tests/original.jpeg")
_setup_mocks(dir_mock, pinata_mock, download_mock, original_image)
status = mosaic(TRANSFORMATION_NAME, TRANSFORMATION_NUMBER, PAYER, IMAGE_URL, IMAGE_NAME)
_assert_call_order_and_params(download_mock, dir_mock, cv2_mock, pinata_mock, requests_mock)
@patch("tasks.requests")
@patch("tasks.cv2")
@patch("tasks.working_directory", return_value="image_path.jpg")
@patch("tasks.PinataClient")
@patch("tasks._download_image")
def test_the_scream(download_mock, pinata_mock, dir_mock, cv2_mock, requests_mock):
original_image = cv2.imread("tests/original.jpeg")
_setup_mocks(dir_mock, pinata_mock, download_mock, original_image)
status = the_scream(TRANSFORMATION_NAME, TRANSFORMATION_NUMBER, PAYER, IMAGE_URL, IMAGE_NAME)
_assert_call_order_and_params(download_mock, dir_mock, cv2_mock, pinata_mock, requests_mock)
@patch("tasks.requests")
@patch("tasks.cv2")
@patch("tasks.working_directory", return_value="image_path.jpg")
@patch("tasks.PinataClient")
@patch("tasks._download_image")
def test_udnie(download_mock, pinata_mock, dir_mock, cv2_mock, requests_mock):
original_image = cv2.imread("tests/original.jpeg")
_setup_mocks(dir_mock, pinata_mock, download_mock, original_image)
status = udnie(TRANSFORMATION_NAME, TRANSFORMATION_NUMBER, PAYER, IMAGE_URL, IMAGE_NAME)
_assert_call_order_and_params(download_mock, dir_mock, cv2_mock, pinata_mock, requests_mock)
@patch("tasks.requests")
@patch("tasks.cv2")
@patch("tasks.working_directory", return_value="image_path.jpg")
@patch("tasks.PinataClient")
@patch("tasks._download_image")
def test_celeba_distill(download_mock, pinata_mock, dir_mock, cv2_mock, requests_mock):
original_image = cv2.imread("tests/gril.jpg")
_setup_mocks(dir_mock, pinata_mock, download_mock, original_image)
status = celeba_distill(TRANSFORMATION_NAME, TRANSFORMATION_NUMBER, PAYER, IMAGE_URL, IMAGE_NAME)
_assert_call_order_and_params(download_mock, dir_mock, cv2_mock, pinata_mock, requests_mock)
@patch("tasks.requests")
@patch("tasks.cv2")
@patch("tasks.working_directory", return_value="image_path.jpg")
@patch("tasks.PinataClient")
@patch("tasks._download_image")
def test_face_paint(download_mock, pinata_mock, dir_mock, cv2_mock, requests_mock):
original_image = cv2.imread("tests/gril.jpg")
_setup_mocks(dir_mock, pinata_mock, download_mock, original_image)
status = face_paint(TRANSFORMATION_NAME, TRANSFORMATION_NUMBER, PAYER, IMAGE_URL, IMAGE_NAME)
_assert_call_order_and_params(download_mock, dir_mock, cv2_mock, pinata_mock, requests_mock)
@patch("tasks.requests")
@patch("tasks.cv2")
@patch("tasks.working_directory", return_value="image_path.jpg")
@patch("tasks.PinataClient")
@patch("tasks._download_image")
def test_paprika(download_mock, pinata_mock, dir_mock, cv2_mock, requests_mock):
original_image = cv2.imread("tests/gril.jpg")
_setup_mocks(dir_mock, pinata_mock, download_mock, original_image)
status = paprika(TRANSFORMATION_NAME, TRANSFORMATION_NUMBER, PAYER, IMAGE_URL, IMAGE_NAME)
_assert_call_order_and_params(download_mock, dir_mock, cv2_mock, pinata_mock, requests_mock)
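# The eight tests above differ only in the task under test and the fixture
# image. A parametrized sketch (assuming pytest, since these are plain
# functions) could collapse the duplication:
#
#     import pytest
#
#     @pytest.mark.parametrize("task,image", [
#         (ascii, "tests/original.jpeg"),
#         (candy, "tests/original.jpeg"),
#         (mosaic, "tests/original.jpeg"),
#         (the_scream, "tests/original.jpeg"),
#         (udnie, "tests/original.jpeg"),
#         (celeba_distill, "tests/gril.jpg"),
#         (face_paint, "tests/gril.jpg"),
#         (paprika, "tests/gril.jpg"),
#     ])
#     @patch("tasks.requests")
#     @patch("tasks.cv2")
#     @patch("tasks.working_directory", return_value="image_path.jpg")
#     @patch("tasks.PinataClient")
#     @patch("tasks._download_image")
#     def test_transformation(download_mock, pinata_mock, dir_mock, cv2_mock,
#                             requests_mock, task, image):
#         _setup_mocks(dir_mock, pinata_mock, download_mock, cv2.imread(image))
#         task(TRANSFORMATION_NAME, TRANSFORMATION_NUMBER, PAYER, IMAGE_URL,
#              IMAGE_NAME)
#         _assert_call_order_and_params(download_mock, dir_mock, cv2_mock,
#                                       pinata_mock, requests_mock)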
|
178386
|
from binascii import hexlify
import enum
import re
from . import bech32
PREFIXES = [
"addr",
"addr_test",
"script",
"stake",
"stake_test"
# -- * Hashes
,
"addr_vkh",
"stake_vkh",
"addr_shared_vkh",
"stake_shared_vkh"
# -- * Keys for 1852H
,
"addr_vk",
"addr_sk",
"addr_xvk",
"addr_xsk",
"acct_vk",
"acct_sk",
"acct_xvk",
"acct_xsk",
"root_vk",
"root_sk",
"root_xvk",
"root_xsk",
"stake_vk",
"stake_sk",
"stake_xvk",
"stake_xsk"
# -- * Keys for 1854H
,
"addr_shared_vk",
"addr_shared_sk",
"addr_shared_xvk",
"addr_shared_xsk",
"acct_shared_vk",
"acct_shared_sk",
"acct_shared_xvk",
"acct_shared_xsk",
"root_shared_vk",
"root_shared_sk",
"root_shared_xvk",
"root_shared_xsk",
"stake_shared_vk",
"stake_shared_sk",
"stake_shared_xvk",
"stake_shared_xsk",
]
SHELLEY_ADDR_RE = re.compile("^(" + "|".join(PREFIXES) + ")")
class NetworkTag(enum.IntEnum):
TESTNET = 0
MAINNET = 1
class AddressType(enum.IntEnum):
PaymentKeyHash_StakeKeyHash = 0
ScriptHash_StakeKeyHash = 1
PaymentKeyHash_ScriptHash = 2
ScriptHash_ScriptHash = 3
PaymentKeyHash_Pointer = 4
ScriptHash_Pointer = 5
PaymentKeyHashOnly = 6
ScriptHashOnly = 7
Stake_StakeKeyHash = 0xE
Stake_ScriptHash = 0xF
class Hash(object):
hex_repr = None
def __init__(self, data):
if len(data) != 28:
raise ValueError(
"Hash object needs 28 bytes, {:d} provided".format(len(data))
)
self.hex_repr = hexlify(bytes(data)).decode()
def __str__(self):
return self.hex_repr
class Pointer(object):
slot_num = 0
transaction_index = None
output_index = None
def __init__(self, data):
data = data.copy()
self.slot_num, data = self._popint(data)
self.transaction_index, data = self._popint(data)
self.output_index, data = self._popint(data)
def _popint(self, data):
_intbytes = []
while data:
b = data[0]
data = data[1:]
_intbytes.append(b & 0x7F)
if b & 0x80 == 0:
break
        val = 0
        for idx, b in enumerate(reversed(_intbytes)):
            # base-128, big-endian: each earlier byte is 7 bits more significant
            val = val | (b << (7 * idx))
return val, data
class AddressDeserializer(object):
ADDR_LENGTH_CHECK = {
AddressType.PaymentKeyHash_StakeKeyHash: lambda l: l == 28 * 2,
AddressType.ScriptHash_StakeKeyHash: lambda l: l == 28 * 2,
AddressType.PaymentKeyHash_ScriptHash: lambda l: l == 28 * 2,
AddressType.ScriptHash_ScriptHash: lambda l: l == 28 * 2,
AddressType.PaymentKeyHash_Pointer: lambda l: l > 28,
AddressType.ScriptHash_Pointer: lambda l: l > 28,
AddressType.PaymentKeyHashOnly: lambda l: l == 28,
AddressType.ScriptHashOnly: lambda l: l == 28,
AddressType.Stake_StakeKeyHash: lambda l: l == 28,
AddressType.Stake_ScriptHash: lambda l: l == 28,
}
network_tag = None
address_type = None
components = None
hrp = None
payload = None
def __init__(self, address):
"""
Performs basic validation of a Shelley address but doesn't analyze the payload.
        :param address: a Shelley address as a :class:`str`
"""
self.hrp, binaddr5bit = bech32.bech32_decode(address)
if not binaddr5bit:
raise ValueError("{:s} is not a valid Shelley address".format(address))
binaddr = bech32.convertbits(binaddr5bit, 5, 8, False)
header = binaddr[0]
self.payload = binaddr[1:]
self.address_type, self.network_tag = (header & 0xF0) >> 4, header & 0xF
if self.address_type not in AddressType.__members__.values():
raise ValueError(
"Shelley address {:s} is of wrong type (0x{:x})".format(
address, self.address_type
)
)
if self.network_tag not in NetworkTag.__members__.values():
raise ValueError(
"Shelley address {:s} has unsupported net tag (0x{:x})".format(
address, self.network_tag
)
)
if self.network_tag == NetworkTag.TESTNET and not self.hrp.endswith("_test"):
raise ValueError(
'Shelley address {:s} has TESTNET tag but the prefix doesn\'t end with "_test"'.format(
address
)
)
elif self.network_tag == NetworkTag.MAINNET and self.hrp.endswith("_test"):
raise ValueError(
'Shelley address {:s} has MAINNET tag but the prefix ends with "_test"'.format(
address
)
)
if not self.ADDR_LENGTH_CHECK[self.address_type](len(self.payload)):
raise ValueError(
"Shelley address {:s} has invalid self.payload length".format(address)
)
def deserialized(self):
"""
Returns data deserialized from the address.
        :rtype: (:class:`str`, :class:`NetworkTag`, :class:`AddressType`, :class:`list` of :class:`AddressComponents <AddressComponent>`)
"""
part1, part2 = Hash(self.payload[:28]), self.payload[28:]
if self.address_type in (
AddressType.PaymentKeyHash_Pointer,
AddressType.ScriptHash_Pointer,
):
part2 = Pointer(part2) if part2 else None
else:
part2 = Hash(part2) if part2 else None
self.components = (part1, part2)
return (self.hrp, self.network_tag, self.address_type, self.components)
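# Hypothetical smoke test (run via `python -m <package>.<module>` so the
# relative imports resolve): the address below is a placeholder and is
# expected to be rejected.
if __name__ == "__main__":
    try:
        print(AddressDeserializer("addr_test1notarealaddress").deserialized())
    except ValueError as exc:
        print("rejected:", exc)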
|
178430
|
import vertica_sdk
import urllib.request
import time
class validate_url(vertica_sdk.ScalarFunction):
"""Validates HTTP requests.
Returns the status code of a webpage. Pages that cannot be accessed return
"Failed to load page."
"""
def __init__(self):
pass
def setup(self, server_interface, col_types):
pass
def processBlock(self, server_interface, arg_reader, res_writer):
# Writes a string to the UDx log file.
server_interface.log("Validating URL Accessibility - UDx")
while(True):
url = arg_reader.getString(0)
try:
status = urllib.request.urlopen(url).getcode()
# Avoid overwhelming web servers -- be nice.
time.sleep(2)
except (ValueError, urllib.error.HTTPError, urllib.error.URLError):
status = 'Failed to load page'
res_writer.setString(str(status))
res_writer.next()
if not arg_reader.next():
# Stop processing when there are no more input rows.
break
def destroy(self, server_interface, col_types):
pass
class validate_url_factory(vertica_sdk.ScalarFunctionFactory):
def createScalarFunction(self, srv):
        return validate_url()
def getPrototype(self, srv_interface, arg_types, return_type):
arg_types.addVarchar()
return_type.addChar()
def getReturnType(self, srv_interface, arg_types, return_type):
return_type.addChar(20)
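
# Deployment sketch (library name and file path are assumptions; adjust for
# your cluster). After copying this file to an initiator node, the UDx is
# typically registered and invoked with SQL along these lines:
#
#   CREATE OR REPLACE LIBRARY pylib AS '/home/dbadmin/validate_url.py' LANGUAGE 'Python';
#   CREATE OR REPLACE FUNCTION validate_url AS LANGUAGE 'Python'
#       NAME 'validate_url_factory' LIBRARY pylib;
#   SELECT validate_url('https://www.example.com/');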
|
178449
|
import functools
from flask import abort, request
from flask_stupe import marshmallow
__all__ = []
if marshmallow:
def _load_schema(schema, json):
try:
return schema.load(json)
except marshmallow.exceptions.ValidationError as e:
abort(400, e.messages)
def schema_required(schema):
"""Validate body of the request against the schema.
Abort with a status code 400 if the schema yields errors."""
if isinstance(schema, type):
schema = schema()
def __inner(f):
@functools.wraps(f)
def __inner(*args, **kwargs):
json = request.get_json(force=True)
request.schema = _load_schema(schema, json)
return f(*args, **kwargs)
return __inner
return __inner
__all__.extend(["schema_required"])
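
# Usage sketch (hypothetical schema and route; assumes a configured Flask app):
#
#     from flask import Flask, jsonify
#     from marshmallow import Schema, fields
#
#     class UserSchema(Schema):
#         name = fields.String(required=True)
#
#     app = Flask(__name__)
#
#     @app.route("/users", methods=["POST"])
#     @schema_required(UserSchema)
#     def create_user():
#         # The validated payload is attached to the request by the decorator.
#         return jsonify(request.schema)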
|
178492
|
import collections.abc
import enum
import os
from typing import (
TYPE_CHECKING,
Annotated,
Any,
AsyncGenerator,
Optional,
Union,
)
import aiohttp
import inflection
import numpy as np
import pandas as pd
import pydantic
import structlog
import uplink
import uplink.converters
from pandera.decorators import check_io, check_output
from pandera.errors import SchemaError
from pandera.model import SchemaModel
from pandera.model_components import Field
from pandera.typing import Series, String
from wraeblast import constants, errors
from wraeblast.filtering.elements import ItemFilter
from wraeblast.filtering.parsers.extended import env
if TYPE_CHECKING:
import uplink.commands
from wraeblast.filtering.parsers.extended import config
logger = structlog.get_logger()
QcutItemsType = list[tuple[pd.Interval, pd.DataFrame]]
SingleInfluencedQcutItemsType = list[
tuple[tuple[tuple[str, ...], pd.Interval], pd.DataFrame]
]
InsightsResultType = tuple[
Union["CurrencyType", "ItemType"],
pd.DataFrame,
]
InsightsType = Union["CurrencyType", "ItemType"]
quantiles = {
"quartile": 4,
"quintile": 5,
"decile": 10,
"percentile": 100,
}
shard_names_to_orb_names = {
"Transmutation Shard": "Orb of Transmutation",
"Alteration Shard": "Orb of Alteration",
"Alchemy Shard": "Orb of Alteration",
"Annulment Shard": "Orb of Annulment",
"Binding Shard": "Orb of Binding",
"Horizon Shard": "Orb of Horizons",
"Harbinger's Shard": "Harbinger's Orb",
"Engineer's Shard": "Engineer's Orb",
"Ancient Shard": "Ancient Orb",
"Chaos Shard": "Chaos Orb",
"Mirror Shard": "Mirror of Kalandra",
"Exalted Shard": "Exalted Orb",
"Regal Shard": "Regal Orb",
}
_cache = pd.HDFStore(os.getenv("WRAEBLAST_CACHE", "./.wbinsights.h5"))
class InflectedEnumMixin(enum.Enum):
@property
def underscored_value(self) -> str:
return inflection.underscore(self.value)
@property
def pluralized_underscored_value(self) -> str:
return inflection.pluralize(self.underscored_value)
class CurrencyType(InflectedEnumMixin, enum.Enum):
CURRENCY = "Currency"
FRAGMENT = "Fragment"
class ItemType(InflectedEnumMixin, enum.Enum):
ARTIFACT = "Artifact"
BASE_TYPE = "BaseType"
BEAST = "Beast"
BLIGHTED_MAP = "BlightedMap"
CLUSTER_JEWEL = "ClusterJewel"
DELIRIUM_ORB = "DeliriumOrb"
DIVINATION_CARD = "DivinationCard"
ESSENCE = "Essence"
FOSSIL = "Fossil"
HELMET_ENCHANT = "HelmetEnchant"
INCUBATOR = "Incubator"
INVITATION = "Invitation"
MAP = "Map"
OIL = "Oil"
PROPHECY = "Prophecy"
RESONATOR = "Resonator"
SCARAB = "Scarab"
SKILL_GEM = "SkillGem"
UNIQUE_ACCESSORY = "UniqueAccessory"
UNIQUE_ARMOUR = "UniqueArmour"
UNIQUE_FLASK = "UniqueFlask"
UNIQUE_JEWEL = "UniqueJewel"
UNIQUE_MAP = "UniqueMap"
UNIQUE_WEAPON = "UniqueWeapon"
VIAL = "Vial"
WATCHSTONE = "Watchstone"
@property
def key_name(self) -> str:
return inflection.pluralize(inflection.underscore(self.value))
@uplink.response_handler
def raise_for_status(response):
"""Checks whether or not the response was successful."""
if 200 <= response.status_code < 300:
return response
raise errors.UnsuccessfulInsightsRequest(
f"error {response.status_code}: {response.url}"
)
def get_all_insights_types() -> list[InsightsType]:
return [*CurrencyType, *ItemType]
def get_display_value(
chaos_value: float,
exalted_exchange_value: int,
round_down_by: int = 1,
precision: int = 0,
) -> str:
if chaos_value < exalted_exchange_value:
if round_down_by:
chaos_value = env.round_down(chaos_value, round_down_by)
return f"{chaos_value}c"
else:
return f"{chaos_value / exalted_exchange_value:.{precision}f}ex"
def get_insights_type_by_value(s: str) -> InsightsType:
for t in get_all_insights_types():
if t.value == s:
return t
raise KeyError(s)
def get_quantile_tuple(q: str) -> tuple[str, int]:
if q.startswith("D"):
return ("decile", int(q[1:]))
elif q.startswith("P"):
return ("percentile", int(q[1:]))
elif q.startswith("QU"):
return ("quintile", int(q[2:]))
elif q.startswith("Q"):
return ("quartile", int(q[1:]))
else:
raise RuntimeError(f"invalid quantile: {q}")
async def get_economy_overview(
league: str,
client: "NinjaConsumer",
type_: InsightsType,
) -> pd.DataFrame:
"""Request an economy overview from poe.ninja."""
if type_ in CurrencyType:
meth = client.get_currency_overview
elif type_ in ItemType:
meth = client.get_item_overview
else:
        raise RuntimeError(f"unknown insights type: {type_}")
logger.info(
"overview.get",
client=".".join([client.__module__, client.__class__.__name__]),
type=type_.value,
)
return await meth(league=league, type=type_.value) # type: ignore
async def get_dataframes(
league: str,
types: list[InsightsType],
) -> AsyncGenerator[InsightsResultType, None]:
"""Request all economy overviews from poe.ninja."""
logger.info("all_insights.get", league=league)
session = aiohttp.ClientSession()
client = uplink.AiohttpClient(session=session)
ninja = NinjaConsumer(
base_url=NinjaConsumer.default_base_url,
client=client,
)
for t in types:
overview = await get_economy_overview(
league=league,
client=ninja,
type_=t,
)
yield (t, overview)
await session.close()
async def initialize_insights_cache(
league: str,
cache: Optional[pd.HDFStore] = None,
no_sync: bool = False,
) -> pd.HDFStore:
"""Fetch and cache economy insights as needed."""
if cache is None:
cache = _cache
# log = logger.bind(league=league, cache_dir=cache.directory)
log = logger.bind(league=league)
log.info("cache.initialize", league=league)
missing_dataframes = []
for t in get_all_insights_types():
try:
df = cache.get(f"i_{t.value}")
log.debug("cache.hit", type=t.value)
except KeyError:
log.debug("cache.miss", type=t.value)
missing_dataframes.append(t)
if missing_dataframes and no_sync:
raise errors.WraeblastError("insights cache is incomplete")
async for t, df in get_dataframes(
league=league,
types=missing_dataframes,
):
log.info(
"overview.response",
lines=df.shape[0],
type=t.value,
)
cache.put(f"i_{t.value}", df, format="table")
return cache
async def initialize_filter_context(
initialize_cache: bool = True,
league: Optional[str] = None,
cache: Optional[pd.HDFStore] = None,
no_sync: bool = False,
) -> "ItemFilterContext":
"""Create an ``ItemFilterContext`` from cached economy data."""
if initialize_cache:
if league is None:
raise RuntimeError("league must be provided if initializing cache")
cache = await initialize_insights_cache(
league=league,
cache=cache,
no_sync=no_sync,
)
    elif cache is None:
        cache = _cache
economy_data = {}
for t in get_all_insights_types():
overview = cache.get(f"i_{t.value}")
economy_data[t.pluralized_underscored_value] = overview
return ItemFilterContext(data=economy_data)
def _parse_name_and_details_id(
name: str, details_id: str
) -> tuple[Optional[int], tuple[constants.Influence, ...]]:
tokens = details_id.split("-")
ilvl_and_influences = tokens[name.count(" ") + name.count("-") + 1 :]
try:
return (
int(ilvl_and_influences[0]),
tuple(
constants.Influence(i.capitalize())
for i in ilvl_and_influences[1:]
),
)
except (IndexError, ValueError):
return (None, tuple())
def get_quantile_thresholds(df: pd.DataFrame) -> list[dict[str, float]]:
groups = df.groupby(list(quantiles.keys()), as_index=False)
return groups.agg({"chaos_value": "min"}).to_dict( # type: ignore
"records"
)
class NinjaCurrencyOverviewSchema(SchemaModel):
currency_type_name: Series[String]
chaos_equivalent: Series[float]
details_id: Series[String]
pay_id: Series[float] = Field(alias="pay.id", nullable=True)
pay_league_id: Series[float] = Field(alias="pay.league_id", nullable=True)
pay_pay_currency_id: Series[float] = Field(
alias="pay.pay_currency_id", nullable=True
)
pay_get_currency_id: Series[float] = Field(
alias="pay.get_currency_id", nullable=True
)
pay_sample_time_utc: Series[
Annotated[pd.DatetimeTZDtype, "ns", "utc"]
] = Field(alias="pay.sample_time_utc", coerce=True, nullable=True)
pay_count: Series[float] = Field(alias="pay.count", nullable=True)
pay_value: Series[float] = Field(alias="pay.value", nullable=True)
pay_data_point_count: Series[float] = Field(
alias="pay.data_point_count", ge=0, coerce=True, nullable=True
)
pay_includes_secondary: Series[bool] = Field(
alias="pay.includes_secondary", coerce=True, nullable=True
)
pay_listing_count: Series[float] = Field(
alias="pay.listing_count", coerce=True, nullable=True
)
class NinjaItemOverviewSchema(SchemaModel):
id: Series[int]
name: Series[String]
item_class: Series[int]
flavour_text: Optional[Series[String]]
chaos_value: Series[float]
exalted_value: Series[float]
count: Series[int]
details_id: Series[String]
# listing_count: Series[int]
icon: Optional[Series[String]] = Field(nullable=True)
base_type: Optional[Series[String]] = Field(nullable=True)
gem_level: Optional[Series[float]] = Field(nullable=True)
gem_quality: Optional[Series[float]] = Field(nullable=True)
item_level: Optional[Series[float]] = Field(nullable=True)
level_required: Optional[Series[float]] = Field(nullable=True)
links: Optional[Series[float]] = Field(nullable=True)
map_tier: Optional[Series[float]] = Field(nullable=True)
stack_size: Optional[Series[float]] = Field(nullable=True)
variant: Optional[Series[String]] = Field(nullable=True)
class Config:
coerce = True
class ExtendedNinjaOverviewSchema(SchemaModel):
item_name: Series[String]
chaos_value: Series[float]
alt_quality: Series[String]
is_alt_quality: Series[bool]
chaos_value_log: Series[float]
quartile: Series[int]
quintile: Series[int]
decile: Series[int]
percentile: Series[int]
base_type: Optional[Series[String]] = Field(nullable=True)
gem_level: Optional[Series[float]] = Field(nullable=True)
gem_quality: Optional[Series[float]] = Field(nullable=True)
influences: Optional[Series[String]] = Field(nullable=True)
level_required: Optional[Series[float]] = Field(nullable=True)
links: Optional[Series[float]] = Field(nullable=True)
map_tier: Optional[Series[float]] = Field(nullable=True)
num_influences: Optional[Series[int]] = Field(nullable=True)
orb_name: Optional[Series[String]] = Field(nullable=True)
stack_size: Optional[Series[float]] = Field(nullable=True)
uber_blight: Optional[Series[bool]] = Field(nullable=True)
variant: Optional[Series[String]] = Field(nullable=True)
class PostProcessedNinjaOverviewSchema(ExtendedNinjaOverviewSchema):
exalted_value: Series[float]
display_value: Series[String]
@check_output(ExtendedNinjaOverviewSchema)
def transform_ninja_df(df: pd.DataFrame) -> pd.DataFrame:
currency_overview_schema = NinjaCurrencyOverviewSchema.to_schema()
item_overview_schema = NinjaItemOverviewSchema.to_schema()
is_currency_overview = False
df = df.fillna(0)
try:
df = currency_overview_schema.validate(df)
is_currency_overview = True
except SchemaError as e:
df = item_overview_schema.validate(df)
if is_currency_overview:
try:
shards = []
for shard_name, orb_name in shard_names_to_orb_names.items():
if not df[df["currency_type_name"] == shard_name].empty:
continue
orb_value = (
df[df["currency_type_name"] == orb_name][
"chaos_equivalent"
].iloc[0]
if orb_name != "Chaos Orb"
else 1
)
shards.append(
{
"currency_type_name": shard_name,
"chaos_equivalent": orb_value / 20,
}
)
df = df.append(
shards,
verify_integrity=True,
sort=True,
ignore_index=True,
)
except IndexError:
pass
if "sparkline.data" in df.columns:
df = df.loc[df["sparkline.data"].str.len() != 0]
output = pd.DataFrame()
output["item_name"] = (
df["currency_type_name"]
if "currency_type_name" in df.columns
else df["name"]
)
if not is_currency_overview:
output["scourged"] = output["item_name"].map(
lambda name: name.startswith("Scourged")
)
for label in ("currency_type_name", "skill_gem_name"):
if label in df.columns:
output["item_name"] = df[label]
if "map_tier" in df.columns:
output["uber_blight"] = df["name"].map(
lambda name: name.startswith("Blight-ravaged")
)
if (
"base_type" in df.columns
and not df[
df["base_type"].str.contains("Cluster Jewel", na=False)
].empty
):
try:
output["cluster_jewel_enchantment"] = df["name"].map(
lambda name: constants.get_cluster_jewel_passive(name).value
)
output["cluster_jewel_passives_min"] = df["trade_info"].apply(
lambda trade_info: [
ti
for ti in trade_info
if ti["mod"] == "enchant.stat_3086156145"
][0]["min"]
)
output["cluster_jewel_passives_max"] = df["trade_info"].apply(
lambda trade_info: [
ti
for ti in trade_info
if ti["mod"] == "enchant.stat_3086156145"
][0]["max"]
)
output["cluster_jewel_passives"] = output[
"cluster_jewel_passives_min"
]
except (KeyError, IndexError) as e:
# TODO: Find a way to filter out unique cluster jewels better
pass
output["alt_quality"] = ""
output["is_alt_quality"] = False
alt_filter = (
output["item_name"].str.startswith("Anomalous")
| output["item_name"].str.startswith("Divergent")
| output["item_name"].str.startswith("Phantasmal")
)
if not output[alt_filter].empty:
output.loc[alt_filter, "is_alt_quality"] = True
output.loc[alt_filter, "alt_quality"] = output["item_name"].apply(
lambda s: s[: s.find(" ")],
)
output.loc[alt_filter, "item_name"] = output["item_name"].apply(
lambda s: s[s.find(" ") + 1 :],
)
if "name" in df.columns and "details_id" in df.columns:
output["influences"] = df.apply(
lambda r: "/".join(
i.value
for i in _parse_name_and_details_id(
str(r["name"]),
str(r["details_id"]),
)[1]
),
axis=1,
)
output["num_influences"] = (
df["variant"].str.count("/").fillna(0).astype(int)
if "variant" in df.columns
else 0
)
for column in (
"base_type",
"level_required",
"links",
"gem_level",
"gem_quality",
"map_tier",
"stack_size",
):
if column in df.columns:
output[column] = df[column]
if column == "links":
output[column] = output[column].fillna(0)
output["chaos_value"] = (
df["chaos_equivalent"]
if "chaos_equivalent" in df.columns
else df["chaos_value"]
)
# Normalized chaos values
# XXX: since log(0) is -inf, the min chaos value of the dataframe replaces
# rows with a chaos value of 0
min_chaos_value = output.loc[
output["chaos_value"] != 0, "chaos_value"
].min()
output["chaos_value"].replace(0, min_chaos_value, inplace=True) # type: ignore
output["chaos_value_log"] = np.log(output["chaos_value"])
# Pre-defined quantiles (quartiles, quintiles, percentiles)
for label, q in quantiles.items():
labels = None
if isinstance(q, (list, tuple)):
q, labels = q
output[label] = pd.qcut(
output["chaos_value"].rank(method="first", numeric_only=True),
q=q,
labels=False if labels is None else None,
precision=0,
duplicates="drop",
)
if labels is not None:
output[label] = output[label].map(dict(enumerate(labels)))
return output
def json_normalize(data: dict[Any, Any]) -> pd.DataFrame:
df = pd.json_normalize(data)
df.columns = [inflection.underscore(c) for c in df.columns]
return df
@uplink.install
class NinjaDataFrameFactory(uplink.converters.Factory):
def create_response_body_converter(self, cls, request_definition):
return lambda response: transform_ninja_df(
df=json_normalize(response.json()["lines"]),
)
uplink_retry = uplink.retry(
when=uplink.retry.when.status(503) | uplink.retry.when.raises(Exception),
stop=uplink.retry.stop.after_attempt(5)
| uplink.retry.stop.after_delay(10),
backoff=uplink.retry.backoff.jittered(multiplier=0.5),
)
@raise_for_status
@uplink_retry
@uplink.returns.json
@uplink.json
@uplink.get
def get_json() -> uplink.commands.RequestDefinitionBuilder:
"""Template for GET requests with JSON as both request and response."""
@raise_for_status
@uplink_retry
@uplink.get
def get_dataframe() -> uplink.commands.RequestDefinitionBuilder:
...
class NinjaConsumer(uplink.Consumer):
default_base_url = "https://poe.ninja/api/data/"
@uplink.ratelimit(calls=2, period=150)
@get_dataframe("CurrencyOverview") # type: ignore
def get_currency_overview(
self,
league: uplink.Query(type=str), # type: ignore
type: uplink.Query(type=CurrencyType), # type: ignore
) -> pd.DataFrame:
...
@uplink.ratelimit(calls=2, period=150)
@get_dataframe("CurrencyHistory") # type: ignore
def get_currency_history(
self,
league: uplink.Query(type=str), # type: ignore
type: uplink.Query(type=CurrencyType), # type: ignore
currency_id: uplink.Query("currencyId", type=int), # type: ignore
) -> pd.DataFrame:
...
@uplink.ratelimit(calls=30, period=150)
@get_dataframe("ItemOverview") # type: ignore
def get_item_overview(
self,
league: uplink.Query(type=str), # type: ignore
type: uplink.Query(type=ItemType), # type: ignore
) -> pd.DataFrame:
...
class ItemFilterContext(pydantic.BaseModel):
"""Entrypoint for accessing economy data from poe.ninja."""
data: dict[str, pd.DataFrame]
_exalted_value: int = pydantic.PrivateAttr(default=0)
_quantile_thresholds: dict[
str, list[dict[str, float]]
] = pydantic.PrivateAttr(default_factory=list)
class Config:
arbitrary_types_allowed = True
def __init__(self, **data) -> None:
super().__init__(**data)
self._exalted_value = self.data["currencies"][
self.data["currencies"].item_name == "Exalted Orb"
].iloc[0]["chaos_value"]
self._quantile_thresholds = {
k: get_quantile_thresholds(df) for k, df in self.data.items()
}
self.data = {
k: self._post_process(k, df) for k, df in self.data.items()
}
def get_display_value(
self,
chaos_value: float,
round_down_by: int = 1,
precision: int = 0,
):
return get_display_value(
chaos_value=chaos_value,
exalted_exchange_value=self._exalted_value,
round_down_by=round_down_by,
precision=precision,
)
def get_quantiles_for_threshold(
self,
key: str,
min_chaos_value: float,
) -> Optional[dict[str, float]]:
for threshold in self._quantile_thresholds[key]:
if threshold["chaos_value"] >= min_chaos_value:
return threshold
return self._quantile_thresholds[key][-1]
@pydantic.validator("data")
def data_must_contain_all_types(
cls, v: dict[str, pd.DataFrame]
) -> dict[str, pd.DataFrame]:
for type_ in [*CurrencyType, *ItemType]:
if type_.pluralized_underscored_value not in v:
raise ValueError(f"{type_} missing from filter context")
return v
@check_io(
df=ExtendedNinjaOverviewSchema.to_schema(),
out=PostProcessedNinjaOverviewSchema.to_schema(),
)
def _post_process(self, key: str, df: pd.DataFrame) -> pd.DataFrame:
df["exalted_value"] = df["chaos_value"].apply(
lambda x: x / self._exalted_value
)
df["display_value"] = df["chaos_value"].apply(
lambda x: get_display_value(
chaos_value=x,
exalted_exchange_value=self._exalted_value,
precision=2,
)
)
return df
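
# Usage sketch (league name is illustrative; a cache miss triggers network
# requests to poe.ninja):
#
#     import asyncio
#
#     async def main():
#         ctx = await initialize_filter_context(league="Standard")
#         print(ctx.get_display_value(300.0))
#
#     asyncio.run(main())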
|
178510
|
from heapq import heappush, heappop, _heapify_max
from typing import Union
# Running scalar median
# Ack: https://medium.com/mind-boggling-algorithms/streaming-algorithms-running-median-of-an-array-using-two-heaps-cd1b61b3c034
def med(s, x: Union[float, int] = None) -> dict:
    """ Running median
    :param s: state dict holding the two heaps and the current median
    :param x: new scalar observation
    :return: updated state; the running median is available as s['median']
    """
if not s or s.get('low_heap') is None:
s = dict()
s['low_heap'] = []
s['high_heap'] = []
s['median'] = 0
if x < s['median']:
heappush(s['low_heap'], x)
_heapify_max(s['low_heap'])
else:
heappush(s['high_heap'], x)
    if len(s['low_heap']) > len(s['high_heap']) + 1:
heappush(s['high_heap'], heappop(s['low_heap']))
_heapify_max(s['low_heap'])
elif len(s['high_heap']) > len(s['low_heap']) + 1:
heappush(s['low_heap'], heappop(s['high_heap']))
_heapify_max(s['low_heap'])
if len(s['low_heap']) == len(s['high_heap']):
        s['median'] = float(s['low_heap'][0] + s['high_heap'][0]) / 2.0
else:
s['median'] = float(s['low_heap'][0]) if len(s['low_heap']) > len(s['high_heap']) else float(s['high_heap'][0])
return s
if __name__=='__main__':
import numpy as np
import random
xs = np.random.randn(random.choice([1,5,10,1000]))
s = {}
for x in xs:
s = med(s=s,x=x)
assert s['median'] == np.median(xs)
|
178535
|
from dynamicserialize.dstypes.com.raytheon.uf.common.datastorage.records import AbstractDataRecord
class ByteDataRecord(AbstractDataRecord):
def __init__(self):
super(ByteDataRecord, self).__init__()
self.byteData = None
def getByteData(self):
return self.byteData
def setByteData(self, byteData):
self.byteData = byteData
def retrieveDataObject(self):
return self.getByteData()
def putDataObject(self, obj):
self.setByteData(obj)
|
178582
|
import torch
from torch import nn
import numpy as np
from collections import OrderedDict
from torch.utils.data import DataLoader
from torch.utils.data import Sampler
from contextlib import nullcontext
import yaml
from yaml import SafeLoader as yaml_Loader, SafeDumper as yaml_Dumper
import os
from tqdm import tqdm
from lib.utils.dotdict import HDict
HDict.L.update_globals({'path':os.path})
def str_presenter(dumper, data):
if len(data.splitlines()) > 1: # check for multiline string
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
return dumper.represent_scalar('tag:yaml.org,2002:str', data)
yaml.representer.SafeRepresenter.add_representer(str, str_presenter)
def read_config_from_file(config_file):
with open(config_file, 'r') as fp:
return yaml.load(fp, Loader=yaml_Loader)
def save_config_to_file(config, config_file):
with open(config_file, 'w') as fp:
return yaml.dump(config, fp, sort_keys=False, Dumper=yaml_Dumper)
class StopTrainingException(Exception):
pass
class CollatedBatch(list):
pass
class DistributedTestDataSampler(Sampler):
def __init__(self, data_source, batch_size, rank, world_size):
data_len = len(data_source)
all_indices = np.arange(data_len, dtype=int)
split_indices = np.array_split(all_indices, world_size)
num_batches = (len(split_indices[0]) + batch_size -1) // batch_size
self.batch_indices = [i.tolist() for i in np.array_split(split_indices[rank],
num_batches)]
def __iter__(self):
return iter(self.batch_indices)
def __len__(self):
return len(self.batch_indices)
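
# Example: with a 10-element dataset, batch_size=2 and world_size=2, rank 0
# yields the index batches [[0, 1], [2, 3], [4]] and rank 1 yields
# [[5, 6], [7, 8], [9]], so both ranks perform the same number of steps.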
def cached_property(func):
    attribute_name = f'_{func.__name__}'
    def _wrapper(self):
        try:
            return getattr(self, attribute_name)
        except AttributeError:
            val = func(self)
            self.__dict__[attribute_name] = val
            return val
    return property(_wrapper)
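
# Behavior sketch: the decorated method runs at most once per instance; the
# result is memoized in the instance __dict__ under a leading-underscore key.
#
#     class Example:
#         @cached_property
#         def value(self):
#             print("computed")
#             return 42
#
#     e = Example()
#     e.value  # prints "computed" and returns 42
#     e.value  # returns 42 without recomputing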
class TrainingBase:
def __init__(self, config=None, ddp_rank=0, ddp_world_size=1):
self.config_input = config
self.config = self.get_default_config()
if config is not None:
for k in config.keys():
if not k in self.config:
raise KeyError(f'Unknown config "{k}"')
self.config.update(config)
self.state = self.get_default_state()
self.ddp_rank = ddp_rank
self.ddp_world_size = ddp_world_size
self.is_distributed = (self.ddp_world_size > 1)
self.is_main_rank = (self.ddp_rank == 0)
@cached_property
def train_dataset(self):
raise NotImplementedError
@cached_property
def val_dataset(self):
raise NotImplementedError
@cached_property
def collate_fn(self):
return None
@cached_property
def train_sampler(self):
return torch.utils.data.DistributedSampler(self.train_dataset,
shuffle=True)
@cached_property
def train_dataloader(self):
common_kwargs = dict(
dataset=self.train_dataset,
batch_size=self.config.batch_size,
collate_fn=self.collate_fn,
pin_memory=True,
)
if self.config.dataloader_workers > 0:
common_kwargs.update(
num_workers=self.config.dataloader_workers,
persistent_workers=True,
multiprocessing_context=self.config.dataloader_mp_context,
)
if not self.is_distributed:
dataloader = DataLoader(**common_kwargs, shuffle=True,
drop_last=False)
else:
dataloader = DataLoader(**common_kwargs,
sampler=self.train_sampler)
return dataloader
@cached_property
def val_dataloader(self):
common_kwargs = dict(
dataset=self.val_dataset,
collate_fn=self.collate_fn,
pin_memory=True,
)
if self.config.dataloader_workers > 0:
common_kwargs.update(
num_workers=self.config.dataloader_workers,
persistent_workers=True,
multiprocessing_context=self.config.dataloader_mp_context,
)
prediction_batch_size = self.config.batch_size*self.config.prediction_bmult
if not self.is_distributed:
dataloader = DataLoader(**common_kwargs,
batch_size=prediction_batch_size,
shuffle=False, drop_last=False)
else:
sampler = DistributedTestDataSampler(data_source=self.val_dataset,
batch_size=prediction_batch_size,
rank=self.ddp_rank,
world_size=self.ddp_world_size)
dataloader = DataLoader(**common_kwargs, batch_sampler=sampler)
return dataloader
@cached_property
def base_model(self):
raise NotImplementedError
@cached_property
def model(self):
model = self.base_model
if self.is_distributed:
model = torch.nn.parallel.DistributedDataParallel(model,device_ids=[self.ddp_rank],
output_device=self.ddp_rank)
return model
@cached_property
def optimizer(self):
config = self.config
optimizer_class = getattr(torch.optim, config.optimizer)
optimizer = optimizer_class(self.model.parameters(),
lr=config.max_lr,
**config.optimizer_params)
return optimizer
def get_default_config(self):
return HDict(
scheme = None,
model_name = 'unnamed_model',
distributed = False,
random_seed = None,
num_epochs = 100,
save_path = HDict.L('c:path.join("models",c.model_name)'),
checkpoint_path = HDict.L('c:path.join(c.save_path,"checkpoint")'),
config_path = HDict.L('c:path.join(c.save_path,"config")'),
summary_path = HDict.L('c:path.join(c.save_path,"summary")'),
log_path = HDict.L('c:path.join(c.save_path,"logs")'),
validation_frequency = 1,
batch_size = HDict.L('c:128 if c.distributed else 32'),
optimizer = 'Adam' ,
max_lr = 5e-4 ,
clip_grad_value = None ,
optimizer_params = {} ,
dataloader_workers = 0 ,
dataloader_mp_context = 'forkserver',
training_type = 'normal' ,
evaluation_type = 'validation',
predictions_path = HDict.L('c:path.join(c.save_path,"predictions")'),
grad_accum_steps = 1 ,
prediction_bmult = 1 ,
)
def get_default_state(self):
state = HDict(
current_epoch = 0,
global_step = 0,
)
return state
def config_summary(self):
if not self.is_main_rank: return
for k,v in self.config.get_dict().items():
print(f'{k} : {v}', flush=True)
def save_config_file(self):
if not self.is_main_rank: return
os.makedirs(os.path.dirname(self.config.config_path), exist_ok=True)
save_config_to_file(self.config.get_dict(), self.config.config_path+'.yaml')
save_config_to_file(self.config_input, self.config.config_path+'_input.yaml')
def model_summary(self):
if not self.is_main_rank: return
os.makedirs(os.path.dirname(self.config.summary_path), exist_ok=True)
trainable_params = 0
non_trainable_params = 0
for p in self.model.parameters():
if p.requires_grad:
trainable_params += p.numel()
else:
non_trainable_params += p.numel()
summary = dict(
trainable_params = trainable_params,
non_trainable_params = non_trainable_params,
model_representation = repr(self.model),
)
with open(self.config.summary_path+'.txt', 'w') as fp:
yaml.dump(summary, fp, sort_keys=False, Dumper=yaml_Dumper)
def save_checkpoint(self):
if not self.is_main_rank: return
ckpt_path = self.config.checkpoint_path
os.makedirs(ckpt_path, exist_ok=True)
torch.save(self.state, os.path.join(ckpt_path, 'training_state'))
torch.save(self.base_model.state_dict(), os.path.join(ckpt_path, 'model_state'))
torch.save(self.optimizer.state_dict(), os.path.join(ckpt_path, 'optimizer_state'))
print(f'Checkpoint saved to: {ckpt_path}',flush=True)
def load_checkpoint(self):
ckpt_path = self.config.checkpoint_path
try:
self.state.update(torch.load(os.path.join(ckpt_path, 'training_state')))
self.base_model.load_state_dict(torch.load(os.path.join(ckpt_path, 'model_state')))
self.optimizer.load_state_dict(torch.load(os.path.join(ckpt_path, 'optimizer_state')))
if self.is_main_rank:
print(f'Checkpoint loaded from: {ckpt_path}',flush=True)
torch.cuda.empty_cache()
except FileNotFoundError:
pass
# Callbacks
def on_train_begin(self):
pass
def on_train_end(self):
pass
def on_epoch_begin(self, logs, training):
pass
def on_epoch_end(self, logs, training):
pass
def on_batch_begin(self, i, logs, training):
pass
def on_batch_end(self, i, logs, training):
pass
# Logging
def get_verbose_logs(self):
return OrderedDict(loss='0.4f')
@cached_property
def verbose_logs(self):
return self.get_verbose_logs()
def update_logs(self, logs, training, **updates):
if training:
logs.update(updates)
else:
logs.update(('val_'+k,v) for k,v in updates.items())
def log_description(self, i, logs, training):
if training:
return list(f'{k} = {logs[k]:{f}}'
for k,f in self.verbose_logs.items())
else:
return list(f'val_{k} = {logs["val_"+k]:{f}}'
for k,f in self.verbose_logs.items())
# Training loop
def preprocess_batch(self, batch):
if isinstance(batch, CollatedBatch):
return CollatedBatch(self.preprocess_batch(b) for b in batch)
elif hasattr(batch, 'cuda'):
return batch.cuda(non_blocking=True)
elif hasattr(batch, 'items'):
return batch.__class__((k,v.cuda(non_blocking=True)) for k,v in batch.items())
elif hasattr(batch, '__iter__'):
return batch.__class__(v.cuda(non_blocking=True) for v in batch)
else:
raise ValueError(f'Unsupported batch type: {type(batch)}')
def calculate_loss(self, outputs, inputs):
raise NotImplementedError
def grad_accum_gather_outputs(self, outputs):
return torch.cat(outputs, dim=0)
def grad_accum_reduce_loss(self, loss):
with torch.no_grad():
total_loss = sum(loss)
return total_loss
def grad_accum_collator(self, dataloader):
dataloader_iter = iter(dataloader)
if self.config.grad_accum_steps == 1:
yield from dataloader_iter
else:
while True:
collated_batch = CollatedBatch()
try:
for _ in range(self.config.grad_accum_steps):
collated_batch.append(next(dataloader_iter))
except StopIteration:
break
finally:
if len(collated_batch) > 0: yield collated_batch
@cached_property
def train_steps_per_epoch(self):
if self.config.grad_accum_steps == 1:
return len(self.train_dataloader)
else:
return (len(self.train_dataloader) + self.config.grad_accum_steps - 1)\
// self.config.grad_accum_steps
@cached_property
def validation_steps_per_epoch(self):
return len(self.val_dataloader)
def training_step(self, batch, logs):
for param in self.model.parameters():
param.grad = None
if not isinstance(batch, CollatedBatch):
outputs = self.model(batch)
loss = self.calculate_loss(outputs=outputs, inputs=batch)
loss.backward()
else:
num_nested_batches = len(batch)
outputs = CollatedBatch()
loss = CollatedBatch()
sync_context = self.model.no_sync() \
if self.is_distributed else nullcontext()
with sync_context:
for b in batch:
o = self.model(b)
l = self.calculate_loss(outputs=o, inputs=b) / num_nested_batches
l.backward()
outputs.append(o)
loss.append(l)
outputs = self.grad_accum_gather_outputs(outputs)
loss = self.grad_accum_reduce_loss(loss)
if self.config.clip_grad_value is not None:
nn.utils.clip_grad_value_(self.model.parameters(), self.config.clip_grad_value)
self.optimizer.step()
return outputs, loss
def validation_step(self, batch, logs):
outputs = self.model(batch)
loss = self.calculate_loss(outputs=outputs, inputs=batch)
return outputs, loss
def initialize_metrics(self, logs, training):
pass
def update_metrics(self, outputs, inputs, logs, training):
pass
def initialize_losses(self, logs, training):
self._total_loss = 0.
def update_losses(self, i, loss, inputs, logs, training):
if not self.is_distributed:
step_loss = loss.item()
else:
if training:
loss = loss.detach()
torch.distributed.all_reduce(loss)
step_loss = loss.item()/self.ddp_world_size
self._total_loss += step_loss
self.update_logs(logs=logs, training=training,
loss=self._total_loss/(i+1))
def train_epoch(self, epoch, logs):
self.model.train()
self.initialize_losses(logs, True)
self.initialize_metrics(logs, True)
if self.is_distributed:
self.train_sampler.set_epoch(epoch)
gen = self.grad_accum_collator(self.train_dataloader)
if self.is_main_rank:
gen = tqdm(gen, dynamic_ncols=True,
total=self.train_steps_per_epoch)
try:
for i, batch in enumerate(gen):
self.on_batch_begin(i, logs, True)
batch = self.preprocess_batch(batch)
outputs, loss = self.training_step(batch, logs)
self.state.global_step = self.state.global_step + 1
logs.update(global_step=self.state.global_step)
self.update_losses(i, loss, batch, logs, True)
self.update_metrics(outputs, batch, logs, True)
self.on_batch_end(i, logs, True)
if self.is_main_rank:
desc = 'Training: '+'; '.join(self.log_description(i, logs, True))
gen.set_description(desc)
finally:
if self.is_main_rank: gen.close()
for param in self.model.parameters():
param.grad = None
def minimal_train_epoch(self, epoch, logs):
self.model.train()
if self.is_distributed:
self.train_sampler.set_epoch(epoch)
gen = self.grad_accum_collator(self.train_dataloader)
if self.is_main_rank:
gen = tqdm(gen, dynamic_ncols=True, desc='Training: ',
total=self.train_steps_per_epoch)
try:
for i, batch in enumerate(gen):
self.on_batch_begin(i, logs, True)
batch = self.preprocess_batch(batch)
_ = self.training_step(batch, logs)
self.state.global_step = self.state.global_step + 1
logs.update(global_step=self.state.global_step)
self.on_batch_end(i, logs, True)
finally:
if self.is_main_rank: gen.close()
for param in self.model.parameters():
param.grad = None
def validation_epoch(self, epoch, logs):
self.model.eval()
self.initialize_losses(logs, False)
self.initialize_metrics(logs, False)
gen = self.val_dataloader
if self.is_main_rank:
gen = tqdm(gen, dynamic_ncols=True,
total=self.validation_steps_per_epoch)
try:
with torch.no_grad():
for i, batch in enumerate(gen):
self.on_batch_begin(i, logs, False)
batch = self.preprocess_batch(batch)
outputs, loss = self.validation_step(batch, logs)
self.update_losses(i, loss, batch, logs, False)
self.update_metrics(outputs, batch, logs, False)
self.on_batch_end(i, logs, False)
if self.is_main_rank:
desc = 'Validation: '+'; '.join(self.log_description(i, logs, False))
gen.set_description(desc)
finally:
if self.is_main_rank: gen.close()
def load_history(self):
history_file = os.path.join(self.config.log_path, 'history.yaml')
try:
with open(history_file, 'r') as fp:
return yaml.load(fp, Loader=yaml_Loader)
except FileNotFoundError:
return []
def save_history(self, history):
os.makedirs(self.config.log_path, exist_ok=True)
history_file = os.path.join(self.config.log_path, 'history.yaml')
with open(history_file, 'w') as fp:
yaml.dump(history, fp, sort_keys=False, Dumper=yaml_Dumper)
def train_model(self):
if self.is_main_rank:
history = self.load_history()
starting_epoch = self.state.current_epoch
self.on_train_begin()
should_stop_training = False
try:
for i in range(starting_epoch, self.config.num_epochs):
self.state.current_epoch = i
if self.is_main_rank:
print(f'\nEpoch {i+1}/{self.config.num_epochs}:', flush=True)
logs = dict(epoch = self.state.current_epoch,
global_step = self.state.global_step)
try:
self.on_epoch_begin(logs, True)
if self.config.training_type == 'normal':
self.train_epoch(i, logs)
elif self.config.training_type == 'minimal':
self.minimal_train_epoch(i, logs)
else:
raise ValueError(f'Unknown training type: {self.config.training_type}')
self.on_epoch_end(logs, True)
except StopTrainingException:
should_stop_training = True
try:
if (self.val_dataloader is not None)\
and (not ((i+1) % self.config.validation_frequency)):
self.on_epoch_begin(logs, False)
if self.config.evaluation_type == 'validation':
self.validation_epoch(i, logs)
elif self.config.evaluation_type == 'prediction':
self.prediction_epoch(i, logs)
else:
raise ValueError(f'Unknown evaluation type: {self.config.evaluation_type}')
self.on_epoch_end(logs, False)
except StopTrainingException:
should_stop_training = True
self.state.current_epoch = i + 1
if self.is_main_rank:
self.save_checkpoint()
history.append(logs)
self.save_history(history)
if should_stop_training:
if self.is_main_rank:
print('Stopping training ...')
break
finally:
self.on_train_end()
def distributed_barrier(self):
if self.is_distributed:
dummy = torch.ones((),dtype=torch.int64).cuda()
torch.distributed.all_reduce(dummy)
# Prediction logic
def prediction_step(self, batch):
predictions = self.model(batch)
if isinstance(batch, torch.Tensor):
return dict(inputs=batch, predictions=predictions)
        elif isinstance(batch, list):
            outputs = batch.copy()
            outputs.append(predictions)
            return outputs
        elif isinstance(batch, dict):
            outputs = batch.copy()
            outputs.update(predictions=predictions)
            return outputs
        else:
            raise ValueError(f'Unsupported batch type: {type(batch)}')
def prediction_loop(self, dataloader):
self.model.eval()
outputs = []
if self.is_main_rank:
gen = tqdm(dataloader, dynamic_ncols=True)
else:
gen = dataloader
try:
with torch.no_grad():
for batch in gen:
batch = self.preprocess_batch(batch)
outputs.append(self.prediction_step(batch))
finally:
if self.is_main_rank: gen.close()
return outputs
def preprocess_predictions(self, outputs):
if isinstance(outputs[0], torch.Tensor):
return torch.cat(outputs, dim=0)
elif isinstance(outputs[0], dict):
return {k: torch.cat([o[k] for o in outputs], dim=0)
for k in outputs[0].keys()}
elif isinstance(outputs[0], list):
return [torch.cat([o[i] for o in outputs], dim=0)
for i in range(len(outputs[0]))]
else:
raise ValueError('Unsupported output type')
def postprocess_predictions(self, outputs):
if isinstance(outputs, torch.Tensor):
return outputs.cpu().numpy()
elif isinstance(outputs, dict):
return {k: v.cpu().numpy() for k, v in outputs.items()}
elif isinstance(outputs, list):
return [v.cpu().numpy() for v in outputs]
else:
raise ValueError('Unsupported output type')
    def distributed_gather_tensor(self, tensors):
shapes = torch.zeros(self.ddp_world_size+1, dtype=torch.long).cuda()
shapes[self.ddp_rank+1] = tensors.shape[0]
torch.distributed.all_reduce(shapes)
offsets = torch.cumsum(shapes, dim=0)
all_tensors = torch.zeros(offsets[-1], *tensors.shape[1:], dtype=tensors.dtype).cuda()
all_tensors[offsets[self.ddp_rank]:offsets[self.ddp_rank+1]] = tensors
torch.distributed.all_reduce(all_tensors)
return all_tensors
def distributed_gather_predictions(self, predictions):
if self.is_main_rank:
print('Gathering predictions from all ranks...')
        if isinstance(predictions, torch.Tensor):
            all_predictions = self.distributed_gather_tensor(predictions)
        elif isinstance(predictions, list):
            all_predictions = [self.distributed_gather_tensor(pred) for pred in predictions]
        elif isinstance(predictions, dict):
            all_predictions = {key: self.distributed_gather_tensor(pred)
                               for key, pred in predictions.items()}
else:
raise ValueError('Unsupported output type')
if self.is_main_rank:
print('Done.')
return all_predictions
def save_predictions(self, dataset_name, predictions):
os.makedirs(self.config.predictions_path, exist_ok=True)
predictions_file = os.path.join(self.config.predictions_path, f'{dataset_name}.pt')
torch.save(predictions, predictions_file)
print(f'Saved predictions to {predictions_file}')
def evaluate_predictions(self, predictions):
raise NotImplementedError
def prediction_epoch(self, epoch, logs):
if self.is_main_rank:
print(f'Predicting on validation dataset...')
dataloader = self.val_dataloader
outputs = self.prediction_loop(dataloader)
outputs = self.preprocess_predictions(outputs)
if self.is_distributed:
outputs = self.distributed_gather_predictions(outputs)
predictions = self.postprocess_predictions(outputs)
if self.is_main_rank:
self.save_predictions('validation', predictions)
results = self.evaluate_predictions(predictions)
results = {f'val_{k}': v for k, v in results.items()}
logs.update(results)
if self.is_main_rank:
desc = 'Validation: '+'; '.join(f'{k}: {v:.4f}' for k, v in results.items())
print(desc, flush=True)
# Interface
def prepare_for_training(self):
self.config_summary()
self.save_config_file()
self.load_checkpoint()
self.model_summary()
def execute_training(self):
self.prepare_for_training()
self.train_model()
self.finalize_training()
def finalize_training(self):
pass
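
# Subclassing sketch (MyDataset and MyModel are placeholders): a concrete
# trainer typically overrides the dataset/model cached properties and the loss.
#
#     class MyTraining(TrainingBase):
#         @cached_property
#         def train_dataset(self):
#             return MyDataset(split='train')
#         @cached_property
#         def val_dataset(self):
#             return MyDataset(split='val')
#         @cached_property
#         def base_model(self):
#             return MyModel().cuda()
#         def calculate_loss(self, outputs, inputs):
#             return nn.functional.cross_entropy(outputs, inputs[1])
#
#     MyTraining(config=dict(num_epochs=10)).execute_training()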
|
178584
|
from .plates import (
get_plate_class,
Plate96, Plate384, Plate1536, Plate2x4,
Plate4ti0960, Plate4ti0130, PlateLabcyteEchoLp0200Ldv,
PlateLabcyteEchoP05525Pp, Trough8x1
)
|
178588
|
import numpy as np
from scipy import ndimage
'''
See paper: Sensors 2018, 18(4), 1055; https://doi.org/10.3390/s18041055
"Divide and Conquer-Based 1D CNN Human Activity Recognition Using Test Data Sharpening"
by <NAME> & <NAME>
This code loads and sharpens UCI HAR Dataset data.
UCI HAR Dataset data can be downloaded from:
https://archive.ics.uci.edu/ml/datasets/human+activity+recognition+using+smartphones
Unzipped dataset should be placed inside the '../data/UCI HAR Dataset/' folder.
'''
dir_path = '../data/UCI HAR Dataset/'
def load_x(train_or_test):
global dir_path
if train_or_test is "train":
x_path = dir_path + 'train/X_train.txt'
elif train_or_test is "test":
x_path = dir_path + 'test/X_test.txt'
with open(x_path) as f:
container = f.readlines()
result = []
for line in container:
tmp1 = line.strip()
        tmp2 = tmp1.replace('  ', ' ')  # collapses inconsistent double blank spaces
        tmp_ary = list(map(float, tmp2.split(' ')))  # list() so np.array gets floats
        result.append(tmp_ary)
return np.array(result)
def load_y(train_or_test):
global dir_path
if train_or_test is "train":
y_path = dir_path + 'train/y_train.txt'
elif train_or_test is "test":
y_path = dir_path + 'test/y_test.txt'
with open(y_path) as f:
container = f.readlines()
result = []
for line in container:
num_str = line.strip()
result.append(int(num_str))
return np.array(result)
def sharpen(x_test, sigma, alpha):
r = x_test.shape[0]
c = x_test.shape[1]
container = np.empty((r, c))
i = 0
for row in x_test:
test = np.array([row])
blurred = ndimage.gaussian_filter(test, sigma)
sharpened = test + alpha * (test - blurred)
container[i] = sharpened
i = i + 1
return container
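
# Usage sketch (sigma and alpha values are illustrative; the paper tunes them
# per classifier):
#
#     x_test = load_x("test")
#     x_test_sharpened = sharpen(x_test, sigma=1.0, alpha=0.5)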
|
178597
|
from subdaap.models import Server
from daapserver.utils import generate_persistent_id
from daapserver import provider
import logging
# Logger instance
logger = logging.getLogger(__name__)
class Provider(provider.Provider):
# SubSonic has support for artwork.
supports_artwork = True
# Persistent IDs are supported.
supports_persistent_id = True
def __init__(self, server_name, db, state, connections, cache_manager):
"""
"""
super(Provider, self).__init__()
self.server_name = server_name
self.db = db
self.state = state
self.connections = connections
self.cache_manager = cache_manager
self.setup_state()
self.setup_server()
def setup_state(self):
"""
"""
if "persistent_id" not in self.state:
self.state["persistent_id"] = generate_persistent_id()
def setup_server(self):
"""
"""
self.server = Server(db=self.db)
# Set server name and persistent ID.
self.server.name = self.server_name
self.server.persistent_id = self.state["persistent_id"]
def get_artwork_data(self, session, item):
"""
Get artwork data from cache or remote.
"""
cache_item = self.cache_manager.artwork_cache.get(item.id)
if cache_item.iterator is None:
remote_fd = self.connections[item.database_id].get_artwork_fd(
item.remote_id, item.file_suffix)
self.cache_manager.artwork_cache.download(
item.id, cache_item, remote_fd)
logger.debug("Artwork data from remote, size=unknown")
return cache_item.iterator(), None, None
logger.debug(
"Artwork data from cache, size=%d", cache_item.size)
return cache_item.iterator(), None, cache_item.size
def get_item_data(self, session, item, byte_range=None):
"""
Get item data from cache or remote.
"""
cache_item = self.cache_manager.item_cache.get(item.id)
connection = self.connections[item.database_id]
is_transcode = connection.needs_transcoding(item.file_suffix)
item_file_type = item.file_type
if is_transcode:
item_file_type = connection.transcode_format[item.file_type]
if cache_item.iterator is None:
remote_fd = connection.get_item_fd(
item.remote_id, item.file_suffix)
self.cache_manager.item_cache.download(
item.id, cache_item, remote_fd)
item_size = item.file_size
# Determine returned size by checking for transcode.
if is_transcode:
item_size = -1
logger.debug(
"Item data from remote: range=%s, type=%s, size=%d",
byte_range, item_file_type, item_size)
return cache_item.iterator(byte_range), item_file_type, item_size
logger.debug(
"Item data from cache, range=%s, type=%s, size=%d",
byte_range, item.file_type, item.file_size)
return cache_item.iterator(byte_range), item_file_type, \
cache_item.size
|
178704
|
from paver.easy import sh
import time
import yaml
from tests.util import clean, invoke_drkns
def test_check():
clean()
output = invoke_drkns('nominalcase', 'check')
assert (len(output) == 0)
def test_list():
clean()
output = invoke_drkns('nominalcase', 'list')
assert(len(output) > 6)
assert('project1.run' in output)
assert('project1.dependency1.hello' in output)
assert('project2.dependency1WithAnotherKey.run' not in output)
def test_debug():
clean()
output = invoke_drkns('nominalcase', 'debug')
assert(len(output) > 6)
def test_generate():
output = invoke_drkns('nominalcase', 'generate')
data = yaml.safe_load(output)
assert('drkns-check' in data['jobs']['project1']['needs'])
assert('dependency1' in data['jobs']['project1']['needs'])
def test_run_partial():
clean()
invoke_drkns('nominalcase', 'run project1.run')
ls_output = sh('ls /tmp', capture=True)
assert('project1.drknsdemo.out' in ls_output)
assert('project2.drknsdemo.out' not in ls_output)
clean()
invoke_drkns('nominalcase', 'run project1')
ls_output = sh('ls /tmp', capture=True)
assert ('project1.drknsdemo.out' in ls_output)
def test_run_complete():
clean()
t0 = time.time()
drkns_output = invoke_drkns('nominalcase', 'run')
ls_output = sh('ls /tmp', capture=True)
t1 = time.time()
execution_time = t1 - t0
assert(execution_time < 10)
assert('dummyCleanup' in drkns_output)
assert('project1.drknsdemo.out' in ls_output)
assert('project2.drknsdemo.out' in ls_output)
assert('project2built.drknsdemo.out' in ls_output)
def test_run_cache():
clean()
invoke_drkns('nominalcase', 'run')
ls_output = sh('ls /tmp', capture=True)
assert('project1.drknsdemo.out' in ls_output)
assert('project2.drknsdemo.out' in ls_output)
sh('rm /tmp/project*.out')
invoke_drkns('nominalcase', 'run')
ls_output = sh('ls /tmp', capture=True)
    # the hash hasn't changed, so the build should not have generated output
assert('project1.drknsdemo.out' not in ls_output)
assert('project2.drknsdemo.out' not in ls_output)
project_file = 'testprojects/nominalcase/project1/main.py'
sh('echo "# for test" >> ' + project_file, capture=True)
invoke_drkns('nominalcase', 'run')
ls_output = sh('ls /tmp', capture=True)
assert('project1.drknsdemo.out' in ls_output)
assert('project2.drknsdemo.out' not in ls_output)
sh('cat ' + project_file + ' | grep -v test > ' + project_file + '.mod')
sh('rm ' + project_file)
sh('mv ' + project_file + '.mod ' + project_file)
ignored_file_1 = 'testprojects/nominalcase/project1/main.py.tmp'
sh('rm -rf /tmp/project*.drknsdemo.out', capture=True)
sh('echo "something that should not trigger a build" >> ' + ignored_file_1)
ignored_directory = 'testprojects/nominalcase/project1/ignoreddirectory'
sh('mkdir ' + ignored_directory)
ignored_file_2 = ignored_directory + '/somefile'
sh('echo "some other thing that should not trigger a build either" >> ' +\
ignored_file_2)
invoke_drkns('nominalcase', 'run')
ls_output = sh('ls /tmp', capture=True)
assert('project1.drknsdemo.out' not in ls_output)
assert('project2.drknsdemo.out' not in ls_output)
sh('rm -rf ' + ignored_file_1 + ' ' + ignored_directory)
invoke_drkns('nominalcase', 'forget project1')
invoke_drkns('nominalcase', 'run')
ls_output = sh('ls /tmp', capture=True)
assert('project1.drknsdemo.out' in ls_output)
def test_run_no_multi_dependencies_execution():
clean()
invoke_drkns('nominalcase', 'run')
dependency_output_file = '/tmp/dependency1.drknsdemo.out'
line_count = 0
with open(dependency_output_file, 'r') as f:
for _ in f:
line_count += 1
assert(line_count == 1)
|
178828
|
import pytest
import numpy as np
from synthpop.census_helpers import Census
from synthpop import categorizer as cat
@pytest.fixture
def c():
return Census('bfa6b4e541243011fab6307a31aed9e91015ba90')
@pytest.fixture
def acs_data(c):
population = ['B01001_001E']
sex = ['B01001_002E', 'B01001_026E']
race = ['B02001_0%02dE' % i for i in range(1, 11)]
male_age_columns = ['B01001_0%02dE' % i for i in range(3, 26)]
female_age_columns = ['B01001_0%02dE' % i for i in range(27, 50)]
all_columns = population + sex + race + male_age_columns + \
female_age_columns
df = c.block_group_query(all_columns, "06", "075", tract="030600")
return df
@pytest.fixture
def pums_data(c):
return c.download_population_pums("06", "07506")
def test_categorize(acs_data, pums_data):
p_acs_cat = cat.categorize(acs_data, {
("population", "total"): "B01001_001E",
("age", "19 and under"): "B01001_003E + B01001_004E + B01001_005E + "
"B01001_006E + B01001_007E + B01001_027E + "
"B01001_028E + B01001_029E + B01001_030E + "
"B01001_031E",
("age", "20 to 35"): "B01001_008E + B01001_009E + B01001_010E + "
"B01001_011E + B01001_012E + B01001_032E + "
"B01001_033E + B01001_034E + B01001_035E + "
"B01001_036E",
("age", "35 to 60"): "B01001_013E + B01001_014E + B01001_015E + "
"B01001_016E + B01001_017E + B01001_037E + "
"B01001_038E + B01001_039E + B01001_040E + "
"B01001_041E",
("age", "above 60"): "B01001_018E + B01001_019E + B01001_020E + "
"B01001_021E + B01001_022E + B01001_023E + "
"B01001_024E + B01001_025E + B01001_042E + "
"B01001_043E + B01001_044E + B01001_045E + "
"B01001_046E + B01001_047E + B01001_048E + "
"B01001_049E",
("race", "white"): "B02001_002E",
("race", "black"): "B02001_003E",
("race", "asian"): "B02001_005E",
("race", "other"): "B02001_004E + B02001_006E + B02001_007E + "
"B02001_008E",
("sex", "male"): "B01001_002E",
("sex", "female"): "B01001_026E"
}, index_cols=['NAME'])
assert len(p_acs_cat) == 3
assert len(p_acs_cat.columns) == 11
assert len(p_acs_cat.columns.names) == 2
assert p_acs_cat.columns[0][0] == "age"
assert np.all(cat.sum_accross_category(p_acs_cat) < 2)
def age_cat(r):
if r.AGEP <= 19:
return "19 and under"
elif r.AGEP <= 35:
return "20 to 35"
elif r.AGEP <= 60:
return "35 to 60"
return "above 60"
def race_cat(r):
if r.RAC1P == 1:
return "white"
elif r.RAC1P == 2:
return "black"
elif r.RAC1P == 6:
return "asian"
return "other"
def sex_cat(r):
if r.SEX == 1:
return "male"
return "female"
pums_data, jd_persons = cat.joint_distribution(
pums_data,
cat.category_combinations(p_acs_cat.columns),
{"age": age_cat, "race": race_cat, "sex": sex_cat}
)
|
178849
|
from .Exceptions import BFSyntaxError, BFSemanticError
from .Token import Token
from .General import is_token_literal
class Parser:
"""
Used to easily iterate tokens
"""
def __init__(self, tokens):
self.tokens = tokens
self.current_token_index = 0
# parsing tokens
def current_token(self):
if self.current_token_index >= len(self.tokens):
return None
else:
return self.token_at_index(self.current_token_index)
def advance_token(self, amount=1):
self.current_token_index += amount
def advance_to_token_at_index(self, token_index):
self.current_token_index = token_index
def token_at_index(self, index):
assert index < len(self.tokens)
return self.tokens[index]
def next_token(self, next_amount=1):
return self.token_at_index(self.current_token_index + next_amount)
def find_matching(self, starting_index=None):
"""
:return: the index of the token that matches the current token
:param starting_index (optional) - the index of the token we want to match
for example, if current token is {
it returns the index of the matching }
"""
if starting_index is None:
starting_index = self.current_token_index
tokens = self.tokens
token_to_match = tokens[starting_index]
if token_to_match.type == Token.LBRACE:
inc = Token.LBRACE
dec = Token.RBRACE
elif token_to_match.type == Token.LBRACK:
inc = Token.LBRACK
dec = Token.RBRACK
elif token_to_match.type == Token.LPAREN:
inc = Token.LPAREN
dec = Token.RPAREN
else:
raise BFSemanticError("No support for matching %s" % str(token_to_match))
i = starting_index
cnt = 0
while i < len(tokens):
if tokens[i].type == inc:
cnt += 1
elif tokens[i].type == dec:
cnt -= 1
if cnt == 0:
return i
i += 1
raise BFSyntaxError("Did not find matching %s for %s" % (dec, str(token_to_match)))
def check_next_tokens_are(self, tokens_list, starting_index=None):
if starting_index is None:
starting_index = self.current_token_index
# used for "assertion" and print a nice message to the user
if starting_index + len(tokens_list) >= len(self.tokens):
raise BFSyntaxError("Expected %s after %s" % (str(tokens_list), str(self.tokens[starting_index])))
for i in range(0, len(tokens_list)):
if self.tokens[starting_index + 1 + i].type != tokens_list[i]:
raise BFSyntaxError("Expected %s after %s" % (str(tokens_list[i]), [str(t) for t in self.tokens[starting_index: starting_index+1+i]]))
def check_current_tokens_are(self, tokens_list):
self.check_next_tokens_are(tokens_list, starting_index=self.current_token_index - 1)
def compile_array_initialization_list(self):
# {1, 2, 3, ...} or {array_initialization_list, array_initialization_list, array_initialization_list, ...} or string
# parses the definition and returns a list (of list of list ....) of literal tokens (NUM, CHAR, TRUE, FALSE)
list_tokens = []
if self.current_token().type == Token.STRING:
string_token = self.current_token()
line, column = string_token.line, string_token.column
for char in string_token.data:
list_tokens.append(Token(Token.NUM, line, column, str(ord(char))))
self.advance_token() # point to after STRING
return list_tokens
assert self.current_token().type == Token.LBRACE
self.advance_token() # skip to after LBRACE
while is_token_literal(self.current_token()) or self.current_token().type == Token.LBRACE:
if self.current_token().type == Token.LBRACE: # list of (literals | list)
list_tokens.append(self.compile_array_initialization_list())
else: # literal
list_tokens.append(self.current_token())
self.advance_token() # skip literal
if self.current_token().type not in [Token.COMMA, Token.RBRACE]:
raise BFSyntaxError("Unexpected %s (expected comma (,) or RBRACE (}))" % self.current_token())
if self.current_token().type == Token.COMMA:
self.advance_token() # skip comma
if self.current_token().type == Token.RBRACE:
break
self.check_current_tokens_are([Token.RBRACE])
self.advance_token() # skip RBRACE
return list_tokens
|
178881
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from . import verilog
__intrinsics__ = ('set_header', 'get_header',
'set_global_offset', 'set_global_addrs',
'set_global_addr_map', 'write_global_addr_map', 'load_global_addr_map',
'start', 'wait', 'sw_rst')
def set_header(fsm, saxi, index, header, wordsize=4):
awaddr = (verilog.header_reg + index) * wordsize
saxi.write(fsm, awaddr, header)
def get_header(fsm, saxi, index, wordsize=4):
araddr = (verilog.header_reg + index) * wordsize
h = saxi.read(fsm, araddr)
return h
def set_global_offset(fsm, saxi, addr, **opt):
wordsize = opt['wordsize'] if 'wordsize' in opt else 4
awaddr = verilog.control_reg_global_offset * wordsize
saxi.write(fsm, awaddr, addr)
def set_global_addrs(fsm, saxi, *addrs, **opt):
wordsize = opt['wordsize'] if 'wordsize' in opt else 4
offset = opt['offset'] if 'offset' in opt else 0
awaddr = (offset + verilog.control_reg_global_addr) * wordsize
for addr in addrs:
saxi.write(fsm, awaddr, addr)
awaddr += wordsize
def set_global_addr_map(fsm, saxi, memory, map_addr, *addrs, **opt):
write_global_addr_map(fsm, memory, map_addr, *addrs, **opt)
load_global_addr_map(fsm, saxi, map_addr, **opt)
def write_global_addr_map(fsm, memory, map_addr, *addrs, **opt):
wordsize = opt['wordsize'] if 'wordsize' in opt else 4
offset = opt['offset'] if 'offset' in opt else 0
for i, addr in enumerate(addrs):
memory.write_word(fsm, i + offset, map_addr, addr, wordsize * 8)
def load_global_addr_map(fsm, saxi, map_addr, **opt):
wordsize = opt['wordsize'] if 'wordsize' in opt else 4
awaddr = verilog.control_reg_addr_global_addr_map * wordsize
saxi.write(fsm, awaddr, map_addr)
awaddr = verilog.control_reg_load_global_addr_map * wordsize
saxi.write(fsm, awaddr, 1)
araddr = verilog.control_reg_load_global_addr_map * wordsize
b = fsm.current
v = saxi.read(fsm, araddr)
fsm.If(v != 0).goto(b)
fsm.If(v == 0).goto_next()
araddr = verilog.control_reg_busy_global_addr_map * wordsize
b = fsm.current
v = saxi.read(fsm, araddr)
fsm.If(v != 0).goto(b)
fsm.If(v == 0).goto_next()
def start(fsm, saxi, wordsize=4):
awaddr = verilog.control_reg_start * wordsize
saxi.write(fsm, awaddr, 1)
araddr = verilog.control_reg_start * wordsize
b = fsm.current
v = saxi.read(fsm, araddr)
fsm.If(v != 0).goto(b)
fsm.If(v == 0).goto_next()
def wait(fsm, saxi, wordsize=4):
araddr = verilog.control_reg_busy * wordsize
b = fsm.current
v = saxi.read(fsm, araddr)
fsm.If(v != 0).goto(b)
fsm.If(v == 0).goto_next()
def sw_rst(fsm, saxi, wordsize=4):
awaddr = verilog.control_reg_reset * wordsize
saxi.write(fsm, awaddr, 1)
araddr = verilog.control_reg_busy * wordsize
b = fsm.current
v = saxi.read(fsm, araddr)
fsm.If(v != 0).goto(b)
fsm.If(v == 0).goto_next()
|
178908
|
from sys import stdin
f, = map(float, stdin.readline().strip().split())
print "%.3lf" % (5*(f-32)/9)
|
178912
|
for __ in range(int(input())):
    N, A, B, C = map(int, input().split())
    if A + C >= N and B >= N:
        print("YES")
    else:
        print("NO")
|
178958
|
import os
from cupy.testing._pytest_impl import is_available, check_available
if is_available():
import pytest
_gpu_limit = int(os.getenv('CUPY_TEST_GPU_LIMIT', '-1'))
def gpu(*args, **kwargs):
return pytest.mark.gpu(*args, **kwargs)
def cudnn(*args, **kwargs):
return pytest.mark.cudnn(*args, **kwargs)
def slow(*args, **kwargs):
return pytest.mark.slow(*args, **kwargs)
else:
def _dummy_callable(*args, **kwargs):
check_available('pytest attributes')
assert False # Not reachable
gpu = _dummy_callable
cudnn = _dummy_callable
slow = _dummy_callable
def multi_gpu(gpu_num):
"""Decorator to indicate number of GPUs required to run the test.
Tests can be annotated with this decorator (e.g., ``@multi_gpu(2)``) to
    declare the number of GPUs required to run. When running tests, if the
    ``CUPY_TEST_GPU_LIMIT`` environment variable is set to a value greater
    than or equal to 0, test cases that require more GPUs than the limit will
    be skipped.
"""
check_available('multi_gpu attribute')
# at this point we know pytest is available for sure
assert 1 < gpu_num
def _wrapper(f):
return pytest.mark.skipif(
0 <= _gpu_limit < gpu_num,
reason='{} GPUs required'.format(gpu_num))(
pytest.mark.multi_gpu(f))
return _wrapper
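# Usage sketch (``test_example`` is a hypothetical test, not part of this module):
#
# @multi_gpu(2)
# def test_example():
#     pass  # runs only when CUPY_TEST_GPU_LIMIT is unset, negative, or >= 2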
|
178974
|
import abc
class AbstractRateCounter(metaclass=abc.ABCMeta):
@abc.abstractmethod
def get(self, scope):
raise NotImplementedError()
@abc.abstractmethod
def increment(self, scope, delta):
raise NotImplementedError()
@abc.abstractmethod
def increment_and_get(self, scope, delta):
raise NotImplementedError()
|
178992
|
from nnet.models.srl import *
from nnet.run.runner import *
from nnet.ml.voc import *
from nnet.run.srl.run import *
from nnet.run.srl.util import *
from nnet.run.srl.decoder import *
from functools import partial
from nnet.run.srl.read_dependency import get_adj
import nnet.run.srl.conll09_evaluation.eval
def make_local_voc(labels):
return {i: label for i, label in enumerate(labels)}
def bio_reader(record):
dbg_header, sent, pos_tags, dep_parsing, degree, frame, target, f_lemmas, f_targets, labels_voc, labels = record.split(
'\t')
labels_voc = labels_voc.split(' ')
frame = [frame] * len(labels_voc)
words = []
for word in sent.split(' '):
words.append(word)
pos_tags = pos_tags.split(' ')
labels = labels.split(' ')
assert (len(words) == len(labels))
local_voc = {v: k for k, v in make_local_voc(labels_voc).items()}
labels = [local_voc[label] for label in labels]
dep_parsing = dep_parsing.split()
dep_parsing = [p.split('|') for p in dep_parsing]
dep_parsing = [(p[0], int(p[1]), int(p[2])) for p in dep_parsing]
f_lemmas = f_lemmas.split(' ')
f_targets = f_targets.split(' ')
return dbg_header, words, pos_tags, dep_parsing, np.int32(degree), frame, \
np.int32(target), f_lemmas, np.int32(f_targets), labels_voc, labels
class BioSrlErrorComputer:
def __init__(self, converter, do_labeling, data_partition,
eval_dir):
self.converter = converter
self.do_labeling = do_labeling
self.data_partition = data_partition
self.labels1 = []
self.labels2 = []
self.predictions = []
self.eval_dir = eval_dir
self.do_eval = nnet.run.srl.conll09_evaluation.eval.do_eval
def pprint(self, record, voc, predictions, true_labels):
n = 2
sent = record[1]
        predictions = [p[:len(voc)] for p in predictions.tolist()]
predictions = predictions[:len(sent)]
constraints = []
best_labeling = constrained_decoder(voc, predictions, 100, constraints)
info = list()
for word, prediction, true_label, best in zip(sent, predictions,
true_labels,
best_labeling):
nbest = sorted(range(len(prediction)),
key=lambda x: -prediction[x])
nbest = nbest[:n]
probs = [prediction[l] for l in nbest]
labels = [voc[label] for label in nbest if label in voc]
labels = ' '.join(labels)
info.append((word, labels, probs, voc[true_label], best))
return info
def compute(self, model, batch):
errors, errors_w = 0, 0.0
record_ids, batch = zip(*batch)
model.test_mode_on()
model_input = self.converter(batch)
sent, \
p_sent, \
pos_tags,\
sent_mask, \
targets, \
frames, \
labels_voc, \
labels_voc_mask, \
freq, \
region_mark, sent_pred_lemmas_idx, \
adj_arcs_in, adj_arcs_out, adj_lab_in, adj_lab_out, \
mask_in, mask_out, mask_loop, \
true_labels \
= model_input
predictions = model.predict(*model_input[:-1])
labels = np.argmax(predictions, axis=1)
labels = np.reshape(labels, sent.shape)
predictions = np.reshape(predictions,
(sent.shape[0], sent.shape[1],
labels_voc.shape[1]))
for i, sent_labels in enumerate(labels):
labels_voc = batch[i][-2]
local_voc = make_local_voc(labels_voc)
info = self.pprint(batch[i], local_voc, predictions[i],
true_labels[i])
self.labels1.append([x[4] for x in info])
self.labels2.append([x[3] for x in info])
if self.do_labeling:
sentence = []
for word, label, probs, true, best in info:
single_pred = "%10s\t%10s\t%20s\t%40s\t%10s\t%10s\t%10s" % (
word, best, label, probs, true, batch[i][4],
batch[i][3][0])
sentence.append(
(word, best, label, probs, true, batch[i][4],
batch[i][3][0]))
print(single_pred)
print('\n')
self.predictions.append(sentence)
else:
sentence = []
for word, label, probs, true, best in info:
sentence.append(
(word, best, label, probs, true, batch[i][4], batch[i][3][0]))
self.predictions.append(sentence)
for word, label, probs, true, best in info:
if true != best:
errors += 1
errors_w += 1 / len(info)
        loss = np.sum(model.compute_loss(*model_input))
        model.test_mode_off()
        return errors, loss, errors_w
def final(self):
if self.do_eval:
results = self.do_eval(self.data_partition, self.predictions,
self.eval_dir)
else:
evaluate(self.labels1, self.labels2)
self.predictions = []
self.labels1, self.labels2 = [], []
return results[2]
class SRLRunner(Runner):
def __init__(self):
super(SRLRunner, self).__init__()
self.word_voc = create_voc('file', self.a.word_voc)
self.word_voc.add_unks()
self.freq_voc = frequency_voc(self.a.freq_voc)
self.p_word_voc = create_voc('file', self.a.p_word_voc)
self.p_word_voc.add_unks()
self.role_voc = create_voc('file', self.a.role_voc)
self.frame_voc = create_voc('file', self.a.frame_voc)
self.pos_voc = create_voc('file', self.a.pos_voc)
def add_special_args(self, parser):
parser.add_argument(
"--word-voc", required=True)
parser.add_argument(
"--p-word-voc", required=True)
parser.add_argument(
"--freq-voc", required=True)
parser.add_argument(
"--role-voc", required=True)
parser.add_argument(
"--frame-voc", required=True)
parser.add_argument(
"--pos-voc", required=True
)
parser.add_argument(
"--word-embeddings", required=True
)
parser.add_argument(
"--data_partition", required=True
)
parser.add_argument(
"--hps", help="model hyperparams", required=False
)
parser.add_argument(
"--eval-dir", help="path to dir with eval data and scripts",
required=True
)
def get_parser(self):
return partial(bio_reader)
def get_reader(self):
return simple_reader
def get_converter(self):
def bio_converter(batch):
headers, sent_, pos_tags, dep_parsing, degree, frames, \
targets, f_lemmas, f_targets, labels_voc, labels = list(
zip(*batch))
sent = [self.word_voc.vocalize(w) for w in sent_]
p_sent = [self.p_word_voc.vocalize(w) for w in sent_]
            freq = [[self.freq_voc[self.word_voc.direct[i]]
                     if self.word_voc.direct[i] != '_UNK' else 0
                     for i in w] for w in sent]
pos_tags = [self.pos_voc.vocalize(w) for w in pos_tags]
frames = [self.frame_voc.vocalize(f) for f in frames]
labels_voc = [self.role_voc.vocalize(r) for r in labels_voc]
lemmas_idx = [self.frame_voc.vocalize(f) for f in f_lemmas]
adj_arcs_in, adj_arcs_out, adj_lab_in, adj_lab_out, \
mask_in, mask_out, mask_loop = get_adj(dep_parsing, degree)
sent_batch, sent_mask = mask_batch(sent)
p_sent_batch, _ = mask_batch(p_sent)
freq_batch, _ = mask_batch(freq)
freq_batch = freq_batch.astype(dtype='float32')
pos_batch, _ = mask_batch(pos_tags)
labels_voc_batch, labels_voc_mask = mask_batch(labels_voc)
labels_batch, _ = mask_batch(labels)
frames_batch, _ = mask_batch(frames)
region_mark = np.zeros(sent_batch.shape, dtype='float32')
hps = eval(self.a.hps)
rm = hps['rm']
if rm >= 0:
for r, row in enumerate(region_mark):
for c, column in enumerate(row):
if targets[r] - rm <= c <= targets[r] + rm:
region_mark[r][c] = 1
sent_pred_lemmas_idx = np.zeros(sent_batch.shape, dtype='int32')
for r, row in enumerate(sent_pred_lemmas_idx):
for c, column in enumerate(row):
for t, tar in enumerate(f_targets[r]):
if tar == c:
sent_pred_lemmas_idx[r][c] = lemmas_idx[r][t]
sent_pred_lemmas_idx = np.array(sent_pred_lemmas_idx, dtype='int32')
assert (sent_batch.shape == sent_mask.shape)
assert (
frames_batch.shape == labels_voc_batch.shape == labels_voc_mask.shape)
assert (labels_batch.shape == sent_batch.shape)
return sent_batch, p_sent_batch, pos_batch, sent_mask, targets, frames_batch, \
labels_voc_batch, \
labels_voc_mask, freq_batch, \
region_mark, \
sent_pred_lemmas_idx,\
adj_arcs_in, adj_arcs_out, adj_lab_in, adj_lab_out, \
mask_in, mask_out, mask_loop, \
labels_batch
return bio_converter
def get_tester(self):
converter = self.get_converter()
computer = BioSrlErrorComputer(
converter, self.a.test_only, self.a.data_partition,
self.a.eval_dir)
corpus = Corpus(
parser=partial(bio_reader),
batch_size=self.a.batch,
path=self.a.test,
reader=self.get_reader()
)
return ErrorRateTester(computer, self.a.out, corpus)
def load_model(self):
hps = eval(self.a.hps)
hps['vframe'] = self.frame_voc.size()
hps['vword'] = self.word_voc.size()
hps['vbio'] = self.role_voc.size()
hps['vpos'] = self.pos_voc.size()
hps['word_embeddings'] = parse_word_embeddings(self.a.word_embeddings)
hps['in_arcs']=True
hps['out_arcs']=True
return BioSRL(hps)
if __name__ == '__main__':
SRLRunner().run()
|
179112
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from atvgnet import *
|
179150
|
import unittest
import parameterized
import numpy as np
from rlutil.envs.tabular_cy import q_iteration, tabular_env
from rlutil.envs.tabular import q_iteration as q_iteration_py
class QIterationTest(unittest.TestCase):
def setUp(self):
self.env = tabular_env.CliffwalkEnv(num_states=3, transition_noise=0.01)
def test_qiteration(self):
params = {
'num_itrs': 50,
'ent_wt': 1.0,
'discount': 0.99,
}
qvals_py = q_iteration_py.softq_iteration(self.env, **params)
qvals_cy = q_iteration.softq_iteration(self.env, **params)
self.assertTrue(np.allclose(qvals_cy, qvals_py))
def test_qevaluation_noent(self):
env = tabular_env.CliffwalkEnv(num_states=2, transition_noise=0.00)
params = {
'num_itrs': 100,
'ent_wt': 0.0,
'discount': 0.5,
}
q_values = np.zeros((env.num_states, env.num_actions))
q_values[:, 1] = 1e10
returns, _ = q_iteration.softq_evaluation(env, q_values, **params)
self.assertAlmostEqual(returns, 0.66666666)
def test_qevaluation_ent(self):
env = tabular_env.CliffwalkEnv(num_states=2, transition_noise=0.00)
params = {
'num_itrs': 100,
'ent_wt': 0.001,
'discount': 0.5,
}
q_values = np.zeros((env.num_states, env.num_actions))
q_values[:, 1] = 1e10
returns, _ = q_iteration.softq_evaluation(env, q_values, **params)
self.assertAlmostEqual(returns, 0.66666666)
def test_visitations(self):
env = tabular_env.CliffwalkEnv(num_states=3, transition_noise=0.00)
params = {
'num_itrs': 50,
'ent_wt': 0.0,
'discount': 0.99,
}
qvals_py = q_iteration_py.softq_iteration(env, **params)
visitations = q_iteration_py.compute_visitation(env, qvals_py, ent_wt=0.0, env_time_limit=1)
s_visitations = np.sum(visitations, axis=1)
tru_visits = np.array([1, 0, 0])
self.assertTrue(np.allclose(tru_visits, s_visitations))
visitations = q_iteration_py.compute_visitation(env, qvals_py, ent_wt=0.0, env_time_limit=3)
s_visitations = np.sum(visitations, axis=1)
tru_visits = np.array([1, 1, 1]) / 3.0
self.assertTrue(np.allclose(tru_visits, s_visitations))
visitations = q_iteration_py.compute_visitation(env, qvals_py, ent_wt=0.0, env_time_limit=5)
s_visitations = np.sum(visitations, axis=1)
tru_visits = np.array([2, 2, 1]) / 5.0
self.assertTrue(np.allclose(tru_visits, s_visitations))
if __name__ == '__main__':
unittest.main()
|
179236
|
from cartoframes.viz import popup_element
from cartoframes.viz.popup import Popup
from cartoframes.viz.popup_list import PopupList
popup_list = PopupList({
'click': [popup_element('value_1'), popup_element('value_2')],
'hover': [popup_element('value_1'), popup_element('value_3')]
})
class TestPopupList(object):
def test_should_have_access_to_popup_elements(self):
for element in popup_list.elements:
assert isinstance(element, Popup)
def test_should_get_all_popup_interactivities(self):
assert popup_list.get_interactivity() == [
{
'event': 'click',
'attrs': {
'name': 'v72224b',
'title': 'value_2',
'format': None
}
}, {
'event': 'click',
'attrs': {
'name': 'vbc6799',
'title': 'value_1',
'format': None
}
}, {
'event': 'hover',
'attrs': {
'name': 'vc266e3',
'title': 'value_3',
'format': None
}
}, {
'event': 'hover',
'attrs': {
'name': 'vbc6799',
'title': 'value_1',
'format': None
}
}
]
def test_should_get_all_popup_variables(self):
        assert popup_list.get_variables() == {
            'vbc6799': "prop('value_1')",
            'v72224b': "prop('value_2')",
            'vc266e3': "prop('value_3')"
        }
|
179269
|
from ._version import __version__
from ._rwlock import RWLock as RWLock
from ._streams import (
BufferedReceiveStream as BufferedReceiveStream,
TextReceiveStream as TextReceiveStream,
)
from ._multi_cancel import MultiCancelScope as MultiCancelScope
from ._service_nursery import open_service_nursery as open_service_nursery
from ._meta import ScopedObject as ScopedObject, BackgroundObject as BackgroundObject
# watch this space...
_export = None
for _export in globals().values():
if hasattr(_export, "__module__"):
_export.__module__ = __name__
del _export
|
179291
|
import numpy as np
# allow_pickle=True is required to load object arrays with numpy >= 1.16.3
raw = np.load('data/training_data_bounty_attack_mobilenet.npy', allow_pickle=True)
converted_data = []
for data in raw:
    # data[0] is kept unchanged; extend each label list by one class:
    # an all-zero list maps to the new extra class, otherwise append 0.
if data[1] == [0,0,0,0]:
data[1] = [0,0,0,0,1]
else:
data[1].append(0)
if data[2] == [0,0]:
data[2] = [0,0,1]
else:
data[2].append(0)
converted_data.append([data[0], data[1], data[2]])
np.save('data/converted2.npy',converted_data)
|
179299
|
from direct.distributed.DistributedCartesianGridAI import DistributedCartesianGridAI
from direct.directnotify import DirectNotifyGlobal
from pirates.world.DistributedGameAreaAI import DistributedGameAreaAI
from pirates.world.InteriorAreaBuilderAI import InteriorAreaBuilderAI
from pirates.world.WorldGlobals import *
class DistributedGAInteriorAI(DistributedCartesianGridAI, DistributedGameAreaAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGAInteriorAI')
def __init__(self, air):
DistributedCartesianGridAI.__init__(self, air, GAME_AREA_INTERIOR_STARTING_ZONE, GAME_AREA_INTERIOR_GRID_SIZE,
GAME_AREA_INTERIOR_GRID_RADIUS, GAME_AREA_INTERIOR_CELL_SIZE)
DistributedGameAreaAI.__init__(self, air)
self.connectorId = 0
self.interiorDoor = None
self.exteriorDoor = None
self.builder = InteriorAreaBuilderAI(air, self)
def setConnectorId(self, connectorId):
self.connectorId = connectorId
def d_setConnectorId(self, connectorId):
self.sendUpdate('setConnectorId', [connectorId])
def b_setConnectorId(self, connectorId):
self.setConnectorId(connectorId)
self.d_setConnectorId(connectorId)
def getConnectorId(self):
return self.connectorId
def setInteriorDoor(self, interiorDoor):
self.interiorDoor = interiorDoor
def getInteriorDoor(self):
return self.interiorDoor
def setExteriorDoor(self, exteriorDoor):
self.exteriorDoor = exteriorDoor
def getExteriorDoor(self):
return self.exteriorDoor
def delete(self):
self.air.deallocateZone(self.zoneId)
DistributedCartesianGridAI.delete(self)
DistributedGameAreaAI.delete(self)
|
179320
|
from dataclasses import dataclass, InitVar
from typing import Any
@dataclass(unsafe_hash=True)
class CoaxialControlIOStatus:
speaker: bool = False
white_light: bool = False
api_response: InitVar[Any] = None
def __post_init__(self, api_response):
if api_response is not None:
self.speaker = api_response["params"]["status"]["Speaker"] == "On"
self.white_light = api_response["params"]["status"]["WhiteLight"] == "On"
|
179419
|
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv3D, MaxPooling3D, UpSampling3D, concatenate, Activation
from tensorflow.keras.optimizers import Adam
K.set_image_data_format("channels_last")
# Set the image shape to have the channels in the last dimension
def Unet():
'''
Builds the 3D UNet Keras model.
Depth of UNet model = 4.
returns:
Untrained 3D UNet Model.
'''
input_layer = tf.keras.Input(shape=(16, 160, 160, 4))
down_depth_0_layer_0 = Conv3D(filters=32, kernel_size=(3,3,3),padding='same',strides=(1,1,1))(input_layer)
down_depth_0_layer_0 = Activation('relu')(down_depth_0_layer_0)
down_depth_0_layer_1 = Conv3D(filters=64, kernel_size=(3,3,3),padding='same',strides=(1,1,1))(down_depth_0_layer_0)
down_depth_0_layer_1 = Activation('relu')(down_depth_0_layer_1)
down_depth_0_layer_pool = MaxPooling3D(pool_size=(2,2,2))(down_depth_0_layer_1)
down_depth_1_layer_0 = Conv3D(filters=64, kernel_size=(3,3,3),padding='same',strides=(1,1,1))(down_depth_0_layer_pool)
down_depth_1_layer_0 = Activation('relu')(down_depth_1_layer_0)
down_depth_1_layer_1 = Conv3D(filters=128, kernel_size=(3,3,3),padding='same',strides=(1,1,1))(down_depth_1_layer_0)
down_depth_1_layer_1 = Activation('relu')(down_depth_1_layer_1)
down_depth_1_layer_pool = MaxPooling3D(pool_size=(2,2,2))(down_depth_1_layer_1)
down_depth_2_layer_0 = Conv3D(filters=128, kernel_size=(3,3,3),padding='same',strides=(1,1,1))(down_depth_1_layer_pool)
down_depth_2_layer_0 = Activation('relu')(down_depth_2_layer_0)
down_depth_2_layer_1 = Conv3D(filters=256, kernel_size=(3,3,3),padding='same',strides=(1,1,1))(down_depth_2_layer_0)
down_depth_2_layer_1 = Activation('relu')(down_depth_2_layer_1)
down_depth_2_layer_pool = MaxPooling3D(pool_size=(2,2,2))(down_depth_2_layer_1)
down_depth_3_layer_0 = Conv3D(filters=256, kernel_size=(3,3,3),padding='same',strides=(1,1,1))(down_depth_2_layer_pool)
down_depth_3_layer_0 = Activation('relu')(down_depth_3_layer_0)
down_depth_3_layer_1 = Conv3D(filters=512, kernel_size=(3,3,3),padding='same',strides=(1,1,1))(down_depth_3_layer_0)
down_depth_3_layer_1 = Activation('relu')(down_depth_3_layer_1)
up_depth_2_layer_0 = UpSampling3D(size=(2,2,2))(down_depth_3_layer_1)
up_depth_2_concat = concatenate([up_depth_2_layer_0,down_depth_2_layer_1],axis=4)
up_depth_2_layer_1 = Conv3D(filters=256, kernel_size=(3,3,3),padding='same',strides=(1,1,1))(up_depth_2_concat)
up_depth_2_layer_1 = Activation('relu')(up_depth_2_layer_1)
up_depth_2_layer_2 = Conv3D(filters=256, kernel_size=(3,3,3),padding='same',strides=(1,1,1))(up_depth_2_layer_1)
up_depth_2_layer_2 = Activation('relu')(up_depth_2_layer_2)
up_depth_1_layer_0 = UpSampling3D(size=(2,2,2))(up_depth_2_layer_2)
up_depth_1_concat = concatenate([up_depth_1_layer_0,down_depth_1_layer_1],axis=4)
up_depth_1_layer_1 = Conv3D(filters=128, kernel_size=(3,3,3),padding='same',strides=(1,1,1))(up_depth_1_concat)
up_depth_1_layer_1 = Activation('relu')(up_depth_1_layer_1)
up_depth_1_layer_2 = Conv3D(filters=128, kernel_size=(3,3,3),padding='same',strides=(1,1,1))(up_depth_1_layer_1)
up_depth_1_layer_2 = Activation('relu')(up_depth_1_layer_2)
up_depth_0_layer_0 = UpSampling3D(size=(2,2,2))(up_depth_1_layer_2)
up_depth_0_concat = concatenate([up_depth_0_layer_0,down_depth_0_layer_1],axis=4)
up_depth_0_layer_1 = Conv3D(filters=64, kernel_size=(3,3,3),padding='same',strides=(1,1,1))(up_depth_0_concat)
up_depth_0_layer_1 = Activation('relu')(up_depth_0_layer_1)
up_depth_0_layer_2 = Conv3D(filters=64, kernel_size=(3,3,3),padding='same',strides=(1,1,1))(up_depth_0_layer_1)
up_depth_0_layer_2 = Activation('relu')(up_depth_0_layer_2)
final_conv = Conv3D(filters=3, kernel_size=(1,1,1),padding='valid',strides=(1,1,1))(up_depth_0_layer_2)
final_activation = Activation('sigmoid')(final_conv)
model = Model(inputs=input_layer, outputs=final_activation)
model.compile(optimizer=Adam(learning_rate=0.00001),loss='categorical_crossentropy',metrics=['categorical_accuracy'])
# model.summary()
return model
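# Minimal shape-check sketch (illustrative, not part of the original module):
#
# if __name__ == '__main__':
#     model = Unet()
#     print(model.output_shape)  # expect (None, 16, 160, 160, 3): the three
#                                # pool/upsample pairs cancel out spatially and
#                                # the final conv has 3 filters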
def load_model():
'''
Loads weights from a pre-trained model.
returns:
Trained 3D UNet Model.
'''
model = Unet()
model.load_weights('model_pretrained.hdf5')
#model.summary()
return model
|
179456
|
from django.dispatch import Signal
"""
When an xform is received, either from posting or when finished playing.
"""
xform_received = Signal(providing_args=["form"])
"""
When a form is finished playing (via the SMS apis)
"""
sms_form_complete = Signal(providing_args=["session_id", "form"])
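# A minimal receiver sketch (`handle_xform` is hypothetical, for illustration):
#
# from django.dispatch import receiver
#
# @receiver(xform_received)
# def handle_xform(sender, form, **kwargs):
#     print("received form:", form)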
|
179533
|
import torch
import torch.nn as nn
#from torch.autograd import Function
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1. - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
class LovaszSoftmax(nn.Module):
def __init__(self, reduction='mean'):
super(LovaszSoftmax, self).__init__()
self.reduction = reduction
def prob_flatten(self, input, target):
assert input.dim() in [4, 5]
num_class = input.size(1)
if input.dim() == 4:
input = input.permute(0, 2, 3, 1).contiguous()
input_flatten = input.view(-1, num_class)
elif input.dim() == 5:
input = input.permute(0, 2, 3, 4, 1).contiguous()
input_flatten = input.view(-1, num_class)
target_flatten = target.view(-1)
return input_flatten, target_flatten
def lovasz_softmax_flat(self, inputs, targets):
num_classes = inputs.size(1)
losses = []
for c in range(num_classes):
target_c = (targets == c).float()
if num_classes == 1:
input_c = inputs[:, 0]
else:
input_c = inputs[:, c]
loss_c = (torch.autograd.Variable(target_c) - input_c).abs()
loss_c_sorted, loss_index = torch.sort(loss_c, 0, descending=True)
target_c_sorted = target_c[loss_index]
losses.append(torch.dot(loss_c_sorted, torch.autograd.Variable(lovasz_grad(target_c_sorted))))
losses = torch.stack(losses)
if self.reduction == 'none':
loss = losses
elif self.reduction == 'sum':
loss = losses.sum()
else:
loss = losses.mean()
return loss
def forward(self, inputs, targets):
# print(inputs.shape, targets.shape) # (batch size, class_num, x,y,z), (batch size, 1, x,y,z)
inputs, targets = self.prob_flatten(inputs, targets)
# print(inputs.shape, targets.shape)
losses = self.lovasz_softmax_flat(inputs, targets)
return losses
# class net(nn.Module):
# def __init__(self, in_channels, num_classes):
# super(net, self).__init__()
# self.conv = nn.Conv3d(in_channels, num_classes, (1, 3, 3), padding=(0, 1, 1))
# def forward(self, input):
# out = self.conv(input)
# return out
# from torch.optim import Adam
# BS = 2
# num_classes = 8
# dim, hei, wid = 8, 64, 64
# data = torch.rand(BS, num_classes, dim, hei, wid)
# model = net(num_classes, num_classes)
# target = torch.zeros(BS, dim, hei, wid).random_(num_classes)
# Loss = LovaszSoftmax()
# optim = Adam(model.parameters(), lr=0.01,betas=(0.99,0.999))
# for step in range(2):
# out = model(data)
# loss = Loss(out, target)
# optim.zero_grad()
# loss.backward()
# optim.step()
# print(loss)
|
179614
|
from typing import List
from unittest import mock
from uuid import UUID
import pytest
from sqlalchemy.orm.exc import NoResultFound
from orchestrator.db import ResourceTypeTable, SubscriptionTable, WorkflowTable, db, transactional
from orchestrator.targets import Target
def test_transactional():
def insert_wf(state):
wf = WorkflowTable(name="Test transactional", target=Target.CREATE, description="Testing 1, 2, 3!")
db.session.add(wf)
def insert_wf_error(state):
wf = WorkflowTable(
name="Test transactional [ERROR]", target=Target.CREATE, description="Testing 1, 2, 3! BOOM!"
)
db.session.add(wf)
raise Exception("Let's wreck some havoc!")
logger = mock.MagicMock()
with transactional(db, logger):
insert_wf({})
logger.assert_has_calls(
[
mock.call.debug("Temporarily disabling commit."),
mock.call.debug("Reenabling commit."),
mock.call.debug("Committing transaction."),
]
)
logger.reset_mock()
with pytest.raises(Exception):
with transactional(db, logger):
insert_wf_error({})
logger.assert_has_calls(
[
mock.call.debug("Temporarily disabling commit."),
mock.call.debug("Reenabling commit."),
mock.call.warning("Rolling back transaction."),
]
)
def test_transactional_no_commit():
def insert_wf(state):
wf = WorkflowTable(
name="Test transactional should not be committed", target=Target.CREATE, description="Testing 1, 2, 3!"
)
db.session.add(wf)
db.session.commit()
raise Exception("Lets rollback")
logger = mock.MagicMock()
with pytest.raises(Exception, match="Lets rollback"):
with transactional(db, logger):
insert_wf({})
assert (
db.session.query(WorkflowTable).filter(WorkflowTable.name == "Test transactional should not be committed").all()
== []
)
logger.assert_has_calls(
[
mock.call.warning(
"Step function tried to issue a commit. It should not! Will execute commit on behalf of step function when it returns."
),
]
)
def test_transactional_no_commit_second_thread():
def insert_wf(state):
wf = WorkflowTable(
name="Test transactional should not be committed", target=Target.CREATE, description="Testing 1, 2, 3!"
)
db.session.add(wf)
db.session.commit()
# Create new database session to simulate another workflow/api handler running at the same time
        # This is also a workaround for our disable-commit wrapper, but it should be reasonably obvious that
        # someone is fucking around if you see `with db.database_scope():` in actual production code
with db.database_scope():
wf2 = WorkflowTable(
name="Test transactional should be committed", target=Target.CREATE, description="Testing 1, 2, 3!"
)
db.session.add(wf2)
db.session.commit()
raise Exception("Lets rollback")
logger = mock.MagicMock()
with pytest.raises(Exception, match="Lets rollback"):
with transactional(db, logger):
insert_wf({})
assert db.session.query(WorkflowTable).filter(WorkflowTable.name == "Test transactional should be committed").one()
assert (
db.session.query(WorkflowTable).filter(WorkflowTable.name == "Test transactional should not be committed").all()
== []
)
logger.assert_has_calls(
[
mock.call.warning(
"Step function tried to issue a commit. It should not! Will execute commit on behalf of step function when it returns."
),
]
)
def test_autouse_fixture_rolls_back_aaa():
# We want to test whether a change committed to the database in one test is visible to other tests (as in really
# persisted to the database). Of course such a change should not be visible if our `fastapi_app` and `database`
# autouse fixtures work as advertised.
#
# However, tests should be independent of each other and we cannot assume one test runs before the other. Hence
# this test comes in two versions: one with the `_aaa` postfix and one with the `_bbb` postfix. Both will test
# for the presence of a change the other test thinks it has committed to the database. If one of the tests (the
# one that runs after the other) finds the change the other has committed our fixtures don't work properly.
    # Using ResourceTypeTable as it's a simple model that doesn't require foreign keys.
rt = ResourceTypeTable(resource_type="aaa", description="aaa")
# print(db)
# print(dir(db))
db.session.add(rt)
db.session.commit()
with pytest.raises(NoResultFound):
ResourceTypeTable.query.filter(ResourceTypeTable.resource_type == "bbb").one()
def test_autouse_fixture_rolls_back_bbb():
# We want to test whether a change committed to the database in one test is visible to other tests (as in really
# persisted to the database). Of course such a change should not be visible if our `fastapi_app` and `database`
# autouse fixtures work as advertised.
#
# However, tests should be independent of each other and we cannot assume one test runs before the other. Hence
# this test comes in two versions: one with the `_aaa` postfix and one with the `_bbb` postfix. Both will test
# for the presence of a change the other test thinks it has committed to the database. If one of the tests (the
# one that runs after the other) finds the change the other has committed our fixtures don't work properly.
    # Using ResourceTypeTable as it's a simple model that doesn't require foreign keys.
rt = ResourceTypeTable(resource_type="bbb", description="bbb")
db.session.add(rt)
db.session.commit()
with pytest.raises(NoResultFound):
ResourceTypeTable.query.filter(ResourceTypeTable.resource_type == "aaa").one()
def test_full_text_search(generic_subscription_1):
def get_subs_tsq(query: str) -> List[SubscriptionTable]:
subs = SubscriptionTable.query.search(query).all()
return subs
subs = get_subs_tsq("Generic Subscription One")
assert subs[0].subscription_id == UUID(generic_subscription_1)
subs = get_subs_tsq("description:Generic Subscription One")
assert subs[0].subscription_id == UUID(generic_subscription_1)
subs = get_subs_tsq("rt_2: 42")
assert subs[0].subscription_id == UUID(generic_subscription_1)
def test_str_method():
assert (
str(SubscriptionTable())
== "SubscriptionTable(subscription_id=None, description=None, status=None, product_id=None, customer_id=None, insync=None, start_date=None, end_date=None, note=None)"
)
|
179636
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from layers import GraphConvolution
from ...modules.prediction.classification.link_prediction.ConcatFeedForwardNNLayer import (
ConcatFeedForwardNNLayer,
)
from ...modules.prediction.classification.link_prediction.ElementSumLayer import ElementSumLayer
from ...modules.prediction.classification.link_prediction.StackedElementProdLayer import (
StackedElementProdLayer,
)
class GCNModelVAE(nn.Module):
def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, dropout, prediction_type):
super(GCNModelVAE, self).__init__()
self.gc1 = GraphConvolution(input_feat_dim, hidden_dim1, dropout, act=F.relu)
self.gc2 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)
self.gc3 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)
self.prediction_type = prediction_type
self.act = lambda x: x
if self.prediction_type == "ele_sum":
self.dc = ElementSumLayer(hidden_dim2, 16, 1)
if self.prediction_type == "concat_NN":
self.dc = ConcatFeedForwardNNLayer(hidden_dim2, 16, 1)
if self.prediction_type == "stacked_ele_prod":
self.dc = StackedElementProdLayer(hidden_dim2, 16, 1, 1)
# InnerProductDecoder(dropout, act=lambda x: x)
def encode(self, x, adj):
hidden1 = self.gc1(x, adj)
return self.gc2(hidden1, adj), self.gc3(hidden1, adj)
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
def forward(self, x, adj):
mu, logvar = self.encode(x, adj)
z = self.reparameterize(mu, logvar)
if self.prediction_type == "stacked_ele_prod":
link_logits = self.dc([z])
recovered = self.dc([mu])
else:
link_logits = self.dc(z)
recovered = self.dc(mu)
return self.act(link_logits), mu, logvar, recovered
class InnerProductDecoder(nn.Module):
"""Decoder for using inner product for prediction."""
def __init__(self, dropout, act=torch.sigmoid):
super(InnerProductDecoder, self).__init__()
self.dropout = dropout
self.act = act
def forward(self, z):
z = F.dropout(z, self.dropout, training=self.training)
adj = self.act(torch.mm(z, z.t()))
return adj
|
179651
|
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap
from PyQt5.QtCore import *
class view_case_ui(QMainWindow):
def __init__(
self,
problem_path,
parent=None
):
super(view_case_ui, self).__init__(parent)
self.button_mode = 1
self.problem_path = problem_path
self.file_not_found = 0
self.setWindowTitle('View Case')
self.setGeometry(550, 250, 800, 700)
self.setFixedSize(800,700)
main = self.main_view_case_ui()
self.setCentralWidget(main)
return
def main_view_case_ui(self):
heading = QLabel('View Test File')
open_label = QLabel("Open: ")
path = QLabel(self.problem_path)
path_layout = QHBoxLayout()
path_layout.addWidget(open_label)
path_layout.addWidget(path)
path_layout.addStretch(1)
path_widget = QWidget()
path_widget.setLayout(path_layout)
show_line_endings_label = QLabel('Show Line endings: ')
show_line_endings_button = QCheckBox('')
show_line_endings_button.setChecked(False)
show_line_endings_button.stateChanged.connect(
lambda:self.line_end_toggle(
show_line_endings_button.checkState()
)
)
line_end_layout = QHBoxLayout()
line_end_layout.addWidget(show_line_endings_label)
line_end_layout.addWidget(show_line_endings_button)
line_end_layout.addStretch(1)
line_end_widget = QWidget()
line_end_widget.setLayout(line_end_layout)
self.file_text_box = QTextEdit()
self.file_text_box.setReadOnly(True)
self.file_text_box.setFixedHeight(500)
# Try to open file:
try:
file_content = ''
# File is read line by line to maybe show line numbers in the future
            with open(self.problem_path, "r") as myfile:
data = myfile.readlines()
# print(data)
for i in data:
file_content = file_content + i
self.backup_file_content = repr(file_content)
except Exception as error:
print("[ CRITICAL ] Could not read test file : " + str(error))
file_content = "CRITICAL ERROR\nFile not found!"
self.backup_file_content = " CRITICAL ERROR\nFile not found! "
self.file_not_found = 1
self.file_text_box.setText(file_content)
main_layout = QVBoxLayout()
main_layout.addWidget(heading)
main_layout.addWidget(path_widget)
main_layout.addWidget(line_end_widget)
main_layout.addWidget(self.file_text_box)
main_layout.addStretch(1)
main = QWidget()
main.setLayout(main_layout)
heading.setObjectName('main_screen_heading')
open_label.setObjectName('main_screen_sub_heading')
path.setObjectName('main_screen_content')
main.setObjectName('account_window')
show_line_endings_label.setObjectName('main_screen_content')
return main
def line_end_toggle(self, state):
try:
if(state == Qt.Checked) and self.file_not_found == 0:
# line endings show
data = self.backup_file_content
data = data.replace('\\r', ' CR\r')
data = data.replace('\\n', ' LF\n')
data = data[1:-1]
self.line_endings_shown = 1
self.file_text_box.setText(data)
elif self.file_not_found == 0:
# line endings hide
if self.line_endings_shown == 1:
self.line_endings_shown = 0
# Replace current text with backup text
self.file_text_box.setText(eval(self.backup_file_content))
except Exception as error:
print('[ ERROR ] Could not show line endings: ', error)
return
|
179690
|
from Geometry.MTDGeometryBuilder.mtdParameters_cfi import mtdParameters
from Configuration.ProcessModifiers.dd4hep_cff import dd4hep
dd4hep.toModify(mtdParameters, fromDD4hep = True)
|
179750
|
import copy
import unittest
import os
import shutil
from matador.config import load_custom_settings, set_settings
from matador.config.config import DEFAULT_SETTINGS
DUMMY_SETTINGS = {
"mongo": {"host": "blah", "port": 666},
"plotting": {"style": "matador"},
"this_is_a_test": {True: "it is"},
"run3": {
"castep_executable": "castep",
"optados_executable": "optados",
"scratch_prefix": ".",
},
}
REAL_PATH = "/".join(__file__.split("/")[:-1]) + "/"
class ConfigTest(unittest.TestCase):
""" Test config loading. """
def tearDown(self):
from matador.config import SETTINGS
SETTINGS.reset()
def setUp(self):
from matador.config import SETTINGS
SETTINGS.reset()
def testLoadNamedCustomSettings(self):
""" Test custom config. """
settings = load_custom_settings(
config_fname=(REAL_PATH + "data/custom_config.yml"), no_quickstart=True
)
self.assertEqual(settings.settings, DUMMY_SETTINGS)
from matador.config import SETTINGS
self.assertEqual(SETTINGS.settings, DUMMY_SETTINGS)
SETTINGS.reset()
def testSetSettings(self):
set_settings(DUMMY_SETTINGS)
from matador.config import SETTINGS
self.assertEqual(SETTINGS.settings, DUMMY_SETTINGS)
SETTINGS["backend"] = "mongo"
self.assertEqual(SETTINGS.settings["backend"], "mongo")
SETTINGS.reset()
def testLoadUserDefaultSettings(self):
""" Test default config. """
exists = False
try:
if os.path.isfile(os.path.expanduser("~/.matadorrc")):
exists = True
shutil.copy(
os.path.expanduser("~/.matadorrc"),
os.path.expanduser("~/.matadorrc_bak"),
)
shutil.copy(
REAL_PATH + "data/custom_config.yml", os.path.expanduser("~/.matadorrc")
)
settings = load_custom_settings(no_quickstart=True)
self.assertEqual(settings.settings, DUMMY_SETTINGS)
os.remove(os.path.expanduser("~/.matadorrc"))
if exists:
shutil.copy(
os.path.expanduser("~/.matadorrc_bak"),
os.path.expanduser("~/.matadorrc"),
)
os.remove(os.path.expanduser("~/.matadorrc_bak"))
except Exception as oops:
if exists:
shutil.copy(
os.path.expanduser("~/.matadorrc_bak"),
os.path.expanduser("~/.matadorrc"),
)
os.remove(os.path.expanduser("~/.matadorrc_bak"))
raise oops
def testLoadDefaultSettings(self):
""" Test default config. """
settings = load_custom_settings(
config_fname="definitely_doesnt_exist.yml", no_quickstart=True
)
self.assertEqual(settings.settings, DEFAULT_SETTINGS)
settings.reset()
def testSetDefaultSettings(self):
""" Test default config. """
set_settings(DEFAULT_SETTINGS)
from matador.config import SETTINGS
new_settings = copy.deepcopy(DUMMY_SETTINGS)
new_settings["mongo"].pop("host")
set_settings(new_settings)
self.assertEqual(SETTINGS.settings["mongo"]["host"], DEFAULT_SETTINGS["mongo"]["host"])
SETTINGS.reset()
|
179771
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from classifieds.tests.test_views import FancyTestCase
class TestAdBrowsing(FancyTestCase):
fixtures = ['users', 'categories', 'ads']
def setUp(self):
self.client.login(username='user', password='<PASSWORD>')
def test_view_ad_must_be_active(self):
response = self.get('classifieds_browse_ad_view', pk=1)
self.assertEqual(response.status_code, 404)
def test_cant_view_expired_ad_when_logged_out(self):
self.client.logout()
response = self.get('classifieds_browse_ad_view', pk=2)
self.assertEqual(response.status_code, 404)
    def test_cant_view_expired_ad_of_another_user(self):
self.client.logout()
self.client.login(username='other_user', password='<PASSWORD>')
response = self.get('classifieds_browse_ad_view', pk=2)
self.assertEqual(response.status_code, 404)
def test_can_view_own_expired_ad(self):
response = self.get('classifieds_browse_ad_view', pk=2)
self.assertEqual(response.status_code, 200)
def test_unauthed_user_can_view_active_ad(self):
self.client.logout()
response = self.get('classifieds_browse_ad_view', pk=18)
self.assertEqual(response.status_code, 200)
def test_authed_user_can_view_active_ad(self):
response = self.get('classifieds_browse_ad_view', pk=18)
self.assertEqual(response.status_code, 200)
def test_category_overview_uses_template(self):
response = self.get('classifieds_browse_categories')
self.assertTemplateUsed(response, 'classifieds/category_overview.html')
|
179817
|
import talib._ta_lib as _ta_lib
from ._ta_lib import __TA_FUNCTION_NAMES__
for func_name in __TA_FUNCTION_NAMES__:
globals()[func_name] = getattr(_ta_lib, "stream_%s" % func_name)
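# After this loop, every TA function name bound in this module dispatches to
# its streaming variant: e.g. (assuming "SMA" is in __TA_FUNCTION_NAMES__)
# calling SMA(...) here invokes _ta_lib.stream_SMA(...), which returns only
# the most recent indicator value rather than the full output array.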
|
179903
|
import torch.nn as nn
import torch
import math
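# Sublayer variants with the residual (skip) connection removed: each module
# below normalizes the transformed hidden states alone instead of
# LayerNorm(hidden_states + input_tensor); the commented-out additions further
# down mark where the stock implementations add the residual.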
class NoSkipBertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertIntermediateId(nn.Module):
def forward(self, hidden_states, *args, **kwargs):
return hidden_states
class BertOutputId(nn.Module):
def forward(self, hidden_states, input_tensor, *args, **kwargs):
return hidden_states
class BertOutputNoSkip(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
# ========= Albert ==========
class AlbertAttentionWithoutSkipConnection(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.attention_head_size = config.hidden_size // config.num_attention_heads
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.output_dropout = nn.Dropout(config.hidden_dropout_prob)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pruned_heads = set()
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention.transpose_for_scores
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.attention_dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
# Should find a better way to do this
w = (
self.dense.weight.t()
.view(self.num_attention_heads, self.attention_head_size, self.hidden_size)
.to(context_layer.dtype)
)
b = self.dense.bias.to(context_layer.dtype)
projected_context_layer = torch.einsum("bfnd,ndh->bfh", context_layer, w) + b
projected_context_layer_dropout = self.output_dropout(projected_context_layer)
# layernormed_context_layer = self.LayerNorm(hidden_states + projected_context_layer_dropout)
layernormed_context_layer = self.LayerNorm(projected_context_layer_dropout)
return (layernormed_context_layer, attention_probs) if output_attentions else (layernormed_context_layer,)
# ============ DistilBert ==============
import transformers.models.distilbert.modeling_distilbert as modeling_distilbert
class TransformerBlockWithoutSkip(nn.Module):
def __init__(self, config):
super().__init__()
assert config.dim % config.n_heads == 0
self.attention = modeling_distilbert.MultiHeadSelfAttention(config)
self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
self.ffn = modeling_distilbert.FFN(config)
self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
def forward(self, x, attn_mask=None, head_mask=None, output_attentions=False):
"""
Parameters:
x: torch.tensor(bs, seq_length, dim)
attn_mask: torch.tensor(bs, seq_length)
Returns:
sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length) The attention weights ffn_output:
torch.tensor(bs, seq_length, dim) The output of the transformer block contextualization.
"""
# Self-Attention
sa_output = self.attention(
query=x,
key=x,
value=x,
mask=attn_mask,
head_mask=head_mask,
output_attentions=output_attentions,
)
if output_attentions:
sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
else: # To handle these `output_attentions` or `output_hidden_states` cases returning tuples
assert type(sa_output) == tuple
sa_output = sa_output[0]
sa_output = self.sa_layer_norm(sa_output) # (bs, seq_length, dim)
# sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim)
# Feed Forward Network
ffn_output = self.ffn(sa_output) # (bs, seq_length, dim)
ffn_output = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim)
output = (ffn_output,)
if output_attentions:
output = (sa_weights,) + output
return output
# ================ T5 ================
from transformers.models.t5.modeling_t5 import T5Attention, T5LayerNorm
class SkipT5LayerSelfAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.SelfAttention = T5Attention(config, has_relative_attention_bias=has_relative_attention_bias)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.SelfAttention(
normed_hidden_states,
mask=attention_mask,
position_bias=position_bias,
head_mask=head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
# hidden_states = hidden_states + self.dropout(attention_output[0])
hidden_states = self.dropout(attention_output[0])
outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
return outputs
|
179911
|
import asyncio
from zeroservices.backend.mongodb import MongoDBCollection
from . import _BaseCollectionTestCase
from ..utils import TestCase, _create_test_resource_service, _async_test
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
class MongoDBCollectionTestCase(_BaseCollectionTestCase):
def setUp(self):
super(MongoDBCollectionTestCase, self).setUp()
self.database_name = 'test'
self.collection = MongoDBCollection(self.resource_name,
self.database_name)
self.collection.service = self.service
def tearDown(self):
super().tearDown()
self.collection.collection.drop()
class MongoDBTestCase(TestCase):
def setUp(self):
self.database_name = 'test'
self.resource_name = 'test_resource'
asyncio.set_event_loop(None)
self.loop = asyncio.new_event_loop()
self.service = _create_test_resource_service('test_service', self.loop)
self.collection = MongoDBCollection(self.resource_name,
database_name=self.database_name)
self.collection.service = self.service
def tearDown(self):
self.collection.collection.drop()
@_async_test
def test_custom_database(self):
# Create a resource
resource_id = 'UUID1'
message_args = {'resource_data': {'kwarg_1': 1, 'kwarg_2': 2},
'resource_id': resource_id}
query = {'action': 'create'}
query.update(message_args)
result = yield from self.collection.on_message(**query)
self.assertEqual(result,
{'resource_id': 'UUID1'})
# Check that resource exists
resource_list = yield from self.collection.on_message(action='list')
self.assertEqual(resource_list,
[message_args])
# On a separate database, check that resource doesn't exists
collection2 = MongoDBCollection(self.resource_name,
database_name='other')
resource_list = yield from collection2.on_message(action='list')
self.assertEqual(resource_list, [])
|
179916
|
import tensorflow as tf
def get_vectors_norm(vectors):
transposed = tf.transpose(vectors)
v_mag = tf.sqrt(tf.math.reduce_sum(transposed * transposed, axis=0))
return tf.transpose(tf.math.divide_no_nan(transposed, v_mag))
class InnerAngleRepresentation:
def __call__(self, p1s: tf.Tensor, p2s: tf.Tensor, p3s: tf.Tensor) -> tf.Tensor:
"""
        Angle at point p2s for the triangle <p1s, p2s, p3s>
:param p1s: tf.Tensor (Points, Batch, Len, Dims)
:param p2s: tf.Tensor (Points, Batch, Len, Dims)
:param p3s: tf.Tensor (Points, Batch, Len, Dims)
:return: tf.Tensor (Points, Batch, Len)
"""
# Following https://stackoverflow.com/questions/19729831/angle-between-3-points-in-3d-space
v1 = p1s - p2s # (Points, Batch, Len, Dims)
v2 = p3s - p2s # (Points, Batch, Len, Dims)
v1_norm = get_vectors_norm(v1)
v2_norm = get_vectors_norm(v2)
slopes = tf.reduce_sum(v1_norm * v2_norm, axis=3)
angles = tf.acos(slopes)
angles = tf.where(tf.math.is_nan(angles), 0., angles) # Fix NaN, TODO think of faster way
return angles
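if __name__ == '__main__':
    # Minimal sanity sketch (illustrative values): the angle at p2 for an
    # axis-aligned right angle should be ~pi/2.
    p1 = tf.constant([[[[1., 0., 0.]]]])  # (Points=1, Batch=1, Len=1, Dims=3)
    p2 = tf.constant([[[[0., 0., 0.]]]])
    p3 = tf.constant([[[[0., 1., 0.]]]])
    print(InnerAngleRepresentation()(p1, p2, p3))  # -> [[[1.5707964]]]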
|
179918
|
import sys
import contextlib
import functools
import ir_measures
from ir_measures import providers, measures, Metric
from ir_measures.providers.base import Any, Choices, NOT_PROVIDED
class TrectoolsProvider(providers.Provider):
"""
trectools
https://github.com/joaopalotti/trectools
::
@inproceedings{palotti2019,
author = {<NAME> <NAME>},
title = {TrecTools: an open-source Python library for Information Retrieval practitioners involved in TREC-like campaigns},
series = {SIGIR'19},
year = {2019},
location = {Paris, France},
publisher = {ACM}
}
"""
NAME = 'trectools'
SUPPORTED_MEASURES = [
measures._P(cutoff=Any(), rel=Choices(1)),
measures._RR(cutoff=Choices(NOT_PROVIDED), rel=Choices(1)),
measures._Rprec(rel=Choices(1)),
measures._AP(cutoff=Any(), rel=Choices(1)),
measures._nDCG(cutoff=Any(), dcg=Any()),
measures._Bpref(rel=Choices(1)),
measures._RBP(cutoff=Any(), p=Any(), rel=Any()),
# Other supported metrics: urbp, ubpref, alpha_urbp, geometric_map, unjudged
]
def __init__(self):
super().__init__()
self.trectools = None
def _evaluator(self, measures, qrels):
import pandas as pd
measures = ir_measures.util.flatten_measures(measures)
# Convert qrels to dict_of_dict (input format used by pytrec_eval)
tmp_qrels = ir_measures.util.QrelsConverter(qrels).as_namedtuple_iter()
tmp_qrels = pd.DataFrame(tmp_qrels)
if len(tmp_qrels) == 0:
tmp_qrels = pd.DataFrame(columns=['query', 'docid', 'rel'], dtype='object')
else:
tmp_qrels = tmp_qrels.rename(columns={'query_id': 'query', 'doc_id': 'docid', 'relevance': 'rel'})
qrels = self.trectools.TrecQrel()
qrels.qrels_data = tmp_qrels
invocations = self._build_invocations(measures)
return TrectoolsEvaluator(measures, qrels, invocations, self.trectools)
def _build_invocations(self, measures):
invocations = []
for measure in measures:
def depth():
try:
cutoff = measure['cutoff']
except KeyError:
cutoff = NOT_PROVIDED
if cutoff is NOT_PROVIDED:
cutoff = sys.maxsize
return cutoff
if measure.NAME == 'P':
fn = functools.partial(self.trectools.TrecEval.get_precision, depth=depth(), per_query=True, trec_eval=False, removeUnjudged=False)
elif measure.NAME == 'RR':
fn = functools.partial(self.trectools.TrecEval.get_reciprocal_rank, depth=depth(), per_query=True, trec_eval=False, removeUnjudged=False)
elif measure.NAME == 'Rprec':
fn = functools.partial(self.trectools.TrecEval.get_rprec, depth=depth(), per_query=True, trec_eval=False, removeUnjudged=False)
elif measure.NAME == 'AP':
fn = functools.partial(self.trectools.TrecEval.get_map, depth=depth(), per_query=True, trec_eval=False)
elif measure.NAME == 'nDCG':
te_mode = {
'log2': True,
'exp-log2': False
}[measure['dcg']]
# trec_eval has other side-effects; namely ordering by score instead of rank.
# But in our setting, those are always the same so no difference.
fn = functools.partial(self.trectools.TrecEval.get_ndcg, depth=depth(), per_query=True, trec_eval=te_mode, removeUnjudged=False)
elif measure.NAME == 'Bpref':
fn = functools.partial(self.trectools.TrecEval.get_bpref, depth=depth(), per_query=True, trec_eval=False)
            elif measure.NAME == 'RBP':
                rel = measure['rel']
                # TODO: how to handle different relevance levels? I think the only way is to modify
                # the dataframe. For now, a provided rel is treated as binary topical relevance.
                if rel is not NOT_PROVIDED:
                    fn = lambda ev: self.trectools.TrecEval.get_rbp(ev, p=measure['p'], depth=depth(), per_query=True, binary_topical_relevance=True, average_ties=True, removeUnjudged=False)[0]
                else:
                    fn = lambda ev: self.trectools.TrecEval.get_rbp(ev, p=measure['p'], depth=depth(), per_query=True, binary_topical_relevance=False, average_ties=True, removeUnjudged=False)[0]
else:
raise ValueError(f'unsupported measure {measure}')
invocations.append((fn, measure))
return invocations
def initialize(self):
try:
import trectools
self.trectools = trectools
except ImportError as ex:
raise RuntimeError('trectools not available', ex)
class TrectoolsEvaluator(providers.Evaluator):
def __init__(self, measures, qrels, invocations, trectools):
super().__init__(measures, qrels.qrels_data['query'].unique())
self.qrels = qrels
self.invocations = invocations
self.trectools = trectools
def _iter_calc(self, run):
import pandas as pd
available_qids = set(self.qrels.qrels_data['query'].unique())
tmp_run = ir_measures.util.RunConverter(run).as_namedtuple_iter()
tmp_run = pd.DataFrame(tmp_run)
if len(tmp_run) == 0:
tmp_run = pd.DataFrame(columns=['query', 'docid', 'score'], dtype='object')
else:
tmp_run = tmp_run.rename(columns={'query_id': 'query', 'doc_id': 'docid', 'score': 'score'})
tmp_run.sort_values(['query', 'score'], ascending=[True, False], inplace=True)
run = self.trectools.TrecRun()
run.run_data = tmp_run
evaluator = self.trectools.TrecEval(run, self.qrels)
for invocation, measure in self.invocations:
for query_id, value in invocation(evaluator).itertuples():
if query_id in available_qids:
yield Metric(query_id=query_id, measure=measure, value=value)
providers.register(TrectoolsProvider())
|
180002
|
from flask import Blueprint, render_template, request, flash, redirect, url_for, jsonify, make_response
from app.users.models import Users, UsersSchema
from werkzeug.security import generate_password_hash, check_password_hash
from flask_restful import Resource, Api
import flask_restful
import jwt
from jwt import DecodeError, ExpiredSignature
from config import SECRET_KEY
from datetime import datetime, timedelta
from functools import wraps
from flask import g
users = Blueprint('users', __name__)
# http://marshmallow.readthedocs.org/en/latest/quickstart.html#declaring-schemas
schema = UsersSchema()
# JWT auth process start
def create_token(user):
payload = {
'sub': user.id,
'iat': datetime.utcnow(),
'exp': datetime.utcnow() + timedelta(days=1)
}
token = jwt.encode(payload, SECRET_KEY, algorithm='HS256')
return token.decode('unicode_escape')
def parse_token(req):
token = req.headers.get('Authorization').split()[1]
    return jwt.decode(token, SECRET_KEY, algorithms=['HS256'])
# Login decorator function
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not request.headers.get('Authorization'):
response = jsonify(message='Missing authorization header')
response.status_code = 401
return response
try:
payload = parse_token(request)
except DecodeError:
response = jsonify(message='Token is invalid')
response.status_code = 401
return response
        except ExpiredSignatureError:
response = jsonify(message='Token has expired')
response.status_code = 401
return response
g.user_id = payload['sub']
return f(*args, **kwargs)
return decorated_function
# JWT auth process end
api = Api(users)
class Auth(Resource):
def post(self):
data = request.get_json(force=True)
print(data)
email = data['email']
password = data['password']
user = Users.query.filter_by(email=email).first()
        if user is None:
response = make_response(
jsonify({"message": "invalid username/password"}))
response.status_code = 401
return response
if check_password_hash(user.password, password):
token = create_token(user)
return {'token': token}
else:
response = make_response(
jsonify({"message": "invalid username/password"}))
response.status_code = 401
return response
api.add_resource(Auth, '/login')
# Adding the login decorator to the Resource class
class Resource(flask_restful.Resource):
method_decorators = [login_required]
# Any API class now inheriting the Resource class will need Authentication
class User(Resource):
def get(self):
results = Users.query.all()
users = schema.dump(results, many=True).data
return jsonify({"users": users})
api.add_resource(User, '/users')
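# Example client flow (a sketch; the host, port, and credentials below are
# hypothetical and depend on where this blueprint is mounted):
#   import requests
#   token = requests.post('http://localhost:5000/login',
#                         json={'email': 'user@example.com', 'password': 'secret'}).json()['token']
#   requests.get('http://localhost:5000/users',
#                headers={'Authorization': 'Bearer ' + token})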
|
180034
|
import jax
from evosax import Strategies
from evosax.problems import ClassicFitness
def test_strategy_ask(strategy_name):
    # Test the ask API for a single strategy (parametrized over all strategies)
rng = jax.random.PRNGKey(0)
popsize = 20
strategy = Strategies[strategy_name](popsize=popsize, num_dims=2)
params = strategy.default_params
state = strategy.initialize(rng, params)
x, state = strategy.ask(rng, state, params)
assert x.shape[0] == popsize
assert x.shape[1] == 2
return
def test_strategy_ask_tell(strategy_name):
    # Test the ask-tell API for a single strategy (parametrized over all strategies)
rng = jax.random.PRNGKey(0)
popsize = 20
strategy = Strategies[strategy_name](popsize=popsize, num_dims=2)
params = strategy.default_params
state = strategy.initialize(rng, params)
x, state = strategy.ask(rng, state, params)
evaluator = ClassicFitness("rosenbrock", num_dims=2)
fitness = evaluator.rollout(rng, x)
state = strategy.tell(x, fitness, state, params)
return
|
180043
|
from abc import ABCMeta, abstractmethod
class WordVectorWrapper:
__metaclass__ = ABCMeta
def __init__(self, w2v_model):
self.w2v_model = w2v_model
self.vocab = self.get_vocab()
self.word_vector_shape = self.get_word_vector_shape()
@abstractmethod
def get_vocab(self):
pass
@abstractmethod
def get_vector(self, word):
pass
@abstractmethod
def closest_words(self, vector, k):
pass
@abstractmethod
def get_word_vector_shape(self):
pass
def __getitem__(self, item):
return self.get_vector(item)
|
180046
|
import json
from datetime import date, datetime
import requests
from .utilities import test_utility
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat()
    raise TypeError("Type %s not serializable" % type(obj))
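# Example: json.dumps({'when': datetime(2020, 1, 1)}, default=json_serial)
# yields '{"when": "2020-01-01T00:00:00"}'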
class server_api(test_utility):
def __init__(self, daemon_config, task_id):
super().__init__(daemon_config, task_id)
self.result = {}
# create an empty record in the database
self._update_result()
def _update_result(self, new_result=None):
if new_result:
self.result = {**self.result, **new_result}
        # Send the pre-serialized body via data=; passing the string through
        # json= would JSON-encode it a second time.
        ret = requests.post('{}/api_v1/testresult/{}'.format(self.daemon_config['server_url'],
                                                             self.task_id),
                            data=json.dumps(self.result, default=json_serial),
                            headers={'Content-Type': 'application/json'})
if ret.status_code != 200:
raise AssertionError('Updating the results to server failed')
|
180059
|
import pytest
from regcore_read.views import search_utils
def inner_fn(request, search_args):
# We'd generally return a Response here, but we're mocking
return search_args
@pytest.mark.parametrize('page_size', ('-10', '0', '200', 'abcd', '---'))
def test_invalid_page_size(page_size, rf):
"""Invalid page sizes should not be admitted."""
view = search_utils.requires_search_args(inner_fn)
result = view(rf.get('?q=term&page_size={0}'.format(page_size)))
assert result.status_code == 400
def test_valid_page_size(rf):
"""Valid page sizes should pass through."""
view = search_utils.requires_search_args(inner_fn)
    view(rf.get('?q=term'))  # no page_size: the default should be accepted
    result = view(rf.get('?q=term&page_size=10'))
assert result.page_size == 10
|
180084
|
import modelindex
import pytest
from modelindex import Metadata
from modelindex.models.Collection import Collection
from modelindex.models.CollectionList import CollectionList
from modelindex.models.Model import Model
from modelindex.models.ModelList import ModelList
from modelindex.models.Result import Result
from modelindex.models.ResultList import ResultList
from modelindex.models.ModelIndex import ModelIndex
import copy
def test_deepcopy():
mi = modelindex.load("tests/test-mi/03_col")
m1 = mi.models[0]
m2 = copy.deepcopy(m1)
m2.name = "New name"
assert m1.name != m2.name
assert m2.name == "New name"
m2.results[0].task = "New task"
assert m1.results[0].task != m2.results[0].task
assert m2.results[0].task == "New task"
m2.results.data.append(Result(task="", dataset="", metrics={}))
assert len(m1.results) == 1
assert len(m2.results) == 2
m2.metadata.flops = 10
assert m1.metadata.flops != m2.metadata.flops
assert m2.metadata.flops == 10
def test_col_merge():
mi = modelindex.load("tests/test-mi/17_collections_merge")
m1 = mi.models[0].full_model
m2 = mi.models[1].full_model
assert m1.metadata.training_data == "ImageNet"
assert m2.metadata.training_data == "Reddit"
assert len(m1.metadata.training_techniques) == 4
assert len(m2.metadata.training_techniques) == 5
assert m2.metadata.training_techniques[-1] == "Transformers"
assert m1.readme == "docs/inception-v3-readme.md"
assert m2.readme == "docs/inception-v3-readme-120.md"
mi = modelindex.load("tests/test-mi/17_collections_merge/mi2.yml")
m1 = mi.models[0].full_model
m2 = mi.models[1].full_model
assert len(m1.results) == 2
assert len(m2.results) == 2
assert m1.results[0].metrics["Top 1 Accuracy"] == "11%"
assert m2.results[0].metrics["Top 1 Accuracy"] == "11%"
assert m1.results[1].metrics["Top 1 Accuracy"] == "74.67%"
assert m2.results[1].metrics["Top 1 Accuracy"] == "75.1%"
mi = modelindex.load("tests/test-mi/17_collections_merge/mi3.yml")
err = mi.check(silent=True)
assert len(err) == 2
assert "Inception v3-1" in err[0]
m1 = mi.models[0].full_model
m2 = mi.models[1].full_model
assert m1.metadata.training_data is None
assert m2.metadata.training_data == "Reddit"
|
180109
|
class PrefixStorage(object):
"""Storage for store information about prefixes.
>>> s = PrefixStorage()
First we save information for some prefixes:
>>> s["123"] = "123 domain"
>>> s["12"] = "12 domain"
Then we can retrieve prefix information by full key
(longest prefix always win):
>>> s.getByPrefix("123456")
'123 domain'
>>> s.getByPrefix("12456")
'12 domain'
If no prefix has been found then getByPrefix() returns default value:
>>> s.getByPrefix("13456", "None")
'None'
"""
def __init__(self):
self._mapping = {}
self._sizes = []
def __setitem__(self, key, value):
ln = len(key)
if ln not in self._sizes:
self._sizes.append(ln)
self._sizes.sort()
self._sizes.reverse()
self._mapping[key] = value
def getByPrefix(self, key, default=None):
for ln in self._sizes:
k = key[:ln]
if k in self._mapping:
return self._mapping[k]
return default
|
180146
|
from django.conf.urls import url
from django.views.i18n import JavaScriptCatalog
from finder import views
urlpatterns = [
url(r'^jsi18n/$', JavaScriptCatalog.as_view(packages=['finder']), name='javascript-catalog'),
url(r'^setlang/$', views.set_language, name='set_language'),
url(r'^api/$', views.api, name='apidoc'),
url(r'^data/$', views.data, name='data'),
url(r'^demo/$', views.demo, name='demo'),
url(r'^government/$', views.government, name='government'),
url(r'^privacy/$', views.privacy, name='privacy'),
url(r'^$', views.index),
]
|
180154
|
import rm64
encoders = rm64.encoders
for encoder in encoders:
encoder["case"] = "mixedcase"
|
180162
|
import torch
mapping = {}
def register(name):
def _thunk(func):
mapping[name] = func
return func
return _thunk
@register("rmsprop")
def rmsprop():
return torch.optim.RMSprop
@register("adam")
def adam():
return torch.optim.Adam
def get_optimizer_func(name):
"""
If you want to register your own optimizer function, you just need:
Usage Example:
-------------
from tbase.common.optimizers import register
@register("your_reward_function_name")
def your_optimizer_func(**kwargs):
...
return optimizer_func
"""
if callable(name):
return name
elif name in mapping:
return mapping[name]
else:
raise ValueError('Unknown optimizer_func: {}'.format(name))
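# Example usage (a sketch; the registered factories above return optimizer classes):
#   import torch.nn as nn
#   model = nn.Linear(4, 2)
#   OptimizerCls = get_optimizer_func("adam")()   # -> torch.optim.Adam
#   optimizer = OptimizerCls(model.parameters(), lr=1e-3)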
|
180177
|
import numpy as np
import torch
from ..callback.progressbar import ProgressBar
from ..common.tools import restore_checkpoint,model_device
from ..common.tools import summary
from ..common.tools import seed_everything
from ..common.tools import AverageMeter
from torch.nn.utils import clip_grad_norm_
from pybert.train.metrics import MRR, Recall, NDCG, EIM, REIM, RIIM
from pybert.configs.basic_config import config
class Trainer(object):
def __init__(self,n_gpu,i2w,i2l,
model,
epochs,
logger,
criterion,
optimizer,
lr_scheduler,
early_stopping,
epoch_metrics,
writer,
batch_metrics,
gradient_accumulation_steps,
grad_clip = 0.0,
verbose = 1,
fp16 = None,
resume_path = None,
training_monitor = None,
model_checkpoint = None
):
self.start_epoch = 1
self.global_step = 0
self.n_gpu = n_gpu
self.model = model
self.epochs = epochs
        self.logger = logger
self.fp16 = fp16
self.grad_clip = grad_clip
self.verbose = verbose
self.criterion = criterion
self.optimizer = optimizer
self.i2w = i2w
self.i2l = i2l
self.lr_scheduler = lr_scheduler
self.early_stopping = early_stopping
self.epoch_metrics = epoch_metrics
self.writer = writer
self.batch_metrics = batch_metrics
self.model_checkpoint = model_checkpoint
self.training_monitor = training_monitor
self.gradient_accumulation_steps = gradient_accumulation_steps
self.model, self.device = model_device(n_gpu = self.n_gpu, model=self.model)
if self.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
if resume_path:
self.logger.info(f"\nLoading checkpoint: {resume_path}")
resume_dict = torch.load(resume_path / 'checkpoint_info.bin')
            best = resume_dict['best']
self.start_epoch = resume_dict['epoch']
if self.model_checkpoint:
self.model_checkpoint.best = best
self.logger.info(f"\nCheckpoint '{resume_path}' and epoch {self.start_epoch} loaded")
def epoch_reset(self):
self.inputs = []
self.outputs = []
self.targets = []
self.result = {}
for metric in self.epoch_metrics:
metric.reset()
def batch_reset(self):
self.info = {}
for metric in self.batch_metrics:
metric.reset()
def save_info(self,epoch,best):
model_save = self.model.module if hasattr(self.model, 'module') else self.model
state = {"model":model_save,
'epoch':epoch,
'best':best}
return state
def valid_epoch(self,data,epoch):
pbar = ProgressBar(n_total=len(data))
vl_loss = AverageMeter()
self.epoch_reset()
self.model.eval()
eval_metrics = [MRR(), NDCG(), Recall(), EIM(config['data_label_path'],self.i2w,self.i2l), RIIM(config['data_label_path'],self.i2w,self.i2l), REIM(config['data_label_path'],self.i2w,self.i2l)]
implicit_metrics = ["eim","riim","reim"]
with torch.no_grad():
for step, batch in enumerate(data):
batch = tuple(t.to(self.device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
logits = self.model(input_ids, segment_ids,input_mask)
loss = self.criterion(target=label_ids, output=logits)
                vl_loss.update(loss.item(), n=1)
if self.epoch_metrics:
for metric in self.epoch_metrics:
metric(logits=logits, target=label_ids)
value = metric.value()
if value:
self.result[f'valid_{metric.name()}'] = value
for metric in eval_metrics:
if metric.name() in implicit_metrics:
metric(input_ids=input_ids, output=logits, target=label_ids)
else:
metric(logits=logits, target=label_ids)
pbar.batch_step(step=step,info = {},bar_type='Evaluating')
print("------------- valid result --------------")
self.result['valid_loss'] = vl_loss.avg
for metric in eval_metrics:
metric.show()
if 'cuda' in str(self.device):
torch.cuda.empty_cache()
return self.result
def train_epoch(self,data):
pbar = ProgressBar(n_total = len(data))
tr_loss = AverageMeter()
self.epoch_reset()
for step, batch in enumerate(data):
self.batch_reset()
self.model.train()
batch = tuple(t.to(self.device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
logits = self.model(input_ids, segment_ids,input_mask)
loss = self.criterion(output=logits,target=label_ids)
if len(self.n_gpu) >= 2:
loss = loss.mean()
if self.gradient_accumulation_steps > 1:
loss = loss / self.gradient_accumulation_steps
            if self.fp16:
                from apex import amp  # apex availability is verified in __init__
                with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                    scaled_loss.backward()
                clip_grad_norm_(amp.master_params(self.optimizer), self.grad_clip)
else:
loss.backward()
clip_grad_norm_(self.model.parameters(), self.grad_clip)
if (step + 1) % self.gradient_accumulation_steps == 0:
self.lr_scheduler.step()
self.optimizer.step()
self.optimizer.zero_grad()
self.global_step += 1
if self.batch_metrics:
for metric in self.batch_metrics:
metric(logits = logits,target = label_ids)
self.info[metric.name()] = metric.value()
self.info['loss'] = loss.item()
tr_loss.update(loss.item(),n = 1)
if self.verbose >= 1:
pbar.batch_step(step= step,info = self.info,bar_type='Training')
self.outputs.append(logits.cpu().detach())
self.targets.append(label_ids.cpu().detach())
print("\n------------- train result --------------")
# epoch metric
self.outputs = torch.cat(self.outputs, dim =0).cpu().detach()
self.targets = torch.cat(self.targets, dim =0).cpu().detach()
self.result['loss'] = tr_loss.avg
if self.epoch_metrics:
for metric in self.epoch_metrics:
metric(logits=self.outputs, target=self.targets)
value = metric.value()
if value:
self.result[f'{metric.name()}'] = value
if "cuda" in str(self.device):
torch.cuda.empty_cache()
return self.result
def train(self,train_data,valid_data,seed):
seed_everything(seed)
print("model summary info: ")
for step, (input_ids, input_mask, segment_ids, label_ids) in enumerate(train_data):
input_ids = input_ids.to(self.device)
input_mask = input_mask.to(self.device)
segment_ids = segment_ids.to(self.device)
summary(self.model,*(input_ids, segment_ids,input_mask),show_input=True)
break
# ***************************************************************
for epoch in range(self.start_epoch,self.start_epoch+self.epochs):
self.logger.info(f"Epoch {epoch}/{self.epochs}")
train_log = self.train_epoch(train_data)
valid_log = self.valid_epoch(valid_data, epoch)
            logs = dict(train_log, **valid_log)
show_info = f'\nEpoch: {epoch} - ' + "-".join([f' {key}: {value:.4f} ' for key,value in logs.items()])
self.logger.info(show_info)
# save model
if self.model_checkpoint:
state = self.save_info(epoch,best=logs['valid_loss'])
self.model_checkpoint.bert_epoch_step(current=logs[self.model_checkpoint.monitor],state = state)
# early_stopping
if self.early_stopping:
self.early_stopping.epoch_step(epoch=epoch, current=logs[self.early_stopping.monitor])
if self.early_stopping.stop_training:
break
|
180217
|
import taco.common.exceptions
class SQSWrapperException(taco.common.exceptions.DataDictException):
pass
class SQSClientException(SQSWrapperException):
def __init__(self, message='SQS client error', data_dict=None, exc=None):
super().__init__(message, data_dict=data_dict, exc=exc)
class SendMessageException(SQSWrapperException):
def __init__(self, data, queue_name, exc=None):
data_dict = {
'data': data,
'queue_name': queue_name
}
super().__init__('Failed to send message', data_dict=data_dict, exc=exc)
class ListQueuesClientException(SQSClientException):
def __init__(self, exc=None):
super().__init__('SQS list queues client error', exc=exc)
class CreateQueuesClientException(SQSClientException):
def __init__(self, queue_creation_config, exc=None):
super().__init__('SQS create queue client error',
data_dict={'queue_creation_config': queue_creation_config},
exc=exc)
class GetQueueClientException(SQSClientException):
def __init__(self, queue_name, exc=None):
super().__init__('SQS get queue by name client error', data_dict={'queue_name': queue_name}, exc=exc)
class ClearQueueClientException(SQSClientException):
def __init__(self, queue_name, exc=None):
super().__init__('SQS clear queue client error', data_dict={'queue_name': queue_name}, exc=exc)
class DeleteQueueClientException(SQSClientException):
def __init__(self, queue_name, exc=None):
super().__init__('SQS delete queue client error', data_dict={'queue_name': queue_name}, exc=exc)
class SendMessageClientException(SQSClientException):
def __init__(self, queue_name, data_messages, exc=None):
data_dict = {
'queue_name': queue_name,
'data_messages': data_messages
}
super().__init__('SQS send messages client error', data_dict=data_dict, exc=exc)
class ReadMessageClientException(SQSClientException):
def __init__(self, queue_name, exc=None):
super().__init__('SQS read messages client error', data_dict={'queue_name': queue_name}, exc=exc)
class DeleteMessageClientException(SQSClientException):
def __init__(self, exc=None):
super().__init__('SQS delete messages client error', exc=exc)
class SQSNonExistingQueueException(SQSWrapperException):
def __init__(self, queue_name, exc=None):
        super().__init__('Non-existing queue name', data_dict={'queue_name': queue_name}, exc=exc)
class SQSInvalidParamsException(SQSWrapperException):
def __init__(self, params, exc=None):
super().__init__('AWS invalid param exception',
data_dict={'params': params}, exc=exc)
class SQSCreateExistingQueueParamsErrorException(SQSWrapperException):
def __init__(self, params, exc=None):
super().__init__('A queue already exists with the same name and a different value for an attribute',
data_dict={'params': params}, exc=exc)
class SQSMessageWrapperException(SQSWrapperException):
pass
class SQSDecodingException(SQSMessageWrapperException):
def __init__(self, exc=None):
super().__init__('SQS Message decoding exception', exc=exc)
|
180228
|
import ujson
class TestAccount:
def test_get_account_without_auth(self, test_server):
res = test_server.get("/account")
assert res.status_code == 401
def test_get_info(self, test_server, create_account_jwt):
res = test_server.get(
"/account", headers={"Authorization": f"Bearer {create_account_jwt}"}
)
resData = res.json()
assert res.status_code == 200
assert "jwt" not in resData
assert resData["emailAddress"] == "<EMAIL>"
assert resData["userRole"] == "USER"
assert resData["isVerified"] is False
def test_valid_update(self, test_server, create_account_jwt):
payload = {"firstName": "Jason", "lastName": "Bourne", "emailAddress": ""}
res = test_server.put(
"/account",
headers={"Authorization": f"Bearer {create_account_jwt}"},
data=ujson.dumps(payload),
)
resData = res.json()
assert res.status_code == 200
assert resData["firstName"] == "Jason"
assert resData["lastName"] == "Bourne"
assert resData["createdTime"] < resData["modifiedTime"]
assert resData["emailAddress"] == "<EMAIL>"
def test_update_own_role(self, test_server, create_account_jwt):
payload = {"userRole": "ADMIN"}
res = test_server.put(
"/account",
headers={"Authorization": f"Bearer {create_account_jwt}"},
data=ujson.dumps(payload),
)
assert res.status_code == 403
def test_update_password(self, test_server, create_account_jwt):
payload = {"password": "<PASSWORD>"}
res = test_server.put(
"/account",
headers={"Authorization": f"Bearer {create_account_jwt}"},
data=ujson.dumps(payload),
)
assert res.status_code == 200
resData = res.json()
login_data = {
"emailAddress": resData["emailAddress"],
"password": "<PASSWORD>",
}
login_res = test_server.post("/login", data=ujson.dumps(login_data))
login_resData = login_res.json()
assert login_res.status_code == 200
assert login_resData["emailAddress"] == resData["emailAddress"]
def test_short_pass_update(self, test_server, create_account_jwt, app_config):
payload = {"password": "x" * (app_config.MIN_PASS_LENGTH - 1)}
res = test_server.put(
"/account",
headers={"Authorization": f"Bearer {create_account_jwt}"},
data=ujson.dumps(payload),
)
assert res.status_code == 422
def test_update_email_to_existing_account(self, test_server, create_account_jwt):
# guarantees there is already a "<EMAIL>" associated account.
unusedJwt = create_account_jwt # NOQA
payload = {"emailAddress": "<EMAIL>", "password": "<PASSWORD>"}
res = test_server.post("/signup", data=ujson.dumps(payload))
resData = res.json()
jwtToken = resData["jwt"]
updatePayload = {"emailAddress": "<EMAIL>"}
updateRes = test_server.put(
"/account",
headers={"Authorization": f"Bearer {jwtToken}"},
data=ujson.dumps(updatePayload),
)
assert updateRes.status_code == 403
def test_update_email_to_own_email(self, test_server, create_account_jwt):
# guarantees there is already a "<EMAIL>" associated account.
unusedJwt = create_account_jwt # NOQA
payload = {"emailAddress": "<EMAIL>", "password": "<PASSWORD>"}
res = test_server.post("/signup", data=ujson.dumps(payload))
resData = res.json()
jwtToken = resData["jwt"]
updatePayload = {
"emailAddress": "<EMAIL>",
"firstName": "flavor-dealer",
}
updateRes = test_server.put(
"/account",
headers={"Authorization": f"Bearer {jwtToken}"},
data=ujson.dumps(updatePayload),
)
assert updateRes.status_code == 200
assert updateRes.json()["firstName"] == "flavor-dealer"
def test_access_all_users(self, test_server, create_account_jwt):
res = test_server.get(
"/accounts", headers={"Authorization": f"Bearer {create_account_jwt}"}
)
assert res.status_code == 403
def test_access_specific_user(self, test_server, create_account_jwt):
res = test_server.get(
"/accounts/1", headers={"Authorization": f"Bearer {create_account_jwt}"}
)
assert res.status_code == 403
def test_delete_account(self, test_server, create_account_jwt):
res = test_server.delete(
"/account", headers={"Authorization": f"Bearer {create_account_jwt}"}
)
assert res.status_code == 200
deleted_credentials = {"emailAddress": "<EMAIL>", "password": "<PASSWORD>"}
res2 = test_server.post("/login", data=ujson.dumps(deleted_credentials))
assert res2.status_code == 404
|
180243
|
import json
import time
from pybbn.graph.dag import Bbn
from pybbn.pptc.inferencecontroller import InferenceController
def do_it(join_tree):
InferenceController.reapply(join_tree, {0: [0.5, 0.5]})
with open('singly-bbn.json', 'r') as f:
s = time.time()
bbn = Bbn.from_dict(json.loads(f.read()))
e = time.time()
d = e - s
print('{:.5f} seconds to load'.format(d))
s = time.time()
join_tree = InferenceController.apply(bbn)
e = time.time()
d = e - s
print('{:.5f} seconds to create join tree'.format(d))
InferenceController.reapply(join_tree, {0: [0.5, 0.5]})
|
180249
|
from .changesets import _ChangesetContext
from .meta import ConfigManagerSettings
from .persistence import ConfigPersistenceAdapter, YamlReaderWriter, JsonReaderWriter, ConfigParserReaderWriter
from .schema_parser import parse_config_schema
from .sections import Section
from .utils import _get_persistence_adapter_for
class Config(Section):
"""
Represents a configuration tree.
.. attribute:: Config(schema=None, **kwargs)
Creates a configuration tree from a schema.
Args:
``schema``: can be a dictionary, a list, a simple class, a module, another :class:`.Config`
instance, and a combination of these.
Keyword Args:
``config_parser_factory``:
Examples::
config = Config([
('greeting', 'Hello!'),
('uploads', Config({
'enabled': True,
'tmp_dir': '/tmp',
})),
('db', {
'host': 'localhost',
'user': 'root',
'password': '<PASSWORD>',
'name': 'test',
}),
('api', Config([
'host',
'port',
'default_user',
('enabled', Item(type=bool)),
])),
])
.. attribute:: <config>[<name_or_path>]
Access item by its name, section by its alias, or either by its path.
Args:
``name`` (str): name of an item or alias of a section
Args:
``path`` (tuple): path of an item or a section
Returns:
:class:`.Item` or :class:`.Config`
Examples::
>>> config['greeting']
<Item greeting 'Hello!'>
>>> config['uploads']
<Config uploads at 4436269600>
>>> config['uploads', 'enabled'].value
True
.. attribute:: <config>.<name>
Access an item by its name or a section by its alias.
For names and aliases that break Python grammar rules, use ``config[name]`` notation instead.
Returns:
:class:`.Item` or :class:`.Config`
.. attribute:: <name_or_path> in <Config>
Returns ``True`` if an item or section with the specified name or path is to be found in this section.
.. attribute:: len(<Config>)
Returns the number of items and sections in this section (does not include sections and items in
sub-sections).
.. attribute:: __iter__()
Returns an iterator over all item names and section aliases in this section.
"""
is_config = True
def __init__(self, schema=None, **configmanager_settings):
if 'configmanager_settings' in configmanager_settings:
if len(configmanager_settings) > 1:
raise ValueError('Dubious configmanager_settings specification: {}'.format(configmanager_settings))
configmanager_settings = configmanager_settings['configmanager_settings']
        if isinstance(configmanager_settings, ConfigManagerSettings):
            self._settings = configmanager_settings
        else:
            self._settings = ConfigManagerSettings(**configmanager_settings)
super(Config, self).__init__()
self._changeset_contexts = []
self._configparser_adapter = None
self._json_adapter = None
self._yaml_adapter = None
self._click_extension = None
if schema is not None:
parse_config_schema(schema, root=self)
if self.settings.auto_load:
self.load()
def __repr__(self):
return '<{cls} {alias} at {id}>'.format(cls=self.__class__.__name__, alias=self.alias, id=id(self))
def __call__(self, values=None):
"""
Returns a changeset context which auto-resets itself on exit.
This allows creation of temporary changes of configuration.
Do not use this in combination with non-resetting changeset contexts:
the behaviour under such conditions is undefined.
If ``values`` dictionary is supplied, the specified configuration values will be set
for the duration of the changeset context.
"""
context = self.changeset_context(auto_reset=True)
if values is not None:
self.load_values(values)
return context
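    # Example (a sketch; assumes the returned changeset context is used as a
    # context manager, so the values reset on exit):
    #   with config(values={'greeting': 'Hi!'}):
    #       ...  # temporary values are active here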
@property
def settings(self):
return self._settings
def changeset_context(self, **options):
"""
Returns:
configmanager.changesets._ChangesetContext
"""
return _ChangesetContext(self, **options)
@property
def configparser(self):
"""
Adapter to dump/load INI format strings and files using standard library's
``ConfigParser`` (or the backported configparser module in Python 2).
Returns:
ConfigPersistenceAdapter
"""
if self._configparser_adapter is None:
self._configparser_adapter = ConfigPersistenceAdapter(
config=self,
reader_writer=ConfigParserReaderWriter(
config_parser_factory=self.settings.configparser_factory,
),
)
return self._configparser_adapter
@property
def json(self):
"""
Adapter to dump/load JSON format strings and files.
Returns:
ConfigPersistenceAdapter
"""
if self._json_adapter is None:
self._json_adapter = ConfigPersistenceAdapter(
config=self,
reader_writer=JsonReaderWriter(),
)
return self._json_adapter
@property
def yaml(self):
"""
Adapter to dump/load YAML format strings and files.
Returns:
ConfigPersistenceAdapter
"""
if self._yaml_adapter is None:
self._yaml_adapter = ConfigPersistenceAdapter(
config=self,
reader_writer=YamlReaderWriter(),
)
return self._yaml_adapter
@property
def click(self):
"""
click extension
Returns:
ClickExtension
"""
if self._click_extension is None:
from .click_ext import ClickExtension
self._click_extension = ClickExtension(
config=self
)
return self._click_extension
def load(self):
"""
Load user configuration based on settings.
"""
        # Must reverse because we want the sources assigned to higher-up Config instances
        # to override sources assigned to lower Config instances.
for section in reversed(list(self.iter_sections(recursive=True, key=None))):
if section.is_config:
section.load()
for source in self.settings.load_sources:
adapter = getattr(self, _get_persistence_adapter_for(source))
if adapter.store_exists(source):
adapter.load(source)
def validate(self):
for item in self.iter_items(recursive=True, key=None):
item.validate()
|
180307
|
import os
import argparse
import socket
from id_driver import IDDriver
print("NAAL_FPGA board start")
parser = argparse.ArgumentParser(
description='Generic script for running the ID Extractor on the ' +
'FPGA board.')
# Socket communication arguments
parser.add_argument(
'--host_ip', type=str, default='127.0.0.1',
help='IP Address of host PC. Used for socket communication between the ' +
'nengo models.')
parser.add_argument(
'--remote_ip', type=str, default='127.0.0.1',
help='IP Address of FPGA board. Used for socket communication' +
'between nengo models.')
parser.add_argument(
    '--udp_port', type=int, default=50000,
    help='Port number to use for the UDP socket communication.')
parser.add_argument(
'--socket_args', type=str, default='{}',
help='Additional arguments used when creating the udp socket. Should be '+
'formatted as a dictionary string.')
parser.add_argument(
'--tcp_port', type=int, default=50000,
help='Port number to use for the socket communication.')
#parser.add_argument(
# '--arg_data_file', type=str, default=os.path.join(params_path, 'args.npz'),
# help='Path to parameter arguments (for ensemble, output connection) data' +
# 'file.')
# Parse the arguments
args = parser.parse_args()
print(args.host_ip)
# Driver for ID_extractor bitstream
fpga_driver = IDDriver()
id_str = "Found board ID:" + str(fpga_driver.id_bytes)
print(id_str)
#send_sock= sockets.UDPSendSocket('192.168.1.30',5001)
#send_sock.send("test")
#tcp_init=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
#tcp_init.bind(("",8080))
#tcp_init.listen(1)
#print("server start")
#tcp_init.accept()
#print("join");
print("NAAL_FPGA board socket ");
send_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
send_sock.connect(('192.168.1.30',8585))
send_sock.sendall(fpga_driver.id_bytes)
send_sock.close()
#while True:
# If we are local, then print and save ID here
#if args.host_ip == parser.get_default('host_ip'):
# id_str = "Found board ID: %#0.16X" % fpga_driver.id_int
# print(id_str)
# with open("id_pynq.txt", 'w') as file:
# file.write(id_str + '\n')
# If we have a host connection, send the ID back
#else:
# Using vanilla socket since we are only sending one value
# tcp_send = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# tcp_send.connect((args.host_ip, args.tcp_port))
# tcp_send.sendall(fpga_driver.id_bytes)
# Close socket
#tcp_send.close()
|
180318
|
from collections import defaultdict
from math import exp, ceil
from typing import List
from jchord.midi import MidiNote
# Notes separated by less than this much belong to one chord
MIN_SEP_INTERVAL = 0.1
# Number of buckets per second for the KDE algorithm
KDE_BUCKETS_PER_SECOND = 1 / MIN_SEP_INTERVAL
def kernel_default(distance):
"""
    Default kernel: a Gaussian that decays over MIN_SEP_INTERVAL seconds.
"""
return exp(-((distance / MIN_SEP_INTERVAL) ** 2))
def group_notes_to_chords(notes: List[MidiNote], kernel=None) -> List[List[MidiNote]]:
"""
Groups the list of `MidiNote`s by time.
The return value maps time to a list of `MidiNote`s for that time.
"""
if kernel is None:
kernel = kernel_default
# Degenerate case: no notes -> no chords
if not notes:
return []
# Ensure notes are sorted
notes = sorted(notes, key=lambda note: note.time)
# Get the total duration of all notes
min_time = notes[0].time
max_time = notes[-1].time
# Degenerate case: all in one chord
if (max_time - min_time) <= MIN_SEP_INTERVAL:
return [notes]
max_time += notes[-1].duration
duration = max_time - min_time
# Do kernel density estimate
bucket_duration = 1.0 / KDE_BUCKETS_PER_SECOND
kde = [
sum(kernel(abs(note.time - i * bucket_duration)) for note in notes)
for i in range(ceil(KDE_BUCKETS_PER_SECOND * duration))
]
# Find kde_threshold such that the times between the first and last note in a chord
# always has kde[t] > kde_threshold
buckets = defaultdict(list)
kde_threshold = float("inf")
for note in notes:
bucket = min(int(note.time / bucket_duration), len(kde) - 1)
buckets[bucket].append(note)
kde_threshold = min(kde_threshold, kde[bucket])
# It needs to be a little bit lower than that to ensure all notes get included in a chord.
    # Arbitrarily reduce by 5%.
    kde_threshold *= 0.95
# Do grouping
chords = []
cur_chord = []
for i, kde_val in enumerate(kde):
if kde_val > kde_threshold:
if i in buckets:
cur_chord.extend(buckets[i])
else:
if cur_chord:
chords.append(cur_chord)
cur_chord = []
if cur_chord:
chords.append(cur_chord)
return chords
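# Example usage (a sketch; how the MidiNote objects are obtained is up to the caller):
#   chords = group_notes_to_chords(notes)   # notes: List[MidiNote]
#   # chords[0] is the list of MidiNotes sounding together earliest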
|
180322
|
import logging
from functools import partial
import cv2
import os
import json
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from evaluation.inception import InceptionScore
from sg2im.data.dataset_params import get_dataset, get_collate_fn
from scripts.args import get_args, print_args, init_args
from scripts.graphs_utils import calc_log_p
from sg2im.data.utils import decode_image, imagenet_deprocess, print_compute_converse_edges, \
print_compute_transitive_edges
from sg2im.meta_models import MetaGeneratorModel, MetaDiscriminatorModel
from sg2im.model import get_conv_converse
from sg2im.pix2pix_model import Pix2PixModel
from sg2im.data import deprocess_batch
from sg2im.metrics import jaccard
from sg2im.utils import batch_to, log_scalar_dict, remove_dummies_and_padding
from spade.models.networks.sync_batchnorm import DataParallelWithCallback
torch.backends.cudnn.benchmark = True
def restore_checkpoint(args, model, gans_model, discriminator, optimizer, device):
try:
if args.checkpoint_name is None:
raise Exception('You should pre-train the model on your training data first')
img_discriminator, obj_discriminator = discriminator.img_discriminator, discriminator.obj_discriminator,
optimizer_d_img, optimizer_d_obj = discriminator.optimizer_d_img, discriminator.optimizer_d_obj
# Load pre-trained weights for fine-tune
checkpoint = torch.load(args.checkpoint_name, map_location=device)
model.load_state_dict(checkpoint['model_state'])
gans_model.load_state_dict(checkpoint['gans_model_state'])
img_discriminator.load_state_dict(checkpoint['d_img_state'])
obj_discriminator.load_state_dict(checkpoint['d_obj_state'])
# Load Optimizers
try:
optimizer_d_img.load_state_dict(checkpoint['d_img_optim_state'])
optimizer_d_obj.load_state_dict(checkpoint['d_obj_optim_state'])
optimizer.load_state_dict(checkpoint['optim_state'])
except Exception as e:
print("Could not load optimizers state:", e)
# Load Epoch and Iteration num.
t = checkpoint['counters']['t']
epoch = checkpoint['counters']['epoch']
except Exception as e:
raise NotImplementedError(
'Could not restore weights for checkpoint {} because `{}`'.format(args.checkpoint_name, e))
return epoch, t
def restore_checkpoints(args, model, gans_model, discriminator, optimizer, device):
try:
if args.checkpoint_name is None:
raise Exception('You should pre-train the model on your training data first')
img_discriminator, obj_discriminator = discriminator.img_discriminator, discriminator.obj_discriminator,
optimizer_d_img, optimizer_d_obj = discriminator.optimizer_d_img, discriminator.optimizer_d_obj
# Load pre-trained weights for fine-tune
checkpoint_gan = torch.load(args.checkpoint_gan_name, map_location=device)
checkpoint_graph = torch.load(args.checkpoint_graph_name, map_location=device)
checkpoint_gan['model_state'].update(checkpoint_graph['model_state'])
model.load_state_dict(checkpoint_gan['model_state'], strict=False)
checkpoint_gan['gans_model_state'].pop(
'module.discriminator.mask_discriminator.discriminator_0.model0.0.weight')
checkpoint_gan['gans_model_state'].pop(
'module.discriminator.mask_discriminator.discriminator_1.model0.0.weight')
checkpoint_gan['gans_model_state'].pop('module.netD_mask.discriminator_0.model0.0.weight')
checkpoint_gan['gans_model_state'].pop('module.netD_mask.discriminator_1.model0.0.weight')
gans_model.load_state_dict(checkpoint_gan['gans_model_state'], strict=False)
img_discriminator.load_state_dict(checkpoint_gan['d_img_state'])
obj_discriminator.load_state_dict(checkpoint_gan['d_obj_state'])
except Exception as e:
raise NotImplementedError(
'Could not restore weights for checkpoint {} because `{}`'.format(args.checkpoint_name, e))
# Load Optimizers
try:
optimizer_d_img.load_state_dict(checkpoint_gan['d_img_optim_state'])
optimizer_d_obj.load_state_dict(checkpoint_gan['d_obj_optim_state'])
optimizer.load_state_dict(checkpoint_gan['optim_state'])
except Exception as e:
print("Could not load optimizers state:", e)
# Load Epoch and Iteration num.
t = 0
epoch = 0
return epoch, t
def freeze_weights(model, discriminator, module):
print(" >> Freeze Weights:")
if module == 'generation':
print(" >> Freeze Layout to image module")
if hasattr(model, 'layout_to_image_model'):
for param in model.layout_to_image_model.parameters():
param.requires_grad = False
for param in discriminator.parameters():
param.requires_grad = False
else:
        raise NotImplementedError('Unrecognized option, you can freeze either graph module or I3D module')
def add_loss(curr_loss, loss_dict, loss_name, weight=1):
curr_loss = curr_loss * weight
loss_dict[loss_name] = curr_loss.item()
def build_test_dsets(args):
test_dset = get_dataset(args.dataset, 'test', args)
vocab = test_dset.vocab
collate_fn = get_collate_fn(args)
loader_kwargs = {
'batch_size': args.batch_size,
'num_workers': args.loader_num_workers,
'shuffle': False,
'collate_fn': partial(collate_fn, vocab),
}
test_loader = DataLoader(test_dset, **loader_kwargs)
return test_loader, test_dset.vocab
def build_train_val_loaders(args):
train_dset = get_dataset(args.dataset, 'train', args)
val_dset = get_dataset(args.dataset, 'val', args)
assert train_dset.vocab == val_dset.vocab
vocab = json.loads(json.dumps(train_dset.vocab))
collate = get_collate_fn(args)
loader_kwargs = {
'batch_size': args.batch_size,
'num_workers': args.loader_num_workers,
'shuffle': True,
'collate_fn': partial(collate, vocab),
}
train_loader = DataLoader(train_dset, **loader_kwargs)
loader_kwargs['shuffle'] = args.shuffle_val
val_loader = DataLoader(val_dset, **loader_kwargs)
return vocab, train_loader, val_loader
def check_model(args, loader, model, gans_model, inception_score, use_gt=True, full_test=False):
model.eval()
num_samples = 0
all_losses = defaultdict(list)
total_iou = 0.
total_iou_masks = 0.
total_iou_05 = 0.
total_iou_03 = 0.
total_boxes = 0.
inception_score.clean()
image_df = {
'image_id': [],
'avg_iou': [],
'iou03': [],
'iou05': [],
"predicted_boxes": [],
"gt_boxes": [],
"number_of_objects": [],
"class": []
}
with torch.no_grad():
for batch in loader:
try:
batch = batch_to(batch)
imgs, objs, boxes, triplets, _, triplet_type, masks, image_ids = batch
# Run the model as it has been run during training
if use_gt:
model_out = model(objs, triplets, triplet_type, boxes_gt=boxes, masks_gt=masks, test_mode=True)
else:
model_out = model(objs, triplets, triplet_type, test_mode=True)
imgs_pred, boxes_pred, masks_pred = model_out
G_losses = gans_model(batch, model_out, mode='compute_generator_loss')
if boxes_pred is not None:
boxes_pred = torch.clamp(boxes_pred, 0., 1.)
if imgs_pred is not None:
inception_score(imgs_pred)
if not args.skip_graph_model:
image_df['image_id'].extend(image_ids)
for i in range(boxes.size(0)):
# masks_sample = masks[i]
# masks_pred_sample = masks_pred[i]
boxes_sample = boxes[i]
boxes_pred_sample = boxes_pred[i]
boxes_pred_sample, boxes_sample = \
remove_dummies_and_padding(boxes_sample, objs[i], args.vocab,
[boxes_pred_sample, boxes_sample])
iou, iou05, iou03 = jaccard(boxes_pred_sample, boxes_sample)
# iou_masks = jaccard_masks(masks_pred_sample, masks_sample)
total_iou += iou.sum()
# total_iou_masks += iou_masks.sum()
total_iou_05 += iou05.sum()
total_iou_03 += iou03.sum()
total_boxes += float(iou.shape[0])
image_df['avg_iou'].append(np.mean(iou))
image_df['iou03'].append(np.mean(iou03))
                        image_df['iou05'].append(np.mean(iou05))
image_df['predicted_boxes'].append(str(boxes_pred_sample.cpu().numpy().tolist()))
image_df['gt_boxes'].append(str(boxes_sample.cpu().numpy().tolist()))
image_df["number_of_objects"].append(len(objs[i]))
if objs.shape[-1] == 1:
image_df["class"].append(
str([args.vocab["object_idx_to_name"][obj_index] for obj_index in objs[i]]))
else:
image_df["class"].append(str(
[args.vocab["reverse_attributes"]['shape'][str(int(objs[i][obj_index][2]))] for
obj_index in range(objs[i].shape[0])]))
for loss_name, loss_val in G_losses.items():
all_losses[loss_name].append(loss_val)
num_samples += imgs.size(0)
if not full_test and args.num_val_samples and num_samples >= args.num_val_samples:
break
except Exception as e:
print("Error in {}".format(str(e)))
samples = {}
if not args.skip_generation and not args.skip_graph_model:
samples['pred_box_pred_mask'] = model(objs, triplets, triplet_type, test_mode=True)[0]
samples['pred_box_gt_mask'] = model(objs, triplets, triplet_type, masks_gt=masks, test_mode=True)[0]
if not args.skip_generation:
samples['gt_img'] = imgs
samples['gt_box_gt_mask'] = \
model(objs, triplets, triplet_type, boxes_gt=boxes, masks_gt=masks, test_mode=True)[0]
samples['gt_box_pred_mask'] = model(objs, triplets, triplet_type, boxes_gt=boxes, test_mode=True)[0]
for k, v in samples.items():
samples[k] = np.transpose(deprocess_batch(v, deprocess_func=args.deprocess_func).cpu().numpy(),
[0, 2, 3, 1])
mean_losses = {k: torch.stack(v).mean() for k, v in all_losses.items() if k != 'bbox_pred_all'}
if not args.skip_graph_model:
mean_losses.update({'avg_iou': total_iou / total_boxes,
'total_iou_05': total_iou_05 / total_boxes,
'total_iou_03': total_iou_03 / total_boxes})
mean_losses.update({'inception_mean': 0.0})
mean_losses.update({'inception_std': 0.0})
if not args.skip_generation:
inception_mean, inception_std = inception_score.compute_score(splits=5)
mean_losses.update({'inception_mean': inception_mean})
mean_losses.update({'inception_std': inception_std})
model.train()
return mean_losses, samples, pd.DataFrame.from_dict(image_df)
def update_loader_params(dset, w_conv, w_trans):
if w_conv is not None:
dset.converse_candidates_weights = w_conv.detach().cpu().numpy()
if w_trans is not None:
dset.trans_candidates_weights = torch.sigmoid(w_trans).detach().cpu().numpy()
def main(args):
logger = logging.getLogger(__name__)
args.vocab, train_loader, val_loader = build_train_val_loaders(args)
init_args(args)
learning_rate = args.learning_rate
print_args(args)
if not os.path.isdir(args.output_dir):
print('Checkpoints directory "%s" does not exist; creating it' % args.output_dir)
os.makedirs(args.output_dir)
json.dump(vars(args), open(os.path.join(args.output_dir, 'run_args.json'), 'w'))
writer = SummaryWriter(args.output_dir)
float_dtype = torch.cuda.FloatTensor
# Define img_deprocess
if args.img_deprocess == "imagenet":
args.deprocess_func = imagenet_deprocess
elif args.img_deprocess == "decode_img":
args.deprocess_func = decode_image
else:
print("Error: No deprocess function was found. decode_image was chosen")
args.deprocess_func = decode_image
# setup device - CPU or GPU
device = torch.device("cuda:{gpu}".format(gpu=args.gpu_ids[0]) if args.use_cuda else "cpu")
print(" > Active GPU ids: {}".format(args.gpu_ids))
print(" > Using device: {}".format(device.type))
model = MetaGeneratorModel(args, device)
model.type(float_dtype)
conv_weights_mat = get_conv_converse(model)
update_loader_params(train_loader.dataset, conv_weights_mat, None)
update_loader_params(val_loader.dataset, conv_weights_mat, None)
converse_list = [
'sg_to_layout.module.converse_candidates_weights'] # 'sg_to_layout.module.trans_candidates_weights'
trans_list = ['sg_to_layout.module.trans_candidates_weights'] # 'sg_to_layout.module.trans_candidates_weights'
learned_converse_params = [kv[1] for kv in model.named_parameters() if kv[0] in converse_list]
learned_transitivity_params = [kv[1] for kv in model.named_parameters() if kv[0] in trans_list]
all_special_params = converse_list + trans_list
base_params = [kv[1] for kv in model.named_parameters() if kv[0] not in all_special_params]
optimizer = torch.optim.Adam([{'params': base_params, 'lr': learning_rate},
{'params': learned_transitivity_params, 'lr': 1e-2}])
optimizer_converse = torch.optim.Adam([{'params': learned_converse_params, 'lr': 1e-2}])
print(model)
discriminator = MetaDiscriminatorModel(args)
print(discriminator)
gans_model = Pix2PixModel(args, discriminator=discriminator)
gans_model = DataParallelWithCallback(gans_model, device_ids=args.gpu_ids).to(device)
epoch, t = 0, 0
# Restore checkpoint
if args.restore_checkpoint:
epoch, t = restore_checkpoint(args, model, gans_model, discriminator, optimizer, device)
# Freeze weights
if args.freeze:
freeze_weights(model, discriminator, args.freeze_options)
# Init Inception Score
inception_score = InceptionScore(device, batch_size=args.batch_size, resize=True)
# Run Epoch
meta_relations = [args.vocab['pred_name_to_idx'][p] for p in train_loader.dataset.meta_relations]
non_meta_relations = set(args.vocab['pred_name_to_idx'].values()) - set(meta_relations)
eps = np.finfo(np.float32).eps.item()
while True:
if t >= args.num_iterations:
break
epoch += 1
print('Starting epoch %d' % epoch)
# Run Batch
for batch in train_loader:
try:
t += 1
batch = batch_to(batch)
imgs, objs, boxes, triplets, conv_counts, triplet_type, masks, image_ids = batch
model_out = model(objs, triplets, triplet_type, boxes_gt=boxes, masks_gt=masks, test_mode=False)
# non gan losses
G_losses = gans_model(batch, model_out, mode="compute_generator_loss")
r = G_losses["bbox_pred_all"].detach()
G_losses = {k: v.mean() for k, v in G_losses.items()}
log_scalar_dict(writer, G_losses, 'train/loss', t)
optimizer.zero_grad()
G_losses["total_loss"].backward()
optimizer.step()
# Update SRC params
if args.learned_converse:
batch_size = batch[0].shape[0]
if batch_size > 1:
r = (r - r.mean()) / (r.std() + eps)
conv_weights_mat = get_conv_converse(model)
log_prob = calc_log_p(conv_weights_mat, non_meta_relations, conv_counts)
loss_conv = torch.mean(r * log_prob)
optimizer_converse.zero_grad()
loss_conv.backward()
optimizer_converse.step()
conv_weights_mat = get_conv_converse(model)
update_loader_params(train_loader.dataset, conv_weights_mat, None)
update_loader_params(val_loader.dataset, conv_weights_mat, None)
# Update GAN discriminators losses
D_losses = {}
if not args.skip_generation and args.freeze_options != "generation":
D_losses = gans_model(batch, model_out, mode="compute_discriminator_loss")
D_losses = {k: v.mean() for k, v in D_losses.items()}
log_scalar_dict(writer, D_losses, 'train/loss', t)
set_d_gans_loss(D_losses, args, discriminator)
# Logger
if t % args.print_every == 0:
print('t = %d / %d' % (t, args.num_iterations))
for name, val in G_losses.items():
print(' G [%s]: %.4f' % (name, val))
for name, val in D_losses.items():
print(' D [%s]: %.4f' % (name, val))
# Save checkpoint
if t % args.checkpoint_every == 0:
conv_weights_mat = get_conv_converse(model)
print_compute_converse_edges({}, conv_weights_mat.detach(), args.vocab, non_meta_relations)
print_compute_transitive_edges({}, torch.sigmoid(
model.sg_to_layout.module.trans_candidates_weights).detach(), args.vocab)
# GT Boxes; GT Masks
print('checking: input box/mask as GT')
gt_val_losses, gt_val_samples, _ = check_model(args, val_loader, model, gans_model, inception_score,
use_gt=True, full_test=False)
log_scalar_dict(writer, gt_val_losses, 'gt_val/loss', t)
log_results(gt_val_losses, t, prefix='GT VAL')
# Pred Boxes; Pred Masks
print('checking: input box/mask as PRED')
use_gt = True if args.skip_graph_model else False # if skip graph then use gt
val_losses, val_samples, _ = check_model(args, val_loader, model, gans_model,
inception_score, use_gt=use_gt, full_test=False)
log_scalar_dict(writer, val_losses, 'val/loss', t)
log_results(val_losses, t, prefix='VAL')
save_images(args, t, val_samples, writer)
# Save checkpoint
checkpoint_path = os.path.join(args.output_dir, 'itr_%s.pt' % t)
print('Saving checkpoint to ', checkpoint_path)
save_checkpoint(args, checkpoint_path, discriminator, epoch, gans_model, model, optimizer, t)
# Full test
if t % args.full_test == 0:
print('checking on full eval')
test_losses, test_samples, _ = check_model(args, val_loader, model, gans_model, inception_score,
use_gt=False, full_test=True)
log_scalar_dict(writer, test_losses, 'test/loss', t)
print('Iter: {},'.format(t) + ' TEST Inception mean: %.4f' % test_losses['inception_mean'])
print('Iter: {},'.format(t) + ' TEST Inception STD: %.4f' % test_losses['inception_std'])
except Exception as e:
logger.exception(e)
writer.close()
def log_results(semi_val_losses, t, prefix=''):
print('Iter: {}, '.format(t) + prefix + ' avg_iou: %.4f' % semi_val_losses.get('avg_iou', 0.0))
print('Iter: {}, '.format(t) + prefix + ' total_iou_03: %.4f' % semi_val_losses.get('total_iou_03', 0.0))
print('Iter: {}, '.format(t) + prefix + ' total_iou_05: %.4f' % semi_val_losses.get('total_iou_05', 0.0))
print('Iter: {}, '.format(t) + prefix + ' Inception mean: %.4f' % semi_val_losses.get('inception_mean', 0.0))
print('Iter: {}, '.format(t) + prefix + ' Inception STD: %.4f' % semi_val_losses.get('inception_std', 0.0))
def save_images(args, t, val_samples, writer, dir_name='val'):
for k, v in val_samples.items():
if isinstance(v, list):
for i in range(len(v)):
                writer.add_figure('val_%s/%s' % (k, i), v[i], global_step=t)
else:
path = os.path.join(args.output_dir, dir_name, str(t), k)
            os.makedirs(path, exist_ok=True)
for i in range(v.shape[0]):
writer.add_images('val_%s/%s' % (k, i), v[i], global_step=t, dataformats='HWC')
RGB_img_i = cv2.cvtColor(v[i], cv2.COLOR_BGR2RGB)
cv2.imwrite("{}/{}.jpg".format(path, i), RGB_img_i)
def set_d_gans_loss(D_losses, args, discriminator):
    # The image discriminator is always updated; the object discriminator is
    # skipped when only the image discriminator is in use.
    discriminator.optimizer_d_img.zero_grad()
    D_losses["total_img_loss"].backward()
    discriminator.optimizer_d_img.step()
    if not args.use_img_disc:
        discriminator.optimizer_d_obj.zero_grad()
        D_losses["total_obj_loss"].backward()
        discriminator.optimizer_d_obj.step()
    if args.mask_size > 0 and "total_mask_loss" in D_losses:
        discriminator.optimizer_d_mask.zero_grad()
        D_losses["total_mask_loss"].backward()
        discriminator.optimizer_d_mask.step()
def save_checkpoint(args, checkpoint_path, discriminator, epoch, gans_model, model, optimizer, t):
if args.use_img_disc:
checkpoint_dict = {
'model_state': model.state_dict(),
'gans_model_state': gans_model.state_dict(),
'd_img_state': discriminator.img_discriminator.state_dict(),
'd_img_optim_state': discriminator.optimizer_d_img.state_dict(),
'optim_state': optimizer.state_dict(),
'vocab': args.vocab,
'counters': {
't': t,
'epoch': epoch,
}
}
else:
checkpoint_dict = {
'model_state': model.state_dict(),
'gans_model_state': gans_model.state_dict(),
'd_img_state': discriminator.img_discriminator.state_dict(),
'd_obj_state': discriminator.obj_discriminator.state_dict(),
'd_mask_state': discriminator.mask_discriminator.state_dict(),
'd_img_optim_state': discriminator.optimizer_d_img.state_dict(),
'd_obj_optim_state': discriminator.optimizer_d_obj.state_dict(),
'd_mask_optim_state': discriminator.optimizer_d_mask.state_dict(),
'optim_state': optimizer.state_dict(),
'vocab': args.vocab,
'counters': {
't': t,
'epoch': epoch,
}
}
torch.save(checkpoint_dict, checkpoint_path)
if __name__ == '__main__':
args = get_args()
main(args)
|
180361
|
from .warmup import WarmupLRScheduler
class Linear(WarmupLRScheduler):
r"""
    After a warmup period during which the learning rate increases linearly from 0 to ``start_lr``,
    the decay period sets :math:`\text{lr}=\text{start_lr}\times \dfrac{\text{end_iter}-\text{num_iter}}{\text{end_iter}-\text{warmup_iter}}`
"""
def get_lr_warmup(self, num_iter) -> float:
return self.start_lr * num_iter / self.warmup_iter
def get_lr_decay(self, num_iter) -> float:
return max(0.0, self.start_lr * (self.end_iter - num_iter) / (self.end_iter - self.warmup_iter))
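# A minimal, self-contained sketch of the schedule above (parameter names
# mirror the formula; the real WarmupLRScheduler wiring may differ):
#   def linear_warmup_lr(num_iter, start_lr=0.1, warmup_iter=100, end_iter=1000):
#       if num_iter < warmup_iter:
#           return start_lr * num_iter / warmup_iter
#       return max(0.0, start_lr * (end_iter - num_iter) / (end_iter - warmup_iter))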
|
180410
|
from inspect import getfile, getsourcelines
from sys import stderr
from types import FunctionType
def print_function_context(f: FunctionType) -> None:
name, file, (source, source_line_number) = f.__name__, getfile(f), getsourcelines(f)
print(f"Function '{name}' located in {file} at line {source_line_number}", file=stderr)
print(">>", "".join(source[:5]), file=stderr)
|
180450
|
import os
import time
import pickle
import random
import numpy as np
from PIL import Image
import torchvision.transforms as transforms
from utils import cv_utils
from data.dataset import DatasetBase
class AusDataset(DatasetBase):
def __init__(self, opt, is_for_train):
super(AusDataset, self).__init__(opt, is_for_train)
self._name = "AusDataset"
self._read_dataset()
self._cond_nc = opt.cond_nc
def __len__(self):
return self._dataset_size
def __getitem__(self, idx):
assert (idx < self._dataset_size)
real_img = None
real_cond = None
real_img_path = None
while real_img is None or real_cond is None:
# get sample data
sample_id = self._get_id(idx)
real_img, real_img_path = self._get_img_by_id(idx)
real_cond = self._get_cond_by_id(idx)
if real_img is None:
print('error reading image %s, skipping sample' % real_img_path)
idx = random.randint(0, self._dataset_size - 1)
if real_cond is None:
print('error reading aus %s, skipping sample' % sample_id)
idx = random.randint(0, self._dataset_size - 1)
real_cond += np.random.uniform(-0.02, 0.02, real_cond.shape)
desired_img, desired_cond, noise = self._generate_random_cond()
# transform data
real_img = self._transform(Image.fromarray(real_img))
desired_img = self._transform(Image.fromarray(desired_img))
# pack data
sample = {'real_img': real_img,
'real_cond': real_cond,
'desired_img': desired_img,
'desired_cond': desired_cond,
'cond_diff': desired_cond - real_cond,
}
return sample
def _create_transform(self):
if self._is_for_train:
transform_list = [transforms.RandomHorizontalFlip(),
transforms.Resize(self._image_size),
transforms.Pad(self._image_size // 16),
transforms.RandomCrop(self._image_size),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
else:
transform_list = [transforms.Resize(self._image_size),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
self._transform = transforms.Compose(transform_list)
def _read_dataset(self):
self._root = self._opt.data_dir
self._imgs_dir = os.path.join(self._root, self._opt.images_folder)
annotations_file = self._opt.train_annotations_file if self._is_for_train else self._opt.test_annotations_file
pkl_path = os.path.join(self._root, annotations_file)
self._info = self._read_pkl(pkl_path)
self._image_size = self._opt.image_size
# dataset size
self._dataset_size = len(self._info)
def _read_pkl(self, file_path):
assert os.path.exists(file_path) and file_path.endswith('.pkl'), 'Read pkl file error. Cannot open %s' % file_path
with open(file_path, 'rb') as f:
return pickle.load(f)
def _get_id(self, idx):
id = self._info[idx]['file_path']
return os.path.splitext(id)[0]
def _get_cond_by_id(self, idx):
cond = None
if idx < self._dataset_size:
cond = self._info[idx]['aus'] / 5.0
return cond
def _get_img_by_id(self, idx):
if idx < self._dataset_size:
img_path = os.path.join(self._imgs_dir, self._info[idx]['file_path'])
img = cv_utils.read_cv2_img(img_path)
return img, self._info[idx]['file_path']
else:
print('You input idx: ', idx)
return None, None
    def _generate_random_cond(self):
        cond = None
        rand_sample_id = -1
        while cond is None:
            rand_sample_id = random.randint(0, self._dataset_size - 1)
            cond = self._get_cond_by_id(rand_sample_id)
        img, _ = self._get_img_by_id(rand_sample_id)
        noise = np.random.uniform(-0.1, 0.1, cond.shape)
        if img is None:
            # Retry with a fresh sample; the recursive call already adds noise,
            # so do not add it a second time here.
            return self._generate_random_cond()
        cond += noise
        return img, cond, noise
|
180510
|
import matplotlib.pyplot as plt
import numpy as np
import torch
import cv2
import os
def find_card(I):
    # Detect the license-plate region and return the cropped sub-image
[y, x, z] = I.shape
    # Analyze the y range of blue pixels
Blue_y = np.zeros((y, 1))
for i in range(y):
for j in range(x):
            # Blue pixel test (OpenCV uses BGR: channel 0 is blue, channel 2 is red)
temp = I[i, j, :]
if (I[i, j, 2] <= 30) and (I[i, j, 0] >= 119):
Blue_y[i][0] += 1
MaxY = np.argmax(Blue_y)
PY1 = MaxY
while (Blue_y[PY1, 0] >= 5) and (PY1 > 0):
PY1 -= 1
PY2 = MaxY
while (Blue_y[PY2, 0] >= 5) and (PY2 < y - 1):
PY2 += 1
    # Determine the x range
Blue_x = np.zeros((1, x))
for i in range(x):
for j in range(PY1, PY2):
if (I[j, i, 2] <= 30) and (I[j, i, 0] >= 119):
Blue_x[0][i] += 1
PX1 = 0
while (Blue_x[0, PX1] < 3) and (PX1 < x - 1):
PX1 += 1
PX2 = x - 1
while (Blue_x[0, PX2] < 3) and (PX2 > PX1):
PX2 -= 1
    # Slightly widen the plate region
PX1 -= 2
PX2 += 2
return I[PY1:PY2, PX1 - 2: PX2, :]
def divide(I):
[y, x, z] = I.shape
White_x = np.zeros((x, 1))
for i in range(x):
for j in range(y):
if I[j, i, 1] > 176:
White_x[i][0] += 1
return White_x
def divide_each_character(I):
[y, x, z] = I.shape
White_x = np.zeros((x, 1))
for i in range(x):
for j in range(y):
if I[j, i, 1] > 176:
White_x[i][0] += 1
res = []
length = 0
for i in range(White_x.shape[0]):
        # split using empirically tuned thresholds, scaled by plate width
t = I.shape[1] / 297
num = White_x[i]
if num > 8:
length += 1
elif length > 20 * t:
res.append([i - length - 2, i + 2])
length = 0
else:
length = 0
return res
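# Illustrative reading of the projection split above (no new behaviour): a run
# of columns whose white-pixel count exceeds 8 is accumulated; when the run
# ends, if it lasted more than 20 * (plate_width / 297) columns it is emitted
# as one character interval, padded by two columns on each side.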
if __name__ == '__main__':
I = cv2.imread('Car.jpg')
Plate = find_card(I)
# White_x = divide(Plate)
plt.imshow(Plate)
plt.show()
# plt.plot(np.arange(Plate.shape[1]), White_x)
res = divide_each_character(Plate)
plate_save_path = './singledigit/'
for t in range(len(res)):
plt.subplot(1, 7, t + 1)
temp = res[t]
save_img = cv2.cvtColor(Plate[:, temp[0]:temp[1], :],cv2.COLOR_BGR2GRAY)
        # pad the (assumed taller-than-wide) character crop into a square
        # canvas, binarising pixels brighter than 125 to white
        ma = max(save_img.shape[0], save_img.shape[1])
        mi = min(save_img.shape[0], save_img.shape[1])
        ans = np.zeros(shape=(ma, ma, 3), dtype=np.uint8)
        start = int(ma / 2 - mi / 2)
        for i in range(mi):
            for j in range(ma):
                if save_img[j, i] > 125:
                    ans[j, start + i, :] = 255
        ans = cv2.resize(ans, (25, 25))
        dir_name = plate_save_path + str(t)
        os.makedirs(dir_name, exist_ok=True)
        cv2.imwrite(dir_name + '/' + str(t) + '.jpg', ans)
plt.imshow(ans)
plt.show()
|
180600
|
t = int(input())
# seat s faces seat s + offset; both share the berth type
# (WS = window seat, MS = middle seat, AS = aisle seat)
offsets = {1: (11, 'WS'), 2: (9, 'MS'), 3: (7, 'AS'), 4: (5, 'AS'),
           5: (3, 'MS'), 6: (1, 'WS'), 7: (-1, 'WS'), 8: (-3, 'MS'),
           9: (-5, 'AS'), 10: (-7, 'AS'), 11: (-9, 'MS'), 0: (-11, 'WS')}
for _ in range(t):
    s = int(input())
    offset, berth = offsets[s % 12]
    print(s + offset, berth)
|
180609
|
import numpy as np
from ..base import Parameter
from .optimizer import BaseOptimizer
from benderopt.utils import logb
from .random import RandomOptimizer
class ParzenEstimator(BaseOptimizer):
""" Parzen Estimator
This estimator is largely inspired from TPE and hyperopt.
https://papers.nips.cc/paper/4443-algorithms-for-hyper-parameter-optimization.pdf
    gamma: fraction of best observations used to fit the low-loss density
    number_of_candidates: number of candidates to draw at each iteration
    subsampling: maximum number of observations to consider
    subsampling_type: how to draw observations if number_of_observations > subsampling
    prior_weight: weight of the prior when building posterior parameters
    minimum_observations: params are drawn at random until minimum_observations is reached
"""
def __init__(
self,
optimization_problem,
gamma=0.15,
number_of_candidates=100,
subsampling=100,
subsampling_type="random",
prior_weight=0.05,
minimum_observations=20,
):
super(ParzenEstimator, self).__init__(optimization_problem)
self.gamma = gamma
self.number_of_candidates = number_of_candidates
self.subsampling = subsampling
self.subsampling_type = subsampling_type
self.prior_weight = prior_weight
self.minimum_observations = minimum_observations
def _generate_samples(self, size, debug=False):
assert size < int(self.number_of_candidates / 3)
# 0. If not enough observations, draw at random
if self.optimization_problem.number_of_observations < self.minimum_observations:
samples = RandomOptimizer(self.optimization_problem)._generate_samples(size)
if debug:
return samples, None, None
return samples
        # 0. Retrieve the best gamma-fraction of observations (lowest loss) as
        #    observations_l and the worst (greatest loss) as observations_g
observations_l, observations_g = self.optimization_problem.observations_quantile(
self.gamma,
subsampling=min(len(self.observations), self.subsampling),
subsampling_type=self.subsampling_type,
)
# 1. Build by drawing a value for each parameter according to parzen estimation
samples = [{} for _ in range(size)]
posterior_parameters_l = []
posterior_parameters_g = []
for parameter in self.parameters:
            # 1.a Build empirical distributions of good and bad observations
posterior_parameter_l = self._build_posterior_parameter(parameter, observations_l)
posterior_parameters_l.append(posterior_parameter_l)
posterior_parameter_g = self._build_posterior_parameter(parameter, observations_g)
posterior_parameters_g.append(posterior_parameter_g)
# 1.b Draw candidates from observations_l
candidates = np.array(
[
x[parameter.name]
for x in RandomOptimizer(self.optimization_problem).suggest(
self.number_of_candidates
)
]
)
            # 1.c Score candidates by g / l, guarding against division by zero
scores = posterior_parameter_g.pdf(candidates) / np.clip(
posterior_parameter_l.pdf(candidates), a_min=1e-16, a_max=None
)
# Sort candidate and choose best
sorted_candidates = candidates[np.argsort(scores)][: int(self.number_of_candidates / 3)]
selected_candidates = np.random.choice(sorted_candidates, size=size, replace=False)
for i in range(size):
samples[i][parameter.name] = selected_candidates[i]
if debug:
return samples, posterior_parameters_l, posterior_parameters_g
return samples
def _build_posterior_parameter(self, parameter, observations):
"""Retrieve observed value for eache parameter."""
observed_values, observed_weights = zip(
*[
(observation.sample[parameter.name], observation.weight)
for observation in observations
]
)
return parzen_estimator_build_posterior_parameter[parameter.category](
observed_values=observed_values,
observed_weights=observed_weights,
parameter=parameter,
prior_weight=self.prior_weight,
)
def build_posterior_categorical(observed_values, observed_weights, parameter, prior_weight):
"""Posterior for categorical parameters.
observed_probabilities are the weighted count of each possible value.
posterior_probabilities are the weighted sum of prior (initial search space).
TODO Compare mean (current implem) vs hyperopt approach."""
posterior_parameter = None
prior_probabilities = np.array(parameter.search_space["probabilities"])
values = parameter.search_space["values"]
sum_observed_weights = sum(observed_weights)
if sum_observed_weights != 0:
observed_probabilities = np.array(
[
sum(
[
observed_weight
for observed_value, observed_weight in zip(
observed_values, observed_weights
)
if observed_value == value
]
)
/ sum_observed_weights
for value in values
]
)
posterior_probabilities = prior_probabilities * prior_weight + observed_probabilities * (
1 - prior_weight
)
# Numerical safety to always have sum = 1
posterior_probabilities /= sum(posterior_probabilities)
# Build param
posterior_parameter = Parameter.from_dict(
{
"name": parameter.name,
"category": "categorical",
"search_space": {"values": values, "probabilities": list(posterior_probabilities),},
}
)
return posterior_parameter
def find_sigmas_mus(observed_mus, prior_mu, prior_sigma, low, high):
"""TODO when multiple values for prior index ??"""
# Mus
unsorted_mus = np.array(list(observed_mus)[:] + [prior_mu])
index = np.argsort(unsorted_mus)
mus = unsorted_mus[index]
# Sigmas
# Trick to get for each mu the greater distance from left and right neighbor
# when low and high are not defined we use inf to get the only available distance
# (right neighbor for sigmas[0] and left for sigmas[-1])
tmp = np.concatenate(
([low if low != -np.inf else np.inf], mus, [high if high != np.inf else -np.inf],)
)
sigmas = np.maximum(tmp[1:-1] - tmp[0:-2], tmp[2:] - tmp[1:-1])
# Use formulas from hyperopt to clip sigmas
sigma_max_value = prior_sigma
sigma_min_value = prior_sigma / min(100.0, (1.0 + len(mus)))
sigmas = np.clip(sigmas, sigma_min_value, sigma_max_value)
    # Restore the prior's sigma: the prior mu was appended last to unsorted_mus,
    # so locate its position in the sorted order
    sigmas[np.where(index == len(index) - 1)[0][0]] = prior_sigma
return mus[:], sigmas[:], index
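# Illustrative behaviour (hypothetical values): for observed_mus=[0.2, 0.8],
# prior_mu=0.5, prior_sigma=1.0, low=0.0, high=1.0, mus sorts to
# [0.2, 0.5, 0.8]; each mu gets a sigma equal to the larger gap to its
# neighbours, clipped to [prior_sigma / 4, prior_sigma], and the prior's
# sigma is then reset to prior_sigma.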
def build_posterior_uniform(observed_values, observed_weights, parameter, prior_weight):
"""TODO put doc here."""
low = parameter.search_space["low"]
high = parameter.search_space["high"]
# build prior mu and sigma
prior_mu = 0.5 * (high + low)
prior_sigma = high - low
# Build mus and sigmas centered on each observation, taking care of the prior
mus, sigmas, index = find_sigmas_mus(
observed_mus=observed_values, prior_mu=prior_mu, prior_sigma=prior_sigma, low=low, high=high
)
sum_observed_weights = sum(observed_weights)
posterior_parameter = Parameter.from_dict(
{
"name": parameter.name,
"category": "mixture",
"search_space": {
"parameters": [
{
"category": "normal",
"search_space": {
"mu": mu.tolist(),
"sigma": sigma.tolist(),
"low": low,
"high": high,
"step": parameter.search_space.get("step", None),
},
}
for mu, sigma in zip(mus, sigmas)
],
"weights": np.array(
[x * (1 - prior_weight) / sum_observed_weights for x in observed_weights]
+ [prior_weight]
)[index].tolist(),
},
}
)
return posterior_parameter
def build_posterior_loguniform(observed_values, observed_weights, parameter, prior_weight):
low_log = parameter.search_space["low_log"]
high_log = parameter.search_space["high_log"]
base = parameter.search_space["base"]
# build log prior mu and sigma
prior_mu_log = 0.5 * (high_log + low_log)
prior_sigma_log = high_log - low_log
# Build mus and sigmas centered on each observation, taking care of the prior
mus_log, sigmas_log, index = find_sigmas_mus(
observed_mus=logb(observed_values, base),
prior_mu=prior_mu_log,
prior_sigma=prior_sigma_log,
low=low_log,
high=high_log,
)
# Back from log scale
mus = base ** mus_log
sigmas = base ** sigmas_log
sum_observed_weights = sum(observed_weights)
posterior_parameter = Parameter.from_dict(
{
"name": parameter.name,
"category": "mixture",
"search_space": {
"parameters": [
{
"category": "lognormal",
"search_space": {
"mu": mu.tolist(),
"sigma": sigma.tolist(),
"low": parameter.search_space["low"],
"high": parameter.search_space["high"],
"step": parameter.search_space["step"],
"base": parameter.search_space["base"],
},
}
for mu, sigma in zip(mus, sigmas)
],
"weights": np.array(
[x * (1 - prior_weight) / sum_observed_weights for x in observed_weights]
+ [prior_weight]
)[index].tolist(),
},
}
)
return posterior_parameter
def build_posterior_normal(observed_values, observed_weights, parameter, prior_weight):
low = parameter.search_space["low"]
high = parameter.search_space["high"]
# build prior mu and sigma
prior_mu = parameter.search_space["mu"]
prior_sigma = parameter.search_space["sigma"]
# Build mus and sigmas centered on each observation, taking care of the prior
mus, sigmas, index = find_sigmas_mus(
observed_mus=observed_values, prior_mu=prior_mu, prior_sigma=prior_sigma, low=low, high=high
)
sum_observed_weights = sum(observed_weights)
posterior_parameter = Parameter.from_dict(
{
"name": parameter.name,
"category": "mixture",
"search_space": {
"parameters": [
{
"category": "normal",
"search_space": {
"mu": mu.tolist(),
"sigma": sigma.tolist(),
"low": low,
"high": high,
"step": parameter.search_space.get("step", None),
},
}
for mu, sigma in zip(mus, sigmas)
],
"weights": np.array(
[x * (1 - prior_weight) / sum_observed_weights for x in observed_weights]
+ [prior_weight]
)[index].tolist(),
},
}
)
return posterior_parameter
def build_posterior_lognormal(observed_values, observed_weights, parameter, prior_weight):
low_log = parameter.search_space["low_log"]
high_log = parameter.search_space["high_log"]
base = parameter.search_space["base"]
# build log prior mu and sigma
prior_mu_log = parameter.search_space["mu_log"]
prior_sigma_log = parameter.search_space["sigma_log"]
# Build mus and sigmas centered on each observation, taking care of the prior
mus_log, sigmas_log, index = find_sigmas_mus(
observed_mus=logb(observed_values, base),
prior_mu=prior_mu_log,
prior_sigma=prior_sigma_log,
low=low_log,
high=high_log,
)
# Back from log scale
mus = base ** mus_log
sigmas = base ** sigmas_log
sum_observed_weights = sum(observed_weights)
posterior_parameter = Parameter.from_dict(
{
"name": parameter.name,
"category": "mixture",
"search_space": {
"parameters": [
{
"category": "lognormal",
"search_space": {
"mu": mu.tolist(),
"sigma": sigma.tolist(),
"low": parameter.search_space["low"],
"high": parameter.search_space["high"],
"step": parameter.search_space["step"],
"base": parameter.search_space["base"],
},
}
for mu, sigma in zip(mus, sigmas)
],
"weights": np.array(
[x * (1 - prior_weight) / sum_observed_weights for x in observed_weights]
+ [prior_weight]
)[index].tolist(),
},
}
)
return posterior_parameter
parzen_estimator_build_posterior_parameter = {
"categorical": build_posterior_categorical,
"uniform": build_posterior_uniform,
"loguniform": build_posterior_loguniform,
"normal": build_posterior_normal,
"lognormal": build_posterior_lognormal,
}
|
180635
|
import codecs
from os import path
from ncbi_genome_download.summary import SummaryReader
def open_testfile(fname):
return codecs.open(path.join(path.dirname(__file__), fname), 'r', 'utf-8')
def test_bacteria_ascii():
ascii_file = open_testfile('partial_summary.txt')
reader = SummaryReader(ascii_file)
first = next(reader)
assert 'assembly_accession' in first
assert 'ftp_path' in first
def test_bacteria_unicode():
utf8_file = open_testfile('noascii_summary.txt')
reader = SummaryReader(utf8_file)
first = next(reader)
assert 'assembly_accession' in first
assert 'ftp_path' in first
def test_weird_organism_name():
ascii_file = open_testfile('weird_organism_name_summary.txt')
reader = SummaryReader(ascii_file)
first = next(reader)
assert 'assembly_accession' in first
assert 'ftp_path' in first
# stupid viral summary file has an extra comment
def test_virus():
utf8_file = open_testfile('viral_summary.txt')
reader = SummaryReader(utf8_file)
entries = list(reader)
first = entries[0]
assert 'assembly_accession' in first
assert 'ftp_path' in first
assert len(entries) == 6
    for entry in entries:
        assert 'assembly_accession' in entry
    # after the loop, `entry` holds the last summary row
|
180675
|
import re
import tiles
# REGEXs for global/clock signals
# Globals including spine inputs, TAP_DRIVE inputs and TAP_DRIVE outputs
global_spine_tap_re = re.compile(r'R\d+C\d+_[HV]P[TLBR]X(\d){2}00')
# CMUX outputs
global_cmux_out_re = re.compile(r'R\d+C\d+_[UL][LR]PCLK\d+')
# CMUX inputs
global_cmux_in_re = re.compile(r'R\d+C\d+_[HV]PF[NESW](\d){2}00')
# Clock pins
clock_pin_re = re.compile(r'R\d+C\d+_J?PCLK[TBLR]\d+')
# PLL global outputs
pll_out_re = re.compile(r'R\d+C\d+_J?[UL][LR][QC]PLL\dCLKO[PS]\d?')
# CIB clock inputs
cib_clk_re = re.compile(r'R\d+C\d+_J?[ULTB][LR][QCM]PCLKCIB\d+')
# Oscillator output
osc_clk_re = re.compile(r'R\d+C\d+_J?OSC')
# Clock dividers
cdivx_clk_re = re.compile(r'R\d+C\d+_J?[UL]CDIVX\d+')
# SED clock output
sed_clk_re = re.compile(r'R\d+C\d+_J?SEDCLKOUT')
# SERDES reference clocks
pcs_clk_re = re.compile(r'R\d+C\d+_J?PCS[AB][TR]XCLK\d')
# DDRDEL delay signals
ddr_delay_re = re.compile(r'R\d+C\d+_[UL][LR]DDRDEL')
# DCC signals
dcc_clk_re = re.compile(r'R\d+C\d+_J?(CLK[IO]|CE)_[BLTR]?DCC(\d+|[BT][LR])')
# DCC inputs
dcc_clki_re = re.compile(r'R\d+C\d+_[BLTR]?DCC(\d+|[BT][LR])CLKI')
# DCS signals
dcs_sig_re = re.compile(r'R\d+C\d+_J?(CLK\d|SEL\d|DCSOUT|MODESEL)_DCS\d')
# DCS clocks
dcs_clk_re = re.compile(r'R\d+C\d+_DCS\d(CLK\d)?')
# Misc. center clocks
center_clk_re = re.compile(r'R\d+C\d+_J?(LE|RE)CLK\d')
# Shared DQS signals
dqs_ssig_re = re.compile(r'R\d+C\d+_(DQS[RW]\d*|(RD|WR)PNTR\d)$')
# Bank edge clocks
bnk_eclk_re = re.compile(r'R\d+C\d+_BANK\d+(ECLK\d+)')
# CIB ECLK inputs
cib_eclk_re = re.compile(r'R\d+C\d+_J?[ULTB][LR][QCM]ECLKCIB\d+')
brg_eclk_re = re.compile(r'R\d+C(\d+)_JBRGECLK\d+')
def is_global_brgeclk(wire):
    m = brg_eclk_re.match(wire)
    if not m:
        return False
    x = int(m.group(1))
    return 5 < x < 67
def is_global(wire):
"""Return true if a wire is part of the global clock network"""
return bool(global_spine_tap_re.match(wire) or
global_cmux_out_re.match(wire) or
global_cmux_in_re.match(wire) or
clock_pin_re.match(wire) or
pll_out_re.match(wire) or
cib_clk_re.match(wire) or
osc_clk_re.match(wire) or
cdivx_clk_re.match(wire) or
sed_clk_re.match(wire) or
ddr_delay_re.match(wire) or
dcc_clk_re.match(wire) or
dcc_clki_re.match(wire) or
dcs_sig_re.match(wire) or
dcs_clk_re.match(wire) or
pcs_clk_re.match(wire) or
center_clk_re.match(wire) or
cib_eclk_re.match(wire) or
is_global_brgeclk(wire))
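# Illustrative checks (hypothetical wire names matching the patterns above):
#   is_global('R10C12_HPTX0200')  -> True   (global spine / TAP signal)
#   is_global('R2C3_ULPCLK3')     -> True   (CMUX output)
#   is_global('R5C5_JA0')         -> False  (ordinary CIB wire)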
def handle_family_net(tile, wire, prefix_pos, tile_pos, netname):
if tile.startswith("TAP") and netname.startswith("H"):
if prefix_pos[1] < tile_pos[1]:
return "L_" + netname
elif prefix_pos[1] > tile_pos[1]:
return "R_" + netname
else:
assert False, "bad TAP_DRIVE netname"
elif is_global(wire):
return "G_" + netname
elif dqs_ssig_re.match(wire):
return "DQSG_" + netname
elif bnk_eclk_re.match(wire):
if "ECLK" in tile:
return "G_" + netname
else:
return "BNK_" + bnk_eclk_re.match(wire).group(1)
elif netname in ("INRD", "LVDS"):
return "BNK_" + netname
else:
return None
|
180710
|
from django.test import TestCase
from django.urls import reverse
class FilebrowserAnonymousTestCase(TestCase):
def test_browse(self):
url = reverse("fb_browse")
response = self.client.get(url)
self.assertEqual(302, response.status_code)
self.assertEqual("/admin/login/?next=" + url, response.url)
def test_mkdir(self):
url = reverse("fb_browse")
response = self.client.get(url)
self.assertEqual(302, response.status_code)
self.assertEqual("/admin/login/?next=" + url, response.url)
def test_rename(self):
url = reverse("fb_rename")
response = self.client.get(url)
self.assertEqual(302, response.status_code)
self.assertEqual("/admin/login/?next=" + url, response.url)
def test_delete(self):
url = reverse("fb_delete")
response = self.client.get(url)
self.assertEqual(302, response.status_code)
self.assertEqual("/admin/login/?next=" + url, response.url)
def test_upload(self):
url = reverse("fb_upload")
response = self.client.get(url)
self.assertEqual(302, response.status_code)
self.assertEqual("/admin/login/?next=" + url, response.url)
def test_do_upload(self):
url = reverse("fb_do_upload")
response = self.client.get(url)
self.assertEqual(302, response.status_code)
self.assertEqual("/admin/login/?next=" + url, response.url)
|
180744
|
from .base import View, TemplateView, RedirectView
from .dates import (ArchiveIndexView, YearArchiveView, MonthArchiveView,
                    WeekArchiveView, DayArchiveView, TodayArchiveView,
                    DateDetailView)
from .detail import DetailView
from .edit import FormView, CreateView, UpdateView, DeleteView
from .list import ListView
__version__ = "0.3.1"
class GenericViewError(Exception):
"""A problem in a generic view."""
pass
|
180766
|
from collections import namedtuple
from collections import defaultdict
from biicode.common.model.brl.cell_name import CellName
from biicode.common.model.symbolic.block_version import BlockVersion
from biicode.common.utils.serializer import DictDeserializer, SetDeserializer
from biicode.common.model.brl.block_cell_name import BlockCellName
from biicode.common.model.resource import ResourceDeserializer
from biicode.common.model.cells import CellDeserializer
from biicode.common.model.content import ContentDeserializer
from biicode.common.model.id import ID
from biicode.common.model.declare.declaration import Declaration
import copy
class VersionDict(defaultdict):
def __init__(self, items_type):
super(VersionDict, self).__init__(items_type)
def explode(self):
items_type = self.default_factory()
if isinstance(items_type, (set, list, tuple)):
result = []
for k, v in self.iteritems():
result.extend([Reference(k, x) for x in v])
return result
elif isinstance(items_type, dict):
result = {}
for k, v in self.iteritems():
for k2, v2 in v.iteritems():
result[Reference(k, k2)] = v2
return result
raise ValueError('This type of VersionDict cannot be exploded')
def __repr__(self):
result = [str(self.__class__.__name__)]
for k, v in self.iteritems():
result.append('%s: %s' % (k, v))
return ', '.join(result)
class AbsoluteReferences(VersionDict):
"""{block_version: set(BlockCellName)}
"""
def __init__(self):
super(AbsoluteReferences, self).__init__(set)
class References(VersionDict):
'''Dict of block_version -> Set[CellName]. It can also be
{block_version: Set of BlockCellName}, for Dependencies translating with DependencyTranslator
'''
def __init__(self):
super(References, self).__init__(set)
def add(self, reference):
self[reference.block_version].add(reference.ref)
def __deepcopy__(self, memo):
        '''This method is necessary for deepcopying in-memory caches:
        defaultdict's deepcopy calls __init__ with a signature incompatible with this class's'''
r = References()
for key, values in self.iteritems():
r[key] = copy.deepcopy(values)
return r
@staticmethod
def deserialize(data):
if data is None:
return None
d = DictDeserializer(BlockVersion, SetDeserializer(CellName)).deserialize(data)
result = References()
result.update(d)
return result
class ReferencedResources(VersionDict):
'''The dict items are dict {CellName: Resource(Cell, Content)}'''
def __init__(self):
super(ReferencedResources, self).__init__(dict)
@staticmethod
def deserialize(data):
d = DictDeserializer(BlockVersion,
DictDeserializer(CellName,
ResourceDeserializer(CellDeserializer(ID),
ContentDeserializer(ID)))).deserialize(data)
result = ReferencedResources()
result.update(d)
return result
def __add__(self, other):
'''adds two referencedResources, for example localDb+remotes for building Snapshot of
dependencies'''
result = ReferencedResources()
for version, deps in self.iteritems():
result[version].update(deps)
for version, deps in other.iteritems():
result[version].update(deps)
return result
class ReferencedDependencies(VersionDict):
'''The dict items are dict{Declaration: set(BlockCellName)}'''
def __init__(self):
super(ReferencedDependencies, self).__init__(lambda: defaultdict(set))
@staticmethod
def deserialize(data):
d = DictDeserializer(BlockVersion,
DictDeserializer(Declaration,
SetDeserializer(BlockCellName))).deserialize(data)
result = ReferencedDependencies()
result.update(d)
return result
class Reference(namedtuple('Reference', ['block_version', 'ref'])):
'''Ref can only be a single ref
'''
def __repr__(self):
return "%s/%s" % (self[0], self[1])
@staticmethod
def deserialize(data):
return Reference(BlockVersion.deserialize(data[0]), CellName.deserialize(data[1]))
def serialize(self):
return (self.block_version.serialize(), self.ref)
def block_cell_name(self):
'''assuming that ref is a single CellName'''
return self.block_version.block_name + self.ref
|
180776
|
import os
import csv
from src import *
def exists_song(csv_letter, artist_url, song_url):
"""
Checks if a song exists in a given CSV given the artist and song url.
:param csv_letter: CSV letter in order to identify which CSV to get.
:param artist_url: Artist AZLyrics URL.
:param song_url: Song AZLyrics URL.
:return: True if the song exists in the CSV, False otherwise.
"""
csv_file_name = f'{CSV_FILE}_{csv_letter}.csv'
exists_file = os.path.isfile(csv_file_name)
if exists_file:
with open(csv_file_name, 'r') as file:
rows = [row for row in csv.reader(file)][1:]
rows = [row for row in rows if row[1] == artist_url and row[3] == song_url]
if rows:
return True
return False
def append_to_csv(artist_name, artist_url, song_name, song_url, song_lyrics, csv_letter):
"""
Appends song information into the end of a (in)existing CSV.
:param artist_name: Artist name.
:param artist_url: Artist AZLyrics URL.
:param song_name: Song name.
:param song_url: Song AZLyrics URL.
:param song_lyrics: Song lyrics.
:param csv_letter: CSV letter for getting the CSV where to append.
:return: Song information appended.
"""
if song_lyrics:
csv_file_name = f'{CSV_FILE}_{csv_letter}.csv'
exists_file = os.path.isfile(csv_file_name)
with open(csv_file_name, 'a') as file:
if not exists_file:
file.write(
f'"{CSV_HEADER_ARTIST_NAME}","{CSV_HEADER_ARTIST_URL}",'
f'"{CSV_HEADER_SONG_NAME}","{CSV_HEADER_SONG_URL}","{CSV_HEADER_LYRICS}"'
)
file.write(f'\n"{artist_name}","{artist_url}","{song_name}","{song_url}","{song_lyrics}"')
|
180793
|
import math
import torch
from HyperSphere.BO.utils.normal_cdf import norm_cdf
def norm_pdf(x, mu=0.0, var=1.0):
return torch.exp(-0.5 * (x-mu) ** 2 / var)/(2 * math.pi * var)**0.5
def expected_improvement(mean, var, reference):
std = torch.sqrt(var)
standardized = (-mean + reference) / std
return (std * norm_pdf(standardized) + (-mean + reference) * norm_cdf(standardized)).clamp(min=0)
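# The return above is the closed-form expected improvement for minimisation
# with incumbent `reference`:
#   EI = (reference - mean) * Phi(z) + std * phi(z),  z = (reference - mean) / std
# where phi / Phi are the standard normal pdf / cdf; the clamp guards against
# small negative values produced by the approximate norm_cdf.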
if __name__ == '__main__':
import matplotlib.pyplot as plt
from scipy.stats import norm
x = torch.linspace(2, 3, 200)
y1 = norm_cdf(x)
y2 = norm.cdf(x.numpy())
plt.plot(x.numpy(), y1.numpy(), label='approximate')
plt.plot(x.numpy(), y2, ':', label='exact')
z1 = norm_pdf(x)
z2 = norm.pdf(x.numpy())
plt.plot(x.numpy(), z1.numpy(), label='approximate')
plt.plot(x.numpy(), z2, ':', label='exact')
plt.legend()
plt.show()
|
180803
|
from infraboxcli.dashboard import local_config
from infraboxcli.log import logger
def list_remotes(args):
if args.verbose:
remotes = local_config.get_all_remotes()
        msg = '\n'.join(remotes)
logger.info('Remotes:')
logger.log(msg, print_header=False)
|
180825
|
from queue import Queue  # Python 3 module name (was 'Queue' in Python 2)
def evaluate_rpn(tokens):
stk = []
while not tokens.empty():
token = tokens.get()
if is_operand(token):
stk.append(token)
else:
val0, val1 = stk.pop(), stk.pop()
num = eval(val1 + token + val0)
stk.append(str(num))
return stk.pop()
def infix_to_rpn(expression):
# The Shunting Yard algorithm by Dr. Dijkstra himself
stk = []
queue = Queue()
tokens = expression.replace(" ", "")
for token in tokens:
if is_operand(token):
queue.put(token)
elif is_operator(token):
token_preced = get_preced(token)
            # pop while the top has higher or equal precedence (+ - * / are left-associative)
            while stk and is_operator(stk[-1]) and get_preced(stk[-1]) >= token_preced:
queue.put(stk.pop())
stk.append(token)
elif token == '(':
stk.append(token)
elif token == ')':
while stk[-1] != '(':
queue.put(stk.pop())
stk.pop()
while stk:
queue.put(stk.pop())
return queue
def is_operand(token):
return token.isdigit()
def is_operator(token):
return token in '+-*/'
def get_preced(token):
if not is_operator(token):
        raise TypeError('Token is not an operator!')
precedences = {'+': 0,
'-': 0,
'*': 1,
'/': 1}
return precedences[token]
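# A minimal usage sketch (assumes single-character, single-digit operands,
# which is all the character-by-character tokenizer above supports):
if __name__ == '__main__':
    rpn = infix_to_rpn('3 + 4 * (2 - 1)')
    print(evaluate_rpn(rpn))  # 7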
|
180968
|
from django.contrib import admin
from .models import AsnListModel, AsnDetailModel
admin.site.register(AsnListModel)
admin.site.register(AsnDetailModel)
|
180981
|
from distutils.version import LooseVersion
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
import pytest
import scipy.spatial.distance
import tensorflow as tf
from .. import losses
def test_dice():
x = np.zeros(4)
y = np.zeros(4)
out = losses.dice(x, y, axis=None).numpy()
assert_allclose(out, 0)
x = np.ones(4)
y = np.ones(4)
out = losses.dice(x, y, axis=None).numpy()
assert_allclose(out, 0)
x = [0.0, 0.0, 1.0, 1.0]
y = [1.0, 1.0, 1.0, 1.0]
out = losses.dice(x, y, axis=None).numpy()
ref = scipy.spatial.distance.dice(x, y)
assert_allclose(out, ref)
x = [0.0, 0.0, 1.0, 1.0]
y = [1.0, 1.0, 0.0, 0.0]
out = losses.dice(x, y, axis=None).numpy()
ref = scipy.spatial.distance.dice(x, y)
assert_allclose(out, ref)
assert_allclose(out, 1)
x = np.ones((4, 32, 32, 32, 1), dtype=np.float32)
y = x.copy()
x[:2, :10, 10:] = 0
y[:2, :3, 20:] = 0
y[3:, 10:] = 0
dices = np.empty(x.shape[0])
for i in range(x.shape[0]):
dices[i] = scipy.spatial.distance.dice(x[i].flatten(), y[i].flatten())
assert_allclose(losses.dice(x, y, axis=(1, 2, 3, 4)), dices, rtol=1e-05)
assert_allclose(losses.Dice(axis=(1, 2, 3, 4))(x, y), dices.mean(), rtol=1e-05)
assert_allclose(losses.Dice(axis=(1, 2, 3, 4))(y, x), dices.mean(), rtol=1e-05)
def test_generalized_dice():
shape = (8, 32, 32, 32, 16)
x = np.zeros(shape)
y = np.zeros(shape)
assert_array_equal(losses.generalized_dice(x, y), np.zeros(shape[0]))
shape = (8, 32, 32, 32, 16)
x = np.ones(shape)
y = np.ones(shape)
assert_array_equal(losses.generalized_dice(x, y), np.zeros(shape[0]))
shape = (8, 32, 32, 32, 16)
x = np.ones(shape)
y = np.zeros(shape)
# Why aren't the losses exactly one? Could it be the propagation of floating
# point inaccuracies when summing?
assert_allclose(losses.generalized_dice(x, y), np.ones(shape[0]), atol=1e-03)
assert_allclose(
losses.GeneralizedDice(axis=(1, 2, 3))(x, y), losses.generalized_dice(x, y)
)
x = np.ones((4, 32, 32, 32, 1), dtype=np.float64)
y = x.copy()
x[:2, :10, 10:] = 0
y[:2, :3, 20:] = 0
y[3:, 10:] = 0
# Dice is similar to generalized Dice for one class. The weight factor
# makes the generalized form slightly different from Dice.
gd = losses.generalized_dice(x, y, axis=(1, 2, 3)).numpy()
dd = losses.dice(x, y, axis=(1, 2, 3, 4)).numpy()
assert_allclose(gd, dd, rtol=1e-02) # is this close enough?
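    # For a single class, the weight w = 1 / sum(y)^2 multiplies both the
    # numerator and denominator of generalized Dice and cancels, reducing it
    # to plain Dice; the residual gap comes from each implementation's
    # epsilon smoothing terms.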
def test_jaccard():
x = np.zeros(4)
y = np.zeros(4)
out = losses.jaccard(x, y, axis=None).numpy()
assert_allclose(out, 0)
x = np.ones(4)
y = np.ones(4)
out = losses.jaccard(x, y, axis=None).numpy()
assert_allclose(out, 0)
x = [0.0, 0.0, 1.0, 1.0]
y = [1.0, 1.0, 1.0, 1.0]
out = losses.jaccard(x, y, axis=None).numpy()
ref = scipy.spatial.distance.jaccard(x, y)
assert_allclose(out, ref)
x = [0.0, 0.0, 1.0, 1.0]
y = [1.0, 1.0, 0.0, 0.0]
out = losses.jaccard(x, y, axis=None).numpy()
ref = scipy.spatial.distance.jaccard(x, y)
assert_allclose(out, ref)
assert_allclose(out, 1)
x = np.ones((4, 32, 32, 32, 1), dtype=np.float32)
y = x.copy()
x[:2, :10, 10:] = 0
y[:2, :3, 20:] = 0
y[3:, 10:] = 0
jaccards = np.empty(x.shape[0])
for i in range(x.shape[0]):
jaccards[i] = scipy.spatial.distance.jaccard(x[i].flatten(), y[i].flatten())
assert_allclose(losses.jaccard(x, y, axis=(1, 2, 3, 4)), jaccards)
assert_allclose(losses.Jaccard(axis=(1, 2, 3, 4))(x, y), jaccards.mean())
assert_allclose(losses.Jaccard(axis=(1, 2, 3, 4))(y, x), jaccards.mean())
@pytest.mark.xfail
def test_tversky():
# TODO: write the test
assert False
@pytest.mark.xfail
def test_elbo():
# TODO: write the test
assert False
def test_wasserstein():
x = np.zeros(4)
y = np.zeros(4)
out = losses.wasserstein(x, y)
assert_allclose(out, 0)
x = np.ones(4)
y = np.ones(4)
out = losses.wasserstein(x, y)
assert_allclose(out, 1)
x = np.array([0.0, -1.0, 1.0, -1.0])
y = np.array([1.0, -1.0, 1.0, 1.0])
out = losses.wasserstein(x, y)
ref = [0.0, 1.0, 1.0, -1.0]
assert_allclose(out, ref)
x = np.array([0.0, 0.0, 1.0, 1.0])
y = np.array([1.0, 1.0, 0.0, 0.0])
out = losses.wasserstein(x, y)
assert_allclose(out, 0)
def test_gradient_penalty():
x = np.zeros(4)
y = np.zeros(4)
out = losses.gradient_penalty(x, y)
assert_allclose(out, 10)
x = np.ones(4)
y = np.ones(4)
out = losses.gradient_penalty(x, y)
assert_allclose(out, 0.001)
x = np.array([0.0, -1.0, 1.0, -1.0])
y = np.array([1.0, -1.0, 1.0, 1.0])
out = losses.gradient_penalty(x, y)
ref = [1.0001e01, 1.0000e-03, 1.0000e-03, 1.0000e-03]
assert_allclose(out, ref)
x = np.array([0.0, 0.0, 1.0, 1.0])
y = np.array([1.0, 1.0, 0.0, 0.0])
out = losses.gradient_penalty(x, y)
ref = [10.001, 10.001, 0.0, 0.0]
assert_allclose(out, ref)
def test_get():
if LooseVersion(tf.__version__) < LooseVersion("1.14.1-dev20190408"):
assert losses.get("dice") is losses.dice
assert losses.get("Dice") is losses.Dice
assert losses.get("jaccard") is losses.jaccard
assert losses.get("Jaccard") is losses.Jaccard
assert losses.get("tversky") is losses.tversky
assert losses.get("Tversky") is losses.Tversky
assert losses.get("binary_crossentropy")
else:
assert losses.get("dice") is losses.dice
assert isinstance(losses.get("Dice"), losses.Dice)
assert losses.get("jaccard") is losses.jaccard
assert isinstance(losses.get("Jaccard"), losses.Jaccard)
assert losses.get("tversky") is losses.tversky
assert isinstance(losses.get("Tversky"), losses.Tversky)
assert losses.get("binary_crossentropy")
assert losses.get("gradient_penalty") is losses.gradient_penalty
assert losses.get("wasserstein") is losses.wasserstein
assert isinstance(losses.get("Wasserstein"), losses.Wasserstein)
|
181063
|
from matplotlib import pyplot as plt
from vis_utils.vis_utils import draw_bounding_box_on_image
from PIL import Image
def vis_datum_mask(dataset):
datum = dataset.random_datum()
fig = plt.figure()
fig.add_subplot(1, 2, 1)
plt.axis('off')
plt.imshow(datum.image)
fig.add_subplot(1, 2, 2)
plt.axis('off')
plt.imshow(datum.image)
    plt.imshow(datum.mask, alpha=0.5)  # alpha must be in [0, 1]
def datum_with_labels(datum, objects=None):
"""
Draws bounding boxes on image.
Args:
datum: Datum Object
objects: List of object instances
Returns:
PIL Image instance
"""
if objects is None:
boxes = [[obj.y_min, obj.x_min, obj.y_max, obj.x_max] for obj in datum.objects]
else:
boxes = [[obj.y_min, obj.x_min, obj.y_max, obj.x_max] for obj in objects]
img = datum.image
for box in boxes:
draw_bounding_box_on_image(img, box[0], box[1], box[2], box[3], use_normalized_coordinates=False)
return img
def datum_with_track(datum, track=None):
"""
Draws bounding boxes on image.
Args:
datum: Datum Object
        track: track identifier used to select matching ground-truth objects
Returns:
PIL Image instance
"""
boxes = [[obj.y_min, obj.x_min, obj.y_max, obj.x_max] for obj in datum.ground_truth if obj.track == track]
img = datum.image
for box in boxes:
draw_bounding_box_on_image(img, box[0], box[1], box[2], box[3], use_normalized_coordinates=False)
return img
def image_with_box(path, box, image=None, color='red', thickness=2):
    """
    Args:
        path: str (path to image)
        box: [x, y, w, h] where x, y are the normalized centre and
            w, h the normalized width / height
        image: pass an image directly to ignore `path`
        color: box color
        thickness: box thickness
    Returns:
        PIL Image instance with the box drawn
    """
if image is None:
image = Image.open(path)
x, y, w, h = box[0], box[1], box[2], box[3]
xmin = x - w / 2
xmax = x + w / 2
ymin = y - h / 2
ymax = y + h / 2
draw_bounding_box_on_image(image=image, ymin=ymin, ymax=ymax, xmin=xmin, xmax=xmax, color=color,
thickness=thickness)
return image
class ImageBoxes:
    def __init__(self, path, plt_axis_off=False):
        # always create an axis so show_plt() works regardless of plt_axis_off
        _, self.ax = plt.subplots(1)
        if plt_axis_off:
            plt.axis("off")
        self.im = Image.open(path)
def add_box(self, box, color='red', thickness=2, flip_order=False):
if flip_order:
cx, cy, h, w = box
box = cx, cy, w, h
self.im = image_with_box(path='', box=box, image=self.im, color=color, thickness=thickness)
def get_final(self):
return self.im.convert("RGB")
def add_from_track(self, x, y, y_pred):
"""
Args:
x: x of shape (time_steps, features)
y: y of shape (features,)
y_pred: y_pred of shape (features,)
"""
for i in x[:, :4]:
self.add_box(list(i[:4]), color='blue', flip_order=True)
self.add_box(list(y[:4]), color='red', thickness=4, flip_order=True)
self.add_box(list(y_pred[:4]), color='orange', thickness=4, flip_order=True)
def show_plt(self, arr):
"""
Plots image in current axis
Args:
arr: image as np.array
"""
self.ax.imshow(arr)
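# A minimal usage sketch (hypothetical path and box; boxes are normalized
# centre x/y plus width/height, matching image_with_box above, and numpy is
# assumed imported as np):
#   viz = ImageBoxes('frame.jpg', plt_axis_off=True)
#   viz.add_box([0.5, 0.5, 0.2, 0.1], color='green')
#   viz.show_plt(np.array(viz.get_final()))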
|
181100
|
class A:
def spam(self):
print('A.spam')
class B(A):
def spam(self):
print('B.spam')
super().spam() # Call parent spam()
class C:
def __init__(self):
self.x = 0
class D(C):
def __init__(self):
super().__init__()
self.y = 1
d = D()
print(d.y)
class Base:
def __init__(self):
print('Base.__init__')
class A(Base):
def __init__(self):
Base.__init__(self)
print('A.__init__')
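# super() dispatches along the MRO rather than hard-coding the parent, which
# avoids running an __init__ twice in diamond hierarchies. A small check
# (B2 is a new illustrative class reusing Base from above):
class B2(Base):
    def __init__(self):
        super().__init__()
        print('B2.__init__')

print(B2.__mro__)  # B2 -> Base -> object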
|
181143
|
import unittest
from checkov.common.models.enums import CheckResult
from checkov.terraform.checks.resource.aws.RedshiftClusterKMSKey import check
import hcl2
class TestRedshiftClusterKMSKey(unittest.TestCase):
def test_failure(self):
hcl_res = hcl2.loads("""
resource "aws_redshift_cluster" "test" {
cluster_identifier = "tf-redshift-cluster"
database_name = "mydb"
master_username = "foo"
master_password = "<PASSWORD>"
node_type = "dc1.large"
cluster_type = "single-node"
}
""")
resource_conf = hcl_res['resource'][0]['aws_redshift_cluster']['test']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success(self):
hcl_res = hcl2.loads("""
resource "aws_redshift_cluster" "test" {
cluster_identifier = "tf-redshift-cluster"
database_name = "mydb"
master_username = "foo"
master_password = "<PASSWORD>"
node_type = "dc1.large"
cluster_type = "single-node"
kms_key_id = "someKey"
}
""")
resource_conf = hcl_res['resource'][0]['aws_redshift_cluster']['test']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == "__main__":
unittest.main()
|
181144
|
import os
class CFFile(object):
def __init__(self, path, size):
self.path = path
self.tell_pos = 0
self.size = size
self.gen_file()
    def gen_file(self):
        # pre-allocate a file of the requested size (sparse where the OS supports it)
        with open(self.path, 'wb') as f:
            f.seek(self.size - 1)
            f.write(b'\x00')
def write(self, data, mode='r+'):
if len(data) + self.tell_pos > self.size:
print('error')
return
with open(self.path, mode) as f:
f.seek(self.tell_pos)
f.write(data)
self.tell_pos = f.tell()
class FileSearch(object):
MOVIE = ['mp4', 'avi', 'wmv', 'rmvb', 'mov', 'mkv']
PIC = ['bmp', 'gif', 'jpeg', 'jpg', 'png']
DATA = ['zip', 'rar', '7z']
DOC = ['doc', 'xls', 'ppt', 'pdf', 'txt']
    # configure the extension filter at construction time
def __init__(self, the_filter):
capital_filter = [x.upper() for x in the_filter]
self.the_filter = the_filter + capital_filter
print(self.the_filter)
    # search for files whose extension is in the filter list; returns a generator
def search(self, dir_path):
for path, subdir, files in os.walk(dir_path):
for f in files:
if '.' in f and f[f.rindex('.') + 1:] in self.the_filter:
file_path = os.path.abspath(path)
yield os.path.join(file_path, f)
def file_split(file_path, output_file_path, size):
if not os.path.exists(file_path):
raise ValueError('Input file path not exists: %s ', file_path)
all_len = os.path.getsize(file_path)
num = all_len // size if all_len % size == 0 else (all_len // size) + 1
index = 0
with open(file_path, 'rb') as f:
for i in range(0, num):
index += 1
            # read size_block bytes at a time to keep memory usage bounded,
            # and never read past this part's nominal size
            size_block = 10 * 1024 * 1024
            remaining = size
            while remaining > 0:
                data = f.read(min(size_block, remaining))
                if not data:
                    break
                remaining -= len(data)
                with open(output_file_path + '.' + str(index), 'ab') as out:
                    out.write(data)
def file_merge(file_path, output_file_path, num):
if os.path.exists(output_file_path):
raise ValueError('Output file path exists: %s ', output_file_path)
with open(output_file_path, 'ab') as out:
for i in range(1, num + 1):
with open(file_path + '.' + str(i), 'rb') as f:
size = os.path.getsize(file_path + '.' + str(i))
                # read size_block bytes at a time to keep memory usage bounded
size_block = 10 * 1024 * 1024
size_block_num = size // size_block if size % size_block == 0 else (size // size_block) + 1
for x in range(0, size_block_num):
data = f.read(size_block)
if not data:
break
out.write(data)
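# A minimal usage sketch (hypothetical paths; sizes in bytes):
#   file_split('big.bin', 'big.part', 20 * 1024 * 1024)  # -> big.part.1, big.part.2, ...
#   file_merge('big.part', 'big_restored.bin', num=3)    # reassemble from 3 parts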
|
181183
|
from os.path import join, isfile, expanduser
from os import listdir
from simple_NER.settings import RESOURES_DIR
def resolve_resource_file(res_name, lang="en-us"):
"""Convert a resource into an absolute filename.
Resource names are in the form: 'filename.ext'
or 'path/filename.ext'
    The system will look for simple_NER/res/res_name first, and
if not found will look in language subfolders
Args:
res_name (str): a resource path/name
Returns:
str: path to resource or None if no resource found
"""
# First look for fully qualified file (e.g. a user setting)
if isfile(res_name):
return res_name
# Next look for /simple_NER/res/res_name
filename = expanduser(join(RESOURES_DIR, res_name))
if isfile(filename):
return filename
# Next look for /simple_NER/res/{lang}/res_name
data_dir = join(RESOURES_DIR, lang)
filename = expanduser(join(data_dir, res_name))
if isfile(filename):
return filename
# Next look for /simple_NER/res/{lang_short}/res_name
data_dir = join(RESOURES_DIR, lang.split("-")[0])
filename = expanduser(join(data_dir, res_name))
if isfile(filename):
return filename
# Next look for /simple_NER/res/{lang-short}-XX/res_name
data_dir = join(RESOURES_DIR)
for folder in listdir(data_dir):
if folder.startswith(lang.split("-")[0]):
filename = expanduser(join(data_dir, folder, res_name))
if isfile(filename):
return filename
return None # Resource cannot be resolved
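# Illustrative lookup order for resolve_resource_file('names.txt', lang='en-us')
# (hypothetical file name): an existing path wins as-is, then res/names.txt,
# then res/en-us/names.txt, then res/en/names.txt, then any res/en-*/names.txt.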
|
181218
|
import time
import numpy as np
import torch
from torch.autograd import Variable
from torch.nn import Parameter
from torch.utils.data.sampler import SubsetRandomSampler
from data_loader import libsvm_dataset
from thrift_ps.ps_service import ParameterServer
from thrift_ps.client import ps_client
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from utils.constants import Prefix, MLModel, Optimization, Synchronization
from storage.s3.s3_type import S3Storage
from model import linear_models
def handler(event, context):
start_time = time.time()
# dataset setting
file = event['file']
data_bucket = event['data_bucket']
dataset_type = event['dataset_type']
assert dataset_type == "dense_libsvm"
n_features = event['n_features']
n_classes = event['n_classes']
n_workers = event['n_workers']
worker_index = event['worker_index']
# ps setting
host = event['host']
port = event['port']
# training setting
model_name = event['model']
optim = event['optim']
sync_mode = event['sync_mode']
assert model_name.lower() in MLModel.Linear_Models
assert optim.lower() == Optimization.Grad_Avg
assert sync_mode.lower() == Synchronization.Reduce
# hyper-parameter
learning_rate = event['lr']
batch_size = event['batch_size']
n_epochs = event['n_epochs']
valid_ratio = event['valid_ratio']
print('bucket = {}'.format(data_bucket))
print("file = {}".format(file))
print('number of workers = {}'.format(n_workers))
print('worker index = {}'.format(worker_index))
print('model = {}'.format(model_name))
print('host = {}'.format(host))
print('port = {}'.format(port))
# Set thrift connection
# Make socket
transport = TSocket.TSocket(host, port)
# Buffering is critical. Raw sockets are very slow
transport = TTransport.TBufferedTransport(transport)
# Wrap in a protocol
protocol = TBinaryProtocol.TBinaryProtocol(transport)
# Create a client to use the protocol encoder
t_client = ParameterServer.Client(protocol)
# Connect!
transport.open()
# test thrift connection
ps_client.ping(t_client)
print("create and ping thrift server >>> HOST = {}, PORT = {}".format(host, port))
# Read file from s3
read_start = time.time()
storage = S3Storage()
lines = storage.load(file, data_bucket).read().decode('utf-8').split("\n")
print("read data cost {} s".format(time.time() - read_start))
parse_start = time.time()
dataset = libsvm_dataset.from_lines(lines, n_features, dataset_type)
print("parse data cost {} s".format(time.time() - parse_start))
preprocess_start = time.time()
# Creating data indices for training and validation splits:
dataset_size = len(dataset)
indices = list(range(dataset_size))
split = int(np.floor(valid_ratio * dataset_size))
shuffle_dataset = True
random_seed = 100
if shuffle_dataset:
np.random.seed(random_seed)
np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]
# Creating data samplers and loaders:
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)
train_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
sampler=train_sampler)
n_train_batch = len(train_loader)
validation_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
sampler=valid_sampler)
print("preprocess data cost {} s, dataset size = {}"
.format(time.time() - preprocess_start, dataset_size))
model = linear_models.get_model(model_name, n_features, n_classes)
# Loss and Optimizer
# Softmax is internally computed.
# Set parameters to be updated.
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    # register the model under a fixed parameter-server key
    # (note: this intentionally shadows the event's model name)
    model_name = "w.b"
weight_shape = model.linear.weight.data.numpy().shape
weight_length = weight_shape[0] * weight_shape[1]
bias_shape = model.linear.bias.data.numpy().shape
bias_length = bias_shape[0]
model_length = weight_length + bias_length
ps_client.register_model(t_client, worker_index, model_name, model_length, n_workers)
ps_client.exist_model(t_client, model_name)
print("register and check model >>> name = {}, length = {}".format(model_name, model_length))
# Training the Model
train_start = time.time()
iter_counter = 0
for epoch in range(n_epochs):
epoch_start = time.time()
epoch_cal_time = 0
epoch_comm_time = 0
epoch_loss = 0.
for batch_idx, (items, labels) in enumerate(train_loader):
batch_comm_time = 0
batch_start = time.time()
# pull latest model
ps_client.can_pull(t_client, model_name, iter_counter, worker_index)
latest_model = ps_client.pull_model(t_client, model_name, iter_counter, worker_index)
model.linear.weight = Parameter(
torch.from_numpy(np.asarray(latest_model[:weight_length], dtype=np.float32).reshape(weight_shape)))
model.linear.bias = Parameter(
torch.from_numpy(np.asarray(latest_model[weight_length:], dtype=np.float32).reshape(bias_shape[0])))
batch_comm_time += time.time() - batch_start
# Forward + Backward + Optimize
batch_cal_start = time.time()
items = Variable(items.view(-1, n_features))
labels = Variable(labels)
optimizer.zero_grad()
outputs = model(items)
loss = criterion(outputs, labels)
epoch_loss += loss.item()
loss.backward()
# flatten and concat gradients of weight and bias
w_b_grad = np.concatenate((model.linear.weight.grad.data.double().numpy().flatten(),
model.linear.bias.grad.data.double().numpy().flatten()))
batch_cal_time = time.time() - batch_cal_start
# push gradient to PS
batch_comm_start = time.time()
ps_client.can_push(t_client, model_name, iter_counter, worker_index)
ps_client.push_grad(t_client, model_name, w_b_grad, -1. * learning_rate / n_workers,
iter_counter, worker_index)
ps_client.can_pull(t_client, model_name, iter_counter + 1, worker_index) # sync all workers
batch_comm_time += time.time() - batch_comm_start
epoch_cal_time += batch_cal_time
epoch_comm_time += batch_comm_time
if batch_idx % 10 == 0:
print('Epoch: [%d/%d], Batch: [%d/%d] >>> Time: %.4f, Loss: %.4f, epoch cost %.4f, '
'batch cost %.4f s: cal cost %.4f s and communication cost %.4f s'
% (epoch + 1, n_epochs, batch_idx + 1, n_train_batch,
time.time() - train_start, loss.data, time.time() - epoch_start,
time.time() - batch_start, batch_cal_time, batch_comm_time))
iter_counter += 1
# Test the Model
test_start = time.time()
n_test_correct = 0
n_test = 0
test_loss = 0
for items, labels in validation_loader:
items = Variable(items.view(-1, n_features))
labels = Variable(labels)
outputs = model(items)
test_loss += criterion(outputs, labels).data
_, predicted = torch.max(outputs.data, 1)
n_test += labels.size(0)
n_test_correct += (predicted == labels).sum()
test_time = time.time() - test_start
print('Epoch: [%d/%d], Batch: [%d/%d], Time: %.4f, Loss: %.4f, epoch cost %.4f: '
'calculation cost = %.4f s, synchronization cost %.4f s, test cost %.4f s, '
'accuracy of the model on the %d test samples: %d %%, loss = %f'
% (epoch + 1, n_epochs, batch_idx + 1, n_train_batch,
time.time() - train_start, epoch_loss, time.time() - epoch_start,
epoch_cal_time, epoch_comm_time, test_time,
n_test, 100. * n_test_correct / n_test, test_loss / n_test))
end_time = time.time()
print("Elapsed time = {} s".format(end_time - start_time))
|
181220
|
import pytest
from butterfree.clients import SparkClient
from butterfree.hooks.schema_compatibility import SparkTableSchemaCompatibilityHook
class TestSparkTableSchemaCompatibilityHook:
@pytest.mark.parametrize(
"table, database, target_table_expression",
[("table", "database", "`database`.`table`"), ("table", None, "`table`")],
)
def test_build_table_expression(self, table, database, target_table_expression):
# arrange
spark_client = SparkClient()
# act
result_table_expression = SparkTableSchemaCompatibilityHook(
spark_client, table, database
).table_expression
# assert
assert target_table_expression == result_table_expression
def test_run_compatible_schema(self, spark_session):
# arrange
spark_client = SparkClient()
target_table = spark_session.sql(
"select 1 as feature_a, 'abc' as feature_b, true as other_feature"
)
input_dataframe = spark_session.sql("select 1 as feature_a, 'abc' as feature_b")
target_table.registerTempTable("test")
hook = SparkTableSchemaCompatibilityHook(spark_client, "test")
# act and assert
assert hook.run(input_dataframe) == input_dataframe
def test_run_incompatible_schema(self, spark_session):
# arrange
spark_client = SparkClient()
target_table = spark_session.sql(
"select 1 as feature_a, 'abc' as feature_b, true as other_feature"
)
input_dataframe = spark_session.sql(
"select 1 as feature_a, 'abc' as feature_b, true as unregisted_column"
)
target_table.registerTempTable("test")
hook = SparkTableSchemaCompatibilityHook(spark_client, "test")
# act and assert
with pytest.raises(ValueError, match="The dataframe has a schema incompatible"):
hook.run(input_dataframe)
|