| id | content |
|---|---|
11464657
|
from typing import Any, Callable, List, Tuple, Union
import numpy as np
import pandas as pd
import tensorflow as tf
import torch
from sklearn.model_selection import train_test_split
from carla.data.catalog import DataCatalog
from carla.data.causal_model.synthethic_data import ScmDataset
from carla.data.load_catalog import load_catalog
from carla.models.api import MLModel
from carla.models.pipelining import decode, descale, encode, order_data, scale
from .load_model import load_online_model, load_trained_model, save_model
from .train_model import train_model
class MLModelCatalog(MLModel):
"""
Use pretrained classifier.
Parameters
----------
data : data.catalog.DataCatalog Class
Dataset the ML model was trained on.
model_type : {'ann', 'linear'}
Architecture.
backend : {'tensorflow', 'pytorch'}
Specifies the used framework.
cache : boolean, default: True
If True, try to load from the local cache first, and save to the cache
if a download is required.
models_home : string, optional
The directory in which to cache data; see :func:`get_models_home`.
kws : keys and values, optional
Additional keyword arguments are passed through to the function that reads the model.
use_pipeline : bool, default: False
If true, the model uses a pipeline before predict and predict_proba to preprocess the input data.
load_online: bool, default: True
If true, a pretrained model is loaded. If false, a model is trained.
Methods
-------
predict:
One-dimensional prediction of ml model for an output interval of [0, 1].
predict_proba:
Two-dimensional probability prediction of ml model
get_pipeline_element:
Returns a specific element of the pipeline
perform_pipeline:
Transforms input for prediction into correct form.
Returns
-------
None
"""
def __init__(
self,
data: DataCatalog,
model_type: str,
backend: str = "tensorflow",
cache: bool = True,
models_home: str = None,
use_pipeline: bool = False,
load_online: bool = True,
**kws,
) -> None:
"""
Constructor for pretrained ML models from the catalog.
Possible backends are currently "pytorch" and "tensorflow".
Possible models are currently "ann" and "linear".
"""
self._model_type = model_type
self._backend = backend
if self._backend == "pytorch":
ext = "pt"
encoding_method = "OneHot"
elif self._backend == "tensorflow":
ext = "h5"
encoding_method = "OneHot_drop_binary"
else:
raise ValueError(
"Backend not available, please choose between pytorch and tensorflow"
)
super().__init__(data, encoding_method=encoding_method)
if not isinstance(data, ScmDataset):
# Load catalog
catalog_content = ["ann", "linear"]
catalog = load_catalog("mlmodel_catalog.yaml", data.name, catalog_content) # type: ignore
if model_type not in catalog:
raise ValueError("Model type not in model catalog")
self._catalog = catalog[model_type][self._backend]
self._feature_input_order = self._catalog["feature_order"]
else:
self._catalog = None
# TODO is this the same as np.setdiff1d?
self._feature_input_order = list(
np.sort(data.continous + data.categoricals)
)
self._continuous = data.continous
self._categoricals = data.categoricals
# Preparing pipeline components
self._use_pipeline = use_pipeline
self._pipeline = self.__init_pipeline()
self._inverse_pipeline = self.__init_inverse_pipeline()
if load_online:
self._model = load_online_model(
model_type, data.name, ext, cache, models_home, **kws
)
def __init_pipeline(self) -> List[Tuple[str, Callable]]:
return [
("scaler", lambda x: scale(self.scaler, self._continuous, x)),
("encoder", lambda x: encode(self.encoder, self._categoricals, x)),
("order", lambda x: order_data(self._feature_input_order, x)),
]
def __init_inverse_pipeline(self) -> List[Tuple[str, Callable]]:
return [
("encoder", lambda x: decode(self.encoder, self._categoricals, x)),
("scaler", lambda x: descale(self.scaler, self._continuous, x)),
]
def get_pipeline_element(self, key: str) -> Callable:
"""
Returns a specific element of the pipeline
Parameters
----------
key : str
Element of the pipeline we want to return
Returns
-------
Pipeline element
"""
key_idx = list(zip(*self._pipeline))[0].index(key) # find key in pipeline
return self._pipeline[key_idx][1]
@property
def pipeline(self) -> List[Tuple[str, Callable]]:
"""
Returns the transformation steps applied to the input before prediction.
Returns
-------
pipeline : list
List of (name, transform) tuples that are chained in the order in which they are performed.
"""
return self._pipeline
@property
def inverse_pipeline(self) -> List[Tuple[str, Callable]]:
"""
Returns the transformation steps applied to the output after prediction.
Returns
-------
pipeline : list
List of (name, transform) tuples that are chained in the order in which they are performed.
"""
return self._inverse_pipeline
def perform_pipeline(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Transforms input for prediction into correct form.
Expects a DataFrame that has not yet been preprocessed.
Recommended in order to keep encodings, normalization and input order consistent.
Parameters
----------
df : pd.DataFrame
Contains unnormalized and not encoded data.
Returns
-------
output : pd.DataFrame
Prediction input in correct order, normalized and encoded
"""
output = df.copy()
for trans_name, trans_function in self._pipeline:
output = trans_function(output)
return output
def perform_inverse_pipeline(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Transforms output after prediction back into original form.
Expects a DataFrame that has already been preprocessed by the pipeline.
Parameters
----------
df : pd.DataFrame
Contains normalized and encoded data.
Returns
-------
output : pd.DataFrame
Prediction output denormalized and decoded
"""
output = df.copy()
for trans_name, trans_function in self._inverse_pipeline:
output = trans_function(output)
return output
@property
def feature_input_order(self) -> List[str]:
"""
Returns the required order of features as a list.
Prevents confusion about the correct order of input features during evaluation.
Returns
-------
ordered_features : list of str
Correct order of input features for ml model
"""
return self._feature_input_order
@property
def model_type(self) -> str:
"""
Describes the model type
E.g., ann, linear
Returns
-------
model_type : str
Model type, e.g., "ann" or "linear"
"""
return self._model_type
@property
def backend(self) -> str:
"""
Describes the type of backend which is used for the ml model.
E.g., tensorflow, pytorch, sklearn, ...
Returns
-------
backend : str
Used framework
"""
return self._backend
@property
def raw_model(self) -> Any:
"""
Returns the raw ml model built on its framework
Returns
-------
ml_model : tensorflow, pytorch, sklearn model type
Loaded model
"""
return self._model
def predict(
self, x: Union[np.ndarray, pd.DataFrame, torch.Tensor, tf.Tensor]
) -> Union[np.ndarray, pd.DataFrame, torch.Tensor, tf.Tensor]:
"""
One-dimensional prediction of ml model for an output interval of [0, 1]
Shape of input dimension has to be always two-dimensional (e.g., (1, m), (n, m))
Parameters
----------
x : np.Array, pd.DataFrame, or backend specific (tensorflow or pytorch tensor)
Tabular data of shape N x M (N number of instances, M number of features)
Returns
-------
output : np.ndarray, or backend specific (tensorflow or pytorch tensor)
Ml model prediction for interval [0, 1] with shape N x 1
"""
if len(x.shape) != 2:
raise ValueError("Input shape has to be two-dimensional")
if self._backend == "pytorch":
input = x
return self.predict_proba(input)[:, 1].reshape((-1, 1))
elif self._backend == "tensorflow":
# keep output in shape N x 1
input = self.perform_pipeline(x) if self._use_pipeline else x
return self._model.predict(input)[:, 1].reshape((-1, 1))
else:
raise ValueError(
'Incorrect backend value. Please use only "pytorch" or "tensorflow".'
)
def predict_proba(
self, x: Union[np.ndarray, pd.DataFrame, torch.Tensor, tf.Tensor]
) -> Union[np.ndarray, pd.DataFrame, torch.Tensor, tf.Tensor]:
"""
Two-dimensional probability prediction of ml model
Shape of input dimension has to be always two-dimensional (e.g., (1, m), (n, m))
Parameters
----------
x : np.Array, pd.DataFrame, or backend specific (tensorflow or pytorch tensor)
Tabular data of shape N x M (N number of instances, M number of features)
Returns
-------
output : np.ndarray, or backend specific (tensorflow or pytorch tensor)
Ml model prediction with shape N x 2
"""
if len(x.shape) != 2:
raise ValueError("Input shape has to be two-dimensional")
input = self.perform_pipeline(x) if self._use_pipeline else x
if self._backend == "pytorch":
# Keep model and input on the same device
device = "cuda" if torch.cuda.is_available() else "cpu"
self._model = self._model.to(device)
if isinstance(input, pd.DataFrame):
input = input.values
input, tensor_output = (
(torch.Tensor(input), False)
if not torch.is_tensor(input)
else (input, True)
)
input = input.to(device)
output = self._model(input)
if tensor_output:
return output
else:
return output.detach().cpu().numpy()
elif self._backend == "tensorflow":
return self._model.predict(input)
else:
raise ValueError(
'Incorrect backend value. Please use only "pytorch" or "tensorflow".'
)
@property
def use_pipeline(self) -> bool:
"""
Returns if the ML model uses the pipeline for predictions
Returns
-------
bool
"""
return self._use_pipeline
@use_pipeline.setter
def use_pipeline(self, use_pipe: bool) -> None:
"""
Sets if the ML model should use the pipeline before prediction.
Parameters
----------
use_pipe : bool
If true, the model uses a transformation pipeline before prediction.
Returns
-------
"""
self._use_pipeline = use_pipe
def train(
self,
learning_rate,
epochs,
batch_size,
force_train=False,
hidden_size=[18, 9, 3],
):
"""
Parameters
----------
learning_rate: float
Learning rate for the training.
epochs: int
Number of epochs to train for.
batch_size: int
Number of samples in each batch
force_train: bool
Force training, even if model already exists in cache.
hidden_size: list[int]
hidden_size[i] contains the number of nodes in layer [i]
Returns
-------
"""
layer_string = "_".join([str(size) for size in hidden_size])
if self.model_type == "linear":
save_name = f"{self.model_type}"
elif self.model_type == "ann":
save_name = f"{self.model_type}_layers_{layer_string}"
else:
raise NotImplementedError(f"Model type not supported: {self.model_type}")
# try to load the model from disk, if that fails train the model instead.
self._model = None
if not force_train:
self._model = load_trained_model(
save_name=save_name, data_name=self.data.name, backend=self.backend
)
# sanity check to see if loaded model accuracy makes sense
if self._model is not None:
# preprocess data
data_df = self.data.raw
x = data_df[list(set(data_df.columns) - {self.data.target})]
y = data_df[self.data.target]
x_train, x_test, y_train, y_test = train_test_split(x, y)
prediction = (self.predict(x_test) > 0.5).flatten()
correct = prediction == y_test
print(f"approx. acc: {correct.mean()}")
if self._model is None or force_train:
# preprocess data
data_df = self.data.raw
if self.use_pipeline:
x = self.perform_pipeline(data_df)
else:
x = data_df[list(set(data_df.columns) - {self.data.target})]
y = data_df[self.data.target]
self._model = train_model(
self, x, y, learning_rate, epochs, batch_size, hidden_size
)
save_model(
model=self._model,
save_name=save_name,
data_name=self.data.name,
backend=self.backend,
)
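# --------------------------------------------------------------------------
# Minimal usage sketch (illustrative only). It assumes a catalog dataset such
# as "adult" and a pretrained TensorFlow ANN for it are available; the dataset
# name and the DataCatalog constructor call are assumptions, not part of this
# module.
if __name__ == "__main__":
    catalog_data = DataCatalog("adult")  # assumed catalog dataset name
    ml_model = MLModelCatalog(
        catalog_data, model_type="ann", backend="tensorflow", use_pipeline=True
    )
    # With use_pipeline=True, raw rows are scaled, encoded and reordered
    # before they reach the classifier.
    raw_factuals = catalog_data.raw.drop(columns=[catalog_data.target]).head(5)
    print(ml_model.predict_proba(raw_factuals))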
|
11464695
|
import idc
import idaapi
from idaapi import Choose2
import driverlib
import ctypes
import ioctl_decoder as ioctl_decoder
# yoinked from https://stackoverflow.com/a/25678113
OpenClipboard = ctypes.windll.user32.OpenClipboard
EmptyClipboard = ctypes.windll.user32.EmptyClipboard
GetClipboardData = ctypes.windll.user32.GetClipboardData
SetClipboardData = ctypes.windll.user32.SetClipboardData
CloseClipboard = ctypes.windll.user32.CloseClipboard
CF_UNICODETEXT = 13
GlobalAlloc = ctypes.windll.kernel32.GlobalAlloc
GlobalLock = ctypes.windll.kernel32.GlobalLock
GlobalUnlock = ctypes.windll.kernel32.GlobalUnlock
GlobalSize = ctypes.windll.kernel32.GlobalSize
GMEM_MOVEABLE = 0x0002
GMEM_ZEROINIT = 0x0040
unicode_type = type(u'')
class stop_unload_handler_t(idaapi.action_handler_t):
def __init__(self):
idaapi.action_handler_t.__init__(self)
def activate(self, ctx):
if ctypes.windll.shell32.IsUserAnAdmin() == 0:
print "Admin privileges required"
return
name = idc.GetInputFile().split('.')[0]
driver = driverlib.Driver(idc.GetInputFilePath(),name)
driver.stop()
driver.unload()
def update(self, ctx):
return idaapi.AST_ENABLE_FOR_FORM if idaapi.is_chooser_tform(ctx.form_type) else idaapi.AST_DISABLE_FOR_FORM
class start_load_handler_t(idaapi.action_handler_t):
def __init__(self):
idaapi.action_handler_t.__init__(self)
def activate(self, ctx):
if ctypes.windll.shell32.IsUserAnAdmin() == 0:
print "Admin privileges required"
return
name = idc.GetInputFile().split('.')[0]
driver = driverlib.Driver(idc.GetInputFilePath(),name)
driver.load()
driver.start()
def update(self, ctx):
return idaapi.AST_ENABLE_FOR_FORM if idaapi.is_chooser_tform(ctx.form_type) else idaapi.AST_DISABLE_FOR_FORM
class send_ioctl_handler_t(idaapi.action_handler_t):
def __init__(self, items):
idaapi.action_handler_t.__init__(self)
self.items = items
def activate(self, ctx):
ind = ctx.chooser_selection.at(0)
ioctl = self.items[ind - 1]
name = idc.GetInputFile().split('.')[0]
driver = driverlib.Driver(idc.GetInputFilePath(),name)
DisplayIOCTLSForm(ioctl, driver)
def update(self, ctx):
return idaapi.AST_ENABLE_FOR_FORM if idaapi.is_chooser_tform(ctx.form_type) else idaapi.AST_DISABLE_FOR_FORM
class copy_defines_handler_t(idaapi.action_handler_t):
def __init__(self, items):
idaapi.action_handler_t.__init__(self)
self.items = items
def activate(self, ctx):
defines = []
for item in self.items:
defines.append(item[5])
print(defines)
paste('\n'.join(defines))
def update(self, ctx):
return idaapi.AST_ENABLE_FOR_FORM if idaapi.is_chooser_tform(ctx.form_type) else idaapi.AST_DISABLE_FOR_FORM
class remove_ioctl(idaapi.action_handler_t):
def __init__(self, items):
idaapi.action_handler_t.__init__(self)
self.items = items
def activate(self, ctx):
# get item and remove
ind = ctx.chooser_selection.at(0)
ioctl = self.items[ind - 1]
pos = int(ioctl[0], 16)
define = ioctl[5]
global ioctl_tracker
code = None
for (addr, val) in ioctl_tracker.ioctls:
if addr == pos:
code = val
break
# Get current comment for this instruction and remove the C define from it, if present
comment = idc.Comment(pos)
comment = comment.replace(define, "")
idc.MakeComm(pos, comment)
# Remove the ioctl from the valid list and add it to the invalid list to avoid 'find_all_ioctls' accidentally re-indexing it.
ioctl_tracker.remove_ioctl(pos, code)
def update(self, ctx):
return idaapi.AST_ENABLE_FOR_FORM if idaapi.is_chooser_tform(ctx.form_type) else idaapi.AST_DISABLE_FOR_FORM
class MyChoose2(Choose2):
def __init__(self, title, items, flags=0, width=None, height=None, embedded=False, modal=False):
Choose2.__init__(
self,
title,
[ ["Address", 5], ["Function", 5], ["Device", 15], ["Method", 15], ["Access", 30], ["C define", 100] ],
flags = flags,
width = width,
height = height,
embedded = embedded)
self.n = 0
self.items = items
self.icon = 5
self.selcount = 0
self.modal = modal
self.popup_names = ["Insert", "Delete", "Refresh"]
def OnClose(self):
pass
def OnSelectLine(self, n):
item = self.items[n]
jump_ea = int(item[0], 16)
# Only jump for valid addresses
if idaapi.IDA_SDK_VERSION < 700:
valid_addr = idc.isEnabled(jump_ea)
else:
valid_addr = idc.is_mapped(jump_ea)
if valid_addr:
idc.Jump(jump_ea)
def OnGetLine(self, n):
return self.items[n]
def OnGetSize(self):
n = len(self.items)
return n
def OnDeleteLine(self, n):
global ioctl_tracker
ioctl_tracker.remove_ioctl(int(self.items[n][0], 16))
del self.items[n]
return n
def OnRefresh(self, n):
self.items = get_all_defines()
return n
def OnGetIcon(self, n):
return -1
def show(self):
return self.Show(self.modal) >= 0
def OnGetLineAttr(self, n):
pass
def get_operand_value(addr):
"""Returns the value of the second operand to the instruction at `addr` masked to be a 32 bit value"""
return idc.GetOperandValue(addr, 1) & 0xffffffff
def get_all_defines():
"""Returns the C defines for all ICOTL codes which have been marked during the current session"""
global ioctl_tracker
defines = []
for (addr, value) in ioctl_tracker.ioctls:
function = ioctl_decoder.get_function(value)
device_name, device_code = ioctl_decoder.get_device(value)
method_name, method_code = ioctl_decoder.get_method(value)
access_name, access_code = ioctl_decoder.get_access(value)
define = ioctl_decoder.get_define(value)
defines.append(["0x%X" % (addr,), "0x%X" % (function,), "%s (0x%X)" % (device_name, device_code), "%s (0x%X)" % (method_name, method_code), "%s (0x%X)" % (access_name, access_code), define])
return defines
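# For reference, the decoding done by ioctl_decoder follows the standard
# Windows CTL_CODE bit layout. This illustrative helper (not used above) shows
# how a raw 32-bit IOCTL value splits into its fields; symbolic name lookup is
# left to ioctl_decoder.
def split_ioctl_fields(code):
    device = (code >> 16) & 0xFFFF   # DeviceType, bits 31..16
    access = (code >> 14) & 0x3      # RequiredAccess, bits 15..14
    function = (code >> 2) & 0xFFF   # FunctionCode, bits 13..2
    method = code & 0x3              # TransferType (METHOD_*), bits 1..0
    return device, function, method, access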
def create_ioctl_tab(tracker, modal=False):
global ioctl_tracker
ioctl_tracker = tracker
items = get_all_defines()
idaapi.register_action(
idaapi.action_desc_t(
"choose2:remove_ioctl",
"Invalid IOCTL",
remove_ioctl(items)
)
)
action = "send_ioctl"
actname = "choose2:act%s" % action
idaapi.register_action(
idaapi.action_desc_t(
actname,
"Send IOCTL",
send_ioctl_handler_t(items)))
idaapi.register_action(
idaapi.action_desc_t(
"choose2:actcopy_defines",
"Copy All Defines",
copy_defines_handler_t(items)))
idaapi.register_action(
idaapi.action_desc_t(
"choose2:actstop_unload",
"Stop & Unload Driver",
stop_unload_handler_t()))
idaapi.register_action(
idaapi.action_desc_t(
"choose2:actstart_load",
"Load & Start Driver",
start_load_handler_t()))
global c
c = MyChoose2("IOCTL Code Viewer", items, modal=modal)
c.show()
form = idaapi.get_current_tform()
idaapi.attach_action_to_popup(form, None, "choose2:act%s" % action)
idaapi.attach_action_to_popup(form, None, "choose2:actcopy_defines")
idaapi.attach_action_to_popup(form, None, "choose2:actstop_unload")
idaapi.attach_action_to_popup(form, None, "choose2:actstart_load")
idaapi.attach_action_to_popup(form, None, "choose2:remove_ioctl")
def paste(s):
if not isinstance(s, unicode_type):
s = s.decode('mbcs')
data = s.encode('utf-16le')
OpenClipboard(None)
EmptyClipboard()
handle = GlobalAlloc(GMEM_MOVEABLE | GMEM_ZEROINIT, len(data) + 2)
pcontents = GlobalLock(handle)
ctypes.memmove(pcontents, data, len(data))
GlobalUnlock(handle)
SetClipboardData(CF_UNICODETEXT, handle)
CloseClipboard()
class DisplayIOCTLSForm(idaapi.Form):
"""Creates a pop up dialog with all indexed IOCTL code definitions inside of a multi line text box"""
def __init__(self, ioctl, driver):
idaapi.Form.__init__(
self,
"""Send IOCTL
{form_change}
<#Input Buffer#~I~nput Buffer:{in_buf}>
<#Input Buffer Size#~I~nput Buffer Size:{in_size}>
<#Output Buffer#~O~utput Buffer:{out_buf}>
<#Output Buffer Size#~O~utput Buffer Size:{out_size}>
<#Send IOCTL#~S~end IOCTL:{sendIOCTL}>
""", {
"form_change": idaapi.Form.FormChangeCb(self.form_change),
"in_buf": idaapi.Form.MultiLineTextControl(),
"out_buf": idaapi.Form.MultiLineTextControl(),
"in_size": idaapi.Form.NumericInput(),
"out_size": idaapi.Form.NumericInput(),
"sendIOCTL": idaapi.Form.ButtonInput(self.send_ioctl)
}
)
self.driver = driver
global ioctl_tracker
for inst in ioctl_tracker.ioctl_locs:
value = get_operand_value(inst)
function = ioctl_decoder.get_function(value)
if function == int(ioctl[1],16):
self.ioctl = value
self.Compile()
self.in_size.value = 0x20
self.out_size.value = 0x20
self.in_buf.value = "\\x41" * 0x20
self.Execute()
def form_change(self,fid):
if fid == self.in_size.id:
val = self.GetControlValue(self.in_size)
self.in_size.value = val
elif fid == self.out_size.id:
val = self.GetControlValue(self.out_size)
self.out_size.value = val
elif fid == self.out_buf.id:
val = self.GetControlValue(self.out_buf)
self.out_buf.value = val.value
elif fid == self.in_buf.id:
val = self.GetControlValue(self.in_buf)
self.in_buf.value = val.value
elif fid == -1:
pass
elif fid == -2:
self.Close(-1)
elif fid == self.sendIOCTL.id:
pass
else:
print fid
def send_ioctl(self,fid):
if not self.driver.handle:
self.driver.open_device()
in_buf = self.in_buf.value.decode('string_escape')
in_size = self.in_size.value
out_size = self.out_size.value
out_buf = self.out_buf.value.decode('string_escape')
self.driver.send_ioctl(self.ioctl, in_buf, in_size, out_buf, out_size)
|
11464735
|
class Shape:
'''
A class for shape
'''
def __init__(self):
'''
You won't believe the next five lines
'''
print(0)
shape = Shape()
|
11464750
|
from __future__ import print_function
from kivy.properties import (
ObjectProperty, ListProperty, DictProperty, StringProperty)
from kivy.uix.boxlayout import BoxLayout
from .app_buttons import AppBlueButton
from kivy.logger import Logger
from kivy.lang import Builder
Builder.load_string('''
<ActivitieBox>:
orientation: 'vertical'
size_hint_y: None
height: len(self.children) * app.bsize * 1.1
spacing: app.bsize * 0.1
''')
class ActivitieViewClass(AppBlueButton):
activity = StringProperty()
name = StringProperty()
def refresh_view_attrs(self, act_box, index, data):
self.text = '%s %s...' % (data['name'], data['activity'][:25])
class ActivitieBox(BoxLayout):
viewclass_class = ActivitieViewClass
data = DictProperty()
def on_data(self, instance, value):
items = value.items()
value = [{'name': str(k), 'activity': str(v)} for k, v in items]
Logger.info('ActivitieBox: on_data: {}'.format(value))
if self.viewclass_class:
children_count = len(self.children)
value_count = len(value)
if children_count != value_count:
if children_count < value_count:
for count in range(0, value_count - children_count):
self.add_widget(self.viewclass_class())
else:
for count in range(0, children_count - value_count):
self.remove_widget(self.children[-1])
if self.children:
for i, child in enumerate(reversed(self.children)):
child.refresh_view_attrs(self, i, value[i])
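# Usage sketch (illustrative): assigning a dict to `data` rebuilds the child
# buttons, one per (name, activity) pair. A running Kivy app is required
# because the kv rule above references `app.bsize`; the names below are
# placeholders.
#
#   box = ActivitieBox()
#   box.data = {'alice': 'reading docs', 'bob': 'reviewing a merge request'}
#   # on_data() adds/removes AppBlueButton children and refreshes their text.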
|
11464778
|
import pylab
# import seaborn as sns
from scipy.sparse import diags
from scipy.sparse.linalg import cg
MAX_VAL = 255.0
from scipy.sparse import csr_matrix
import numpy as np
from scipy.sparse.linalg import inv
RGB_TO_YUV = np.array([
[ 0.299, 0.587, 0.114],
[-0.168736, -0.331264, 0.5],
[ 0.5, -0.418688, -0.081312]])
YUV_TO_RGB = np.array([
[1.0, 0.0, 1.402],
[1.0, -0.34414, -0.71414],
[1.0, 1.772, 0.0]])
YUV_OFFSET = np.array([0, 128.0, 128.0]).reshape(1, 1, -1)
def rgb2yuv(im):
return np.tensordot(im, RGB_TO_YUV, ([2], [1])) + YUV_OFFSET
def yuv2rgb(im):
return np.tensordot(im.astype(float) - YUV_OFFSET, YUV_TO_RGB, ([2], [1]))
##############################################################################
REQUIRES_CONF_GRAD = True
##############################################################################
def get_valid_idx(valid, candidates):
"""Find which values are present in a list and where they are located"""
locs = np.searchsorted(valid, candidates)
# Handle edge case where the candidate is larger than all valid values
locs = np.clip(locs, 0, len(valid) - 1)
# Identify which values are actually present
valid_idx = np.flatnonzero(valid[locs] == candidates)
locs = locs[valid_idx]
return valid_idx, locs
class BilateralGrid(object):
def __init__(self, im, sigma_spatial=32, sigma_luma=8, sigma_chroma=8):
im_yuv = rgb2yuv(im)
# Compute 5-dimensional XYLUV bilateral-space coordinates
Iy, Ix = np.mgrid[:im.shape[0], :im.shape[1]]
x_coords = (Ix / sigma_spatial).astype(int)
y_coords = (Iy / sigma_spatial).astype(int)
luma_coords = (im_yuv[..., 0] /sigma_luma).astype(int)
chroma_coords = (im_yuv[..., 1:] / sigma_chroma).astype(int)
coords = np.dstack((x_coords, y_coords, luma_coords, chroma_coords))
coords_flat = coords.reshape(-1, coords.shape[-1])
self.npixels, self.dim = coords_flat.shape
# Hacky "hash vector" for coordinates,
# Requires all scaled coordinates be < MAX_VAL
self.hash_vec = (MAX_VAL**np.arange(self.dim))
# Construct S and B matrix
self._compute_factorization(coords_flat)
def _compute_factorization(self, coords_flat):
# Hash each coordinate in grid to a unique value
hashed_coords = self._hash_coords(coords_flat)
unique_hashes, unique_idx, idx = \
np.unique(hashed_coords, return_index=True, return_inverse=True)
# Identify unique set of vertices
unique_coords = coords_flat[unique_idx]
self.nvertices = len(unique_coords)
# Construct sparse splat matrix that maps from pixels to vertices
self.S = csr_matrix((np.ones(self.npixels), (idx, np.arange(self.npixels))))
# Construct sparse blur matrices.
# Note that these represent [1 0 1] blurs, excluding the central element
self.blurs = []
for d in range(self.dim):
blur = 0.0
for offset in (-1, 1):
offset_vec = np.zeros((1, self.dim))
offset_vec[:, d] = offset
neighbor_hash = self._hash_coords(unique_coords + offset_vec)
valid_coord, idx = get_valid_idx(unique_hashes, neighbor_hash)
blur = blur + csr_matrix((np.ones((len(valid_coord),)),
(valid_coord, idx)),
shape=(self.nvertices, self.nvertices))
self.blurs.append(blur)
def _hash_coords(self, coord):
"""Hacky function to turn a coordinate into a unique value"""
return np.dot(coord.reshape(-1, self.dim), self.hash_vec)
def splat(self, x):
return self.S.dot(x)
def slice(self, y):
return self.S.T.dot(y)
def blur(self, x):
"""Blur a bilateral-space vector with a 1 2 1 kernel in each dimension"""
assert x.shape[0] == self.nvertices
out = 2 * self.dim * x
for blur in self.blurs:
out = out + blur.dot(x)
return out
def filter(self, x):
"""Apply bilateral filter to an input x"""
return self.slice(self.blur(self.splat(x))) / \
self.slice(self.blur(self.splat(np.ones_like(x))))
def bistochastize(grid, maxiter=10):
"""Compute diagonal matrices to bistochastize a bilateral grid"""
m = grid.splat(np.ones(grid.npixels))
n = np.ones(grid.nvertices)
for i in range(maxiter):
n = np.sqrt(n * m / grid.blur(n))
# Correct m to satisfy the assumption of bistochastization regardless
# of how many iterations have been run.
m = n * grid.blur(n)
Dm = diags(m, 0)
Dn = diags(n, 0)
return Dn, Dm
class BilateralSolver(object):
def __init__(self, grid, params):
self.grid = grid
self.params = params
self.Dn, self.Dm = bistochastize(grid)
def solve(self, x, w):
# Check that w is a vector or a nx1 matrix
if w.ndim == 2:
assert(w.shape[1] == 1)
elif w.ndim == 1:
w = w.reshape(w.shape[0], 1)
A_smooth = (self.Dm - self.Dn.dot(self.grid.blur(self.Dn)))
w_splat = self.grid.splat(w)
A_data = diags(w_splat[:,0], 0)
A = self.params["lam"] * A_smooth + A_data
xw = x * w
b = self.grid.splat(xw)
# Use simple Jacobi preconditioner
A_diag = np.maximum(A.diagonal(), self.params["A_diag_min"])
M = diags(1 / A_diag, 0)
# Flat initialization
y0 = self.grid.splat(xw) / np.maximum(w_splat, 1e-10)
yhat = np.empty_like(y0)
for d in range(x.shape[-1]):
yhat[..., d], info = cg(A, b[..., d], x0=y0[..., d], M=M, maxiter=self.params["cg_maxiter"], tol=self.params["cg_tol"])
xhat = self.grid.slice(yhat)
return xhat, yhat
def solveGrad(self, x, w, saved_yhat, saved_target):
# Check that w is a vector or a nx1 matrix
if w.ndim == 2:
assert(w.shape[1] == 1)
elif w.ndim == 1:
w = w.reshape(w.shape[0], 1)
A_smooth = (self.Dm - self.Dn.dot(self.grid.blur(self.Dn)))
w_splat = self.grid.splat(w)
A_data = diags(w_splat[:,0], 0)
A = self.params["lam"] * A_smooth + A_data
b = self.grid.splat(x)
# Use simple Jacobi preconditioner
A_diag = np.maximum(A.diagonal(), self.params["A_diag_min"])
M = diags(1 / A_diag, 0)
# Flat initialization
# here we should make all w to 1
w_1 = np.ones(w.shape, np.double)
y0 = self.grid.splat(x * w_1) / self.grid.splat(w_1)
yhat = np.empty_like(y0)
for d in range(x.shape[-1]):
yhat[..., d], info = cg(A, b[..., d], x0=y0[..., d], M=M, maxiter=self.params["cg_maxiter"], tol=self.params["cg_tol"])
grad_f_b = yhat
slice_grad_f_b = self.grid.slice(grad_f_b)
grad_t = slice_grad_f_b * w
### calculate grad for confidence
if REQUIRES_CONF_GRAD == True:
grad_diag_A = -1.0 * (grad_f_b * saved_yhat)
grad_conf = self.grid.slice(grad_diag_A) + slice_grad_f_b * saved_target
else:
grad_conf = None
return grad_t, grad_conf
def solve(grid, target, confidence, bs_params, im_shape):
t = target.reshape(-1, im_shape[2] ).astype(np.double)
c = confidence.reshape(-1, 1).astype(np.double) # / (pow(2,16)-1)
xhat, yhat = BilateralSolver(grid, bs_params).solve(t, c)
xhat = xhat.reshape(im_shape)
return xhat, yhat
def solveForGrad(grid, grad_f_x, confidence, bs_params, im_shape, yhat,
target):
grad = grad_f_x.reshape(-1, im_shape[2] ).astype(np.double)
c = confidence.reshape(-1, 1).astype(np.double)
t = target.reshape(-1, im_shape[2] ).astype(np.double)
grad_t, grad_c = BilateralSolver(grid, bs_params).solveGrad(grad, c,
yhat, t)
grad_t = grad_t.reshape(im_shape)
if REQUIRES_CONF_GRAD == True:
grad_c = grad_c.reshape(im_shape)
grad_c = grad_c.sum(2)
else:
grad_c = None
return grad_t, grad_c
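# --------------------------------------------------------------------------
# Minimal usage sketch with random data (illustrative only). Real use would
# load a reference image, a target signal (e.g. a noisy depth map) and a
# per-pixel confidence; the sigma values and solver parameters below are
# example settings, not recommended defaults.
if __name__ == "__main__":
    np.random.seed(0)
    reference = np.random.rand(64, 64, 3) * MAX_VAL
    target = np.random.rand(64, 64, 1)
    confidence = np.ones((64, 64, 1))
    grid = BilateralGrid(reference, sigma_spatial=8, sigma_luma=4, sigma_chroma=4)
    bs_params = {"lam": 128, "A_diag_min": 1e-5, "cg_maxiter": 25, "cg_tol": 1e-5}
    smoothed, _ = solve(grid, target, confidence, bs_params, target.shape)
    print(smoothed.shape)  # (64, 64, 1)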
|
11464798
|
from pkg_resources import get_distribution
__import__('pkg_resources').declare_namespace(__name__)
__version__ = get_distribution("telesign").version
__author__ = "TeleSign"
__copyright__ = "Copyright 2017, TeleSign Corp."
__credits__ = ["TeleSign"]
__license__ = "MIT"
__maintainer__ = "TeleSign Corp."
__email__ = "<EMAIL>"
__status__ = "Production"
|
11464820
|
class EmrClusterBuilder(object):
def __init__(self, emr_client, ec2_client):
super(EmrClusterBuilder, self).__init__()
self.emr = emr_client
self.ec2 = ec2_client # boto3.client("ec2", region_name=region_name)
def get_security_group_id(self, group_name, region_name):
response = self.ec2.describe_security_groups(GroupNames=[group_name])
return response["SecurityGroups"][0]["GroupId"]
def create_cluster(
self,
region_name,
cluster_name,
release_label="emr-5.16.0",
master_instance_type="m3.xlarge",
num_core_nodes=2,
core_node_instance_type="m3.2xlarge",
):
emr_master_security_group_id = self.get_security_group_id(
"ElasticMapReduce-master", region_name=region_name
)
emr_slave_security_group_id = self.get_security_group_id(
"ElasticMapReduce-slave", region_name=region_name
)
cluster_response = self.emr.run_job_flow(
Name=cluster_name,
ReleaseLabel=release_label,
Instances={
"InstanceGroups": [
{
"Name": "Master nodes",
"Market": "ON_DEMAND",
"InstanceRole": "MASTER",
"InstanceType": master_instance_type,
"InstanceCount": 1,
},
{
"Name": "Slave nodes",
"Market": "ON_DEMAND",
"InstanceRole": "CORE",
"InstanceType": core_node_instance_type,
"InstanceCount": num_core_nodes,
},
],
"KeepJobFlowAliveWhenNoSteps": True,
"Ec2KeyName": "databand-dev",
"EmrManagedMasterSecurityGroup": emr_master_security_group_id,
"EmrManagedSlaveSecurityGroup": emr_slave_security_group_id,
},
VisibleToAllUsers=True,
JobFlowRole="EMR_EC2_DefaultRole",
ServiceRole="EMR_DefaultRole",
Applications=[
{"Name": "hadoop"},
{"Name": "spark"},
{"Name": "hive"},
{"Name": "livy"},
{"Name": "zeppelin"},
],
)
return cluster_response["JobFlowId"]
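# --------------------------------------------------------------------------
# Minimal usage sketch (illustrative only). Running it creates real, billable
# AWS resources and assumes the default EMR security groups and the hardcoded
# "databand-dev" EC2 key pair exist in the target account; the region and
# cluster name are placeholders.
if __name__ == "__main__":
    import boto3

    region = "us-east-1"
    builder = EmrClusterBuilder(
        emr_client=boto3.client("emr", region_name=region),
        ec2_client=boto3.client("ec2", region_name=region),
    )
    print(builder.create_cluster(region_name=region, cluster_name="example-cluster"))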
|
11464823
|
import os
from .deploy_manifest import log
from . import exceptions as exc
class BlueGreen(object):
"""This class orchestrates a Blue-Green deployment in the style of the
Autopilot CF CLI plugin.
"""
def __init__(self,
space,
manifest,
verbose=True,
wait_kwargs=None,
**kwargs):
"""Initializes the deployment
Args:
space (cf_api.deploy_space.Space):
The space to which the application should be deployed
manifest (cf_api.deploy_manifest.Deploy):
The manifest of the application to be deployed
verbose (bool):
Whether the deployment should be verbose in its output
wait_kwargs (dict|None):
Arguments to pass to the application ``wait_for_app_start``
function when waiting for the application to start
"""
self.space = space
self.manifest = manifest
self.verbose = verbose
self.venerable_name = '-'.join([self.app_name, 'venerable'])
self.venerable_manifest = self.manifest.clone(self.venerable_name)
self.app = None
self.venerable_app = None
self.wait_kwargs = wait_kwargs or {}
@property
def cc(self):
return self.space.cc
@property
def app_name(self):
return self.manifest.name
@classmethod
def parse_manifest(cls, space, manifest_filename, **kwargs):
"""Parses a deployment manifest and creates a BlueGreen instance
for each application in the manifest.
Args:
space (cf_api.deploy_space.Space):
space to which the manifest should be deployed
manifest_filename (str):
application manifest to be deployed
**kwargs (dict):
passed into the BlueGreen constructor
Returns:
list[BlueGreen]
"""
space.set_debug(kwargs.get('verbose'))
manifests = space.get_deploy_manifest(manifest_filename)
return [BlueGreen(space, manifest, **kwargs) for manifest in manifests]
def log(self, *args):
if self.verbose:
return log(*args)
def _load_apps(self):
self._load_app()
self._load_venerable_app()
def _load_app(self):
try:
self.app = self.space.get_app_by_name(self.app_name)
except exc.ResponseException as e:
self.app = None
if 404 != e.code:
raise
def _load_venerable_app(self):
try:
self.venerable_app = self.space.get_app_by_name(
self.venerable_name)
except exc.ResponseException as e:
self.venerable_app = None
if 404 != e.code:
raise
def _rename_app(self):
self._load_app()
if self.app:
self._load_venerable_app()
if self.venerable_app:
raise exc.InvalidStateException(
'attempting to rename app to venerable, but venerable '
'already exists', 409)
return self.cc.apps(self.app.guid)\
.set_params(name=self.venerable_name).put().data
self._load_apps()
return None
def _destroy_venerable_app(self):
self._load_venerable_app()
if self.venerable_app:
self._load_app()
if not self.app:
raise exc.InvalidStateException(
'attempting to destroy venerable app, but no app will take'
' its place! aborting...', 409)
return self.venerable_manifest.destroy(destroy_routes=False)
self._load_apps()
return None
def wait_and_cleanup(self):
"""Waits for the new application to start and then destroys the old
version of the app.
"""
self.log('Waiting for app to start...')
self.manifest.wait_for_app_start(
tailing=self.verbose, **self.wait_kwargs)
self.log('OK')
self.log('Destroying venerable...')
self._load_venerable_app()
if self.venerable_app:
self._destroy_venerable_app()
self.log('OK')
def deploy_app(self):
"""Deploys the new application
"""
self.log('Checking apps...')
self._load_apps()
self.log('OK')
if self.venerable_app:
if self.app:
self.log('Leftover venerable detected with replacement! '
'Deleting...')
self._destroy_venerable_app()
self.log('OK')
else:
self.log('Leftover venerable detected with no replacement! '
'Aborting...')
raise exc.InvalidStateException(
'Leftover venerable detected! Rename it and try again.',
409)
if self.app:
self.log('Renaming app to venerable...')
self._rename_app()
self.log('OK')
self.manifest.push()
def deploy(self):
"""Deploy the new application, wait for it to start, then clean up the
old application.
"""
self.deploy_app()
self.wait_and_cleanup()
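# Usage sketch (illustrative; `space` would be built as in main() below and
# 'manifest.yml' is a placeholder filename):
#
#   for deployment in BlueGreen.parse_manifest(space, 'manifest.yml', verbose=True):
#       deployment.deploy()  # push new app, wait for start, remove *-venerable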
def main():
import argparse
from getpass import getpass
from .deploy_space import Space
import cf_api
args = argparse.ArgumentParser()
args.add_argument('--cloud-controller', required=True)
args.add_argument('-u', '--user')
args.add_argument('-o', '--org', required=True)
args.add_argument('-s', '--space', required=True)
args.add_argument('-m', '--manifest', required=True)
args = args.parse_args()
kwargs = dict(
client_id='cf',
client_secret='',
)
if args.user:
kwargs['username'] = args.user
kwargs['password'] = getpass()
else:
kwargs['refresh_token'] = os.getenv('CF_REFRESH_TOKEN', '')
cc = cf_api.new_cloud_controller(args.cloud_controller, **kwargs)
space = Space(cc, org_name=args.org, space_name=args.space).set_debug(True)
for manifest in space.deploy_blue_green(args.manifest):
pass
for manifest in space.wait_blue_green(args.manifest):
pass
if '__main__' == __name__:
main()
|
11464830
|
import logging
import pytest
from ocs_ci.framework.testlib import ManageTest, tier1
from ocs_ci.framework.pytest_customization.marks import (
skipif_external_mode,
skipif_ocs_version,
)
from ocs_ci.ocs.cluster import (
validate_compression,
validate_replica_data,
)
from ocs_ci.ocs.constants import CEPHBLOCKPOOL
from ocs_ci.ocs.exceptions import PoolNotReplicatedAsNeeded, PoolNotCompressedAsExpected
log = logging.getLogger(__name__)
@tier1
@skipif_external_mode
@skipif_ocs_version("<4.6")
@pytest.mark.polarion_id("OCS-2391")
class TestMultipleScOnePoolRep2Comp(ManageTest):
"""
Create new rbd pool with replica 2 and compression.
Attach it to 2 new storageclasses.
Create PVCs and PODs for each storageclass.
Run IO.
Delete PODs, PVCs, StorageClasses and the pool.
"""
replica = 2
def test_multiple_sc_one_pool_rep2_comp(
self,
ceph_pool_factory,
storageclass_factory,
pvc_factory,
pod_factory,
):
"""
This test function does the following:
* Creates 2 StorageClasses backed by one new rbd pool
* Creates PVCs using the new StorageClasses
* Mounts each PVC to an app pod
* Runs IO on the app pods
* Verifies compression and replication
"""
log.info("Creating new pool with replica2 and compression")
pool_obj = ceph_pool_factory(
interface=CEPHBLOCKPOOL,
replica=self.replica,
compression="aggressive",
)
log.info(f"Creating first storageclass with pool {pool_obj.name}")
sc_obj1 = storageclass_factory(
interface=CEPHBLOCKPOOL,
new_rbd_pool=False,
pool_name=pool_obj.name,
)
log.info(f"Creating second storageclass with pool {pool_obj.name}")
sc_obj2 = storageclass_factory(
interface=CEPHBLOCKPOOL,
new_rbd_pool=False,
pool_name=pool_obj.name,
)
sc_obj_list = [sc_obj1, sc_obj2]
pod_obj_list = []
log.info("Creating PVCs and PODs")
for sc_obj in sc_obj_list:
pvc_obj = pvc_factory(interface=CEPHBLOCKPOOL, storageclass=sc_obj, size=10)
pod_obj_list.append(pod_factory(interface=CEPHBLOCKPOOL, pvc=pvc_obj))
log.info("Running IO on pods")
for pod_obj in pod_obj_list:
pod_obj.run_io(
"fs",
size="1G",
rate="1500m",
runtime=60,
buffer_compress_percentage=60,
buffer_pattern="0xdeadface",
bs="8K",
jobs=5,
readwrite="readwrite",
)
log.info(f"validating info on pool {pool_obj.name}")
validate_rep_result = validate_replica_data(pool_obj.name, self.replica)
if validate_rep_result is False:
raise PoolNotReplicatedAsNeeded(
f"pool {pool_obj.name} not replicated as expected"
)
validate_comp_result = validate_compression(pool_obj.name)
if validate_comp_result is False:
raise PoolNotCompressedAsExpected(
f"pool {pool_obj.name} not compressed as expected"
)
|
11464832
|
import io
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Generator
from unittest import mock
import pytest
from _pytest._io import terminalwriter
from _pytest.monkeypatch import MonkeyPatch
# These tests were initially copied from py 1.8.1.
def test_terminal_width_COLUMNS(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setenv("COLUMNS", "42")
assert terminalwriter.get_terminal_width() == 42
monkeypatch.delenv("COLUMNS", raising=False)
def test_terminalwriter_width_bogus(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setattr(shutil, "get_terminal_size", mock.Mock(return_value=(10, 10)))
monkeypatch.delenv("COLUMNS", raising=False)
tw = terminalwriter.TerminalWriter()
assert tw.fullwidth == 80
def test_terminalwriter_computes_width(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setattr(terminalwriter, "get_terminal_width", lambda: 42)
tw = terminalwriter.TerminalWriter()
assert tw.fullwidth == 42
def test_terminalwriter_dumb_term_no_markup(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setattr(os, "environ", {"TERM": "dumb", "PATH": ""})
class MyFile:
closed = False
def isatty(self):
return True
with monkeypatch.context() as m:
m.setattr(sys, "stdout", MyFile())
assert sys.stdout.isatty()
tw = terminalwriter.TerminalWriter()
assert not tw.hasmarkup
def test_terminalwriter_not_unicode() -> None:
"""If the file doesn't support Unicode, the string is unicode-escaped (#7475)."""
buffer = io.BytesIO()
file = io.TextIOWrapper(buffer, encoding="cp1252")
tw = terminalwriter.TerminalWriter(file)
tw.write("hello 🌀 wôrld אבג", flush=True)
assert buffer.getvalue() == br"hello \U0001f300 w\xf4rld \u05d0\u05d1\u05d2"
win32 = int(sys.platform == "win32")
class TestTerminalWriter:
@pytest.fixture(params=["path", "stringio"])
def tw(
self, request, tmp_path: Path
) -> Generator[terminalwriter.TerminalWriter, None, None]:
if request.param == "path":
p = tmp_path.joinpath("tmpfile")
f = open(str(p), "w+", encoding="utf8")
tw = terminalwriter.TerminalWriter(f)
def getlines():
f.flush()
with open(str(p), encoding="utf8") as fp:
return fp.readlines()
elif request.param == "stringio":
f = io.StringIO()
tw = terminalwriter.TerminalWriter(f)
def getlines():
f.seek(0)
return f.readlines()
tw.getlines = getlines # type: ignore
tw.getvalue = lambda: "".join(getlines()) # type: ignore
with f:
yield tw
def test_line(self, tw) -> None:
tw.line("hello")
lines = tw.getlines()
assert len(lines) == 1
assert lines[0] == "hello\n"
def test_line_unicode(self, tw) -> None:
msg = "b\u00f6y"
tw.line(msg)
lines = tw.getlines()
assert lines[0] == msg + "\n"
def test_sep_no_title(self, tw) -> None:
tw.sep("-", fullwidth=60)
lines = tw.getlines()
assert len(lines) == 1
assert lines[0] == "-" * (60 - win32) + "\n"
def test_sep_with_title(self, tw) -> None:
tw.sep("-", "hello", fullwidth=60)
lines = tw.getlines()
assert len(lines) == 1
assert lines[0] == "-" * 26 + " hello " + "-" * (27 - win32) + "\n"
def test_sep_longer_than_width(self, tw) -> None:
tw.sep("-", "a" * 10, fullwidth=5)
(line,) = tw.getlines()
# even though the string is wider than the line, still have a separator
assert line == "- aaaaaaaaaa -\n"
@pytest.mark.skipif(sys.platform == "win32", reason="win32 has no native ansi")
@pytest.mark.parametrize("bold", (True, False))
@pytest.mark.parametrize("color", ("red", "green"))
def test_markup(self, tw, bold: bool, color: str) -> None:
text = tw.markup("hello", **{color: True, "bold": bold})
assert "hello" in text
def test_markup_bad(self, tw) -> None:
with pytest.raises(ValueError):
tw.markup("x", wronkw=3)
with pytest.raises(ValueError):
tw.markup("x", wronkw=0)
def test_line_write_markup(self, tw) -> None:
tw.hasmarkup = True
tw.line("x", bold=True)
tw.write("x\n", red=True)
lines = tw.getlines()
if sys.platform != "win32":
assert len(lines[0]) >= 2, lines
assert len(lines[1]) >= 2, lines
def test_attr_fullwidth(self, tw) -> None:
tw.sep("-", "hello", fullwidth=70)
tw.fullwidth = 70
tw.sep("-", "hello")
lines = tw.getlines()
assert len(lines[0]) == len(lines[1])
@pytest.mark.skipif(sys.platform == "win32", reason="win32 has no native ansi")
def test_attr_hasmarkup() -> None:
file = io.StringIO()
tw = terminalwriter.TerminalWriter(file)
assert not tw.hasmarkup
tw.hasmarkup = True
tw.line("hello", bold=True)
s = file.getvalue()
assert len(s) > len("hello\n")
assert "\x1b[1m" in s
assert "\x1b[0m" in s
def assert_color_set():
file = io.StringIO()
tw = terminalwriter.TerminalWriter(file)
assert tw.hasmarkup
tw.line("hello", bold=True)
s = file.getvalue()
assert len(s) > len("hello\n")
assert "\x1b[1m" in s
assert "\x1b[0m" in s
def assert_color_not_set():
f = io.StringIO()
f.isatty = lambda: True # type: ignore
tw = terminalwriter.TerminalWriter(file=f)
assert not tw.hasmarkup
tw.line("hello", bold=True)
s = f.getvalue()
assert s == "hello\n"
def test_should_do_markup_PY_COLORS_eq_1(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setitem(os.environ, "PY_COLORS", "1")
assert_color_set()
def test_should_not_do_markup_PY_COLORS_eq_0(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setitem(os.environ, "PY_COLORS", "0")
assert_color_not_set()
def test_should_not_do_markup_NO_COLOR(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setitem(os.environ, "NO_COLOR", "1")
assert_color_not_set()
def test_should_do_markup_FORCE_COLOR(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setitem(os.environ, "FORCE_COLOR", "1")
assert_color_set()
def test_should_not_do_markup_NO_COLOR_and_FORCE_COLOR(
monkeypatch: MonkeyPatch,
) -> None:
monkeypatch.setitem(os.environ, "NO_COLOR", "1")
monkeypatch.setitem(os.environ, "FORCE_COLOR", "1")
assert_color_not_set()
class TestTerminalWriterLineWidth:
def test_init(self) -> None:
tw = terminalwriter.TerminalWriter()
assert tw.width_of_current_line == 0
def test_update(self) -> None:
tw = terminalwriter.TerminalWriter()
tw.write("hello world")
assert tw.width_of_current_line == 11
def test_update_with_newline(self) -> None:
tw = terminalwriter.TerminalWriter()
tw.write("hello\nworld")
assert tw.width_of_current_line == 5
def test_update_with_wide_text(self) -> None:
tw = terminalwriter.TerminalWriter()
tw.write("乇乂ㄒ尺卂 ㄒ卄丨匚匚")
assert tw.width_of_current_line == 21 # 5*2 + 1 + 5*2
def test_composed(self) -> None:
tw = terminalwriter.TerminalWriter()
text = "café food"
assert len(text) == 9
tw.write(text)
assert tw.width_of_current_line == 9
def test_combining(self) -> None:
tw = terminalwriter.TerminalWriter()
text = "café food"
assert len(text) == 10
tw.write(text)
assert tw.width_of_current_line == 9
@pytest.mark.parametrize(
("has_markup", "code_highlight", "expected"),
[
pytest.param(
True,
True,
"{kw}assert{hl-reset} {number}0{hl-reset}\n",
id="with markup and code_highlight",
),
pytest.param(
True,
False,
"assert 0\n",
id="with markup but no code_highlight",
),
pytest.param(
False,
True,
"assert 0\n",
id="without markup but with code_highlight",
),
pytest.param(
False,
False,
"assert 0\n",
id="neither markup nor code_highlight",
),
],
)
def test_code_highlight(has_markup, code_highlight, expected, color_mapping):
f = io.StringIO()
tw = terminalwriter.TerminalWriter(f)
tw.hasmarkup = has_markup
tw.code_highlight = code_highlight
tw._write_source(["assert 0"])
assert f.getvalue().splitlines(keepends=True) == color_mapping.format([expected])
with pytest.raises(
ValueError,
match=re.escape("indents size (2) should have same size as lines (1)"),
):
tw._write_source(["assert 0"], [" ", " "])
|
11464890
|
import os
import sys
import inspect
class FindFilePath:
def __new__(cls, name) -> str:
if name is None:
raise NameError("Please specify filename")
stack_t = inspect.stack()
ins = inspect.getframeinfo(stack_t[1][0])
this_file_dir = os.path.dirname(os.path.dirname(os.path.abspath(ins.filename)))
_file_path = None
for root, dirs, files in os.walk(this_file_dir, topdown=False):
for _file in files:
if _file == name:
_file_path = os.path.join(root, _file)
break
else:
continue
break
return _file_path
find_file_path = FindFilePath
class File:
@property
def path(self) -> str:
"""
Returns the absolute path of the current file.
For example:
"/User/tech/you/test_dir/test_sample.py"
return "/User/tech/you/test_dir/test_sample.py"
"""
stack_t = inspect.stack()
ins = inspect.getframeinfo(stack_t[1][0])
return os.path.abspath(ins.filename)
@property
def dir(self) -> str:
"""
Returns the absolute path to the directory where the current file resides
For example:
"/User/tech/you/test_dir/test_sample.py"
return "/User/tech/you/test_dir/"
"""
stack_t = inspect.stack()
ins = inspect.getframeinfo(stack_t[1][0])
return os.path.dirname(os.path.abspath(ins.filename))
@property
def dir_dir(self) -> str:
"""
Returns the absolute path of the parent of the current file's directory.
For example:
"/User/tech/you/test_dir/test_sample.py"
return "/User/tech/you/"
"""
stack_t = inspect.stack()
ins = inspect.getframeinfo(stack_t[1][0])
return os.path.dirname(os.path.dirname(os.path.abspath(ins.filename)))
@property
def dir_dir_dir(self) -> str:
"""
Returns the absolute path two directory levels above the current file's directory.
For example:
/User/tech/you/test_dir/test_sample.py
return "/User/you/"
"""
stack_t = inspect.stack()
ins = inspect.getframeinfo(stack_t[1][0])
return os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(ins.filename))))
@staticmethod
def add_to_path(path=None) -> None:
"""
Insert a path into sys.path so that modules under it can be imported.
"""
if path is None:
raise FileNotFoundError("Please setting the File Path")
sys.path.insert(1, path)
@staticmethod
def remove(path) -> None:
"""
Delete the file at the given path.
:param path: path of the file to delete
:return: None
"""
if os.path.isfile(path):
os.remove(path)
else:
raise FileNotFoundError("file does not exist")
file = File()
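if __name__ == "__main__":
    # Minimal usage sketch: print the calling file's location and its parent
    # directories, then put the grandparent directory on sys.path so that
    # sibling packages (hypothetical here) become importable.
    print(file.path)     # absolute path of this file
    print(file.dir)      # directory containing this file
    print(file.dir_dir)  # one level above that directory
    File.add_to_path(file.dir_dir)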
|
11464993
|
from veroviz._common import *
from veroviz._queryOpenWeather import owGetWeather
def privGetWeather(location, id, metricUnits, dataProvider, dataProviderArgs):
try:
dataProvider = dataProvider.lower()
except:
pass
if (weatherDataProviderDictionary[dataProvider] == 'openweather'):
weatherDF = owGetWeather(location, id, metricUnits, dataProviderArgs['APIkey'])
return weatherDF
|
11465016
|
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter, MaxNLocator
import numpy as np
import plotly.graph_objs as go
import seaborn as sns
import plotly.figure_factory as pff
DEFAULT_COLORSCALE = "Viridis"
def plot_distribution(ls, column, bins=None):
"""Plot the distribution of numerical columns.
Create a matplotlib plot with a histogram of the values in a column. The
number of bins in the histogram is decided according to the
Freedman-Diaconis rule unless given by the `bins` parameter.
Parameters
----------
ls : :class:`~lens.Summary`
Lens `Summary`.
column : str
Name of the column.
bins : int, optional
Number of bins to use for histogram. If not given, the
Freedman-Diaconis rule will be used to estimate the best number of
bins. This argument also accepts the formats taken by the `bins`
parameter of matplotlib's :func:`~matplotlib.pyplot.hist`.
Returns
-------
:class:`~matplotlib.figure.Figure`
Matplotlib figure containing the distribution plot.
"""
column_summary = ls.summary(column)
if column_summary["notnulls"] <= 2:
# Plotly refuses to plot histograms if
# the tdigest has too few values
raise ValueError(
"There are fewer than two non-null values in this column"
)
if bins is None:
counts, edges = ls.histogram(column)
else:
xs, counts = ls.tdigest_centroids(column)
counts, edges = np.histogram(xs, weights=counts, bins=bins)
fig, ax = plt.subplots()
ax.bar(
edges[:-1], counts, width=np.diff(edges), label=column, align="edge"
)
ax.set_ylim(bottom=0)
ax.set_xlabel(column)
ax.set_title('Distribution of column "{}"'.format(column))
ax.figure.tight_layout()
return fig
def _set_integer_tick_labels(axis, labels):
"""Use labels dict to set labels on axis"""
axis.set_major_formatter(FuncFormatter(lambda x, _: labels.get(x, "")))
axis.set_major_locator(MaxNLocator(integer=True))
def plot_pairdensity_mpl(ls, column1, column2):
"""Plot the pairwise density between two columns.
This plot is an approximation of a scatterplot through a 2D Kernel
Density Estimate for two numerical variables. When one of the variables
is categorical, a 1D KDE for each of the categories is shown,
normalised to the total number of non-null observations. For two
categorical variables, the plot produced is a heatmap representation of
the contingency table.
Parameters
----------
ls : :class:`~lens.Summary`
Lens `Summary`.
column1 : str
First column.
column2 : str
Second column.
Returns
-------
:class:`plt.Figure`
Matplotlib figure containing the pairwise density plot.
"""
pair_details = ls.pair_details(column1, column2)
pairdensity = pair_details["pairdensity"]
x = np.array(pairdensity["x"])
y = np.array(pairdensity["y"])
Z = np.array(pairdensity["density"])
fig, ax = plt.subplots()
if ls.summary(column1)["desc"] == "categorical":
idx = np.argsort(x)
x = x[idx]
Z = Z[:, idx]
# Create labels and positions for categorical axis
x_labels = dict(enumerate(x))
_set_integer_tick_labels(ax.xaxis, x_labels)
x = np.arange(-0.5, len(x), 1.0)
if ls.summary(column2)["desc"] == "categorical":
idx = np.argsort(y)
y = y[idx]
Z = Z[idx]
y_labels = dict(enumerate(y))
_set_integer_tick_labels(ax.yaxis, y_labels)
y = np.arange(-0.5, len(y), 1.0)
X, Y = np.meshgrid(x, y)
ax.pcolormesh(X, Y, Z, cmap=DEFAULT_COLORSCALE.lower())
ax.set_xlabel(column1)
ax.set_ylabel(column2)
ax.set_title(r"$\it{{ {} }}$ vs $\it{{ {} }}$".format(column1, column2))
return fig
def plot_correlation_mpl(ls, include=None, exclude=None):
"""Plot the correlation matrix for numeric columns
Plot a Spearman rank order correlation coefficient matrix showing the
correlation between columns. The matrix is reordered to group together
columns that have a higher correlation coefficient. The columns to be
plotted in the correlation plot can be selected through either the
``include`` or ``exclude`` keyword arguments. Only one of them can be
given.
Parameters
----------
ls : :class:`~lens.Summary`
Lens `Summary`.
include : list of str
List of columns to include in the correlation plot.
exclude : list of str
List of columns to exclude from the correlation plot.
Returns
-------
:class:`plt.Figure`
Matplotlib figure containing the pairwise density plot.
"""
columns, correlation_matrix = ls.correlation_matrix(include, exclude)
num_cols = len(columns)
if num_cols > 10:
annotate = False
else:
annotate = True
fig, ax = plt.subplots()
sns.heatmap(
correlation_matrix,
annot=annotate,
fmt=".2f",
ax=ax,
xticklabels=columns,
yticklabels=columns,
vmin=-1,
vmax=1,
cmap="RdBu_r",
square=True,
)
ax.xaxis.tick_top()
# Enforce a width of 2.5 inches per cell in the plot,
# capped at 10 inches.
width_inches = min(len(columns) * 2.5, 10)
fig.set_size_inches(width_inches, width_inches)
return fig
def plot_cdf(ls, column, N_cdf=100):
"""Plot the empirical cumulative distribution function of a column.
Creates a matplotlib plot with the empirical CDF of a column.
Parameters
----------
ls : :class:`~lens.Summary`
Lens `Summary`.
column : str
Name of the column.
N_cdf : int
Number of points in the CDF plot.
Returns
-------
:class:`~matplotlib.figure.Figure`
Matplotlib figure containing the CDF plot.
"""
tdigest = ls.tdigest(column)
cdfs = np.linspace(0, 100, N_cdf)
xs = [tdigest.percentile(p) for p in cdfs]
fig, ax = plt.subplots()
ax.set_ylabel("Percentile")
ax.set_xlabel(column)
ax.plot(xs, cdfs)
if ls._report["column_summary"][column]["logtrans"]:
ax.set_xscale("log")
ax.set_title("Empirical Cumulative Distribution Function")
return fig
def plot_pairdensity(ls, column1, column2):
"""Plot the pairwise density between two columns.
This plot is an approximation of a scatterplot through a 2D Kernel
Density Estimate for two numerical variables. When one of the variables
is categorical, a 1D KDE for each of the categories is shown,
normalised to the total number of non-null observations. For two
categorical variables, the plot produced is a heatmap representation of
the contingency table.
Parameters
----------
ls : :class:`~lens.Summary`
Lens `Summary`.
column1 : str
First column.
column2 : str
Second column.
Returns
-------
:class:`plotly.Figure`
Plotly figure containing the pairwise density plot.
"""
pair_details = ls.pair_details(column1, column2)
pairdensity = pair_details["pairdensity"]
x = np.array(pairdensity["x"])
y = np.array(pairdensity["y"])
Z = np.array(pairdensity["density"])
if ls.summary(column1)["desc"] == "categorical":
idx = np.argsort(x)
x = x[idx]
Z = Z[:, idx]
if ls.summary(column2)["desc"] == "categorical":
idx = np.argsort(y)
y = y[idx]
Z = Z[idx]
data = [go.Heatmap(z=Z, x=x, y=y, colorscale=DEFAULT_COLORSCALE)]
layout = go.Layout(title="<i>{}</i> vs <i>{}</i>".format(column1, column2))
layout["xaxis"] = {
"type": pairdensity["x_scale"],
"autorange": True,
"title": column1,
}
layout["yaxis"] = {
"type": pairdensity["y_scale"],
"autorange": True,
"title": column2,
}
fig = go.Figure(data=data, layout=layout)
fig.data[0]["showscale"] = False
return fig
def plot_correlation(ls, include=None, exclude=None):
"""Plot the correlation matrix for numeric columns
Plot a Spearman rank order correlation coefficient matrix showing the
correlation between columns. The matrix is reordered to group together
columns that have a higher correlation coefficient. The columns to be
plotted in the correlation plot can be selected through either the
``include`` or ``exclude`` keyword arguments. Only one of them can be
given.
Parameters
----------
ls : :class:`~lens.Summary`
Lens `Summary`.
include : list of str
List of columns to include in the correlation plot.
exclude : list of str
List of columns to exclude from the correlation plot.
Returns
-------
:class:`plotly.Figure`
Plotly figure containing the pairwise density plot.
"""
columns, correlation_matrix = ls.correlation_matrix(include, exclude)
num_cols = len(columns)
if num_cols > 10:
annotate = False
else:
annotate = True
hover_text = []
for i in range(num_cols):
hover_text.append(
[
"Corr({}, {}) = {:.2g}".format(
columns[i], columns[j], correlation_matrix[i, j]
)
for j in range(num_cols)
]
)
if annotate:
t = np.reshape(
["{:.2g}".format(x) for x in correlation_matrix.flatten()],
correlation_matrix.shape,
)[::-1].tolist()
else:
nrows, ncolumns = correlation_matrix.shape
t = [["" for i in range(nrows)] for j in range(ncolumns)]
fig = pff.create_annotated_heatmap(
z=correlation_matrix.tolist()[::-1],
colorscale="RdBu",
x=columns,
y=columns[::-1],
zmin=-1.0,
zmax=1.0,
annotation_text=t,
text=hover_text[::-1],
hoverinfo="text",
)
w = len(columns) * 2.5 * 72
while w > 600:
w /= np.sqrt(1.4)
fig.layout["width"] = w
fig.layout["height"] = w
fig.data[0]["showscale"] = True
return fig
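# Usage sketch (illustrative): the summary creation call and the column names
# are assumptions; any lens Summary with numeric/categorical columns works with
# the plotting helpers above.
#
#   import lens
#   import pandas as pd
#   ls = lens.summarise(pd.read_csv("data.csv"))   # assumed lens entry point
#   fig = plot_distribution(ls, "age")
#   fig.savefig("age_distribution.png")
#   plot_correlation_mpl(ls).savefig("correlation.png")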
|
11465089
|
from __future__ import absolute_import, division
import torch
from torch import nn
import torch.nn.functional as F
def my_grid_sample(inputs, grid):
return F.grid_sample(inputs, grid, align_corners=True)
def interpolate2d(inputs, size, mode="bilinear"):
return F.interpolate(inputs, size, mode=mode, align_corners=True)
def interpolate2d_as(inputs, target_as, mode="bilinear"):
_, _, h, w = target_as.size()
return interpolate2d(inputs, [h, w], mode=mode)
def _bchw2bhwc(tensor):
return tensor.transpose(1,2).transpose(2,3)
def _bhwc2bchw(tensor):
return tensor.transpose(2,3).transpose(1,2)
def upsample_flow_as(flow, output_as):
size_inputs = flow.size()[2:4]
size_targets = output_as.size()[2:4]
resized_flow = F.interpolate(flow, size=size_targets, mode="bilinear", align_corners=True)
# correct scaling of flow
u, v = resized_flow.chunk(2, dim=1)
u = u * float(size_targets[1] / size_inputs[1])
v = v * float(size_targets[0] / size_inputs[0])
return torch.cat([u, v], dim=1)
def get_grid(x):
b, _, h, w = x.size()
grid_H = torch.linspace(-1.0, 1.0, w).view(1, 1, 1, w).expand(b, 1, h, w).to(device=x.device, dtype=x.dtype)
grid_V = torch.linspace(-1.0, 1.0, h).view(1, 1, h, 1).expand(b, 1, h, w).to(device=x.device, dtype=x.dtype)
grids = torch.cat([grid_H, grid_V], dim=1).requires_grad_(False)
return grids
def get_coordgrid(x):
b, _, h, w = x.size()
grid_h = torch.linspace(0.0, w - 1, w).view(1, 1, 1, w).expand(b, 1, h, w).to(device=x.device, dtype=x.dtype)
grid_v = torch.linspace(0.0, h - 1, h).view(1, 1, h, 1).expand(b, 1, h, w).to(device=x.device, dtype=x.dtype)
ones = torch.ones_like(grid_h)
coordgrid = torch.cat((grid_h, grid_v, ones), dim=1).requires_grad_(False)
return coordgrid
class Meshgrid(nn.Module):
def __init__(self):
super(Meshgrid, self).__init__()
self.width = 0
self.height = 0
self.xx = None
self.yy = None
def _compute_meshgrid(self, width, height):
rangex = torch.arange(0, width)
rangey = torch.arange(0, height)
xx = rangex.repeat(height, 1).contiguous()
yy = rangey.repeat(width, 1).t().contiguous()
self.xx = xx.view(1, 1, height, width)
self.yy = yy.view(1, 1, height, width)
def forward(self, width, height, device=None, dtype=None):
if self.width != width or self.height != height:
self._compute_meshgrid(width=width, height=height)
self.width = width
self.height = height
self.xx = self.xx.to(device=device, dtype=dtype)
self.yy = self.yy.to(device=device, dtype=dtype)
return self.xx, self.yy
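# A minimal, hypothetical self-check of the helpers above: the tensor sizes
# are illustrative only, and the assertions just confirm the expected shapes.
if __name__ == "__main__":
    flow_small = torch.randn(2, 2, 8, 8)      # low-resolution flow (u, v)
    target = torch.randn(2, 3, 32, 32)        # reference tensor for the target size
    flow_up = upsample_flow_as(flow_small, target)
    assert flow_up.shape == (2, 2, 32, 32)    # upsampled and rescaled flow
    grid = get_grid(target)                   # normalized sampling grid in [-1, 1]
    assert grid.shape == (2, 2, 32, 32)
    xx, yy = Meshgrid()(32, 32, device=target.device, dtype=target.dtype)
    assert xx.shape == (1, 1, 32, 32) and yy.shape == (1, 1, 32, 32)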
|
11465243
|
import torch.nn as nn
PADDING_LAYERS = {
'zero': nn.ZeroPad2d,
'reflect': nn.ReflectionPad2d,
'replicate': nn.ReplicationPad2d
}
def build_padding_layer(cfg, *args, **kwargs):
"""Build padding layer.
Args:
        cfg (dict): The padding layer config, which should contain:
            - typename (str): Layer type.
            - layer args: Args needed to instantiate a padding layer.
Returns:
nn.Module: Created padding layer.
"""
if not isinstance(cfg, dict):
raise TypeError('cfg must be a dict')
if 'typename' not in cfg:
raise KeyError('the cfg dict must contain the key "typename"')
cfg_ = cfg.copy()
padding_type = cfg_.pop('typename')
if padding_type not in PADDING_LAYERS:
raise KeyError(f'Unrecognized padding type {padding_type}.')
else:
padding_layer = PADDING_LAYERS.get(padding_type)
layer = padding_layer(*args, **kwargs, **cfg_)
return layer
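# A short usage sketch: the cfg dict is keyed by 'typename' (as enforced
# above), and any extra positional args are forwarded to the nn module.
# The padding width of 1 is purely illustrative.
if __name__ == "__main__":
    pad = build_padding_layer(dict(typename='reflect'), 1)
    assert isinstance(pad, nn.ReflectionPad2d)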
|
11465297
|
from psana import DataSource
from psana.psexp.utils import DataSourceFromString
import argparse
import sys
def detnames():
    # This argument parsing isn't ideal: ideally the user could pass the
    # "python code" specifying the datasource on the command line
    # (e.g. exp=xpptut15,run=[1,2,4,7] or files=['data1.xtc2', 'data2.xtc2']),
    # but keeping the shell from mangling that and then parsing it correctly
    # is challenging. So only single runs, shmem, and filenames are supported
    # via standard sys.argv.
parser = argparse.ArgumentParser()
parser.add_argument("dsname", help="psana datasource experiment/run (e.g. exp=xppd7114,run=43) or xtc2 filename or shmem=<my_shmem_identifier>")
parser.add_argument('-e','--epics', dest='epics', action='store_true', help='Dump epics variable aliases for use with Detector interface')
parser.add_argument('-s','--scan', dest='scan', action='store_true', help='Dump step-scan names for use with Detector interface')
parser.add_argument('-r','--raw', dest='raw', action='store_true', help='Expert only: dump data types in raw data')
parser.add_argument('-i','--ids', dest='ids', action='store_true', help="Expert only: dump segment-id's and unique-id's used by calibration database")
args = parser.parse_args()
ds = DataSourceFromString(args.dsname)
myrun = next(ds.runs())
if args.raw:
headers = ['Name','Det Type','Data Type','Version']
format_string = '{0:%d} | {1:%d} | {2:%d} | {3:%d}'
names = myrun.xtcinfo
elif args.epics:
headers = ['Detector Name','Epics Name']
format_string = '{0:%d} | {1:%d}'
names = myrun.epicsinfo
elif args.scan:
headers = ['Name','Data Type']
format_string = '{0:%d} | {1:%d}'
names = myrun.scaninfo
elif args.ids:
headers = ['Name','Data Type','Segments','UniqueId']
format_string = '{0:%d} | {1:%d} | {2:%d} | {3:%d}'
names = myrun.detinfo.keys()
newnames = []
for name in names:
datatype = name[1]
data = getattr(myrun.Detector(name[0]),datatype)
segments = ','.join([str(segid) for segid in data._sorted_segment_ids])
newnames.append((name[0],datatype,segments,data._uniqueid))
names = newnames
else:
headers = ['Name','Data Type']
format_string = '{0:%d} | {1:%d}'
names = myrun.detinfo.keys()
maxlen = [len(h) for h in headers]
for ntuple in names:
lengths = [len(n) for n in ntuple]
maxlen = [max(oldmax,length) for oldmax,length in zip(maxlen,lengths)]
# assumes that data rows are tuples
template = format_string % tuple(maxlen)
header = template.format(*headers)
print('-'*len(header))
print(header)
print('-'*len(header))
for n in names:
print(template.format(*n))
print('-'*len(header))
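# A small worked example of the two-stage formatting above (hypothetical
# widths): with maxlen = [4, 9], format_string % tuple(maxlen) becomes
# '{0:4} | {1:9}', and template.format('Name', 'Data Type') then renders
# the row 'Name | Data Type' padded to those column widths.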
|
11465309
|
import matplotlib.pyplot as plt
import os
import numpy as np
SMALL_SIZE = 24
MEDIUM_SIZE = 24
BIGGER_SIZE = 24
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
base_dir = './results_usrnet_8iter'
result_list = sorted(os.listdir(base_dir))
default_psnr = list()
default_ssim = list()
k_gan_psnr = list()
k_gan_ssim = list()
noise_est_psnr = list()
noise_est_ssim = list()
noise_kernel_psnr = list()
noise_kernel_ssim = list()
for ind,folder in enumerate(result_list):
if not os.path.exists(os.path.join(base_dir,folder,'psnr_log.txt')):
continue
with open(os.path.join(base_dir,folder,'psnr_log.txt'), 'r') as f:
data = f.readlines()
default_psnr.append(float(data[0].split()[4]))
default_ssim.append(float(data[0].split()[-1]))
k_gan_psnr.append(float(data[1].split()[4]))
k_gan_ssim.append(float(data[1].split()[-1]))
noise_est_psnr.append(float(data[2].split()[4]))
noise_est_ssim.append(float(data[2].split()[-1]))
noise_kernel_psnr.append(float(data[3].split()[4]))
noise_kernel_ssim.append(float(data[3].split()[-1]))
print(f'Default PSNR: {np.mean(default_psnr):.3f}, STD: {np.std(default_psnr):.3f}')
print(f'Default SSIM: {np.mean(default_ssim):.3f}, STD: {np.std(default_ssim):.3f}')
print(f'noise PSNR: {np.mean(noise_est_psnr):.3f}, STD: {np.std(noise_est_psnr):.3f}')
print(f'noise SSIM: {np.mean(noise_est_ssim):.3f}, STD: {np.std(noise_est_ssim):.3f}')
print(f'Kernel PSNR: {np.mean(k_gan_psnr):.3f}, STD: {np.std(k_gan_psnr):.3f}')
print(f'Kernel SSIM: {np.mean(k_gan_ssim):.3f}, STD: {np.std(k_gan_ssim):.3f}')
print(f'noise+Kernel PSNR: {np.mean(noise_kernel_psnr):.3f}, STD: {np.std(noise_kernel_psnr):.3f}')
print(f'noise+kernel SSIM: {np.mean(noise_kernel_ssim):.3f}, STD: {np.std(noise_kernel_ssim):.3f}')
plt.figure()
plt.title('Method Comparison')
plt.xlabel('image')
plt.ylabel('PSNR [dB]')
plt.plot(default_psnr,'--o')
plt.plot(k_gan_psnr,'--o')
# plt.plot(noise_est_psnr)
# plt.plot(noise_kernel_psnr)
plt.legend([f'default mean={np.mean(default_psnr):.2f}', f'kernel est mean={np.mean(k_gan_psnr):.2f}', 'noise est', 'kernel+noise'])
plt.figure()
plt.title('Method Comparison')
plt.xlabel('image')
plt.ylabel('SSIM')
plt.plot(default_ssim,'--o')
plt.plot(k_gan_ssim,'--o')
# plt.plot(noise_est_ssim)
# plt.plot(noise_kernel_ssim)
plt.legend([f'default mean={np.mean(default_ssim):.3f}',f'kernel est mean={np.mean(k_gan_ssim):.3f}', 'noise est', 'kernel+noise'])
plt.show()
|
11465320
|
from progress.bar import Bar
from config.test_config import BasicParam
import os
import cv2 as cv
import time
import torch
import numpy as np
from dataset.dataset import YorkDataset
from utils.utils import load_model
from utils.reconstruct import save_pic_mat, save_image
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if device.type == 'cpu':
raise Exception('cpu version for training is not implemented.')
print('Using device: ', device)
BasicParameters = BasicParam()
log_path = BasicParameters.save_path
model = BasicParameters.model
batch_size = BasicParameters.batch_size
num_workers = BasicParameters.num_workers
model = load_model(model, BasicParameters.load_model_path, BasicParameters.resume,
selftrain=BasicParameters.selftrain)
model = model.cuda()
test_dataset = YorkDataset(BasicParameters.dataset_dir, BasicParameters)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size,
shuffle=False, num_workers=num_workers, pin_memory=True)
def inference(model, data_loader, threshold_list, lmbd_list, showvideo=True):
if showvideo:
win = "Line Detection"
cv.namedWindow(win)
cv.moveWindow(win, 60, 50)
model.eval()
with torch.no_grad():
for lmbd in lmbd_list:
for thresh in threshold_list:
time_list = []
num_iters = len(data_loader)
bar = Bar('Threshold:{}'.format(thresh), max=num_iters)
if not showvideo:
for iter_id, batch in enumerate(data_loader):
batch['input'] = batch['input'].float().cuda()
filename = batch['filename'][0]
torch.cuda.synchronize()
start_time = time.time()
outputs = model(batch['input'])
tmp_p, tmp_l, tmp_c, total_time = save_pic_mat(lmbd, thresh, outputs, batch['origin_img'], filename, log_path, save_mat=True)
mix_pic = np.concatenate([tmp_p, tmp_l, tmp_c], axis=1)
time_list.append(total_time - start_time)
save_dir = log_path + '/pic/' + str(thresh)
os.makedirs(save_dir, exist_ok=True)
cv.imwrite(save_dir + '/' + batch['filename'][0] + '.png', mix_pic)
Bar.suffix = '[{0}/{1}]|'.format(iter_id, num_iters)
bar.next()
bar.finish()
total = sum(time_list)
print('Avg time per image: ', total/len(time_list))
print('FPS: ', 1/(total / len(time_list)))
else:
for iter_id, batch in enumerate(data_loader):
batch['input'] = batch['input'].float().cuda()
torch.cuda.synchronize()
start_time = time.time()
outputs = model(batch['input'])
mix_pic, lines, total_time = save_image(lmbd, thresh, outputs, batch['origin_img'])
time_list.append(total_time - start_time)
total = sum(time_list)
fps = 1/(total / len(time_list))
cv.imshow(win, mix_pic)
cv.waitKey(1)
if showvideo:
cv.destroyAllWindows()
print('Starting testing...')
if not BasicParameters.showvideo:
# threshold of the root-point detection confidence
threshold_list=[0.01, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8]
# power coefficient α in point filter module
lmbd_list = [0.5]
inference(model, test_loader, threshold_list, lmbd_list, showvideo=False)
else:
threshold_list = [0.25]
lmbd_list = [0.5]
assert len(threshold_list) == 1 and len(lmbd_list) == 1
inference(model, test_loader, threshold_list, lmbd_list, showvideo=True)
|
11465409
|
from .pypi import PyPi, __red_end_user_data_statement__
def setup(bot):
bot.add_cog(PyPi(bot))
|
11465472
|
import os.path
import numpy as np
from PIL import Image
from .cityscapes import remap_labels_to_train_ids
from .gta5 import GTA5 #, LABEL2TRAIN
from .data_loader import register_data_params, register_dataset_obj
@register_dataset_obj('cyclegta5')
class CycleGTA5(GTA5):
def collect_ids(self):
ids = GTA5.collect_ids(self)
existing_ids = []
for id in ids:
filename = '{:05d}.png'.format(id)
if os.path.exists(os.path.join(self.root, 'images', filename)):
existing_ids.append(id)
return existing_ids
def __getitem__(self, index):
id = self.ids[index]
filename = '{:05d}.png'.format(id)
img_path = os.path.join(self.root, 'images', filename)
label_path = os.path.join(self.root, 'labels', filename)
img = Image.open(img_path).convert('RGB')
target = Image.open(label_path)
img = img.resize(target.size, resample=Image.BILINEAR)
if self.transform is not None:
img = self.transform(img)
if self.remap_labels:
target = np.asarray(target)
target = remap_labels_to_train_ids(target)
#target = self.label2train(target)
target = Image.fromarray(target, 'L')
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
|
11465520
|
from django.test import TestCase
from .models import (
HouseholdBillTotal,
HouseholdServiceTotal,
FinancialYear,
BudgetPhase,
HouseholdService,
HouseholdClass,
)
from scorecard.models import Geography
class HouseholdsTestCase(TestCase):
def setUp(self):
FinancialYear.objects.bulk_create(
[
FinancialYear(budget_year="2015/16", active=True),
FinancialYear(budget_year="2016/17", active=True),
FinancialYear(budget_year="2017/18", active=True),
]
)
BudgetPhase.objects.bulk_create(
[BudgetPhase(name="Audited Outcome"), BudgetPhase(name="Original Budget"),]
)
HouseholdClass.objects.bulk_create(
[
HouseholdClass(name="Middle Income Range"),
HouseholdClass(name="Indigent HH receiving FBS"),
]
)
HouseholdService.objects.bulk_create(
[
HouseholdService(name="Water"),
HouseholdService(name="Electricity"),
HouseholdService(name="Sanitation"),
]
)
        Geography.objects.bulk_create(
[
Geography(
geo_level="municipality",
geo_code="JHB",
name="City of Johannesburg",
parent_level="province",
parent_code="GT",
category="A",
)
]
)
HouseholdBillTotal.objects.create()
def test_bill_total(self):
pass
def test_service_total(self):
pass
def test_average_increase(self):
pass
|
11465529
|
from django.test import TestCase, override_settings
from django.utils.translation import override
from translations.languages import _get_supported_language, \
_get_default_language, _get_active_language, \
_get_all_languages, _get_all_choices, \
_get_translation_languages, _get_translation_choices, \
_get_translate_language, _get_probe_language, \
translate, probe
class GetSupportedLanguageTest(TestCase):
"""Tests for `_get_supported_language`."""
def test_unaccented(self):
self.assertEqual(
_get_supported_language('en'),
'en'
)
def test_nonexisting_accented(self):
self.assertEqual(
_get_supported_language('en-us'),
'en'
)
def test_existing_accented(self):
self.assertEqual(
_get_supported_language('en-gb'),
'en-gb'
)
def test_invalid(self):
with self.assertRaises(ValueError) as error:
_get_supported_language('xx')
self.assertEqual(
error.exception.args[0],
'`xx` is not a supported language.'
)
class GetDefaultLanguageTest(TestCase):
"""Tests for `_get_default_language`."""
@override_settings(LANGUAGE_CODE='en')
def test_unaccented(self):
self.assertEqual(
_get_default_language(),
'en'
)
@override_settings(LANGUAGE_CODE='en-us')
def test_nonexisting_accented(self):
self.assertEqual(
_get_default_language(),
'en'
)
@override_settings(LANGUAGE_CODE='en-gb')
def test_existing_accented(self):
self.assertEqual(
_get_default_language(),
'en-gb'
)
@override_settings(LANGUAGE_CODE='xx')
def test_invalid(self):
with self.assertRaises(ValueError) as error:
_get_default_language()
self.assertEqual(
error.exception.args[0],
'`xx` is not a supported language.'
)
class GetActiveLanguageTest(TestCase):
"""Tests for `_get_active_language`."""
@override(language='en', deactivate=True)
def test_unaccented(self):
self.assertEqual(
_get_active_language(),
'en'
)
@override(language='en-us', deactivate=True)
def test_nonexisting_accented(self):
self.assertEqual(
_get_active_language(),
'en'
)
@override(language='en-gb', deactivate=True)
def test_existing_accented(self):
self.assertEqual(
_get_active_language(),
'en-gb'
)
@override(language='xx', deactivate=True)
def test_invalid(self):
with self.assertRaises(ValueError) as error:
_get_active_language()
self.assertEqual(
error.exception.args[0],
'`xx` is not a supported language.'
)
class GetAllLanguagesTest(TestCase):
"""Tests for `_get_all_languages`."""
def test_get_all_languages(self):
self.assertListEqual(
_get_all_languages(),
[
'en',
'en-gb',
'de',
'tr',
]
)
class GetAllChoicesTest(TestCase):
"""Tests for `_get_all_choices`."""
def test_get_all_choices(self):
self.assertListEqual(
_get_all_choices(),
[
(None, '---------'),
('en', 'English'),
('en-gb', 'English (Great Britain)'),
('de', 'German'),
('tr', 'Turkish')
]
)
class GetTranslationLanguagesTest(TestCase):
"""Tests for `_get_translation_languages`."""
@override_settings(LANGUAGE_CODE='en')
def test_unaccented_default(self):
self.assertListEqual(
_get_translation_languages(),
[
'en-gb',
'de',
'tr',
]
)
@override_settings(LANGUAGE_CODE='en-us')
def test_nonexisting_accented_default(self):
self.assertListEqual(
_get_translation_languages(),
[
'en-gb',
'de',
'tr',
]
)
@override_settings(LANGUAGE_CODE='en-gb')
def test_existing_accented_default(self):
self.assertListEqual(
_get_translation_languages(),
[
'en',
'de',
'tr',
]
)
@override_settings(LANGUAGE_CODE='xx')
def test_invalid_default(self):
with self.assertRaises(ValueError) as error:
_get_translation_languages()
self.assertEqual(
error.exception.args[0],
'`xx` is not a supported language.'
)
class GetTranslationChoicesTest(TestCase):
"""Tests for `_get_translation_choices`."""
@override_settings(LANGUAGE_CODE='en')
def test_unaccented_default(self):
self.assertListEqual(
_get_translation_choices(),
[
(None, '---------'),
('en-gb', 'English (Great Britain)'),
('de', 'German'),
('tr', 'Turkish')
]
)
@override_settings(LANGUAGE_CODE='en-us')
def test_nonexisting_accented_default(self):
self.assertListEqual(
_get_translation_choices(),
[
(None, '---------'),
('en-gb', 'English (Great Britain)'),
('de', 'German'),
('tr', 'Turkish')
]
)
@override_settings(LANGUAGE_CODE='en-gb')
def test_existing_accented_default(self):
self.assertListEqual(
_get_translation_choices(),
[
(None, '---------'),
('en', 'English'),
('de', 'German'),
('tr', 'Turkish')
]
)
@override_settings(LANGUAGE_CODE='xx')
def test_invalid_default(self):
with self.assertRaises(ValueError) as error:
_get_translation_choices()
self.assertEqual(
error.exception.args[0],
'`xx` is not a supported language.'
)
class GetTranslateLanguageTest(TestCase):
"""Tests for `_get_translate_language`."""
@override(language='en', deactivate=True)
def test_unaccented_active(self):
self.assertEqual(
_get_translate_language(),
'en'
)
@override(language='en-us', deactivate=True)
def test_nonexisting_accented_active(self):
self.assertEqual(
_get_translate_language(),
'en'
)
@override(language='en-gb', deactivate=True)
def test_existing_accented_active(self):
self.assertEqual(
_get_translate_language(),
'en-gb'
)
@override(language='xx', deactivate=True)
def test_invalid_active(self):
with self.assertRaises(ValueError) as error:
_get_translate_language()
self.assertEqual(
error.exception.args[0],
'`xx` is not a supported language.'
)
def test_unaccented_custom(self):
self.assertEqual(
_get_translate_language('en'),
'en'
)
def test_nonexisting_accented_custom(self):
self.assertEqual(
_get_translate_language('en-us'),
'en'
)
def test_existing_accented_custom(self):
self.assertEqual(
_get_translate_language('en-gb'),
'en-gb'
)
def test_invalid_custom(self):
with self.assertRaises(ValueError) as error:
_get_translate_language('xx')
self.assertEqual(
error.exception.args[0],
'`xx` is not a supported language.'
)
class GetProbeLanguageTest(TestCase):
"""Tests for `_get_probe_language`."""
@override(language='en', deactivate=True)
def test_unaccented_active(self):
self.assertEqual(
_get_probe_language(),
'en'
)
@override(language='en-us', deactivate=True)
def test_nonexisting_accented_active(self):
self.assertEqual(
_get_probe_language(),
'en'
)
@override(language='en-gb', deactivate=True)
def test_existing_accented_active(self):
self.assertEqual(
_get_probe_language(),
'en-gb'
)
@override(language='xx', deactivate=True)
def test_invalid_active(self):
with self.assertRaises(ValueError) as error:
_get_probe_language()
self.assertEqual(
error.exception.args[0],
'`xx` is not a supported language.'
)
def test_unaccented_custom_str(self):
self.assertEqual(
_get_probe_language('en'),
'en'
)
def test_nonexisting_accented_custom_str(self):
self.assertEqual(
_get_probe_language('en-us'),
'en'
)
def test_existing_accented_custom_str(self):
self.assertEqual(
_get_probe_language('en-gb'),
'en-gb'
)
def test_invalid_custom_str(self):
with self.assertRaises(ValueError) as error:
_get_probe_language('xx')
self.assertEqual(
error.exception.args[0],
'`xx` is not a supported language.'
)
def test_unaccented_custom_list(self):
self.assertEqual(
_get_probe_language(['en']),
['en']
)
def test_nonexisting_accented_custom_list(self):
self.assertEqual(
_get_probe_language(['en-us']),
['en']
)
def test_existing_accented_custom_list(self):
self.assertEqual(
_get_probe_language(['en-gb']),
['en-gb']
)
def test_invalid_custom_list(self):
with self.assertRaises(ValueError) as error:
_get_probe_language(['xx'])
self.assertEqual(
error.exception.args[0],
'`xx` is not a supported language.'
)
class TranslateTest(TestCase):
"""Tests for `_TRANSLATE`."""
@override_settings(LANGUAGE_CODE='en')
def test_default_unaccented(self):
self.assertEqual(
translate.DEFAULT,
'en'
)
@override_settings(LANGUAGE_CODE='en-us')
def test_default_nonexisting_accented(self):
self.assertEqual(
translate.DEFAULT,
'en'
)
@override_settings(LANGUAGE_CODE='en-gb')
def test_default_existing_accented(self):
self.assertEqual(
translate.DEFAULT,
'en-gb'
)
@override_settings(LANGUAGE_CODE='xx')
def test_default_invalid(self):
with self.assertRaises(ValueError) as error:
translate.DEFAULT
self.assertEqual(
error.exception.args[0],
'`xx` is not a supported language.'
)
@override(language='en', deactivate=True)
def test_active_unaccented(self):
self.assertEqual(
translate.ACTIVE,
'en'
)
@override(language='en-us', deactivate=True)
def test_active_nonexisting_accented(self):
self.assertEqual(
translate.ACTIVE,
'en'
)
@override(language='en-gb', deactivate=True)
def test_active_existing_accented(self):
self.assertEqual(
translate.ACTIVE,
'en-gb'
)
@override(language='xx', deactivate=True)
def test_active_invalid(self):
with self.assertRaises(ValueError) as error:
translate.ACTIVE
self.assertEqual(
error.exception.args[0],
'`xx` is not a supported language.'
)
class ProbeTest(TestCase):
"""Tests for `_PROBE`."""
@override_settings(LANGUAGE_CODE='en')
def test_default_unaccented(self):
self.assertEqual(
probe.DEFAULT,
'en'
)
@override_settings(LANGUAGE_CODE='en-us')
def test_default_nonexisting_accented(self):
self.assertEqual(
probe.DEFAULT,
'en'
)
@override_settings(LANGUAGE_CODE='en-gb')
def test_default_existing_accented(self):
self.assertEqual(
probe.DEFAULT,
'en-gb'
)
@override_settings(LANGUAGE_CODE='xx')
def test_default_invalid(self):
with self.assertRaises(ValueError) as error:
probe.DEFAULT
self.assertEqual(
error.exception.args[0],
'`xx` is not a supported language.'
)
@override(language='en', deactivate=True)
def test_active_unaccented(self):
self.assertEqual(
probe.ACTIVE,
'en'
)
@override(language='en-us', deactivate=True)
def test_active_nonexisting_accented(self):
self.assertEqual(
probe.ACTIVE,
'en'
)
@override(language='en-gb', deactivate=True)
def test_active_existing_accented(self):
self.assertEqual(
probe.ACTIVE,
'en-gb'
)
@override(language='xx', deactivate=True)
def test_active_invalid(self):
with self.assertRaises(ValueError) as error:
probe.ACTIVE
self.assertEqual(
error.exception.args[0],
'`xx` is not a supported language.'
)
@override(language='en', deactivate=True)
def test_default_active_same(self):
self.assertEqual(
probe.DEFAULT_ACTIVE,
'en'
)
@override(language='de', deactivate=True)
def test_default_active_different(self):
self.assertEqual(
probe.DEFAULT_ACTIVE,
['en', 'de']
)
@override_settings(LANGUAGE_CODE='en')
def test_translations_unaccented_default(self):
self.assertListEqual(
probe.TRANSLATION,
[
'en-gb',
'de',
'tr',
]
)
@override_settings(LANGUAGE_CODE='en-us')
def test_translations_nonexisting_accented_default(self):
self.assertListEqual(
probe.TRANSLATION,
[
'en-gb',
'de',
'tr',
]
)
@override_settings(LANGUAGE_CODE='en-gb')
def test_translations_existing_accented_default(self):
self.assertListEqual(
probe.TRANSLATION,
[
'en',
'de',
'tr',
]
)
@override_settings(LANGUAGE_CODE='xx')
def test_translations_invalid_default(self):
with self.assertRaises(ValueError) as error:
probe.TRANSLATION
self.assertEqual(
error.exception.args[0],
'`xx` is not a supported language.'
)
def test_all(self):
self.assertListEqual(
probe.ALL,
[
'en',
'en-gb',
'de',
'tr',
]
)
|
11465588
|
from collections import namedtuple
from .nodes import *
import re
ConditionalBlock = namedtuple('ConditionalBlock', 'parent source output skip_to_end')
class ParamStr(str):
def __new__(cls, s, args, n):
self = super().__new__(cls, s)
self._args = args
self._arg_idx = n
return self
@property
def escape(self):
return ParamStr('"%s"' % self.replace('"', '\\"'), self._args,
self._arg_idx)
@property
def remainder(self):
return ParamStr(','.join(self._args[self._arg_idx:]), [], 0)
class Preprocessor:
def __init__(self, input, filename):
import os
self.dirname = os.path.dirname(filename)
self.search_paths = [os.path.join(os.path.dirname(__file__), 'include')]
self.lines = input.splitlines()
self.lines.insert(0, '#include <builtin.h>')
self.lineptr = 0
self.output = ''
self.replacements = {}
self.block = ConditionalBlock(None, None, True, False)
import time
self.replacements.update({
'__FILE__': (0, 'simple', filename.replace('\\', '\\\\')),
'__LINE__': (1, 'dynamic', lambda: str(self.lineptr)),
'__DATE__': (2, 'simple', time.strftime('%b %d %Y')),
'__TIME__': (3, 'simple', time.strftime('%H:%M:%S')),
})
def transform(self):
while self.lineptr < len(self.lines):
line = self.next_line()
strip = line.strip()
if strip.startswith('#'):
self.process(strip[1:])
else:
self.append(line)
assert self.block.parent is None
return self.output
def append(self, line):
if not self.block.output:
return
self.output += self.substitute(line) + '\n'
def substitute(self, line):
items = sorted(self.replacements.items(), key=lambda r:r[1][0])
for key, (_, r_type, replacement) in items:
if r_type == 'simple':
line = line.replace(key, replacement)
elif r_type == 'dynamic':
line = line.replace(key, replacement())
elif r_type == 'function':
while True:
idx = line.find(key + '(')
if idx == -1:
break
start = idx + len(key) + 1
brackets = 0
args = []
s = ''
end = start
arg_num = 0
while brackets != -1:
for c in line[start:]:
end += 1
if c == '(':
brackets += 1
elif c == ')':
brackets -= 1
if brackets == -1:
break
if brackets == 0:
if c == ',':
args.append(ParamStr(s, args, arg_num))
s = ''
arg_num += 1
continue
s += c
if brackets != -1:
start = end
line += self.next_line()
args.append(ParamStr(s, args, arg_num))
try:
replaced = replacement.format(*args)
except IndexError:
raise TypeError(('Substitution error. Want:%s Got:%s\n' +
'On line: %s') % (
replacement, args, line))
line = line[:idx] + replaced + line[end:]
        line = re.sub(r'\s*##\s*', '', line) # needs to be done properly
# Recursively substitute
line = self.substitute(line)
return line
def process(self, line):
while line.endswith('\\'):
line = line[:-1] + self.next_line().strip()
directive, *rest = line.split(' ', 1)
rest = rest[0] if rest else ''
dir_map = {
'include': self.handle_include,
'if': self.handle_if,
'ifdef': self.handle_ifdef,
'ifndef': self.handle_ifndef,
'else': self.handle_else,
'elif': self.handle_elif,
'endif': self.handle_endif,
'define': self.handle_define,
'undef': self.handle_undef,
'pragma': self.handle_pragma,
}
func = dir_map.get(directive)
if func is None:
raise NameError('Unknown directive %s' % directive)
func(rest)
def handle_include(self, arg):
if not self.block.output:
return
arg = arg.strip()
search = list(self.search_paths)
if arg.startswith('<'):
assert arg.endswith('>')
name = arg[1:-1]
else:
assert arg.startswith('"')
assert arg.endswith('"')
search.append(self.dirname)
name = arg[1:-1]
import os
for dir in search:
path = os.path.join(dir, name)
if os.path.exists(path):
self.include(path)
return
assert False, "Not found %s" % name
def include(self, path):
with open(path, 'r') as file:
self.lines[self.lineptr:self.lineptr] = file.read().splitlines()
def handle_if(self, arg):
if not self.block.output:
output = False
else:
output = self.evaluate(arg)
self.block = ConditionalBlock(self.block, 'if', output, output)
def handle_ifdef(self, arg):
if not self.block.output:
output = False
else:
output = arg.strip() in self.replacements
self.block = ConditionalBlock(self.block, 'if', output, output)
def handle_ifndef(self, arg):
if not self.block.output:
output = False
else:
output = arg.strip() not in self.replacements
self.block = ConditionalBlock(self.block, 'if', output, output)
def handle_else(self, arg):
parent = self.block.parent
output = not self.block.output and not self.block.skip_to_end and parent.output
self.block = ConditionalBlock(parent, 'else', output, True)
def handle_elif(self, arg):
parent = self.block.parent
if not self.block.skip_to_end and parent.output:
output = self.evaluate(arg)
else:
output = False
self.block = ConditionalBlock(parent, 'elif', output,
self.block.skip_to_end or output)
def handle_endif(self, arg):
self.block = self.block.parent
def handle_define(self, arg):
if not self.block.output:
return
        match = re.match(r'(\w+)\s*(\((?:\w+\s*,\s*)*(?:(?:\w+|\.\.\.))\s*\))?(?:\s+|$)(.*)', arg)
if not match:
raise Exception('invalid #define "%s"' % arg)
name = match.group(1)
params = match.group(2)
replacement = match.group(3)
idx = len(self.replacements)
if params is None:
self.replacements[name] = (idx, 'simple', replacement)
else:
# Escape braces
replacement = replacement.replace('{', '{{').replace('}', '}}')
params = self._get_params(params)
for i in range(len(params)):
if params[i] == '...':
replacement = replacement.replace('#__VA_ARGS__',
'{%d.remainder.escape}' % i)
replacement = replacement.replace('__VA_ARGS__',
'{%d.remainder}' % i)
continue
replacement = re.sub(r'(\W|^)#%s(\b|$)' % params[i],
'\\1{%d.escape}' % i, replacement)
replacement = re.sub(r'(\b|^)%s(\b|$)' % params[i],
'{%d}' % i, replacement)
self.replacements[name] = (idx, 'function', replacement)
def _get_params(self, param_match):
params = param_match[1:-1].strip()
if params:
return tuple(map(str.strip, params.split(',')))
else:
return tuple()
def evaluate(self, expr):
from .parser_ import Parser
# Convert "defined var" into "__defined__(__no_def_var__)"
        expr = re.sub(r'defined\s+(\w+)', r'__defined__(__no_def_\1__)', expr)
expr = Parser().parse_const_expr(self.substitute(expr))
return bool(self.visit_expr(expr))
def visit_expr(self, expr):
if isinstance(expr, IntLiteral):
return expr.val
elif isinstance(expr, IdentifierExpr):
return 0
elif isinstance(expr, UnaryExpr):
val = self.visit_expr(expr.expr)
if expr.op == '+':
return +val
elif expr.op == '-':
return -val
elif expr.op == '~':
return ~val
elif expr.op == '!':
return 0 if val else 1
else:
raise TypeError()
elif isinstance(expr, BinaryOperatorExpr):
left = self.visit_expr(expr.left)
right = self.visit_expr(expr.right)
op = {
'*': int.__mul__,
'/': int.__floordiv__,
'%': int.__mod__,
'+': int.__add__,
'-': int.__sub__,
'<<': int.__lshift__,
'>>': int.__rshift__,
'<': int.__lt__,
'<=': int.__le__,
'>': int.__gt__,
'>=': int.__ge__,
'==': int.__eq__,
'!=': int.__ne__,
'&': int.__and__,
'^': int.__xor__,
'|': int.__or__,
'&&': lambda a, b: a and b,
'||': lambda a, b: a or b,
}.get(expr.op)
if op is None:
raise TypeError()
return op(left, right)
elif isinstance(expr, ConditionalExpr):
test = self.visit_expr(expr.cond)
return self.visit_expr(expr.true if test else expr.false)
elif isinstance(expr, FunctionCallExpr):
assert isinstance(expr.ref, IdentifierExpr)
name = expr.ref.val
if name == '__defined__':
macro = expr.args[0].val[9:-2]
return macro in self.replacements
else:
raise TypeError()
else:
raise TypeError()
    def handle_undef(self, arg):
        if not self.block.output:
            return
        # remove the macro definition if present
        self.replacements.pop(arg.strip(), None)
def handle_pragma(self, arg):
self.append("_Pragma(%s)" % ParamStr(arg, None, None).escape)
def next_line(self):
line = self.lines[self.lineptr]
self.lineptr += 1
return line
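# A worked (hypothetical) example of the function-macro machinery above: for
#     #define SQR(x) ((x) * (x))
# handle_define stores the 'function' replacement "(({0}) * ({0}))", and
# substitute() later expands SQR(a + 1) by collecting the call's arguments
# into ParamStr objects and applying replacement.format(*args), giving
# "((a + 1) * (a + 1))".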
|
11465590
|
from surprise import Dataset
from surprise import Reader
from surprise import NormalPredictor
from surprise import BaselineOnly
from surprise import KNNBasic
from surprise import KNNWithMeans
from surprise import KNNBaseline
from surprise import KNNWithZScore
from surprise import SVD
from surprise import SVDpp
from surprise import NMF
from surprise import SlopeOne
from surprise import CoClustering
from surprise.model_selection import cross_validate
import hyperopt
import time
import datetime
import os
import numpy as np
import pandas as pd
import sys
sys.path.insert(1, './')
from auto_surprise.engine import Engine
if __name__ == '__main__':
sys.settrace
print("Starting benchmark")
# Surprise algorithms to evaluate
algorithms = (SVD, SVDpp, NMF, SlopeOne, KNNBasic, KNNWithMeans, KNNWithZScore, KNNBaseline, CoClustering, BaselineOnly, NormalPredictor)
# Load Book crossing dataset
df = pd.read_csv('../datasets/BX-CSV-DUMP/BX-Book-Ratings.csv', sep=';', error_bad_lines=False, encoding="latin-1")
df.columns = ['user', 'item', 'rating']
reader = Reader(rating_scale=(0, 10))
data = Dataset.load_from_df(df.sample(n=100000, random_state=134), reader=reader)
del(df)
benchmark_results = {
'Algorithm': [],
'RMSE': [],
'MAE': [],
'Time': []
}
# Evaluate AutoSurprise
start_time = time.time()
    time_limit = 60 * 60 * 12 # Run for 12 hours
engine = Engine(verbose=False)
    best_model, best_params, best_score, tasks = engine.train(data=data, target_metric='test_rmse', quick_compute=False, cpu_time_limit=time_limit, max_evals=10000, hpo_algo=hyperopt.atpe.suggest)
cv_time = str(datetime.timedelta(seconds=int(time.time() - start_time)))
cv_results = cross_validate(engine.build_model(best_model, best_params), data, ['rmse', 'mae'])
mean_rmse = '{:.4f}'.format(np.mean(cv_results['test_rmse']))
mean_mae = '{:.4f}'.format(np.mean(cv_results['test_mae']))
print("--------- Done ----------")
print("Best model: ", best_model)
print("Best params: ", best_params)
print("Best score: ", best_score)
print("All tasks: ", tasks)
benchmark_results['Algorithm'].append('AutoSurprise')
benchmark_results['RMSE'].append(mean_rmse)
benchmark_results['MAE'].append(mean_mae)
benchmark_results['Time'].append(cv_time)
print("--- AutoSurprise results ---")
print(pd.DataFrame.from_dict(benchmark_results))
# Evaluate Surprise Algorithms
for algo in algorithms:
algo_name = algo.__name__
print("Running algorithm : %s" % algo_name)
try:
start_time = time.time()
cv_results = cross_validate(algo(), data, ['rmse', 'mae'], cv=3)
cv_time = str(datetime.timedelta(seconds=int(time.time() - start_time)))
mean_rmse = '{:.4f}'.format(np.mean(cv_results['test_rmse']))
mean_mae = '{:.4f}'.format(np.mean(cv_results['test_mae']))
benchmark_results['Algorithm'].append(algo_name)
benchmark_results['RMSE'].append(mean_rmse)
benchmark_results['MAE'].append(mean_mae)
benchmark_results['Time'].append(cv_time)
except Exception as e:
print('Exception : ', e)
    # Save results to CSV
results = pd.DataFrame.from_dict(benchmark_results)
print(results)
    results.to_csv('book-crossing-benchmark-results.csv')
|
11465607
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from six.moves import xrange
import tensorflow as tf
from tensorflow.python.platform import flags
import logging, os, sys, pickle, argparse
sys.path.append('../utils/')
sys.path.append('../cleverhans/')
from cleverhans.utils import set_log_level
from model_eval import model_eval
import keras.backend
sys.path.append('load/')
from load_classifier import load_classifier
import pickle
from detect_attacks_logp import search_alpha, comp_logp, logsumexp
from superwhite import SuperWhite
FLAGS = flags.FLAGS
def combine(logits, combine_logits):
# combine logits of shape (K, N, dimY) to shape (N, dimY)
print('combine the logits from random network snapshots (%s)...' % combine_logits)
if combine_logits == 'ensemble':
results = tf.reduce_mean(tf.nn.softmax(logits), 0) # (N, dimY)
results = tf.log(tf.clip_by_value(results, 1e-20, np.inf))
if combine_logits == 'bayes':
logits_max = tf.reduce_max(logits, 0)
logits_ = logits - logits_max # (dimY, N)
results = tf.log(tf.clip_by_value(tf.reduce_mean(tf.exp(logits_), 0), 1e-20, np.inf))
results += logits_max
return results
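# In short (a sketch of the math above): 'ensemble' returns
# log( mean_k softmax(logits_k) ), averaging predictive probabilities over the
# K snapshots, while 'bayes' returns the numerically stable log-mean-exp of
# the logits, log( mean_k exp(logits_k) ).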
def test_attacks(data_name, model_name, attack_method, eps, lbd, batch_size=100,
targeted=False, attack_snapshot=False, save=False):
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Create TF session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
print("Created TensorFlow session.")
set_log_level(logging.DEBUG)
if data_name == 'mnist':
from cleverhans.utils_mnist import data_mnist
X_train, Y_train, X_test, Y_test = data_mnist(train_start=0, train_end=60000,
test_start=0, test_end=10000)
if data_name in ['cifar10', 'plane_frog']:
from import_data_cifar10 import load_data_cifar10
if data_name == 'plane_frog':
labels = [0, 6]
else:
labels = None
data_path = '../cifar_data/'
X_train, X_test, Y_train, Y_test = load_data_cifar10(data_path, labels=labels, conv=True)
source_samples, img_rows, img_cols, channels = X_test.shape
nb_classes = Y_test.shape[1]
# Define input TF placeholder
batch_size = min(batch_size, X_test.shape[0])
print('use batch_size = %d' % batch_size)
x = tf.placeholder(tf.float32, shape=(batch_size, img_rows, img_cols, channels))
y = tf.placeholder(tf.float32, shape=(batch_size, nb_classes))
# Define TF model graph
model = load_classifier(sess, model_name, data_name, attack_snapshot=attack_snapshot)
if 'bayes' in model_name and 'distill' not in model_name and 'query' not in model_name:
model_name = model_name + '_cnn'
# Craft adversarial examples
nb_adv_per_sample = str(nb_classes - 1) if targeted else '1'
print('Crafting ' + str(source_samples) + ' * ' + nb_adv_per_sample +
' adversarial examples')
print("This could take some time ...")
# Evaluate the accuracy of the MNIST model on legitimate test examples
if 'bnn' not in model_name:
keras.backend.set_learning_phase(0)
else:
# need to set keras learning phase to training in order to run test-time dropout
keras.backend.set_learning_phase(1)
# make adv inputs and labels for the attack if targeted
if targeted:
adv_inputs = np.array(
[[instance] * nb_classes for
instance in X_test[:source_samples]], dtype=np.float32)
one_hot = np.zeros((nb_classes, nb_classes))
one_hot[np.arange(nb_classes), np.arange(nb_classes)] = 1
adv_inputs = adv_inputs.reshape(
(source_samples * nb_classes, img_rows, img_cols, 1))
adv_ys = np.array([one_hot] * source_samples,
dtype=np.float32).reshape((source_samples *
nb_classes, nb_classes))
else:
adv_inputs = X_test[:source_samples]
adv_ys = Y_test[:source_samples]
model_logit = lambda x: model.predict(x, softmax=False)
attack = SuperWhite(model_logit, sess=sess)
if targeted:
yname = 'y_target'
else:
yname = 'y'
if 'bnn' in model_name:
combine_logits = 'ensemble'
if 'bayes' in model_name:
combine_logits = 'bayes'
attack_params = {yname: adv_ys,
'eps': eps,
'eps_iter': 0.01,
'nb_iter': 40,
'clip_min': 0.,
'clip_max': 1.,
'rand_init': True,
'delta_marginal': 0,
'delta_logit': 0,
'delta_kl': 0,
'kl_prob_vec': None,
'detection_lambda': lbd,
'combine_logits': combine_logits,
'batch_size': batch_size}
# compute statistics on data
y_logit_train = []
print('-------------------------------------')
print('compute statistics on data')
y_logit_op = model.predict(x, softmax=False)
if attack_snapshot:
y_logit_op = combine(y_logit_op, combine_logits)
for i in xrange(int(X_train.shape[0] / batch_size)):
X_batch = X_train[i*batch_size:(i+1)*batch_size]
y_logit_train.append(sess.run(y_logit_op, feed_dict={x: X_batch}))
y_logit_train = np.concatenate(y_logit_train)
y_train = Y_train[:y_logit_train.shape[0]]
acc_train = np.mean(np.argmax(y_logit_train, 1) == np.argmax(y_train, 1))
print('training set accuracy:', 100 * acc_train)
results_train = comp_logp(y_logit_train, y_train, 'train', comp_logit_dist = True)
# marginal detection
alpha, _ = search_alpha(results_train[0], results_train[1], results_train[2], plus=False)
delta_marginal = -(results_train[1] - alpha * results_train[2])
print('delta_marginal:', delta_marginal)
# logit detection
delta_logit = []
for i in xrange(nb_classes):
ind = np.where(y_train[:, i] == 1)[0]
alpha, _ = search_alpha(results_train[3][ind], results_train[4][i], results_train[5][i], plus=False)
delta_logit.append(-(results_train[4][i] - alpha * results_train[5][i]))
delta_logit = np.asarray(delta_logit, dtype='f')
print('delta_logit:', delta_logit)
# kl detection
logit_mean, _, kl_mean, kl_std, softmax_mean = results_train[-5:]
delta_kl = []
for i in xrange(nb_classes):
ind = np.where(y_train[:, i] == 1)[0]
logit_tmp = y_logit_train[ind] - logsumexp(y_logit_train[ind], axis=1)[:, np.newaxis]
kl = np.sum(softmax_mean[i] * (np.log(softmax_mean[i]) - logit_tmp), 1)
alpha, _ = search_alpha(kl, kl_mean[i], kl_std[i], plus=True)
delta_kl.append(kl_mean[i] + alpha * kl_std[i])
delta_kl = np.asarray(delta_kl, dtype='f')
print('delta_kl:', delta_kl)
# add in params. to attack_params
attack_params['delta_marginal'] = delta_marginal
attack_params['delta_logit'] = delta_logit
attack_params['delta_kl'] = delta_kl
attack_params['kl_prob_vec'] = np.array(softmax_mean)
# perform the attack!
adv = []
n_batch = int(adv_inputs.shape[0] / batch_size)
for i in xrange(n_batch):
adv_batch = adv_inputs[i*batch_size:(i+1)*batch_size]
attack_params[yname] = adv_ys[i*batch_size:(i+1)*batch_size] # only for untargeted
adv.append(attack.generate_np(adv_batch, **attack_params))
if (i+1) % 10 == 0:
print('finished %d/%d mini-batch' % (i+1, n_batch))
adv = np.concatenate(adv, axis=0)
print('--------------------------------------')
# evaluations
preds = model.predict(x, softmax=False) # output logits
if attack_snapshot:
preds = combine(preds, combine_logits)
eval_params = {'batch_size': batch_size}
accuracy, adv_logits = model_eval(sess, x, y, preds, adv, adv_ys,
args=eval_params, return_pred=True)
if targeted:
success_rate = accuracy * 100
        print('targeted attack success rate: %.4f' % success_rate)
else:
success_rate = (1 - accuracy) * 100
print('untargeted attack success rate: %.4f' % success_rate, adv.shape)
# Close TF session
sess.close()
# save results
if save:
if not os.path.isdir('raw_attack_results_superwhite/'):
os.mkdir('raw_attack_results_superwhite/')
print('create path raw_attack_results_superwhite/')
path = 'raw_attack_results_superwhite/' + model_name + '/'
attack_method = attack_method + '_' + 'eps%.2f' % attack_params['eps']
attack_method = attack_method + '_lambda%.1f' % attack_params['detection_lambda']
if not os.path.isdir(path):
os.mkdir(path); print('create path ' + path)
filename = data_name + '_' + attack_method
if targeted:
filename = filename + '_targeted'
else:
filename = filename + '_untargeted'
true_ys = Y_test[:source_samples]
results = [adv, true_ys, adv_ys, adv_logits]
        pickle.dump(results, open(path+filename+'.pkl', 'wb'))
print("results saved at %s.pkl" % (path+filename))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run RVAE experiments.')
parser.add_argument('--batch_size', '-B', type=int, default=100)
parser.add_argument('--data', '-D', type=str, default='plane_frog')
parser.add_argument('--targeted', '-T', action='store_true', default=False)
parser.add_argument('--attack', '-A', type=str, default='superwhite')
parser.add_argument('--eps', '-e', type=float, default=0.1)
parser.add_argument('--lbd', '-l', type=float, default=0.1)
parser.add_argument('--victim', '-V', type=str, default='bnn_K10')
parser.add_argument('--save', '-S', action='store_true', default=False)
parser.add_argument('--snapshot', '-R', action='store_true', default=False)
args = parser.parse_args()
test_attacks(data_name=args.data,
model_name=args.victim,
attack_method=args.attack,
eps=args.eps,
lbd=args.lbd,
batch_size=args.batch_size,
targeted=args.targeted,
attack_snapshot=args.snapshot,
save=args.save)
|
11465608
|
from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "BOT"
addresses_name = (
"2021-04-16T11:13:32.139850/Boston Democracy_Club__06May2021 (2).tsv"
)
stations_name = (
"2021-04-16T11:13:32.139850/Boston Democracy_Club__06May2021 (2).tsv"
)
elections = ["2021-05-06"]
csv_delimiter = "\t"
def address_record_to_dict(self, record):
if record.addressline6 in [
"PE21 7AL",
"PE20 3AG",
"PE21 8LA",
"PE22 9JA",
"PE22 9JW",
"PE20 3QX",
"PE21 0RL",
"PE21 7LH",
"PE22 9LJ",
"PE21 7BJ",
]:
return None
return super().address_record_to_dict(record)
def station_record_to_dict(self, record):
# St Thomas Church Hall London Road Boston PE21 8AG
if record.polling_place_id == "4024":
record = record._replace(polling_place_postcode="")
return super().station_record_to_dict(record)
|
11465620
|
from toee import *
#################################
# TELEPORT SHORTCUTS #
#################################
TELE_SHORT_TEMPLE_LEVEL_3 = 5078
def shopmap():
game.fade_and_teleport(0,0,0,5107,480,480)
return
def lgvignette():
game.fade_and_teleport(0,0,0,5096,480,480)
return
def lawfulgoodvignette():
lgvignette()
return
def lgvig():
lgvignette()
return
#################################
# HOMMLET #
#################################
def homm():
game.fade_and_teleport(0,0,0,5001,619,422)
return
def hommlet():
game.fade_and_teleport(0,0,0,5001,619,422)
return
def smith():
game.fade_and_teleport(0,0,0,5001,577,433)
return
def spugnoir():
game.fade_and_teleport(0,0,0,5007,475,482)
return
def courier():
game.fade_and_teleport(0,0,0,5009,479,483)
return
def templeagent(): #the courier
game.fade_and_teleport(0,0,0,5009,479,483)
return
def traders():
game.fade_and_teleport(0,0,0,5010,482,480)
return
def rannos():
game.fade_and_teleport(0,0,0,5010,482,480)
return
def gremag():
game.fade_and_teleport(0,0,0,5010,482,480)
return
def rannosdavl():
game.fade_and_teleport(0,0,0,5010,482,480)
return
def terjon():
game.fade_and_teleport(0,0,0,5011,486,480)
return
def calmert():
game.fade_and_teleport(0,0,0,5012,491,484)
return
def burne():
game.fade_and_teleport(0,0,0,5016,479,482)
return
def rufus():
game.fade_and_teleport(0,0,0,5016,479,482)
return
def percy():
game.fade_and_teleport(0,0,0,5020,478,481)
return
def tarim():
game.fade_and_teleport(0,0,0,5022,482,481)
return
def mathilde():
game.fade_and_teleport(0,0,0,5023,468,484)
return
def meleny():
game.fade_and_teleport(0,0,0,5025,485,489)
return
def althea():
game.fade_and_teleport(0,0,0,5025,485,489)
return
def filliken():
game.fade_and_teleport(0,0,0,5025,485,489)
return
def bing():
game.fade_and_teleport(0,0,0,5026,491,487)
return
def tenants():
game.fade_and_teleport(0,0,0,5029,485,491)
return
def jinnerth():
game.fade_and_teleport(0,0,0,5030,488,484)
return
def jeweller():
game.fade_and_teleport(0,0,0,5033,482,488)
return
def jeweler():
game.fade_and_teleport(0,0,0,5033,482,488)
return
def nira():
game.fade_and_teleport(0,0,0,5033,482,488)
return
def niramelubb():
game.fade_and_teleport(0,0,0,5033,482,488)
return
def moneychanger():
game.fade_and_teleport(0,0,0,5033,482,488)
return
def brewhouse():
game.fade_and_teleport(0,0,0,5037,475,479)
return
def cavanaugh():
game.fade_and_teleport(0,0,0,5037,475,479)
return
def deklo():
game.fade_and_teleport(0,0,0,5069,480,480)
return
def inn():
game.fade_and_teleport(0,0,0,5007,483,480)
return
def welcomewench():
game.fade_and_teleport(0,0,0,5007,483,480)
return
def furnok():
game.fade_and_teleport(0,0,0,5007,476,478)
return
def ostler():
game.fade_and_teleport(0,0,0,5007,482,477)
return
def gundigoot():
game.fade_and_teleport(0,0,0,5007,482,477)
return
def gundi():
game.fade_and_teleport(0,0,0,5007,482,477)
return
def innkeeper():
game.fade_and_teleport(0,0,0,5007,482,477)
return
def castle():
game.fade_and_teleport(0,0,0,5001,437,698)
return
def constructionsite():
game.fade_and_teleport(0,0,0,5001,437,698)
return
def castleconstructionsite():
game.fade_and_teleport(0,0,0,5001,437,698)
return
def jayfie():
if is_daytime():
game.fade_and_teleport(0,0,0,5001,437,698)
else:
jayfienight()
return
def laborerspy():
if is_daytime():
game.fade_and_teleport(0,0,0,5001,437,698)
else:
jayfienight()
return
def spy():
if is_daytime():
game.fade_and_teleport(0,0,0,5001,437,698)
else:
jayfienight()
return
def jayfienight():
game.fade_and_teleport(0,0,0,5001,383,519)
return
def yvy():
game.fade_and_teleport(0,0,0,5001,370,533)
return
def tatooartist():
game.fade_and_teleport(0,0,0,5001,370,533)
return
def tatoo():
game.fade_and_teleport(0,0,0,5001,370,533)
return
def campmother():
game.fade_and_teleport(0,0,0,5001,370,533)
return
def laborercook():
game.fade_and_teleport(0,0,0,5001,402,533)
return
def dex():
game.fade_and_teleport(0,0,0,5001,402,533)
return
def elder():
game.fade_and_teleport(0,0,0,5045,482,477)
return
def nevets():
game.fade_and_teleport(0,0,0,5045,482,477)
return
def kenternevets():
game.fade_and_teleport(0,0,0,5045,482,477)
return
def villageelder():
game.fade_and_teleport(0,0,0,5045,482,477)
return
def townelder():
game.fade_and_teleport(0,0,0,5045,482,477)
return
def kenter():
game.fade_and_teleport(0,0,0,5045,482,477)
return
def carpenter():
game.fade_and_teleport(0,0,0,5047,476,471)
return
def riklinkin():
game.fade_and_teleport(0,0,0,5047,476,471)
return
def marek():
game.fade_and_teleport(0,0,0,5047,476,471)
return
def barnmaker():
game.fade_and_teleport(0,0,0,5047,476,471)
return
def wainwright():
if is_daytime():
game.fade_and_teleport(0,0,0,5044,494,487)
else:
inn()
return
def valden():
if is_daytime():
game.fade_and_teleport(0,0,0,5044,494,487)
else:
inn()
return
def stonemason():
game.fade_and_teleport(0,0,0,5001,455,520)
return
def mason():
game.fade_and_teleport(0,0,0,5001,455,520)
return
def gister():
game.fade_and_teleport(0,0,0,5001,455,520)
return
def gisternoshim():
game.fade_and_teleport(0,0,0,5001,455,520)
return
def jay():
if is_daytime():
game.fade_and_teleport(0,0,0,5001,540,249)
else:
game.fade_and_teleport(0,0,0,5038,487,485)
return
def blackjay():
if is_daytime():
game.fade_and_teleport(0,0,0,5001,540,249)
else:
game.fade_and_teleport(0,0,0,5038,487,485)
return
def teamster():
game.fade_and_teleport(0,0,0,5032,474,482)
return
def sef():
game.fade_and_teleport(0,0,0,5032,474,482)
return
def sefflettner():
game.fade_and_teleport(0,0,0,5032,474,482)
return
def teamsterson():
if is_daytime() == 0 and game.global_flags[4] == 0:
game.fade_and_teleport(0,0,0,5001,561,225)
else:
game.fade_and_teleport(0,0,0,5032,471,485)
return
def corl():
if is_daytime() == 0 and game.global_flags[4] == 0:
game.fade_and_teleport(0,0,0,5001,561,225)
else:
game.fade_and_teleport(0,0,0,5032,471,485)
return
def jaroo():
game.fade_and_teleport(0,0,0,5042,491,474)
return
def druid():
game.fade_and_teleport(0,0,0,5042,491,474)
return
def renton():
game.fade_and_teleport(0,0,0,5063,470, 477)
return
def hall():
game.fade_and_teleport(0,0,0,5001,577,410)
return
def townhall():
game.fade_and_teleport(0,0,0,5001,577,410)
return
def council():
game.fade_and_teleport(0,0,0,5001,577,410)
return
def prison():
game.fade_and_teleport(0,0,0,5014,482,478)
return
def chase():
game.fade_and_teleport(0,0,0,5001,344,498)
return
def trail():
game.fade_and_teleport(0,0,0,5001,344,498)
return
def emridygiant():
game.fade_and_teleport(0,0,0,5094,611,513)
return
def rainbowrock():
game.fade_and_teleport(0,0,0,5094,487,503)
return
def emridy():
game.fade_and_teleport(0,0,0,5094,544,411)
return
#################################
# MOATHOUSE #
# #
#################################
def moathouse():
game.fade_and_teleport(0,0,0,5002,480,546)
return
def moatgate():
game.fade_and_teleport(0,0,0,5002,487,485)
return
def moathousegate():
game.fade_and_teleport(0,0,0,5002,487,485)
return
def moattower():
game.fade_and_teleport(0,0,0,5003,471,486)
return
def moathousetower():
game.fade_and_teleport(0,0,0,5003,471,486)
return
def lubash():
game.fade_and_teleport(0,0,0,5005,425,415)
return
def moatlevel1():
game.fade_and_teleport(0,0,0,5004,476,477)
return
def moathouseupperlevel():
moatlevel1()
return
def moatupper():
moatlevel1()
return
def moatinterior():
moatlevel1()
return
def moathouseinterior():
moatlevel1()
return
def moatstirges():
moatlevel1()
return
def raul():
game.fade_and_teleport(0,0,0,5004,476,477)
return
def raulthegrim():
game.fade_and_teleport(0,0,0,5004,476,477)
return
def moatzombies():
game.fade_and_teleport(0,0,0,5005,420,411)
return
def moatbugbears():
game.fade_and_teleport(0,0,0,5005,444,499)
return
def moatgnolls():
game.fade_and_teleport(0,0,0,5005,509,503)
return
def moathousegnolls():
game.fade_and_teleport(0,0,0,5005,509,503)
return
def moatsarge():
game.fade_and_teleport(0,0,0,5005,536,547)
return
def moathousesarge():
game.fade_and_teleport(0,0,0,5005,536,547)
return
def larethsarge():
game.fade_and_teleport(0,0,0,5005,536,547)
return
def larethsergeant():
game.fade_and_teleport(0,0,0,5005,536,547)
return
def lareth():
game.fade_and_teleport(0,0,0,5005,475,546)
return
#################################
# NULB #
#################################
def nulb():
game.fade_and_teleport(0,0,0,5051,505,367)
return
def imeryds():
game.fade_and_teleport(0,0,0,5068,477,482)
return
def otis():
game.fade_and_teleport(0,0,0,5051,467,527)
return
def mary():
game.fade_and_teleport(0,0,0,5058,502,479)
return
def riana():
game.fade_and_teleport(0,0,0,5058,498,489)
return
def ophelia():
game.fade_and_teleport(0,0,0,5057,488,489)
return
def madamophelia():
game.fade_and_teleport(0,0,0,5057,488,489)
return
def madam():
game.fade_and_teleport(0,0,0,5057,488,489)
return
def brothel():
game.fade_and_teleport(0,0,0,5057,488,489)
return
def snakepit():
game.fade_and_teleport(0,0,0,5057,488,489)
return
def serena():
game.fade_and_teleport(0,0,0,5051,558,529)
return
def sammy():
game.fade_and_teleport(0,0,0,5051,495,525)
return
def mona():
game.fade_and_teleport(0,0,0,5051,558,536)
return
def charlotte():
game.fade_and_teleport(0,0,0,5058,494,473)
return
def jenelda():
game.fade_and_teleport(0,0,0,5059,483,480)
return
def boatmanstavern():
game.fade_and_teleport(0,0,0,5051,399,522)
return
def tavern():
game.fade_and_teleport(0,0,0,5051,399,522)
return
def skole():
game.fade_and_teleport(0,0,0,5052,477,489)
return
def lodriss():
game.fade_and_teleport(0,0,0,5052,472,482)
return
def tolub():
game.fade_and_teleport(0,0,0,5052,480,479)
return
def grud():
game.fade_and_teleport(0,0,0,5051,382,483)
return
def grudsquinteye():
game.fade_and_teleport(0,0,0,5051,382,483)
return
def docks():
game.fade_and_teleport(0,0,0,5051,382,483)
return
def nulbdocks():
game.fade_and_teleport(0,0,0,5051,382,483)
return
def prestonwetz():
game.fade_and_teleport(0,0,0,5051,478,470)
return
def residentialarea():
game.fade_and_teleport(0,0,0,5051,478,470)
return
def nulbresidentialarea():
game.fade_and_teleport(0,0,0,5051,478,470)
return
def nulbhouse():
game.fade_and_teleport(0,0,0,5051,478,470)
return
def nulbhouses():
game.fade_and_teleport(0,0,0,5051,478,470)
return
def preston():
game.fade_and_teleport(0,0,0,5051,478,470)
return
def wetz():
game.fade_and_teleport(0,0,0,5051,478,470)
return
def hostel():
game.fade_and_teleport(0,0,0,5051,556,453)
return
def watersidehostel():
game.fade_and_teleport(0,0,0,5051,556,453)
return
def waterside():
game.fade_and_teleport(0,0,0,5051,556,453)
return
def alira():
game.fade_and_teleport(0,0,0,5060,477,503)
return
def pearl():
game.fade_and_teleport(0,0,0,5060,479,500)
return
def dala():
game.fade_and_teleport(0,0,0,5060,477,503)
return
def rentsch():
game.fade_and_teleport(0,0,0,5060,478,493)
return
def wat():
game.fade_and_teleport(0,0,0,5060,481,493)
return
def motherscreng():
game.fade_and_teleport(0,0,0,5053,483,484)
return
def screng():
game.fade_and_teleport(0,0,0,5053,483,484)
return
def ydey():
game.fade_and_teleport(0,0,0,5053,483,484)
return
def herbshop():
game.fade_and_teleport(0,0,0,5053,483,484)
return
def murfles():
game.fade_and_teleport(0,0,0,5054,477,487)
return
def hruda():
game.fade_and_teleport(0,0,0,5054,477,487)
return
def mickey():
game.fade_and_teleport(0,0,0,5051,556,472)
return
#################################
# TEMPLE #
#################################
def temple():
game.fade_and_teleport(0,0,0,5064,480,484)
return
def templelevel0():
game.fade_and_teleport(0,0,0,5064,480,484)
return
def templeoutside():
game.fade_and_teleport(0,0,0,5062,516,458)
return
def templeentrance():
game.fade_and_teleport(0,0,0,5062,516,458)
return
def bronzedoors():
game.fade_and_teleport(0,0,0,5062,516,458)
return
def stairs():
game.fade_and_teleport(0,0,0,5064,480,484)
return
def templestaircase():
game.fade_and_teleport(0,0,0,5064,480,484)
return
def secretstaircase():
game.fade_and_teleport(0,0,0,5064,480,484)
return
def staircase():
game.fade_and_teleport(0,0,0,5064,480,484)
return
def templelevel1():
game.fade_and_teleport(0,0,0,5066,524,564)
return
def romag():
game.fade_and_teleport(0,0,0,5066,460,448)
return
def earthcommander():
game.fade_and_teleport(0,0,0,5066,450,462)
return
def earthcom():
game.fade_and_teleport(0,0,0,5066,450,462)
return
def earthtroopcommander():
game.fade_and_teleport(0,0,0,5066,450,462)
return
def wonnilonhideout():
game.fade_and_teleport(0,0,0,5066,417,381)
return
def hideout():
game.fade_and_teleport(0,0,0,5066,417,381)
return
def wonnilon():
game.fade_and_teleport(0,0,0,5066,551,423)
return
def wonni():
game.fade_and_teleport(0,0,0,5066,551,423)
return
def earthaltar():
game.fade_and_teleport(0,0,0,5066,471,387)
return
def turnkey():
game.fade_and_teleport(0,0,0,5066,541,453)
return
def harpies():
game.fade_and_teleport(0,0,0,5066,424,557)
return
def morgan():
game.fade_and_teleport(0,0,0,5066,553,567)
return
def templelevel2():
game.fade_and_teleport(0,0,0,5067,568,382)
return
def lvl2bugbears():
game.fade_and_teleport(0,0,0,5067,423,405)
return
def feldrin():
game.fade_and_teleport(0,0,0,5067,524,454)
return
def brunk():
game.fade_and_teleport(0,0,0,5067,524,454)
return
def oohlgrist():
game.fade_and_teleport(0,0,0,5067,473,612)
return
def aern():
game.fade_and_teleport(0,0,0,5067,465,562)
return
def bassanio():
game.fade_and_teleport(0,0,0,5067,433,540)
return
def tillahi():
game.fade_and_teleport(0,0,0,5067,413,439)
return
def countess():
game.fade_and_teleport(0,0,0,5067,413,439)
return
def alrrem():
# this is the proper spelling...
game.fade_and_teleport(0,0,0,5067,416,499)
return
def allrem():
# ... but for the sake of convenience...
game.fade_and_teleport(0,0,0,5067,416,499)
return
def cave():
game.fade_and_teleport(0,0,0,5113,478,517)
return
def ogrecave():
game.fade_and_teleport(0,0,0,5113,478,517)
return
def tubal():
game.fade_and_teleport(0,0,0,5067,416,499)
return
def antonio():
game.fade_and_teleport(0,0,0,5067,416,499)
return
def werewolves():
game.fade_and_teleport(0,0,0,5067,464,482)
return
def belsornig():
game.fade_and_teleport(0,0,0,5067,534,518)
return
def kelno():
game.fade_and_teleport(0,0,0,5067,524,484)
return
def minotaur():
game.fade_and_teleport(0,0,0,5067,568,382)
return
def littlesttroll():
game.fade_and_teleport(0,0,0,5067,475,397)
return
def thrommel():
game.fade_and_teleport(0,0,0,TELE_SHORT_TEMPLE_LEVEL_3,554,449)
return
def leucrottas():
game.fade_and_teleport(0,0,0,TELE_SHORT_TEMPLE_LEVEL_3,423,590)
return
def lamia():
game.fade_and_teleport(0,0,0,TELE_SHORT_TEMPLE_LEVEL_3,603,608)
return
def whitman():
game.fade_and_teleport(0,0,0,TELE_SHORT_TEMPLE_LEVEL_3,420,571)
return
def mandy():
game.fade_and_teleport(0,0,0,TELE_SHORT_TEMPLE_LEVEL_3,420,571)
return
def smigmal():
game.fade_and_teleport(0,0,0,TELE_SHORT_TEMPLE_LEVEL_3,632,470)
return
def falrinth():
game.fade_and_teleport(0,0,0,TELE_SHORT_TEMPLE_LEVEL_3,632,470)
return
def scorpp():
game.fade_and_teleport(0,0,0,TELE_SHORT_TEMPLE_LEVEL_3,553,490)
return
def templelevel4():
game.fade_and_teleport(0,0,0,5080,481,580)
return
def kella():
game.fade_and_teleport(0,0,0,5080,538,612)
return
def paida():
game.fade_and_teleport(0,0,0,5080,594,539)
return
def deggum():
game.fade_and_teleport(0,0,0,5080,421,535)
return
def senshock():
game.fade_and_teleport(0,0,0,5080,376,547)
return
def hedrack():
game.fade_and_teleport(0,0,0,5080,479,471)
return
def zuggtmoy():
game.fade_and_teleport(0,0,0,5079,541,503)
return
def gemthrone():
game.fade_and_teleport(0,0,0,5079,574,480)
return
def templetower():
game.fade_and_teleport(0,0,0,5111,485,507)
return
def earthtemple():
game.fade_and_teleport(0,0,0,5066,524,564)
return
def ogrechief():
game.fade_and_teleport(0,0,0,5066,484,535)
return
def earthogrechief():
game.fade_and_teleport(0,0,0,5066,484,535)
return
def gnollleader():
game.fade_and_teleport(0,0,0,5066,484,535)
return
def airaltar():
game.fade_and_teleport(0,0,0,5067,495,499)
return
#################################
# NODES #
#################################
def airnode():
game.fade_and_teleport(0,0,0,5081,480,480)
return
def ashrem():
game.fade_and_teleport(0,0,0,5081,538,396)
return
def taki():
game.fade_and_teleport(0,0,0,5081,417,553)
return
def vrock():
game.fade_and_teleport(0,0,0,5081,411,398)
return
def firenode():
game.fade_and_teleport(0,0,0,5083,503,496)
return
def darley():
game.fade_and_teleport(0,0,0,5083,569,529)
return
def balor():
game.fade_and_teleport(0,0,0,5083,455,387)
return
def waternode():
game.fade_and_teleport(0,0,0,5084,523,474)
return
def hezrou():
game.fade_and_teleport(0,0,0,5084,419,447)
return
def grank():
game.fade_and_teleport(0,0,0,5084,520,484)
return
def earthnode():
game.fade_and_teleport(0,0,0,5082,483,472)
return
def jaer():
game.fade_and_teleport(0,0,0,5082,438,549)
return
def sargen():
game.fade_and_teleport(0,0,0,5082,415,481)
return
#################################
# VERBOBONC #
#################################
def verbobonc():
game.fade_and_teleport(0,0,0,5121,228,507)
return
def verbo():
game.fade_and_teleport(0,0,0,5121,228,507)
return
def vbbc():
game.fade_and_teleport(0,0,0,5121,228,507)
return
def verbocono():
game.fade_and_teleport(0,0,0,5121,228,507)
return
def darlia():
game.fade_and_teleport(0,0,0,5156,472,476)
return
def viscount():
if is_daytime():
game.fade_and_teleport(0,0,0,5170,497,484)
else:
game.fade_and_teleport(0,0,0,5122,478,481)
return
def wilfrick():
viscount()
def welkwood():
game.fade_and_teleport(0,0,0,5093,516,323)
return
def welkwoodexterior():
welkwood()
return
def welkwoodbog():
welkwood()
return
def welkwood_bog():
welkwood()
return
def bog():
welkwood()
return
|
11465642
|
import operator
import rutokenizer
import rupostagger
import rulemma
if __name__ == '__main__':
print('Loading dictionaries and models...')
lemmatizer = rulemma.Lemmatizer()
lemmatizer.load('../tmp/rulemma.dat')
tokenizer = rutokenizer.Tokenizer()
tokenizer.load()
tagger = rupostagger.RuPosTagger()
tagger.load()
print('Loading finished')
#sent = u'во сне я мимо школы проходил'
#tokens = tokenizer.tokenize(sent)
#tags = tagger.tag(tokens)
#lemmas = lemmatizer.lemmatize(tags)
#for word, tags, lemma, *_ in lemmas:
# print(u'{:15}\t{:15}\t{}'.format(word, lemma, tags))
#sent = u'Его коробило'
sent = u'я оценил другую физику'
#sent = u'я оценил другого физика'
#sent = u'я отдал нашему физику приборы'
tokens = tokenizer.tokenize(sent)
tags = tagger.tag(tokens)
lemmas = lemmatizer.lemmatize(tags)
for word, tags, lemma, *_ in lemmas:
print(u'{:15}\t{:15}\t{}'.format(word, lemma, tags))
tests = [(u'механику стало жарко', u'механик стать жарко'),
(u'я оценил другую физику', u'я оценить другой физика'),
(u'Хотя , вот Ян Сатуновский .', u'хотя , вот ян сатуновский .'),
(u'во сне я мимо школы проходил', u'в сон я мимо школа проходить'),
(u'рой яму', u'рыть яма'),
(u'мой окна', u'мыть окно'),
(u'я тебя вижу', u'я ты видеть'),
(u'я вижу хрюнделя', u'я видеть хрюндель'),
(u'ты смотрел на хрюнделей', u'ты смотреть на хрюндель'),
(u'Мяукая, голодные кошки ловят жирненьких мышек', u'мяукать , голодный кошка ловить жирненький мышка'),
(u'Мы спрашивали про уроки и оценки', u'мы спрашивать про урок и оценка'),
(u'Куда же улетели облачка?', u'куда же улететь облачко ?')
]
    print('Start testing...')
    nb_errors = 0
    for sent, required_lemmas in tests:
        tokens = tokenizer.tokenize(sent)
        tags = tagger.tag(tokens)
        lemmas = lemmatizer.lemmatize(tags)
        predicted_lemmas = u' '.join(map(operator.itemgetter(2), lemmas))
        if predicted_lemmas != required_lemmas:
            nb_errors += 1
            print(u'Test failed for "{}": required_lemmas="{}", predicted_lemmas="{}"'.format(sent, required_lemmas, predicted_lemmas))
    if nb_errors == 0:
        print('All tests OK.')
    else:
        print('{} test(s) failed.'.format(nb_errors))
|
11465643
|
class CloudExternalFileProcessorMixin(object):
def get_input_path(self, in_file):
return in_file.instance.file.url
|
11465647
|
from setuptools import find_packages, setup
VERSION = {} # type: ignore
with open("trapper/version.py", "r") as version_file:
exec(version_file.read(), VERSION)
def get_requirements():
with open("requirements.txt") as f:
return f.read().splitlines()
extras_require = {
"dev": [
"black==21.7b0",
"flake8==3.9.2",
"isort==5.9.2",
"pytest>=6.2.4",
"pytest-cov>=2.12.1",
"pylint>=2.11",
"mypy>=0.9"
],
}
setup(
name="trapper",
version=VERSION["VERSION"],
author="OBSS",
url="https://github.com/obss/trapper",
description="State-of-the-art NLP through transformer models in a modular design and consistent APIs.",
    long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
packages=find_packages(
exclude=[
"*.tests", "*.tests.*", "tests.*", "tests",
"test_fixtures", "test_fixtures.*",
"scripts", "scripts.*"
]),
entry_points={"console_scripts": ["trapper=trapper.__main__:run"]},
python_requires=">=3.7.1",
install_requires=get_requirements(),
extras_require=extras_require,
include_package_data=True,
classifiers=[
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="python, nlp, natural-language-processing, deep-learning, transformer, pytorch, transformers, allennlp, pytorch-transformers",
)
|
11465670
|
import sys
sys.path.insert(0, '..')
import kalman_filter
import kf_simple3d
import numpy as np
import matplotlib.pyplot as plt
import pickle
import os.path
import pdb
np.set_printoptions(precision=4)
class Track:
def __init__(self, track_id, first_detection, kf_type):
# initiate kf
if kf_type == "2d":
self.kf = kalman_filter.KalmanFilter()
elif kf_type == "simple3d":
self.kf = kf_simple3d.KalmanFilterSimple3D()
self.mean, self.cov = self.kf.initiate(first_detection)
self.id = track_id
n = len(self.mean)
self.n = n
m = len(first_detection)
self.m = m
# initialize data stores
self.frame_log = np.zeros((0))
self.measurement_log = np.zeros((0, m))
self.gt_log = np.zeros((0, m))
self.mean_log = np.zeros((0, n))
self.cov_log = np.zeros((0, n, n))
self.gating_distance_log = np.zeros((0))
def update(self, measurement, gt, frame):
# log data
self.mean_log = np.vstack((self.mean_log, self.mean))
self.cov_log = np.concatenate((self.cov_log, self.cov[np.newaxis,:,:]))
self.measurement_log = np.vstack((self.measurement_log, measurement))
self.gt_log = np.vstack((self.gt_log, gt))
self.frame_log = np.append(self.frame_log, frame)
gating_distance = self.kf.gating_distance(self.mean, self.cov, measurement)
self.gating_distance_log = np.append(self.gating_distance_log, gating_distance)
# KF predict and update
self.mean, self.cov = self.kf.predict(self.mean, self.cov)
self.mean, self.cov = self.kf.update(self.mean, self.cov, measurement)
def plot(self):
t = self.frame_log
gt = self.gt_log
meas = self.measurement_log
state = self.mean_log
plt.subplot(321)
plt.plot(t, gt[:,0], label='Ground Truth')
plt.plot(t, meas[:,0], label='Measured')
plt.plot(t, state[:,0], label='filtered')
plt.xlabel('time')
plt.ylabel('x')
plt.legend()
plt.subplot(322)
plt.plot(t, gt[:,1], label='Ground Truth')
plt.plot(t, meas[:,1], label='Measured')
plt.plot(t, state[:,1], label='filtered')
plt.xlabel('time')
plt.ylabel('y')
plt.legend()
plt.subplot(323)
plt.plot(gt[:,0], gt[:,1], label='Ground Truth')
plt.plot(meas[:,0], meas[:,1], label='Measured')
plt.plot(state[:,0], state[:,1], label='filtered')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.subplot(324)
plt.plot(t, state[:,self.m], label='filtered')
plt.xlabel('time')
plt.ylabel('Vx')
plt.legend()
plt.subplot(325)
plt.plot(t, state[:,self.m+1], label='filtered')
plt.xlabel('time')
plt.ylabel('Vy')
plt.legend()
plt.show()
def file2data(fname):
    # data should be a list of frames.
    # Each element in the list represents one frame.
    # Each frame is a list of detections.
    # Each detection is a (measurement array, ground-truth array, track id) triple.
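    # Illustrative layout only (hypothetical values): a two-frame file with a
    # single detection per frame might look like
    #   [
    #       [(np.array([1.0, 2.0]), np.array([1.1, 2.1]), 0)],  # frame 0
    #       [(np.array([1.4, 2.3]), np.array([1.5, 2.4]), 0)],  # frame 1
    #   ]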
with open(fname, "rb") as f:
data = pickle.load(f)
return data
def cmp_tracks(track1, track2):
# Expect perfect match in mean log and gating distance
    mean_log_pass = np.max(np.abs(track1.mean_log - track2.mean_log)) < 1e-12
    gating_distance_pass = np.max(np.abs(track1.gating_distance_log - track2.gating_distance_log)) < 1e-12
return mean_log_pass and gating_distance_pass
def cmp(data, val):
any_fail = False
for itrack in data:
passed = cmp_tracks(data[itrack], val[itrack])
if not passed:
print("Mismatch found in track: ", itrack)
# pdb.set_trace()
any_fail = True
else:
print("Tracks matched: ", itrack)
return not any_fail
def validate(data, fname):
if os.path.isfile(fname):
val_data = file2data(fname)
return cmp(data, val_data)
else:
with open(fname, "wb") as f:
pickle.dump(data, f)
return True
def run_kf_test(fname, kf_type):
print("Running test for: {}".format(fname))
data = file2data(fname)
first_frame = data[0]
tracks = {}
for detection in first_frame:
meas, gt, gt_id = (detection[0], detection[1], detection[2])
tracks[gt_id] = Track(gt_id, meas, kf_type)
    frame_cnt = 0
for frame in data:
for detection in frame:
meas, gt, gt_id = (detection[0], detection[1], detection[2])
tracks[gt_id].update(meas, gt, frame_cnt)
frame_cnt += 1
passed = validate(tracks, fname + ".val")
if not passed:
for track_id in tracks:
tracks[track_id].plot()
if __name__=='__main__':
run_kf_test("single_track_4state_test.p", "2d")
run_kf_test("two_track_4state_test.p", "2d")
run_kf_test("single_track_6state_test.p", "simple3d")
|
11465681
|
import numpy as np
from sklearn.decomposition import PCA
def test(X):
pca = PCA(n_components=2)
projected = pca.fit_transform(X)
print(f'n_observations: {pca.n_samples_}')
print(f'n_features: {pca.n_features_}')
print(f'n_components: {pca.n_components_}')
print(f'\nprojected:\n{projected}')
print(f'\ncomponents:\n{pca.components_}')
print(f'\nmean:\n{pca.mean_}')
print(f'\nexplained_variance:\n{pca.explained_variance_}')
print(f'\nexplained_variance_ratio:\n{pca.explained_variance_ratio_}')
print(f'\nsingular_values:\n{pca.singular_values_}')
print(f'\nnoise_variance: {pca.noise_variance_}')
print('--------------------------------------')
X = np.array(
[[2.5, 2.4],
[0.5, 0.7],
[2.2, 2.9],
[1.9, 2.2],
[3.1, 3.0],
[2.3, 2.7],
[2.0, 1.6],
[1.0, 1.1],
[1.5, 1.6],
[1.1, 0.9]])
test(X)
# n_observations: 10
# n_features: 2
# n_components: 2
#
# projected:
# [[-0.82797019 -0.17511531]
# [ 1.77758033 0.14285723]
# [-0.99219749 0.38437499]
# [-0.27421042 0.13041721]
# [-1.67580142 -0.20949846]
# [-0.9129491 0.17528244]
# [ 0.09910944 -0.3498247 ]
# [ 1.14457216 0.04641726]
# [ 0.43804614 0.01776463]
# [ 1.22382056 -0.16267529]]
#
# components:
# [[-0.6778734 -0.73517866]
# [-0.73517866 0.6778734 ]]
#
# mean:
# [1.81 1.91]
#
# explained_variance:
# [1.28402771 0.0490834 ]
#
# explained_variance_ratio:
# [0.96318131 0.03681869]
#
# singular_values:
# [3.3994484 0.66464321]
#
# noise_variance: 0.0
print('--------------------------------------')
X = np.array(
[[-1, -1],
[-2, -1],
[-3, -2],
[1, 1],
[2, 1],
[3, 2]])
test(X)
# n_observations: 6
# n_features: 2
# n_components: 2
# projected:
# [[ 1.38340578 0.2935787 ]
# [ 2.22189802 -0.25133484]
# [ 3.6053038 0.04224385]
# [-1.38340578 -0.2935787 ]
# [-2.22189802 0.25133484]
# [-3.6053038 -0.04224385]]
# components:
# [[-0.83849224 -0.54491354]
# [ 0.54491354 -0.83849224]]
# mean:
# [0. 0.]
# explained_variance:
# [7.93954312 0.06045688]
# explained_variance_ratio:
# [0.99244289 0.00755711]
# singular_values:
# [6.30061232 0.54980396]
# noise_variance: 0.0
print('--------------------------------------')
X = np.array(
[[-1, -1, 1],
[-2, -1, 4],
[-3, -2, -10],
[1, 1, 0],
[2, 1, 7],
[3, 2, 3]])
test(X)
# n_observations: 6
# n_features: 3
# n_components: 2
# projected:
# [[ 0.29798775 1.36325421]
# [-2.25650538 3.13955756]
# [11.41612855 -0.16713955]
# [ 0.33140736 -1.58301663]
# [-6.55502622 -0.06288375]
# [-3.23399207 -2.68977185]]
# components:
# [[-0.27778488 -0.17755165 -0.94409267]
# [-0.78737248 -0.52094113 0.32964362]]
# mean:
# [0. 0. 0.83333333]
# explained_variance:
# [37.80910168 4.29759759]
# explained_variance_ratio:
# [0.89665854 0.10191931]
# singular_values:
# [13.74938211 4.63551378]
# noise_variance: 0.059967392969596925
|
11465690
|
from django.test import TestCase
from django.utils import timezone
from . import schedules
from datetime import datetime
now = timezone.make_aware(
datetime(1983, 7, 1, 3, 41),
timezone.utc)
class ScheduleTest(TestCase):
def test_hourly(self):
sched = schedules.Hourly(20, 2, 4)
self.assertEqual(sched.time_before(now),
timezone.make_aware(
datetime(1983, 7, 1, 3, 20, 2, 4),
timezone.utc))
self.assertEqual(sched.time_after(now),
timezone.make_aware(
datetime(1983, 7, 1, 4, 20, 2, 4),
timezone.utc))
def test_every(self):
sched = schedules.Every(minutes=1)
self.assertEqual(sched.time_before(now),
timezone.make_aware(
datetime(1983, 7, 1, 3, 41),
timezone.utc))
self.assertEqual(sched.time_after(now),
timezone.make_aware(
datetime(1983, 7, 1, 3, 42),
timezone.utc))
|
11465719
|
t_SH_StockMarketDataL2 = {
"nTime": "T_I32",
"nStatus": "T_I32",
"uPreClose": "T_U32",
"uOpen": "T_U32",
"uHigh": "T_U32",
"uLow": "T_U32",
"uMatch": "T_U32",
"uAskPrice": "T_U32",
"uAskVol": "T_U32",
"uBidPrice": "T_U32",
"uBidVol": "T_U32",
"uNumTrades": "T_U32",
"iVolume": "T_I64",
"iTurnover": "T_I64",
"iTotalBidVol": "T_I64",
"iTotalAskVol": "T_I64",
"uWeightedAvgBidPrice": "T_U32",
"uWeightedAvgAskPrice": "T_U32",
"nIOPV": "T_I32",
"nYieldToMaturity": "T_I32",
"uHighLimited": "T_U32",
"uLowLimited": "T_U32",
"sPrefix": "char",
"nSyl1": "T_I32",
"nSyl2": "T_I32",
"nSD2": "T_I32",
"sTradingPhraseCode": "char",
"nPreIOPV": "T_I32",
}
Stock_MarketData = t_SH_StockMarketDataL2
T_SH_StockMarketDataL2 = t_SH_StockMarketDataL2
t_SH_StockMarketDataL1 = {
"nTime": "T_I32",
"nStatus": "T_I32",
"uPreClose": "T_U32",
"uOpen": "T_U32",
"uHigh": "T_U32",
"uLow": "T_U32",
"uMatch": "T_U32",
"uAskPrice": "T_U32",
"uAskVol": "T_U32",
"uBidPrice": "T_U32",
"uBidVol": "T_U32",
"uNumTrades": "T_U32",
"iVolume": "T_I64",
"iTurnover": "T_I64",
"uHighLimited": "T_U32",
"uLowLimited": "T_U32",
"sTradingPhraseCode": "char",
"nPreIOPV": "T_I32",
"nIOPV": "T_I32",
}
StockMarketDataL1 = t_SH_StockMarketDataL1
t_SH_StockStepTrade = {
"nTradeIndex": "T_I32",
"nTradeChannel": "T_I32",
"nTradeTime": "T_I32",
"nTradePrice": "T_I32",
"iTradeQty": "T_I64",
"iTradeMoney": "T_I64",
"iTradeBuyNo": "T_I64",
"iTradeSellNo": "T_I64",
"cTradeBSflag": "char",
"sRes": "char",
}
t_OrderQueueHead = {
"nItem": "T_I32",
}
Stock_OrderQueue_Head = t_OrderQueueHead
t_OrderQueueItem = {
"nTime": "T_I32",
"nSide": "T_I32",
"nPrice": "T_I32",
"nOrders": "T_I32",
"nABItems": "T_I32",
"nABVolume": "T_I32",
}
Stock_OrderQueue = t_OrderQueueItem
T_OrderQueueItem = t_OrderQueueItem
t_SH_StockOrderQueue = {
"tHead": "T_OrderQueueHead",
"tItem": "T_OrderQueueItem",
}
StockOrderQueue = t_SH_StockOrderQueue
T_SH_StockOrderQueue = t_SH_StockOrderQueue
t_SH_StockIndex = {
"nTime": "T_I32",
"nOpenIndex": "T_I32",
"nHighIndex": "T_I32",
"nLowIndex": "T_I32",
"nLastIndex": "T_I32",
"iTotalVolume": "T_I64",
"iTurnover": "T_I64",
"nPreCloseIndex": "T_I32",
}
Stock_IndexData = t_SH_StockIndex
T_SH_StockIndex = t_SH_StockIndex
t_SH_Kline = {
"uDay": "T_U32",
"nTime": "T_I32",
"nPreClose": "T_I32",
"nValOpen": "T_I32",
"nValHigh": "T_I32",
"nValLow": "T_I32",
"nValClose": "T_I32",
"i64Volume": "T_I64",
"i64ValTotal": "T_I64",
"i64TotalVol": "T_I64",
"i64TotalTurnOver": "T_I64",
"nTurover": "T_I32",
"nValIncrease": "T_I32",
}
T_Kline = t_SH_Kline
T_SH_Kline = t_SH_Kline
t_CPXX = {
"szStkCode": "char",
"szISIN": "char",
"uUpdateTime": "T_U32",
"szStkNameZN": "char",
"szStkNameEn": "char",
"szUnderlyingCode": "char",
"szMktClass": "char",
"szStkClass": "char",
"szStkSubClass": "char",
"szCurrency": "char",
"i64FaceValue": "T_I64",
"i64MatchQty": "T_I64",
"szLastDate": "char",
"uListingData": "T_U32",
"uProductSETId": "T_U32",
"i64BuyNumUnit": "T_I64",
"i64SellNumUnit": "T_I64",
"i64DecalaredLowestNum": "T_I64",
"i64DecalaredHightestNum": "T_I64",
"i64PreClosePrice": "T_I64",
"i64PriceLevel": "T_I64",
"cPriceLimitsType": "char",
"i64UpLimitsPrice": "T_I64",
"i64LowerLimitPrice": "T_I64",
"i64DividendRatio": "T_I64",
"i64DividendAmount": "T_I64",
"cFinaSubjectFlag": "char",
"cMarginSubjectFlag": "char",
"szProdStatusFlag": "char",
"i64MPDecalaredLowestNum": "T_I64",
"i64MPDecalaredHightestNum": "T_I64",
"szStkNameZNLong": "char",
"szNote": "char",
}
T_CPXX = t_CPXX
T_SH_BaseInfo = t_CPXX
t_SH_ETFExtends = {
"nTime": "T_I32",
"nIOPV": "T_I32",
"nEtfBuyNum": "T_I32",
"i64EtfBuyAmount": "T_I64",
"i64EtfBuyMoney": "T_I64",
"nEtfSellNum": "T_I32",
"i64EtfSellAmount": "T_I64",
"i64EtfSellMoney": "T_I64",
"nWithDrawBuyNum": "T_I32",
"i64WithDrawBuyAmount": "T_I64",
"i64WithDrawBuyMoney": "T_I64",
"nWithDrawSellNum": "T_I32",
"i64WithDrawSellAmount": "T_I64",
"i64WithDrawSellMoney": "T_I64",
}
T_ETFEXTENDS = t_SH_ETFExtends
T_SH_ETFEXTENDS = t_SH_ETFExtends
t_SZ_StockMarketDataL2 = {
"nTime": "T_I32",
"nStatus": "T_I32",
"uPreClose": "T_U32",
"uOpen": "T_U32",
"uHigh": "T_U32",
"uLow": "T_U32",
"uMatch": "T_U32",
"uAskPrice": "T_U32",
"uAskVol": "T_U32",
"uBidPrice": "T_U32",
"uBidVol": "T_U32",
"uNumTrades": "T_U32",
"iVolume": "T_I64",
"iTurnover": "T_I64",
"iTotalBidVol": "T_I64",
"iTotalAskVol": "T_I64",
"uWeightedAvgBidPrice": "T_U32",
"uWeightedAvgAskPrice": "T_U32",
"nIOPV": "T_I32",
"nYieldToMaturity": "T_I32",
"uHighLimited": "T_U32",
"uLowLimited": "T_U32",
"sPrefix": "char",
"nSyl1": "T_I32",
"nSyl2": "T_I32",
"nSD2": "T_I32",
"sTradingPhraseCode": "char",
"nPreIOPV": "T_I32",
}
t_SZ_StockMarketDataL1 = {
"nTime": "T_I32",
"nStatus": "T_I32",
"uPreClose": "T_U32",
"uOpen": "T_U32",
"uHigh": "T_U32",
"uLow": "T_U32",
"uMatch": "T_U32",
"uAskPrice": "T_U32",
"uAskVol": "T_U32",
"uBidPrice": "T_U32",
"uBidVol": "T_U32",
"uNumTrades": "T_U32",
"iVolume": "T_I64",
"iTurnover": "T_I64",
"uHighLimited": "T_U32",
"uLowLimited": "T_U32",
"sTradingPhraseCode": "char",
"nPreIOPV": "T_I32",
"nIOPV": "T_I32",
}
t_SZ_StepTrade = {
"usChannelNo": "T_U16",
"i64ApplSeqNum": "T_I64",
"sMDStreamID": "char",
"i64BidApplSeqNum": "T_I64",
"i64OfferApplSeqNum": "T_I64",
"sSecurityID": "char",
"sSecurityIDSource": "char",
"i64LastPx": "T_I64",
"i64LastQty": "T_I64",
"cExecType": "char",
"i64TransactTime": "T_I64",
"sExtendFields": "char",
}
T_SZ_STEPTRADE = t_SZ_StepTrade
t_SZ_300191ExtendFields = {
}
t_SZ_300591ExtendFields = {
}
t_SZ_300791ExtendFields = {
}
t_SZ_StepOrder = {
"usChannelNo": "T_U16",
"i64ApplSeqNum": "T_I64",
"sMDStreamID": "char",
"sSecurityID": "char",
"sSecurityIDSource": "char",
"i64Price": "T_I64",
"i64OrderQty": "T_I64",
"cSide": "char",
"i64TransactTime": "T_I64",
"sExtendFields": "char",
}
T_SZ_STEPORDER = t_SZ_StepOrder
t_SZ_300192ExtendFields = {
"cOrdType": "char",
}
t_SZ_300592ExtendFields = {
"sConfirmID": "char",
"sContactor": "char",
"sContactInfo": "char",
}
t_SZ_300792ExtendFields = {
"usExpirationDays": "T_U16",
"ucExpirationType": "T_U8",
}
t_OrderQueueHead = {
"nItem": "T_I32",
}
t_OrderQueueItem = {
"nTime": "T_I32",
"nSide": "T_I32",
"nPrice": "T_I32",
"nOrders": "T_I32",
"nABItems": "T_I32",
"nABVolume": "T_I32",
}
t_SZ_StockOrderQueue = {
"tHead": "T_OrderQueueHead",
"tItem": "T_OrderQueueItem",
}
t_SZ_StockIndex = {
"nTime": "T_I32",
"nOpenIndex": "T_I32",
"nHighIndex": "T_I32",
"nLowIndex": "T_I32",
"nLastIndex": "T_I32",
"iTotalVolume": "T_I64",
"iTurnover": "T_I64",
"nPreCloseIndex": "T_I32",
}
t_SZ_Kline = {
"uDay": "T_U32",
"nTime": "T_I32",
"nPreClose": "T_I32",
"nValOpen": "T_I32",
"nValHigh": "T_I32",
"nValLow": "T_I32",
"nValClose": "T_I32",
"i64Volume": "T_I64",
"i64ValTotal": "T_I64",
"i64TotalVol": "T_I64",
"i64TotalTurnOver": "T_I64",
"nTurover": "T_I32",
"nValIncrease": "T_I32",
}
t_SZ_StockMarketDataLF = {
"nTime": "T_I32",
"uPreClose": "T_U32",
"uOpen": "T_U32",
"uHigh": "T_U32",
"uLow": "T_U32",
"uMatch": "T_U32",
"uAskPrice": "T_U32",
"uAskVol": "T_U32",
"uBidPrice": "T_U32",
"uBidVol": "T_U32",
"uNumTrades": "T_U32",
"iVolume": "T_I64",
"iTurnover": "T_I64",
"iTotalBidVol": "T_I64",
"iTotalAskVol": "T_I64",
"uWeightedAvgBidPrice": "T_U32",
"uWeightedAvgAskPrice": "T_U32",
"nIOPV": "T_I32",
"nYieldToMaturity": "T_I32",
"uHighLimited": "T_U32",
"uLowLimited": "T_U32",
"sPrefix": "char",
"nSyl1": "T_I32",
"nSyl2": "T_I32",
"nSD2": "T_I32",
"sTradingPhraseCode": "char",
"nPreIOPV": "T_I32",
}
t_TickByTickData = {
"cType": "char",
"entrust": "T_SZ_STEPORDER",
"trade": "T_SZ_STEPTRADE",
}
T_SZ_TickByTickData = t_TickByTickData
ErrMsg = {
"channel": "int",
"errcode": "int",
"errstr": "char",
"mktype": "MKtype",
"datatype": "DATAtype",
"usize": "unsigned int",
"codes": "char",
}
|
11465745
|
aa = 0
a = 1
fim = int(input('Enter the number of terms: '))
# Print the first `fim` terms of the Fibonacci sequence.
for n in range(0, fim):
    s = aa + a
    print(s, end=' → ')
    aa = a
    a = s
print()
# Print the even numbers from 0 up to `fim` (inclusive).
s = 0
while s <= fim:
    if s % 2 == 0:
        print(s, end=' → ')
    s = s + 1
#https://pt.stackoverflow.com/q/411542/101
|
11465757
|
from .client import GCSClient
from .data import CollectionDocument, GuestCollectionDocument, MappedCollectionDocument
from .errors import GCSAPIError
from .response import IterableGCSResponse, UnpackingGCSResponse
__all__ = (
"GCSClient",
"CollectionDocument",
"GuestCollectionDocument",
"MappedCollectionDocument",
"GCSAPIError",
"IterableGCSResponse",
"UnpackingGCSResponse",
)
|
11465760
|
import pytest
from BetweenHours import is_between_hours
TEST_INPUTS = [
('12:00:00', '02:00:00', '21:00:00', True, 'sanity 1'),
('12:00:00', '11:10:00', '12:50:00', True, 'sanity 3'),
('12:00:00', '12:00:00', '12:00:00', True, 'exact same date'),
('12:00:00', '12:00:00', '22:00:00', True, 'same as begin date'),
('12:00:00', '10:00:00', '12:00:00', True, 'same as end date'),
('10:00:00', '12:00:00', '20:00:00', False, 'before begin date'),
('15:00:00', '02:00:00', '12:00:00', False, 'after end date'),
]
@pytest.mark.parametrize("value, begin_time, end_time, expected_result, test_title", TEST_INPUTS)
def test_is_between_hours(value, begin_time, end_time, expected_result, test_title):
assert is_between_hours(value, begin_time, end_time) == expected_result, test_title
|
11465773
|
from rest_framework.throttling import AnonRateThrottle
from app.throttling import ConfigurableThrottlingMixin
class AuthAnonRateThrottle(ConfigurableThrottlingMixin, AnonRateThrottle):
"""Throttle for any authorization views."""
scope = 'anon-auth'
|
11465805
|
from __future__ import absolute_import, division, print_function, with_statement
from tornado.concurrent import Future
from tornado import gen
from tornado import netutil
from tornado.stack_context import NullContext
from tornado.testing import AsyncTestCase, bind_unused_port, gen_test
from tornado.test.util import unittest, skipIfNonUnix, refusing_port
from microproxy.tornado_ext.iostream import MicroProxyIOStream
from microproxy.tornado_ext.iostream import MicroProxySSLIOStream
from microproxy.protocol.tls import create_src_sslcontext
from microproxy.protocol.tls import create_basic_sslcontext
from OpenSSL import crypto
from OpenSSL import SSL
from service_identity import VerificationError
import errno
import os
import platform
import socket
import sys
try:
from unittest import mock # type: ignore
except ImportError:
import mock # type: ignore
def _server_ssl_options():
cert_file = "microproxy/test/test.crt"
private_key_file = "microproxy/test/test.key"
with open(cert_file, "rb") as fp:
_buffer = fp.read()
ca_root = crypto.load_certificate(crypto.FILETYPE_PEM, _buffer)
with open(private_key_file, "rb") as fp:
_buffer = fp.read()
private_key = crypto.load_privatekey(crypto.FILETYPE_PEM, _buffer)
return create_src_sslcontext(cert=ca_root, priv_key=private_key)
def _client_ssl_options(verify_mode, verify_cb, alpn=None):
ssl_ctx = create_basic_sslcontext()
ssl_ctx.set_verify(verify_mode, verify_cb)
try:
ssl_ctx.set_alpn_protos(alpn or [])
except NotImplementedError:
pass
return ssl_ctx
class TestIOStreamMixin(object):
def _make_server_iostream(self, connection, **kwargs):
raise NotImplementedError()
def _make_client_iostream(self, connection, **kwargs):
raise NotImplementedError()
def make_iostream_pair(self, **kwargs):
listener, port = bind_unused_port()
streams = [None, None]
def accept_callback(connection, address):
streams[0] = self._make_server_iostream(connection, **kwargs)
self.stop()
def connect_callback():
streams[1] = client_stream
self.stop()
netutil.add_accept_handler(listener, accept_callback,
io_loop=self.io_loop)
client_stream = self._make_client_iostream(socket.socket(), **kwargs)
client_stream.connect(('127.0.0.1', port),
callback=connect_callback)
self.wait(condition=lambda: all(streams))
self.io_loop.remove_handler(listener.fileno())
listener.close()
return streams
def test_streaming_callback_with_data_in_buffer(self):
server, client = self.make_iostream_pair()
client.write(b"abcd\r\nefgh")
server.read_until(b"\r\n", self.stop)
data = self.wait()
self.assertEqual(data, b"abcd\r\n")
def closed_callback(chunk):
self.fail()
server.read_until_close(callback=closed_callback,
streaming_callback=self.stop)
# self.io_loop.add_timeout(self.io_loop.time() + 0.01, self.stop)
data = self.wait()
self.assertEqual(data, b"efgh")
server.close()
client.close()
def test_write_zero_bytes(self):
# Attempting to write zero bytes should run the callback without
# going into an infinite loop.
server, client = self.make_iostream_pair()
server.write(b'', callback=self.stop)
self.wait()
server.close()
client.close()
def test_connection_refused(self):
# When a connection is refused, the connect callback should not
# be run. (The kqueue IOLoop used to behave differently from the
# epoll IOLoop in this respect)
cleanup_func, port = refusing_port()
self.addCleanup(cleanup_func)
stream = MicroProxyIOStream(socket.socket(), io_loop=self.io_loop)
self.connect_called = False
def connect_callback():
self.connect_called = True
self.stop()
stream.set_close_callback(self.stop)
stream.connect(("127.0.0.1", port), connect_callback)
self.wait()
self.assertFalse(self.connect_called)
self.assertTrue(isinstance(stream.error, socket.error), stream.error)
if sys.platform != 'cygwin':
_ERRNO_CONNREFUSED = (errno.ECONNREFUSED,)
if hasattr(errno, "WSAECONNREFUSED"):
_ERRNO_CONNREFUSED += (errno.WSAECONNREFUSED,)
# cygwin's errnos don't match those used on native windows python
self.assertTrue(stream.error.args[0] in _ERRNO_CONNREFUSED)
@unittest.skipIf(mock is None, 'mock package not present')
def test_gaierror(self):
# Test that MicroProxyIOStream sets its exc_info on getaddrinfo error.
# It's difficult to reliably trigger a getaddrinfo error;
        # some resolvers won't even return errors for malformed names,
# so we mock it instead. If MicroProxyIOStream changes to call a Resolver
# before sock.connect, the mock target will need to change too.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
stream = MicroProxyIOStream(s, io_loop=self.io_loop)
stream.set_close_callback(self.stop)
with mock.patch('socket.socket.connect',
side_effect=socket.gaierror(errno.EIO, 'boom')):
stream.connect(('localhost', 80), callback=self.stop)
self.wait()
self.assertIsInstance(stream.error, socket.gaierror)
def test_read_callback_error(self):
# Test that MicroProxyIOStream sets its exc_info when a read callback throws
server, client = self.make_iostream_pair()
try:
server.set_close_callback(self.stop)
# with ExpectLog(
# app_log, "(Uncaught exception|Exception in callback)"
# ):
# Clear ExceptionStackContext so MicroProxyIOStream catches error
with NullContext():
server.read_bytes(1, callback=lambda data: 1 / 0)
client.write(b"1")
self.wait()
self.assertTrue(isinstance(server.error, ZeroDivisionError))
finally:
server.close()
client.close()
def test_streaming_callback(self):
server, client = self.make_iostream_pair()
try:
chunks = []
final_called = []
def streaming_callback(data):
chunks.append(data)
self.stop()
def final_callback(data):
self.assertFalse(data)
final_called.append(True)
self.stop()
server.read_bytes(6, callback=final_callback,
streaming_callback=streaming_callback)
client.write(b"1234")
self.wait(condition=lambda: chunks)
client.write(b"5678")
self.wait(condition=lambda: final_called)
self.assertEqual(chunks, [b"1234", b"56"])
# the rest of the last chunk is still in the buffer
server.read_bytes(2, callback=self.stop)
data = self.wait()
self.assertEqual(data, b"78")
finally:
server.close()
client.close()
def test_streaming_until_close(self):
server, client = self.make_iostream_pair()
try:
chunks = []
closed = [False]
def streaming_callback(data):
chunks.append(data)
self.stop()
def close_callback(data):
assert not data, data
closed[0] = True
self.stop()
client.read_until_close(callback=close_callback,
streaming_callback=streaming_callback)
server.write(b"1234")
self.wait(condition=lambda: len(chunks) == 1)
server.write(b"5678", self.stop)
self.wait()
server.close()
self.wait(condition=lambda: closed[0])
self.assertEqual(chunks, [b"1234", b"5678"])
finally:
server.close()
client.close()
def test_streaming_until_close_future(self):
server, client = self.make_iostream_pair()
try:
chunks = []
@gen.coroutine
def client_task():
yield client.read_until_close(streaming_callback=chunks.append)
@gen.coroutine
def server_task():
yield server.write(b"1234")
yield gen.sleep(0.01)
yield server.write(b"5678")
server.close()
@gen.coroutine
def f():
yield [client_task(), server_task()]
self.io_loop.run_sync(f)
self.assertEqual(chunks, [b"1234", b"5678"])
finally:
server.close()
client.close()
def test_delayed_close_callback(self):
# The scenario: Server closes the connection while there is a pending
# read that can be served out of buffered data. The client does not
# run the close_callback as soon as it detects the close, but rather
# defers it until after the buffered read has finished.
server, client = self.make_iostream_pair()
try:
client.set_close_callback(self.stop)
server.write(b"12")
chunks = []
def callback1(data):
chunks.append(data)
client.read_bytes(1, callback2)
server.close()
def callback2(data):
chunks.append(data)
client.read_bytes(1, callback1)
self.wait() # stopped by close_callback
self.assertEqual(chunks, [b"1", b"2"])
finally:
server.close()
client.close()
def test_future_delayed_close_callback(self):
# Same as test_delayed_close_callback, but with the future interface.
server, client = self.make_iostream_pair()
# We can't call make_iostream_pair inside a gen_test function
# because the ioloop is not reentrant.
@gen_test
def f(self):
server.write(b"12")
chunks = []
chunks.append((yield client.read_bytes(1)))
server.close()
chunks.append((yield client.read_bytes(1)))
self.assertEqual(chunks, [b"1", b"2"])
try:
f(self)
finally:
server.close()
client.close()
def test_close_buffered_data(self):
# Similar to the previous test, but with data stored in the OS's
# socket buffers instead of the MicroProxyIOStream's read buffer. Out-of-band
# close notifications must be delayed until all data has been
# drained into the MicroProxyIOStream buffer. (epoll used to use out-of-band
# close events with EPOLLRDHUP, but no longer)
#
# This depends on the read_chunk_size being smaller than the
# OS socket buffer, so make it small.
server, client = self.make_iostream_pair(read_chunk_size=256)
try:
server.write(b"A" * 512)
client.read_bytes(256, self.stop)
data = self.wait()
self.assertEqual(b"A" * 256, data)
server.close()
# Allow the close to propagate to the client side of the
# connection. Using add_callback instead of add_timeout
# doesn't seem to work, even with multiple iterations
self.io_loop.add_timeout(self.io_loop.time() + 0.01, self.stop)
self.wait()
client.read_bytes(256, self.stop)
data = self.wait()
self.assertEqual(b"A" * 256, data)
finally:
server.close()
client.close()
def test_read_until_close_after_close(self):
# Similar to test_delayed_close_callback, but read_until_close takes
# a separate code path so test it separately.
server, client = self.make_iostream_pair()
try:
server.write(b"1234")
server.close()
# Read one byte to make sure the client has received the data.
# It won't run the close callback as long as there is more buffered
# data that could satisfy a later read.
client.read_bytes(1, self.stop)
data = self.wait()
self.assertEqual(data, b"1")
client.read_until_close(self.stop)
data = self.wait()
self.assertEqual(data, b"234")
finally:
server.close()
client.close()
@unittest.skipIf(mock is None, 'mock package not present')
def test_read_until_close_with_error(self):
server, client = self.make_iostream_pair()
try:
with mock.patch('tornado.iostream.BaseIOStream._try_inline_read',
side_effect=IOError('boom')):
with self.assertRaisesRegexp(IOError, 'boom'):
client.read_until_close(self.stop)
finally:
server.close()
client.close()
def test_streaming_read_until_close_after_close(self):
# Same as the preceding test but with a streaming_callback.
# All data should go through the streaming callback,
# and the final read callback just gets an empty string.
server, client = self.make_iostream_pair()
try:
server.write(b"1234")
server.close()
client.read_bytes(1, self.stop)
data = self.wait()
self.assertEqual(data, b"1")
streaming_data = []
client.read_until_close(self.stop,
streaming_callback=streaming_data.append)
data = self.wait()
self.assertEqual(b'', data)
self.assertEqual(b''.join(streaming_data), b"234")
finally:
server.close()
client.close()
def test_large_read_until(self):
# Performance test: read_until used to have a quadratic component
# so a read_until of 4MB would take 8 seconds; now it takes 0.25
# seconds.
server, client = self.make_iostream_pair()
try:
            # This test fails on pypy with ssl. I think it's because
            # pypy's gc moves objects, breaking the
            # "frozen write buffer" assumption.
if (isinstance(server, MicroProxySSLIOStream) and
platform.python_implementation() == 'PyPy'):
raise unittest.SkipTest(
"pypy gc causes problems with openssl")
NUM_KB = 4096
for i in range(NUM_KB):
client.write(b"A" * 1024)
client.write(b"\r\n")
server.read_until(b"\r\n", self.stop)
data = self.wait()
self.assertEqual(len(data), NUM_KB * 1024 + 2)
finally:
server.close()
client.close()
def test_close_callback_with_pending_read(self):
# Regression test for a bug that was introduced in 2.3
# where the MicroProxyIOStream._close_callback would never be called
# if there were pending reads.
OK = b"OK\r\n"
server, client = self.make_iostream_pair()
client.set_close_callback(self.stop)
try:
server.write(OK)
client.read_until(b"\r\n", self.stop)
res = self.wait()
self.assertEqual(res, OK)
server.close()
client.read_until(b"\r\n", lambda x: x)
# If _close_callback (self.stop) is not called,
# an AssertionError: Async operation timed out after 5 seconds
# will be raised.
res = self.wait()
self.assertTrue(res is None)
finally:
server.close()
client.close()
@skipIfNonUnix
def test_inline_read_error(self):
# An error on an inline read is raised without logging (on the
# assumption that it will eventually be noticed or logged further
# up the stack).
#
# This test is posix-only because windows os.close() doesn't work
# on socket FDs, but we can't close the socket object normally
# because we won't get the error we want if the socket knows
# it's closed.
server, client = self.make_iostream_pair()
try:
os.close(server.socket.fileno())
if isinstance(server, MicroProxySSLIOStream):
with self.assertRaises(SSL.SysCallError):
server.read_bytes(1, lambda data: None)
if isinstance(server, MicroProxyIOStream):
with self.assertRaises(socket.error):
server.read_bytes(1, lambda data: None)
finally:
server.close()
client.close()
def test_async_read_error_logging(self):
# Socket errors on asynchronous reads should be logged (but only
# once).
server, client = self.make_iostream_pair()
server.set_close_callback(self.stop)
try:
# Start a read that will be fulfilled asynchronously.
server.read_bytes(1, lambda data: None)
client.write(b'a')
# Stub out read_from_fd to make it fail.
def fake_read_from_fd():
os.close(server.socket.fileno())
server.__class__.read_from_fd(server)
server.read_from_fd = fake_read_from_fd
# This log message is from _handle_read (not read_from_fd).
self.wait()
finally:
server.close()
client.close()
def test_future_close_callback(self):
# Regression test for interaction between the Future read interfaces
# and MicroProxyIOStream._maybe_add_error_listener.
server, client = self.make_iostream_pair()
closed = [False]
def close_callback():
closed[0] = True
self.stop()
server.set_close_callback(close_callback)
try:
client.write(b'a')
future = server.read_bytes(1)
self.io_loop.add_future(future, self.stop)
self.assertEqual(self.wait().result(), b'a')
self.assertFalse(closed[0])
client.close()
self.wait()
self.assertTrue(closed[0])
finally:
server.close()
client.close()
def test_read_bytes_partial(self):
server, client = self.make_iostream_pair()
try:
# Ask for more than is available with partial=True
client.read_bytes(50, self.stop, partial=True)
server.write(b"hello")
data = self.wait()
self.assertEqual(data, b"hello")
# Ask for less than what is available; num_bytes is still
# respected.
client.read_bytes(3, self.stop, partial=True)
server.write(b"world")
data = self.wait()
self.assertEqual(data, b"wor")
# Partial reads won't return an empty string, but read_bytes(0)
# will.
client.read_bytes(0, self.stop, partial=True)
data = self.wait()
self.assertEqual(data, b'')
finally:
server.close()
client.close()
def test_read_until_max_bytes(self):
server, client = self.make_iostream_pair()
client.set_close_callback(lambda: self.stop("closed"))
try:
# Extra room under the limit
client.read_until(b"def", self.stop, max_bytes=50)
server.write(b"abcdef")
data = self.wait()
self.assertEqual(data, b"abcdef")
# Just enough space
client.read_until(b"def", self.stop, max_bytes=6)
server.write(b"abcdef")
data = self.wait()
self.assertEqual(data, b"abcdef")
            # Not enough space: the delimiter is not found within max_bytes,
            # so all we can do is log a warning and close the connection.
client.read_until(b"def", self.stop, max_bytes=5)
server.write(b"123456")
data = self.wait()
self.assertEqual(data, "closed")
finally:
server.close()
client.close()
def test_read_until_max_bytes_inline(self):
server, client = self.make_iostream_pair()
client.set_close_callback(lambda: self.stop("closed"))
try:
# Similar to the error case in the previous test, but the
# server writes first so client reads are satisfied
# inline. For consistency with the out-of-line case, we
# do not raise the error synchronously.
server.write(b"123456")
client.read_until(b"def", self.stop, max_bytes=5)
data = self.wait()
self.assertEqual(data, "closed")
finally:
server.close()
client.close()
def test_read_until_max_bytes_ignores_extra(self):
server, client = self.make_iostream_pair()
client.set_close_callback(lambda: self.stop("closed"))
try:
# Even though data that matches arrives the same packet that
# puts us over the limit, we fail the request because it was not
# found within the limit.
server.write(b"abcdef")
client.read_until(b"def", self.stop, max_bytes=5)
data = self.wait()
self.assertEqual(data, "closed")
finally:
server.close()
client.close()
def test_read_until_regex_max_bytes(self):
server, client = self.make_iostream_pair()
client.set_close_callback(lambda: self.stop("closed"))
try:
# Extra room under the limit
client.read_until_regex(b"def", self.stop, max_bytes=50)
server.write(b"abcdef")
data = self.wait()
self.assertEqual(data, b"abcdef")
# Just enough space
client.read_until_regex(b"def", self.stop, max_bytes=6)
server.write(b"abcdef")
data = self.wait()
self.assertEqual(data, b"abcdef")
            # Not enough space: the delimiter is not found within max_bytes,
            # so all we can do is log a warning and close the connection.
client.read_until_regex(b"def", self.stop, max_bytes=5)
server.write(b"123456")
data = self.wait()
self.assertEqual(data, "closed")
finally:
server.close()
client.close()
def test_read_until_regex_max_bytes_inline(self):
server, client = self.make_iostream_pair()
client.set_close_callback(lambda: self.stop("closed"))
try:
# Similar to the error case in the previous test, but the
# server writes first so client reads are satisfied
# inline. For consistency with the out-of-line case, we
# do not raise the error synchronously.
server.write(b"123456")
client.read_until_regex(b"def", self.stop, max_bytes=5)
data = self.wait()
self.assertEqual(data, "closed")
finally:
server.close()
client.close()
def test_read_until_regex_max_bytes_ignores_extra(self):
server, client = self.make_iostream_pair()
client.set_close_callback(lambda: self.stop("closed"))
try:
# Even though data that matches arrives the same packet that
# puts us over the limit, we fail the request because it was not
# found within the limit.
server.write(b"abcdef")
client.read_until_regex(b"def", self.stop, max_bytes=5)
data = self.wait()
self.assertEqual(data, "closed")
finally:
server.close()
client.close()
def test_small_reads_from_large_buffer(self):
# 10KB buffer size, 100KB available to read.
# Read 1KB at a time and make sure that the buffer is not eagerly
# filled.
server, client = self.make_iostream_pair(max_buffer_size=10 * 1024)
try:
server.write(b"a" * 1024 * 100)
for i in range(100):
client.read_bytes(1024, self.stop)
data = self.wait()
self.assertEqual(data, b"a" * 1024)
finally:
server.close()
client.close()
def test_small_read_untils_from_large_buffer(self):
# 10KB buffer size, 100KB available to read.
# Read 1KB at a time and make sure that the buffer is not eagerly
# filled.
server, client = self.make_iostream_pair(max_buffer_size=10 * 1024)
try:
server.write((b"a" * 1023 + b"\n") * 100)
for i in range(100):
client.read_until(b"\n", self.stop, max_bytes=4096)
data = self.wait()
self.assertEqual(data, b"a" * 1023 + b"\n")
finally:
server.close()
client.close()
def test_flow_control(self):
MB = 1024 * 1024
server, client = self.make_iostream_pair(max_buffer_size=5 * MB)
try:
# Client writes more than the server will accept.
client.write(b"a" * 10 * MB)
# The server pauses while reading.
server.read_bytes(MB, self.stop)
self.wait()
self.io_loop.call_later(0.1, self.stop)
self.wait()
# The client's writes have been blocked; the server can
# continue to read gradually.
for i in range(9):
server.read_bytes(MB, self.stop)
self.wait()
finally:
server.close()
client.close()
class TestIOStream(TestIOStreamMixin, AsyncTestCase):
def _make_server_iostream(self, connection, **kwargs):
return MicroProxyIOStream(connection, **kwargs)
def _make_client_iostream(self, connection, **kwargs):
return MicroProxyIOStream(connection, **kwargs)
class TestSSLIOStream(TestIOStreamMixin, AsyncTestCase):
def _make_server_iostream(self, connection, **kwargs):
dest_context = _server_ssl_options()
ssl_sock = SSL.Connection(dest_context,
connection)
ssl_sock.set_accept_state()
return MicroProxySSLIOStream(
ssl_sock, io_loop=self.io_loop, **kwargs)
def _make_client_iostream(self, connection, **kwargs):
def verify_cb(conn, x509, err_num, err_depth, err_code):
return True
dest_context = _client_ssl_options(SSL.VERIFY_NONE, verify_cb)
return MicroProxySSLIOStream(
connection, io_loop=self.io_loop,
ssl_options=dest_context, **kwargs)
class TestIOStreamStartTLS(AsyncTestCase):
def setUp(self):
try:
super(TestIOStreamStartTLS, self).setUp()
self.listener, self.port = bind_unused_port()
self.server_stream = None
self.server_accepted = Future()
netutil.add_accept_handler(self.listener, self.accept)
self.client_stream = MicroProxyIOStream(socket.socket())
self.io_loop.add_future(self.client_stream.connect(
('127.0.0.1', self.port)), self.stop)
self.wait()
self.io_loop.add_future(self.server_accepted, self.stop)
self.wait()
except Exception as e:
print(e)
raise
def tearDown(self):
if self.server_stream is not None:
self.server_stream.close()
if self.client_stream is not None:
self.client_stream.close()
self.listener.close()
super(TestIOStreamStartTLS, self).tearDown()
def accept(self, connection, address):
if self.server_stream is not None:
self.fail("should only get one connection")
self.server_stream = MicroProxyIOStream(connection)
self.server_accepted.set_result(None)
@gen.coroutine
def client_send_line(self, line):
self.client_stream.write(line)
recv_line = yield self.server_stream.read_until(b"\r\n")
self.assertEqual(line, recv_line)
@gen.coroutine
def server_send_line(self, line):
self.server_stream.write(line)
recv_line = yield self.client_stream.read_until(b"\r\n")
self.assertEqual(line, recv_line)
def client_start_tls(self, ssl_options=None, server_hostname=None):
client_stream = self.client_stream
self.client_stream = None
return client_stream.start_tls(False, ssl_options, server_hostname)
def server_start_tls(self, ssl_options=None):
server_stream = self.server_stream
self.server_stream = None
return server_stream.start_tls(True, ssl_options)
@gen_test
def test_start_tls_smtp(self):
def verify_cb(conn, x509, err_num, err_depth, err_code):
return True
# This flow is simplified from RFC 3207 section 5.
# We don't really need all of this, but it helps to make sure
# that after realistic back-and-forth traffic the buffers end up
# in a sane state.
yield self.server_send_line(b"220 mail.example.com ready\r\n")
yield self.client_send_line(b"EHLO mail.example.com\r\n")
yield self.server_send_line(b"250-mail.example.com welcome\r\n")
yield self.server_send_line(b"250 STARTTLS\r\n")
yield self.client_send_line(b"STARTTLS\r\n")
yield self.server_send_line(b"220 Go ahead\r\n")
client_future = self.client_start_tls(
_client_ssl_options(SSL.VERIFY_NONE, verify_cb))
server_future = self.server_start_tls(_server_ssl_options())
self.client_stream = yield client_future
self.server_stream = yield server_future
self.assertTrue(isinstance(self.client_stream, MicroProxySSLIOStream))
self.assertTrue(isinstance(self.server_stream, MicroProxySSLIOStream))
yield self.client_send_line(b"EHLO mail.example.com\r\n")
yield self.server_send_line(b"250 mail.example.com welcome\r\n")
@gen_test
def test_handshake_fail(self):
def verify_cb(conn, x509, err_num, err_depth, err_code):
return False
server_future = self.server_start_tls(_server_ssl_options())
client_future = self.client_start_tls(
_client_ssl_options(SSL.VERIFY_PEER, verify_cb))
with self.assertRaises(SSL.Error):
yield client_future
with self.assertRaises((SSL.Error, socket.error)):
yield server_future
@gen_test
def test_check_hostname(self):
def verify_cb(conn, x509, err_num, err_depth, err_code):
return True
server_future = self.server_start_tls(_server_ssl_options())
client_future = self.client_start_tls(
_client_ssl_options(SSL.VERIFY_PEER, verify_cb),
server_hostname=b'localhost')
with self.assertRaises(VerificationError):
yield client_future
# TODO: server will not raise.
# with self.assertRaises(Exception):
yield server_future
|
11465826
|
import json
from os.path import isdir
from os import mkdir
from pathlib import Path
from gitlab_api_client import GitlabApi
config_location = Path.home() / ".gitlab-client.json"
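# Based on the keys used below, the JSON config file ends up shaped roughly
# like this (illustrative values only):
# {
#     "project_dir": "/home/user/projects",
#     "gitlab_instances": {
#         "my-gitlab": {
#             "url": "https://gitlab.example.com",
#             "token": "<access token>",
#             "checkout_url": "ssh://git@gitlab.example.com"
#         }
#     }
# }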
def get_gitlab_api_client(gitlab_instance) -> GitlabApi:
config = __get_gitlab_instance_config(gitlab_instance)
return GitlabApi(config["url"], config["token"], config["checkout_url"])
def get_project_dir_location() -> str:
config = __load_config()
project_dir_key = 'project_dir'
if project_dir_key not in config:
provided_dir = input("Please provide directory for project checkout: ").lstrip().rstrip().rstrip("/")
if not provided_dir.startswith("/"):
provided_dir = Path.home() / provided_dir
else:
provided_dir = Path(provided_dir)
print(f"Saving {provided_dir} as project checkout directory...")
config[project_dir_key] = str(provided_dir.absolute())
__save_config(config)
if not isdir(config[project_dir_key]):
mkdir(config[project_dir_key])
return config[project_dir_key]
def __get_gitlab_instance_config(gitlab_instance):
config = __load_config()
gitlab_instances_key = 'gitlab_instances'
if gitlab_instances_key not in config:
config[gitlab_instances_key] = {}
if gitlab_instance not in config[gitlab_instances_key]:
provided_url = input("Please provide url to gitlab: ").lstrip().rstrip().rstrip("/")
provided_token = input("Please provide access token to gitlab: ").lstrip().rstrip()
config[gitlab_instances_key][gitlab_instance] = {
"url": provided_url,
"token": provided_token
}
__save_config(config)
if "checkout_url" not in config[gitlab_instances_key][gitlab_instance]:
default_url = config[gitlab_instances_key][gitlab_instance]["url"]
default_url = default_url.replace("https://", "").replace("http://", "")
default_url = f"ssh://git@{default_url}"
checkout_url = input(f"Please provide url base for checkout [{default_url}]: ").lstrip().rstrip().rstrip("/")
if not checkout_url:
checkout_url = default_url
config[gitlab_instances_key][gitlab_instance]["checkout_url"] = checkout_url
__save_config(config)
return config[gitlab_instances_key][gitlab_instance]
def __save_config(config):
config_location.write_text(json.dumps(config, indent=4) + "\n")
def __load_config():
__ensure_config_file_exists()
return json.loads(config_location.read_text())
def __ensure_config_file_exists():
if not config_location.is_file():
config_location.write_text('{}\n')
|
11465887
|
import this
from flask import Flask
app = Flask(__name__)
rot13 = str.maketrans(
"ABCDEFGHIJKLMabcdefghijklmNOPQRSTUVWXYZnopqrstuvwxyz",
"NOPQRSTUVWXYZnopqrstuvwxyzABCDEFGHIJKLMabcdefghijklm"
)
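# The `this` module ships the Zen of Python ROT13-encoded in `this.s`;
# the translation table above decodes it back into readable text.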
def simple_html(body):
return f"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Book Example</title>
</head>
<body>
{body}
</body>
</html>
"""
@app.route('/')
def hello():
return simple_html("<a href=/zen>Python Zen</a>")
@app.route('/zen')
def zen():
return simple_html(
"<br>".join(this.s.translate(rot13).split("\n"))
)
if __name__ == '__main__':
app.run()
|
11465892
|
from envs.IsaacGym import *
from elegantrl.demo import *
"""
Humanoid
GPU 4
GPU 5 if_use_cri_target = True
"""
def demo_isaac_on_policy():
args = Arguments(if_on_policy=True) # hyper-parameters of on-policy is different from off-policy
args.agent = AgentPPO()
args.random_seed += 1943
if_train_ant = 0
if if_train_ant:
# env = build_env('IsaacOneEnvAnt', if_print=True, device_id=0, env_num=1)
args.eval_env = 'IsaacOneEnvAnt'
        args.agent.if_use_cri_target = True
args.eval_gpu_id = 7
# env = build_env('IsaacVecEnvAnt', if_print=True, device_id=0, env_num=2)
        args.env = 'IsaacVecEnvAnt'
args.env_num = 1024
args.max_step = 1000
args.state_dim = 60
args.action_dim = 8
args.if_discrete = False
args.target_return = 4000
args.agent.lambda_entropy = 0.05
args.agent.lambda_gae_adv = 0.97
args.learning_rate = 2 ** -15
args.if_per_or_gae = True
args.break_step = int(8e7)
args.reward_scale = 2 ** -2 # (-50) 0 ~ 2500 (3340)
args.repeat_times = 2 ** 3
args.net_dim = 2 ** 9
args.batch_size = args.net_dim * 2 ** 3
args.target_step = 2 ** 10
args.break_step = int(2e7)
args.if_allow_break = False
if_train_humanoid = 1
if if_train_humanoid:
# env = build_env('IsaacOneEnvHumanoid', if_print=True, device_id=0, env_num=1)
args.eval_env = 'IsaacOneEnvHumanoid'
args.eval_gpu_id = 7
# env = build_env('IsaacVecEnvHumanoid', if_print=True, device_id=0, env_num=2)
        args.env = 'IsaacVecEnvHumanoid'
args.env_num = 1024
args.max_step = 1000
args.state_dim = 108
args.action_dim = 21
args.if_discrete = False
args.target_return = 7000
args.agent.lambda_entropy = 0.02
args.agent.lambda_gae_adv = 0.97
args.learning_rate = 2 ** -14
args.if_per_or_gae = True
args.break_step = int(8e7)
args.reward_scale = 2 ** -1
args.repeat_times = 2 ** 3
args.net_dim = 2 ** 9
args.batch_size = args.net_dim * 2 ** 4
args.target_step = 2 ** 10
args.break_step = int(2e8)
args.if_allow_break = False
args.init_before_training()
# train_and_evaluate(args)
args.learner_gpus = (5, )
args.workers_gpus = args.learner_gpus
args.worker_num = 1
train_and_evaluate_mp(args)
if __name__ == '__main__':
demo_isaac_on_policy()
|
11465931
|
import os
import sys
import torch
import shutil
import random
import numpy as np
from tensorboardX import SummaryWriter
from Datasets.dataset_shapenetpart import ShapeNetPartDataset
from Datasets.dataset_samplers import RandomSampler, Sampler
from Models.model_PN2 import PointNet2
from Models.model_mRes import mRes
from Models.model_convPN import convPN
from Configs.shapenetpart_options import ShapeNetPartOptions
from Utils.evaluation_metrics import compute_performance_metrics
np.seterr(divide='ignore', invalid='ignore')
import pdb
def compute_loss(pred, target):
num_batch, num_points, num_classes = pred.size()
pred = pred.contiguous().view(num_batch * num_points, num_classes)
target = target.view(num_batch * num_points)
loss = torch.nn.functional.cross_entropy(pred, target)
return loss
def train_shapenetpart(opt):
# Creating the device
if opt.use_GPU:
device = torch.device("cuda:" + str(opt.device_idx) if torch.cuda.is_available() else "cpu")
else:
device = torch.device("cpu")
print('Network loaded on the device: ', device)
# Colored console output
green = lambda x: '\033[92m' + x + '\033[0m'
blue = lambda x: '\033[94m' + x + '\033[0m'
# Set up folder directories
log_dirname = os.path.join(opt.logdir, opt.name)
params_filename = os.path.join(opt.outdir, '%s_params.pth' % (opt.name))
model_filename = os.path.join(opt.outdir, '%s_model.pth' % (opt.name))
desc_filename = os.path.join(opt.outdir, '%s_description.txt' % (opt.name))
if os.path.exists(log_dirname) or os.path.exists(model_filename):
response = input('A training run named "%s" already exists, overwrite? (y/n) ' % (opt.name))
if response == 'y':
if os.path.exists(log_dirname):
shutil.rmtree(os.path.join(opt.logdir, opt.name))
else:
sys.exit()
if not os.path.isdir(opt.outdir):
os.makedirs(opt.outdir)
# Set up the seed
if opt.seed < 0:
opt.seed = random.randint(1, 10000)
print("Random Seed: %d" % (opt.seed))
random.seed(opt.seed)
np.random.seed(opt.seed)
torch.manual_seed(opt.seed)
# Create train and test dataset loaders
train_dataset = ShapeNetPartDataset(root=opt.indir, seed=opt.seed, num_points=opt.num_points_training, center_points=opt.center_points,
use_pca=opt.use_pca, mode='training')
train_datasampler = RandomSampler(data_source=train_dataset, seed=opt.seed, identical_epochs=opt.identical_epochs)
train_dataloader = torch.utils.data.DataLoader(train_dataset, sampler=train_datasampler, batch_size=opt.batch_size, num_workers=int(opt.workers))
print('training set: %d pointclouds (in %d minibatches)' % (len(train_datasampler), len(train_dataloader)))
if opt.validation_batch == True:
test_dataset = ShapeNetPartDataset(root=opt.indir, seed=opt.seed, num_points=opt.num_points_training, center_points=opt.center_points,
use_pca=opt.use_pca, mode='validation')
test_datasampler = Sampler(data_source=test_dataset)
test_dataloader = torch.utils.data.DataLoader(test_dataset, sampler=test_datasampler, batch_size=opt.batch_size, num_workers=int(opt.workers))
print('test set: %d pointclouds (in %d minibatches)' % (len(test_datasampler), len(test_dataloader)))
list_num_parts = list(train_dataset.dictionary_categories.values())
# Creating the network
if opt.network == 'PointNet++':
# PN++
network = PointNet2(opt.batch_size, opt.nb_subsampled_points, opt.nb_neighbours, opt.sampling_method, opt.patch_radius, opt.in_channel_x_complete,
opt.in_channel, opt.list_dim_channels_encoding1, opt.use_x, opt.pooling_operation, opt.list_dim_channels_encoding2,
opt.intermediate_size_fc, opt.dropout_rate, opt.nb_interpolating_points, opt.use_x_complete_unsampled,opt.list_dim_channels_decoding,
opt.num_classes, opt.num_parts).to(device)
elif (opt.network == 'mRes') or (opt.network == 'mResX'):
# mRes
network = mRes(opt.batch_size, opt.nb_subsampled_points, opt.nb_neighbours, opt.sampling_method, opt.patch_radius, opt.in_channel_x_complete, opt.in_channel,
opt.list_dim_channels_encoding1, opt.use_x, opt.cross_connection, opt.pooling_operation, opt.list_dim_channels_encoding2, opt.intermediate_size_fc,
opt.dropout_rate, opt.nb_interpolating_points, opt.use_x_complete_unsampled, opt.list_dim_channels_decoding, opt.num_classes, opt.num_parts,
opt.dropout_rate_cross, opt.nb_interpolating_points_encoding).to(device)
network.add_cross_connection(opt.batch_size, opt.nb_interpolating_points_crossconnection)
network = network.to(device)
elif (opt.network == 'convPN') or (opt.network == 'deepConvPN'):
# convPN
network = convPN(opt.batch_size, opt.nb_subsampled_points, opt.nb_neighbours, opt.sampling_method, opt.patch_radius, opt.in_channel_x_complete, opt.in_channel,
opt.list_dim_channels_encoding, opt.use_x, opt.use_crosslinks, opt.use_reslinks, opt.sequence, opt.pooling_operation, opt.residuallinks_input,
opt.residuallinks_output, opt.intermediate_size_fc, opt.dropout_rate, opt.nb_interpolating_points, opt.use_x_complete_unsampled,
opt.list_dim_channels_decoding, opt.num_classes, opt.num_parts, opt.blockout_rate, test=False).to(device)
if opt.refine != '':
network.load_state_dict(torch.load(opt.refine, map_location=lambda storage, loc: storage))
num_parameters = np.sum([np.prod(parameter.shape) for parameter in network.parameters()])
print('Number of parameters for ' + opt.network + ': ' + str(num_parameters))
# Creating the tensorboardX writers
train_writer = SummaryWriter(os.path.join(log_dirname, 'train'))
if opt.validation_batch == True:
test_writer = SummaryWriter(os.path.join(log_dirname, 'validation'))
# Creating the optimizer
optimizer = torch.optim.Adam(network.parameters(), lr=opt.lr, betas=(0.9,0.999), eps=1e-8, weight_decay=opt.weight_decay, amsgrad=True)
# Saving parameters
torch.save(opt, params_filename)
# Saving description
with open(desc_filename, 'w+') as text_file:
print(opt.desc, file=text_file)
# Starting the training
print('Starting the training')
for epoch in range(opt.nepoch):
# Updating the learning rate and the batch norm decay
for param_group in optimizer.param_groups:
param_group['lr'] = max(opt.lr * opt.decay_rate**(epoch // opt.milestone_step), opt.lr_clip)
print('Learning rate: ' + str(optimizer.param_groups[0]['lr']))
bn_decay = min(1 - opt.bn_init_decay * opt.bn_decay_decay_rate**(epoch // opt.bn_decay_decay_step), opt.bn_decay_clip)
input_decay = 1 - bn_decay
print('Batchnorm decay: ' + str(bn_decay))
# Setting to training mode
network.train()
if (opt.network == 'convPN') or (opt.network == 'deepConvPN'):
network.train_custom()
# Initializing the metrics variable
loss_training = 0
cpt_rolling_average = 0
accuracy_training = 0
iou_training = 0
intersection_training = np.zeros([opt.num_parts])
union_training = np.zeros([opt.num_parts])
# Iterating over the batches
for train_batchind, data in enumerate(train_dataloader, 0):
# Rebooting the optimizer gradients
optimizer.zero_grad()
# Getting the input tensors from the data list
input_tensor = data[0].type(torch.FloatTensor).to(device)
vertices = input_tensor[:,:,:3]
normals = input_tensor[:,:,3:]
labels_cat = data[1].type(torch.LongTensor).to(device)
labels_seg = data[2].type(torch.LongTensor).to(device)
parts_tensor = data[3].type(torch.FloatTensor).to(device)
zero_tensor = torch.zeros([1]).to(device)
one_tensor = torch.ones([1]).to(device)
# Forward pass
pred = network(vertices, normals, labels_cat, bn_decay_value=input_decay)
loss = compute_loss(pred=pred, target=labels_seg)
loss_training = loss_training + loss.detach().cpu().item()
batch_accuracy, batch_iou, batch_intersection, batch_union = compute_performance_metrics(labels_cat, labels_seg, pred, None, parts_tensor, zero_tensor, one_tensor)
cpt_rolling_average += 1
accuracy_training += batch_accuracy
iou_training += batch_iou
intersection_training = intersection_training + batch_intersection.detach().cpu().numpy()
union_training = union_training + batch_union.detach().cpu().numpy()
# Backward pass
loss.backward()
optimizer.step()
if (train_batchind % opt.nb_rolling_iterations == 0) or (train_batchind == len(train_dataloader) - 1):
loss_training /= cpt_rolling_average
accuracy_training /= cpt_rolling_average
iou_training /= cpt_rolling_average
partiou_training = np.nanmean(intersection_training / union_training)
print('[%s %d - %d / %d] %s Loss: %f' % (opt.name, epoch, train_batchind+1, len(train_dataloader), green('training'), loss_training))
print('[%s %d - %d / %d] %s Accuracy: %f' % (opt.name, epoch, train_batchind+1, len(train_dataloader), green('training'), accuracy_training))
print('[%s %d - %d / %d] %s IoU: %f' % (opt.name, epoch, train_batchind+1, len(train_dataloader), green('training'), iou_training))
print('[%s %d - %d / %d] %s PartIoU: %f' % (opt.name, epoch, train_batchind+1, len(train_dataloader), green('training'), partiou_training))
train_writer.add_scalar('Loss', loss_training, len(train_dataloader)*epoch + (train_batchind+1))
train_writer.add_scalar('Accuracy', accuracy_training, len(train_dataloader)*epoch + (train_batchind+1))
train_writer.add_scalar('IoU', iou_training, len(train_dataloader)*epoch + (train_batchind+1))
train_writer.add_scalar('PartIoU', partiou_training, len(train_dataloader)*epoch + (train_batchind+1))
# Rebooting the rolling variables
cpt_rolling_average = 0
loss_training = 0
accuracy_training = 0
iou_training = 0
intersection_training = 0
union_training = 0
if (opt.validation_batch == True) and ((epoch % opt.nb_rolling_iterations == 0) or (epoch == opt.nepoch - 1)):
loss_validation, accuracy_validation, iou_validation, partiou_validation = validation_epoch(opt, network, test_dataloader, list_num_parts, device)
print('[%s %d - %d / %d] %s Loss: %f' % (opt.name, epoch, len(train_dataloader), len(train_dataloader), blue('validation'), loss_validation))
print('[%s %d - %d / %d] %s Accuracy: %f' % (opt.name, epoch, len(train_dataloader), len(train_dataloader), blue('validation'), accuracy_validation))
print('[%s %d - %d / %d] %s IoU: %f' % (opt.name, epoch, len(train_dataloader), len(train_dataloader), blue('validation'), iou_validation))
print('[%s %d - %d / %d] %s PartIoU: %f' % (opt.name, epoch, len(train_dataloader), len(train_dataloader), blue('validation'), partiou_validation))
test_writer.add_scalar('Loss', loss_validation, len(train_dataloader) * (epoch + 1))
test_writer.add_scalar('Accuracy', accuracy_validation, len(train_dataloader) * (epoch + 1))
test_writer.add_scalar('IoU', iou_validation, len(train_dataloader) * (epoch + 1))
test_writer.add_scalar('PartIoU', partiou_validation, len(train_dataloader) * (epoch + 1))
if (epoch % opt.nb_rolling_iterations == 0) or (epoch == opt.nepoch - 1):
torch.save(network.state_dict(), os.path.join(opt.outdir, '%s_model_%d.pth' % (opt.name, epoch)))
def validation_epoch(opt, network, test_dataloader, list_num_parts, device):
    # Setting to evaluation mode
network.eval()
if (opt.network == 'convPN') or (opt.network == 'deepConvPN'):
network.eval_custom()
# Initializing the metrics variable
loss_validation = 0
accuracy_validation = 0
iou_validation = 0
intersection_validation = np.zeros([opt.num_parts])
union_validation = np.zeros([opt.num_parts])
# Iterating over the batches
for _, data in enumerate(test_dataloader, 0):
input_tensor = data[0].type(torch.FloatTensor).to(device)
vertices = input_tensor[:,:,:3]
normals = input_tensor[:,:,3:]
labels_cat = data[1].type(torch.LongTensor).to(device)
labels_seg = data[2].type(torch.LongTensor).to(device)
parts_tensor = data[3].type(torch.FloatTensor).to(device)
zero_tensor = torch.zeros([1]).to(device)
one_tensor = torch.ones([1]).to(device)
with torch.no_grad():
pred = network(vertices, normals, labels_cat, bn_decay_value=None)
loss = compute_loss(pred=pred, target=labels_seg)
loss_validation = loss_validation + loss.detach().cpu().item()
batch_accuracy, batch_iou, batch_intersection, batch_union = compute_performance_metrics(labels_cat, labels_seg, pred, None, parts_tensor, zero_tensor, one_tensor)
accuracy_validation += batch_accuracy
iou_validation += batch_iou
intersection_validation = intersection_validation + batch_intersection.detach().cpu().numpy()
union_validation = union_validation + batch_union.detach().cpu().numpy()
loss_validation /= len(test_dataloader)
accuracy_validation /= len(test_dataloader)
iou_validation /= len(test_dataloader)
partiou_validation = np.nanmean(intersection_validation / union_validation)
return loss_validation, accuracy_validation, iou_validation, partiou_validation
if __name__ == '__main__':
configs = ShapeNetPartOptions()
opt = configs.parse()
train_shapenetpart(opt)
|
11465978
|
from __future__ import print_function
from __future__ import division
import math
import torch
import torch.nn as nn
from torch.nn import Parameter
from torchkit.head.localfc.common import calc_logits
class CosFace(nn.Module):
""" Implement of CosFace (https://arxiv.org/abs/1801.09414)
"""
def __init__(self,
in_features,
out_features,
scale=64.0,
margin=0.40):
""" Args:
in_features: size of each input features
out_features: size of each output features
scale: norm of input feature
margin: margin
"""
super(CosFace, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.scale = scale
self.margin = margin
self.kernel = Parameter(torch.FloatTensor(in_features, out_features))
# nn.init.xavier_uniform_(self.kernel)
nn.init.normal_(self.kernel, std=0.01)
# init.kaiming_uniform_(self.kernel, a=math.sqrt(5))
def forward(self, embeddings, labels):
cos_theta, origin_cos = calc_logits(embeddings, self.kernel)
target_logit = cos_theta[torch.arange(0, embeddings.size(0)), labels].view(-1, 1)
final_target_logit = target_logit - self.margin
cos_theta.scatter_(1, labels.view(-1, 1).long(), final_target_logit)
output = cos_theta * self.scale
return output, origin_cos * self.scale
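# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows how this margin head is typically driven during training: embeddings from a
# backbone go in together with the integer class labels, scaled logits come out and
# feed a plain cross-entropy loss. All sizes below are made up.
if __name__ == "__main__":
    head = CosFace(in_features=128, out_features=10, scale=64.0, margin=0.40)
    embeddings = torch.randn(4, 128)       # stand-in for backbone(images)
    labels = torch.randint(0, 10, (4,))
    logits, _ = head(embeddings, labels)   # margin subtracted on the target class only
    loss = torch.nn.functional.cross_entropy(logits, labels)
    print(logits.shape, loss.item())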
|
11466025
|
import numpy as np
import gym
#*******************************************************************
# FIND CAVE TASK
#*******************************************************************
# custom action wrapper for complete GAIL agent for MineRL
class ActionShaping_FindCave(gym.ActionWrapper):
def __init__(self, env, camera_angle=10, always_attack=False):
super().__init__(env)
self.camera_angle = camera_angle
self.always_attack = always_attack
self._actions = [
[('attack', 1)], #0
[('forward', 1)], #1
[('forward', 1), ('jump', 1)], #2
[('camera', [-self.camera_angle, 0])], #3
[('camera', [self.camera_angle, 0])], #4
[('camera', [0, self.camera_angle])], #5
[('camera', [0, -self.camera_angle])], #6
[('back', 1)], #7
[('left', 1)], #8
[('right', 1)], #9
[('jump', 1)], #10
#[('equip',11), ('use', 1)],
[('forward', 1), ('attack', 1)], #11
]
self.actions = []
for actions in self._actions:
act = self.env.action_space.noop()
for a, v in actions:
act[a] = v
if self.always_attack:
act['attack'] = 1
self.actions.append(act)
# add no-op action
act = self.env.action_space.noop()
self.actions.append(act)
self.action_space = gym.spaces.Discrete(len(self.actions))
def action(self, action):
return self.actions[action]
def processed_actions_to_wrapper_actions_FindCave(dataset_actions, camera_margin=5):
"""
Turn a batch of actions from dataset (`batch_iter`) to a numpy
array that corresponds to batch of actions of ActionShaping wrapper (_actions).
Camera margin sets the threshold what is considered "moving camera".
Note: Hardcoded to work for actions in ActionShaping._actions, with "intuitive"
ordering of actions.
If you change ActionShaping._actions, remember to change this!
    Array elements are integers corresponding to actions; rows with no matching
    discrete action are mapped to the appended no-op action (index 12 here).
"""
# There are dummy dimensions of shape one
camera_actions = dataset_actions[:,10:].astype(np.float32)
attack_actions = dataset_actions[:,0].astype(np.float32)
forward_actions = dataset_actions[:,3].astype(np.float32)
jump_actions = dataset_actions[:,4].astype(np.float32)
back_actions = dataset_actions[:,1].astype(np.float32)
left_actions = dataset_actions[:,5].astype(np.float32)
right_actions = dataset_actions[:,6].astype(np.float32)
equip_actions = dataset_actions[:,2]
use_actions = dataset_actions[:,9].astype(np.float32)
sneak_actions = dataset_actions[:,7].astype(np.float32)
sprint_actions = dataset_actions[:,8].astype(np.float32)
batch_size = len(camera_actions)
actions = np.zeros((batch_size,), dtype=int)
for i in range(len(camera_actions)):
# Moving camera is most important (horizontal first)
if camera_actions[i][0] < -camera_margin:
actions[i] = 3
elif camera_actions[i][0] > camera_margin:
actions[i] = 4
elif camera_actions[i][1] > camera_margin:
actions[i] = 5
elif camera_actions[i][1] < -camera_margin:
actions[i] = 6
elif forward_actions[i] == 1:
if jump_actions[i] == 1:
actions[i] = 2
elif attack_actions[i] == 1:
actions[i] = 11
else:
actions[i] = 1
elif attack_actions[i] == 1:
actions[i] = 0
elif left_actions[i] == 1:
actions[i] = 8
elif right_actions[i] ==1:
actions[i] = 9
elif back_actions[i] == 1:
actions[i] = 7
elif jump_actions[i] == 1:
actions[i] = 10
else:
# No reasonable mapping (would be no-op)
actions[i] = 12
return actions
#*******************************************************************
# WATERFALL TASK
#*******************************************************************
# custom action wrapper for complete GAIL agent for MineRL
class ActionShaping_Waterfall(gym.ActionWrapper):
def __init__(self, env, camera_angle=10, always_attack=False):
super().__init__(env)
self.camera_angle = camera_angle
self.always_attack = always_attack
self._actions = [
[('attack', 1)], #0
[('forward', 1)], #1
[('forward', 1), ('jump', 1)], #2
[('camera', [-self.camera_angle, 0])], #3
[('camera', [self.camera_angle, 0])], #4
[('camera', [0, self.camera_angle])], #5
[('camera', [0, -self.camera_angle])], #6
[('back', 1)], #7
[('left', 1)], #8
[('right', 1)], #9
[('jump', 1)], #10
[('forward', 1), ('attack', 1)], #11
[('equip','water_bucket'), ('use', 1)], #12 #water bucket
[('equip','stone_pickaxe'), ('use', 1)], #13 #stone pickaxe
[('equip','stone_shovel'), ('use', 1)], #14 #stone shovel
[('equip','cobblestone'), ('use', 1)], #15 #cobblestone
#[('equip',1), ('use', 1)], #16 #bucket
]
self.actions = []
for actions in self._actions:
act = self.env.action_space.noop()
for a, v in actions:
act[a] = v
if self.always_attack:
act['attack'] = 1
self.actions.append(act)
# add no-op action
act = self.env.action_space.noop()
self.actions.append(act)
self.action_space = gym.spaces.Discrete(len(self.actions))
def action(self, action):
return self.actions[action]
def processed_actions_to_wrapper_actions_Waterfall(dataset_actions, camera_margin=5):
"""
Turn a batch of actions from dataset (`batch_iter`) to a numpy
array that corresponds to batch of actions of ActionShaping wrapper (_actions).
Camera margin sets the threshold what is considered "moving camera".
Note: Hardcoded to work for actions in ActionShaping._actions, with "intuitive"
ordering of actions.
If you change ActionShaping._actions, remember to change this!
    Array elements are integers corresponding to actions; rows with no matching
    discrete action are mapped to the appended no-op action (index 16 here).
"""
# There are dummy dimensions of shape one
camera_actions = dataset_actions[:,10:].astype(np.float32)
attack_actions = dataset_actions[:,0].astype(np.float32)
forward_actions = dataset_actions[:,3].astype(np.float32)
jump_actions = dataset_actions[:,4].astype(np.float32)
back_actions = dataset_actions[:,1].astype(np.float32)
left_actions = dataset_actions[:,5].astype(np.float32)
right_actions = dataset_actions[:,6].astype(np.float32)
equip_actions = dataset_actions[:,2]
use_actions = dataset_actions[:,9].astype(np.float32)
sneak_actions = dataset_actions[:,7].astype(np.float32)
sprint_actions = dataset_actions[:,8].astype(np.float32)
batch_size = len(camera_actions)
actions = np.zeros((batch_size,), dtype=int)
#Enum(air,bucket,carrot,cobblestone,fence,fence_gate,none,other,snowball,stone_pickaxe,stone_shovel,water_bucket,wheat,wheat_seeds),
equip_actions_dict = dict()
equip_actions_dict['water_bucket'] = 12
equip_actions_dict['stone_pickaxe'] = 13
equip_actions_dict['stone_shovel'] = 14
equip_actions_dict['cobblestone'] = 15
#equip_actions_dict['bucket'] = 16
# step through all actions
currently_equipped_item = 'stone_pickaxe'
for i in range(len(camera_actions)):
# keep track of what is currently equipped
if equip_actions[i] != 'none' and equip_actions[i] in equip_actions_dict:
currently_equipped_item = equip_actions[i]
# equip and use actions are the most important
if use_actions[i] == 1:
actions[i] = equip_actions_dict[currently_equipped_item]
# Moving camera is second most important (horizontal first)
elif camera_actions[i][0] < -camera_margin:
actions[i] = 3
elif camera_actions[i][0] > camera_margin:
actions[i] = 4
elif camera_actions[i][1] > camera_margin:
actions[i] = 5
elif camera_actions[i][1] < -camera_margin:
actions[i] = 6
elif forward_actions[i] == 1:
if jump_actions[i] == 1:
actions[i] = 2
elif attack_actions[i] == 1:
actions[i] = 11
else:
actions[i] = 1
elif attack_actions[i] == 1:
actions[i] = 0
elif left_actions[i] == 1:
actions[i] = 8
elif right_actions[i] ==1:
actions[i] = 9
elif back_actions[i] == 1:
actions[i] = 7
elif jump_actions[i] == 1:
actions[i] = 10
else:
# No reasonable mapping (would be no-op)
actions[i] = 16
return actions
#*******************************************************************
# ANIMAL PEN TASK
#*******************************************************************
# custom action wrapper for complete GAIL agent for MineRL
class ActionShaping_Animalpen(gym.ActionWrapper):
def __init__(self, env, camera_angle=10, always_attack=False):
super().__init__(env)
self.equip_mapping = {'air':0,'bucket':1,'carrot':2,'cobblestone':3,'fence':4,'fence_gate':5,
'none':6,'other':7,'snowball':8,'stone_pickaxe':9,'stone_shovel':10,'water_bucket':11,
'wheat':12,'wheat_seeds':13}
self.camera_angle = camera_angle
self.always_attack = always_attack
self._actions = [
[('attack', 1)], #0
[('forward', 1)], #1
[('forward', 1), ('jump', 1)], #2
[('camera', [-self.camera_angle, 0])], #3
[('camera', [self.camera_angle, 0])], #4
[('camera', [0, self.camera_angle])], #5
[('camera', [0, -self.camera_angle])], #6
[('back', 1)], #7
[('left', 1)], #8
[('right', 1)], #9
[('jump', 1)], #10
[('forward', 1), ('attack', 1)], #11
[('equip','carrot')], #12 #carrot
[('equip','fence'), ('use', 1)], #13 #fence
[('equip','fence_gate'), ('use', 1)], #14 #fence_gate
[('equip','wheat')], #15 #wheat
[('equip','wheat_seeds')], #16 #wheat_seeds
]
self.actions = []
for actions in self._actions:
act = self.env.action_space.noop()
for a, v in actions:
act[a] = v
if self.always_attack:
act['attack'] = 1
self.actions.append(act)
# add no-op action
act = self.env.action_space.noop()
self.actions.append(act)
self.action_space = gym.spaces.Discrete(len(self.actions))
def action(self, action):
return self.actions[action]
def processed_actions_to_wrapper_actions_Animalpen(dataset_actions, camera_margin=5):
"""
Turn a batch of actions from dataset (`batch_iter`) to a numpy
array that corresponds to batch of actions of ActionShaping wrapper (_actions).
Camera margin sets the threshold what is considered "moving camera".
Note: Hardcoded to work for actions in ActionShaping._actions, with "intuitive"
ordering of actions.
If you change ActionShaping._actions, remember to change this!
    Array elements are integers corresponding to actions; rows with no matching
    discrete action are mapped to the appended no-op action (index 17 here).
"""
# There are dummy dimensions of shape one
camera_actions = dataset_actions[:,10:].astype(np.float32)
attack_actions = dataset_actions[:,0].astype(np.float32)
forward_actions = dataset_actions[:,3].astype(np.float32)
jump_actions = dataset_actions[:,4].astype(np.float32)
back_actions = dataset_actions[:,1].astype(np.float32)
left_actions = dataset_actions[:,5].astype(np.float32)
right_actions = dataset_actions[:,6].astype(np.float32)
equip_actions = dataset_actions[:,2]
use_actions = dataset_actions[:,9].astype(np.float32)
sneak_actions = dataset_actions[:,7].astype(np.float32)
sprint_actions = dataset_actions[:,8].astype(np.float32)
batch_size = len(camera_actions)
actions = np.zeros((batch_size,), dtype=int)
#Enum(air,bucket,carrot,cobblestone,fence,fence_gate,none,other,snowball,stone_pickaxe,stone_shovel,water_bucket,wheat,wheat_seeds)
equip_actions_dict = dict()
equip_actions_dict['carrot'] = 12
equip_actions_dict['fence'] = 13
equip_actions_dict['fence_gate'] = 14
equip_actions_dict['wheat'] = 15
equip_actions_dict['wheat_seeds'] = 16
# step through all actions
currently_equipped_item = 'stone_pickaxe'
for i in range(len(camera_actions)):
# keep track of what is currently equipped
if equip_actions[i] != 'none' and equip_actions[i] in equip_actions_dict:
currently_equipped_item = equip_actions[i]
# equip and use actions are the most important
if equip_actions[i] == 'carrot':
actions[i] = equip_actions_dict['carrot']
elif equip_actions[i] == 'wheat':
actions[i] = equip_actions_dict['wheat']
elif equip_actions[i] == 'wheat_seeds':
actions[i] = equip_actions_dict['wheat_seeds']
elif use_actions[i] == 1:
actions[i] = equip_actions_dict[currently_equipped_item]
# Moving camera is second most important (horizontal first)
elif camera_actions[i][0] < -camera_margin:
actions[i] = 3
elif camera_actions[i][0] > camera_margin:
actions[i] = 4
elif camera_actions[i][1] > camera_margin:
actions[i] = 5
elif camera_actions[i][1] < -camera_margin:
actions[i] = 6
elif forward_actions[i] == 1:
if jump_actions[i] == 1:
actions[i] = 2
elif attack_actions[i] == 1:
actions[i] = 11
else:
actions[i] = 1
elif attack_actions[i] == 1:
actions[i] = 0
elif left_actions[i] == 1:
actions[i] = 8
elif right_actions[i] ==1:
actions[i] = 9
elif back_actions[i] == 1:
actions[i] = 7
elif jump_actions[i] == 1:
actions[i] = 10
else:
# No reasonable mapping (would be no-op)
actions[i] = 17
return actions
#*******************************************************************
# VILLAGE HOUSE TASK
#*******************************************************************
# custom action wrapper for complete GAIL agent for MineRL
class ActionShaping_Villagehouse(gym.ActionWrapper):
def __init__(self, env, camera_angle=10, always_attack=False):
super().__init__(env)
self.equip_mapping = {'acacia_door':0,'acacia_fence':1,'cactus':2,'cobblestone':3,'dirt':4,'fence':5,'flower_pot':6,
'glass':7,'ladder':8,'log#0':9,'log#1':10,'log2#0':12,'none':13,'other':14,'planks#0':15,
'planks#1':16,'planks#4':17,'red_flower':18,'sand,sandstone#0':19,'sandstone#2':20,'sandstone_stairs':21,
'snowball':22,'spruce_door':23,'spruce_fence':24,'stone_axe':25,'stone_pickaxe':26,'stone_stairs':27,
'torch':28,'wooden_door':29,'wooden_pressure_plate':30}
self.camera_angle = camera_angle
self.always_attack = always_attack
self._actions = [
[('attack', 1)], #0
[('forward', 1)], #1
[('forward', 1), ('jump', 1)], #2
[('camera', [-self.camera_angle, 0])], #3
[('camera', [self.camera_angle, 0])], #4
[('camera', [0, self.camera_angle])], #5
[('camera', [0, -self.camera_angle])], #6
[('back', 1)], #7
[('left', 1)], #8
[('right', 1)], #9
[('jump', 1)], #10
[('forward', 1), ('attack', 1)], #11
[('equip','acacia_door'), ('use', 1)], #12
[('equip','acacia_fence'), ('use', 1)], #13
[('equip','cactus'), ('use', 1)], #14
[('equip','cobblestone'), ('use', 1)], #15
[('equip','dirt'), ('use', 1)], #16
[('equip','fence'), ('use', 1)], #17
[('equip','flower_pot'), ('use', 1)], #18
[('equip','glass'), ('use', 1)], #19
[('equip','ladder'), ('use', 1)], #20
[('equip','log#0'), ('use', 1)], #21
[('equip','log#1'), ('use', 1)], #22
[('equip','log2#0'), ('use', 1)], #23
[('equip','planks#0'), ('use', 1)], #24
[('equip','planks#1'), ('use', 1)], #25
[('equip','planks#4'), ('use', 1)], #26
[('equip','red_flower'), ('use', 1)], #27
[('equip','sand,sandstone#0'), ('use', 1)], #28
[('equip','sandstone#2'), ('use', 1)], #29
[('equip','sandstone_stairs'), ('use', 1)],#30
[('equip','spruce_door'), ('use', 1)], #31
[('equip','spruce_fence'), ('use', 1)], #32
[('equip','stone_axe'), ('use', 1)], #33
[('equip','stone_pickaxe'), ('use', 1)], #34
[('equip','stone_stairs'), ('use', 1)], #35
[('equip','torch'), ('use', 1)], #36
[('equip','wooden_door'), ('use', 1)], #37
[('equip','wooden_pressure_plate'), ('use', 1)], #38
]
self.actions = []
for actions in self._actions:
act = self.env.action_space.noop()
for a, v in actions:
act[a] = v
if self.always_attack:
act['attack'] = 1
self.actions.append(act)
# add no-op action
act = self.env.action_space.noop()
self.actions.append(act)
self.action_space = gym.spaces.Discrete(len(self.actions))
def action(self, action):
return self.actions[action]
def processed_actions_to_wrapper_actions_Villagehouse(dataset_actions, camera_margin=5):
"""
Turn a batch of actions from dataset (`batch_iter`) to a numpy
array that corresponds to batch of actions of ActionShaping wrapper (_actions).
Camera margin sets the threshold what is considered "moving camera".
Note: Hardcoded to work for actions in ActionShaping._actions, with "intuitive"
ordering of actions.
If you change ActionShaping._actions, remember to change this!
    Array elements are integers corresponding to actions; rows with no matching
    discrete action are mapped to the appended no-op action (index 39 here).
"""
# There are dummy dimensions of shape one
camera_actions = dataset_actions[:,10:].astype(np.float32)
attack_actions = dataset_actions[:,0].astype(np.float32)
forward_actions = dataset_actions[:,3].astype(np.float32)
jump_actions = dataset_actions[:,4].astype(np.float32)
back_actions = dataset_actions[:,1].astype(np.float32)
left_actions = dataset_actions[:,5].astype(np.float32)
right_actions = dataset_actions[:,6].astype(np.float32)
equip_actions = dataset_actions[:,2]
use_actions = dataset_actions[:,9].astype(np.float32)
sneak_actions = dataset_actions[:,7].astype(np.float32)
sprint_actions = dataset_actions[:,8].astype(np.float32)
batch_size = len(camera_actions)
actions = np.zeros((batch_size,), dtype=int)
#Enum(acacia_door,acacia_fence,cactus,cobblestone,dirt,fence,flower_pot,glass,ladder,log#0,log#1,log2#0,none,other,planks#0,planks#1,planks#4,red_flower,sand,sandstone#0,sandstone#2,sandstone_stairs,snowball,spruce_door,spruce_fence,stone_axe,stone_pickaxe,stone_stairs,torch,wooden_door,wooden_pressure_plate)
equip_actions_dict = dict()
equip_actions_dict['acacia_door']=12
equip_actions_dict['acacia_fence']=13
equip_actions_dict['cactus']=14
equip_actions_dict['cobblestone']=15
equip_actions_dict['dirt']=16
equip_actions_dict['fence']=17
equip_actions_dict['flower_pot']=18
equip_actions_dict['glass']=19
equip_actions_dict['ladder']=20
equip_actions_dict['log#0']=21
equip_actions_dict['log#1']=22
equip_actions_dict['log2#0']=23
equip_actions_dict['planks#0']=24
equip_actions_dict['planks#1']=25
equip_actions_dict['planks#4']=26
equip_actions_dict['red_flower']=27
equip_actions_dict['sand,sandstone#0']=28
equip_actions_dict['sandstone#2']=29
equip_actions_dict['sandstone_stairs']=30
equip_actions_dict['spruce_door']=31
equip_actions_dict['spruce_fence']=32
equip_actions_dict['stone_axe']=33
equip_actions_dict['stone_pickaxe']=34
equip_actions_dict['stone_stairs']=35
equip_actions_dict['torch']=36
equip_actions_dict['wooden_door']=37
equip_actions_dict['wooden_pressure_plate']=38
# step through all actions
currently_equipped_item = 'stone_pickaxe'
for i in range(len(camera_actions)):
# keep track of what is currently equipped
if equip_actions[i] != 'none' and equip_actions[i] in equip_actions_dict:
currently_equipped_item = equip_actions[i]
# equip and use actions are the most important
if use_actions[i] == 1:
actions[i] = equip_actions_dict[currently_equipped_item]
# Moving camera is second most important (horizontal first)
elif camera_actions[i][0] < -camera_margin:
actions[i] = 3
elif camera_actions[i][0] > camera_margin:
actions[i] = 4
elif camera_actions[i][1] > camera_margin:
actions[i] = 5
elif camera_actions[i][1] < -camera_margin:
actions[i] = 6
elif forward_actions[i] == 1:
if jump_actions[i] == 1:
actions[i] = 2
elif attack_actions[i] == 1:
actions[i] = 11
else:
actions[i] = 1
elif attack_actions[i] == 1:
actions[i] = 0
elif left_actions[i] == 1:
actions[i] = 8
elif right_actions[i] ==1:
actions[i] = 9
elif back_actions[i] == 1:
actions[i] = 7
elif jump_actions[i] == 1:
actions[i] = 10
else:
# No reasonable mapping (would be no-op)
actions[i] = 39
return actions
# custom action wrapper for Simple GAIL agent for MineRL
#*******************************************************************
# NAVIGATION SUBTASK
#*******************************************************************
# custom action wrapper for complete GAIL agent for MineRL
class ActionShaping_Navigation(gym.ActionWrapper):
def __init__(self, env, camera_angle=10, always_attack=False):
super().__init__(env)
self.camera_angle = camera_angle
self.always_attack = always_attack
self._actions = [
[('attack', 1)], #0
[('forward', 1)], #1
[('forward', 1), ('jump', 1)], #2
[('camera', [0, self.camera_angle])], #3 #horizontal (right)
[('camera', [0, -self.camera_angle])], #4 #horizontal (left)
            [('camera', [-self.camera_angle, 0])], #5 #vertical
            [('camera', [self.camera_angle, 0])], #6 #vertical
[('back', 1)], #7
[('left', 1)], #8
[('right', 1)], #9
[('jump', 1)], #10
#[('equip',11), ('use', 1)],
[('forward', 1), ('attack', 1)], #11
]
self.actions = []
for actions in self._actions:
act = self.env.action_space.noop()
for a, v in actions:
act[a] = v
if self.always_attack:
act['attack'] = 1
self.actions.append(act)
# add no-op action
act = self.env.action_space.noop()
self.actions.append(act)
self.action_space = gym.spaces.Discrete(len(self.actions))
def action(self, action):
return self.actions[action]
def processed_actions_to_wrapper_actions_Navigation(dataset_actions, camera_margin=5):
"""
Turn a batch of actions from dataset (`batch_iter`) to a numpy
array that corresponds to batch of actions of ActionShaping wrapper (_actions).
Camera margin sets the threshold what is considered "moving camera".
Note: Hardcoded to work for actions in ActionShaping._actions, with "intuitive"
ordering of actions.
If you change ActionShaping._actions, remember to change this!
    Array elements are integers corresponding to actions; rows whose only active
    flags have no discrete mapping are sent to the no-op index 12, while completely
    idle rows are marked 99 so they can be filtered out later.
"""
# There are dummy dimensions of shape one
camera_actions = dataset_actions[:,10:].astype(np.float32)
attack_actions = dataset_actions[:,0].astype(np.float32)
forward_actions = dataset_actions[:,3].astype(np.float32)
jump_actions = dataset_actions[:,4].astype(np.float32)
back_actions = dataset_actions[:,1].astype(np.float32)
left_actions = dataset_actions[:,5].astype(np.float32)
right_actions = dataset_actions[:,6].astype(np.float32)
equip_actions = dataset_actions[:,2]
use_actions = dataset_actions[:,9].astype(np.float32)
sneak_actions = dataset_actions[:,7].astype(np.float32)
sprint_actions = dataset_actions[:,8].astype(np.float32)
batch_size = len(camera_actions)
actions = np.zeros((batch_size,), dtype=int)
for i in range(len(camera_actions)):
# Moving camera is most important (horizontal first!!!)
if camera_actions[i][1] < -camera_margin:
actions[i] = 3
elif camera_actions[i][1] > camera_margin:
actions[i] = 4
elif camera_actions[i][0] > camera_margin:
actions[i] = 5
elif camera_actions[i][0] < -camera_margin:
actions[i] = 6
elif forward_actions[i] == 1:
if jump_actions[i] == 1:
actions[i] = 2
elif attack_actions[i] == 1:
actions[i] = 11
else:
actions[i] = 1
elif attack_actions[i] == 1:
actions[i] = 0
elif left_actions[i] == 1:
actions[i] = 8
elif right_actions[i] ==1:
actions[i] = 9
elif jump_actions[i] == 1:
actions[i] = 10
elif back_actions[i] == 1:
actions[i] = 7
elif sum(dataset_actions[i,(0,1,3,4,5,6,7,8,9)].astype(np.float32)):
# actual noop
actions[i] = 12
        else:  # catch everything else and remove later
actions[i] = 99
return actions
# return only image as the observation
class PovOnlyObservation(gym.ObservationWrapper):
def __init__(self, env):
super().__init__(env)
self.observation_space = self.env.observation_space['pov']
def observation(self, observation):
obs = observation['pov'].squeeze().astype(np.float32)
# Transpose observations to be channel-first (BCHW instead of BHWC)
obs = obs.transpose(2, 0, 1)
# Normalize observations
obs /= 255.0
return obs
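# --- Hedged usage sketch (added; requires the `minerl` package and its demonstration data) ---
# The env id and variable names below are assumptions, not part of this module.
#
#   env = gym.make("MineRLBasaltFindCave-v0")
#   env = ActionShaping_FindCave(env, camera_angle=10)
#   env = PovOnlyObservation(env)
#   obs = env.reset()                                   # channel-first float image in [0, 1]
#   obs, reward, done, info = env.step(env.action_space.sample())
#
#   # Flattened dataset actions (one row per frame, columns indexed as in the
#   # converters above) become the wrapper's discrete ids like this:
#   discrete_actions = processed_actions_to_wrapper_actions_FindCave(dataset_actions)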
|
11466048
|
from __future__ import print_function
from stompy import harm_decomp
import numpy as np
def test_basic():
# A sample problem:
omegas = np.array([1.0,0.0])
# the constructed data:
amps = np.array([1,5.0])
phis = np.array([1,0])
t = np.linspace(0,10*np.pi,125)
h = amps[0]*np.cos(omegas[0]*t - phis[0]) + amps[1]*np.cos(omegas[1]*t - phis[1])
comps = harm_decomp.decompose(t,h,omegas)
recon=harm_decomp.recompose(t,comps,omegas)
assert np.allclose( recon, h)
print("Components: ",comps)
|
11466072
|
import FWCore.ParameterSet.Config as cms
from PhysicsTools.NanoAOD.common_cff import *
rhoTable = cms.EDProducer("GlobalVariablesTableProducer",
variables = cms.PSet(
fixedGridRhoFastjetAll = ExtVar( cms.InputTag("fixedGridRhoFastjetAll"), "double", doc = "rho from all PF Candidates, used e.g. for JECs" ),
fixedGridRhoFastjetCentralNeutral = ExtVar( cms.InputTag("fixedGridRhoFastjetCentralNeutral"), "double", doc = "rho from neutral PF Candidates with |eta| < 2.5, used e.g. for rho corrections of some lepton isolations" ),
fixedGridRhoFastjetCentralCalo = ExtVar( cms.InputTag("fixedGridRhoFastjetCentralCalo"), "double", doc = "rho from calo towers with |eta| < 2.5, used e.g. egamma PFCluster isolation" ),
fixedGridRhoFastjetCentral = ExtVar( cms.InputTag("fixedGridRhoFastjetCentral"), "double", doc = "rho from all PF Candidates for central region, used e.g. for JECs" ),
fixedGridRhoFastjetCentralChargedPileUp = ExtVar( cms.InputTag("fixedGridRhoFastjetCentralChargedPileUp"), "double", doc = "rho from charged PF Candidates for central region, used e.g. for JECs" ),
)
)
puTable = cms.EDProducer("NPUTablesProducer",
src = cms.InputTag("slimmedAddPileupInfo"),
pvsrc = cms.InputTag("offlineSlimmedPrimaryVertices"),
zbins = cms.vdouble( [0.0,1.7,2.6,3.0,3.5,4.2,5.2,6.0,7.5,9.0,12.0] ),
savePtHatMax = cms.bool(False),
)
genTable = cms.EDProducer("SimpleGenEventFlatTableProducer",
src = cms.InputTag("generator"),
cut = cms.string(""),
name= cms.string("Generator"),
doc = cms.string("Generator information"),
singleton = cms.bool(True),
extension = cms.bool(False),
variables = cms.PSet(
x1 = Var( "?hasPDF?pdf().x.first:-1", float, doc="x1 fraction of proton momentum carried by the first parton",precision=14 ),
x2 = Var( "?hasPDF?pdf().x.second:-1", float, doc="x2 fraction of proton momentum carried by the second parton",precision=14 ),
xpdf1 = Var( "?hasPDF?pdf().xPDF.first:-1", float, doc="x*pdf(x) for the first parton", precision=14 ),
xpdf2 = Var( "?hasPDF?pdf().xPDF.second:-1", float, doc="x*pdf(x) for the second parton", precision=14 ),
id1 = Var( "?hasPDF?pdf().id.first:-1", int, doc="id of first parton", precision=6 ),
id2 = Var( "?hasPDF?pdf().id.second:-1", int, doc="id of second parton", precision=6 ),
scalePDF = Var( "?hasPDF?pdf().scalePDF:-1", float, doc="Q2 scale for PDF", precision=14 ),
binvar = Var("?hasBinningValues()?binningValues()[0]:-1", float, doc="MC generation binning value", precision=14),
weight = Var("weight()", float,doc="MC generator weight", precision=14),
),
)
globalTablesTask = cms.Task(rhoTable)
globalTablesMCTask = cms.Task(puTable,genTable)
|
11466096
|
import os
import csv, json
from collections import defaultdict
from expertise.evaluators.mean_avg_precision import eval_map
from expertise.evaluators.hits_at_k import eval_hits_at_k
from expertise.dataset import Dataset
from expertise import utils
import ipdb
def setup(config):
assert os.path.exists(config.tpms_scores_file), 'This model requires a pre-computed tpms score file.'
dataset = Dataset(**config.dataset)
experiment_dir = os.path.abspath(config.experiment_dir)
setup_dir = os.path.join(experiment_dir, 'setup')
if not os.path.exists(setup_dir):
os.mkdir(setup_dir)
(train_set_ids,
dev_set_ids,
test_set_ids) = utils.split_ids(list(dataset.submission_ids), seed=config.random_seed)
bids_by_forum = utils.get_bids_by_forum(dataset)
test_labels = utils.format_bid_labels(test_set_ids, bids_by_forum)
utils.dump_jsonl(os.path.join(config.setup_dir, 'test_labels.jsonl'), test_labels)
def train(config):
print('Nothing to train. This model is a shell that reads in pre-computed TPMS scores.')
assert os.path.exists(config.tpms_scores_file), 'This model requires a pre-computed tpms score file.'
def infer(config):
print('Nothing to infer. This model is a shell that reads in pre-computed TPMS scores.')
assert os.path.exists(config.tpms_scores_file), 'This model requires a pre-computed tpms score file.'
def test(config):
score_file_path = os.path.join(config.test_dir, 'test_scores.jsonl')
labels_file_path = os.path.join(config.setup_dir, 'test_labels.jsonl')
tpms_scores_file = config.tpms_scores_file
scores = {}
for data in utils.jsonl_reader(tpms_scores_file):
source_id = data['source_id']
target_id = data['target_id']
score = data['score']
if source_id not in scores:
scores[source_id] = {}
if target_id not in scores[source_id]:
scores[source_id][target_id] = score
with open(score_file_path, 'w') as w:
for data in utils.jsonl_reader(labels_file_path):
paperid = data['source_id']
userid = data['target_id']
label = data['label']
if paperid in scores:
score = scores[paperid].get(userid, 0.0)
if float(score) > -float('inf'):
result = {
'source_id': paperid,
'target_id': userid,
'score': float(score),
'label': int(label)
}
w.write(json.dumps(result) + '\n')
(list_of_list_of_labels,
list_of_list_of_scores) = utils.load_labels(score_file_path)
map_score = float(eval_map(list_of_list_of_labels, list_of_list_of_scores))
hits_at_1 = float(eval_hits_at_k(list_of_list_of_labels, list_of_list_of_scores, k=1))
hits_at_3 = float(eval_hits_at_k(list_of_list_of_labels, list_of_list_of_scores, k=3))
hits_at_5 = float(eval_hits_at_k(list_of_list_of_labels, list_of_list_of_scores, k=5))
hits_at_10 = float(eval_hits_at_k(list_of_list_of_labels, list_of_list_of_scores, k=10))
score_lines = [
[config.name, text, data] for text, data in [
('MAP', map_score),
('Hits@1', hits_at_1),
('Hits@3', hits_at_3),
('Hits@5', hits_at_5),
('Hits@10', hits_at_10)
]
]
config.test_save(score_lines, 'test.scores.tsv')
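# --- Hedged note on the expected inputs (inferred from the readers above, not from docs) ---
# `config.tpms_scores_file` is consumed as JSON lines, one record per (paper, reviewer) pair,
# e.g. {"source_id": "<paper id>", "target_id": "<reviewer id>", "score": 0.73}.
# The `test_labels.jsonl` file written by setup() is read back the same way, with an extra
# integer "label" field holding the bid-derived relevance label.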
|
11466138
|
from tests.analyzer.utils import UnusedTestCase
from unimport.statement import Import, ImportFrom
class AsImportTestCase(UnusedTestCase):
def test_as_import_all_unused_all_cases(self):
self.assertSourceAfterScanningEqualToExpected(
"""\
from x import y as z
import x
from t import s as ss
from f import a as c, l as k, i as ii
from fo import (bar, i, x as z)
import le as x
""",
[
ImportFrom(
lineno=1,
column=1,
name="z",
package="x",
star=False,
suggestions=[],
),
Import(
lineno=2,
column=1,
name="x",
package="x",
),
ImportFrom(
lineno=3,
column=1,
name="ss",
package="t",
star=False,
suggestions=[],
),
ImportFrom(
lineno=4,
column=1,
name="c",
package="f",
star=False,
suggestions=[],
),
ImportFrom(
lineno=4,
column=2,
name="k",
package="f",
star=False,
suggestions=[],
),
ImportFrom(
lineno=4,
column=3,
name="ii",
package="f",
star=False,
suggestions=[],
),
ImportFrom(
lineno=5,
column=1,
name="bar",
package="fo",
star=False,
suggestions=[],
),
ImportFrom(
lineno=5,
column=2,
name="i",
package="fo",
star=False,
suggestions=[],
),
ImportFrom(
lineno=5,
column=3,
name="z",
package="fo",
star=False,
suggestions=[],
),
Import(
lineno=6,
column=1,
name="x",
package="le",
),
],
)
def test_as_import_one_used_in_function_all_cases(self):
self.assertSourceAfterScanningEqualToExpected(
"""\
from x import y as z
import x
from t import s as ss
from f import a as c, l as k, i as ii
from fo import (bar, i, x as z)
import le as x
def x(t=x):pass
""",
[
ImportFrom(
lineno=1,
column=1,
name="z",
package="x",
star=False,
suggestions=[],
),
Import(
lineno=2,
column=1,
name="x",
package="x",
),
ImportFrom(
lineno=3,
column=1,
name="ss",
package="t",
star=False,
suggestions=[],
),
ImportFrom(
lineno=4,
column=1,
name="c",
package="f",
star=False,
suggestions=[],
),
ImportFrom(
lineno=4,
column=2,
name="k",
package="f",
star=False,
suggestions=[],
),
ImportFrom(
lineno=4,
column=3,
name="ii",
package="f",
star=False,
suggestions=[],
),
ImportFrom(
lineno=5,
column=1,
name="bar",
package="fo",
star=False,
suggestions=[],
),
ImportFrom(
lineno=5,
column=2,
name="i",
package="fo",
star=False,
suggestions=[],
),
ImportFrom(
lineno=5,
column=3,
name="z",
package="fo",
star=False,
suggestions=[],
),
],
)
|
11466160
|
from datetime import timedelta
from pathlib import Path
import click
from overhave.base_settings import LoggingSettings
from overhave.cli.group import overhave
from overhave.transport import OverhaveS3Bucket, OverhaveS3ManagerSettings, S3Manager
from overhave.utils import get_current_time
@overhave.group(short_help="Run s3 cloud interaction commands")
def s3() -> None:
pass
@s3.group(short_help="S3 cloud bucket's interaction commands")
def bucket() -> None:
pass
def _check_bucket_registered(name: str) -> None:
if name in (item.value for item in list(OverhaveS3Bucket)):
return
click.secho(f"Note: specified s3 bucket name '{name}' not presented in OverhaveS3Bucket enum!", fg="yellow")
def _get_s3_manager() -> S3Manager:
LoggingSettings().setup_logging()
manager = S3Manager(OverhaveS3ManagerSettings(autocreate_buckets=False))
manager.initialize()
return manager
@bucket.command(short_help="Create s3 cloud bucket")
@click.option(
"-n", "--name", type=str, help="Declared s3 bucket",
)
def create(name: str) -> None:
""" Create s3 bucket. """
_check_bucket_registered(name)
_get_s3_manager().create_bucket(name)
@bucket.command(short_help="Delete s3 cloud bucket")
@click.option(
"-n", "--name", type=str, help="Declared s3 bucket",
)
@click.option(
"-f", "--force", is_flag=True, help="Delete all files in bucket, then delete bucket",
)
def delete(name: str, force: bool) -> None:
""" Delete s3 bucket. """
_check_bucket_registered(name)
_get_s3_manager().delete_bucket(name, force=force)
@bucket.command(short_help="Remove old s3 cloud bucket files")
@click.option(
"-n", "--name", type=str, help="Declared s3 bucket",
)
@click.option(
"-d", "--days", type=int, help="Remove all files in bucket older then specified days value",
)
def remove_files(name: str, days: int) -> None:
""" Remove s3 bucket files older . """
_check_bucket_registered(name)
manager = _get_s3_manager()
target_date = get_current_time() - timedelta(days=days)
objects = manager.get_bucket_objects(name)
objects_to_delete = []
for obj in objects:
if not obj.modified_at < target_date:
continue
objects_to_delete.append(obj)
if not objects_to_delete:
click.secho(f"No one object older than {days} days.")
return
click.secho(f"Objects older then {days} days: {[x.name for x in objects_to_delete]}")
    manager.delete_bucket_objects(bucket=name, objects=objects_to_delete)
@s3.command(short_help="Download file from s3 bucket")
@click.option(
"-b", "--bucket", type=str, help="Declared s3 bucket",
)
@click.option(
"-f", "--filename", type=str, help="Filename for downloading",
)
@click.option("-d", "--dir-to-save", type=str, help="Directory for saving file", default=".")
def download_file(bucket: str, filename: str, dir_to_save: str) -> None:
""" Create s3 bucket. """
_check_bucket_registered(bucket)
_get_s3_manager().download_file(filename=filename, bucket=bucket, dir_to_save=Path(dir_to_save))
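# --- Hedged CLI examples (added; exact command names depend on click's normalization
# of the function names above) ---
#   overhave s3 bucket create -n my-bucket
#   overhave s3 bucket delete -n my-bucket --force
#   overhave s3 bucket remove-files -n my-bucket -d 30
#   overhave s3 download-file -b my-bucket -f report.zip -d ./downloads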
|
11466212
|
import argparse, yaml, os, os.path
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input")
parser.add_argument("-o", "--output")
parser.add_argument("--tm2", "--tm", action="store_const", const="tm2", dest="action")
parser.add_argument("--tm2source", "--tmsource", action="store_const", const="tm2source", dest="action")
parser.add_argument("--zoom", default="14", help="Last zoom in the vector tile, default 14")
parser.add_argument("--source", action="store_true", dest="source")
parser.add_argument("--no-source", action="store_false", dest="source")
parser.add_argument("--only-shapefiles", action="store_true")
parser.add_argument("--only-postgis", action="store_true")
args = parser.parse_args()
cwd = os.getcwd()
with open(args.input) as fp:
    projectfile = yaml.safe_load(fp)
if args.action == 'tm2':
new_layers = []
for layer in projectfile['Layer']:
new_layers.append({'id': layer['id']})
projectfile['Layer'] = new_layers
if args.source:
projectfile['source'] = "tmsource://{}/osm-carto.tm2source/".format(cwd)
elif args.action == 'tm2source':
del projectfile['source']
del projectfile['Stylesheet']
zoom = int(args.zoom)
projectfile['maxzoom'] = zoom
for layer in projectfile['Layer']:
# If the maxzoom is less than the minzoom, don't do anything
# This can happen for a generic land polygon shapefile, which has
# maxzoom: 9. If you include a minzoom: 14 in that layer, then that
# layer won't show up from 0-9, i.e. it won't show up at all.
if layer['properties'].get('minzoom', 22) > zoom and layer['properties'].get('maxzoom', 22) >= zoom:
layer['properties']['minzoom'] = zoom
if args.only_shapefiles:
projectfile['Layer'] = [l for l in projectfile['Layer'] if l['Datasource']['type'] == 'shape']
elif args.only_postgis:
projectfile['Layer'] = [l for l in projectfile['Layer'] if l['Datasource']['type'] == 'postgis']
else:
raise NotImplementedError()
with open(args.output, 'w') as fp:
yaml.safe_dump(projectfile, fp)
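# --- Hedged usage examples (added; the script filename is an assumption) ---
#   python convert_project.py -i project.yaml -o project.yml --tm2 --source
#   python convert_project.py -i project.yaml -o data.yml --tm2source --zoom 12 --only-postgis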
|
11466232
|
import copy
import io
import itertools
import os
import shutil
import unittest
import tempfile
import pkg_resources
import pyhmmer
from pyhmmer.errors import EaselError, AlphabetMismatch
from pyhmmer.easel import Alphabet, SequenceFile
from pyhmmer.plan7 import HMM, HMMFile, Profile, Background
class TestProfile(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.hmm_path = pkg_resources.resource_filename("tests", "data/hmms/db/Thioesterase.hmm")
with HMMFile(cls.hmm_path) as hmm_file:
cls.hmm = next(hmm_file)
cls.alphabet = cls.hmm.alphabet
cls.background = Background(cls.alphabet)
cls.profile = Profile(cls.hmm.M, cls.alphabet)
cls.profile.configure(cls.hmm, cls.background, 200)
def test_configure(self):
profile = Profile(self.hmm.M, Alphabet.dna())
bg = Background(profile.alphabet)
self.assertNotEqual(profile.alphabet, self.hmm.alphabet)
self.assertRaises(AlphabetMismatch, profile.configure, self.hmm, bg, 200)
hmm = HMM(100, profile.alphabet)
self.assertNotEqual(profile.alphabet, self.background.alphabet)
self.assertRaises(AlphabetMismatch, profile.configure, hmm, self.background, 200)
def test_eq(self):
profile2 = Profile(self.hmm.M, self.alphabet)
profile2.configure(self.hmm, self.background, 200)
self.assertEqual(self.profile, profile2)
self.assertNotEqual(self.profile, 1)
def test_copy(self):
profile2 = copy.copy(self.profile)
self.assertEqual(self.profile, profile2)
def test_profile_modes(self):
profile = self.profile.copy()
for multihit in (False, True):
for local in (False, True):
profile.configure(
self.hmm,
self.background,
200,
multihit=multihit,
local=local
)
self.assertEqual(profile.is_multihit(), multihit)
self.assertEqual(profile.is_local(), local)
def test_M(self):
self.assertEqual(self.profile.M, self.hmm.M)
def test_L(self):
profile = Profile(self.hmm.M, self.alphabet)
profile.configure(self.hmm, self.background, 200)
self.assertEqual(profile.L, 200)
profile.L = 300
self.assertEqual(profile.L, 300)
def test_accession(self):
self.assertEqual(self.profile.accession, self.hmm.accession)
def test_description(self):
self.assertEqual(self.profile.description, self.hmm.description)
def test_name(self):
self.assertEqual(self.profile.name, self.hmm.name)
def test_clear(self):
profile = self.profile.copy()
self.assertNotEqual(profile.M, 0)
profile.clear()
self.assertEqual(profile.M, 0)
def test_consensus(self):
self.assertEqual(self.profile.consensus, self.hmm.consensus)
profile = Profile(self.hmm.M, self.alphabet)
self.assertIs(profile.consensus, None)
    def test_consensus_structure(self):
        self.assertEqual(self.profile.consensus_structure, self.hmm.consensus_structure)
        profile = Profile(self.hmm.M, self.alphabet)
        self.assertIs(profile.consensus_structure, None)
def test_offsets(self):
self.assertIs(self.profile.offsets.model, None)
self.assertIs(self.profile.offsets.profile, None)
self.assertIs(self.profile.offsets.filter, None)
|
11466238
|
from project import db
import datetime
from project.corsi.models import Corso
# Relationship table: one Corso (course) to many Serate (sessions)
class Serata(db.Model):
__tablename__ = "serata"
__table_args__ = (db.UniqueConstraint("id", "data", name="constraint_serata"),)
id = db.Column(db.Integer(), primary_key=True)
nome = db.Column(db.String(255), nullable=False)
descrizione = db.Column(db.String(255), nullable=False)
data = db.Column(db.DateTime(), nullable=False)
link_partecipazione = db.Column(db.String(255), nullable=True)
link_registrazione = db.Column(db.String(255), nullable=True)
corso_id = db.Column(db.Integer(), db.ForeignKey("corso.id"))
def __init__(self, nome, descrizione, data, link_partecipazione='', link_registrazione=''):
self.nome = nome
self.descrizione = descrizione
self.data = data
self.link_partecipazione = link_partecipazione
self.link_registrazione = link_registrazione
def __repr__(self):
return "<Descrizione '{}'. Link registrazione>".format(self.descrizione, self.link_registrazione)
@staticmethod
def insert_test_serate():
lista_serate = [
("Flask 1", "Introduzione a Flask e ai web server con Jinja Base", datetime.datetime(2020, 10, 12, hour=20), '', 'https://www.youtube.com/watch?v=FPI5-oGKiVI&t=759s'),
("Flask 2", "Jinja avanzato e Forms", datetime.datetime(2020, 10, 19, hour=20), '', 'https://www.youtube.com/watch?v=C-iEkd-BpE4'),
("Flask 3", "Flask con Database", datetime.datetime(2020, 10, 26, hour=20), '', 'https://www.youtube.com/watch?v=rCXhuSiOcZU'),
("Flask 4", "Review con Andrea", datetime.datetime(2020, 11, 2, hour=20), '', 'https://www.youtube.com/watch?v=izIKXOrbI5U'),
("Flask 5", "Review con Mario", datetime.datetime(2020, 11, 9, hour=20), '', 'https://vimeo.com/478050019'),
("Flask 6", "Blueprints, refactoring e tests con Mario", datetime.datetime(2020, 11, 16, hour=20), 'https://zoom.us/j/99953652561?pwd=<PASSWORD>', 'https://vimeo.com/480155611'),
("Flask 7", "Autenticazione con Mario", datetime.datetime(2020, 11, 23, hour=20), '', ''),
]
corso_flask = Corso.query.filter_by(nome="Flask").first()
for serata in lista_serate:
serata_db = Serata.query.filter_by(nome=serata[0]).first()
if serata_db is None:
serata_db = Serata(*serata)
serata_db.corso_id = corso_flask.id
db.session.add(serata_db)
db.session.commit()
'''
s1.corso_id = c.id
s2.corso_id = c.id
s3.corso_id = c.id
s4.corso_id = c.id
s5.corso_id = c.id
s6.corso_id = c.id
s7.corso_id = c.id
data_serata = data_serata.replace(day=30)
si6 = Serata("Da impostare", "Non ancora definita", data_serata)
serate = [s1, s2, s3, s4, s5, s6, s7, si6]
'''
|
11466248
|
import shutil
from typing import Dict
from covid_shared import workflow
import covid_model_seiir_pipeline
from covid_model_seiir_pipeline.pipeline.parameter_fit.specification import FIT_JOBS, FitScenario
class BetaFitTaskTemplate(workflow.TaskTemplate):
tool = workflow.get_jobmon_tool(covid_model_seiir_pipeline)
task_name_template = f"{FIT_JOBS.fit}_{{scenario}}_{{draw_id}}"
command_template = (
f"{shutil.which('stask')} "
f"{FIT_JOBS.fit} "
"--fit-version {fit_version} "
"--scenario {scenario} "
"--draw-id {draw_id} "
"-vv"
)
node_args = ['scenario', 'draw_id']
task_args = ['fit_version']
class FitWorkflow(workflow.WorkflowTemplate):
tool = workflow.get_jobmon_tool(covid_model_seiir_pipeline)
workflow_name_template = 'seiir-oos-fit-{version}'
task_template_classes = {
FIT_JOBS.fit: BetaFitTaskTemplate,
}
def attach_tasks(self, n_draws: int, scenarios: Dict[str, FitScenario]):
fit_template = self.task_templates[FIT_JOBS.fit]
for scenario_name, scenario_spec in scenarios.items():
for draw in range(n_draws):
fit_task = fit_template.get_task(
fit_version=self.version,
draw_id=draw,
scenario=scenario_name,
)
self.workflow.add_task(fit_task)
|
11466249
|
exec(open('ex-3.02.py').read())  # Python 3 equivalent of execfile('ex-3.02.py')
B = (3, 1)
D = (4 * dtype.extent, 0)
newtype = dtype.Create_hindexed(B, D)
dtype.Free()
newtype.Free()
|
11466252
|
import copy
import logging
import lib.const as C
import lib.visit as v
from .. import util
from ..meta import class_nonce, register_class, class_lookup
from ..meta.program import Program
from ..meta.clazz import Clazz
from ..meta.method import Method
from ..meta.field import Field
from ..meta.statement import Statement, to_statements
from ..meta.expression import Expression, to_expression
"""
generator class Automaton { body_of_Automaton }
class RegularLanguage extends Automaton { ... }
class DBConnection {
class Monitor extends Automaton { ... }
}
=>
class Automaton1 { copy_of_body_of_Automaton }
class Automaton2 { copy_of_body_of_Automaton }
class RegularLanague extends Automaton1 { ... }
class DBConnection {
class Monitor extends Automaton2 { ... }
}
"""
class CGenerator(object):
# to avoid name conflict, use fresh counter as suffix
__cnt = 0
@classmethod
def fresh_cnt(cls):
cls.__cnt = cls.__cnt + 1
return cls.__cnt
def __init__(self):
self._cgens = []
@v.on("node")
def visit(self, node):
"""
This is the generic method to initialize the dynamic dispatcher
"""
@v.when(Program)
def visit(self, node):
self._pgr = node
# collect class-level generators
for cls in util.flatten_classes(node.classes, "inners"):
if C.mod.GN in cls.mods:
logging.debug("found class generator: {}".format(cls.name))
self._cgens.append(cls)
@v.when(Clazz)
def visit(self, node):
if not node.sup: return
sup = class_lookup(node.sup)
if sup not in self._cgens: return
# specialize the class generator
specialized_cls_name = u"{}{}".format(node.sup, CGenerator.fresh_cnt())
# deep copy
specialized_cls = copy.deepcopy(sup)
# rename <init>s
for init in specialized_cls.inits:
init.name = specialized_cls_name
init.typ = specialized_cls_name
# rename the class
specialized_cls.name = specialized_cls_name
register_class(specialized_cls)
self._pgr.add_classes([specialized_cls])
node.sup = specialized_cls_name
logging.debug("specializing {} for {}".format(specialized_cls_name, node.name))
@v.when(Field)
def visit(self, node): pass
@v.when(Method)
def visit(self, node): pass
@v.when(Statement)
def visit(self, node): return [node]
@v.when(Expression)
def visit(self, node): return node
"""
class A {
static generator foo(...) {
... foo(...); // may be recursive
}
}
class X {
... bar(...) {
... A.foo(...);
... A.foo(...);
}
}
=>
class A {
...
static foo1(...) {
return foo(...); // delegation
}
static foo2(...) {
return foo(...); // delegation
}
}
class X {
... bar(...) {
... A.foo1(...);
... A.foo2(...);
}
}
"""
class MGenerator(object):
# to avoid name conflict, use fresh counter as suffix
__cnt = 0
@classmethod
def fresh_cnt(cls):
cls.__cnt = cls.__cnt + 1
return cls.__cnt
def __init__(self):
# { mname: mtd } for easier lookup
self._mgens = {}
self._cur_mtd = None
@v.on("node")
def visit(self, node):
"""
This is the generic method to initialize the dynamic dispatcher
"""
@v.when(Program)
def visit(self, node):
## collect method-level generators
for cls in util.flatten_classes(node.classes, "inners"):
for mtd in cls.mtds:
if mtd.is_generator:
logging.debug("found method generator: {}.{}".format(cls.name, mtd.name))
self._mgens[mtd.name] = mtd
@v.when(Clazz)
def visit(self, node): pass
@v.when(Field)
def visit(self, node): pass
@v.when(Method)
def visit(self, node):
self._cur_mtd = node
@v.when(Statement)
def visit(self, node): return [node]
@v.when(Expression)
def visit(self, node):
if node.kind != C.E.CALL: return node
l_callee = unicode(node.f).split('.')
u_callee = l_callee[-1]
if u_callee not in self._mgens: return node
# avoid recursive calls inside a method generator
if u_callee == self._cur_mtd.name: return node
# avoid calls inside a specialized method
if hasattr(self._cur_mtd, "generator"): return node
mgen_mtd = self._mgens[u_callee]
# specialize the method generator
specialized_mtd_name = u"{}{}".format(u_callee, MGenerator.fresh_cnt())
_mods = list(set(mgen_mtd.mods) - set([C.mod.GN]))
specialized_mtd = Method(clazz=mgen_mtd.clazz, name=specialized_mtd_name, mods=_mods, typ=mgen_mtd.typ, params=mgen_mtd.params)
# associate the method generator, for easier decoding
setattr(specialized_mtd, "generator", mgen_mtd)
# delegate the call
args = u", ".join(mgen_mtd.param_vars)
delegation = u"{}({});".format(u_callee, args)
if mgen_mtd.typ != C.J.v: # i.e., has a return value
delegation = u"return {}".format(delegation)
specialized_mtd.body = to_statements(specialized_mtd, delegation)
mgen_mtd.clazz.add_mtd(specialized_mtd)
# replace the callee, e.g., foo -> foo1
logging.debug("specializing {} to {}".format(u_callee, specialized_mtd_name))
n_callee = u'.'.join(l_callee[:-1] + [specialized_mtd_name])
node.f = to_expression(n_callee)
return node
|
11466259
|
import numpy as np
import properties
import z_order_utils
class BaseMetadata(properties.HasProperties):
name = properties.String("Name of the block model", default="")
description = properties.String("Description of the block model", default="")
# Other named metadata?
class BaseOrientation(properties.HasProperties):
corner = properties.Vector3(
"Origin of the block model, where axes extend from",
default="ZERO",
)
axis_u = properties.Vector3("Vector orientation of u-direction", default="X")
axis_v = properties.Vector3("Vector orientation of v-direction", default="Y")
axis_w = properties.Vector3("Vector orientation of w-direction", default="Z")
class RegularBlockModel(BaseMetadata, BaseOrientation):
block_size = properties.Vector3(
"Size of each block",
)
block_count = properties.List(
"Number of blocks in each dimension",
min_length=3,
max_length=3,
prop=properties.Integer("", min=1),
)
class TensorBlockModel(BaseMetadata, BaseOrientation):
tensor_u = properties.Array(
"Tensor cell widths, u-direction", shape=("*",), dtype=float
)
tensor_v = properties.Array(
"Tensor cell widths, v-direction", shape=("*",), dtype=float
)
tensor_w = properties.Array(
"Tensor cell widths, w-direction", shape=("*",), dtype=float
)
@property
def block_count(self):
return [
len(self.tensor_u),
len(self.tensor_v),
len(self.tensor_w),
]
@property
def num_blocks(self):
return np.prod(self.block_count)
class BaseCompressedBlockStorage(properties.HasProperties):
parent_block_size = properties.Vector3(
"Size of each parent block",
)
parent_block_count = properties.List(
"Number of parent blocks in each dimension",
min_length=3,
max_length=3,
prop=properties.Integer("", min=1),
)
@property
def num_parent_blocks(self):
return np.prod(self.parent_block_count)
@property
def num_blocks(self):
return self.compressed_block_index[-1]
@property
def is_sub_blocked(self):
self.compressed_block_index # assert that _cbi exists
return (self._cbi[1:] - self._cbi[:-1]) > 1
def _get_starting_cbi(self):
return np.arange(self.num_parent_blocks + 1, dtype="uint32")
@property
def compressed_block_index(self):
# Need the block counts to exist
assert self._props["parent_block_count"].assert_valid(
self, self.parent_block_count
)
if "sub_block_count" in self._props:
assert self._props["sub_block_count"].assert_valid(
self, self.sub_block_count
)
        # Note: we could emit warnings here if the counts above change;
        # probably less relevant, as these are not intended to be used
        # in a dynamic context.
# If the sub block storage does not exist, create it
if not hasattr(self, "_cbi"):
# Each parent cell has a single attribute before refinement
self._cbi = self._get_starting_cbi()
return self._cbi
def _get_parent_index(self, ijk):
pbc = self.parent_block_count
assert len(ijk) == 3 # Should be a 3 length integer tuple/list
assert (
(0 <= ijk[0] < pbc[0]) & (0 <= ijk[1] < pbc[1]) & (0 <= ijk[2] < pbc[2])
), "Must be valid ijk index"
(parent_index,) = np.ravel_multi_index(
[[ijk[0]], [ijk[1]], [ijk[2]]], # Index into the block model
self.parent_block_count, # shape of the parent
order="F", # Explicit column major ordering, "i moves fastest"
)
return parent_index
class RegularSubBlockModel(BaseMetadata, BaseOrientation, BaseCompressedBlockStorage):
sub_block_count = properties.List(
"Number of sub blocks in each sub-blocked parent",
min_length=3,
max_length=3,
prop=properties.Integer("", min=1),
)
@property
def sub_block_size(self):
return self.parent_block_size / np.array(self.sub_block_count)
def refine(self, ijk):
self.compressed_block_index # assert that _cbi exists
parent_index = self._get_parent_index(ijk)
# Adding "num_sub_blocks" - 1, because the parent was already counted
self._cbi[parent_index + 1 :] += np.prod(self.sub_block_count) - 1
# Attribute index is where to insert into attribute arrays
attribute_index = tuple(self._cbi[parent_index : parent_index + 2])
return parent_index, attribute_index
# Note: Perhaps if there is an unrefined RSBM,
# then OMF should serialize as a RBM?
class OctreeSubBlockModel(BaseMetadata, BaseOrientation, BaseCompressedBlockStorage):
@property
def z_order_curves(self):
forest = self._get_forest()
cbi = self.compressed_block_index
curves = np.zeros(self.num_blocks, dtype="uint32")
for i, tree in enumerate(forest):
curves[cbi[i] : cbi[i + 1]] = sorted(tree)
return curves
def _get_forest(self):
"""Want a set before we create the array.
This may not be useful for less dynamic implementations.
"""
if not hasattr(self, "_forest"):
# Do your part for the planet:
# Plant trees in every parent block.
self._forest = [{0} for _ in range(self.num_parent_blocks)]
return self._forest
def _refine_child(self, ijk, ind):
self.compressed_block_index # assert that _cbi exists
parent_index = self._get_parent_index(ijk)
tree = self._get_forest()[parent_index]
if ind not in tree:
raise IndexError(ind)
p, lvl = z_order_utils.get_pointer(ind)
w = z_order_utils.level_width(lvl + 1)
children = [
[p[0], p[1], p[2], lvl + 1],
[p[0] + w, p[1], p[2], lvl + 1],
[p[0], p[1] + w, p[2], lvl + 1],
[p[0] + w, p[1] + w, p[2], lvl + 1],
[p[0], p[1], p[2] + w, lvl + 1],
[p[0] + w, p[1], p[2] + w, lvl + 1],
[p[0], p[1] + w, p[2] + w, lvl + 1],
[p[0] + w, p[1] + w, p[2] + w, lvl + 1],
]
for child in children:
tree.add(z_order_utils.get_index(child[:3], child[3]))
tree.remove(ind)
# Adding "num_sub_blocks" - 1, because the parent was already counted
self._cbi[parent_index + 1 :] += 7
return children
class ArbitrarySubBlockModel(BaseMetadata, BaseOrientation, BaseCompressedBlockStorage):
def _get_starting_cbi(self):
"""Unlike octree and rsbm, this has zero sub-blocks to start with."""
return np.zeros(self.num_parent_blocks + 1, dtype="uint32")
    def _get_lists(self):
        """Lazily build per-parent (centroids, sizes) array pairs.
        This may not be useful for less dynamic implementations.
        """
        if not hasattr(self, "_lists"):
            # Start every parent block with empty centroid and size arrays;
            # sub blocks are appended later via _add_sub_blocks().
self._lists = [
(np.zeros((0, 3)), np.zeros((0, 3)))
for _ in range(self.num_parent_blocks)
]
return self._lists
def _add_sub_blocks(self, ijk, new_centroids, new_sizes):
self.compressed_block_index # assert that _cbi exists
parent_index = self._get_parent_index(ijk)
centroids, sizes = self._get_lists()[parent_index]
if not isinstance(new_centroids, np.ndarray):
new_centroids = np.array(new_centroids)
new_centroids = new_centroids.reshape((-1, 3))
if not isinstance(new_sizes, np.ndarray):
new_sizes = np.array(new_sizes)
new_sizes = new_sizes.reshape((-1, 3))
assert (
(new_centroids.size % 3 == 0)
& (new_sizes.size % 3 == 0)
& (new_centroids.size == new_sizes.size)
)
# TODO: Check that the centroid exists in the block
self._lists[parent_index] = (
np.r_[centroids, new_centroids],
np.r_[sizes, new_sizes],
)
self._cbi[parent_index + 1 :] += new_sizes.size // 3
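# A minimal usage sketch (not part of the original module), assuming the
# `properties` package accepts keyword assignment at construction time. It
# refines one parent block of a RegularSubBlockModel and shows the compressed
# block index growing by prod(sub_block_count) - 1 new entries.
if __name__ == "__main__":
    demo = RegularSubBlockModel(
        name="demo",
        parent_block_size=[10.0, 10.0, 10.0],
        parent_block_count=[4, 4, 4],
        sub_block_count=[2, 2, 2],
    )
    print(demo.num_blocks)  # 64 parent blocks, nothing refined yet
    parent_index, attribute_index = demo.refine((0, 0, 0))
    print(parent_index, attribute_index, demo.num_blocks)  # 7 extra sub blocks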
|
11466342
|
import enum
class WeekDay(enum.Enum):
monday = 'MONDAY'
tuesday = 'TUESDAY'
wednesday = 'WEDNESDAY'
thursday = 'THURSDAY'
friday = 'FRIDAY'
saturday = 'SATURDAY'
sunday = 'SUNDAY'
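# A small illustration (not part of the original module): members can be looked
# up either by value or by name.
if __name__ == "__main__":
    print(WeekDay("MONDAY"))        # WeekDay.monday (lookup by value)
    print(WeekDay["monday"].value)  # 'MONDAY' (lookup by name)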
|
11466392
|
from django.apps import AppConfig
class FiberConfig(AppConfig):
name = 'fiber'
default_auto_field = 'django.db.models.AutoField'
|
11466423
|
from dartcms.utils.loading import is_model_registered
from django.apps import apps
from django.db.models.signals import post_save, pre_delete, pre_save
from .abstract_models import *
from .signals import *
__all__ = []
if not is_model_registered('shop', 'ProductCatalog'):
class ProductCatalog(AbstractProductCatalog):
pass
__all__.append('ProductCatalog')
if is_model_registered('shop', 'ProductSection'):
    section_model = apps.get_model('shop', 'ProductSection')
else:
class ProductSection(AbstractProductSection):
pass
__all__.append('ProductSection')
section_model = ProductSection
if not is_model_registered('shop', 'ProductLabel'):
class ProductLabel(AbstractProductLabel):
pass
__all__.append('ProductLabel')
if not is_model_registered('shop', 'ProductManufacturer'):
class ProductManufacturer(AbstractProductManufacturer):
pass
__all__.append('ProductManufacturer')
if not is_model_registered('shop', 'Product'):
class Product(AbstractProduct):
pass
__all__.append('Product')
if not is_model_registered('shop', 'ProductImage'):
class ProductImage(AbstractProductImage):
pass
__all__.append('ProductImage')
if not is_model_registered('shop', 'OrderStatus'):
class OrderStatus(AbstractOrderStatus):
pass
__all__.append('OrderStatus')
if not is_model_registered('shop', 'OrderPaymentType'):
class OrderPaymentType(AbstractOrderPaymentType):
pass
__all__.append('OrderPaymentType')
if not is_model_registered('shop', 'OrderShippingType'):
class OrderShippingType(AbstractOrderShippingType):
pass
__all__.append('OrderShippingType')
if not is_model_registered('shop', 'Order'):
class Order(AbstractOrder):
pass
__all__.append('Order')
if not is_model_registered('shop', 'OrderDetail'):
class OrderDetail(AbstractOrderDetail):
pass
__all__.append('OrderDetail')
pre_save.connect(pre_save_handler_section, sender=section_model)
post_save.connect(post_save_handler_section, sender=section_model)
pre_delete.connect(pre_delete_handler_section, sender=section_model)
|
11466430
|
import pytest
import numpy as np
@pytest.fixture(autouse=True)
def reload_cmap():
import importlib
import terracotta.cmaps.get_cmaps
try:
yield
finally:
importlib.reload(terracotta.cmaps.get_cmaps)
def test_get_cmap():
from terracotta.cmaps.get_cmaps import get_cmap, AVAILABLE_CMAPS
for name in AVAILABLE_CMAPS:
cmap = get_cmap(name)
assert cmap.shape == (255, 4)
assert cmap.dtype == np.uint8
def test_get_cmap_filesystem(monkeypatch):
import pkg_resources
import importlib
import terracotta.cmaps.get_cmaps
def throw_error(*args, **kwargs):
raise pkg_resources.DistributionNotFound('monkeypatched')
with monkeypatch.context() as m:
m.setattr(pkg_resources.Requirement, 'parse', throw_error)
with pytest.raises(pkg_resources.DistributionNotFound):
pkg_resources.Requirement.parse('terracotta')
importlib.reload(terracotta.cmaps.get_cmaps)
cmap = terracotta.cmaps.get_cmaps.get_cmap('jet')
assert cmap.shape == (255, 4)
assert cmap.dtype == np.uint8
def test_extra_cmap(monkeypatch, tmpdir):
import importlib
import terracotta.cmaps.get_cmaps
custom_cmap_data = np.tile(
np.arange(255, dtype='uint8'),
(4, 1)
).T
np.save(str(tmpdir / f'foo{terracotta.cmaps.get_cmaps.SUFFIX}'), custom_cmap_data)
np.save(str(tmpdir / 'bar.npy'), custom_cmap_data)
with monkeypatch.context() as m:
m.setenv('TC_EXTRA_CMAP_FOLDER', str(tmpdir))
importlib.reload(terracotta.cmaps.get_cmaps)
assert 'foo' in terracotta.cmaps.get_cmaps.AVAILABLE_CMAPS
assert 'bar' not in terracotta.cmaps.get_cmaps.AVAILABLE_CMAPS
np.testing.assert_equal(
custom_cmap_data,
terracotta.cmaps.get_cmaps.get_cmap('foo')
)
def test_extra_cmap_invalid_shape(monkeypatch, tmpdir):
import importlib
import terracotta.cmaps.get_cmaps
broken_cmap_data = np.tile(
np.arange(666, dtype='uint8'),
(4, 1)
).T
np.save(str(tmpdir / f'foo{terracotta.cmaps.get_cmaps.SUFFIX}'), broken_cmap_data)
with monkeypatch.context() as m:
m.setenv('TC_EXTRA_CMAP_FOLDER', str(tmpdir))
with pytest.raises(ValueError) as raised_exc:
importlib.reload(terracotta.cmaps.get_cmaps)
assert 'foo' in str(raised_exc.value)
assert '666' in str(raised_exc.value)
def test_extra_cmap_invalid_folder(monkeypatch):
import importlib
import terracotta.cmaps.get_cmaps
with monkeypatch.context() as m:
m.setenv('TC_EXTRA_CMAP_FOLDER', 'bar')
with pytest.raises(IOError) as raised_exc:
importlib.reload(terracotta.cmaps.get_cmaps)
assert 'bar' in str(raised_exc.value)
def test_extra_cmap_invalid_dtype(monkeypatch, tmpdir):
import importlib
import terracotta.cmaps.get_cmaps
broken_cmap_data = np.tile(
np.arange(255, dtype='float'),
(4, 1)
).T
np.save(str(tmpdir / f'foo{terracotta.cmaps.get_cmaps.SUFFIX}'), broken_cmap_data)
with monkeypatch.context() as m:
m.setenv('TC_EXTRA_CMAP_FOLDER', str(tmpdir))
with pytest.raises(ValueError) as raised_exc:
importlib.reload(terracotta.cmaps.get_cmaps)
assert 'foo' in str(raised_exc.value)
assert 'float' in str(raised_exc.value)
|
11466462
|
import datetime
import math
import random
import string
import bson
import bsonnumpy
import numpy as np
from test import client_context, millis, unittest, TestToNdarray, PY3
class TestSequenceFlat(TestToNdarray):
def test_incorrect_arguments(self):
# Expects iterator, dtype, count
needs_iter = r"sequence_to_ndarray requires an iterator"
needs_seq = r"sequence_to_ndarray requires sequence of bytes objects"
invalid = r"document from sequence failed validation"
with self.assertRaisesPattern(TypeError, needs_iter):
bsonnumpy.sequence_to_ndarray(1, np.dtype([("a", np.int)]), 1)
if PY3:
with self.assertRaisesPattern(TypeError, needs_seq):
bsonnumpy.sequence_to_ndarray("asdf", np.dtype([("a", np.int)]), 1)
with self.assertRaisesPattern(TypeError, needs_seq):
bsonnumpy.sequence_to_ndarray(b"asdf", np.dtype([("a", np.int)]), 1)
else:
with self.assertRaisesPattern(bsonnumpy.error, invalid):
bsonnumpy.sequence_to_ndarray("asdf", np.dtype([("a", np.int)]), 1)
with self.assertRaisesPattern(bsonnumpy.error, invalid):
bsonnumpy.sequence_to_ndarray(b"asdf", np.dtype([("a", np.int)]), 1)
with self.assertRaises(TypeError):
bsonnumpy.sequence_to_ndarray(10, 10, 1)
with self.assertRaisesPattern(
TypeError, "sequence_to_ndarray requires an iterator"):
bsonnumpy.sequence_to_ndarray(None, np.dtype([("a", np.int)]), 1)
def test_empty(self):
dtype = np.dtype([("a", np.int)])
result = bsonnumpy.sequence_to_ndarray([], dtype, 0)
self.assertEqual(result.dtype, dtype)
self.assertTrue(np.array_equal(result, np.array([], dtype)))
@client_context.require_connected
def test_int32(self):
docs = [{"x": i, "y": 10 - i} for i in range(10)]
dtype = np.dtype([('x', np.int32), ('y', np.int32)])
self.make_mixed_collection_test(docs, dtype)
dtype = np.dtype([('y', np.int32), ('x', np.int32)])
self.make_mixed_collection_test(docs, dtype)
@client_context.require_connected
def test_int64(self):
docs = [{"x": i, "y": 2 ** 63 - 1 - i} for i in range(10)]
dtype = np.dtype([('x', np.int64), ('y', np.int64)])
self.make_mixed_collection_test(docs, dtype)
dtype = np.dtype([('y', np.int64), ('x', np.int64)])
self.make_mixed_collection_test(docs, dtype)
@client_context.require_connected
def test_objectid(self):
docs = [{"x": bson.ObjectId()} for _ in range(10)]
dtype = np.dtype([('x', '<V12')])
self.client.bsonnumpy_test.coll.delete_many({})
self.client.bsonnumpy_test.coll.insert_many(docs)
cursor = self.client.bsonnumpy_test.coll.find_raw_batches()
ndarray = bsonnumpy.sequence_to_ndarray(cursor, dtype, cursor.count())
for i, row in enumerate(ndarray):
document = docs[i]
self.assertEqual(document["x"].binary, row["x"].tobytes())
@client_context.require_connected
def test_bool(self):
docs = [{"x": True}, {"x": False}]
dtype = np.dtype([('x', np.bool)])
self.make_mixed_collection_test(docs, dtype)
@client_context.require_connected
def test_datetime(self):
docs = [{"x": datetime.datetime(1970, 1, 1)},
{"x": datetime.datetime(1980, 1, 1)},
{"x": datetime.datetime(1990, 1, 1)}]
dtype = np.dtype([('x', np.int64)])
self.client.bsonnumpy_test.coll.delete_many({})
self.client.bsonnumpy_test.coll.insert_many(docs)
cursor = self.client.bsonnumpy_test.coll.find_raw_batches()
ndarray = bsonnumpy.sequence_to_ndarray(cursor, dtype, cursor.count())
for i, row in enumerate(ndarray):
document = docs[i]
self.assertEqual(
millis(document["x"] - datetime.datetime(1970, 1, 1)),
row["x"])
@client_context.require_connected
def test_double(self):
docs = [{"x": math.pi}, {"x": math.pi ** 2}]
dtype = np.dtype([('x', np.double)])
self.make_mixed_collection_test(docs, dtype)
@client_context.require_connected
def test_binary(self):
docs = [{"x": bson.Binary(b"asdf")}]
dtype = np.dtype([('x', np.dtype("<V10"))])
self.make_mixed_collection_test(docs, dtype)
@client_context.require_connected
def test_mixed_scalar(self):
docs = [{"x": i, "y": random.choice(string.ascii_lowercase) * 11} for i
in range(10)]
dtype = np.dtype([('x', np.int32), ('y', 'S11')])
self.make_mixed_collection_test(docs, dtype)
dtype = np.dtype([('y', 'S11'), ('x', np.int32)])
self.make_mixed_collection_test(docs, dtype)
def test_void(self):
# TODO: test for types that are 'V'
pass
@client_context.require_connected
def test_aggregate_raw_batches(self):
dtype = np.dtype([('y', np.int32)])
docs = [{"x": i} for i in range(10)]
expected = [(2 * i,) for i in range(10)]
coll = self.get_cursor_sequence(docs)
pipeline = [{'$project': {'y': {'$multiply': [2, '$x']}}}]
ndarray = bsonnumpy.sequence_to_ndarray(
coll.aggregate_raw_batches(pipeline), dtype, coll.count())
self.assertEqual(dtype, ndarray.dtype)
np.testing.assert_array_equal(ndarray, np.array(expected, dtype))
class TestSequenceArray(TestToNdarray):
@client_context.require_connected
def test_subarray1d(self):
# 1d subarray
docs = [{"x": [1 + i, -i - 1], "y": [i, -i]} for i in range(5)]
dtype = np.dtype([('x', '2int32'), ('y', '2int32')])
self.make_mixed_collection_test(docs, dtype)
dtype = np.dtype([('y', '2int32'), ('x', '2int32')])
self.make_mixed_collection_test(docs, dtype)
@client_context.require_connected
def test_subarray2d(self):
# 2d subarray
docs = [{"x": [[i + 0, i + 1, i + 2],
[i + 3, i + 4, i + 5],
[i + 6, i + 7, i + 8],
[i + 9, i + 10, i + 11]],
"y": "string!!" + str(i)} for i in range(10)]
dtype = np.dtype([('x', "(4,3)int32"), ('y', 'S10')])
self.make_mixed_collection_test(docs, dtype)
dtype = np.dtype([('y', 'S10'), ('x', "(4,3)int32")])
self.make_mixed_collection_test(docs, dtype)
@client_context.require_connected
def test_subarray3d(self):
# 3d subarray
docs = []
for i in range(5):
docs.append({
"x": [
[
[i, i + 1], [i + 1, i + 2], [i + 2, i + 3]
],
[
[-i, -i + 1], [-i - 1, -i], [-i - 2, -i - 1]
],
[
[100 * i, 100 * i + i], [100 * i + 1, 100 * i + i],
[100 * i + 2, 100 * i + i]
],
[
[0, 1], [1, 2], [3, 4]
]
],
"some_other_key": [
"string" + str(i), "string" + str(i + 1)
]
})
dtype = np.dtype([('x', "(4,3,2)int32"), ('some_other_key', '2S10')])
self.make_mixed_collection_test(docs, dtype)
dtype = np.dtype([('some_other_key', '2S10'), ('x', "(4,3,2)int32")])
self.make_mixed_collection_test(docs, dtype)
@client_context.require_connected
def test_subarray2d2(self):
# 3d subarray
docs = [{"x": [[i, i + 1, i + 2],
[-i, -i - 1, -i - 2],
[100 * i, 100 * i + 1, 100 * i + 2]],
"y": 100 - i} for i in range(2)]
dtype = np.dtype([('x', "(3,3)int32"), ('y', np.int32)])
self.make_mixed_collection_test(docs, dtype)
dtype = np.dtype([('y', np.int32), ('x', "(3,3)int32")])
self.make_mixed_collection_test(docs, dtype)
@client_context.require_connected
def test_mixed(self):
docs = [{"x": [i, -i], "y": random.choice(string.ascii_lowercase) * 11,
"z": bson.Binary(b'foobar')} for i in range(10)]
dtype = np.dtype([('x', '2int32'), ('y', 'S11'), ('z', 'V12')])
self.make_mixed_collection_test(docs, dtype)
dtype = np.dtype([('z', 'V12'), ('x', '2int32'), ('y', 'S11')])
self.make_mixed_collection_test(docs, dtype)
dtype = np.dtype([('y', 'S11'), ('x', '2int32'), ('z', 'V12')])
self.make_mixed_collection_test(docs, dtype)
class TestSequenceDoc(TestToNdarray):
@client_context.require_connected
def test_subdoc1(self):
# nested documents
docs = [{'x': {'y': 100 + i}} for i in range(10)]
dtype = np.dtype([('y', np.int32)])
dtype_sub = np.dtype([('x', dtype)])
self.make_mixed_collection_test(docs, dtype_sub)
@client_context.require_connected
def test_subdoc2(self):
# sub-doc has multiple fields
docs = [{'x': {'y': 100 + i, 'z': i}} for i in range(10)]
dtype = np.dtype([('y', np.int32), ('z', np.int32)])
dtype_sub = np.dtype([('x', dtype)])
self.make_mixed_collection_test(docs, dtype_sub)
@client_context.require_connected
def test_subdoc3(self):
# doc has multiple fields
docs = [{'x': {'y': 100 + i}, 'q': {'y': -i}} for i in range(10)]
dtype = np.dtype([('y', np.int32)])
dtype_sub = np.dtype([('x', dtype), ('q', dtype)])
self.make_mixed_collection_test(docs, dtype_sub)
@client_context.require_connected
def test_subdoc4(self):
# doc and subdoc have multiple fields
docs = [{'x': {'y': 100 + i, 'z': i}, 'q': {'y': -i, 'z': 100 - i}} for
i in range(10)]
dtype = np.dtype([('y', np.int32), ('z', np.int32)])
dtype_sub = np.dtype([('x', dtype), ('q', dtype)])
self.make_mixed_collection_test(docs, dtype_sub)
dtype = np.dtype([('z', np.int32), ('y', np.int32)])
dtype_sub = np.dtype([('q', dtype), ('x', dtype)])
self.make_mixed_collection_test(docs, dtype_sub)
@client_context.require_connected
def test_subdoc4_mixed(self):
docs = [{'x': {'y': str(10 + i) * i, 'z': i},
'q': {'y': str(i) * i, 'z': 100 - i}} for i in range(10)]
dtype = np.dtype([('y', 'S110'), ('z', np.int32)])
dtype_sub = np.dtype([('x', dtype), ('q', dtype)])
self.make_mixed_collection_test(docs, dtype_sub)
dtype = np.dtype([('z', np.int32), ('y', 'S110')])
dtype_sub = np.dtype([('q', dtype), ('x', dtype)])
self.make_mixed_collection_test(docs, dtype_sub)
@client_context.require_connected
def test_subdoc5(self):
# 3x nested documents
docs = [{'x': {'y': {'z': 100 + i}}} for i in range(10)]
dtype0 = np.dtype([('z', np.int32)])
dtype1 = np.dtype([('y', dtype0)])
dtype = np.dtype([('x', dtype1)])
self.make_mixed_collection_test(docs, dtype)
@client_context.require_connected
def test_subdoc6(self):
# 3x nested documents
docs = [{'x': {'y': {'z': i,
'z2': "this is a string!"},
'y2': i},
'x2': i} for i in range(10)]
dtype0 = np.dtype([('z', np.int32), ('z2', 'S17')])
dtype1 = np.dtype([('y', dtype0), ('y2', np.int32)])
dtype = np.dtype([('x', dtype1), ('x2', 'int32')])
self.make_mixed_collection_test(docs, dtype)
dtype0 = np.dtype([('z2', 'S17'), ('z', np.int32)])
dtype1 = np.dtype([('y2', np.int32), ('y', dtype0)])
dtype = np.dtype([('x2', 'int32'), ('x', dtype1)])
self.make_mixed_collection_test(docs, dtype)
class TestSequenceNestedArray(TestToNdarray):
@client_context.require_connected
def test_nested_array(self):
docs = [
{'x': {'y': [100 + i, 100, i],
'y1': (i + 1) * 10},
'x1': i + 5}
for i in range(10)]
dtype = np.dtype([('y', '3int32'), ('y1', 'int32')])
dtype_sub = np.dtype([('x', dtype), ('x1', 'int32')])
self.make_mixed_collection_test(docs, dtype_sub)
dtype = np.dtype([('y1', 'int32'), ('y', '3int32')])
dtype_sub = np.dtype([('x1', 'int32'), ('x', dtype)])
self.make_mixed_collection_test(docs, dtype_sub)
def test_deeply_nested_array(self):
# arrays of length 1 are maintained when they are within another array
dtype = np.dtype([("a", "(3,2,1)int32"),
("b", "(3,2,1)int32")])
doc = bson.SON([("a",
[[[9], [9]],
[[8], [8]],
[[7], [7]]]),
("b",
[[[6], [6]],
[[5], [5]],
[[4], [4]]])])
utf8 = bson._dict_to_bson(doc, False, bson.DEFAULT_CODEC_OPTIONS)
result = bsonnumpy.sequence_to_ndarray([utf8], dtype, 1)
self.assertEqual(dtype, result.dtype)
self.assertTrue(np.array_equal(
result,
np.array([([[[9], [9]],
[[8], [8]],
[[7], [7]]],
[[[6], [6]],
[[5], [5]],
[[4], [4]]])], dtype)))
dtype = np.dtype([("a", "(3,1)int32"),
("b", "(3,1)int32"),
("c", "(3,1)int32")])
doc = bson.SON([("a", [[9], [8], [7]]),
("b", [[6], [5], [4]]),
("c", [[3], [2], [1]])])
utf8 = bson._dict_to_bson(doc, False, bson.DEFAULT_CODEC_OPTIONS)
result = bsonnumpy.sequence_to_ndarray([utf8], dtype, 1)
self.assertEqual(dtype, result.dtype)
self.assertTrue(np.array_equal(
result,
np.array([([[9], [8], [7]],
[[6], [5], [4]],
[[3], [2], [1]])], dtype)))
dtype = np.dtype([("a", "2int32")])
doc = bson.SON([("a", [7, 7])])
utf8 = bson._dict_to_bson(doc, False, bson.DEFAULT_CODEC_OPTIONS)
result = bsonnumpy.sequence_to_ndarray([utf8], dtype, 1)
self.assertEqual(dtype, result.dtype)
self.assertTrue(np.array_equal(
result,
np.array([([7, 7],)], dtype)))
dtype = np.dtype([("a", "(2,1,1,1)int32")])
doc = bson.SON([("a", [[[[99]]], [[[88]]]])])
utf8 = bson._dict_to_bson(
doc, False, bson.DEFAULT_CODEC_OPTIONS)
result = bsonnumpy.sequence_to_ndarray([utf8], dtype, 1)
self.assertEqual(dtype, result.dtype)
self.assertTrue(np.array_equal(
result,
np.array([([[[[99]]], [[[88]]]],)], dtype)))
@client_context.require_connected
def test_nested_array2x(self):
docs = [{'x': {'y': [[100 + i, 100, i],
[i, i + 1, i + 2]],
'y1': (i + 1) * 10},
'x1': i + 5} for i in range(10)]
dtype = np.dtype([('y', '(2,3)int32'), ('y1', 'int32')])
dtype_sub = np.dtype([('x', dtype), ('x1', 'int32')])
self.make_mixed_collection_test(docs, dtype_sub)
dtype = np.dtype([('y1', 'int32'), ('y', '(2,3)int32')])
dtype_sub = np.dtype([('x1', 'int32'), ('x', dtype)])
self.make_mixed_collection_test(docs, dtype_sub)
@client_context.require_connected
def test_nested_array2x_mixed(self):
docs = [{'x': {'y': [[100 + i, 100, i],
[i, i + 1, i + 2]],
'y1': (i + 1) * 10},
'x1': [[[i + 5, i + 6], [i + 7, i + 8]],
[[i + 9, i + 10], [i + 11, i + 12]]]} for i in
range(10)]
dtype = np.dtype([('y', '(2,3)int32'), ('y1', 'int32')])
dtype_sub = np.dtype([('x', dtype), ('x1', '(2,2,2)int32')])
self.make_mixed_collection_test(docs, dtype_sub)
dtype = np.dtype([('y1', 'int32'), ('y', '(2,3)int32')])
dtype_sub = np.dtype([('x1', '(2,2,2)int32'), ('x', dtype)])
self.make_mixed_collection_test(docs, dtype_sub)
@client_context.require_connected
def test_nested_array2x_mixed2(self):
docs = [{'x': {'y': [[100 + i, 100, i],
[i, i + 1, i + 2]],
'y1': (i + 1) * 10,
'y2': random.choice(string.ascii_lowercase) * i},
'x1': [[[i + 5, i + 6], [i + 7, i + 8]],
[[i + 9, i + 10], [i + 11, i + 12]]]} for i in
range(10)]
dtype = np.dtype([('y', '(2,3)int32'), ('y1', 'int32'), ('y2', 'S12')])
dtype_sub = np.dtype([('x', dtype), ('x1', '(2,2,2)int32')])
self.make_mixed_collection_test(docs, dtype_sub)
dtype = np.dtype([('y2', 'S12'), ('y1', 'int32'), ('y', '(2,3)int32')])
dtype_sub = np.dtype([('x1', '(2,2,2)int32'), ('x', dtype)])
self.make_mixed_collection_test(docs, dtype_sub)
@client_context.require_connected
def test_nested_array3x(self):
# 3x nested documents
docs = [{'x': {'y': {'z': [
100 + i, 100 - i]}}} for i in range(10)]
dtype0 = np.dtype([('z', '2int32')])
dtype1 = np.dtype([('y', dtype0)])
dtype = np.dtype([('x', dtype1)])
self.make_mixed_collection_test(docs, dtype)
@client_context.require_connected
def test_nested_array3x2d(self):
# 3x nested documents with 2d array
docs = [
{'x': {'y': {
'z': [[100 + i, 100 - i, 100],
[1 * i, 2 * i, 3 * i],
[4 * i, 5 * i, 6 * i],
[7 * i, 8 * i, 9 * i]]}}} for i in range(10)]
dtype0 = np.dtype([('z', '(4,3)int32')])
dtype1 = np.dtype([('y', dtype0)])
dtype = np.dtype([('x', dtype1)])
self.make_mixed_collection_test(docs, dtype)
@client_context.require_connected
def test_nested_array3x2d_mixed(self):
# 3x nested documents with 2d array and other fields
docs = [
{'x': {'y': {
'z': [[100 + i, 100 - i, 100],
[1 * i, 2 * i, 3 * i],
[4 * i, 5 * i, 6 * i],
[7 * i, 8 * i, 9 * i]],
'z2': "this is a string!"},
'y2': {'a': "a different doc string"}},
'x2': [1, 2, 3]} for i in range(10)]
dtype2 = np.dtype([('a', 'S26')])
dtype0 = np.dtype([('z', '(4,3)int32'), ('z2', 'S17')])
dtype1 = np.dtype([('y', dtype0), ('y2', dtype2)])
dtype = np.dtype([('x', dtype1), ('x2', '3int32')])
self.make_mixed_collection_test(docs, dtype)
dtype0 = np.dtype([('z2', 'S17'), ('z', '(4,3)int32')])
dtype1 = np.dtype([('y2', dtype2), ('y', dtype0)])
dtype = np.dtype([('x2', '3int32'), ('x', dtype1)])
self.make_mixed_collection_test(docs, dtype)
@client_context.require_connected
def test_nested_array_complicated(self):
num = 3
docs = [{} for _ in range(num)]
for i in range(num):
doc = docs[i]
letter_index = 2
for letter in string.ascii_lowercase:
doc[letter] = {}
subarray = [[letter for _ in range(letter_index)] for _ in
range(letter_index)]
doc[letter + '1'] = subarray
doc[letter + '2'] = letter * random.randint(0, 100)
doc = doc[letter]
letter_index += 1
doc['LOWEST'] = [1 * i + 99, 2 * i + 98, 3 * i + 97]
doc['LOWEST2'] = i
doc['LOWEST3'] = 'another long string'
dt = np.dtype(
[('LOWEST', '3int32'), ('LOWEST2', np.int32), ('LOWEST3', 'S20')])
letter_index = len(string.ascii_lowercase) + 1
for letter in string.ascii_lowercase[::-1]:
type_name = '(' + str(letter_index) + ',' + str(
letter_index) + ')S2'
dt = np.dtype([(letter, dt), (letter + '2', 'S100'),
(letter + '1', type_name)])
letter_index -= 1
self.make_mixed_collection_test(docs, dt) # OMG this works!!
if __name__ == '__main__':
unittest.main()
|
11466500
|
import json
from uuid import uuid4
from urllib.request import urlopen, Request
from urllib.error import HTTPError
class Yggdrasil(object):
ygg_version = 1
ygg_url = 'https://authserver.mojang.com'
def __init__(self, username='', password='', client_token='',
access_token=''):
self.username = username
self.password = password
self.client_token = client_token
self.access_token = access_token
self.available_profiles = []
self.selected_profile = {}
self._last_err = ""
last_err = property(lambda self: self._last_err)
def login(self):
if self.access_token and self.validate():
return True
if self.access_token and self.client_token and self.refresh():
return True
self.client_token = uuid4().hex
return self.username and self.password and self.authenticate()
def logout(self):
return self.access_token and self.client_token and self.invalidate()
def _ygg_req(self, endpoint, payload):
try:
resp = urlopen(
Request(url=self.ygg_url + endpoint,
data=json.dumps(payload).encode('utf-8'),
headers={'Content-Type': 'application/json'}))
except HTTPError as e:
resp = e
data = resp.read().decode('utf-8')
return json.loads(data) if data else dict()
def authenticate(self):
"""
        Generate an access token using a username and password. Any existing
        client token is invalidated if not provided.
        Returns:
            bool: True on success, False on failure (the error is stored in ``last_err``)
"""
endpoint = '/authenticate'
payload = {
'agent': {
'name': 'Minecraft',
'version': self.ygg_version,
},
'username': self.username,
'password': <PASSWORD>,
'clientToken': self.client_token,
}
rep = self._ygg_req(endpoint, payload)
if not rep or 'error' in rep:
self._last_err = rep['errorMessage'] if rep else 'Unknown failure'
return False
self.access_token = rep['accessToken']
self.client_token = rep['clientToken']
self.available_profiles = rep['availableProfiles']
self.selected_profile = rep['selectedProfile']
return True
def refresh(self):
"""
Generate an access token with a client/access token pair. Used
access token is invalidated.
Returns:
            bool: True on success, False on failure (the error is stored in ``last_err``)
"""
endpoint = '/refresh'
payload = {
'accessToken': self.access_token,
'clientToken': self.client_token,
}
rep = self._ygg_req(endpoint, payload)
if not rep or 'error' in rep:
self._last_err = rep['errorMessage'] if rep else 'Unknown failure'
return False
self.access_token = rep['accessToken']
self.client_token = rep['clientToken']
self.selected_profile = rep['selectedProfile']
return True
def signout(self):
"""
Invalidate access tokens with a username and password.
Returns:
            bool: True on success, False on failure (the error is stored in ``last_err``)
"""
endpoint = '/signout'
payload = {
'username': self.username,
'password': <PASSWORD>,
}
rep = self._ygg_req(endpoint, payload)
if not rep or 'error' in rep:
self._last_err = rep['errorMessage'] if rep else 'Unknown failure'
return False
self.client_token = ''
self.access_token = ''
self.available_profiles = []
self.selected_profile = {}
return True
def invalidate(self):
"""
Invalidate access tokens with a client/access token pair
Returns:
            bool: always True; local tokens and profiles are cleared
"""
endpoint = '/invalidate'
payload = {
'accessToken': self.access_token,
'clientToken': self.client_token,
}
self._ygg_req(endpoint, payload)
self.client_token = ''
self.access_token = ''
self.available_profiles = []
self.selected_profile = {}
return True
def validate(self):
"""
Check if an access token is valid
Returns:
            bool: True if the access token is valid
"""
endpoint = '/validate'
payload = dict(accessToken=self.access_token)
rep = self._ygg_req(endpoint, payload)
return not bool(rep)
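# A hedged usage sketch (not part of the original module). The credentials are
# placeholders, and a real call performs network requests against the Mojang
# authserver defined in `ygg_url`.
if __name__ == "__main__":
    ygg = Yggdrasil(username="player@example.com", password="example-password")
    if ygg.login():
        print("logged in as", ygg.selected_profile.get("name"))
        ygg.logout()
    else:
        print("login failed:", ygg.last_err)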
|
11466530
|
import boto3
import time
stream_name = 'mystream'
#Creating a kinesis client
client = boto3.client('kinesis')
#Getting shard id by describing the stream
response = client.describe_stream(StreamName=stream_name)
shard_id = response['StreamDescription']['Shards'][1]['ShardId']
#getting the shard iterator (pointer) value
shard_iterator = client.get_shard_iterator(StreamName=stream_name,
ShardId=shard_id,
ShardIteratorType='TRIM_HORIZON')
stream_shard_iterator = shard_iterator['ShardIterator']
#Getting records from the stream
response = client.get_records(ShardIterator=stream_shard_iterator, Limit=1)
#looping infinitely to get records instantly
#limit can be set from 1-10,000
while True:
response = client.get_records(
ShardIterator=response['NextShardIterator'],
Limit=1
)
    print(response)
#making sure we are not getting throttled by kinesis
time.sleep(0.5)
|
11466608
|
import sys
import json
from subprocess import Popen, PIPE
from smac.configspace import Configuration
from smac.tae.execute_ta_run import StatusType, ExecuteTARun
__author__ = "<NAME>"
__copyright__ = "Copyright 2015, ML4AAD"
__license__ = "3-clause BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "0.0.1"
class ExecuteTARunAClib(ExecuteTARun):
"""Executes a target algorithm run with a given configuration on a given
instance and some resource limitations. Uses the AClib 2.0 style
"""
def run(self, config: Configuration,
instance: str=None,
cutoff: float=None,
seed: int=12345,
instance_specific: str="0"):
"""Runs target algorithm <self.ta> with configuration <config> on
instance <instance> with instance specifics <specifics> for at most
<cutoff> seconds and random seed <seed>
Parameters
----------
config : Configuration
Dictionary param -> value
instance : str
Problem instance
cutoff : float
Runtime cutoff
seed : int
Random seed
instance_specific: str
Instance specific information -- ignored here
Returns
-------
status: enum of StatusType (int)
{SUCCESS, TIMEOUT, CRASHED, ABORT}
cost: float
cost/regret/quality/runtime (float) (None, if not returned by TA)
runtime: float
runtime (None if not returned by TA)
additional_info: dict
all further additional run information
"""
if instance is None:
instance = "0"
if cutoff is None:
cutoff = 99999999999999
results, stdout_, stderr_ = self._call_ta(config=config,
instance=instance,
instance_specific=instance_specific,
cutoff=cutoff, seed=seed)
if results["status"] in ["SAT", "UNSAT", "SUCCESS"]:
status = StatusType.SUCCESS
elif results["status"] in ["TIMEOUT"]:
status = StatusType.TIMEOUT
elif results["status"] in ["CRASHED"]:
status = StatusType.CRASHED
elif results["status"] in ["ABORT"]:
status = StatusType.ABORT
elif results["status"] in ["MEMOUT"]:
status = StatusType.MEMOUT
else:
self.logger.warn("Could not identify status; should be one of the following: "
"SAT, UNSAT, SUCCESS, TIMEOUT, CRASHED, ABORT or MEMOUT; "
"Treating as CRASHED run.")
status = StatusType.CRASHED
if status in [StatusType.CRASHED, StatusType.ABORT]:
self.logger.warn(
"Target algorithm crashed. Last 5 lines of stdout and stderr")
self.logger.warn("\n".join(stdout_.split("\n")[-5:]))
self.logger.warn("\n".join(stderr_.split("\n")[-5:]))
if results.get("runtime") is None:
self.logger.warn("The target algorithm has not returned a"
" runtime -- imputed by 0.")
# (TODO) Check 0
results["runtime"] = 0
runtime = float(results["runtime"])
if self.run_obj == "quality" and results.get("cost") is None:
self.logger.error(
"The target algorithm has not returned a quality/cost value" +
"although we optimize cost.")
# (TODO) Do not return 0
results["cost"] = 0
if self.run_obj == "runtime":
cost = float(results["runtime"])
else:
cost = float(results["cost"])
del results["status"]
try:
del results["runtime"]
except KeyError:
pass
try:
del results["cost"]
except KeyError:
pass
return status, cost, runtime, results
def _call_ta(self,
config: Configuration,
instance: str,
instance_specific: str,
cutoff: float,
seed: int):
# TODO: maybe replace fixed instance specific and cutoff_length (0) to
# other value
cmd = []
cmd.extend(self.ta)
cmd.extend(["--instance", instance,
"--cutoff", str(cutoff),
"--seed", str(seed),
"--config"
])
for p in config:
if not config.get(p) is None:
cmd.extend(["-" + str(p), str(config[p])])
self.logger.debug("Calling: %s" % (" ".join(cmd)))
p = Popen(cmd, shell=False, stdout=PIPE,
stderr=PIPE, universal_newlines=True)
stdout_, stderr_ = p.communicate()
self.logger.debug("Stdout: %s" % (stdout_))
self.logger.debug("Stderr: %s" % (stderr_))
results = {"status": "CRASHED",
"cost": 1234567890
}
for line in stdout_.split("\n"):
if line.startswith("Result of this algorithm run:"):
fields = ":".join(line.split(":")[1:])
results = json.loads(fields)
return results, stdout_, stderr_
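# A hypothetical illustration (not part of SMAC): the stdout line that
# _call_ta() looks for from an AClib-style target-algorithm wrapper, and how
# the JSON payload is recovered from it. The numbers are made-up examples.
if __name__ == "__main__":
    sample_stdout = ('Result of this algorithm run: '
                     '{"status": "SUCCESS", "runtime": 3.2, "cost": 0.15}')
    for line in sample_stdout.split("\n"):
        if line.startswith("Result of this algorithm run:"):
            print(json.loads(":".join(line.split(":")[1:])))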
|
11466618
|
from .args import Args
from .format.args_format import ArgsFormat
from .raw_args import RawArgs
class ArgsParser(object):
"""
Parses raw console arguments and returns the parsed arguments.
"""
def parse(
self, args, fmt, lenient=False
): # type: (RawArgs, ArgsFormat, bool) -> Args
raise NotImplementedError()
|
11466644
|
import pytest
from plenum.common.util import randomString
from storage.optimistic_kv_store import OptimisticKVStore
@pytest.fixture()
def optimistic_store(parametrised_storage) -> OptimisticKVStore:
store = OptimisticKVStore(parametrised_storage)
return store
def gen_data(num):
return {randomString(32).encode(): randomString(100).encode()
for _ in range(num)}
def test_set_get_reject_keys_basic(optimistic_store):
# Set some keys, check their values, commit and check values again,
# reject and check values again
num_batches = 5
num_keys_per_batch = 5
batch_keys = []
data = gen_data(num_batches * num_keys_per_batch)
i = 0
for k, v in data.items():
if i % num_batches == 0:
batch_keys.append([])
batch_keys[-1].append((k, v))
i += 1
assert len(batch_keys) == num_batches
def chk_keys(batch, uncommitted=True, committed=False):
for k, v in batch:
if uncommitted:
assert optimistic_store.get(k, is_committed=False) == v
else:
with pytest.raises(KeyError):
optimistic_store.get(k, is_committed=False)
if committed:
assert optimistic_store.get(k, is_committed=True) == v
else:
with pytest.raises(KeyError):
optimistic_store.get(k, is_committed=True)
for i, batch in enumerate(batch_keys):
assert not optimistic_store.current_batch_ops
for k, v in batch:
optimistic_store.set(k, v, is_committed=False)
assert optimistic_store.current_batch_ops
old_len = len(optimistic_store.un_committed)
chk_keys(batch)
optimistic_store.create_batch_from_current(i)
assert not optimistic_store.current_batch_ops
assert len(optimistic_store.un_committed) == old_len + 1
chk_keys(batch)
if i < 3:
# Test uncommitted and committed set and get
assert optimistic_store.commit_batch() == i
assert len(optimistic_store.un_committed) == old_len
chk_keys(batch, committed=True)
else:
# Test reject
optimistic_store.reject_batch()
assert len(optimistic_store.un_committed) == old_len
chk_keys(batch, uncommitted=False, committed=False)
def test_set_get_reject_same_keys(optimistic_store):
# Set some keys, commit them, check their values, set new values for them
# in new batch, check committed and non committed values
num_keys = 10
keys = [randomString(32).encode() for _ in range(num_keys)]
vals_1 = {k: randomString(100).encode() for k in keys}
vals_2 = {k: randomString(100).encode() for k in keys}
vals_3 = {k: randomString(100).encode() for k in keys}
for k, v in vals_1.items():
optimistic_store.set(k, v, is_committed=False)
optimistic_store.create_batch_from_current(randomString(10))
optimistic_store.commit_batch()
for k, v in vals_1.items():
assert optimistic_store.get(k, is_committed=False) == v
assert optimistic_store.get(k, is_committed=True) == v
for k, v in vals_2.items():
optimistic_store.set(k, v, is_committed=False)
optimistic_store.create_batch_from_current(randomString(10))
for k, v in vals_2.items():
assert optimistic_store.get(k, is_committed=False) == v
assert optimistic_store.get(k, is_committed=True) == vals_1[k]
# More than 1 uncommitted batch exists containing the same keys but value
# of a key is equal to the value in last batch
for k, v in vals_3.items():
optimistic_store.set(k, v, is_committed=False)
optimistic_store.create_batch_from_current(randomString(10))
for k, v in vals_3.items():
assert optimistic_store.get(k, is_committed=False) != vals_1[k]
assert optimistic_store.get(k, is_committed=False) != vals_2[k]
assert optimistic_store.get(k, is_committed=False) == v
|
11466646
|
import pexpect
import sys
def run(name,opts,res):
child = pexpect.spawn('./{}'.format(name))
child.logfile = sys.stdout
try:
child.expect('>')
child.sendline('app.async(0)')
child.expect(r'< trans.send\(1,1\)')
child.expect('>')
child.sendline('trans.recv(1,1)')
child.expect(r'trans.send\(0,1\)')
child.expect('>')
child.sendline('trans.recv(0,1)')
child.expect(r'serv.elect\(0\)')
child.expect('>')
child.sendline('trans.recv(0,0)')
child.expect(r'assumption failed')
return True
except pexpect.EOF:
        print(child.before)
return False
|
11466663
|
from collections import Counter
from collections import ChainMap
from multiprocessing import Pool
import itertools
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i : i + n]
def multiprocessing(strings, function, cores = 16, list_mode = True):
df_split = chunks(strings, len(strings) // cores)
pool = Pool(cores)
pooled = pool.map(function, df_split)
pool.close()
pool.join()
if list_mode:
return list(itertools.chain(*pooled))
else:
return dict(ChainMap(*pooled))
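# A minimal usage sketch (not part of the original module). The worker must be
# a top-level function so it can be pickled, and the input needs at least
# `cores` items because chunks() divides the length by the core count.
def _upper_chunk(chunk):
    return [s.upper() for s in chunk]

if __name__ == "__main__":
    words = ["alpha", "beta", "gamma", "delta"]
    print(multiprocessing(words, _upper_chunk, cores=2))
    # ['ALPHA', 'BETA', 'GAMMA', 'DELTA']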
|
11466669
|
import numpy as np
import pandas as pd
import pvl
import sys
import functools
import json
import warnings
from os import path
from plio.io.io_json import read_json
from plio.utils._tes2numpy import tes_dtype_map
from plio.utils._tes2numpy import tes_columns
from plio.utils._tes2numpy import tes_scaling_factors
class Tes(object):
"""
Attributes
----------
    data : dataframe
        A pandas DataFrame containing the parsed data table, one row per observation
    dataset : str
        The TES dataset identifier parsed from the label (e.g. ATM, RAD, BOL)
    label : object
        The raw PVL label object
"""
def __init__(self, input_data, var_file = None, data_set=None):
"""
        Read the .tab file, parse the label, and extract the data table
        Parameters
        ----------
        input_data : string or pd.DataFrame
            The PATH to the input .tab file, or an already parsed dataframe
"""
def expand_column(df, expand_column, columns): # pragma: no cover
array = np.asarray([np.asarray(list(tup[0])) for tup in df[expand_column].as_matrix()], dtype=np.uint8)
new_df = pd.concat([df, pd.DataFrame(array, columns=columns)], axis=1)
del new_df[expand_column]
return new_df
def bolquality2arr(arr): # pragma: no cover
bitarr = np.unpackbits(np.asarray(arr, dtype=np.uint8))
lis = [(bitarr2int(bitarr[0:3]), bit2bool(bitarr[3:4]))]
types = [('BOLOMETRIC_INERTIA_RATING', '>u1'), ('BOLOMETER_LAMP_ANOMALY', 'bool_')]
arr = np.array(lis, dtype=types)
return arr
def obsquality2arr(arr): # pragma: no cover
bitarr = np.unpackbits(np.asarray(arr, dtype=np.uint8))
lis = [(bitarr2int(bitarr[0:2]), bitarr2int(bitarr[2:5]),
bitarr2int(bitarr[5:6]), bitarr2int(bitarr[6:7]),
bitarr2int(bitarr[7:8]), bitarr2int(bitarr[8:9]))]
types = [('HGA_MOTION', '>u1'), ('SOLAR_PANEL_MOTION', '>u1'), ('ALGOR_PATCH', '>u1'),
('IMC_PATCH', '>u1'), ('MOMENTUM_DESATURATION', '>u1'), ('EQUALIZATION_TABLE', '>u1')]
arr = np.array(lis, dtype=types)
return arr
def obsclass2arr(arr): # pragma: no cover
bitarr = np.unpackbits(np.asarray(arr, dtype=np.uint8))
lis = [(bitarr2int(bitarr[0:3]), bitarr2int(bitarr[3:7]),
bitarr2int(bitarr[7:11]), bitarr2int(bitarr[11:13]),
bitarr2int(bitarr[13:14]), bitarr2int(bitarr[14:16]),
bitarr2int(bitarr[16:]))]
types = [('MISSION_PHASE', '>u1'), ('INTENDED_TARGET', '>u1'), ('TES_SEQUENCE', '>u1'),
('NEON_LAMP_STATUS', '>u1'), ('TIMING_ACCURACY', '>u1'), ('SPARE', '>u1'), ('CLASSIFICATION_VALUE', '>u2')]
arr = np.array(lis, dtype=types)
return arr
def radquality2arr(arr): # pragma: no cover
bitarr = np.unpackbits(np.asarray(arr, dtype=np.uint8))
lis = [(bitarr2int(bitarr[0:1]), bitarr2int(bitarr[1:2]),
bitarr2int(bitarr[2:3]), bitarr2int(bitarr[3:5]),
bitarr2int(bitarr[5:7]), bitarr2int(bitarr[5:8]),
bitarr2int(bitarr[8:9]))]
types = [('MAJOR_PHASE_INVERSION', '>u1'), ('ALGOR_RISK', '>u1'), ('CALIBRATION_FAILURE', '>u1'),
('CALIBRATION_QUALITY', '>u1'), ('SPECTROMETER_NOISE', '>u1'), ('SPECTRAL_INERTIA_RATING', '>u1'),
('DETECTOR_MASK_PROBLEM', '>u1')]
arr = np.array(lis, dtype=types)
return arr
def atmquality2arr(arr): # pragma: no cover
bitarr = np.unpackbits(np.asarray(arr, dtype=np.uint8))
lis = [(bitarr2int(bitarr[0:2]), bitarr2int(bitarr[2:4]))]
types = [('TEMPERATURE_PROFILE_RATING', '>u1'), ('ATMOSPHERIC_OPACITY_RATING', '>u1')]
arr = np.array(lis, dtype=types)
return arr
def bitarr2int(arr): # pragma: no cover
arr = "".join(str(i) for i in arr)
return np.uint8(int(arr,2))
def bit2bool(bit): # pragma: no cover
return np.bool_(bit)
def expand_bitstrings(df, dataset): # pragma: no cover
if dataset == 'BOL':
quality_columns = ['ti_bol_rating', 'bol_ref_lamp']
df['quality'] = df['quality'].apply(bolquality2arr)
return expand_column(df, 'quality', quality_columns)
elif dataset == 'OBS':
quality_columns = ['hga_motion', 'pnl_motion', 'algor_patch', 'imc_patch',
'momentum', 'equal_tab']
class_columns = ['phase', 'type', 'sequence',
'lamp_status', 'timing', 'spare', 'class_value']
df['quality'] = df['quality'].apply(obsquality2arr)
df['class'] = df['class'].apply(obsclass2arr)
new_df = expand_column(df, 'quality', quality_columns)
new_df = expand_column(new_df, 'class', class_columns)
return new_df
elif dataset == 'RAD':
quality_columns = ['phase_inversion', 'algor_risk', 'calib_fail', 'calib_quality',
'spect_noise', 'ti_spc_rating', 'det_mask_problem']
df['quality'] = df['quality'].apply(radquality2arr)
return expand_column(df, 'quality', quality_columns)
elif dataset == 'ATM':
quality_columns = ['atm_pt_rating', 'atm_opacity_rating']
df['quality'] = df['quality'].apply(atmquality2arr)
return expand_column(df, 'quality', quality_columns)
else:
return df
if isinstance(input_data, pd.DataFrame):
self.dataset = None
if not data_set:
for key in tes_columns.keys():
if len(set(tes_columns[key]).intersection(set(input_data.columns))) > 3 :
self.dataset = key
else:
self.dataset=data_set
self.label = None
self.data = input_data
return
self.label = pvl.load(input_data)
nrecords = self.label['TABLE']['ROWS']
nbytes_per_rec = self.label['RECORD_BYTES']
data_start = self.label['LABEL_RECORDS'] * self.label['RECORD_BYTES']
dataset = self.label['TABLE']['^STRUCTURE'].split('.')[0]
self.dataset = dataset
numpy_dtypes = tes_dtype_map
columns = tes_columns
scaling_factors = tes_scaling_factors
with open(input_data, 'rb') as file:
file.seek(data_start)
buffer = file.read(nrecords*nbytes_per_rec)
array = np.frombuffer(buffer, dtype=numpy_dtypes[dataset.upper()]).byteswap().newbyteorder()
df = pd.DataFrame(data=array, columns=columns[dataset.upper()])
# Read Radiance array if applicable
if dataset.upper() == 'RAD': # pragma: no cover
if not var_file:
filename, file_extension = path.splitext(input_data)
var_file = filename + ".var"
with open(var_file, "rb") as var:
buffer = var.read()
def process_rad(index):
if index == -1:
return None
length = np.frombuffer(buffer[index:index+2], dtype='>u2')[0]
exp = np.frombuffer(buffer[index+2:index+4], dtype='>i2')[0]
scale = 2**(int(exp)-15)
radarr = np.frombuffer(buffer[index+4:index+4+length-2], dtype='>i2') * scale
if np.frombuffer(buffer[index+4+length-2:index+4+length], dtype='>u2')[0] != length:
                    warnings.warn("Last element did not match the length for file index {} in file {}".format(index, var_file))
return radarr
df["raw_rad"] = df["raw_rad"].apply(process_rad)
df["cal_rad"] = df["cal_rad"].apply(process_rad)
# Apply scaling factors
for column in scaling_factors[dataset]: # pragma: no cover
def scale(x):
return np.multiply(x, scaling_factors[dataset][column])
df[column] = df[column].apply(scale)
df = expand_bitstrings(df, dataset.upper())
self.data = df
def join(tes_data):
"""
Given a list of Tes objects, merges them into a single dataframe using
SPACECRAFT_CLOCK_START_COUNT (sclk_time) as the index.
Parameters
----------
tes_data : iterable
A Python iterable of Tes objects
Returns
-------
: dataframe
A pandas dataframe containing the merged data
: outliers
A list of Tes() objects containing the tables containing no matches
"""
if not hasattr(tes_data, '__iter__') and not isinstance(tes_data, Tes):
raise TypeError("Input data must be a Tes datasets or an iterable of Tes datasets, got {}".format(type(tes_data)))
elif not hasattr(tes_data, '__iter__'):
tes_data = [tes_data]
if len(tes_data) == 0:
        warnings.warn("Input iterable is empty")
if not all([isinstance(obj, Tes) for obj in tes_data]):
# Get the list of types and the indices of elements that caused the error
types = [type(obj) for obj in tes_data]
error_idx = [i for i, x in enumerate([isinstance(obj, Tes) for obj in tes_data]) if x == False]
        raise TypeError("Input data must be a Tes dataset, input array has non Tes objects at indices: {}\
for inputs of type: {}".format(error_idx, types))
single_key_sets = {'ATM', 'POS', 'TLM', 'OBS'}
compound_key_sets = {'BOL', 'CMP', 'GEO', 'IFG', 'PCT', 'RAD'}
    dfs = dict.fromkeys(single_key_sets | compound_key_sets, pd.DataFrame())
# Organize the data based on datasets
for ds in tes_data:
# Find a way to do this in place?
dfs[ds.dataset] = dfs[ds.dataset].append(ds.data)
# remove and dataframes that are empty
empty_dfs = [key for key in dfs.keys() if dfs[key].empty]
for key in empty_dfs:
dfs.pop(key, None)
single_key_dfs = [dfs[key] for key in dfs.keys() if key in single_key_sets]
compound_key_dfs = [dfs[key] for key in dfs.keys() if key in compound_key_sets]
all_dfs = single_key_dfs+compound_key_dfs
keyspace = functools.reduce(lambda left,right: left|right, [set(df['sclk_time']) for df in all_dfs])
single_key_merged = functools.reduce(lambda left,right: pd.merge(left, right, on=["sclk_time"]), single_key_dfs)
compound_key_merged = functools.reduce(lambda left,right: pd.merge(left, right, on=["sclk_time", "detector"]), compound_key_dfs)
merged = single_key_merged.merge(compound_key_merged, on="sclk_time")
outlier_idx = keyspace-set(merged["sclk_time"])
outliers = [Tes(tds.data[tds.data['sclk_time'].isin(outlier_idx)], data_set=tds.dataset) for tds in tes_data]
return merged, [tds for tds in outliers if not tds.data.empty]
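# A hedged usage sketch (not part of plio). The file names are hypothetical;
# a real run needs TES .tab products with their PVL labels on disk, and at
# least one single-key and one compound-key dataset for the merge to work.
if __name__ == "__main__":
    tables = [Tes("atm_example.tab"), Tes("rad_example.tab")]  # hypothetical paths
    merged, outliers = join(tables)
    print(merged.columns)
    print(len(outliers), "table(s) contained rows with no matching sclk_time")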
|
11466675
|
import sys
# sys.argv.append("--dynet-viz") #the package is broken
import dynet_config
# Declare GPU as the default device type
# dynet_config.set_gpu()
# Set some parameters manually
from dynetcon import Deserializer
dynet_config.set(mem=4, random_seed=9)
# Initialize dynet import using above configuration in the current scope
import dynet as dy
dyparams = dy.DynetParams()
dy.init()
from dynetcon import Learner
from neuralogic import lrnn
def main(argv):
print(sys.argv, len(sys.argv))
args = []
for k,v in argv.items():
args.append(k)
args.append(v)
neural_samples, neural_model, logic_model = lrnn.ground_NNs(args)
deserializer = Deserializer(neural_model)
samples = [deserializer.deserializeSample(sample) for sample in neural_samples]
learner = Learner(deserializer)
learner.learn(samples)
sys.exit(0)
|
11466697
|
import re
from typing import Optional
from openvpn_api.models import VPNModelBase
from openvpn_api.util import errors
class ServerStats(VPNModelBase):
"""OpenVPN server stats model."""
def __init__(self, client_count: int = None, bytes_in: int = None, bytes_out: int = None,) -> None:
# Number of connected clients
self.client_count: Optional[int] = client_count
# Server bytes in
self.bytes_in: Optional[int] = bytes_in
# Server bytes out
self.bytes_out: Optional[int] = bytes_out
@classmethod
def parse_raw(cls, raw: str) -> "ServerStats":
"""Parse raw `load-stats` response into an instance."""
for line in raw.splitlines():
if not line.startswith("SUCCESS"):
continue
match = re.search(
r"SUCCESS: nclients=(?P<nclients>\d+),bytesin=(?P<bytesin>\d+),bytesout=(?P<bytesout>\d+)", line
)
if not match:
raise errors.ParseError("Unable to parse stats from raw load-stats response.")
return cls(
client_count=int(match.group("nclients")),
bytes_in=int(match.group("bytesin")),
bytes_out=int(match.group("bytesout")),
)
raise errors.ParseError("Did not get expected data from load-stats.")
def __repr__(self) -> str:
return f"<ServerStats client_count={self.client_count}, bytes_in={self.bytes_in}, bytes_out={self.bytes_out}>"
|
11466703
|
from genty import genty, genty_dataset
from os.path import expanduser, join
from app.common.build_artifact import BuildArtifact
from app.util.conf.configuration import Configuration
from test.framework.base_unit_test_case import BaseUnitTestCase
@genty
class TestBuildArtifact(BaseUnitTestCase):
def setUp(self):
super().setUp()
Configuration['artifact_directory'] = expanduser('~')
@genty_dataset(
default=(join(expanduser('~'), '1', 'artifact_2_3'), 1, 2, 3),
with_nondefault_root=(join('override', '1', 'artifact_2_3'), 1, 2, 3, join('override')),
)
def test_atom_artifact_directory_returns_proper_artifact_path(self, expected_path, build_id, subjob_id=None,
atom_id=None, result_root=None):
self.assertEquals(
expected_path,
BuildArtifact.atom_artifact_directory(build_id, subjob_id, atom_id, result_root=result_root),
'The generated atom artifact directory is incorrect.'
)
@genty_dataset(
default=(join(expanduser('~'), '1'), 1),
with_nondefault_root=(join('override', '1'), 1, join('override')),
)
def test_build_artifact_directory_returns_proper_artifact_path(self, expected_path, build_id, result_root=None):
self.assertEquals(
expected_path,
BuildArtifact.build_artifact_directory(build_id, result_root=result_root),
'The generated build artifact directory is incorrect.'
)
@genty_dataset(
relative_path=('artifact_0_1', 0, 1),
absolute_path=('/path/to/build/1/artifact_0_1', 0, 1),
)
def test_subjob_and_atom_ids_parses_for_properly_formatted_directory(self, artifact_directory, expected_subjob_id,
expected_atom_id):
subjob_id, atom_id = BuildArtifact._subjob_and_atom_ids(artifact_directory)
self.assertEquals(subjob_id, expected_subjob_id)
self.assertEquals(atom_id, expected_atom_id)
@genty_dataset(
'artifact_0',
'/full/path/artifact_0',
'wrong_0_1',
'artifact_0_',
)
def test_subjob_and_atom_ids_raises_value_error_with_incorrect_format(self, incorrect_artifact_directory):
with self.assertRaises(ValueError):
BuildArtifact._subjob_and_atom_ids(incorrect_artifact_directory)
|
11466707
|
from typing import Union
from typing import List
from typing import Tuple
from typing import Optional
from typing import Dict
from typing import Any
import copy
import random
import itertools
from loguru import logger
from rdkit import Chem
from rdkit.Chem import rdmolops
from rdkit.Chem.MolStandardize import rdMolStandardize
from rdkit.Chem.MolStandardize import canonicalize_tautomer_smiles
import datamol as dm
from . import _sanifix4
PERIODIC_TABLE = Chem.rdchem.GetPeriodicTable()
TRIPLE_BOND = Chem.rdchem.BondType.TRIPLE
DOUBLE_BOND = Chem.rdchem.BondType.DOUBLE
SINGLE_BOND = Chem.rdchem.BondType.SINGLE
AROMATIC_BOND = Chem.rdchem.BondType.AROMATIC
DATIVE_BOND = Chem.rdchem.BondType.DATIVE
def copy_mol(mol: dm.Mol) -> dm.Mol:
"""Copy a molecule and return a new one.
Args:
mol: a molecule to copy.
"""
return copy.deepcopy(mol)
def to_mol(
mol: Union[str, dm.Mol],
add_hs: bool = False,
explicit_only: bool = False,
ordered: bool = False,
kekulize: bool = False,
sanitize: bool = True,
) -> Optional[Chem.rdchem.Mol]:
"""Convert an input molecule (smiles representation) into a `Chem.rdchem.Mol`.
Args:
mol: A SMILES or a molecule.
add_hs: Whether hydrogens should be added to the molecule.
explicit_only: Whether to only add explicit hydrogens or both
(implicit and explicit) when `add_hs` is set to True.
ordered: Whether the atom should be ordered. This option is
important if you want to ensure that the features returned will always maintain
a single atom order for the same molecule, regardless of its original SMILES representation.
kekulize: Whether to perform kekulization of the input molecules.
sanitize: Whether to apply rdkit sanitization when input is a SMILES.
Returns:
mol: the molecule if the conversion succeeded. If the conversion fails,
None is returned, so make sure that you handle this case on your own.
"""
if not isinstance(mol, (str, Chem.rdchem.Mol)):
raise ValueError(f"Input should be a Chem.rdchem.Mol or a string instead of '{type(mol)}'")
if isinstance(mol, str):
_mol = Chem.MolFromSmiles(mol, sanitize=sanitize) # type: ignore
if not sanitize and _mol is not None:
_mol.UpdatePropertyCache(False)
else:
_mol = mol
# Add hydrogens
if _mol is not None and add_hs:
_mol = Chem.AddHs(_mol, explicitOnly=explicit_only, addCoords=True) # type: ignore
# Reorder atoms
if _mol is not None and ordered:
_mol = reorder_atoms(_mol)
if _mol is not None and kekulize:
Chem.Kekulize(_mol, clearAromaticFlags=False) # type: ignore
return _mol
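# Illustrative sketch (not part of the original module): converting an
# arbitrary SMILES string (ethanol) into an RDKit molecule with a canonical
# atom order.
def _example_to_mol():
    mol = to_mol("CCO", add_hs=False, ordered=True)
    if mol is None:
        # Conversion can fail and return None, so callers should check.
        return None
    return mol.GetNumAtoms()  # 3 heavy atoms for ethanol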
def same_mol(mol1: Optional[Chem.rdchem.Mol], mol2: Optional[Chem.rdchem.Mol]):
"""Check two molecules are the same by comparing their InChiKey.
Invalid molecules (None) are always considered as not the same.
Args:
mol1: A molecule.
mol2: A molecule.
"""
if mol1 is None or mol2 is None:
return False
return dm.to_inchikey(mol1) == dm.to_inchikey(mol2)
def reorder_atoms(
mol: Chem.rdchem.Mol,
break_ties: bool = True,
include_chirality: bool = True,
include_isotopes: bool = True,
) -> Optional[Chem.rdchem.Mol]:
"""Reorder the atoms in a mol. It ensures a single atom order for the same molecule,
regardless of its original representation.
Args:
mol: a molecule.
break_ties: Force breaking of ranked ties.
include_chirality: Use chiral information when computing rank.
include_isotopes: Use isotope information when computing rank.
Returns:
mol: a molecule.
"""
if mol.GetNumAtoms() == 0:
return mol
new_order = Chem.CanonicalRankAtoms( # type: ignore
mol,
breakTies=break_ties,
includeChirality=include_chirality,
includeIsotopes=include_isotopes,
)
new_order = sorted([(y, x) for x, y in enumerate(new_order)])
return Chem.RenumberAtoms(mol, [y for (x, y) in new_order]) # type: ignore
def randomize_atoms(mol: Chem.rdchem.Mol) -> Optional[Chem.rdchem.Mol]:
"""Randomize the position of the atoms in a mol.
Args:
mol: a molecule.
Returns:
mol: a molecule.
"""
if mol.GetNumAtoms() == 0:
return mol
atom_indices = list(range(mol.GetNumAtoms()))
random.shuffle(atom_indices)
return Chem.RenumberAtoms(mol, atom_indices) # type: ignore
def to_neutral(mol: Chem.rdchem.Mol) -> Optional[Chem.rdchem.Mol]:
"""Neutralize the charge of a molecule.
Args:
mol: a molecule.
Returns:
mol: a molecule.
"""
if mol is None:
return mol
for a in mol.GetAtoms():
if a.GetFormalCharge() < 0 or (
a.GetExplicitValence() >= PERIODIC_TABLE.GetDefaultValence(a.GetSymbol())
and a.GetFormalCharge() > 0
):
a.SetFormalCharge(0)
a.UpdatePropertyCache(False)
return mol
def sanitize_mol(
mol: Chem.rdchem.Mol,
charge_neutral: bool = False,
sanifix: bool = True,
verbose: bool = True,
add_hs: bool = False,
) -> Optional[Chem.rdchem.Mol]:
"""An augmented version of RDKit `sanitize=True`. It uses a
mol-SMILES-mol conversion to catch potential aromaticity errors
and try to fix aromatic nitrogen (using the popular sanifix4 script).
Optionally, it can neutralize the charge of the molecule.
Note #1: Only the first conformer (if present) will be preserved and
a warning will be displayed if more than one conformer is detected.
Note #2: The molecule's properties will be preserved but the atom's
properties will be lost.
Args:
mol: a molecule.
charge_neutral: whether charge neutralization should be applied.
sanifix: whether to run the sanifix from <NAME>
(sanifix4.py) that tries to adjust aromatic nitrogens.
verbose: Whether to display a warning about multiple conformers.
add_hs: Add hydrogens to the returned molecule. Useful when the input
molecule already contains hydrogens.
Returns:
mol: a molecule.
"""
if mol is None:
return mol
# Extract properties.
original_mol = copy_mol(mol)
properties = original_mol.GetPropsAsDict()
if charge_neutral:
mol = to_neutral(mol)
if sanifix:
mol = _sanifix4.sanifix(mol)
if mol is not None:
# Detect multiple conformers
if verbose and mol.GetNumConformers() > 1:
logger.warning(
f"The molecule contains multiple conformers. Only the first one will be preserved."
)
# Try catch to avoid occasional aromaticity errors
try:
# `cxsmiles` is used here to preserve the first conformer.
mol = to_mol(dm.to_smiles(mol, cxsmiles=True), sanitize=True, add_hs=add_hs) # type: ignore
except Exception:
mol = None
if mol is not None:
# Insert back properties.
mol = dm.set_mol_props(mol, properties)
return mol
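# Illustrative sketch (not part of the original module): a typical
# sanitization call on a molecule built from an arbitrary SMILES (aniline).
def _example_sanitize_mol():
    mol = to_mol("c1ccccc1N")
    fixed = sanitize_mol(mol, charge_neutral=False, verbose=False)
    # `fixed` is either the sanitized molecule or None when sanitization failed.
    return fixed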
def sanitize_smiles(smiles: str, isomeric: bool = True) -> Optional[str]:
"""Takes SMILES string and returns its sanitized version.
Args:
smiles: smiles to be sanitized.
isomeric: Whether to include information about stereochemistry in the SMILES.
Returns:
sanitized smiles.
"""
try:
mol = dm.to_mol(smiles, sanitize=False)
mol = dm.sanitize_mol(mol, False)
except Exception:
return None
if mol is None:
return None
try:
smiles = dm.to_smiles(mol, isomeric=isomeric) # type: ignore
except:
return None
return smiles
def sanitize_first(mols: List[Chem.rdchem.Mol], charge_neutral: bool = False, sanifix: bool = True):
"""Sanitize a list of molecules and return the first valid molecule seen in the list.
Args:
mols: a list of molecules.
charge_neutral: whether charge neutralization should be applied.
sanifix: whether to run the sanifix from <NAME>
(sanifix4.py) that tries to adjust aromatic nitrogens.
Returns:
mol: a molecule.
"""
for mol in mols:
mol = sanitize_mol(mol, charge_neutral=charge_neutral, sanifix=sanifix)
if mol:
return mol
return None
def standardize_smiles(smiles: str, tautomer: bool = False):
r"""
Apply the SMILES standardization procedure. This is a convenience function wrapped around the RDKit
SMILES standardizer and tautomer canonicalization.
Args:
smiles: Smiles to standardize
tautomer: Whether to canonicalize tautomers
Returns:
standard_smiles: the standardized smiles
"""
smiles = rdMolStandardize.StandardizeSmiles(smiles)
if tautomer:
smiles = canonicalize_tautomer_smiles(smiles)
return smiles
def standardize_mol(
mol: Chem.rdchem.Mol,
disconnect_metals: bool = False,
normalize: bool = True,
reionize: bool = True,
uncharge: bool = False,
stereo: bool = True,
):
r"""
This function returns a standardized version of the given molecule, with or without disconnecting the metals.
The steps are applied in the order of the arguments.
Arguments:
mol: The molecule to standardize.
disconnect_metals: Whether to disconnect the metallic atoms from non-metals
normalize: Whether to apply normalization (correct functional groups and recombine charges).
reionize: Whether to apply molecule reionization
uncharge: Whether to remove all charge from molecule
stereo: Whether to attempt to assign stereochemistry
Returns:
mol: The standardized molecule.
"""
mol = copy_mol(mol)
if disconnect_metals:
md = rdMolStandardize.MetalDisconnector()
mol = md.Disconnect(mol)
if normalize:
mol = rdMolStandardize.Normalize(mol)
if reionize:
reionizer = rdMolStandardize.Reionizer()
mol = reionizer.reionize(mol)
if uncharge:
uncharger = rdMolStandardize.Uncharger()
mol = uncharger.uncharge(mol)
if stereo:
Chem.AssignStereochemistry(mol, force=False, cleanIt=True) # type: ignore
return mol
def fix_valence_charge(mol: Chem.rdchem.Mol, inplace: bool = False) -> Optional[Chem.rdchem.Mol]:
"""Fix valence issues that are due to incorrect charges.
Args:
mol: Input molecule with incorrect valence for some atoms
inplace: Whether to modify in place or make a copy.
Returns:
Fixed molecule via charge correction or original molecule if failed.
"""
vm = rdMolStandardize.RDKitValidation()
# Don't fix something that is not broken
if len(vm.validate(mol)) > 0:
if not inplace:
mol = copy.copy(mol)
mol.UpdatePropertyCache(False)
for a in mol.GetAtoms():
n_electron = (
a.GetImplicitValence()
+ a.GetExplicitValence()
- dm.PERIODIC_TABLE.GetDefaultValence(a.GetSymbol())
)
a.SetFormalCharge(n_electron)
return mol
def incorrect_valence(a: Union[Chem.rdchem.Mol, Chem.rdchem.Atom], update: bool = False) -> bool:
"""Check if an atom connection is not valid or all the atom of a molecule.
Args:
a: atom or molecule to check for valence issue.
update: Update owning molecule property cache first.
Returns:
Whether the input atom (or molecule) has an incorrect valence.
"""
if isinstance(a, Chem.rdchem.Mol):
a.UpdatePropertyCache(False)
vm = rdMolStandardize.RDKitValidation()
return len(vm.validate(a)) > 0
if update:
m = a.GetOwningMol()
m.UpdatePropertyCache(False)
return (a.GetImplicitValence() == 0) and (
a.GetExplicitValence() > max(PERIODIC_TABLE.GetValenceList(a.GetSymbol()))
)
def decrease_bond(bond: Chem.rdchem.Bond) -> Optional[Union[list, Chem.rdchem.Bond]]:
"""Remove one single bond from the input bond. Note that you should
first kekulize your molecules and remove non-standard bond.
Args:
bond: a bond.
"""
if bond.GetBondType() == TRIPLE_BOND:
return DOUBLE_BOND
if bond.GetBondType() == DOUBLE_BOND:
return SINGLE_BOND
if bond.GetBondType() == SINGLE_BOND:
return None
return bond
def fix_valence(
mol, inplace: bool = False, allow_ring_break: bool = False
) -> Optional[Chem.rdchem.Mol]:
"""Identify and try to fix valence issues by removing any supplemental bond
that should not be in the graph.
Args:
mol: input molecule with incorrect valence for some atoms
inplace: Whether to modify in place or make a copy
allow_ring_break: Whether bond removal involving ring is allowed.
Returns:
Molecule with potential valence issues fixed, the original molecule when nothing is broken,
or None if the fix failed.
"""
if not inplace:
mol = copy.copy(mol)
vm = rdMolStandardize.RDKitValidation()
if len(vm.validate(mol)) == 0: # don't fix something that is not broken
return mol
try:
m = Chem.RemoveHs( # type: ignore
mol,
implicitOnly=False,
updateExplicitCount=True,
sanitize=False,
)
m.UpdatePropertyCache(False)
# first pass using explicit false count
for atom in m.GetAtoms():
while incorrect_valence(atom) and atom.GetTotalNumHs() > 0:
cur_hydrogen = atom.GetTotalNumHs()
atom.SetNumExplicitHs(max(0, cur_hydrogen - 1))
atom.SetFormalCharge(max(0, atom.GetFormalCharge() - 1))
# atom.SetNumRadicalElectrons(0)
atom.UpdatePropertyCache(False)
em = Chem.RWMol(m) # type: ignore
bonds = em.GetBonds()
bonds = [
bond
for bond in bonds
if any(
[
incorrect_valence(bond.GetBeginAtom()),
incorrect_valence(bond.GetEndAtom()),
]
)
]
for bond in bonds:
a1 = bond.GetBeginAtom()
a2 = bond.GetEndAtom()
if incorrect_valence(a1) or incorrect_valence(a2):
mbond = decrease_bond(bond)
if allow_ring_break or (mbond or not bond.IsInRing()):
em.RemoveBond(a1.GetIdx(), a2.GetIdx())
if mbond is not None:
em.AddBond(a1.GetIdx(), a2.GetIdx(), mbond)
a1.UpdatePropertyCache(False)
a2.UpdatePropertyCache(False)
m = em.GetMol()
except Exception:
return None
return m
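# Illustrative sketch (not part of the original module): repairing a molecule
# with a deliberately incorrect valence. The SMILES below (a pentavalent
# carbon) is a made-up example and is parsed without sanitization so that it
# can be built at all.
def _example_fix_valence():
    broken = to_mol("C(C)(C)(C)(C)C", sanitize=False)
    fixed = fix_valence(broken)
    # `fixed` is either a repaired molecule or None if the greedy fix failed.
    return fixed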
def adjust_singleton(mol: Chem.rdchem.Mol) -> Optional[Chem.rdchem.Mol]:
"""Remove all atoms that are essentially disconnected singleton nodes in the molecular graph.
For example, the chlorine atom and the methane fragment will be removed in Cl.[N:1]1=CC(O)=CC2CCCCC12.CC.C,
but not the ethane fragment.
Args:
mol: a molecule.
"""
to_rem = []
em = Chem.RWMol(mol) # type: ignore
for atom in mol.GetAtoms():
if atom.GetExplicitValence() == 0:
to_rem.append(atom.GetIdx())
to_rem.sort(reverse=True)
for a_idx in to_rem:
em.RemoveAtom(a_idx)
return em.GetMol()
def remove_dummies(mol: Chem.rdchem.Mol, dummy: str = "*") -> Optional[Chem.rdchem.Mol]:
"""Remove dummy atoms from molecules."""
du = dm.to_mol(dummy)
out = mol
try:
out = Chem.ReplaceSubstructs(mol, du, dm.to_mol("[H]"), True)[0] # type: ignore
out = Chem.RemoveHs(out) # type: ignore
except Exception as e:
out = Chem.DeleteSubstructs(mol, du) # type: ignore
return out
def fix_mol(
mol: Chem.rdchem.Mol,
n_iter: int = 1,
remove_singleton: bool = False,
largest_only: bool = False,
inplace: bool = False,
) -> Optional[Chem.rdchem.Mol]:
"""Fix error in molecule using a greedy approach.
Args:
mol: input molecule to fix
n_iter: Number of valence fix iteration to apply
remove_singleton: Whether `adjust_singleton` should be applied
largest_only: Whether only the largest fragment should be kept
inplace: Whether to return a copy of the mol or perform in place operation
Returns:
Fixed molecule.
"""
if not inplace:
mol = copy.copy(mol)
m = sanitize_mol(mol) or mol  # fall back to mol when the fixer fails
if m is not None:
m = remove_dummies(m)
for _ in range(n_iter):
m = fix_valence(m)
if remove_singleton:
m = adjust_singleton(m)
if largest_only:
# m = max(Chem.rdmolops.GetMolFrags(m, asMols=True, sanitizeFrags=False), key=lambda m: m.GetNumAtoms())
m = rdMolStandardize.FragmentParent(m, skipStandardize=True)
return m
def replace_dummies_atoms(
mol: Chem.rdchem.Mol,
atom: str = "C",
dummy: str = "*",
replace_all: bool = True,
) -> Optional[Chem.rdchem.Mol]:
"""Remove dummy atoms from molecules.
Args:
mol: molecule with dummies
atom: replacement atom, default is carbon
dummy: dummy atom representation
replace_all: Whether to replace all dummies
Returns:
mol: Molecule with dummy replaced
"""
du = Chem.MolFromSmiles(dummy) # type: ignore
replacement = Chem.MolFromSmiles(atom) # type: ignore
out = Chem.ReplaceSubstructs(mol, du, replacement, replaceAll=replace_all)[0] # type: ignore
return out
def keep_largest_fragment(mol: Chem.rdchem.Mol) -> Optional[Chem.rdchem.Mol]:
"""Only keep largest fragment of each molecule."""
return max(
rdmolops.GetMolFrags(mol, asMols=True),
default=mol,
key=lambda m: m.GetNumAtoms(),
)
def is_transition_metal(at: Chem.rdchem.Atom) -> bool:
"""Check if atom is a transition metal.
Args:
at: an atom.
"""
n = at.GetAtomicNum()
return (n >= 22 and n <= 29) or (n >= 40 and n <= 47) or (n >= 72 and n <= 79)
def set_dative_bonds(
mol: Chem.rdchem.Mol, from_atoms: Tuple[int, int] = (7, 8)
) -> Optional[Chem.rdchem.Mol]:
"""Replaces some single bonds between metals and atoms with atomic numbers in fromAtoms
with dative bonds. The replacement is only done if the atom has "too many" bonds.
Arguments:
mol: molecule with bond to modify
from_atoms: List of atoms (symbol or atomic number) to consider for bond replacement.
By default, only Nitrogen (7) and Oxygen (8) are considered.
Returns:
The modified molecule.
"""
rwmol = Chem.RWMol(mol) # type: ignore
rwmol.UpdatePropertyCache(strict=False)
metals = [at for at in rwmol.GetAtoms() if is_transition_metal(at)]
for metal in metals:
for nbr in metal.GetNeighbors():
if (nbr.GetAtomicNum() in from_atoms or nbr.GetSymbol() in from_atoms) and (
nbr.GetExplicitValence() > PERIODIC_TABLE.GetDefaultValence(nbr.GetAtomicNum())
and rwmol.GetBondBetweenAtoms(nbr.GetIdx(), metal.GetIdx()).GetBondType()
== SINGLE_BOND
):
rwmol.RemoveBond(nbr.GetIdx(), metal.GetIdx())
rwmol.AddBond(nbr.GetIdx(), metal.GetIdx(), DATIVE_BOND)
return rwmol
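# Illustrative sketch (not part of the original module): rewriting the
# copper-nitrogen single bond of a made-up fragment as a dative bond. The
# SMILES is parsed without sanitization because the nitrogen valence is
# intentionally "too high" before the fix.
def _example_set_dative_bonds():
    complex_mol = Chem.MolFromSmiles("C[N](C)(C)[Cu]", sanitize=False)
    fixed = set_dative_bonds(complex_mol)
    # Single metal-N/O bonds where the atom exceeds its default valence are
    # rewritten as dative bonds in the returned RWMol.
    return fixed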
def set_mol_props(
mol: Chem.rdchem.Mol,
props: Dict[str, Any],
copy: bool = False,
) -> Chem.rdchem.Mol:
"""Set properties to a mol from a dict.
Args:
mol: the mol where to copy the props.
props: the props to copy.
copy: whether to copy the provided mol
"""
if copy is True:
mol = dm.copy_mol(mol)
for k, v in props.items():
if isinstance(v, bool):
mol.SetBoolProp(k, v)
elif isinstance(v, int):
mol.SetIntProp(k, v)
elif isinstance(v, float):
mol.SetDoubleProp(k, v)
else:
mol.SetProp(k, str(v))
return mol
def copy_mol_props(source: Chem.rdchem.Mol, destination: Chem.rdchem.Mol):
"""Copy properties from one source molecule to another destination
molecule.
Args:
source: a molecule to copy from.
destination: a molecule to copy to.
"""
props = source.GetPropsAsDict()
dm.set_mol_props(destination, props)
def atom_indices_to_mol(mol: Chem.rdchem.Mol, copy: bool = False):
"""Add the `molAtomMapNumber` property to each atoms.
Args:
mol: a molecule
copy: Whether to copy the molecule.
"""
if copy is True:
mol = copy_mol(mol)
for atom in mol.GetAtoms():
atom.SetProp("molAtomMapNumber", str(atom.GetIdx()))
return mol
def atom_list_to_bond(
mol: dm.Mol,
atom_indices: List[int],
bond_as_idx: bool = False,
):
"""Return a list of existing bond indices between a list of
atom indices.
Args:
mol: A molecule.
atom_indices: A list of atom indices.
"""
# Build an atom map
atom_map = {}
submol = Chem.PathToSubmol(mol, atom_indices, useQuery=True, atomMap=atom_map) # type: ignore
atom_map_reversed = {v: k for k, v in atom_map.items()}
bonds = []
for bond in submol.GetBonds():
a1, a2 = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
ori_a1 = atom_map_reversed[a1]
ori_a2 = atom_map_reversed[a2]
if ori_a1 in atom_indices and ori_a2 in atom_indices:
ori_bond = mol.GetBondBetweenAtoms(ori_a1, ori_a2)
if bond_as_idx:
bonds.append(ori_bond.GetIdx())
else:
bonds.append(ori_bond)
return bonds
def substructure_matching_bonds(mol: dm.Mol, query: dm.Mol, **kwargs):
"""Perform a substructure match using `GetSubstructMatches` but instead
of returning only the atom indices also return the bond indices.
Args:
mol: A molecule.
query: A molecule used as a query to match against.
kwargs: Any other arguments to pass to `mol.GetSubstructMatches()`.
Returns:
atom_matches: A list of lists of atom indices.
bond_matches: A list of lists of bond indices.
"""
# NOTE(hadim): If more substructure functions are added here, consider moving it to
# a dedicated `substructure` module.
# Set default arguments
kwargs.setdefault("uniquify", True)
# Get the matching atom indices
atom_matches = list(mol.GetSubstructMatches(query, **kwargs))
# Get the bonds to highlight from the query
query_bond_indices = [
(bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()) for bond in query.GetBonds()
]
# Retrieve the atom indices
query_atom_indices = [atom.GetIdx() for i, atom in enumerate(query.GetAtoms())]
bond_matches = []
for match in atom_matches:
# Map the atom of the query to the atom of the mol matching the query
atom_map = dict(zip(query_atom_indices, match))
# For the atoms in this match, use the map to retrieve the matching bonds
# in the mol.
mol_bond_indices = [(atom_map[a1], atom_map[a2]) for a1, a2 in query_bond_indices]
# Convert the bond atom indices to bond indices
mol_bond_indices = [mol.GetBondBetweenAtoms(a1, a2).GetIdx() for a1, a2 in mol_bond_indices]
bond_matches.append(mol_bond_indices)
return atom_matches, bond_matches
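# Illustrative sketch (not part of the original module): matching a benzene
# query inside toluene and collecting both atom and bond indices.
def _example_substructure_matching_bonds():
    mol = dm.to_mol("Cc1ccccc1")   # toluene
    query = dm.to_mol("c1ccccc1")  # benzene query
    atom_matches, bond_matches = substructure_matching_bonds(mol, query)
    # Each entry of `bond_matches` lists the bond indices of one match,
    # parallel to the atom indices in `atom_matches`.
    return atom_matches, bond_matches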
def protect_atoms(
mol: Chem.rdchem.Mol,
substruct: Optional[Chem.rdchem.Mol] = None,
atoms: Union[List[int], int] = None,
in_place: bool = False,
) -> Chem.rdchem.Mol:
"""Protect a list of atoms or substruct in a molecule.
The _protected attributes of a molecule is used by RDKit in several functions, especially for reactions
where "protected" atoms are disallowed from taking part in reactions.
Args:
mol: input molecule to protect
substruct: optional substructure query to identify atoms to protect
atoms: optional list of atom indices to protect
in_place: whether to perform the protection in place or return a copy of the molecule
"""
if atoms is None:
atoms = []
elif not isinstance(atoms, (tuple, list)):
atoms = [atoms]
# do not perform protection in place
if in_place:
mol_copy = mol
else:
mol_copy = copy_mol(mol)
if substruct is not None:
matches = mol_copy.GetSubstructMatches(substruct)
atoms.extend(itertools.chain(*matches))
for a in atoms:
if a is None:
continue
mol_copy.GetAtomWithIdx(a).SetProp("_protected", "1")
return mol_copy
def add_hs(
mol: dm.Mol,
explicit_only: bool = False,
add_coords: bool = False,
only_on_atoms: List[int] = None,
add_residue_info: bool = False,
copy: bool = True,
):
"""Adds hydrogens to the molecule.
Args:
mol: a molecule.
explicit_only: whether to only add explicit hydrogens.
add_coords: whether to add 3D coordinates to the hydrogens.
only_on_atoms: a list of atoms to add hydrogens only on.
add_residue_info: whether to add residue information to the hydrogens.
Useful for PDB files.
copy: whether to copy the input molecule.
"""
if copy:
mol = dm.copy_mol(mol)
mol = Chem.AddHs( # type: ignore
mol,
explicitOnly=explicit_only,
addCoords=add_coords,
onlyOnAtoms=only_on_atoms,
addResidueInfo=add_residue_info,
)
return mol
def remove_hs(
mol: dm.Mol,
implicit_only: bool = False,
update_explicit_count: bool = False,
sanitize: bool = True,
copy: bool = True,
):
"""Removes hydrogens from a molecule.
Args:
mol: a molecule.
implicit_only: whether to only remove implicit hydrogens.
update_explicit_count: whether to update the explicit hydrogen count.
sanitize: whether to sanitize the molecule after the hydrogens are removed.
copy: whether to copy the input molecule.
"""
if copy:
mol = dm.copy_mol(mol)
mol = Chem.RemoveHs( # type: ignore
mol,
implicitOnly=implicit_only,
updateExplicitCount=update_explicit_count,
sanitize=sanitize,
)
return mol
|
11466713
|
import os
def rid(i,base64):
os.makedirs('pic',exist_ok=True)
with open(os.path.join('pic',i+'.txt'),'w') as f:
f.write(base64)
|
11466759
|
import datetime
from django.utils.timezone import utc
import factory
from factory import fuzzy
from product_details import product_details
from pytz import common_timezones
from remo.events.models import (Attendance, Event, EventComment,
EventMetric, EventMetricOutcome)
from remo.profiles.tests import FunctionalAreaFactory, UserFactory
from remo.remozilla.tests import BugFactory
COUNTRIES = product_details.get_regions('en').values()
START_DT = datetime.datetime(2011, 1, 1, tzinfo=utc)
END_DT = datetime.datetime(2011, 2, 1, tzinfo=utc)
ATTENDANCE_CHOICES = [10, 50, 100, 500, 1000, 2000]
class EventFactory(factory.django.DjangoModelFactory):
"""Factory for Event model."""
name = factory.Sequence(lambda n: 'Event #%s' % n)
start = fuzzy.FuzzyDateTime(START_DT, END_DT)
end = fuzzy.FuzzyDateTime(END_DT + datetime.timedelta(days=3))
timezone = fuzzy.FuzzyChoice(common_timezones)
venue = 'VenueName'
city = 'CityName'
region = 'RegionName'
country = fuzzy.FuzzyChoice(COUNTRIES)
lat = fuzzy.FuzzyInteger(-90, 90)
lon = fuzzy.FuzzyInteger(-180, 180)
external_link = 'example.com'
owner = factory.SubFactory(UserFactory)
estimated_attendance = fuzzy.FuzzyInteger(1, 100)
actual_attendance = fuzzy.FuzzyInteger(1, 100)
description = 'This is an event description.'
extra_content = 'Extra content for event page.'
mozilla_event = fuzzy.FuzzyChoice([True, False])
hashtag = 'EventHashtag'
converted_visitors = fuzzy.FuzzyInteger(100)
swag_bug = factory.SubFactory(BugFactory)
budget_bug = factory.SubFactory(BugFactory)
times_edited = fuzzy.FuzzyInteger(10)
class Meta:
model = Event
@factory.post_generation
def categories(self, create, extracted, **kwargs):
"""Add event categories after event creation."""
if not create:
return
if extracted:
for category in extracted:
self.categories.add(category)
else:
area = FunctionalAreaFactory.create()
self.categories.add(area)
class AttendanceFactory(factory.django.DjangoModelFactory):
"""Factory for Attendance model."""
user = factory.SubFactory(UserFactory)
event = factory.SubFactory(EventFactory)
email = factory.SelfAttribute('user.email')
class Meta:
model = Attendance
class EventCommentFactory(factory.django.DjangoModelFactory):
"""Factory for EventComment model."""
user = factory.SubFactory(UserFactory)
event = factory.SubFactory(EventFactory)
comment = factory.LazyAttribute(lambda o: 'Comment for %s from %s'
% (o.event, o.user))
class Meta:
model = EventComment
class EventMetricFactory(factory.django.DjangoModelFactory):
name = factory.Sequence(lambda n: 'EventMetric #{0}'.format(n))
class Meta:
model = EventMetric
class EventMetricOutcomeFactory(factory.django.DjangoModelFactory):
event = factory.SubFactory(EventFactory)
metric = factory.SubFactory(EventMetricFactory)
expected_outcome = fuzzy.FuzzyInteger(1, 100)
class Meta:
model = EventMetricOutcome
|
11466765
|
import eel
from file_handler import getFile, read_data
from json import load
from os.path import join
from program import start, loadEditor, cleanup, loadOptions, saveEditor
from generator import gen_img
eel.init("UI")
@eel.expose
def getTemplate():
return getFile("template")
@eel.expose
def getCSV():
return getFile("csv")
@eel.expose
def startProgram(event_name, template, csv):
if event_name and template and csv:
start(event_name, template, csv)
return True
else:
return "incomplete"
@eel.expose
def setupEditor():
j = loadEditor()
return [j["event_name"], [j["template"], j["width"], j["height"]], read_data(j["csv"], only_cols=True), j["template_base"], j["csv_base"]]
@eel.expose
def setupOptions():
opts = loadOptions()
return opts
@eel.expose
def saveTrans(trans):
saveEditor(trans)
return True
@eel.expose
def startgen(options):
gen_img(options)
return True
@eel.expose
def finish():
with open(join("UI/temp", "dsc-cert-gen.json")) as f:
j = load(f)
cleanup()
return j["event_name"]
try:
eel.start("index.html", options={"mode": "chrome-app"})
except Exception as e:
if str(e) == "Can't find Chrome or Chromium installation":
eel.start("index.html?r=true", options={"mode": "default"})
|
11466788
|
from app.main.model.database import BlacklistToken
from sanic.log import logger
from ..util.response import *
async def save_token(token):
blacklist_token = BlacklistToken(token=token)
try:
# insert the token
await blacklist_token.commit()
return response_message(SUCCESS)
except Exception as e:
logger.exception(e)
return response_message(UNKNOWN_ERROR)
|
11466811
|
import multiprocessing
import math
import cv2 as cv
import keras.backend as K
import numpy as np
from tensorflow.python.client import device_lib
from config import epsilon, epsilon_sqr
from config import img_cols
from config import img_rows
from config import unknown_code
# overall loss: weighted summation of the two individual losses.
#
def overall_loss(y_true, y_pred):
w_l = 0.5
return w_l * alpha_prediction_loss(y_true, y_pred) + (1 - w_l) * compositional_loss(y_true, y_pred)
# alpha prediction loss: the absolute difference between the ground truth alpha values and the
# predicted alpha values at each pixel. However, due to the non-differentiable property of
# absolute values, we use the following loss function to approximate it.
def alpha_prediction_loss(y_true, y_pred):
mask = y_true[:, :, :, 1]
diff = y_pred[:, :, :, 0] - y_true[:, :, :, 0]
diff = diff * mask
num_pixels = K.sum(mask)
return K.sum(K.sqrt(K.square(diff) + epsilon_sqr)) / (num_pixels + epsilon)
# compositional loss: the absolute difference between the ground truth RGB colors and the predicted
# RGB colors composited by the ground truth foreground, the ground truth background and the predicted
# alpha mattes.
def compositional_loss(y_true, y_pred):
mask = y_true[:, :, :, 1]
mask = K.reshape(mask, (-1, img_rows, img_cols, 1))
image = y_true[:, :, :, 2:5]
fg = y_true[:, :, :, 5:8]
bg = y_true[:, :, :, 8:11]
c_g = image
c_p = y_pred * fg + (1.0 - y_pred) * bg
diff = c_p - c_g
diff = diff * mask
num_pixels = K.sum(mask)
return K.sum(K.sqrt(K.square(diff) + epsilon_sqr)) / (num_pixels + epsilon)
# compute the MSE error given a prediction, a ground truth and a trimap.
# pred: the predicted alpha matte
# target: the ground truth alpha matte
# trimap: the given trimap
#
def compute_mse_loss(pred, target, trimap):
error_map = (pred - target) / 255.
mask = np.equal(trimap, unknown_code).astype(np.float32)
# print('unknown: ' + str(unknown))
loss = np.sum(np.square(error_map) * mask) / np.sum(mask)
# print('mse_loss: ' + str(loss))
return loss
# compute the SAD error given a prediction, a ground truth and a trimap.
#
def compute_sad_loss(pred, target, trimap):
error_map = np.abs(pred - target) / 255.
mask = np.equal(trimap, unknown_code).astype(np.float32)
loss = np.sum(error_map * mask)
# the loss is scaled by 1000 due to the large images used in our experiment.
loss = loss / 1000
# print('sad_loss: ' + str(loss))
return loss
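# Illustrative sketch (not part of this file originally): evaluating the MSE
# and SAD metrics on tiny made-up alpha mattes where every trimap pixel is
# marked as unknown.
def _example_matting_metrics():
    pred = np.array([[0, 128], [255, 255]], dtype=np.float32)
    target = np.array([[0, 255], [255, 255]], dtype=np.float32)
    trimap = np.full((2, 2), unknown_code, dtype=np.uint8)
    mse = compute_mse_loss(pred, target, trimap)
    sad = compute_sad_loss(pred, target, trimap)
    return mse, sad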
# getting the number of GPUs
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
# getting the number of CPUs
def get_available_cpus():
return multiprocessing.cpu_count()
def get_final_output(out, trimap):
mask = np.equal(trimap, unknown_code).astype(np.float32)
return (1 - mask) * trimap + mask * out
def patch_dims(mat_size, patch_size):
return np.ceil(np.array(mat_size) / patch_size).astype(int)
def create_patches(mat, patch_size):
mat_size = mat.shape
assert len(mat_size) == 3, "Input mat needs to be 3-dimensional (H, W, channels)"
assert mat_size[-1] == 4, "Input mat needs to have 4 channels (R, G, B, trimap)"
patches_dim = patch_dims(mat_size=mat_size[:2], patch_size=patch_size)
patches_count = np.product(patches_dim)
patches = np.zeros(shape=(patches_count, patch_size, patch_size, 4), dtype=np.float32)
for y in range(patches_dim[0]):
y_start = y * patch_size
for x in range(patches_dim[1]):
x_start = x * patch_size
# extract patch from input mat
single_patch = mat[y_start: y_start + patch_size, x_start: x_start + patch_size, :]
# zero pad patch in bottom and right side if real patch size is smaller than patch size
real_patch_h, real_patch_w = single_patch.shape[:2]
patch_id = y + x * patches_dim[0]
patches[patch_id, :real_patch_h, :real_patch_w, :] = single_patch
return patches
def assemble_patches(pred_patches, mat_size, patch_size):
patch_dim_h, patch_dim_w = patch_dims(mat_size=mat_size, patch_size=patch_size)
result = np.zeros(shape=(patch_size * patch_dim_h, patch_size * patch_dim_w), dtype=np.uint8)
patches_count = pred_patches.shape[0]
for i in range(patches_count):
y = (i % patch_dim_h) * patch_size
x = int(math.floor(i / patch_dim_h)) * patch_size
result[y:y+patch_size, x:x+patch_size] = pred_patches[i]
return result
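# Illustrative sketch (not part of this file originally): splitting a small
# 4-channel input (R, G, B, trimap) into patches and reassembling a result of
# the padded size. The dimensions below are arbitrary example values.
def _example_patching():
    mat = np.zeros((100, 120, 4), dtype=np.float32)
    patch_size = 64
    patches = create_patches(mat, patch_size)  # ceil(100/64) * ceil(120/64) = 4 patches
    pred_patches = np.zeros((patches.shape[0], patch_size, patch_size), dtype=np.uint8)
    assembled = assemble_patches(pred_patches, mat_size=(100, 120), patch_size=patch_size)
    return assembled.shape  # (128, 128); callers crop back to the original size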
def safe_crop(mat, x, y, crop_size=(img_rows, img_cols)):
crop_height, crop_width = crop_size
if len(mat.shape) == 2:
ret = np.zeros((crop_height, crop_width), np.float32)
else:
ret = np.zeros((crop_height, crop_width, 3), np.float32)
crop = mat[y:y + crop_height, x:x + crop_width]
h, w = crop.shape[:2]
ret[0:h, 0:w] = crop
if crop_size != (img_rows, img_cols):
ret = cv.resize(ret, dsize=(img_rows, img_cols), interpolation=cv.INTER_NEAREST)
return ret
def draw_str(dst, target, s):
x, y = target
cv.putText(dst, s, (x + 1, y + 1), cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness=2, lineType=cv.LINE_AA)
cv.putText(dst, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv.LINE_AA)
|
11466878
|
from .ExamplePage import ExamplePage
class OtherFileTypes(ExamplePage):
def writeContent(self):
self.writeln('<h4>Test for other file types:</h4>')
self.writeLink('test.text')
self.writeLink('test.html')
def writeLink(self, link):
self.writeln(f'<p><a href="Tests/{link}">{link}</a></p>')
|
11466936
|
import random
import torch
import numpy as np
PAD, UNK, BOS, EOS = '<pad>', '<unk>', '<bos>', '<eos>'
BOC, EOC = '<boc>', '<eoc>'
LS, RS, SP = '<s>', '</s>', ' '
CS = ['<c-1>'] + ['<c' + str(i) + '>' for i in range(32)] # content
SS = ['<s-1>'] + ['<s' + str(i) + '>' for i in range(512)] # segment
PS = ['<p-1>'] + ['<p' + str(i) + '>' for i in range(512)] # position
TS = ['<t-1>'] + ['<t' + str(i) + '>' for i in range(32)] # other types
PUNCS = set([",", ".", "?", "!", ":", ",", "。", "?", "!", ":"])
BUFSIZE = 4096000
def ListsToTensor(xs, vocab=None):
max_len = max(len(x) for x in xs)
ys = []
for x in xs:
if vocab is not None:
y = vocab.token2idx(x) + [vocab.padding_idx]*(max_len - len(x))
else:
y = x + [0]*(max_len - len(x))
ys.append(y)
return ys
def _back_to_text_for_check(x, vocab):
w = x.t().tolist()
for sent in vocab.idx2token(w):
print (' '.join(sent))
def batchify(data, vocab):
xs_tpl, xs_seg, xs_pos, \
ys_truth, ys_inp, \
ys_tpl, ys_seg, ys_pos, msk = [], [], [], [], [], [], [], [], []
for xs_tpl_i, xs_seg_i, xs_pos_i, ys_i, ys_tpl_i, ys_seg_i, ys_pos_i in data:
xs_tpl.append(xs_tpl_i)
xs_seg.append(xs_seg_i)
xs_pos.append(xs_pos_i)
ys_truth.append(ys_i)
ys_inp.append([BOS] + ys_i[:-1])
ys_tpl.append(ys_tpl_i)
ys_seg.append(ys_seg_i)
ys_pos.append(ys_pos_i)
msk.append([1 for i in range(len(ys_i))])
xs_tpl = torch.LongTensor(ListsToTensor(xs_tpl, vocab)).t_().contiguous()
xs_seg = torch.LongTensor(ListsToTensor(xs_seg, vocab)).t_().contiguous()
xs_pos = torch.LongTensor(ListsToTensor(xs_pos, vocab)).t_().contiguous()
ys_truth = torch.LongTensor(ListsToTensor(ys_truth, vocab)).t_().contiguous()
ys_inp = torch.LongTensor(ListsToTensor(ys_inp, vocab)).t_().contiguous()
ys_tpl = torch.LongTensor(ListsToTensor(ys_tpl, vocab)).t_().contiguous()
ys_seg = torch.LongTensor(ListsToTensor(ys_seg, vocab)).t_().contiguous()
ys_pos = torch.LongTensor(ListsToTensor(ys_pos, vocab)).t_().contiguous()
msk = torch.FloatTensor(ListsToTensor(msk)).t_().contiguous()
return xs_tpl, xs_seg, xs_pos, ys_truth, ys_inp, ys_tpl, ys_seg, ys_pos, msk
def s2t(strs, vocab):
inp, msk = [], []
for x in strs:
inp.append(x)
msk.append([1 for i in range(len(x))])
inp = torch.LongTensor(ListsToTensor(inp, vocab)).t_().contiguous()
msk = torch.FloatTensor(ListsToTensor(msk)).t_().contiguous()
return inp, msk
def s2xy(lines, vocab, max_len, min_len):
data = []
for line in lines:
res = parse_line(line, max_len, min_len)
if not res:
continue
data.append(res)
return batchify(data, vocab)
def parse_line(line, max_len, min_len):
line = line.strip()
if not line:
return None
fs = line.split("<s2>")
author, cipai = fs[0].split("<s1>")
sents = fs[1].strip()
if len(sents) > max_len:
sents = sents[:max_len]
if len(sents) < min_len:
return None
sents = sents.split("</s>")
ys = []
xs_tpl = []
xs_seg = []
xs_pos = []
ctx = cipai
ws = [w for w in ctx]
xs_tpl = ws + [EOC]
xs_seg = [SS[0] for w in ws] + [EOC]
xs_pos = [SS[i+300] for i in range(len(ws))] + [EOC]
ys_tpl = []
ys_seg = []
ys_pos = []
for si, sent in enumerate(sents):
ws = []
sent = sent.strip()
if not sent:
continue
for w in sent:
ws.append(w)
if w.strip() and w not in PUNCS:
ys_tpl.append(CS[2])
else:
ys_tpl.append(CS[1])
ys += ws + [RS]
if ws[-1] in PUNCS:
ys_tpl[-2] = CS[3]
else:
ys_tpl[-1] = CS[3]
ys_tpl += [RS]
ys_seg += [SS[si + 1] for w in ws] + [RS]
ys_pos += [PS[len(ws) - i] for i in range(len(ws))] + [RS]
ys += [EOS]
ys_tpl += [EOS]
ys_seg += [EOS]
ys_pos += [EOS]
xs_tpl += ys_tpl
xs_seg += ys_seg
xs_pos += ys_pos
if len(ys) < min_len:
return None
return xs_tpl, xs_seg, xs_pos, ys, ys_tpl, ys_seg, ys_pos
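# Illustrative sketch (not part of this file originally): the line format
# consumed by `parse_line` is "author<s1>cipai<s2>sent1</s>sent2</s>...".
# The poem text below is a made-up example; real lines come from the corpus.
def _example_parse_line():
    line = "李清照<s1>如梦令<s2>常记溪亭日暮,</s>沉醉不知归路。</s>"
    res = parse_line(line, max_len=100, min_len=5)
    # `res` is None for lines that are too short, otherwise the tuple
    # (xs_tpl, xs_seg, xs_pos, ys, ys_tpl, ys_seg, ys_pos) fed to `batchify`.
    return res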
def s2xy_polish(lines, vocab, max_len, min_len):
data = []
for line in lines:
res = parse_line_polish(line, max_len, min_len)
if not res:
continue
data.append(res)
return batchify(data, vocab)
def parse_line_polish(line, max_len, min_len):
line = line.strip()
if not line:
return None
fs = line.split("<s2>")
author, cipai = fs[0].split("<s1>")
sents = fs[1].strip()
if len(sents) > max_len:
sents = sents[:max_len]
if len(sents) < min_len:
return None
sents = sents.split("</s>")
ys = []
xs_tpl = []
xs_seg = []
xs_pos = []
ctx = cipai
ws = [w for w in ctx]
xs_tpl = ws + [EOC]
xs_seg = [SS[0] for w in ws] + [EOC]
xs_pos = [SS[i+300] for i in range(len(ws))] + [EOC]
ys_tpl = []
ys_seg = []
ys_pos = []
for si, sent in enumerate(sents):
ws = []
sent = sent.strip()
if not sent:
continue
for w in sent:
ws.append(w)
if w == "_":
ys_tpl.append(CS[2])
else:
ys_tpl.append(w)
ys += ws + [RS]
ys_tpl += [RS]
ys_seg += [SS[si + 1] for w in ws] + [RS]
ys_pos += [PS[len(ws) - i] for i in range(len(ws))] + [RS]
ys += [EOS]
ys_tpl += [EOS]
ys_seg += [EOS]
ys_pos += [EOS]
xs_tpl += ys_tpl
xs_seg += ys_seg
xs_pos += ys_pos
if len(ys) < min_len:
return None
return xs_tpl, xs_seg, xs_pos, ys, ys_tpl, ys_seg, ys_pos
class DataLoader(object):
def __init__(self, vocab, filename, batch_size, max_len_y, min_len_y):
self.batch_size = batch_size
self.vocab = vocab
self.max_len_y = max_len_y
self.min_len_y = min_len_y
self.filename = filename
self.stream = open(self.filename, encoding='utf8')
self.epoch_id = 0
def __iter__(self):
lines = self.stream.readlines(BUFSIZE)
if not lines:
self.epoch_id += 1
self.stream.close()
self.stream = open(self.filename, encoding='utf8')
lines = self.stream.readlines(BUFSIZE)
data = []
for line in lines[:-1]: # the last sent may be incomplete
res = parse_line(line, self.max_len_y, self.min_len_y)
if not res:
continue
data.append(res)
random.shuffle(data)
idx = 0
while idx < len(data):
yield batchify(data[idx:idx+self.batch_size], self.vocab)
idx += self.batch_size
class Vocab(object):
def __init__(self, filename, min_occur_cnt, specials = None):
idx2token = [PAD, UNK, BOS, EOS] + [BOC, EOC, LS, RS, SP] + CS + SS + PS + TS \
+ (specials if specials is not None else [])
for line in open(filename, encoding='utf8').readlines():
try:
token, cnt = line.strip().split()
except:
continue
if int(cnt) >= min_occur_cnt:
idx2token.append(token)
self._token2idx = dict(zip(idx2token, range(len(idx2token))))
self._idx2token = idx2token
self._padding_idx = self._token2idx[PAD]
self._unk_idx = self._token2idx[UNK]
@property
def size(self):
return len(self._idx2token)
@property
def unk_idx(self):
return self._unk_idx
@property
def padding_idx(self):
return self._padding_idx
def random_token(self):
return self.idx2token(1 + np.random.randint(self.size-1))
def idx2token(self, x):
if isinstance(x, list):
return [self.idx2token(i) for i in x]
return self._idx2token[x]
def token2idx(self, x):
if isinstance(x, list):
return [self.token2idx(i) for i in x]
return self._token2idx.get(x, self.unk_idx)
|
11467009
|
from pelican import readers
from pelican.readers import PelicanHTMLTranslator
from pelican import signals
from docutils import nodes
LINK_CHAR = "*"
def init_headerid(sender):
global LINK_CHAR
char = sender.settings.get("HEADERID_LINK_CHAR")
if char:
LINK_CHAR = char
def register():
signals.initialized.connect(init_headerid)
class HeaderIDPatchedPelicanHTMLTranslator(PelicanHTMLTranslator):
def depart_title(self, node):
close_tag = self.context[-1]
parent = node.parent
if (
isinstance(parent, nodes.section)
and parent.hasattr("ids")
and parent["ids"]
):
anchor_name = parent["ids"][0]
# add permalink anchor
if close_tag.startswith("</h"):
self.body.append(
'<a class="headerlink" href="#%s" title="Permalink to this headline">%s</a>'
% (anchor_name, LINK_CHAR)
)
PelicanHTMLTranslator.depart_title(self, node)
readers.PelicanHTMLTranslator = HeaderIDPatchedPelicanHTMLTranslator
|
11467014
|
from PyQt5 import QtCore, QtWidgets, QtGui
from shapely.geometry import Point as ShapelyPoint
from shapely.geometry.polygon import Polygon as ShapelyPolygon
from Item import Item
from Tikzifyables.DashPatternable import DashPatternable
from Tikzifyables.Colourable.LineColourable import LineColourable
from Tikzifyables.Fillable import Fillable
from Tikzifyables.Decorationable import Decorationable
from Tikzifyables.CurveStrategyable import CurveStrategyable
import Constant as c
class Polygon(Item, DashPatternable, LineColourable, Fillable, Decorationable, CurveStrategyable):
def __init__(self, item):
"""Construct Polygon."""
Item.__init__(self, item)
if item is None:
self.dictionary_builder(None, "")
DashPatternable.__init__(self, self.item)
LineColourable.__init__(self, self.item)
Fillable.__init__(self, self.item)
Decorationable.__init__(self, self.item)
CurveStrategyable.__init__(self, self.item)
def tikzify(self):
strategy_options, strategy_coordinates = self.tikzify_strategy(False)
options = [
self.tikzify_dash(),
'draw=' + self.tikzify_line_colour(),
'' if self.item["line"]["line_width"] == c.Point.Default.LINE_WIDTH else f'line width={self.item["line"]["line_width"]}',
self.tikzify_fill_pattern(),
self.tikzify_decoration(),
strategy_options
]
options = filter(bool, options)
return "\\draw[%s] %s;" % (', '.join(options), strategy_coordinates)
def __str__(self):
return "Segment from (%s) to (%s)" % (self.item["definition"]["A"], self.item["definition"]["B"])
def draw_on_canvas(self, items, scene, colour=QtCore.Qt.darkMagenta):
qpolygon = QtGui.QPolygonF([QtCore.QPointF(*items[i].get_canvas_coordinates()) for i in self.item["definition"]])
polygon_item = QtWidgets.QGraphicsPolygonItem(qpolygon)
polygon_item.setBrush(QtGui.QBrush(colour))
pen = QtGui.QPen()
pen.setStyle(QtCore.Qt.NoPen)
polygon_item.setPen(pen)
scene.addItem(polygon_item)
def definition_string(self):
def_str = [('{0:.6g}'.format(i) if isinstance(i, float) else i) for i in self.item["definition"]]
return '%s(%s)' % (type(self).__name__, ', '.join(def_str))
@staticmethod
def draw_on_canvas_static(x, y, id_history, scene, colour=QtCore.Qt.darkMagenta):
coordinates = [QtCore.QPointF(*scene.project_data.items[x].get_canvas_coordinates()) for x in id_history]
qpolygon = QtGui.QPolygonF(coordinates + [QtCore.QPointF(x, y)])
polygon_item = QtWidgets.QGraphicsPolygonItem(qpolygon)
polygon_item.setBrush(QtGui.QBrush(colour))
pen = QtGui.QPen()
pen.setStyle(QtCore.Qt.NoPen)
polygon_item.setPen(pen)
scene.addItem(polygon_item)
def is_inside(self, x, y, items):
polygon_coordinates = []
for item_id in self.depends_on():
polygon_coordinates.append(items[item_id].get_canvas_coordinates())
shapely_polygon = ShapelyPolygon(polygon_coordinates)
return shapely_polygon.contains(ShapelyPoint(x, y))
def depends_on(self):
return self.item["definition"]
@staticmethod
def static_patterns():
return [i*'p' for i in range(3,40)]
def patterns(self):
return [i*'p' for i in range(3,40)]
def next_id_func(self, definition, iter_counter):
return 'Polygon_' + chr(ord('A') + iter_counter % 26) + (iter_counter // 26) * '\''
def definition_builder(self, data, items=None):
return data[:-1]
def parse_into_definition(self, arguments, items):
# arguments length condition
if len(arguments) <= 2:
return None
# all arguments are members of the regular expression for argument name
if not all(map(lambda x: self.name_pattern(x), arguments)):
return None
# all arguments are items that already exist
if not all(map(lambda x: x in items, arguments)):
return None
# the type of all arguments is of a certain type
if not all(map(lambda x: items[x].item["type"] == 'point', arguments)):
return None
# self-reference condition (self-reference is not permitted)
if self.get_id() in arguments:
return None
return self.definition_builder(arguments+['mock item'])
def dictionary_builder(self, definition, id_, sub_type=None):
dictionary = {}
dictionary["id"] = id_
dictionary["type"] = 'polygon'
dictionary["sub_type"] = None
dictionary["show"] = True
dictionary["definition"] = definition
dictionary["line"] = {}
dictionary["line"]["line_width"] = c.Polygon.Default.LINE_WIDTH
dictionary["line"]["colour"] = {}
dictionary["line"]["colour"]["name"] = c.Polygon.Default.Line_Colour.NAME
dictionary["line"]["colour"]["mix_with"] = c.Polygon.Default.Line_Colour.MIX_WITH
dictionary["line"]["colour"]["mix_percent"] = c.Polygon.Default.Line_Colour.MIX_RATIO
dictionary["line"]["colour"]["strength"] = c.Polygon.Default.Line_Colour.STRENGTH
dictionary["line"]["dash"] = {}
dictionary["line"]["dash"]["stroke"] = c.Polygon.Default.LINE_DASH_STROKE
dictionary["line"]["dash"]["custom_pattern"] = c.Polygon.Default.LINE_DASH_CUSTOM
dictionary["line"]["decoration"] = {}
dictionary["line"]["decoration"]["type"] = c.Polygon.Default.Decoration.TYPE
dictionary["line"]["decoration"]["amplitude"] = c.Polygon.Default.Decoration.AMPLITUDE
dictionary["line"]["decoration"]["wavelength"] = c.Polygon.Default.Decoration.WAVELENGTH
dictionary["line"]["decoration"]["text"] = c.Polygon.Default.Decoration.TEXT
dictionary["line"]["strategy"] = {}
dictionary["line"]["strategy"]["type"] = c.Polygon.Default.Strategy.TYPE
dictionary["line"]["strategy"]["rounded_corners"] = c.Polygon.Default.Strategy.ROUNDED_CORNERS
dictionary["line"]["strategy"]["bend_angle"] = c.Polygon.Default.Strategy.BEND_ANGLE
dictionary["line"]["strategy"]["in_angle"] = c.Polygon.Default.Strategy.IN_ANGLE
dictionary["line"]["strategy"]["out_angle"] = c.Polygon.Default.Strategy.OUT_ANGLE
dictionary["line"]["strategy"]["smooth_tension"] = c.Polygon.Default.Strategy.SMOOTH_TENSION
dictionary["fill"] = {}
dictionary["fill"]["colour"] = {}
dictionary["fill"]["colour"]["name"] = c.Polygon.Default.Fill_Colour.NAME
dictionary["fill"]["colour"]["mix_with"] = c.Polygon.Default.Fill_Colour.MIX_WITH
dictionary["fill"]["colour"]["mix_percent"] = c.Polygon.Default.Fill_Colour.MIX_RATIO
dictionary["fill"]["colour"]["strength"] = c.Polygon.Default.Fill_Colour.STRENGTH
dictionary["fill"]["pattern"] = {}
dictionary["fill"]["pattern"]["type"] = c.Polygon.Default.Fill_Pattern.TYPE
dictionary["fill"]["pattern"]["distance"] = c.Polygon.Default.Fill_Pattern.DISTANCE
dictionary["fill"]["pattern"]["size"] = c.Polygon.Default.Fill_Pattern.SIZE
dictionary["fill"]["pattern"]["rotation"] = c.Polygon.Default.Fill_Pattern.ROTATION
dictionary["fill"]["pattern"]["xshift"] = c.Polygon.Default.Fill_Pattern.XSHIFT
dictionary["fill"]["pattern"]["yshift"] = c.Polygon.Default.Fill_Pattern.YSHIFT
self.item = dictionary
|
11467031
|
from rest_framework import status
from usaspending_api.download.lookups import CFO_CGACS
base_query = "/api/v2/references/filter_tree/tas/"
common_query = base_query + "?depth=0"
# Can the endpoint successfully create a search tree node?
def test_one_agency(client, basic_agency):
resp = _call_and_expect_200(client, common_query)
assert resp.json() == {
"results": [{"id": "001", "ancestors": [], "description": "Agency 001 (001)", "count": 1, "children": None}]
}
# Can the endpoint correctly populate an array?
def test_multiple_agencies(client, cfo_agencies):
resp = _call_and_expect_200(client, common_query)
assert len(resp.json()["results"]) == 5 # length of arbitrary_cfo_cgac_sample from fixture class
# Does the endpoint put agencies in CFO presentation order?
def test_agency_order(client, cfo_agencies, non_cfo_agencies):
resp = _call_and_expect_200(client, common_query)
assert (
len(resp.json()["results"]) == 100 - len(CFO_CGACS) + 5
) # length of arbitrary_cfo_cgac_sample from fixture class
assert resp.json()["results"][0]["id"] == CFO_CGACS[1] # the first CGAC from the cfo_agencies fixture
assert resp.json()["results"][1]["id"] == CFO_CGACS[2] # the second CGAC from the cfo_agencies fixture
assert resp.json()["results"][2]["id"] == CFO_CGACS[3] # the third CGAC from the cfo_agencies fixture
assert resp.json()["results"][3]["id"] == CFO_CGACS[7] # the fourth CGAC from the cfo_agencies fixture
assert resp.json()["results"][4]["id"] == CFO_CGACS[13] # the fifth CGAC from the cfo_agencies fixture
assert not [elem for elem in resp.json()["results"][5:] if elem["id"][:3] in CFO_CGACS]
# Does the endpoint only return agencies with file D data?
def test_unsupported_agencies(client, cfo_agencies, unsupported_agencies):
resp = _call_and_expect_200(client, common_query)
assert len(resp.json()["results"]) == 5 # length of arbitrary_cfo_cgac_sample from fixture class
# Does the endpoint default to depth of zero?
def test_default_depth(client, cfo_agencies):
resp = _call_and_expect_200(client, base_query)
assert len(resp.json()["results"]) == 5 # length of arbitrary_cfo_cgac_sample from fixture class
# Does the endpoint handle depth greater than zero?
def test_positive_depth(client, cfo_agencies):
resp = _call_and_expect_200(client, base_query + "?depth=1")
assert len(resp.json()["results"]) == 5 # length of arbitrary_cfo_cgac_sample from fixture class
# all of these should have one FA under them
assert len([elem["children"][0] for elem in resp.json()["results"]]) == 5
def _call_and_expect_200(client, url):
resp = client.get(url)
assert resp.status_code == status.HTTP_200_OK, "Failed to return 200 Response"
return resp
|
11467032
|
import unicodedata
import jsonlines
import re
from urllib.parse import unquote
import regex
import numpy as np
import scipy.sparse as sp
from sklearn.utils import murmurhash3_32
import logging
# Module-level logger used by the paragraph loaders below.
logger = logging.getLogger(__name__)
def normalize(text):
"""Resolve different type of unicode encodings / capitarization in HotpotQA data."""
text = unicodedata.normalize('NFD', text)
return text[0].capitalize() + text[1:]
def make_wiki_id(title, para_index):
title_id = "{0}_{1}".format(normalize(title), para_index)
return title_id
def find_hyper_linked_titles(text_w_links):
titles = re.findall(r'href=[\'"]?([^\'" >]+)', text_w_links)
titles = [unquote(title) for title in titles]
titles = [title[0].capitalize() + title[1:] for title in titles]
return titles
TAG_RE = re.compile(r'<[^>]+>')
def remove_tags(text):
return TAG_RE.sub('', text)
def process_jsonlines(filename):
"""
This is the process_jsonlines method for extracted Wikipedia files.
After extracting items by using Wikiextractor (with `--json` and `--links` options),
you will get the files named with wiki_xx, where each line contains the information of each article.
e.g.,
{"id": "316", "url": "https://en.wikipedia.org/wiki?curid=316", "title": "Academy Award for Best Production Design",
"text": "Academy Award for Best Production Design\n\nThe <a href=\"Academy%20Awards\">Academy Award</a> for
Best Production Design recognizes achievement for <a href=\"art%20direction\">art direction</a> \n\n"}
This function takes these inputs and extracts items.
Each article contains one or more paragraphs, and paragraphs are separated by \n\n.
"""
# item should be nested list
extracted_items = []
with jsonlines.open(filename) as reader:
for obj in reader:
wiki_id = obj["id"]
title = obj["title"]
title_id = make_wiki_id(title, 0)
text_with_links = obj["text"]
hyper_linked_titles_text = ""
# When we consider the whole article as a document unit (e.g., SQuAD Open, Natural Questions Open)
# we'll keep the links with the original articles, and dynamically process and extract the links
# when we process with our selector.
extracted_items.append({"wiki_id": wiki_id, "title": title_id,
"plain_text": text_with_links,
"hyper_linked_titles": hyper_linked_titles_text,
"original_title": title})
return extracted_items
def process_jsonlines_hotpotqa(filename):
"""
This is the process_jsonlines method for the intro-only processed_wikipedia file.
The item example:
{"id": "45668011", "url": "https://en.wikipedia.org/wiki?curid=45668011", "title": "Flouch Roundabout",
"text": ["Flouch Roundabout is a roundabout near Penistone, South Yorkshire, England, where the A628 meets the A616."],
"charoffset": [[[0, 6],...]]
"text_with_links" : ["Flouch Roundabout is a roundabout near <a href=\"Penistone\">Penistone</a>,
<a href=\"South%20Yorkshire\">South Yorkshire</a>, England, where the <a href=\"A628%20road\">A628</a>
meets the <a href=\"A616%20road\">A616</a>."],
"charoffset_with_links": [[[0, 6], ... [213, 214]]]}
"""
# item should be nested list
extracted_items = []
with jsonlines.open(filename) as reader:
for obj in reader:
wiki_id = obj["id"]
title = obj["title"]
title_id = make_wiki_id(title, 0)
plain_text = "\t".join(obj["text"])
text_with_links = "\t".join(obj["text_with_links"])
hyper_linked_titles = []
hyper_linked_titles = find_hyper_linked_titles(text_with_links)
if len(hyper_linked_titles) > 0:
hyper_linked_titles_text = "\t".join(hyper_linked_titles)
else:
hyper_linked_titles_text = ""
extracted_items.append({"wiki_id": wiki_id, "title": title_id,
"plain_text": plain_text,
"hyper_linked_titles": hyper_linked_titles_text,
"original_title": title})
return extracted_items
# ------------------------------------------------------------------------------
# Sparse matrix saving/loading helpers.
# ------------------------------------------------------------------------------
def save_sparse_csr(filename, matrix, metadata=None):
data = {
'data': matrix.data,
'indices': matrix.indices,
'indptr': matrix.indptr,
'shape': matrix.shape,
'metadata': metadata,
}
np.savez(filename, **data)
def load_sparse_csr(filename):
loader = np.load(filename, allow_pickle=True)
matrix = sp.csr_matrix((loader['data'], loader['indices'],
loader['indptr']), shape=loader['shape'])
return matrix, loader['metadata'].item(0) if 'metadata' in loader else None
# ------------------------------------------------------------------------------
# Token hashing.
# ------------------------------------------------------------------------------
def hash(token, num_buckets):
"""Unsigned 32 bit murmurhash for feature hashing."""
return murmurhash3_32(token, positive=True) % num_buckets
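# Illustrative sketch (not part of this file originally): hashing a token into
# a fixed number of buckets, as done for n-gram feature hashing.
def _example_feature_hashing():
    num_buckets = 2 ** 24
    bucket = hash("academy award", num_buckets)
    # `bucket` is a stable integer in [0, num_buckets) for this token.
    return bucket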
# ------------------------------------------------------------------------------
# Text cleaning.
# ------------------------------------------------------------------------------
STOPWORDS = {
'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your',
'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she',
'her', 'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their',
'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that',
'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an',
'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of',
'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through',
'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down',
'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then',
'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any',
'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor',
'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can',
'will', 'just', 'don', 'should', 'now', 'd', 'll', 'm', 'o', 're', 've',
'y', 'ain', 'aren', 'couldn', 'didn', 'doesn', 'hadn', 'hasn', 'haven',
'isn', 'ma', 'mightn', 'mustn', 'needn', 'shan', 'shouldn', 'wasn', 'weren',
'won', 'wouldn', "'ll", "'re", "'ve", "n't", "'s", "'d", "'m", "''", "``"
}
def filter_word(text):
"""Take out english stopwords, punctuation, and compound endings."""
text = normalize(text)
if regex.match(r'^\p{P}+$', text):
return True
if text.lower() in STOPWORDS:
return True
return False
def filter_ngram(gram, mode='any'):
"""Decide whether to keep or discard an n-gram.
Args:
gram: list of tokens (length N)
mode: Option to throw out ngram if
'any': any single token passes filter_word
'all': all tokens pass filter_word
'ends': book-ended by filterable tokens
"""
filtered = [filter_word(w) for w in gram]
if mode == 'any':
return any(filtered)
elif mode == 'all':
return all(filtered)
elif mode == 'ends':
return filtered[0] or filtered[-1]
else:
raise ValueError('Invalid mode: %s' % mode)
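# Illustrative behaviour (True means the n-gram is filtered out):
#   filter_ngram(['of', 'the'], mode='any')            # -> True (both are stopwords)
#   filter_ngram(['roundabout', 'near'], mode='ends')  # -> False (neither end is filterable)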
def get_field(d, field_list):
"""get the subfield associated to a list of elastic fields
E.g. ['file', 'filename'] to d['file']['filename']
"""
if isinstance(field_list, str):
return d[field_list]
else:
idx = d.copy()
for field in field_list:
idx = idx[field]
return idx
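# Illustrative behaviour:
#   get_field({'file': {'filename': 'a.txt'}}, ['file', 'filename'])  # -> 'a.txt'
#   get_field({'title': 'Flouch Roundabout'}, 'title')                # -> 'Flouch Roundabout'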
def load_para_collections_from_tfidf_id_intro_only(tfidf_id, db):
if "_0" not in tfidf_id:
tfidf_id = "{0}_0".format(tfidf_id)
if db.get_doc_text(tfidf_id) is None:
logger.warning("{0} is missing".format(tfidf_id))
return []
return [[tfidf_id, db.get_doc_text(tfidf_id).split("\t")]]
def load_linked_titles_from_tfidf_id(tfidf_id, db):
para_titles = db.get_paras_with_article(tfidf_id)
linked_titles_all = []
for para_title in para_titles:
linked_title_per_para = db.get_hyper_linked(para_title)
if len(linked_title_per_para) > 0:
linked_titles_all += linked_title_per_para.split("\t")
return linked_titles_all
def load_para_and_linked_titles_dict_from_tfidf_id(tfidf_id, db):
"""
load paragraphs and hyperlinked titles from DB.
This method is mainly for Natural Questions Open benchmark.
"""
    # will be fixed in a later version; current tfidf weights use indexed titles as keys.
if "_0" not in tfidf_id:
tfidf_id = "{0}_0".format(tfidf_id)
paras, linked_titles = db.get_doc_text_hyper_linked_titles_for_articles(
tfidf_id)
if len(paras) == 0:
logger.warning("{0} is missing".format(tfidf_id))
return [], []
paras_dict = {}
linked_titles_dict = {}
article_name = tfidf_id.split("_0")[0]
# store the para_dict and linked_titles_dict; skip the first para (title)
for para_idx, (para, linked_title_list) in enumerate(zip(paras[1:], linked_titles[1:])):
paras_dict["{0}_{1}".format(article_name, para_idx)] = para
linked_titles_dict["{0}_{1}".format(
article_name, para_idx)] = linked_title_list
return paras_dict, linked_titles_dict
def prune_top_k_paragraphs(question_text, paragraphs, tfidf_vectorizer, pruning_l=10):
para_titles, para_text = list(paragraphs.keys()), list(paragraphs.values())
# prune top l paragraphs using the question as query to reduce the search space.
top_tfidf_para_indices = tfidf_vectorizer.prune(
question_text, para_text)[:pruning_l]
para_title_text_pairs_pruned = {}
# store the selected paras into dictionary.
for idx in top_tfidf_para_indices:
para_title_text_pairs_pruned[para_titles[idx]] = para_text[idx]
return para_title_text_pairs_pruned
|
11467048
|
from typing import Optional, Tuple
from .endpoint_type import EndpointType
SHOULD_USE_MAP = {
"globus collection delete": [
("globus endpoint delete", EndpointType.traditional_endpoints()),
],
"globus endpoint delete": [
("globus collection delete", EndpointType.collections()),
],
"globus collection show": [
("globus endpoint show", EndpointType.non_collection_types()),
],
"globus endpoint show": [
("globus collection show", EndpointType.collections()),
],
"globus collection update": [
("globus endpoint update", EndpointType.traditional_endpoints()),
],
"globus endpoint update": [
("globus collection update", EndpointType.collections()),
],
}
class WrongEndpointTypeError(ValueError):
def __init__(
self,
from_command: str,
endpoint_id: str,
actual_type: EndpointType,
expected_types: Tuple[EndpointType, ...],
) -> None:
self.from_command = from_command
self.endpoint_id = str(endpoint_id)
self.actual_type = actual_type
self.expected_types = expected_types
self.expected_message = self._get_expected_message()
self.actual_message = self._get_actual_message()
super().__init__(f"{self.expected_message} {self.actual_message}")
def _get_expected_message(self) -> str:
expect_str = ", ".join(EndpointType.nice_name(x) for x in self.expected_types)
if len(self.expected_types) == 1:
expect_str = f"a {expect_str}"
else:
expect_str = f"one of [{expect_str}]"
return f"Expected {self.endpoint_id} to be {expect_str}."
def _get_actual_message(self) -> str:
actual_str = EndpointType.nice_name(self.actual_type)
return f"Instead, found it was of type '{actual_str}'."
def should_use_command(self) -> Optional[str]:
if self.from_command in SHOULD_USE_MAP:
for should_use, if_types in SHOULD_USE_MAP[self.from_command]:
if self.actual_type in if_types:
return should_use
return None
class ExpectedCollectionError(WrongEndpointTypeError):
def _get_expected_message(self):
return f"Expected {self.endpoint_id} to be a collection ID."
class ExpectedEndpointError(WrongEndpointTypeError):
def _get_expected_message(self):
return f"Expected {self.endpoint_id} to be an endpoint ID."
|
11467057
|
from docusign_click import AccountsApi
from flask import request, session
from ...utils import create_click_api_client
class Eg005Controller:
@staticmethod
def get_args():
"""Get required session and request arguments"""
return {
"account_id": session.get("ds_account_id"), # Represents your {ACCOUNT_ID}
"access_token": session.get("ds_access_token"), # Represents your {ACCESS_TOKEN}
"clickwrap_id": session.get("clickwrap_id"),
"client_user_id": request.form.get("client_user_id"),
}
@staticmethod
def worker(args):
"""
1. Create an API client with headers
2. Get clickwrap responses using SDK
"""
# Step 1. Create an API client with headers
api_client = create_click_api_client(
access_token=args["access_token"]
)
# Step 2. Get clickwrap responses using SDK
accounts_api = AccountsApi(api_client)
response = accounts_api.get_clickwrap_agreements(
account_id=args["account_id"],
clickwrap_id=args["clickwrap_id"],
client_user_id=args["client_user_id"],
status="agreed"
)
return response
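# Illustrative usage (requires a Flask request/session context populated by the
# surrounding example app; not part of the original controller):
#   args = Eg005Controller.get_args()
#   agreements = Eg005Controller.worker(args)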
|
11467084
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import io
import os
from observations.util import maybe_download_and_extract
def ptb(path):
"""Load the Penn Treebank data set [@marcus1993building].
The dataset is preprocessed and has a vocabulary of 10,000 words,
including the end-of-sentence marker and a special symbol (<unk>)
for rare words. There are 929,589 training words, 73,760 validation
words, and 82,430 test words.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there. Filename is `simple-examples/`.
Returns:
Tuple of str `x_train, x_test, x_valid`.
"""
path = os.path.expanduser(path)
if not os.path.exists(os.path.join(path, 'simple-examples')):
url = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz'
maybe_download_and_extract(path, url)
path = os.path.join(path, 'simple-examples/data')
with io.open(os.path.join(path, 'ptb.train.txt'),
encoding='utf-8') as f:
x_train = f.read().replace("\n", "<eos>")
with io.open(os.path.join(path, 'ptb.test.txt'),
encoding='utf-8') as f:
x_test = f.read().replace("\n", "<eos>")
with io.open(os.path.join(path, 'ptb.valid.txt'),
encoding='utf-8') as f:
x_valid = f.read().replace("\n", "<eos>")
return x_train, x_test, x_valid
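if __name__ == "__main__":
  # Minimal usage sketch: the path is a placeholder and the archive is
  # downloaded and extracted on first use.
  x_train, x_test, x_valid = ptb("~/data")
  print(len(x_train.split()), len(x_valid.split()), len(x_test.split()))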
|
11467091
|
import itertools
import json
import time
import ray
from typing import Callable
import numpy as np
from mesh_transformer.train_actor import NetworkRunner
from google.cloud import storage
from smart_open import open
from func_timeout import func_set_timeout
class TPUCluster:
@func_set_timeout(1200)
def __init__(self,
mesh_shape,
node_count,
model: Callable,
version=1):
assert ray.is_initialized() # needs a valid ray cluster to start
self.nodes = []
self.node_count = node_count
self.dp, self.mp = mesh_shape
self.version = version
start = time.time()
for i in range(node_count):
self.nodes.append(NetworkRunner.options(max_concurrency=2).remote(mesh_shape, model))
for n in self.nodes:
n.run.remote()
params = []
for n in self.nodes:
params.append(n.get_params.remote())
self.param_count = ray.get(params)[0]
print(f"Ray actors created in {time.time() - start:.06}s")
@func_set_timeout(600)
def train(self, data):
data_chunks = np.array_split(data, len(self.nodes), axis=1)
res = []
for n, d in zip(self.nodes, data_chunks):
res.append(n.train.remote({
"obs": d[:, :, :-1],
"target": d[:, :, 1:],
}))
res = ray.get(res)
loss = []
last_loss = []
for r in res:
loss.append(r[0])
last_loss.append(r[1])
return np.array(loss).mean(), np.array(last_loss).mean()
@func_set_timeout(600)
def eval(self, data):
if isinstance(data, dict):
data_chunked = [{} for _ in self.nodes]
for k, v in data.items():
v_chunks = np.array_split(v, len(self.nodes), axis=0)
for idx, v_chunk in enumerate(v_chunks):
data_chunked[idx][k] = v_chunk
res = []
for n, d in zip(self.nodes, data_chunked):
res.append(n.eval.remote(d))
total = 0
correct = 0
last_correct = 0
total_last_loss = 0
mask_loss = []
each_correct = []
for input, output in zip(data_chunked, ray.get(res)):
correct_and_valid = np.logical_and(output["correct"], input["eval_mask"])
correct_tokens_count = np.sum(correct_and_valid, -1)
valid_tokens_count = np.sum(input["eval_mask"], -1)
correct_example = np.logical_and(valid_tokens_count == correct_tokens_count, valid_tokens_count > 0)
valid_example = valid_tokens_count > 0
last_correct_example = correct_and_valid[:, -1]
each_correct += correct_example.tolist()
total += sum(valid_example)
correct += sum(correct_example)
last_correct += sum(last_correct_example)
total_last_loss += sum(valid_example * output["last_loss"])
valid_loss = np.sum(output["all_loss"] * input["eval_mask"], -1)
mask_loss += valid_loss.tolist()
return {
"total": total,
"correct": correct,
"last_correct": last_correct,
"last_loss": total_last_loss,
"mask_loss": np.array(mask_loss),
"each_correct": np.array(each_correct)
}
else:
data_chunks = np.array_split(data, len(self.nodes), axis=0)
res = []
for n, d in zip(self.nodes, data_chunks):
res.append(n.eval.remote({
"obs": d[:, :-1],
"target": d[:, 1:],
}))
return np.array([i["loss"] for i in ray.get(res)]).mean()
@func_set_timeout(600)
def generate(self, context, ctx_length, gen_len):
context = np.array_split(context, len(self.nodes), axis=0)
ctx_length = np.array_split(ctx_length, len(self.nodes), axis=0)
res = []
for n, ctx, l in zip(self.nodes, context, ctx_length):
res.append(n.generate.remote((
ctx,
np.ones(len(ctx), dtype=np.uint32) * l,
gen_len
)))
return np.concatenate([i[1][0][:, :, 0] for i in ray.get(res)], axis=0)
@func_set_timeout(600)
def move(self):
start = time.time()
res = []
for node in self.nodes:
res.append(node.move_params.remote())
ray.get(res)
print(f"Moved weights to TPU in {time.time() - start:.06}s")
@func_set_timeout(1800)
def load(self, bucket, path):
with open(f"gs://{bucket}/{path}/meta.json", "r") as f:
meta = json.load(f)
ckpt_step = meta["checkpoints"][-1]
# do replicated checkpoint reading
start = time.time()
res = []
for node in self.nodes:
res.append(node.load_ckpt.remote(f"gs://{bucket}/{path}/step_{ckpt_step}/"))
# make sure they all read from the same checkpoint
step = np.array(ray.get(res))
assert (step[0] == step).all()
step = int(step[0])
print(f"Checkpoint@step{step} restored in {time.time() - start:.06}s")
return step, meta["aux"][str(ckpt_step)]
@func_set_timeout(600)
def save(self, step, bucket, path, aux=None, init=False, overwrite=False, keep_n=3, delete_old=True):
assert path
client = storage.Client()
if aux is None:
aux = {}
if init:
# check existing checkpoint folder does not exist, and delete it if it does
for blob in client.list_blobs(bucket, prefix=f"{path}/"):
assert overwrite
# print(f"deleting {blob.name}")
assert path in blob.name
blob.delete()
# create metadata file
with open(f"gs://{bucket}/{path}/meta.json", "w") as f:
json.dump({
"step": 0,
"checkpoints": [],
"aux": {}
}, f)
# do sharded checkpoint writing
start = time.time()
res = []
if self.version == 1:
for shard_id, node in zip(range(self.mp), itertools.cycle(self.nodes)):
res.append(node.write_ckpt.remote(f"gs://{bucket}/{path}/step_{step}/", shard_id))
elif self.version == 2:
for node in self.nodes:
res.append(node.write_ckpt.remote(f"gs://{bucket}/{path}/step_{step}", 0))
ray.get(res)
print(f"Wrote checkpoint in {time.time() - start:.06}s")
with open(f"gs://{bucket}/{path}/meta.json", "r") as f:
meta = json.load(f)
meta["step"] = step
meta["checkpoints"].append(step)
all_aux = meta.get("aux", {})
while len(meta["checkpoints"]) > keep_n:
ckpt_to_delete = meta["checkpoints"].pop(0)
try:
del all_aux[str(ckpt_to_delete)]
            except KeyError:
                print(f"failed to delete the aux state for {ckpt_to_delete}")
if delete_old:
print(f"deleting checkpoint {ckpt_to_delete}")
for blob in client.list_blobs(bucket, prefix=f"{path}/step_{ckpt_to_delete}/"):
# print(f"deleting {blob.name}")
assert path in blob.name
blob.delete()
else:
print(f"keeping checkpoint {ckpt_to_delete}")
all_aux[step] = aux
meta["aux"] = all_aux
with open(f"gs://{bucket}/{path}/meta.json", "w") as f:
json.dump(meta, f)
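# Illustrative note (not part of the original class): `train` shards each batch
# across nodes with np.array_split along axis=1 and shifts it by one token, e.g.
#   batch = np.zeros((1, 8, 2049), dtype=np.uint32)  # (accum steps, batch, seq+1) -- example shape
#   chunks = np.array_split(batch, 4, axis=1)        # pretend 4 nodes -> each (1, 2, 2049)
#   obs, target = chunks[0][:, :, :-1], chunks[0][:, :, 1:]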
|
11467176
|
from DataReader import *
class Png(DataReader):
"""
    Manages fetching single pieces of data from the CPU onto the GPU.
"""
def __init__(self,dataset_root,data_list_path,channels=3,dtype=tf.uint8):
with tf.variable_scope(None,default_name="image_data_reader"):
DataReader.__init__(self,dataset_root,data_list_path)
# graph setup
img_path = tf.placeholder(dtype=tf.string)
img = tf.image.decode_png(tf.read_file(img_path), channels=channels, dtype=dtype)
# expose tensors
self.data_path = img_path
self.data_out = img
self.data_type = dtype
self.data_shape = [-1,-1,channels]
|
11467181
|
import os
import tkinter
import tkinter.ttk
import warnings
from concurrent.futures import ThreadPoolExecutor
from tkinter import scrolledtext
import matplotlib.colors as mcolors
from pynput.keyboard import Listener
import constants
import helper
import keyboard_helper
from forza import Forza
from logger import Logger, TextHandler
# suppress matplotlib warning while running in thread
warnings.filterwarnings("ignore", category=UserWarning)
class MainWindow:
def __init__(self):
"""init
"""
self.root = tkinter.Tk()
# init text
self.language = helper.get_sys_lang()
self.init_text()
self.text_update(self.language)
# Configure the rows that are in use to have weight #
self.root.grid_rowconfigure(0, minsize=500, weight=500)
self.root.grid_rowconfigure(1, minsize=300, weight=300)
# Configure the cols that are in use to have weight #
self.root.grid_columnconfigure(0, minsize=150, weight=150)
self.root.grid_columnconfigure(1, minsize=650, weight=650)
self.root.grid_columnconfigure(2, minsize=200, weight=100)
self.root.title("Forza Horizon 5: Auto Gear Shifting")
self.root.geometry("1300x800")
self.root.minsize(1200, 800)
self.root.maxsize(1800, 1000)
self.root["background"] = constants.background_color
# widgets to be updated
self.speed_tree = {}
self.rpm_tree = {}
self.car_id_var = tkinter.StringVar()
self.car_id_var.set("None")
self.car_perf_var = tkinter.IntVar()
self.car_perf_var.set(0)
self.car_class_var = tkinter.IntVar()
self.car_class_var.set(-1)
self.car_drivetrain_var = tkinter.StringVar()
self.car_drivetrain_var.set('N')
self.tires = {}
self.tire_color = mcolors.LinearSegmentedColormap.from_list("", [(0, "green"), (1, "red")])
self.acceleration_var = tkinter.StringVar()
self.acceleration_var.set("0%")
self.brake_var = tkinter.StringVar()
self.brake_var.set("0%")
# set log frame
self.set_log_frame()
# forza info
self.threadPool = ThreadPoolExecutor(max_workers=8, thread_name_prefix="exec")
self.forza5 = Forza(self.threadPool, self.logger, constants.packet_format, enable_clutch=constants.enable_clutch)
self.listener = Listener(on_press=self.on_press)
self.set_car_setting_frame()
self.set_car_perf_frame()
self.set_shift_point_frame()
self.set_button_frame()
self.set_program_info_frame()
self.root.protocol('WM_DELETE_WINDOW', self.close)
self.logger.info('Forza Horizon 5: Auto Gear Shifting Started!!!')
self.listener.start()
self.root.mainloop()
def init_text(self):
self.select_language_txt = tkinter.StringVar()
self.language_txt = tkinter.StringVar()
self.clutch_shortcut_txt = tkinter.StringVar()
self.upshift_shortcut_txt = tkinter.StringVar()
self.downshift_shortcut_txt = tkinter.StringVar()
self.clutch_txt = tkinter.StringVar()
self.farm_txt = tkinter.StringVar()
self.offroad_rally_txt = tkinter.StringVar()
self.car_id = tkinter.StringVar()
self.car_perf = tkinter.StringVar()
self.car_drivetrain = tkinter.StringVar()
self.tire_information_txt = tkinter.StringVar()
self.accel_txt = tkinter.StringVar()
self.brake_txt = tkinter.StringVar()
self.shift_point_txt = tkinter.StringVar()
self.tree_value_txt = tkinter.StringVar()
self.speed_txt = tkinter.StringVar()
self.rpm_txt = tkinter.StringVar()
self.collect_button_txt = tkinter.StringVar()
self.analysis_button_txt = tkinter.StringVar()
self.run_button_txt = tkinter.StringVar()
self.pause_button_txt = tkinter.StringVar()
self.exit_button_txt = tkinter.StringVar()
self.clear_log_text = tkinter.StringVar()
self.program_info_txt = tkinter.StringVar()
def text_update(self, lang_index):
self.select_language_txt.set(constants.select_language_txt[lang_index])
self.language_txt.set(constants.language_txt[lang_index])
self.clutch_shortcut_txt.set(constants.clutch_shortcut_txt[lang_index])
self.upshift_shortcut_txt.set(constants.upshift_shortcut_txt[lang_index])
self.downshift_shortcut_txt.set(constants.downshift_shortcut_txt[lang_index])
self.clutch_txt.set(constants.clutch_txt[lang_index])
self.farm_txt.set(constants.farm_txt[lang_index])
self.offroad_rally_txt.set(constants.offroad_rally_txt[lang_index])
self.car_id.set(constants.car_id[lang_index])
self.car_perf.set(constants.car_perf[lang_index])
self.car_drivetrain.set(constants.car_drivetrain[lang_index])
self.tire_information_txt.set(constants.tire_information_txt[lang_index])
self.accel_txt.set(constants.accel_txt[lang_index])
self.brake_txt.set(constants.brake_txt[lang_index])
self.shift_point_txt.set(constants.shift_point_txt[lang_index])
self.tree_value_txt.set(constants.tree_value_txt[lang_index])
self.speed_txt.set(f'{constants.speed_txt[lang_index]} km/h')
self.rpm_txt.set(f'{constants.rpm_txt[lang_index]} r/m')
self.collect_button_txt.set(f'{constants.collect_button_txt[lang_index]} ({constants.collect_data.name})')
self.analysis_button_txt.set(f'{constants.analysis_button_txt[lang_index]} ({constants.analysis.name})')
self.run_button_txt.set(f'{constants.run_button_txt[lang_index]} ({constants.auto_shift.name})')
self.pause_button_txt.set(f'{constants.pause_button_txt[lang_index]} ({constants.stop.name})')
self.exit_button_txt.set(f'{constants.exit_button_txt[lang_index]} ({constants.close.name})')
self.clear_log_text.set(constants.clear_log_txt[lang_index])
self.program_info_txt.set(constants.program_info_txt[lang_index])
# widgets to be set
# if hasattr(self, 'perf_canvas'):
# self.perf_canvas.itemconfigure(self.car_class_text, text=self.)
if hasattr(self, 'tire_canvas'):
self.tire_canvas.itemconfigure(self.tire_canvas_text, text=self.tire_information_txt.get())
if hasattr(self, 'treeview'):
self.treeview.heading('#0', text=self.shift_point_txt.get(), anchor=tkinter.CENTER)
self.treeview.heading('value', text=self.tree_value_txt.get(), anchor=tkinter.CENTER)
self.treeview.item(self.speed_level, text=self.speed_txt.get())
self.treeview.item(self.rpm_level, text=self.rpm_txt.get())
if hasattr(self, 'program_info'):
self.program_info.configure(state=tkinter.NORMAL)
self.program_info.delete("1.0", tkinter.END)
self.program_info.insert('1.0', self.program_info_txt.get())
self.program_info.configure(state=tkinter.DISABLED)
def update_tree(self):
"""Update shift point tree
"""
for key, value in self.forza5.shift_point.items():
self.treeview.item(self.speed_tree[key], values=round(value['speed'], 3))
self.treeview.item(self.rpm_tree[key], values=round(value['rpmo'], 3))
for i in range(key + 1, 11):
self.treeview.item(self.speed_tree[i], values='-')
self.treeview.item(self.rpm_tree[i], values='-')
def update_car_info(self, fdp):
"""update car info
Args:
fdp: fdp
"""
if self.forza5.isRunning:
# Update car information
self.car_id_var.set(fdp.car_ordinal)
self.car_perf_var.set(fdp.car_performance_index)
self.car_class_var.set(fdp.car_class)
self.car_drivetrain_var.set(constants.car_drivetrain_list[fdp.drivetrain_type][self.language])
# Update PERF CARD
self.perf_canvas.config(bg=constants.car_class_color[fdp.car_class])
self.perf_canvas.itemconfig(self.car_class_text, text=constants.car_class_list[fdp.car_class])
self.perf_index_canvas.itemconfig(self.perf_index_text, text=fdp.car_performance_index)
# Update acceleration and brake value
self.acceleration_var.set(f"{str(round(fdp.accel / 255 * 100, 1))}%")
self.brake_var.set(f"{str(round(fdp.brake / 255 * 100, 1))}%")
# FL tire
slip = abs(fdp.tire_combined_slip_FL) if abs(fdp.tire_combined_slip_FL) < 1 else 1
color = self.tire_color(slip / 0.8 * 0.5 if slip < 0.8 else (1 - slip) / 0.2 * 0.5 + 0.8)
self.tire_canvas.itemconfig(self.tires["FL"], fill=helper.rgb(color[0], color[1], color[2]))
# FR tire
slip = abs(fdp.tire_combined_slip_FR) if abs(fdp.tire_combined_slip_FR) < 1 else 1
color = self.tire_color(slip / 0.8 * 0.5 if slip < 0.8 else (1 - slip) / 0.2 * 0.5 + 0.8)
self.tire_canvas.itemconfig(self.tires["FR"], fill=helper.rgb(color[0], color[1], color[2]))
# RL tire
slip = abs(fdp.tire_combined_slip_RL) if abs(fdp.tire_combined_slip_RL) < 1 else 1
color = self.tire_color(slip / 0.8 * 0.5 if slip < 0.8 else (1 - slip) / 0.2 * 0.5 + 0.8)
self.tire_canvas.itemconfig(self.tires["RL"], fill=helper.rgb(color[0], color[1], color[2]))
# RR tire
slip = abs(fdp.tire_combined_slip_RR) if abs(fdp.tire_combined_slip_RR) < 1 else 1
color = self.tire_color(slip / 0.8 * 0.5 if slip < 0.8 else (1 - slip) / 0.2 * 0.5 + 0.8)
self.tire_canvas.itemconfig(self.tires["RR"], fill=helper.rgb(color[0], color[1], color[2]))
def reset_car_info(self):
"""reset car info and tree view
"""
# reset tree
for key, _ in self.speed_tree.items():
self.treeview.item(self.speed_tree[key], values="-")
self.treeview.item(self.rpm_tree[key], values="-")
# reset accel and brake
self.acceleration_var.set("0%")
self.brake_var.set("0%")
# FL tire
self.tire_canvas.itemconfig(self.tires["FL"], fill=constants.background_color)
# FR tire
self.tire_canvas.itemconfig(self.tires["FR"], fill=constants.background_color)
# RL tire
self.tire_canvas.itemconfig(self.tires["RL"], fill=constants.background_color)
# RR tire
self.tire_canvas.itemconfig(self.tires["RR"], fill=constants.background_color)
def on_press(self, key):
"""on press callback
Args:
key: key
"""
try:
if key == constants.collect_data:
self.collect_data_handler(None)
elif key == constants.analysis:
self.analysis_handler(None, performance_profile=False, is_guid=False)
elif key == constants.auto_shift:
self.run_handler(None)
elif key == constants.stop:
self.pause_handler(None)
elif key == constants.close:
self.exit_handler(None)
except BaseException as e:
self.forza5.logger.exception(e)
def close(self):
"""close program
"""
shutdown(self.forza5, self.threadPool, self.listener)
self.root.destroy()
def place_languages(self, pre_widget_count=0):
# ==== language setting ====
# language label
language_label = tkinter.Label(self.car_info_frame, textvariable=self.select_language_txt, bg=constants.background_color, fg=constants.text_color)
language_label.place(relx=0.06, rely=self.get_rely(pre_widget_count), anchor="w")
pre_widget_count = pre_widget_count + 1
# language options
language_combobox = tkinter.ttk.Combobox(self.car_info_frame, values=constants.language_txt, state='readonly')
language_combobox.current(self.language)
def set_language(event):
self.language = constants.language_txt.index(event.widget.get())
self.text_update(self.language)
language_combobox.bind("<<ComboboxSelected>>", set_language)
language_combobox.place(relx=0.08, rely=self.get_rely(pre_widget_count), anchor="w")
return pre_widget_count + 1
def place_ip_port(self, pre_widget_count=0):
self.ip_widget = tkinter.Text(self.car_info_frame, borderwidth=0, bg=constants.background_color, fg=constants.text_color, wrap=tkinter.WORD)
self.ip_widget.insert("1.0", f'IP: {self.forza5.ip}')
self.ip_widget.place(relx=0.08, rely=self.get_rely(pre_widget_count), relwidth=0.85, relheight=0.03, anchor="w")
self.ip_widget.configure(state="disabled")
pre_widget_count = pre_widget_count + 1
self.port_widget = tkinter.Text(self.car_info_frame, borderwidth=0, bg=constants.background_color, fg=constants.text_color, wrap=tkinter.WORD)
self.port_widget.insert("1.0", f'Port: {self.forza5.port}')
self.port_widget.place(relx=0.08, rely=self.get_rely(pre_widget_count), relwidth=0.85, relheight=0.03, anchor="w")
self.port_widget.configure(state="disabled")
pre_widget_count = pre_widget_count + 1
return pre_widget_count
def place_shortcuts(self, pre_widget_count=0):
"""place shortcuts comboboxes
"""
def get_available_shortcuts(cur_shortcut):
all_boundKeys = self.forza5.boundKeys()
all_boundKeys.extend(constants.boundKeys)
return [x for x in keyboard_helper.key_list if x not in all_boundKeys or x == cur_shortcut]
shortcut_list = []
# ==== short-cut options ====
# == define clutch shortcuts ==
# clutch shortcut label
clutch_shortcut_label = tkinter.Label(self.car_info_frame, textvariable=self.clutch_shortcut_txt, bg=constants.background_color, fg=constants.text_color)
shortcut_list.append(tuple((clutch_shortcut_label, "")))
# clutch options
clutch_shortcuts = get_available_shortcuts(self.forza5.clutch)
clutch_shortcut = tkinter.ttk.Combobox(self.car_info_frame, values=clutch_shortcuts, state='readonly')
clutch_shortcut.current(clutch_shortcuts.index(self.forza5.clutch))
shortcut_list.append(tuple((clutch_shortcut, "clutch")))
# == upshift shortcut ==
# upshift short label
upshift_shortcut_label = tkinter.Label(self.car_info_frame, textvariable=self.upshift_shortcut_txt, bg=constants.background_color, fg=constants.text_color)
shortcut_list.append(tuple((upshift_shortcut_label, "")))
# upshift options
upshift_shortcuts = get_available_shortcuts(self.forza5.upshift)
upshift_shortcut = tkinter.ttk.Combobox(self.car_info_frame, values=upshift_shortcuts, state='readonly')
upshift_shortcut.current(upshift_shortcuts.index(self.forza5.upshift))
shortcut_list.append(tuple((upshift_shortcut, "upshift")))
# == downshift shortcut ==
# downshift short label
downshift_shortcut_label = tkinter.Label(self.car_info_frame, textvariable=self.downshift_shortcut_txt, bg=constants.background_color, fg=constants.text_color)
shortcut_list.append(tuple((downshift_shortcut_label, "")))
# downshift options
downshift_shortcuts = get_available_shortcuts(self.forza5.downshift)
downshift_shortcut = tkinter.ttk.Combobox(self.car_info_frame, values=downshift_shortcuts, state='readonly')
downshift_shortcut.current(downshift_shortcuts.index(self.forza5.downshift))
shortcut_list.append(tuple((downshift_shortcut, "downshift")))
all_combobox = [box[0] for box in shortcut_list if type(box[0]) is tkinter.ttk.Combobox]
for i in range(len(shortcut_list)):
if type(shortcut_list[i][0]) is tkinter.Label:
shortcut_list[i][0].place(relx=0.06, rely=self.get_rely(i + pre_widget_count), anchor="w")
elif type(shortcut_list[i][0]) is tkinter.ttk.Combobox:
def set_clutch_shortcut(event):
box = [x for x in shortcut_list if x[0] == event.widget][0]
if box[1] == "clutch":
self.forza5.clutch = event.widget.get()
self.logger.info(f"clutch shortcut is: {self.forza5.clutch}")
elif box[1] == "upshift":
self.forza5.upshift = event.widget.get()
self.logger.info(f"upshift shortcut is: {self.forza5.upshift}")
elif box[1] == "downshift":
self.forza5.downshift = event.widget.get()
self.logger.info(f"downshift shortcut is: {self.forza5.downshift}")
for box in all_combobox:
box['values'] = get_available_shortcuts(box.get())
shortcut_list[i][0].bind("<<ComboboxSelected>>", set_clutch_shortcut)
shortcut_list[i][0].place(relx=0.08, rely=self.get_rely(i + pre_widget_count), anchor="w")
return len(shortcut_list) + pre_widget_count
def get_rely(self, count):
"""get relative y
Args:
count (int): previous widgets count
Returns:
float: relative y
"""
return 0.03 + 0.05 * count
def set_car_setting_frame(self):
"""set car setting frame
"""
# place car setting frame
self.car_info_frame = tkinter.Frame(self.root, border=0, bg=constants.background_color, relief="groove", highlightthickness=True, highlightcolor=constants.text_color)
total_widget = 0
# ==== IP/Port setting ====
total_widget = self.place_ip_port(total_widget)
# ==== language setting ====
total_widget = self.place_languages(total_widget)
# ==== place shortcuts ====
total_widget = self.place_shortcuts(total_widget)
# ==== features settings ====
# clutch setting
enable_clutch = tkinter.IntVar(value=self.forza5.enable_clutch)
def set_clutch():
self.forza5.enable_clutch = enable_clutch.get()
clutch_check = tkinter.Checkbutton(self.car_info_frame, textvariable=self.clutch_txt, onvalue=1, offvalue=0, variable=enable_clutch, bg=constants.background_color, command=set_clutch, fg=constants.text_color)
clutch_check.place(relx=0.05, rely=self.get_rely(total_widget), anchor="w")
total_widget = total_widget + 1
# farming setting
enable_farm = tkinter.IntVar(value=self.forza5.farming)
def set_farm():
self.forza5.farming = enable_farm.get()
farm_check = tkinter.Checkbutton(self.car_info_frame, textvariable=self.farm_txt, onvalue=1, offvalue=0, variable=enable_farm, bg=constants.background_color, command=set_farm, fg=constants.text_color)
farm_check.place(relx=0.05, rely=self.get_rely(total_widget), anchor="w")
total_widget = total_widget + 1
self.car_info_frame.grid(row=0, column=0, sticky='news')
# off-road, rally setting
enable_offroad_rally = tkinter.IntVar(value=0)
def set_offroad_rally():
self.forza5.shift_point_factor = constants.offroad_rally_shift_factor if enable_offroad_rally.get() == 1 else constants.shift_factor
offroad_rally_check = tkinter.Checkbutton(self.car_info_frame, textvariable=self.offroad_rally_txt, onvalue=1, offvalue=0, variable=enable_offroad_rally, bg=constants.background_color, command=set_offroad_rally, fg=constants.text_color)
offroad_rally_check.place(relx=0.05, rely=self.get_rely(total_widget), anchor="w")
total_widget = total_widget + 1
self.car_info_frame.grid(row=0, column=0, sticky='news')
def set_car_perf_frame(self):
"""set car perf frame
"""
# Place car perf frame
self.car_perf_frame = tkinter.Frame(self.root, border=0, bg=constants.background_color, relief="groove", highlightthickness=True, highlightcolor=constants.text_color)
self.car_perf_frame.grid(row=0, column=1, sticky='news')
self.car_perf_frame.update()
# place car id
tkinter.Label(
self.car_perf_frame,
textvariable=self.car_id,
bg=constants.background_color,
fg=constants.text_color,
font=('Helvetica 15 bold')
).place(
relx=constants.car_info_leftbound_relx,
rely=constants.car_info_topbound_rely,
anchor=tkinter.W
)
tkinter.Label(
self.car_perf_frame,
textvariable=self.car_id_var,
bg=constants.background_color,
fg=constants.text_color,
font=('Helvetica 20 bold')
).place(
relx=constants.car_info_leftbound_relx,
rely=constants.car_info_topbound_rely + constants.car_info_line_gap,
anchor=tkinter.W
)
# place car perf
perf_y = constants.car_info_topbound_rely + 0.28
tkinter.Label(
self.car_perf_frame,
textvariable=self.car_perf,
bg=constants.background_color,
fg=constants.text_color,
font=('Helvetica 15 bold')
).place(
relx=constants.car_info_leftbound_relx,
rely=perf_y,
anchor=tkinter.W
)
# place car perf sticker canvas
perf_width = 0.12
perf_height = 0.06
self.perf_canvas = tkinter.Canvas(self.car_perf_frame, background=constants.car_class_color[self.forza5.car_class], bd=0, highlightthickness=False)
self.perf_canvas.place(relx=constants.car_info_leftbound_relx + 0.01, rely=perf_y + constants.car_info_line_gap, relwidth=perf_width, relheight=perf_height, anchor=tkinter.W)
self.car_class_text = self.perf_canvas.create_text(
self.car_perf_frame.winfo_width() * perf_width * 0.225,
self.car_perf_frame.winfo_height() * perf_height / 2,
text=constants.car_class_list[self.forza5.car_class],
fill=constants.perf_sticker_background,
font=('Helvetica 15 bold'),
anchor=tkinter.CENTER
)
perf_index_width = 0.064
perf_index_height = 0.05
self.perf_index_canvas = tkinter.Canvas(self.car_perf_frame, background=constants.perf_sticker_background, bd=0, highlightthickness=False)
self.perf_index_canvas.place(relx=constants.car_info_leftbound_relx + perf_width * 0.55 - 0.002, rely=perf_y + constants.car_info_line_gap - 0.00055, relwidth=perf_index_width, relheight=perf_index_height, anchor=tkinter.W)
self.perf_index_text = self.perf_index_canvas.create_text(
self.car_perf_frame.winfo_width() * perf_index_width / 2,
self.car_perf_frame.winfo_height() * perf_index_height / 2,
text=self.forza5.car_perf,
fill=constants.background_color,
font=('Helvetica 15 bold'),
anchor=tkinter.CENTER
)
# place car drivetrain
tkinter.Label(
self.car_perf_frame,
textvariable=self.car_drivetrain,
bg=constants.background_color,
fg=constants.text_color,
font=('Helvetica 15 bold')
).place(
relx=constants.car_info_leftbound_relx,
rely=constants.car_info_bottombound_rely - constants.car_info_line_gap,
anchor=tkinter.SW
)
tkinter.Label(
self.car_perf_frame,
textvariable=self.car_drivetrain_var,
bg=constants.background_color,
fg=constants.text_color,
font=('Helvetica 20 bold')
).place(
relx=constants.car_info_leftbound_relx,
rely=constants.car_info_bottombound_rely,
anchor=tkinter.SW
)
# place tire information canvas
self.tire_canvas = tkinter.Canvas(self.car_perf_frame, background=constants.background_color, bd=0, highlightthickness=False)
self.tire_canvas.place(relx=constants.tire_canvas_relx, rely=constants.tire_canvas_rely, relwidth=constants.tire_canvas_relwidth, relheight=constants.tire_canvas_relheight, anchor=tkinter.CENTER)
self.tire_canvas_text = self.tire_canvas.create_text(
self.car_perf_frame.winfo_width() * constants.tire_canvas_relwidth / 2,
self.car_perf_frame.winfo_height() * constants.y_padding_top * 0.5,
text=self.tire_information_txt.get(),
fill=constants.text_color,
font=('Helvetica 15 bold'),
anchor=tkinter.CENTER
)
for pos, info in constants.tires.items():
self.tires[pos] = self.round_rectangle(
self.tire_canvas,
self.car_perf_frame.winfo_width() * info[0],
self.car_perf_frame.winfo_height() * info[1],
self.car_perf_frame.winfo_width() * info[2],
self.car_perf_frame.winfo_height() * info[3],
radius=info[4],
fill=constants.background_color,
width=2,
outline=constants.text_color
)
# place acceleration information text
tkinter.Label(
self.car_perf_frame,
textvariable=self.accel_txt,
bg=constants.background_color,
fg=constants.text_color,
font=('Helvetica 15 bold')
).place(
relx=constants.car_info_rightbound_relx,
rely=constants.car_info_topbound_rely,
anchor=tkinter.E
)
tkinter.Label(
self.car_perf_frame,
textvariable=self.acceleration_var,
bg=constants.background_color,
fg=constants.text_color,
font=('Helvetica 35 bold italic')
).place(
relx=constants.car_info_rightbound_relx,
rely=0.35,
anchor=tkinter.E
)
        # place brake information text
tkinter.Label(self.car_perf_frame, textvariable=self.brake_txt, bg=constants.background_color, fg=constants.text_color, font=('Helvetica 15 bold')).place(relx=constants.car_info_rightbound_relx, rely=0.545, anchor=tkinter.E)
tkinter.Label(self.car_perf_frame, textvariable=self.brake_var, bg=constants.background_color, fg=constants.text_color, font=('Helvetica 35 bold italic')).place(relx=constants.car_info_rightbound_relx, rely=0.7, anchor=tkinter.E)
def set_shift_point_frame(self):
"""set shift point frame
"""
# place shift point frame
self.shift_point_frame = tkinter.Frame(self.root, border=0, relief="groove", background=constants.background_color, highlightthickness=True, highlightcolor=constants.text_color)
style = tkinter.ttk.Style()
style.theme_use("clam")
# set background and foreground of the treeview
style.configure("Treeview", background=constants.background_color, foreground=constants.text_color, fieldbackground=constants.background_color)
style.map('Treeview', background=[('selected', '#BFBFBF')], foreground=[('selected', 'black')], fieldbackground=[('selected', 'black')])
self.treeview = tkinter.ttk.Treeview(self.shift_point_frame, columns="value", style='Treeview')
self.treeview.heading('#0', text=self.shift_point_txt.get(), anchor=tkinter.CENTER)
self.treeview.heading('value', text=self.tree_value_txt.get(), anchor=tkinter.CENTER)
self.treeview.column('#0', width=80, anchor=tkinter.CENTER)
self.treeview.column('value', width=120, anchor=tkinter.CENTER)
self.speed_level = self.treeview.insert(parent='', index=tkinter.END, text=self.speed_txt.get(), open=True)
self.rpm_level = self.treeview.insert(parent='', index=tkinter.END, text=self.rpm_txt.get(), open=True)
for i in range(1, 11):
self.speed_tree[i] = self.treeview.insert(self.speed_level, tkinter.END, text=i, values="-")
self.rpm_tree[i] = self.treeview.insert(self.rpm_level, tkinter.END, text=i, values="-")
self.treeview.pack(fill="both", expand=True)
self.shift_point_frame.grid(row=0, column=2, sticky='news')
def set_button_frame(self):
"""set buttom frame
"""
# place button frame
self.button_frame = tkinter.Frame(self.root, border=0, bg=constants.background_color, relief="groove", highlightthickness=True, highlightcolor=constants.text_color)
button_names = [(self.collect_button_txt, self.collect_data_handler), (self.analysis_button_txt, self.analysis_handler), (self.run_button_txt, self.run_handler), (self.pause_button_txt, self.pause_handler),
(self.exit_button_txt, self.exit_handler)]
for i, (name, func) in enumerate(button_names):
button = tkinter.Button(self.button_frame, textvariable=name, bg=constants.background_color, fg=constants.text_color, borderwidth=3, highlightcolor=constants.text_color, highlightthickness=True)
button.bind('<Button-1>', func)
button.place(relx=0.5, rely=1 / len(button_names) * i + 1 / len(button_names) / 2, relwidth=0.8, relheight=1 / len(button_names) * 0.9, anchor='center')
self.button_frame.grid(row=1, column=0, sticky='news')
def set_log_frame(self):
"""set log frame
"""
# place log frame
self.log_frame = tkinter.Frame(self.root, border=0, bg=constants.background_color, relief="groove", highlightthickness=True, highlightcolor=constants.text_color)
log = scrolledtext.ScrolledText(self.log_frame, bg=constants.background_color, borderwidth=2, font='Monaco 9 bold', fg=constants.text_color)
log.pack(fill="both", expand=True)
log_handler = TextHandler(log)
self.logger = (Logger(log_handler))('ForzaHorizon5')
button = tkinter.Button(self.log_frame, textvariable=self.clear_log_text, bg=constants.background_color, fg=constants.text_color, borderwidth=3, highlightcolor=constants.text_color, highlightthickness=True)
button.bind('<Button-1>', lambda x: log.delete(1.0, 'end'))
button.place(relx=0.93, rely=0.053, relwidth=0.05, relheight=0.07, anchor='center', bordermode='inside')
self.log_frame.grid(row=1, column=1, sticky='news')
def set_program_info_frame(self):
"""set code info frame
"""
# place code info frame
self.program_info_frame = tkinter.Frame(self.root, border=0, bg=constants.background_color, relief="groove", highlightthickness=True, highlightcolor=constants.text_color)
self.program_info = tkinter.Text(self.program_info_frame, borderwidth=0, bg=constants.background_color, fg=constants.text_color, wrap=tkinter.WORD)
self.program_info.insert("current", self.program_info_txt.get())
self.program_info.place(relx=0.03, rely=0.03, relwidth=0.95, relheight=0.95, anchor='nw', bordermode='inside')
self.program_info.configure(state="disabled")
self.program_info_frame.grid(row=1, column=2, sticky='news')
def collect_data_handler(self, event):
"""collect data button callback
Args:
event
"""
if self.forza5.isRunning:
self.logger.info('stopping gear test')
def stopping():
self.forza5.isRunning = False
self.reset_car_info()
self.threadPool.submit(stopping)
else:
self.logger.info('starting gear test')
def starting():
self.forza5.isRunning = True
self.forza5.test_gear(self.update_car_info)
self.threadPool.submit(starting)
def analysis_handler(self, event, performance_profile=True, is_guid=True):
"""analysis button callback
Args:
event
            performance_profile (bool, optional): draw the performance profile or not. Defaults to True.
            is_guid (bool, optional): whether the call comes from the GUI. Defaults to True.
"""
if len(self.forza5.records) <= 0:
self.logger.info(f'load config {constants.example_car_ordinal}.json for analysis as an example')
helper.load_config(self.forza5, os.path.join(constants.root_path, 'example', f'{constants.example_car_ordinal}.json'))
self.logger.info('Analysis')
self.forza5.analyze(performance_profile=performance_profile, is_gui=is_guid)
self.update_tree()
def run_handler(self, event):
"""run button callback
Args:
event
"""
if self.forza5.isRunning:
self.forza5.logger.info('stopping auto gear')
def stopping():
self.forza5.isRunning = False
self.reset_car_info()
self.threadPool.submit(stopping)
else:
self.forza5.logger.info('starting auto gear')
def starting():
self.forza5.isRunning = True
self.forza5.run(self.update_tree, self.update_car_info)
self.threadPool.submit(starting)
def pause_handler(self, event):
"""pause button callback
Args:
event
"""
shutdown(self.forza5, self.threadPool, self.listener)
self.reset_car_info()
self.threadPool = ThreadPoolExecutor(max_workers=8, thread_name_prefix="exec")
self.forza5.threadPool = self.threadPool
self.listener = Listener(on_press=self.on_press)
self.listener.start()
self.forza5.logger.info('stopped')
def exit_handler(self, event):
"""exit button callback
Args:
event
"""
shutdown(self.forza5, self.threadPool, self.listener)
helper.dump_settings(self.forza5)
self.forza5.logger.info('bye~')
self.root.destroy()
def round_rectangle(self, canvas: tkinter.Canvas, x1, y1, x2, y2, radius=25, **kwargs):
"""draw rectangle with round corner
Args:
canvas (tkinter.Canvas): canvas
x1: top left x coordinate
y1: top left y coordinate
x2: bot right x coordinate
y2: bot right y coordinate
radius (int, optional): round radius. Defaults to 25.
Returns:
rectangle
"""
points = [
x1 + radius, y1, x1 + radius, y1, x2 - radius, y1, x2 - radius, y1, x2, y1, x2, y1 + radius, x2, y1 + radius, x2, y2 - radius, x2, y2 - radius, x2, y2, x2 - radius, y2, x2 - radius, y2, x1 + radius, y2, x1 + radius, y2, x1, y2, x1,
y2 - radius, x1, y2 - radius, x1, y1 + radius, x1, y1 + radius, x1, y1
]
return canvas.create_polygon(points, **kwargs, smooth=True)
def shutdown(forza: Forza, threadPool: ThreadPoolExecutor, listener: Listener):
"""shutdown/clean up resources
Args:
forza (Forza): forza
threadPool (ThreadPoolExecutor): thread pool
listener (Listener): keyboard listener
"""
forza.isRunning = False
threadPool.shutdown(wait=False)
listener.stop()
def main():
"""main.....
"""
MainWindow()
if __name__ == "__main__":
main()
|
11467192
|
import unittest2
from hashlib import sha1
from pykafka.partitioners import GroupHashingPartitioner
class TestGroupHashingPartitioner(unittest2.TestCase):
def test_valid_inputs_success(self):
# Test data; 1st element is group size, 2nd is total partition count, 3rd is the key
key = 'foo'.encode('utf-8')
data = [[1, 16, key],
[2, 16, key],
[4, 16, key],
[16, 16, key],
[1, 1, key]]
for row in data:
self._run_test(row[0], row[1], row[2])
def test_invalid_inputs_error(self):
key = 'foo'.encode('utf-8')
data = [[0, 16, key],
[17, 16, key]]
for row in data:
with self.assertRaises(ValueError):
self._run_test(row[0], row[1], row[2])
def test_create_with_zero_group_size_raises_error(self):
with self.assertRaises(ValueError):
GroupHashingPartitioner(hash_func=None, group_size=0)
def test_create_with_negative_group_size_raises_error(self):
with self.assertRaises(ValueError):
GroupHashingPartitioner(hash_func=None, group_size=-1)
def test_missing_hash_function_raises_error(self):
with self.assertRaises(ValueError):
GroupHashingPartitioner(hash_func=None, group_size=1)
def _run_test(self, group_size, total_partition_count, key):
def hash_func(k): return int(sha1(k).hexdigest(), 16)
# Instead of one hash for each key, generate a list of n possible hashes, where n=group_size
hashed_keys = [abs(hash_func(key) + x) for x in range(group_size)]
# Obtain a list of valid partitions
valid_partitions = list(map(lambda x: x % total_partition_count, hashed_keys))
        self.assertEqual(len(valid_partitions), group_size)
# Call the partitioner and check that the returned partition is in the valid list
partitioner = GroupHashingPartitioner(hash_func, group_size)
x = partitioner.__call__(list(range(total_partition_count)), key)
self.assertTrue(x in valid_partitions)
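# Illustrative direct usage (mirrors _run_test; the key and partition list are examples):
#   partitioner = GroupHashingPartitioner(
#       hash_func=lambda k: int(sha1(k).hexdigest(), 16), group_size=4)
#   partitioner(list(range(16)), b"foo")  # returns one of the 4 partitions valid for this key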
|
11467201
|
import matplotlib.pyplot as plt
import pyDNase
from pyDNase.footprinting import wellington
#Load test data
reads = pyDNase.BAMHandler("example.bam")
regions = pyDNase.GenomicIntervalSet("example.bed")
#Plot cuts data
plt.plot(reads[regions[0]]["+"],c="red")
plt.plot(-reads[regions[0]]["-"],c="blue")
#Footprint and plot the results
footprinter = wellington(regions[0],reads)
plt.plot(footprinter.scores,c="black")
plt.show()
|
11467209
|
def calMean(array):
mean = 0
for i in range(len(array)):
mean = mean + array[i]
mean = mean/float(len(array))
return mean
if __name__ == "__main__":
array = [1,2,3,4]
mean = calMean(array)
print (mean)
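    # Note (illustrative): the built-ins give the same result,
    # e.g. sum(array) / float(len(array)) == 2.5 for [1, 2, 3, 4].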
|
11467283
|
from xbrr.base.reader.base_element_schema import BaseElementSchema
class ElementSchema(BaseElementSchema):
def __init__(self,
name="", reference="", label="", alias="",
abstract="", data_type="",
period_type="", balance=""):
super().__init__()
self.name = name
self.reference = reference
self.label = label
self.alias = alias
self.abstract = abstract
self.data_type = data_type
self.period_type = period_type
self.balance = balance
def set_alias(self, alias):
self.alias = alias
return self
@classmethod
def create_from_reference(cls, reader, reference,
label_kind="", label_verbose=False):
name = reference.split("#")[-1]
label = ""
abstract = ""
data_type = ""
period_type = ""
balance = ""
if reader.xbrl_dir:
_def = reader.read_by_link(reference)
if label_kind is not None:
label = _def.label(label_kind, label_verbose)
xsd = _def.xsd
abstract = xsd["abstract"]
data_type = xsd["type"]
if "xbrli:periodType" in xsd.attrs:
period_type = xsd["xbrli:periodType"]
if "xbrli:balance" in xsd.attrs:
balance = xsd["xbrli:balance"]
instance = cls(name=name, reference=reference, label=label,
abstract=abstract, data_type=data_type,
period_type=period_type, balance=balance)
return instance
def to_dict(self):
return {
"name": self.name,
"reference": self.reference,
"label": self.label,
"abstract": self.abstract,
"data_type": self.data_type,
"period_type": self.period_type,
"balance": self.balance
}
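# Illustrative usage (field values are made up, not from a real taxonomy):
#   schema = ElementSchema(name="NetSales", label="Net sales",
#                          data_type="monetaryItemType", period_type="duration")
#   schema.set_alias("sales").alias     # -> "sales"
#   schema.to_dict()["period_type"]     # -> "duration"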
|
11467312
|
import numpy as np
import sys,os,glob
######################################## INPUT ##########################################
root = '/simons/scratch/fvillaescusa/pdf_information/PDF/matter/'
cosmos = ['Om_p/', 'Ob_p/', 'Ob2_p/', 'h_p/', 'ns_p/', 's8_p/',
'Om_m/', 'Ob_m/', 'Ob2_m/', 'h_m/', 'ns_m/', 's8_m/',
'Mnu_p/', 'Mnu_pp/', 'Mnu_ppp/',
'fiducial_ZA/', 'fiducial/', 'fiducial_LR/', 'fiducial_HR/',
'latin_hypercube/']
zs = [0, 0.5, 1, 2, 3]
scales = [5, 10, 15, 20, 25, 30, 35]
#########################################################################################
# do a loop over the different cosmologies
for cosmo in cosmos:
files1 = glob.glob('%s/%s/*/PDF_*'%(root,cosmo))
files2 = glob.glob('%s/%s/*/moments_*'%(root,cosmo))
    print('Found %d %d files for %s' % (len(files1), len(files2), cosmo))
continue
# do a loop over the different redshifts
for z in zs:
files = glob.glob('%s/%s/*/variance_PDF_m_z=%s.txt'%(root,cosmo,z))
        print('Found %d var files for %s at z=%d' % (len(files), cosmo, z))
# do a loop over the different scales
for scale in scales:
files = glob.glob('%s/%s/*/PDF_m_%.1f_z=%s.txt'%(root,cosmo,scale,z))
            print('Found %d files for %s with %d at z=%d' % (len(files), cosmo, scale, z))
|
11467347
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy
import os
#python setup.py build_ext --inplace
ext_modules = [
Extension("treecode_bem",
sources = ['common.c',
'bem_pbc.c',
'treecode_bem_I.c',
'treecode_bem_II.c',
'treecode_bem_lib.pyx'],
include_dirs = [numpy.get_include()],
libraries=['m'],
#libraries=['m','gomp'],
extra_compile_args=["-fopenmp"],
#extra_link_args=["-g"],
)
]
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules
)
|
11467379
|
import json
from urllib.parse import urlencode, parse_qsl
def pref_custom_id(custom_id: str, data: dict):
"""
Convert dict to string.
Args:
custom_id (str): Name of custom_id.
data (dict): Data you want to convert.
Raises:
ValueError: Data can not contain '__'.
Returns:
A string custom_id
"""
data_str = urlencode(data)
if "__" in data_str:
raise ValueError("Data can not contain '__'")
return f"pref_{custom_id}__{data_str}"
def un_pref_custom_id(custom_id: str, data: str):
"""
Convert string to dict.
Args:
custom_id (str): Name of custom_id.
data (str): String to parse.
Returns:
A dict converted from data.
"""
start = len(custom_id) + 7
return {i: int(j) if j.isdigit() else j for i, j in parse_qsl(data[start:])}
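if __name__ == "__main__":
    # Round-trip sketch (the custom_id name and payload are illustrative):
    cid = pref_custom_id("vote", {"user": 42, "page": 3})
    print(cid)                              # pref_vote__user=42&page=3
    print(un_pref_custom_id("vote", cid))   # {'user': 42, 'page': 3}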
|
11467410
|
from __future__ import absolute_import
import logging
from abc import ABCMeta, abstractmethod
from collections import Iterable
import six
from frontera.core import models
from frontera.core.components import Backend, DistributedBackend, Middleware, CanonicalSolver
from frontera.exceptions import NotConfigured
from frontera.settings import Settings
from frontera.utils.misc import load_object
class BackendMixin(object):
def __init__(self, backend, db_worker=False, strategy_worker=False):
# Load backend
self._logger_components.debug("Loading backend '%s'", backend)
self._backend = self._load_backend(backend, db_worker, strategy_worker)
self._backend.frontier_start()
def _load_backend(self, backend, db_worker, strategy_worker):
# FIXME remove obsolete
cls = load_object(backend)
assert issubclass(cls, Backend), "backend '%s' must subclass Backend" % cls.__name__
if issubclass(cls, DistributedBackend):
if db_worker:
return cls.db_worker(self)
if strategy_worker:
return cls.strategy_worker(self)
return cls.local(self)
else:
assert not strategy_worker, "In order to distribute backend only DistributedBackend " \
"subclasses are allowed to use"
if hasattr(cls, 'from_manager'):
return cls.from_manager(self)
else:
return cls()
@property
def backend(self):
"""
The :class:`Backend <frontera.core.components.Backend>` object to be used by the frontier. \
Can be defined with :setting:`BACKEND` setting.
"""
return self._backend
def close(self):
self.backend.frontier_stop()
class StrategyMixin(object):
def __init__(self, strategy_class, strategy_args, scoring_stream):
self._scoring_stream = scoring_stream if scoring_stream else LocalUpdateScoreStream(self.backend.queue)
self._states_context = StatesContext(self.backend.states)
if isinstance(strategy_class, str):
strategy_class = load_object(strategy_class)
self._strategy = strategy_class.from_worker(self, strategy_args, self._scoring_stream, self._states_context)
@property
def strategy(self):
return self._strategy
@property
def states_context(self):
return self._states_context
def close(self):
self.strategy.close()
self.states_context.flush()
class ComponentsPipelineMixin(BackendMixin):
def __init__(self, backend, middlewares=None, canonicalsolver=None, db_worker=False, strategy_worker=False):
self._logger_components = logging.getLogger("manager.components")
# Load middlewares
self._middlewares = self._load_middlewares(middlewares)
# Load canonical solver
self._logger_components.debug("Loading canonical url solver '%s'", canonicalsolver)
if canonicalsolver:
self._canonicalsolver = self._load_object(canonicalsolver)
assert isinstance(self.canonicalsolver, CanonicalSolver), \
"canonical solver '%s' must subclass CanonicalSolver" % self.canonicalsolver.__class__.__name__
BackendMixin.__init__(self, backend, db_worker, strategy_worker)
@property
def canonicalsolver(self):
"""
Instance of CanonicalSolver used for getting canonical urls in frontier components.
"""
return self._canonicalsolver
@property
def middlewares(self):
"""
A list of :class:`Middleware <frontera.core.components.Middleware>` objects to be used by the frontier. \
Can be defined with :setting:`MIDDLEWARES` setting.
"""
return self._middlewares
def _load_middlewares(self, middleware_names):
# TO-DO: Use dict for middleware ordering
mws = []
for mw_name in middleware_names or []:
self._logger_components.debug("Loading middleware '%s'", mw_name)
try:
mw = self._load_object(mw_name, silent=False)
assert isinstance(mw, Middleware), "middleware '%s' must subclass Middleware" % mw.__class__.__name__
if mw:
mws.append(mw)
except NotConfigured:
self._logger_components.warning("middleware '%s' disabled!", mw_name)
return mws
def _process_components(self, method_name, obj=None, return_classes=None, components=None, **kwargs):
pipeline = self._components_pipeline if components is None else \
[self._components_pipeline[c] for c in components]
return_obj = obj
for component_category, component, check_response in pipeline:
components = component if isinstance(component, list) else [component]
for component in components:
result = self._process_component(component=component, method_name=method_name,
component_category=component_category, obj=return_obj,
return_classes=return_classes, **kwargs)
if check_response:
return_obj = result
if check_response and obj and not return_obj:
self._logger_components.warning("Object '%s' filtered in '%s' by '%s'",
obj.__class__.__name__, method_name, component.__class__.__name__)
return
return return_obj
def _process_component(self, component, method_name, component_category, obj, return_classes, **kwargs):
self._logger_components.debug("processing '%s' '%s.%s' %s",
method_name, component_category, component.__class__.__name__, obj)
return_obj = getattr(component, method_name)(*([obj] if obj else []), **kwargs)
assert return_obj is None or isinstance(return_obj, return_classes), \
"%s '%s.%s' must return None or %s, Got '%s'" % \
(component_category, obj.__class__.__name__, method_name,
' or '.join(c.__name__ for c in return_classes)
if isinstance(return_classes, tuple) else
return_classes.__name__,
return_obj.__class__.__name__)
return return_obj
def close(self):
BackendMixin.close(self)
super(ComponentsPipelineMixin, self).close()
class StrategyComponentsPipelineMixin(ComponentsPipelineMixin, StrategyMixin):
def __init__(self, backend, strategy_class, strategy_args, scoring_stream, **kwargs):
super(StrategyComponentsPipelineMixin, self).__init__(backend, **kwargs)
StrategyMixin.__init__(self, strategy_class, strategy_args, scoring_stream)
def close(self):
StrategyMixin.close(self)
super(StrategyComponentsPipelineMixin, self).close()
class BaseContext(object):
def __init__(self, request_model, response_model, settings=None):
# Settings
self._settings = settings or Settings()
# Logger
self._logger = logging.getLogger("manager")
# Log frontier manager starting
self._logger.info('-' * 80)
self._logger.info('Starting Frontier Manager...')
# Load request model
self._request_model = load_object(request_model)
assert issubclass(self._request_model, models.Request), "Request model '%s' must subclass 'Request'" % \
self._request_model.__name__
# Load response model
self._response_model = load_object(response_model)
assert issubclass(self._response_model, models.Response), "Response model '%s' must subclass 'Response'" % \
self._response_model.__name__
@classmethod
def from_settings(cls, settings=None):
manager_settings = Settings(settings)
return BaseContext(request_model=manager_settings.REQUEST_MODEL,
response_model=manager_settings.RESPONSE_MODEL,
settings=manager_settings)
def _load_object(self, obj_class_name, silent=False):
obj_class = load_object(obj_class_name)
try:
return self._load_frontier_object(obj_class)
except NotConfigured:
if not silent:
raise
def _load_frontier_object(self, obj_class):
if hasattr(obj_class, 'from_manager'):
return obj_class.from_manager(self)
else:
return obj_class()
@property
def request_model(self):
"""
The :class:`Request <frontera.core.models.Request>` object to be used by the frontier. \
Can be defined with :setting:`REQUEST_MODEL` setting.
"""
return self._request_model
@property
def response_model(self):
"""
The :class:`Response <frontera.core.models.Response>` object to be used by the frontier. \
Can be defined with :setting:`RESPONSE_MODEL` setting.
"""
return self._response_model
@property
def settings(self):
"""
The :class:`Settings <frontera.settings.Settings>` object used by the frontier.
"""
return self._settings
class BaseManager(object):
def get_next_requests(self, max_next_requests=0, **kwargs):
"""
Returns a list of next requests to be crawled. Optionally, a maximum number of requests can be passed. If no
value is passed, \
:attr:`FrontierManager.max_next_requests <frontera.core.manager.FrontierManager.max_next_requests>`
will be used instead. (:setting:`MAX_NEXT_REQUESTS` setting).
:param int max_next_requests: Maximum number of requests to be returned by this method.
:param dict kwargs: Arbitrary arguments that will be passed to backend.
:return: list of :class:`Request <frontera.core.models.Request>` objects.
"""
# log (in)
self._logger.debug('GET_NEXT_REQUESTS(in) max_next_requests=%s', max_next_requests)
# get next requests
next_requests = self.backend.get_next_requests(max_next_requests, **kwargs)
# log (out)
self._logger.debug('GET_NEXT_REQUESTS(out) returned_requests=%s', len(next_requests))
return next_requests
def page_crawled(self, response):
"""
Informs the frontier about the crawl result.
:param object response: The :class:`Response <frontera.core.models.Response>` object for the crawled page.
:return: None.
"""
self._logger.debug('PAGE_CRAWLED url=%s status=%s', response.url, response.status_code)
self._process_components(method_name='page_crawled',
obj=response,
return_classes=self.response_model)
def links_extracted(self, request, links):
"""
Informs the frontier about extracted links for the request.
:param object request: The :class:`Request <frontera.core.models.Request>` object from which the links were extracted.
:param list links: A list of :class:`Request <frontera.core.models.Request>` objects generated from the links \
extracted for the request.
:return: None.
"""
self._logger.debug('LINKS_EXTRACTED url=%s links=%d', request.url, len(links))
self._process_components(method_name='links_extracted',
obj=request,
return_classes=self.request_model,
components=(0, 1),
links=links)
def links_extracted_after(self, request, filtered):
self._process_components(method_name='links_extracted',
obj=request,
return_classes=self.request_model,
components=(2,),
links=filtered)
def request_error(self, request, error):
self._logger.debug('PAGE_REQUEST_ERROR url=%s error=%s', request.url, error)
return self._process_components(method_name='request_error',
obj=request,
return_classes=self.request_model,
error=error)
class LocalFrontierManager(BaseContext, StrategyComponentsPipelineMixin, BaseManager):
"""
The :class:`FrontierManager <frontera.core.manager.FrontierManager>` object encapsulates the whole frontier,
providing an API to interact with it. It is also responsible for loading and communicating with all the
different frontier components.
"""
def __init__(self, request_model, response_model, backend, strategy_class, strategy_args, middlewares=None,
test_mode=False, max_requests=0, max_next_requests=0, auto_start=True, settings=None,
canonicalsolver=None):
"""
:param object/string request_model: The :class:`Request <frontera.core.models.Request>` object to be \
used by the frontier.
:param object/string response_model: The :class:`Response <frontera.core.models.Response>` object to be \
used by the frontier.
:param object/string backend: The :class:`Backend <frontera.core.components.Backend>` object to be \
used by the frontier.
:param object/string strategy_class: The crawling strategy class (or its dotted path) to be instantiated \
and used by the frontier.
:param dict strategy_args: A dict of arguments passed to the crawling strategy.
:param list middlewares: A list of :class:`Middleware <frontera.core.components.Middleware>` \
objects to be used by the frontier.
:param bool test_mode: Activate/deactivate :ref:`frontier test mode <frontier-test-mode>`.
:param int max_requests: Number of pages after which the frontier would stop (See \
:ref:`Finish conditions <frontier-finish>`).
:param int max_next_requests: Maximum number of requests returned by \
:attr:`get_next_requests <frontera.core.manager.FrontierManager.get_next_requests>` method.
:param bool auto_start: Activate/deactivate automatic frontier start (See :ref:`starting/stopping the \
frontier <frontier-start-stop>`).
:param object/string settings: The :class:`Settings <frontera.settings.Settings>` object used by \
the frontier.
:param object/string canonicalsolver: The :class:`CanonicalSolver <frontera.core.components.CanonicalSolver>`
object to be used by frontier.
"""
BaseContext.__init__(self, request_model, response_model, settings=settings)
# Test mode
self._test_mode = test_mode
self._logger.debug('Test mode %s', "ENABLED" if self.test_mode else "DISABLED")
# Page counters
self._max_requests = max_requests
self._max_next_requests = max_next_requests
self._n_requests = 0
# Iteration counter
self._iteration = 0
# Manager finished flag
self._finished = False
StrategyComponentsPipelineMixin.__init__(self, backend, strategy_class, strategy_args, None,
middlewares=middlewares, canonicalsolver=canonicalsolver,
db_worker=False, strategy_worker=False)
# Init frontier components pipeline
# Some code relies on the order, modify carefully
self._components_pipeline = [
('Middleware', self.middlewares, True),
('CanonicalSolver', self.canonicalsolver, False),
('Strategy', self.strategy, False)
]
# Log frontier manager start
self._logger.info('Frontier Manager Started!')
self._logger.info('-' * 80)
# start/stop
self._started = False
self._stopped = False
self._auto_start = auto_start
if self.auto_start:
self.start()
@classmethod
def from_settings(cls, settings=None, db_worker=False, strategy_worker=False):
"""
Returns a :class:`FrontierManager <frontera.core.manager.FrontierManager>` instance initialized with \
the passed settings argument. If no settings object is given,
the :ref:`frontier default settings <frontier-default-settings>` are used.
"""
manager_settings = Settings.object_from(settings)
return LocalFrontierManager(request_model=manager_settings.REQUEST_MODEL,
response_model=manager_settings.RESPONSE_MODEL,
backend=manager_settings.BACKEND,
strategy_class=manager_settings.STRATEGY,
strategy_args=manager_settings.STRATEGY_ARGS,
middlewares=manager_settings.MIDDLEWARES,
test_mode=manager_settings.TEST_MODE,
max_requests=manager_settings.MAX_REQUESTS,
max_next_requests=manager_settings.MAX_NEXT_REQUESTS,
auto_start=manager_settings.AUTO_START,
settings=manager_settings,
canonicalsolver=manager_settings.CANONICAL_SOLVER)
@property
def test_mode(self):
"""
Boolean value indicating if the frontier is using :ref:`frontier test mode <frontier-test-mode>`. \
Can be defined with :setting:`TEST_MODE` setting.
"""
return self._test_mode
@property
def max_requests(self):
"""
Number of pages after which the frontier would stop (See :ref:`Finish conditions <frontier-finish>`). \
Can be defined with :setting:`MAX_REQUESTS` setting.
"""
return self._max_requests
@property
def max_next_requests(self):
"""
Maximum number of requests returned by \
:attr:`get_next_requests <frontera.core.manager.FrontierManager.get_next_requests>` method. \
Can be defined with :setting:`MAX_NEXT_REQUESTS` setting.
"""
return self._max_next_requests
@property
def auto_start(self):
"""
Boolean value indicating if automatic frontier start is activated. \
See :ref:`starting/stopping the frontier <frontier-start-stop>`. \
Can be defined with :setting:`AUTO_START` setting.
"""
return self._auto_start
@property
def iteration(self):
"""
Current :ref:`frontier iteration <frontier-iterations>`.
"""
return self._iteration
@property
def n_requests(self):
"""
Number of accumulated requests returned by the frontier.
"""
return self._n_requests
@property
def finished(self):
"""
Boolean value indicating if the frontier has finished. See :ref:`Finish conditions <frontier-finish>`.
"""
if not self._finished:
return self.strategy.finished()
return True
def start(self):
"""
Notifies all the components of the frontier start. Typically used for initializations (See \
:ref:`starting/stopping the frontier <frontier-start-stop>`).
:return: None.
"""
assert not self._started, 'Frontier already started!'
self._logger.debug('START')
self._process_components(method_name='frontier_start')
self._started = True
def stop(self):
"""
Notifies all the components of the frontier stop. Typically used for finalizations (See \
:ref:`starting/stopping the frontier <frontier-start-stop>`).
:return: None.
"""
self._check_startstop()
self._logger.debug('STOP')
self._process_components(method_name='frontier_stop')
StrategyComponentsPipelineMixin.close(self)
self._stopped = True
def add_seeds(self, seeds_file):
"""
Performs the seed addition procedure: passes the given file-like object to the crawling strategy's read_seeds method.
:param file seeds_file: A file-like object, opened in binary mode, that will be passed to read_seeds.
:return: None.
"""
self._check_startstop()
self.strategy.read_seeds(seeds_file)
def get_next_requests(self, max_next_requests=0, **kwargs):
"""
Returns a list of next requests to be crawled. Optionally, a maximum number of requests can be passed. If no
value is passed, \
:attr:`FrontierManager.max_next_requests <frontera.core.manager.FrontierManager.max_next_requests>`
will be used instead. (:setting:`MAX_NEXT_REQUESTS` setting).
:param int max_next_requests: Maximum number of requests to be returned by this method.
:param dict kwargs: Arbitrary arguments that will be passed to backend.
:return: list of :class:`Request <frontera.core.models.Request>` objects.
"""
self._check_startstop()
# End condition check
if self.max_requests and self.n_requests >= self.max_requests:
self._logger.info('MAX PAGES REACHED! (%s/%s)', self.n_requests, self.max_requests)
self._finished = True
return []
# Calculate number of requests
max_next_requests = max_next_requests or self.max_next_requests
if self.max_requests:
if not max_next_requests:
max_next_requests = self.max_requests - self.n_requests
else:
if self.n_requests + max_next_requests > self.max_requests:
max_next_requests = self.max_requests - self.n_requests
# get next requests
next_requests = super(LocalFrontierManager, self).get_next_requests(max_next_requests, **kwargs)
# Increment requests counter
self._n_requests += len(next_requests)
# Increment iteration
if next_requests:
self._iteration += 1
return next_requests
def page_crawled(self, response):
self._check_startstop()
assert isinstance(response, self.response_model), "Response object must subclass '%s', '%s' found" % \
(self.response_model.__name__, type(response).__name__)
assert hasattr(response, 'request') and response.request, "Empty response request"
assert isinstance(response.request, self.request_model), "Response request object must subclass '%s', " \
"'%s' found" % \
(self.request_model.__name__,
type(response.request).__name__)
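# Resolve the persisted crawl state for this response before notifying the strategy:
# to_fetch() registers its fingerprint, fetch() loads states from the backend,
# set_states() stamps them onto the object and update_cache() stores the updated state afterwards.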
self.states_context.to_fetch(response)
self.states_context.fetch()
self.states_context.states.set_states(response)
super(LocalFrontierManager, self).page_crawled(response)
self.states_context.states.update_cache(response)
def links_extracted(self, request, links):
self._check_startstop()
assert isinstance(request, self.request_model), "Request object must subclass '%s', '%s' found" % \
(self.request_model.__name__, type(request).__name__)
for link in links:
assert isinstance(link, self._request_model), "Link objects must subclass '%s', '%s' found" % \
(self._request_model.__name__, type(link).__name__)
super(LocalFrontierManager, self).links_extracted(request, links)
filtered = self.strategy.filter_extracted_links(request, links)
if filtered:
self.states_context.to_fetch(request)
self.states_context.to_fetch(filtered)
self.states_context.fetch()
self.states_context.states.set_states(filtered)
super(LocalFrontierManager, self).links_extracted_after(request, filtered)
self.states_context.states.update_cache(filtered)
def request_error(self, request, error):
"""
Informs the frontier about a page crawl error. An error identifier must be provided.
:param object request: The :class:`Request <frontera.core.models.Request>` object whose crawl resulted in an error.
:param string error: A string identifier for the error.
:return: None.
"""
self._check_startstop()
self.states_context.to_fetch(request)
self.states_context.fetch()
self.states_context.states.set_states(request)
processed_page = super(LocalFrontierManager, self).request_error(request, error)
self.states_context.states.update_cache(request)
return processed_page
def create_request(self, url, method=b'GET', headers=None, cookies=None, meta=None, body=b''):
"""
Creates request and applies middleware and canonical solver pipelines.
:param url: str
:param method: bytes
:param headers: dict
:param cookies: dict
:param meta: dict
:param body: bytes
:return: :class:`Request <frontera.core.models.Request>` object
"""
r = self.request_model(url, method=method, headers=headers, cookies=cookies, meta=meta, body=body)
self._process_components('create_request',
obj=r,
return_classes=self.request_model,
components=(0, 1))
return r
def _check_startstop(self):
assert self._started, "Frontier not started!"
assert not self._stopped, "Call to stopped frontier!"
class WorkerFrontierManager(BaseContext, StrategyComponentsPipelineMixin):
"""
The :class:`WorkerFrontierManager <frontera.core.manager.WorkerFrontierManager>` class instantiates the core
frontier components and is used mainly by workers.
"""
def __init__(self, settings, request_model, response_model, backend, max_next_requests, strategy_class=None,
strategy_args=None, scoring_stream=None, middlewares=None, canonicalsolver=None, db_worker=False,
strategy_worker=False):
"""
:param object/string request_model: The :class:`Request <frontera.core.models.Request>` object to be \
used by the frontier.
:param object/string response_model: The :class:`Response <frontera.core.models.Response>` object to be \
used by the frontier.
:param object/string backend: The :class:`Backend <frontera.core.components.Backend>` object to be \
used by the frontier.
:param object/string strategy_class: The crawling strategy class to be used (strategy worker only).
:param dict strategy_args: A dict of arguments passed to the crawling strategy (strategy worker only).
:param list middlewares: A list of :class:`Middleware <frontera.core.components.Middleware>` \
objects to be used by the frontier.
:param int max_next_requests: Maximum number of requests returned by \
:attr:`get_next_requests <frontera.core.manager.FrontierManager.get_next_requests>` method.
:param object/string settings: The :class:`Settings <frontera.settings.Settings>` object used by \
the frontier.
:param object/string canonicalsolver: The :class:`CanonicalSolver <frontera.core.components.CanonicalSolver>`
object to be used by frontier.
:param object scoring_stream: Instance of :class:`UpdateScoreStream <frontera.core.manager.UpdateScoreStream>`
for crawling strategy to send scheduled requests to.
:param bool db_worker: True if class is instantiated in DB worker environment
:param bool strategy_worker: True if class is instantiated in strategy worker environment
"""
BaseContext.__init__(self, request_model, response_model, settings=settings)
self._max_next_requests = max_next_requests
if strategy_worker:
StrategyComponentsPipelineMixin.__init__(self, backend, strategy_class, strategy_args, scoring_stream,
middlewares=middlewares, canonicalsolver=canonicalsolver,
db_worker=db_worker, strategy_worker=strategy_worker)
# Init frontier components pipeline
# Some code relies on the order, modify carefully
self._components_pipeline = [
('Middleware', self.middlewares, True),
('CanonicalSolver', self.canonicalsolver, False),
]
if db_worker:
ComponentsPipelineMixin.__init__(self, backend, db_worker=db_worker, strategy_worker=strategy_worker)
# Log frontier manager start
self._logger.info('Frontier Manager Started!')
self._logger.info('-' * 80)
@classmethod
def from_settings(cls, settings=None, db_worker=False, strategy_worker=False, scoring_stream=None):
manager_settings = Settings.object_from(settings)
kwargs = {
'request_model': manager_settings.REQUEST_MODEL,
'response_model': manager_settings.RESPONSE_MODEL,
'backend': manager_settings.BACKEND,
'max_next_requests': manager_settings.MAX_NEXT_REQUESTS,
'settings': manager_settings,
'db_worker': db_worker,
'strategy_worker': strategy_worker
}
if strategy_worker:
kwargs.update({
'strategy_class': manager_settings.STRATEGY,
'strategy_args': manager_settings.STRATEGY_ARGS,
'middlewares': manager_settings.MIDDLEWARES,
'canonicalsolver': manager_settings.CANONICAL_SOLVER,
'scoring_stream': scoring_stream
})
return WorkerFrontierManager(**kwargs)
@property
def test_mode(self):
return False
def create_request(self, url, method=b'GET', headers=None, cookies=None, meta=None, body=b''):
"""
Creates request and applies middleware and canonical solver pipelines.
:param url: str
:param method: bytes
:param headers: dict
:param cookies: dict
:param meta: dict
:param body: bytes
:return: :class:`Request <frontera.core.models.Request>` object
"""
r = self.request_model(url, method=method, headers=headers, cookies=cookies, meta=meta, body=body)
return self._process_components('create_request',
obj=r,
return_classes=self.request_model,
components=(0, 1))
class SpiderFrontierManager(BaseContext, ComponentsPipelineMixin, BaseManager):
def __init__(self, request_model, response_model, backend, middlewares, max_next_requests, settings,
canonicalsolver):
BaseContext.__init__(self, request_model, response_model, settings=settings)
ComponentsPipelineMixin.__init__(self, backend, middlewares=middlewares, canonicalsolver=canonicalsolver,
db_worker=False, strategy_worker=False)
self.max_next_requests = max_next_requests
self._components_pipeline = [
('Middleware', self.middlewares, True),
('CanonicalSolver', self.canonicalsolver, False),
('Backend', self.backend, False)
]
@classmethod
def from_settings(cls, settings=None):
manager_settings = Settings.object_from(settings)
return SpiderFrontierManager(request_model=manager_settings.REQUEST_MODEL,
response_model=manager_settings.RESPONSE_MODEL,
backend=manager_settings.BACKEND,
middlewares=manager_settings.MIDDLEWARES,
max_next_requests=manager_settings.MAX_NEXT_REQUESTS,
settings=manager_settings,
canonicalsolver=manager_settings.CANONICAL_SOLVER)
@property
def test_mode(self):
return False
@property
def auto_start(self):
return True
def get_next_requests(self, max_next_requests=0, **kwargs):
return super(SpiderFrontierManager, self).get_next_requests(max_next_requests=max_next_requests or self.max_next_requests, **kwargs)
def links_extracted(self, request, links):
super(SpiderFrontierManager, self).links_extracted(request, links)
super(SpiderFrontierManager, self).links_extracted_after(request, links)
@property
def finished(self):
return False
def start(self):
self._logger.debug('START')
self._process_components(method_name='frontier_start')
def stop(self):
super(SpiderFrontierManager, self).close()
@six.add_metaclass(ABCMeta)
class UpdateScoreStream(object):
@abstractmethod
def send(self, request, score=1.0, dont_queue=False):
pass
def flush(self):
pass
class MessageBusUpdateScoreStream(UpdateScoreStream):
def __init__(self, producer, encoder):
self._producer = producer
self._encoder = encoder
def send(self, request, score=1.0, dont_queue=False):
encoded = self._encoder.encode_update_score(
request=request,
score=score,
schedule=not dont_queue
)
self._producer.send(None, encoded)
class LocalUpdateScoreStream(UpdateScoreStream):
def __init__(self, queue):
self._queue = queue
def send(self, request, score=1.0, dont_queue=False):
self._queue.schedule([(request.meta[b'fingerprint'], score, request, not dont_queue)])
class StatesContext(object):
def __init__(self, states):
self._requests = []
self.states = states
self._fingerprints = dict()
self.logger = logging.getLogger("states-context")
def to_fetch(self, requests):
requests = requests if isinstance(requests, Iterable) else [requests]
for request in requests:
fingerprint = request.meta[b'fingerprint']
self._fingerprints[fingerprint] = request
def fetch(self):
self.states.fetch(self._fingerprints)
self._fingerprints.clear()
def refresh_and_keep(self, requests):
self.to_fetch(requests)
self.fetch()
self.states.set_states(requests)
self._requests.extend(requests if isinstance(requests, Iterable) else [requests])
def release(self):
self.states.update_cache(self._requests)
self._requests = []
def flush(self):
self.logger.info("Flushing states")
self.states.flush()
self.logger.info("Flushing of states finished")
|
11467413
|
from random import expovariate
from statistics import mean
from math import inf as Infinity
# Parameters
lamda = 1.3 # Arrival rate (Lambda)
mu = 2.0 # Departure rate (Mu)
Num_Pkts = 100000 # Number of Packets to be simulated
count = 0 # Count number of simulated packets
clock = 0
N = 0 # State Variable; number of packets in system
Arr_Time = expovariate(lamda)
Dep_Time = Infinity
# Output Variables
Arr_Time_Data = [] # Collect arrival times
Dep_Time_Data = [] # Collect departure times
Delay_Data = [] # Collect delays of individual packets
while count < Num_Pkts:
if Arr_Time < Dep_Time: # Arrival Event
clock = Arr_Time
Arr_Time_Data.append(clock)
N = N + 1.0
Arr_Time = clock + expovariate(lamda)
if N == 1:
Dep_Time = clock + expovariate(mu)
else: # Departure Event
clock = Dep_Time
Dep_Time_Data.append(clock)
N = N - 1.0
count = count + 1 # Packet Simulated
if N > 0:
Dep_Time = clock + expovariate(mu)
else:
Dep_Time = Infinity
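# Service is FIFO, so the i-th departure corresponds to the i-th arrival and
# delays can be paired up by index.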
for i in range(Num_Pkts):
d = Dep_Time_Data[i] - Arr_Time_Data[i]
Delay_Data.append(d)
print( "Average Delay = ", round( mean(Delay_Data), 4) )
|