443101
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_equal
from cforest.tree import _compute_global_loss
from cforest.tree import _compute_valid_splitting_indices
from cforest.tree import _find_optimal_split
from cforest.tree import _find_optimal_split_inner_loop
from cforest.tree import _predict_row_causaltree
from cforest.tree import _retrieve_index
from cforest.tree import _transform_outcome
from cforest.tree import predict_causaltree
tt = [
np.array([False]),
np.array([False, False, False, False, True]),
np.array([True, True, True, True, False]),
np.array([False, False, True, True, True]),
np.concatenate(
(
np.full((10,), False),
np.array([True, False, True, False]),
np.full((10,), True),
)
),
]
min_leafs = [2] * 5
@pytest.mark.parametrize("t, min_leaf", zip(tt, min_leafs))
def test__compute_valid_splitting_indices_with_empty_output(t, min_leaf):
out = _compute_valid_splitting_indices(t, min_leaf)
assert_array_equal(out, np.arange(0))
tt = [
np.concatenate(
(
np.full((10,), False),
np.array([True, True, False, True, True, False, False]),
np.full((10,), True),
)
)
]
min_leafs = [2]
out_expected = [np.array([11, 12, 13, 14])]
@pytest.mark.parametrize(
"t, min_leaf, out_exp", zip(tt, min_leafs, out_expected)
)
def test__compute_valid_splitting_indices_with_output(t, min_leaf, out_exp):
out = _compute_valid_splitting_indices(t, min_leaf)
assert_array_equal(out, out_exp)
@pytest.fixture
def setup_retrieve_index_for_completeness():
index = np.concatenate(
(
np.full((10,), False),
np.array([True, True, False, True, True, False, False]),
np.full((10,), True),
)
)
x = np.array(
[
0.32956842,
-0.55119603,
-1.11740483,
-0.26300451,
-0.06686618,
0.21236623,
0.06182492,
0.66415156,
-0.19704692,
0.41878558,
0.58971691,
-1.3248038,
-0.55965504,
-0.28713562,
]
)
sorted_subset_index = np.argsort(x)
split_index = int(len(index) / 2)
out = {
"index": index,
"sorted_subset_index": sorted_subset_index,
"split_index": split_index,
}
return out
def test__retrieve_index_for_completeness(
setup_retrieve_index_for_completeness,
):
left, right = _retrieve_index(**setup_retrieve_index_for_completeness)
combined = np.array(left, dtype=int) + np.array(right, dtype=int)
assert_array_equal(
setup_retrieve_index_for_completeness["index"],
np.array(combined, dtype=bool),
)
def test__retrieve_index_reverse_engineer_split_point():
pass
def test__retrieve_index_reverse_engineer_index_sorted():
pass
def test__compute_global_loss():
n_1l, n_0l, n_1r, n_0r = 10, 10, 10, 10
sum_1l, sum_0l, sum_1r, sum_0r = 10, 10, 10, 10
y_transformed = np.array(20 * [-2, 2])
i = 20
result = _compute_global_loss(
sum_1l=sum_1l,
sum_0l=sum_0l,
sum_1r=sum_1r,
sum_0r=sum_0r,
n_1l=n_1l,
n_0l=n_0l,
n_1r=n_1r,
n_0r=n_0r,
y_transformed=y_transformed,
i=i,
use_transformed_outcomes=True,
)
assert result == 160.0
def _create_data_for_splitting_tests(n):
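    """Create 1-dim. data whose treatment effect flips sign at x = 0.

    The raw outcome is -1 on the left half and +1 on the right half of x;
    treated observations get three times that value, so the treatment effect
    is -2 for x < 0 and +2 for x > 0, and the best causal split is at x = 0.
    """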
x = np.linspace(-1, 1, num=n)
np.random.seed(2)
t = np.array(np.random.binomial(1, 0.5, n), dtype=bool)
y = np.repeat([-1, 1], int(n / 2))
y = np.insert(y, int(n / 2), -1)
y = y + 2 * y * t
return x, t, y
def test__find_optimal_split_inner_loop():
"""Create 1 dim. data for which we know that the split must occur at x = 0.
"""
nobs = 10001
x, t, y = _create_data_for_splitting_tests(n=nobs)
y_transformed = _transform_outcome(y, t)
splitting_indices = _compute_valid_splitting_indices(t, min_leaf=2)
result = _find_optimal_split_inner_loop(
splitting_indices=splitting_indices,
x=x,
t=t,
y=y,
y_transformed=y_transformed,
min_leaf=4,
use_transformed_outcomes=True,
)
_, split_value, split_index = result
    # need to check whether the algorithm has to hit 0.0 exactly or only approximately
assert abs(split_value) < 0.02
# as above (check if we found an index very close to the middle)
assert abs(split_index - nobs / 2) < 15
def test__find_optimal_split():
"""Create multi dimensional data for which we know that the split must
occur almost surely at the first feature."""
nobs = 10001 # number of observations
k = 10 # number of unrelated features
x, t, y = _create_data_for_splitting_tests(n=nobs)
# no seed on purpose
unrelated_features = np.random.normal(loc=0, scale=2, size=(nobs, k))
X = np.hstack((x.reshape((-1, 1)), unrelated_features))
index = np.full((nobs,), True)
tmp = _find_optimal_split(
X=X, t=t, y=y, index=index, min_leaf=4, use_transformed_outcomes=False
)
_, _, split_feat, _ = tmp
assert split_feat == 0
ctree = pd.read_csv("cforest/tests/data/fitted_ctree__predict_row_test.csv")
ctree[["left_child", "right_child", "level", "split_feat"]] = ctree[
["left_child", "right_child", "level", "split_feat"]
].astype("Int64")
ctrees = [ctree] * 4
rows = [
np.array([1, 1]),
np.array([1, -1]),
np.array([-1, 1]),
np.array([-1, -1]),
]
expected = [0.0, -5.0, 2.0, 14.0]
@pytest.mark.parametrize("ctree, row, exp", zip(ctrees, rows, expected))
def test__predict_row_causaltree(ctree, row, exp):
prediction = _predict_row_causaltree(ctree, row)
assert prediction == exp
def test__predict_causaltree():
x = np.array(rows)
exp = np.array(expected)
prediction = predict_causaltree(ctree, x)
assert_array_equal(prediction, exp)
443102
import bisect
import datetime
import itertools
from gopro_overlay.entry import Entry
def pairwise(iterable):  # equivalent to itertools.pairwise, which was added in Python 3.10
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
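
# Quick illustration of the helper above:
#   list(pairwise([1, 2, 3, 4])) -> [(1, 2), (2, 3), (3, 4)]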
class Timeseries:
def __init__(self, entries=None):
self.entries = {}
self.dates = []
self.modified = False
if entries is not None:
self.add(*entries)
@property
def min(self) -> datetime.datetime:
self.check_modified()
return self.dates[0]
@property
def max(self) -> datetime.datetime:
self.check_modified()
return self.dates[-1]
def __len__(self):
self.check_modified()
return len(self.dates)
def check_modified(self):
if self.modified:
self._update()
def _update(self):
self.dates = sorted(list(self.entries.keys()))
self.modified = False
def add(self, *entries: Entry):
for e in entries:
self.entries[e.dt] = e
self.modified = True
def get(self, dt, interpolate=True):
self.check_modified()
if not self.dates or dt < self.dates[0]:
raise ValueError("Date is before start")
if dt > self.dates[-1]:
raise ValueError("Date is after end")
if dt in self.entries:
return self.entries[dt]
else:
if not interpolate:
raise KeyError(f"Date {dt} not found")
greater_idx = bisect.bisect_left(self.dates, dt)
lesser_idx = greater_idx - 1
return self.entries[self.dates[lesser_idx]].interpolate(self.entries[self.dates[greater_idx]], dt)
def items(self):
self.check_modified()
return [self.entries[k] for k in self.dates]
def process_deltas(self, processor, skip=1):
self.check_modified()
diffs = list(zip(self.dates, self.dates[skip:]))
for a, b in diffs:
updates = processor(self.entries[a], self.entries[b], skip)
if updates:
self.entries[a].update(**updates)
def process(self, processor):
self.check_modified()
for e in self.dates:
updates = processor(self.entries[e])
if updates:
self.entries[e].update(**updates)
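
# Minimal usage sketch (assumes Entry can be built as Entry(dt, **fields) and
# exposes the .dt / .interpolate() behaviour used above):
#
#   t0 = datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone.utc)
#   ts = Timeseries(entries=[Entry(t0, speed=1.0),
#                            Entry(t0 + datetime.timedelta(seconds=2), speed=3.0)])
#   ts.get(t0 + datetime.timedelta(seconds=1))  # interpolated entry between the two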
443125
from sp_api.api import ListingsRestrictions
def test_listing_restrictions():
res = ListingsRestrictions().get_listings_restrictions(sellerId='A3F26DF64ZIPJZ', asin='B07HRD6JKK')
assert res('restrictions') is not None
assert isinstance(res('restrictions'), list)
443151
import json
from e2e.Classes.Merit.Merit import Merit
from e2e.Vectors.Generation.PrototypeChain import PrototypeBlock, PrototypeChain
proto: PrototypeChain = PrototypeChain(9, False)
merit: Merit = Merit.fromJSON(proto.finish().toJSON())
merit.add(PrototypeBlock(merit.blockchain.blocks[-1].header.time + 1200).finish(1, merit))
for _ in range(9):
merit.add(PrototypeBlock(merit.blockchain.blocks[-1].header.time + 1200).finish(0, merit))
with open("e2e/Vectors/Merit/LockedMerit/LocksUnlocks.json", "w") as vectors:
vectors.write(json.dumps(merit.toJSON()))
443202
from __future__ import division
from __future__ import print_function
import argparse
import os
import pickle
import sys
# sys.path.append(".")
import numpy as np
from dss_vae.structs.dataset import Dataset
from dss_vae.structs.vocab import Vocab
from dss_vae.structs.vocab import VocabEntry
def data_details(train_set, dev_set, test_set, vocab=None):
def detail(data_set):
_src_vocab = VocabEntry.from_corpus([e.src for e in data_set], )
_tgt_vocab = VocabEntry.from_corpus([e.tgt for e in data_set], )
_vocab = Vocab(src=_src_vocab, tgt=_tgt_vocab)
print('vocabulary %s' % repr(_vocab), file=sys.stdout)
_target_len = [len(e.tgt) for e in data_set]
print('Max target len: {}'.format(max(_target_len)), file=sys.stdout)
print('Avg target len: {}'.format(np.average(_target_len)), file=sys.stdout)
_source_len = [len(e.src) for e in data_set]
print('Max source len: {}'.format(max(_source_len)), file=sys.stdout)
print('Avg source len: {}'.format(np.average(_source_len)), file=sys.stdout)
print('generated vocabulary %s' % repr(vocab), file=sys.stdout)
print("sum info: train:{},dev:{},test:{}".format(
len(train_set),
len(dev_set),
len(test_set),
))
print("Train")
detail(train_set)
print("Dev")
detail(dev_set)
print("Test")
detail(test_set)
def length_filter(dataset, max_src_len=-1, max_tgt_len=-1, max_numbers=-1):
examples = dataset.examples
ori_num = len(examples)
if max_src_len != -1:
new_examples = []
for x in examples:
if len(x.src) < max_src_len:
new_examples.append(x)
examples = new_examples
if max_tgt_len != -1:
new_examples = []
for x in examples:
if len(x.tgt) < max_tgt_len:
new_examples.append(x)
examples = new_examples
if max_numbers != -1:
from random import sample
train_idx = sample(range(len(examples)), max_numbers)
examples = [examples[idx] for idx in train_idx]
dataset.examples = examples
pro_num = len(examples)
print("process from {} -> {}".format(ori_num, pro_num))
return dataset
def prepare_dataset(data_dir, data_dict, tgt_dir, max_src_vocab=16000, max_tgt_vocab=300, vocab_freq_cutoff=1,
max_src_length=-1, max_tgt_length=-1,
train_size=-1,
write_down=True):
train_pair = os.path.join(data_dir, data_dict['train'])
dev_pair = os.path.join(data_dir, data_dict['dev'])
test_pair = os.path.join(data_dir, data_dict['test'])
make_dataset(train_pair, dev_pair, test_pair, tgt_dir, max_src_vocab, max_tgt_vocab, vocab_freq_cutoff,
max_src_length,
max_tgt_length, train_size,
write_down)
def make_dataset(train_raw,
dev_raw=None,
test_raw=None,
out_dir=".",
max_src_vocab=16000,
max_tgt_vocab=300,
vocab_freq_cutoff=1,
max_src_length=-1,
max_tgt_length=-1,
train_size=-1,
write_down=True,
ext_fields=tuple(),
exp_mode="Plain"
):
train_set = length_filter(
Dataset.from_raw_file(train_raw, exp_mode),
max_src_length,
max_tgt_length,
max_numbers=train_size)
# generate vocabulary
if vocab_freq_cutoff == -1:
vocab_freq_cutoff = 0
src_vocab = VocabEntry.from_corpus([e.src for e in train_set], size=max_src_vocab,
freq_cutoff=vocab_freq_cutoff)
tgt_vocab = VocabEntry.from_corpus([e.tgt for e in train_set], size=max_tgt_vocab,
freq_cutoff=vocab_freq_cutoff)
sub_vocab_dict = {
"src": src_vocab,
"tgt": tgt_vocab
}
if len(ext_fields) > 0:
for sub_vocab_name in ext_fields:
sub_vocab = VocabEntry.from_corpus([getattr(e, sub_vocab_name) for e in train_set])
sub_vocab_dict[sub_vocab_name] = sub_vocab
vocab = Vocab(**sub_vocab_dict)
print('generated vocabulary %s' % repr(vocab), file=sys.stdout)
dev_set = length_filter(
Dataset.from_raw_file(dev_raw, exp_mode),
max_src_length,
max_tgt_length)
if test_raw is not None:
test_set = length_filter(
Dataset.from_raw_file(test_raw, exp_mode),
max_src_length,
max_tgt_length)
else:
test_set = dev_set
print("sum info: train:{},dev:{},test:{}".format(
len(train_set),
len(dev_set),
len(test_set),
))
data_details(train_set, dev_set, test_set)
if write_down:
if not os.path.exists(out_dir):
os.makedirs(out_dir)
train_file = out_dir + "/train.bin"
dev_file = out_dir + "/dev.bin"
test_file = out_dir + "/test.bin"
vocab_file = out_dir + "/vocab.bin"
pickle.dump(train_set.examples, open(train_file, 'wb'))
pickle.dump(dev_set.examples, open(dev_file, 'wb'))
pickle.dump(test_set.examples, open(test_file, 'wb'))
pickle.dump(vocab, open(vocab_file, 'wb'))
# if 'debug' in data_dict:
# debug_set = Dataset.from_raw_file(os.path.join(data_dir, data_dict['debug']))
# debug_file = tgt_dir + "/debug.bin"
# pickle.dump(debug_set.bin, open(debug_file, 'wb'))
if __name__ == "__main__":
opt_parser = argparse.ArgumentParser()
    opt_parser.add_argument('--train_file', dest="train_file", type=str,
                            help='train file produced by preprocessing with tree_convert')
opt_parser.add_argument('--dev_file', dest="dev_file", type=str, help='dev file with src,tgt pair')
opt_parser.add_argument('--test_file', dest="test_file", type=str, help='test file with src,tgt pair')
opt_parser.add_argument('--out_dir', dest="out_dir", type=str, help='target dir')
opt_parser.add_argument("--max_src_vocab", dest="max_src_vocab", type=int, default=16000,
help="source phrase vocab size, default is 16000")
opt_parser.add_argument("--max_tgt_vocab", dest="max_tgt_vocab", type=int, default=300,
help="target phrase vocab size, 300 for parse")
opt_parser.add_argument("--vocab_freq_cutoff", dest="vocab_freq_cutoff", type=int, default=-1,
help="sort freq of word in train set, "
"and cutoff which freq which lower than this value, default is -1")
opt_parser.add_argument("--max_src_len", dest="max_src_len", type=int, default=-1,
help="max length of example 's source input , default is -1")
opt_parser.add_argument("--max_tgt_len", dest="max_tgt_len", type=int, default=-1,
help="max length of example 's target output , default is -1")
opt_parser.add_argument("--train_size", dest="train_size", type=int, default=-1,
help="the number of bin select from whole dataset, default is -1, means all")
opt_parser.add_argument("--mode", dest="mode", type=str, default="Plain",
help="vocab filed 's mode [plain,syntax-vae,ae,syntax-gen], default is plain, ")
opt = opt_parser.parse_args()
    ext_field_dict = {
"Plain": tuple(),
"PTB": tuple(),
'SyntaxVAE': tuple(),
"NAG": tuple(["arc"])
}
make_dataset(
train_raw=opt.train_file,
dev_raw=opt.dev_file,
test_raw=opt.test_file,
out_dir=opt.out_dir,
max_src_vocab=opt.max_src_vocab,
max_tgt_vocab=opt.max_tgt_vocab,
max_src_length=opt.max_src_len,
max_tgt_length=opt.max_tgt_len,
vocab_freq_cutoff=opt.vocab_freq_cutoff,
train_size=opt.train_size,
        ext_fields=ext_field_dict[opt.mode],
exp_mode=opt.mode
)
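
    # Example invocation (file paths and script name are hypothetical):
    #   python prepare_dataset.py --train_file data/train.txt --dev_file data/dev.txt \
    #       --test_file data/test.txt --out_dir data/bin --mode Plain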
443207
import cv2
import matplotlib.pyplot as plt
import numpy as np
def read_cv2_img(path):
'''
Read color images
Args:
path: Path to image
return:
Only returns color images
'''
img = cv2.imread(path, -1)
if img is not None:
if len(img.shape) != 3:
return None
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def resize(img, dst_size):
if isinstance(dst_size, int):
        img = cv2.resize(img, (dst_size, dst_size), interpolation=cv2.INTER_CUBIC)
elif isinstance(dst_size, tuple) or isinstance(dst_size, list):
        img = cv2.resize(img, (dst_size[0], dst_size[1]), interpolation=cv2.INTER_CUBIC)
return img
def show_cv2_img(img, title='img'):
'''
Display cv2 image
:param img: cv::mat
:param title: title
:return: None
'''
plt.imshow(img)
plt.title(title)
plt.axis('off')
plt.show()
def show_images_row(imgs, titles, rows=1):
'''
    Display a grid of cv2 images
Args:
imgs: list [cv::mat]
titles: titles
return: None
'''
assert ((titles is None) or (len(imgs) == len(titles)))
num_images = len(imgs)
if titles is None:
titles = ['Image (%d)' % i for i in range(1, num_images + 1)]
fig = plt.figure()
for n, (image, title) in enumerate(zip(imgs, titles)):
        ax = fig.add_subplot(rows, int(np.ceil(num_images / float(rows))), n + 1)
if image.ndim == 2:
plt.gray()
plt.imshow(image)
ax.set_title(title)
plt.axis('off')
plt.show()
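
# Example usage sketch (the image path is hypothetical):
#   img = read_cv2_img('sample.jpg')
#   if img is not None:
#       show_images_row([img, resize(img, 128)], titles=['original', 'resized'])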
443208
from typing import List, Optional
import numpy as np
from l5kit.data.filter import filter_agents_by_labels, filter_agents_by_track_id
from l5kit.geometry import rotation33_as_yaw
from l5kit.rasterization.render_context import RenderContext
from l5kit.rasterization.box_rasterizer import get_ego_as_agent, draw_boxes, BoxRasterizer
class AugmentedBoxRasterizer(BoxRasterizer):
@staticmethod
def from_cfg(cfg, data_manager=None, eval=False):
raster_cfg = cfg["raster_params"]
# map_type = raster_cfg["map_type"]
# dataset_meta_key = raster_cfg["dataset_meta_key"]
render_context = RenderContext(
raster_size_px=np.array(raster_cfg["raster_size"]),
pixel_size_m=np.array(raster_cfg["pixel_size"]),
center_in_raster_ratio=np.array(raster_cfg["ego_center"]),
)
# raster_size: Tuple[int, int] = cast(Tuple[int, int], tuple(raster_cfg["raster_size"]))
# pixel_size = np.array(raster_cfg["pixel_size"])
# ego_center = np.array(raster_cfg["ego_center"])
filter_agents_threshold = raster_cfg["filter_agents_threshold"]
history_num_frames = cfg["model_params"]["history_num_frames"]
return AugmentedBoxRasterizer(
render_context,
filter_agents_threshold,
history_num_frames,
raster_cfg.get("agent_drop_ratio", 0.9),
raster_cfg.get("agent_drop_prob", -1.0),
raster_cfg.get("min_extent_ratio", 0.8),
raster_cfg.get("max_extent_ratio", 1.2),
eval=eval
)
def __init__(
self,
render_context: RenderContext,
filter_agents_threshold: float,
history_num_frames: int,
agent_drop_ratio: float = 0.9,
agent_drop_prob: float = -1.0,
min_extent_ratio: float = 0.8,
max_extent_ratio: float = 1.2,
eval: bool = False,
):
"""
Args:
render_context (RenderContext): Render context
filter_agents_threshold (float): Value between 0 and 1 used to filter uncertain agent detections
history_num_frames (int): Number of frames to rasterise in the past
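            agent_drop_ratio (float): Probability that the agent-drop augmentation is applied to a sample
            agent_drop_prob (float): Per-agent drop probability; if negative, a random number of agents is kept instead
            min_extent_ratio (float): Lower bound for the random scaling of agent extents
            max_extent_ratio (float): Upper bound for the random scaling of agent extents
            eval (bool): If True, disables all augmentation (evaluation mode)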
"""
super(AugmentedBoxRasterizer, self).__init__(render_context, filter_agents_threshold, history_num_frames)
# --- These are called inside super init ---
# self.render_context = render_context
# self.raster_size = render_context.raster_size_px
# self.filter_agents_threshold = filter_agents_threshold
# self.history_num_frames = history_num_frames
self.raster_channels = (self.history_num_frames + 1) * 5
self.agent_drop_ratio = agent_drop_ratio
self.agent_drop_prob = agent_drop_prob
self.min_extent_ratio = min_extent_ratio
self.max_extent_ratio = max_extent_ratio
self.eval = eval # Evaluation mode, No augmentation is applied when `True`.
if eval:
print("AugmentedBoxRasterizer eval mode is True!")
def rasterize(
self,
history_frames: np.ndarray,
history_agents: List[np.ndarray],
history_tl_faces: List[np.ndarray],
agent: Optional[np.ndarray] = None,
) -> np.ndarray:
        # all frames are drawn relative to this one
frame = history_frames[0]
if agent is None:
ego_translation_m = history_frames[0]["ego_translation"]
ego_yaw_rad = rotation33_as_yaw(frame["ego_rotation"])
else:
ego_translation_m = np.append(agent["centroid"], history_frames[0]["ego_translation"][-1])
ego_yaw_rad = agent["yaw"]
raster_from_world = self.render_context.raster_from_world(ego_translation_m, ego_yaw_rad)
# this ensures we always end up with fixed size arrays, +1 is because current time is also in the history
out_shape = (self.raster_size[1], self.raster_size[0], self.history_num_frames + 1)
agents_images = np.zeros(out_shape, dtype=np.uint8)
ego_images = np.zeros(out_shape, dtype=np.uint8)
# --- 1. prepare agent keep indices for random agent drop augmentation ---
track_ids = np.concatenate([a["track_id"] for a in history_agents])
unique_track_ids = np.unique(track_ids).astype(np.int64)
n_max_agents = int(np.max(unique_track_ids) + 1) # +1 for host car.
unique_track_ids = np.concatenate([[0], unique_track_ids]) # Add Host car, with id=0.
n_unique_agents = len(unique_track_ids)
# if not np.all(unique_track_ids == np.arange(np.max(unique_track_ids) + 1)):
# # It occured!! --> unique_track_ids is not continuous. Some numbers are filtered out.
# print("unique_track_ids", unique_track_ids, "is not continuous!!!")
if not self.eval and np.random.uniform() < self.agent_drop_ratio:
if self.agent_drop_prob < 0:
# Randomly decide number of agents to drop.
# 0 represents host car.
n_keep_agents = np.random.randint(0, n_unique_agents)
agent_keep_indices = np.random.choice(unique_track_ids, n_keep_agents, replace=False)
else:
# Decide agents to drop or not by agent_drop_prob.
agent_keep_indices = unique_track_ids[
np.random.uniform(0.0, 1.0, (n_unique_agents,)) > self.agent_drop_prob]
n_keep_agents = len(agent_keep_indices)
# Must keep ego agent!
if agent["track_id"] not in agent_keep_indices:
agent_keep_indices = np.append(agent_keep_indices, agent["track_id"])
else:
n_keep_agents = n_unique_agents
# keep all agents
agent_keep_indices = None
# --- 2. prepare extent scale augmentation ratio ----
# TODO: create enough number of extent_ratio array. Actually n_keep_agents suffice but create n_max_agents
# for simplicity..
if self.eval:
# No augmentation.
agents_extent_ratio = np.ones((n_max_agents, 3))
elif self.min_extent_ratio == self.max_extent_ratio:
agents_extent_ratio = np.ones((n_max_agents, 3)) * self.min_extent_ratio
else:
agents_extent_ratio = np.random.uniform(self.min_extent_ratio, self.max_extent_ratio, (n_max_agents, 3))
ego_extent_ratio = agents_extent_ratio[0]
for i, (frame, agents_) in enumerate(zip(history_frames, history_agents)):
agents = filter_agents_by_labels(agents_, self.filter_agents_threshold)
if agent_keep_indices is not None:
# --- 1. apply agent drop augmentation ---
agents = agents[np.isin(agents["track_id"], agent_keep_indices)]
# note the cast is for legacy support of dataset before April 2020
av_agent = get_ego_as_agent(frame).astype(agents.dtype)
# 2. --- apply extent scale augmentation ---
# TODO: Need to convert agents["track_id"] --> index based on `agent_keep_indices`,
# if we only create `agents_extent_ratio` of size `n_keep_agents`.
agents["extent"] *= agents_extent_ratio[agents["track_id"]]
av_agent[0]["extent"] *= ego_extent_ratio
if agent is None:
agents_image = draw_boxes(self.raster_size, raster_from_world, agents, 255)
ego_image = draw_boxes(self.raster_size, raster_from_world, av_agent, 255)
else:
agent_ego = filter_agents_by_track_id(agents, agent["track_id"])
if agent_keep_indices is None or 0 in agent_keep_indices:
agents = np.append(agents, av_agent)
if len(agent_ego) == 0: # agent not in this history frame
agents_image = draw_boxes(self.raster_size, raster_from_world, agents, 255)
ego_image = np.zeros_like(agents_image)
else: # add av to agents and remove the agent from agents
agents = agents[agents != agent_ego[0]]
agents_image = draw_boxes(self.raster_size, raster_from_world, agents, 255)
ego_image = draw_boxes(self.raster_size, raster_from_world, agent_ego, 255)
agents_images[..., i] = agents_image
ego_images[..., i] = ego_image
# combine such that the image consists of [agent_t, agent_t-1, agent_t-2, ego_t, ego_t-1, ego_t-2]
out_im = np.concatenate((agents_images, ego_images), -1)
return out_im.astype(np.float32) / 255
443222
import torch
from mmcls.models.classifiers import ImageClassifier
def test_repvgg():
g4_map = {l: 4 for l in [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26]}
model_cfg = dict(backbone=dict(
type='RepVGG',
num_classes=1000,
num_blocks=[4, 6, 16, 1],
width_multiplier=[2.5, 2.5, 2.5, 5],
override_groups_map=g4_map,
),
neck=None,
head=dict(type='ClsHead',
loss=dict(type='CrossEntropyLoss')))
img_classifier = ImageClassifier(**model_cfg)
img_classifier.init_weights()
imgs = torch.randn(16, 3, 32, 32)
label = torch.randint(0, 10, (16, ))
losses = img_classifier.forward_train(imgs, label)
assert losses['loss'].item() > 0, "====> loss error "
if __name__ == '__main__':
test_repvgg()
443227
import argparse
import wandb
from deepform.common import MODEL_DIR, WANDB_PROJECT
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="download a model stored in W&B")
parser.add_argument(
"-v",
"--version",
dest="version",
help="model version to download",
default="latest",
)
args = parser.parse_args()
run = wandb.init(
project="model-download",
job_type="ps",
allow_val_change=True,
)
config = run.config
model_name = config.model_artifact_name
artifact_name = f"{WANDB_PROJECT}/{model_name}:{args.version}"
artifact = run.use_artifact(artifact_name, type="model")
artifact_alias = artifact.metadata.get("name") or "unknown"
artifact.download(root=(MODEL_DIR / artifact_alias))
443269
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
COLORS = dict(
black='0;30',
darkgray='1;30',
red='1;31',
green='1;32',
brown='0;33',
yellow='1;33',
blue='1;34',
purple='1;35',
cyan='1;36',
white='1;37',
reset='0'
)
COLORIZE = sys.stdout.isatty()
def paint(s, color, colorize=COLORIZE):
if colorize:
if color in COLORS:
return '\033[{}m{}\033[0m'.format(COLORS[color], s)
else:
raise ValueError('Invalid color')
else:
return s
def print_init(info, file=sys.stdout, colorize=COLORIZE):
    print('Initialization overview', file=file)
for k, v in info['init'].items():
if v == 'file':
color = 'green'
elif v == 'init':
color = 'red'
else:
color = 'white'
print('%30s %s' % (k, paint(v, color=color, colorize=colorize)), file=file)
def histogram(x, bins='auto', columns=40):
if np.isnan(x).any():
print("Error: Can't produce histogram when there are NaNs")
return
total_count = len(x)
    counts, bins = np.histogram(x, bins=bins, density=True)
for i, c in enumerate(counts):
frac = c
cols = int(frac * columns)
bar = '#' * min(60, cols) + ('>' if cols > 60 else '')
print('[{:6.2f}, {:6.2f}): {}'.format(bins[i], bins[i+1], bar))
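
# Example (illustrative): histogram(np.random.randn(500)) prints one text bar
# per bin, e.g. "[ -0.52,  -0.12): ########"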
443287
from datetime import datetime
from dataclasses import dataclass
from .constant import *
@dataclass
class BarData:
"""
Candlestick bar data of a certain trading period.
"""
symbol: str
exchange: Exchange
datetime: datetime
interval: Interval = None
volume: float = 0
open_interest: float = 0
open_price: float = 0
high_price: float = 0
low_price: float = 0
close_price: float = 0
extra: dict = None
def __post_init__(self):
""""""
self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
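
# Construction sketch (the Exchange/Interval members shown are illustrative):
#   bar = BarData(symbol="IF2109", exchange=Exchange.CFFEX,
#                 datetime=datetime(2021, 9, 1, 9, 30),
#                 interval=Interval.MINUTE, open_price=100.0, close_price=101.0)
#   bar.vt_symbol  # -> "IF2109.CFFEX"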
443292
from u2flib_server.utils import websafe_decode
from u2flib_server.model import (JSONDict, RegistrationData, SignatureData,
U2fRegisterRequest, U2fSignRequest)
from binascii import b2a_hex
import unittest
SAMPLE_REG_DATA = websafe_decode(
'BQRFJ5xApW6uBsuSJ_FgUcL-seKecha71q8evgqfQwc5QuqQhv4qJoL3KpSUKX1T6XVJEqyOkn'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'tJZ4V3CqMZ-MOUgICt2aMxacMX9cIa8dgS2jUDBOMB0GA1UdDgQWBBQNqL-TV04iaO6mS5tjGE'
'6ShfexnjAfBgNVHSMEGDAWgBQNqL-TV04iaO6mS5tjGE6ShfexnjAMBgNVHRMEBTADAQH_MAkG'
'ByqGSM49BAEDSAAwRQIgXJWZdbvOWdhVaG7IJtn44o21Kmi8EHsDk4cAfnZ0r38CIQD6ZPi3Pl'
'4lXxbY7BXFyrpkiOvCpdyNdLLYbSTbvIBQOTBGAiEAsp3iNiXaF9mk6mHzJqva-hi7AlT-_or-'
'2HdKUJycqaUCIQCP9P4aju4iq8U6hRyIIllppzwfK9_7QNv_7_OV7pQxug'
)
SAMPLE_REG_DATA_NEEDS_FIX = websafe_decode(
'BQQR2Q82wJ9RLOcH5TvQvve7LrBnDp0YiCSDxKPiHsg_AY1b70GK-dcCt-HqCkqJZikAXL4zLY'
'<KEY>'
'<KEY>'
'wwKgYDVQQDEyNZdWJpY28gVTJGIFJvb3QgQ0EgU2VyaWFsIDQ1NzIwMDYzMTAgFw0xNDA4MDEw'
'MDAwMDBaGA8yMDUwMDkwNDAwMDAwMFowKzEpMCcGA1UEAwwgWXViaWNvIFUyRiBFRSBTZXJpYW'
'wgMTM4MzExNjc4NjEwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ3jfx0DHOblHJO09Ujubh2'
'gQZWwT3ob6-uzzjZD1XiyAob_gsw3FOzXefQRblty48r-U-o4LkDFjx_btwuSHtxoxIwEDAOBg'
'orBgEEAYLECgEBBAAwCwYJKoZIhvcNAQELA4IBAQIaR2TKAInPkq24f6hIU45yzD79uzR5KUME'
'e4IWqTm69METVio0W2FHWXlpeUe85nGqanwGeW7U67G4_WAnGbcd6zz2QumNsdlmb_AebbdPRa'
'95Z8BG1ub_S04JoxQYNLaa8WRlzN7POgqAnAqkmnsZQ_W9Tj2uO9zP3mpxOkkmnqz7P5zt4Lp5'
'xrv7p15hGOIPD5V-ph7tUmiCJsq0LfeRA36X7aXi32Ap0rt_wyfnRef59YYr7SmwaMuXKjbIZS'
'LesscZZTMzXd-uuLb6DbUCasqEVBkGGqTRfAcOmPov1nHUrNDCkOR0obR4PsJG4PiamIfApNeo'
'XGYpGbok6nucMEUCIQCCL2jamBxyJQ6ktxgJVNFRKf4pUHvlvFgyXTQ6NOYlAwIgSQ1TB64V25'
'deHKak1UEZA2AbkR9znO2XJKd93v1BY9Y'
)
SAMPLE_SIG_DATA = websafe_decode(
'AAAAAAEwRgIhAPVBA3i9Ag6UOe9Jv-fz0J8HLIGfAS26eP8m0FRoZvufAiEAzmxU_mJxDJXOvV'
'-FMl_Ug2qYUGFZ6hu9m2VYdTkbvgA'
)
class RegistrationDataTest(unittest.TestCase):
def test_invalid_data(self):
self.assertRaises(ValueError, RegistrationData, b'abc')
def test_str(self):
rawresponse = RegistrationData(SAMPLE_REG_DATA)
self.assertEqual(b'050445279c', b2a_hex(rawresponse.bytes)[:10])
self.assertEqual(SAMPLE_REG_DATA, rawresponse.bytes)
def test_str_needs_fix(self):
rawresponse = RegistrationData(SAMPLE_REG_DATA_NEEDS_FIX)
self.assertEqual(b'050411d90f', b2a_hex(rawresponse.bytes)[:10])
self.assertNotEqual(SAMPLE_REG_DATA_NEEDS_FIX, rawresponse.bytes)
class SignatureDataTest(unittest.TestCase):
def test_str(self):
rawresponse = SignatureData(SAMPLE_SIG_DATA)
self.assertEqual(b'0000000001', b2a_hex(rawresponse.bytes)[:10])
self.assertEqual(SAMPLE_SIG_DATA, rawresponse.bytes)
class JSONDictTest(unittest.TestCase):
def test_create(self):
self.assertEqual({}, JSONDict())
def test_create_from_bytes(self):
self.assertEqual({'a': 1, 'b': 2}, JSONDict(b'{"a":1,"b":2}'))
def test_create_from_unicode(self):
self.assertEqual({'a': 1, 'b': 2}, JSONDict(u'{"a":1,"b":2}'))
def test_create_from_dict(self):
self.assertEqual({'a': 1, 'b': 2}, JSONDict({'a': 1, 'b': 2}))
def test_create_from_kwargs(self):
self.assertEqual({'a': 1, 'b': 2}, JSONDict(a=1, b=2))
def test_create_from_list(self):
self.assertEqual({}, JSONDict([]))
self.assertEqual({'a': 1, 'b': 2}, JSONDict([('a', 1), ('b', 2)]))
def test_create_wrong_nargs(self):
self.assertRaises(TypeError, JSONDict, {}, {})
self.assertRaises(TypeError, JSONDict, {'a': 1}, {'b': 2})
def test_json(self):
self.assertEqual('{}', JSONDict().json)
self.assertEqual('{"a": 1}', JSONDict(a=1).json)
def test_wrap(self):
self.assertTrue(isinstance(JSONDict.wrap({}), JSONDict))
x = JSONDict()
self.assertTrue(x is JSONDict.wrap(x))
def test_getattr_unknown(self):
self.assertRaises(AttributeError, lambda: JSONDict().foo)
def test_getattr(self):
self.assertEqual(1, JSONDict(a=1).a)
def test_required_fields(self):
class Foo(JSONDict):
_required_fields = ['foo', 'bar']
Foo({'foo': 1, 'bar': 2})
self.assertRaises(ValueError, Foo, {'foo': 1})
self.assertRaises(ValueError, Foo, {'bar': 1})
self.assertRaises(ValueError, Foo)
class U2fRegisterRequestTest(unittest.TestCase):
def test_u2f_register_request(self):
challenge = "<KEY>"
req = U2fRegisterRequest.create(
'https://example.com',
[],
websafe_decode(challenge)
)
self.assertEqual(U2fRegisterRequest.wrap({
"registeredKeys": [],
"appId": "https://example.com",
"registerRequests": [{
"version": "U2F_V2",
"challenge": "Jtb6wLXjMHN67fV1BVNivz-qnAnD8OOqFju49RDBJro"
}]
}), req)
self.assertEqual(U2fRegisterRequest.wrap(req.json), req)
self.assertEqual(
websafe_decode('EAaArVRs5qV39C9S3zO0z9ynVoWeZkuNfeMpsVDQnOk'),
req.applicationParameter
)
self.assertEqual([], req.registeredKeys)
self.assertEqual(1, len(req.registerRequests))
reg_req = req.get_request('U2F_V2')
self.assertEqual(reg_req.challenge, websafe_decode(challenge))
self.assertEqual(reg_req.version, 'U2F_V2')
class U2fSignRequestTest(unittest.TestCase):
def test_missing_keys(self):
self.assertRaises(ValueError, U2fSignRequest.wrap, {
"appId": "https://example.com",
"challenge": "0000",
"registeredKeys": []
})
def test_u2f_sign_request(self):
challenge = "Jtb6wLXjMHN67fV1BVNivz-qnAnD8OOqFju49RDBJro"
req = U2fSignRequest.wrap(
{
"appId": "https://example.com",
"registeredKeys": [{
"publicKey": "<KEY>"
"<KEY>",
"version": "U2F_V2",
"keyHandle": "<KEY>"
"<KEY>"
}],
"challenge": challenge
}
)
self.assertEqual(U2fSignRequest.wrap(req.json), req)
self.assertEqual(
req.applicationParameter,
websafe_decode('EAaArVRs5qV39C9S3zO0z9ynVoWeZkuNfeMpsVDQnOk')
)
self.assertEqual(req.challenge, websafe_decode(challenge))
443296
from azureml.core import Workspace
import os
import sys
subscription_id = os.environ.get("SUBSCRIPTION_ID", "<subscription_id>")
resource_group = os.environ.get("RESOURCE_GROUP", "tensorflow101")
workspace_name = os.environ.get("WORKSPACE_NAME", "tensorflow101-mlwrksp")
workspace_region = os.environ.get("WORKSPACE_REGION", "westeurope")
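
# Reuse the workspace if it already exists; otherwise create it (and the
# resource group) with the settings above and persist the config locally.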
try:
ws = Workspace(subscription_id = subscription_id, resource_group = resource_group, workspace_name = workspace_name)
ws.write_config()
print('Library configuration succeeded')
except Exception:
ws = Workspace.create(name = workspace_name,
subscription_id = subscription_id,
resource_group = resource_group,
location = workspace_region,
create_resource_group = True,
exist_ok = True)
ws.get_details()
ws.write_config()
print('Library configuration succeeded')
443331
import csv
import json
from datetime import datetime
from decimal import Decimal
from pathlib import Path
from typing import Dict, List, Tuple, Union
from sqlalchemy.orm.session import Session
from reporter.database.model import Close, Price
class RICInfo:
def __init__(self,
description: str,
currency: str,
utc_offset: int,
has_dst: bool):
self.description = description
self.currency = currency
self.utc_offset = utc_offset
self.has_dst = has_dst
class Table:
def __init__(self,
ric: str,
description: str,
currency: str,
rows: List[Tuple[str, str, str]],
is_dummy: Union[bool, None] = False):
self.ric = ric
self.description = description
self.currency = currency
self.rows = rows
self.is_dummy = is_dummy
def load_ric_to_ric_info() -> Dict[str, RICInfo]:
with Path('resources', 'stock-exchanges.json').open(mode='r') as f:
stock_exchange = json.load(f)
result = {}
with Path('resources', 'ric.csv').open(mode='r') as f:
reader = csv.reader(f)
next(reader)
for line in reader:
ric, desc, cur, _, exchange, _ = line
exchange_info = stock_exchange.get(exchange, {})
utc_offset = int(exchange_info.get('utc_offset', 0))
has_dst = bool(exchange_info.get('has_dst'))
result[ric] = RICInfo(desc, cur, utc_offset, has_dst)
return result
def create_ric_tables(session: Session,
rics: List[str],
ric_to_ric_info: Dict[str, RICInfo],
timestamp: datetime) -> List[Table]:
DATETIME_FORMAT = '%Y-%m-%d %H:%M'
EPSILON = 1e-2
n_days = 5
tables = []
for ric in rics:
ric_info = ric_to_ric_info.get(ric)
results = session \
.query(Close.t, Price.val) \
.join(Price, Close.t == Price.t) \
.filter(Close.ric == ric, Close.ric == Price.ric, Close.t <= timestamp) \
.order_by(Close.t.desc()) \
.limit(n_days) \
.all()
prev_vals = [v for (_, v) in results][1:] + [Decimal(0)]
formatted_rows = []
for i, (t, v, diff) in enumerate([(t, v, v - prev_v) for ((t, v), prev_v)
in zip(results, prev_vals)]):
if i == len(results) - 1:
indicator = '-'
elif abs(diff) < EPSILON:
indicator = '→'
elif diff > 0:
indicator = '↑'
else:
indicator = '↓'
formatted_rows.append((t.strftime(DATETIME_FORMAT), '{:,.2f}'.format(v), indicator))
table = Table(ric,
ric_info.description,
ric_info.currency,
formatted_rows)
tables.append(table)
return tables
443349
import re
from drf_spectacular.openapi import AutoSchema
class PeeringManagerAutoSchema(AutoSchema):
"""
    Subclass of drf-spectacular's `AutoSchema` to support bulk operations.
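
    For example (illustrative path), a list GET whose tokenized path is
    ["peering", "autonomous-systems"] produces the operation id
    "peering_autonomous_systems_list".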
"""
def get_operation_id(self):
tokenized_path = self._tokenize_path()
# replace dashes as they can be problematic later in code generation
tokenized_path = [t.replace("-", "_") for t in tokenized_path]
if self.method.lower() == "get" and self._is_list_view():
action = "list"
else:
if hasattr(self.view, "action_map") and self.view.action_map:
action = self.view.action_map[self.method.lower()]
else:
action = self.method_mapping[self.method.lower()]
if not tokenized_path:
tokenized_path.append("root")
if re.search(r"<drf_format_suffix\w*:\w+>", self.path_regex):
tokenized_path.append("formatted")
return "_".join(tokenized_path + [action])
443434
import unittest
import torch
from butterfly_factor import butterfly_factor_mult, butterfly_factor_mult_intermediate
from butterfly import Block2x2DiagProduct
from complex_utils import complex_mul
from factor_multiply import butterfly_multiply_intermediate, butterfly_multiply_intermediate_backward
def twiddle_list_concat(B: Block2x2DiagProduct):
# Assume ordering from largest size to smallest size
if not B.complex:
return torch.cat([factor.ABCD.permute(2, 0, 1) for factor in B.factors[::-1]])
else:
return torch.cat([factor.ABCD.permute(2, 0, 1, 3) for factor in B.factors[::-1]])
class ButterflyFactorTest(unittest.TestCase):
def setUp(self):
self.rtol = 1e-3
self.atol = 1e-5
def test_butterfly_factor_cpu(self):
batch_size = 10
n = 4096
B = Block2x2DiagProduct(n)
input_ = torch.randn(batch_size, n, requires_grad=True)
output = input_
for factor in B.factors[::-1]:
prev = output
output = butterfly_factor_mult(factor.ABCD, output.view(-1, 2, factor.size // 2)).view(prev.shape)
output_slow = ((factor.ABCD * prev.view(-1, 1, 2, factor.size // 2)).sum(dim=-2)).view(prev.shape)
self.assertTrue(torch.allclose(output, output_slow, rtol=self.rtol, atol=self.atol), (output - output_slow).abs().max().item())
grad = torch.randn_like(output)
d_twiddle, d_input = torch.autograd.grad(output, (factor.ABCD, prev), grad, retain_graph=True)
d_twiddle_slow, d_input_slow = torch.autograd.grad(output_slow, (factor.ABCD, prev), grad, retain_graph=True)
self.assertTrue(torch.allclose(d_twiddle, d_twiddle_slow, rtol=self.rtol, atol=self.atol), (d_twiddle - d_twiddle_slow).abs().max().item())
self.assertTrue(torch.allclose(d_input, d_input_slow, rtol=self.rtol, atol=self.atol), (d_input - d_input_slow).abs().max().item())
def test_butterfly_factor_complex_cpu(self):
batch_size = 10
n = 4096
B = Block2x2DiagProduct(n, complex=True)
input_ = torch.randn(batch_size, n, 2, requires_grad=True)
output = input_
for factor in B.factors[::-1]:
prev = output
output = butterfly_factor_mult(factor.ABCD, output.view(-1, 2, factor.size // 2, 2)).view(prev.shape)
output_slow = (complex_mul(factor.ABCD, prev.view(-1, 1, 2, factor.size // 2, 2)).sum(dim=-3)).view(prev.shape)
self.assertTrue(torch.allclose(output, output_slow, rtol=self.rtol, atol=self.atol), (output - output_slow).abs().max().item())
grad = torch.randn_like(output)
d_twiddle, d_input = torch.autograd.grad(output, (factor.ABCD, prev), grad, retain_graph=True)
d_twiddle_slow, d_input_slow = torch.autograd.grad(output_slow, (factor.ABCD, prev), grad, retain_graph=True)
self.assertTrue(torch.allclose(d_twiddle, d_twiddle_slow, rtol=self.rtol, atol=self.atol), (d_twiddle - d_twiddle_slow).abs().max().item())
self.assertTrue(torch.allclose(d_input, d_input_slow, rtol=self.rtol, atol=self.atol), (d_input - d_input_slow).abs().max().item())
@unittest.skipIf(not torch.cuda.is_available(), "need CUDA")
def test_butterfly_factor_cuda(self):
batch_size = 100
n = 4096 # To test n > MAX_BLOCK_SIZE
B = Block2x2DiagProduct(n).to('cuda')
input_ = torch.randn(batch_size, n, device='cuda', requires_grad=True)
output = input_
for factor in B.factors[::-1]:
prev = output
output = butterfly_factor_mult(factor.ABCD, output.view(-1, 2, factor.size // 2)).view(prev.shape)
output_slow = ((factor.ABCD * prev.view(-1, 1, 2, factor.size // 2)).sum(dim=-2)).view(prev.shape)
self.assertTrue(torch.allclose(output, output_slow, rtol=self.rtol, atol=self.atol), (output - output_slow).abs().max().item())
grad = torch.randn_like(output)
d_twiddle, d_input = torch.autograd.grad(output, (factor.ABCD, prev), grad, retain_graph=True)
d_twiddle_slow, d_input_slow = torch.autograd.grad(output_slow, (factor.ABCD, prev), grad, retain_graph=True)
self.assertTrue(torch.allclose(d_twiddle, d_twiddle_slow, rtol=self.rtol, atol=self.atol), (factor.size, (d_twiddle - d_twiddle_slow).abs().max().item()))
self.assertTrue(torch.allclose(d_input, d_input_slow, rtol=self.rtol, atol=self.atol), (d_input - d_input_slow).abs().max().item())
def test_butterfly_factor_intermediate_cpu(self):
batch_size = 10
n = 4096
B = Block2x2DiagProduct(n)
input_ = torch.randn(batch_size, n, requires_grad=True)
twiddle = twiddle_list_concat(B).unsqueeze(0)
output_intermediate = butterfly_multiply_intermediate(twiddle, input_)
output = [input_]
for factor in B.factors[::-1]:
output.append(butterfly_factor_mult(factor.ABCD, output[-1].view(-1, 2, factor.size // 2)).view(output[-1].shape))
output = torch.stack(output)
self.assertTrue(torch.allclose(output_intermediate.squeeze(2), output, rtol=self.rtol, atol=self.atol), (output_intermediate.squeeze(2) - output).abs().max().item())
grad = torch.randn_like(output[-1])
d_twiddle_intermediate, d_input_intermediate = butterfly_multiply_intermediate_backward(grad.unsqueeze(1), twiddle, output_intermediate)
output[-1].backward(grad, retain_graph=True)
d_input = input_.grad
d_twiddle = torch.cat([factor.ABCD.grad.permute(2, 0, 1) for factor in B.factors[::-1]])
self.assertTrue(torch.allclose(d_input_intermediate, d_input, rtol=self.rtol, atol=self.atol), (d_input_intermediate - d_input).abs().max().item())
self.assertTrue(torch.allclose(d_twiddle_intermediate, d_twiddle, rtol=self.rtol, atol=self.atol), (d_twiddle_intermediate - d_twiddle).abs().max().item())
def test_butterfly_factor_intermediate_complex_cpu(self):
batch_size = 10
n = 4096
B = Block2x2DiagProduct(n, complex=True)
input_ = torch.randn(batch_size, n, 2, requires_grad=True)
twiddle = twiddle_list_concat(B).unsqueeze(0)
output_intermediate = butterfly_multiply_intermediate(twiddle, input_)
output = [input_]
for factor in B.factors[::-1]:
output.append(butterfly_factor_mult(factor.ABCD, output[-1].view(-1, 2, factor.size // 2, 2)).view(output[-1].shape))
output = torch.stack(output)
self.assertTrue(torch.allclose(output_intermediate.squeeze(2), output, rtol=self.rtol, atol=self.atol), (output_intermediate.squeeze(2) - output).abs().max().item())
grad = torch.randn_like(output[-1])
d_twiddle_intermediate, d_input_intermediate = butterfly_multiply_intermediate_backward(grad.unsqueeze(1), twiddle, output_intermediate)
output[-1].backward(grad, retain_graph=True)
d_input = input_.grad
d_twiddle = torch.cat([factor.ABCD.grad.permute(2, 0, 1, 3) for factor in B.factors[::-1]])
self.assertTrue(torch.allclose(d_input_intermediate, d_input, rtol=self.rtol, atol=self.atol), (d_input_intermediate - d_input).abs().max().item())
self.assertTrue(torch.allclose(d_twiddle_intermediate, d_twiddle, rtol=self.rtol, atol=self.atol), (d_twiddle_intermediate - d_twiddle).abs().max().item())
@unittest.skipIf(not torch.cuda.is_available(), "need CUDA")
def test_butterfly_factor_intermediate_cuda(self):
batch_size = 10
n = 4096
B = Block2x2DiagProduct(n).to('cuda')
input_ = torch.randn(batch_size, n, device='cuda', requires_grad=True)
twiddle = twiddle_list_concat(B).unsqueeze(0)
output_intermediate = butterfly_multiply_intermediate(twiddle, input_)
output = [input_]
for factor in B.factors[::-1]:
output.append(butterfly_factor_mult(factor.ABCD, output[-1].view(-1, 2, factor.size // 2)).view(output[-1].shape))
output = torch.stack(output)
self.assertTrue(torch.allclose(output_intermediate.squeeze(2), output, rtol=self.rtol, atol=self.atol), (output_intermediate.squeeze(2) - output).abs().max().item())
grad = torch.randn_like(output[-1])
d_twiddle_intermediate, d_input_intermediate = butterfly_multiply_intermediate_backward(grad.unsqueeze(1), twiddle, output_intermediate)
output[-1].backward(grad, retain_graph=True)
d_input = input_.grad
d_twiddle = torch.cat([factor.ABCD.grad.permute(2, 0, 1) for factor in B.factors[::-1]])
self.assertTrue(torch.allclose(d_input_intermediate, d_input, rtol=self.rtol, atol=self.atol), (d_input_intermediate - d_input).abs().max().item())
self.assertTrue(torch.allclose(d_twiddle_intermediate, d_twiddle, rtol=self.rtol, atol=self.atol), (d_twiddle_intermediate - d_twiddle).abs().max().item())
@unittest.skipIf(not torch.cuda.is_available(), "need CUDA")
def test_butterfly_factor_intermediate_complex_cuda(self):
batch_size = 10
n = 4096
B = Block2x2DiagProduct(n, complex=True).to('cuda')
input_ = torch.randn(batch_size, n, 2, device='cuda', requires_grad=True)
twiddle = twiddle_list_concat(B).unsqueeze(0)
output_intermediate = butterfly_multiply_intermediate(twiddle, input_)
output = [input_]
for factor in B.factors[::-1]:
output.append(butterfly_factor_mult(factor.ABCD, output[-1].view(-1, 2, factor.size // 2, 2)).view(output[-1].shape))
output = torch.stack(output)
self.assertTrue(torch.allclose(output_intermediate.squeeze(2), output, rtol=self.rtol, atol=self.atol), (output_intermediate.squeeze(2) - output).abs().max().item())
grad = torch.randn_like(output[-1])
d_twiddle_intermediate, d_input_intermediate = butterfly_multiply_intermediate_backward(grad.unsqueeze(1), twiddle, output_intermediate)
output[-1].backward(grad, retain_graph=True)
d_input = input_.grad
d_twiddle = torch.cat([factor.ABCD.grad.permute(2, 0, 1, 3) for factor in B.factors[::-1]])
self.assertTrue(torch.allclose(d_input_intermediate, d_input, rtol=self.rtol, atol=self.atol), (d_input_intermediate - d_input).abs().max().item())
self.assertTrue(torch.allclose(d_twiddle_intermediate, d_twiddle, rtol=self.rtol, atol=self.atol), (d_twiddle_intermediate - d_twiddle).abs().max().item())
if __name__ == "__main__":
unittest.main()
# batch_size = 2
# n = 4
# B = Block2x2DiagProduct(n).to('cuda')
# # input_ = torch.randn(batch_size, n, device='cuda', requires_grad=True)
# input_ = torch.arange(batch_size * n, dtype=torch.float, device='cuda', requires_grad=True).view(batch_size, n)
# output = input_
# factor = B.factors[0]
# prev = output
# output = butterfly_factor_mult(factor.ABCD, output.view(-1, 2, factor.size // 2)).view(prev.shape)
# output_slow = ((factor.ABCD * prev.view(-1, 1, 2, factor.size // 2)).sum(dim=-2)).view(prev.shape)
# grad = input_
# d_twiddle, d_input = torch.autograd.grad(output, (factor.ABCD, prev), grad, retain_graph=True)
# d_twiddle_slow, d_input_slow = torch.autograd.grad(output_slow, (factor.ABCD, prev), grad, retain_graph=True)
# print(d_twiddle)
# print(d_twiddle_slow)
# print((factor.size, (d_twiddle - d_twiddle_slow).abs().max().item()))
443484
from compas.geometry import Circle
# These imports are used to check __repr__.
from compas.geometry import Plane # noqa: F401
from compas.geometry import Point # noqa: F401
from compas.geometry import Vector # noqa: F401
def test_circle():
point = [0, 0, 0]
vector = [1, 0, 0]
plane = (point, vector)
c = Circle(plane, 1.0)
assert c.radius == 1.0
assert c.plane == plane
def test_equality():
point = [0, 0, 0]
vector = [1, 0, 0]
plane = (point, vector)
c = Circle(plane, 1.0)
assert c == (plane, 1.0)
assert c == Circle(plane, 1.0)
assert c != 1.0
assert c != (plane, 2.0)
def test___repr__():
point = [0, 0, 0]
vector = [1, 0, 0]
plane = (point, vector)
c = Circle(plane, 1.0)
assert c == eval(repr(c))
def test___getitem__():
point = [0, 0, 0]
vector = [1, 0, 0]
plane = (point, vector)
c = Circle(plane, 1.0)
assert c[0] == plane
assert c[1] == 1.0
443495
import atexit
import signal
import sys
from threading import Timer
import pkg_resources
import sqlite3
from flask import Flask
from . import config
from . import utils
from .apputils import mx_request
def run_sql(filename):
conn = sqlite3.connect(config.db_name)
c = conn.cursor()
cmds = pkg_resources.resource_string(__name__, 'sql/' + filename).decode('utf8')
for cmd in cmds.split(';\n\n'):
c.execute(cmd)
conn.commit()
conn.close()
def initial_setup():
# TODO use alembic
run_sql('db_prep.sql')
# TODO non-blocking and error checking
r = mx_request('GET', f'/_matrix/client/r0/profile/{config.as_botname}/displayname', wait=True)
if r.status_code == 404:
r = mx_request('POST', '/_matrix/client/r0/register', wait=True,
json={
'type': 'm.login.application_service',
'username': config.as_botname[1:].split(':')[0]
})
if r.status_code == 200:
initial_setup()
else:
return
elif r.status_code != 200 or utils.get_from_dict(r.json(), 'displayname') != config.as_disname:
mx_request('PUT', f'/_matrix/client/r0/profile/{config.as_botname}/displayname', wait=True,
json={'displayname': config.as_disname})
if config.as_avatar == '':
return
r = mx_request('GET', f'/_matrix/client/r0/profile/{config.as_botname}/avatar_url', wait=True)
if r.status_code != 200 or utils.get_from_dict(r.json(), 'avatar_url') != config.as_avatar:
mx_request('PUT', f'/_matrix/client/r0/profile/{config.as_botname}/avatar_url', wait=True,
json={'avatar_url': config.as_avatar})
def leave_bad_rooms():
app = Flask(__name__)
with app.app_context():
c = utils.get_db_conn().cursor()
r = mx_request('GET', '/_matrix/client/r0/joined_rooms')
for room_id in r.json()['joined_rooms']:
c.execute('SELECT 1 FROM rooms WHERE room_id=?', (room_id,))
room_found = utils.fetchone_single(c)
if not room_found:
c.execute('SELECT 1 FROM control_rooms WHERE room_id=?', (room_id,))
room_found = utils.fetchone_single(c)
if not room_found:
mx_request('POST', f'/_matrix/client/r0/rooms/{room_id}/leave')
timer = None
def update_presence():
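    # Keep the bot shown as online by re-arming a 20 second timer after each update.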
mx_request('PUT', f'/_matrix/client/r0/presence/{config.as_botname}/status', wait=True,
json={'presence': 'online'}, verbose=False)
global timer
timer = Timer(20.0, update_presence)
timer.start()
def sighandler(sig, frame):
print(f'Caught signal {sig}')
global timer
    if timer is not None:
timer.cancel()
sys.exit(0)
def on_exit():
print('Shutting down')
mx_request('PUT', f'/_matrix/client/r0/presence/{config.as_botname}/status',
json={'presence': 'offline'})
def prep():
initial_setup()
# TODO is there any other missed state to sync?
leave_bad_rooms()
for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGQUIT]:
signal.signal(sig, sighandler)
update_presence()
atexit.register(on_exit)
443499
from coverage.control import Coverage
from coveralls.report import CoverallsReporter
class coveralls(Coverage):
def coveralls(self, base_dir, ignore_errors=False, merge_file=None):
reporter = CoverallsReporter(self, self.config)
reporter.find_file_reporters(None)
return reporter.report(base_dir, ignore_errors=ignore_errors, merge_file=merge_file)
443531
from abc import ABC, abstractmethod
from pprint import pformat
from syndicate.core.resources.helper import filter_dict_by_shape
class AbstractExternalResource(ABC):
@abstractmethod
def define_resource_shape(self):
pass
@abstractmethod
def describe_meta(self, name):
pass
def compare_meta(self, name, syndicate_meta):
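        """Compare syndicate-side meta for `name` with the deployed AWS meta.

        Both sides are filtered to the resource shape first; returns an error
        description string if the resource is missing or the metas differ,
        and None otherwise.
        """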
aws_meta = self.describe_meta(name)
resource_shape = self.define_resource_shape()
syndicate_meta = self.filter_meta(
meta=syndicate_meta,
shape=resource_shape
)
aws_resource_meta = aws_meta.get(name)
if not aws_resource_meta:
return f"External resource '{name}' does not exist"
aws_meta = self.filter_meta(
meta=aws_resource_meta,
shape=resource_shape
)
for key in syndicate_meta.keys():
syndicate_value = syndicate_meta.get(key)
aws_value = aws_meta.get(key)
if isinstance(aws_value, list):
try: # sorting flat list
syndicate_value.sort()
aws_value.sort()
except TypeError: # sorting list of dicts
sort_key = list(aws_value[0].keys())[0]
syndicate_value.sort(key=lambda k: k[sort_key])
aws_value.sort(key=lambda k: k[sort_key])
if syndicate_meta != aws_meta:
return self.get_errors(
resource_name=name,
syndicate_meta=syndicate_meta,
aws_meta=aws_meta
)
@staticmethod
def filter_meta(meta, shape):
return filter_dict_by_shape(meta, shape)
@staticmethod
def get_errors(resource_name, syndicate_meta, aws_meta):
errors = [f"'{resource_name}' resource meta mismatch:"]
for key in syndicate_meta.keys():
syndicate_value = syndicate_meta.get(key)
aws_value = aws_meta.get(key)
if isinstance(aws_value, (str, int)) and syndicate_value != aws_value:
errors.append(f"Expected '{key}' value: '{pformat(syndicate_value)}',\nGot '{pformat(aws_value)}'")
if isinstance(aws_value, list):
for aws_item, syndicate_item in zip(aws_value, syndicate_value):
if aws_item != syndicate_item:
errors.append(
f"Expected '{key}' value: '{pformat(syndicate_item)}',\nGot '{pformat(aws_item)}'")
return '\n'.join(errors)
443573
import unittest
from marqeta.errors import MarqetaError
from tests.lib.client import get_client
class TestUsersNotesSave(unittest.TestCase):
"""Tests for the users.notes.save endpoint."""
@classmethod
def setUpClass(cls):
"""Setup for all the tests in the class."""
cls.client = get_client()
def verify_user_note(self, response, verify):
"""
Verifies a user note matches the expected values.
Parameters:
response (CardholderNoteResponseModel): The response to verify.
verify (Dictionary): The values that should be in the response.
"""
# Verify the correct class is being tested
actual = response.__class__.__name__
expected = 'CardholderNoteResponseModel'
self.assertEqual(actual, expected, 'Unexpected response found')
# Verify the expected attributes are defined
expected_attributes = [
'token',
'description',
'created_by',
'private',
'created_time',
'last_modified_time'
]
for attribute in expected_attributes:
with self.subTest(f'{attribute} is not defined'):
self.assertIsNotNone(getattr(response, attribute))
# Verify values match expected values
match_attributes = list(verify.keys())
for attribute in match_attributes:
with self.subTest(f'{attribute} does not match the expected value'):
self.assertEqual(getattr(response, attribute),
verify[attribute])
def test_notes_save(self):
"""Update a note."""
user = self.client.users.create({})
note_request = {
'description': 'B Sharp',
'created_by': '<NAME>',
'created_by_user_role': 'USER'
}
note = self.client.users(user.token).notes.create(note_request)
updated_note_request = {'description': 'A Flat'}
updated_note = self.client.users(user.token).notes.save(
note.token, updated_note_request)
self.verify_user_note(updated_note, updated_note_request)
def test_notes_save_no_info(self):
"""Update a note with no information."""
user = self.client.users.create({})
note_request = {
'description': 'B Sharp',
'created_by': '<NAME>',
'created_by_user_role': 'USER'
}
note = self.client.users(user.token).notes.create(note_request)
updated_note = {}
with self.assertRaises(MarqetaError):
self.client.users(user.token).notes.save(note.token, updated_note)
443586
import operator
from phidl.quickplotter import _get_layerprop
def write_lyp(filename, layerset):
""" Creates a KLayout .lyp Layer Properties file from a set of
PHIDL layers """
stipple_default = ['I2','I5','I9','I17','I19','I22','I33','I38']
stipple_count = 0
if filename[-4:] != '.lyp': filename = filename + '.lyp'
# Opening file for writing
with open('%s' % filename,'w+') as f:
# Writing header string
f.write('<?xml version="1.0" encoding="utf-8"?>\n')
# Writing layer properties opener
f.write('<layer-properties>\n')
unsorted_layers = layerset._layers.values()
sorted_layers = sorted(unsorted_layers, key = operator.attrgetter('gds_layer', 'gds_datatype'))
for layer in sorted_layers:
# Extracting information from dictionary layer by layer
gds_layer = layer.gds_layer
gds_datatype = layer.gds_datatype
color = layer.color
name = '%s/%s - ' % (str(gds_layer), str(gds_datatype)) + layer.name
if layer.description is not None:
name = name + ' - (' + layer.description + ')'
# Setting stipple or 'dither'
dither = layer.dither
if dither is None:
dither = stipple_default[stipple_count]
stipple_count = (stipple_count + 1) % len(stipple_default)
elif dither[0] != 'I':
raise ValueError("""Stipple must begin with an I""")
elif int(dither[1:len(dither)]) < 0:
raise ValueError("""Stipple index cannot be less than 0""")
elif int(dither[1:len(dither)]) > 46:
raise ValueError("""Stipple index cannot be greater than 46""")
else:
pass
            # Writing properties header for specific layer
f.write(' <properties>\n')
# Writing line to specify frame colour
f.write(' <frame-color>%s</frame-color>\n' % color)
# Writing line to specify fill colour
f.write(' <fill-color>%s</fill-color>\n' % color)
# # Writing line to specify brightness (value between [-255, 255])
# f.write(' <frame-brightness>%s</frame-brightness>\n <fill-brightness>%s</fill-brightness>\n' % (int(brightness), int(brightness)))
frame_brightness = -25
f.write(' <frame-brightness>%s</frame-brightness>\n' % (int(frame_brightness)))
# Writing line to specify dither pattern
f.write(' <dither-pattern>%s</dither-pattern>\n' % dither)
# Writing lines to specify line style
f.write(' <line-style/>\n')
# Writing line to specify validity
f.write(' <valid>true</valid>\n')
# Writing line to specify visibility
f.write(' <visible>true</visible>\n')
# Writing line to specify transparency
f.write(' <transparent>false</transparent>\n')
# Writing line to specify width
f.write(' <width/>\n')
            # Writing line to specify markedness
f.write(' <marked>false</marked>\n')
# Writing line to specify xfill
f.write(' <xfill>false</xfill>\n')
# Writing line to specify animation
f.write(' <animation>0</animation>\n')
# Writing line to specify layer name
f.write(' <name>%s</name>\n' % name)
# Writing line to specify source
f.write(' <source>%s/%s@1</source>\n' % (str(gds_layer), str(gds_datatype)))
# Writing properties closer for specific layer
f.write(' </properties>\n')
# Writing layer properties trailer
f.write('</layer-properties>\n')
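
# Usage sketch (the layer definition shown is illustrative):
#   from phidl.device_layout import LayerSet
#   ls = LayerSet()
#   ls.add_layer(name='wg', gds_layer=1, gds_datatype=0, color='#ff9999')
#   write_lyp('my_layers', ls)   # writes my_layers.lyp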
def load_lyp(filename):
''' Creates a LayerSet object from a lyp file that is XML '''
try:
import xmltodict
    except ImportError:
raise ImportError("""This function is in development, and currently requires
the module "xmltodict" to operate. Please retry after installing xmltodict
$ pip install xmltodict """)
from phidl.device_layout import LayerSet
if filename[-4:] != '.lyp': filename = filename + '.lyp'
with open(filename, 'r') as fx:
lyp_dict = xmltodict.parse(fx.read(), process_namespaces=True)
# lyp files have a top level that just has one dict: layer-properties
# That has multiple children 'properties', each for a layer. So it gives a list
lyp_list = lyp_dict['layer-properties']['properties']
if not isinstance(lyp_list, list):
lyp_list = [lyp_list]
lys = LayerSet()
def add_entry(entry, lys):
''' Entry is a dict of one element of 'properties'.
No return value. It adds it to the lys variable directly
'''
layerInfo = entry['source'].split('@')[0]
phidl_LayerArgs = dict()
phidl_LayerArgs['gds_layer'] = int(layerInfo.split('/')[0])
phidl_LayerArgs['gds_datatype'] = int(layerInfo.split('/')[1])
phidl_LayerArgs['color'] = entry['fill-color']
phidl_LayerArgs['dither'] = entry['dither-pattern']
# These functions are customizable. See below
phidl_LayerArgs['name'] = name2shortName(entry['name'])
phidl_LayerArgs['description'] = name2description(entry['name'])
lys.add_layer(**phidl_LayerArgs)
return lys
for entry in lyp_list:
try:
group_members = entry['group-members']
except KeyError: # it is a real layer
add_entry(entry, lys)
else: # it is a group of other entries
if not isinstance(group_members, list):
group_members = [group_members]
for member in group_members:
add_entry(member, lys)
return lys
def name2shortName(name_str):
''' Maps the name entry of the lyp element to a name of the phidl layer,
i.e. the dictionary key used to access it.
Default format of the lyp name is
layer/datatype - phidl_key - description
or
phidl_key - description
Reassign for different layer naming conventions with::
phidl.utilities.name2shortName = someOtherFunction(string)
'''
if name_str is None:
raise IOError('This layer has no name')
components = name_str.split(' - ')
if len(components) > 1:
short_name = components[1]
else:
short_name = components[0]
return short_name
def name2description(name_str):
''' Gets the description of the layer contained in the lyp name field.
It is not strictly necessary to have a description. If none there, it returns ''.
Default format of the lyp name is
layer/datatype - phidl_key - description
or
phidl_key - description
Reassign for different layer naming conventions with::
phidl.utilities.name2description = someOtherFunction(string)
'''
if name_str is None:
raise IOError('This layer has no name')
components = name_str.split(' - ')
description = ''
if len(components) > 2:
description = components[2][1:-1]
return description
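# Example (a sketch of the default name convention):
#   name2shortName('1/0 - wg - (waveguide layer)')   -> 'wg'
#   name2description('1/0 - wg - (waveguide layer)') -> 'waveguide layer'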
def write_svg(D, filename, scale = 1):
xsize, ysize = D.size
dcx, dcy = D.center
dx, dy = dcx-xsize/2, dcy-ysize/2
group_num = 1
if filename[-4:] != '.svg': filename += '.svg'
with open(filename, 'w+') as f:
f.write('<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n')
f.write(('<svg\n width="%0.6f" \n height="%0.6f"\n'
' version="1.1"\n'
' xmlns:svg="http://www.w3.org/2000/svg"\n'
' xmlns="http://www.w3.org/2000/svg">\n')
% (xsize*scale, ysize*scale))
all_polygons = D.get_polygons(by_spec = True)
for layer, polygons in all_polygons.items():
# color = '#800000'
            color = _get_layerprop(layer=layer[0], datatype=layer[1])['color']
f.write(' <g id="layer%03i_datatype%03i">\n' % (layer[0], layer[1]))
group_num += 1
for polygon in polygons:
poly_str = ' <path style="fill:%s"\n d="' % color
n = 0
for p in polygon:
if n == 0: poly_str+= 'M '
else: poly_str+= 'L '
poly_str += '%0.6f %0.6f ' % ((p[0]-dx)*scale,(-(p[1]-dy)+ysize)*scale)
n += 1
poly_str+= 'Z"/>\n'
f.write(poly_str)
f.write(' </g>\n')
f.write('</svg>\n')
return filename
|
443613
|
import pytest
import six
from dbnd import dbnd_config, relative_path
from dbnd.testing.helpers import run_test_notebook
from dbnd.testing.helpers_pytest import assert_run_task
from dbnd_examples.data import data_repo, dbnd_examples_data_path
from dbnd_examples.orchestration.examples import wine_quality
from dbnd_examples.orchestration.examples.wine_quality_as_task_class import (
PredictWineQuality,
PredictWineQualityParameterSearch,
)
class TestWineQualityClasses(object):
def test_wine_quality_class(self):
task = PredictWineQualityParameterSearch(alpha_step=0.4)
assert_run_task(task)
def test_wine_quality_deco_search(self):
task = wine_quality.predict_wine_quality_parameter_search.t(
alpha_step=0.5,
override={wine_quality.predict_wine_quality.t.data: data_repo.wines},
)
assert_run_task(task)
def test_wine_quality_deco_with_custom_data(self):
task = wine_quality.predict_wine_quality.t(alpha=0.5, data=data_repo.wines)
assert_run_task(task)
@pytest.mark.skipif(not six.PY3, reason="requires python3")
def test_wine_quality_deco_simple_all(self):
with dbnd_config(
{"local_prod": {"_from": "local", "env_label": "prod", "production": True}}
):
task = wine_quality.predict_wine_quality.t(
alpha=0.5, override={wine_quality.fetch_data.t.task_env: "local_prod"},
)
assert_run_task(task)
def test_wine_quality_deco_simple(self):
task = wine_quality.predict_wine_quality.t(alpha=0.5)
assert_run_task(task)
def test_wine_quality_gz(self):
task = PredictWineQuality(data=dbnd_examples_data_path("wine_quality.csv.gz"))
assert_run_task(task)
def test_prepare_data(self):
task = wine_quality.prepare_data.t(
raw_data=dbnd_examples_data_path("wine_quality.csv.gz")
)
assert_run_task(task)
# TODO: Fix this, it works locally in tox but not in CI
# https://app.asana.com/0/1199880094608584/1200788284410456
@pytest.mark.skip("doesnt pass in ci")
def test_notebook(self):
ipynb = relative_path(wine_quality.__file__, "wine_quality_as_notebook.ipynb")
run_test_notebook(ipynb)
|
443633
|
import unittest
import logging
from hearthstone.simulator.core import player
class BattleGroundsTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
logging.basicConfig(level=logging.DEBUG)
player.TEST_MODE = True
|
443644
|
from django.conf.urls import url
from .views import (
checkout_address_create_view,
checkout_address_reuse_view,
AddressListView,
AddressUpdateView,
AddressDeleteView,
AddressCreateView
)
urlpatterns = [
url(r'^checkout/create/$', checkout_address_create_view, name='checkout_address_create'),
url(r'^checkout/reuse/$', checkout_address_reuse_view, name='checkout_address_reuse'),
url(r'^list/$', AddressListView.as_view(), name='list'),
url(r'^create/$', AddressCreateView.as_view(), name='create'),
url(r'^update/(?P<address_id>\d+)/$', AddressUpdateView.as_view(), name='update'),
url(r'^delete/(?P<address_id>\d+)/$', AddressDeleteView.as_view(), name='delete')
]
|
443650
|
def load_setting(key):
# Just some dummy values for demo
if key == 'key':
return '123123'
elif key == 'limit':
return 123123
elif key == 'dummylist':
return [1, 2, 3, 4, 5]
elif key == 'crap':
return {'good': "boy"}
def superlazy(key, default):
class T(type(default)):
@staticmethod
def __loader__():
ret = load_setting(key)
if ret:
setattr(T, '__cache__', ret)
delattr(T, '__loader__')
def force(k, v):
def _wrapper(*args, **kwargs):
if hasattr(T, '__loader__'):
T.__loader__()
if hasattr(T, '__cache__'):
c = T.__cache__
func = getattr(c, k)
return func(*args[1:], **kwargs)
return v(*args, **kwargs)
return _wrapper
t = T(default)
    for k, v in type(default).__dict__.items():
if (k in ['__doc__']): continue
setattr(T, k, force(k, v))
return t
# Lazy string
a = superlazy("key", 'hahahaha')
print(a, isinstance(a, str))
# Lazy int
b = superlazy("limit", 9)
print(1 + b, isinstance(b, int))
print(str(b))
# Lazy list
c = superlazy("dummylist", [1, 2, 3])
print(c)
print(c[0])
print(len(c))
# Lazy dict
d = superlazy("crap", {'a': 1, 'b': 2, 'c': 3})
print(isinstance(d, dict))
print(d)
print(d.get('a'))
# Non-existent lazy key: the default value is used
e = superlazy("wer", 321)
print(isinstance(e, int))
print(e)
|
443674
|
import logging
import xml.etree.ElementTree as ET
import base64
from remotepspy.psrp import PSRPParser
class SimpleCommandTracer:
"""A simple PowerShell tracer that attempts to print (and log, if enabled) commands and their output. Does not
support every possible use case fully, but attempts to cover most common, interesting activity.
"""
LOGGER_NAME = 'RemotePSpy.simple_cmd'
def __init__(self):
self.logger = logging.getLogger(SimpleCommandTracer.LOGGER_NAME)
self.prompt_incoming = False
def message(self, destination, message_type, rpid, pipeline_id, data):
if message_type not in PSRPParser.MSG_TYPES:
self.logger.error('Unrecognised MessageType: {}'.format(message_type))
return
if PSRPParser.MSG_TYPES[message_type] == 'CREATE_PIPELINE':
self.msg_create_pipeline(data, rpid, pipeline_id, destination)
elif PSRPParser.MSG_TYPES[message_type] == 'PIPELINE_HOST_CALL':
self.msg_pipeline_host_call(data, rpid, pipeline_id, destination)
elif PSRPParser.MSG_TYPES[message_type] == 'PIPELINE_OUTPUT':
self.msg_pipeline_output(data, rpid, pipeline_id, destination)
def msg_create_pipeline(self, data, rpid, pipeline_id, destination):
if data == '':
self.logger.warning('Empty message data in CREATE_PIPELINE message. Runspace: {}, Pipeline: {}, '
'Destination: {}'.format(rpid, pipeline_id, destination))
return
doc = ET.fromstring(data)
# Find Cmds list
lst = doc.find("MS/Obj[@N='PowerShell']/MS/Obj[@N='Cmds']/LST")
if lst is None:
return
cmds = list(lst)
# Iterate over each command
parsed_cmds = []
for cmd_obj in cmds:
# Everything is under an <MS> in the <Obj>
ms = cmd_obj.find('MS')
if ms is None:
continue
# Find and decode the command
cmd = ms.find("S[@N='Cmd']")
if cmd is None:
continue
cmd = cmd.text
if cmd is None:
continue
# TODO also add handling for any other special commands here, maybe like Out-Default
# If the command is 'prompt', it simply indicates an incoming prompt string value on the pipeline
if cmd == 'prompt':
self.prompt_incoming = True
return
cmd = PSRPParser.deserialize_string(cmd)
final_cmd_str = [cmd] # Will be joined together with space separator
# Find any args
# NOTE: this does not currently support all complex type arguments, only strings and arrays of strings
args = ms.find("Obj[@N='Args']")
            args_lst = args.find('LST') if args is not None else None
if args_lst is not None:
self.get_cmd_args(args_lst, final_cmd_str)
# Join an individual command and its arguments together
parsed_cmds.append(' '.join(final_cmd_str))
# Join commands together
full_cmd_str = ' | '.join(parsed_cmds)
# Output the final result
print(full_cmd_str)
self.logger.info('Runspace: {}, Pipeline: {}, Destination: {}, Command: {}'.format(rpid, pipeline_id,
destination, full_cmd_str))
def msg_pipeline_host_call(self, data, rpid, pipeline_id, destination):
if data == '':
self.logger.warning('Empty message data in PIPELINE_HOST_CALL message. Runspace: {}, Pipeline: {}, '
'Destination: {}'.format(rpid, pipeline_id, destination))
return
doc = ET.fromstring(data)
method = doc.find("MS/Obj[@N='mi']/ToString")
if method is None:
self.logger.error('Could not find method identifier in PIPELINE_HOST_CALL. Runspace: {}, Pipeline: {}, '
'Destination: {}, Data: {}'.format(rpid, pipeline_id, destination, data))
return
method = method.text
# TODO we can support more functions later, the full list is at [MS-PSRP] 2.2.3.17
# (https://msdn.microsoft.com/en-us/library/dd306624.aspx)
if method == 'WriteLine2':
self._pipeline_method_WriteLine2(doc, rpid, pipeline_id, destination)
elif method == 'Write2':
self._pipeline_method_Write2(doc, rpid, pipeline_id, destination)
elif method == 'WriteLine3':
self._pipeline_method_WriteLine3(doc, rpid, pipeline_id, destination)
elif method == 'SetShouldExit':
pass # Nothing really needed to be done
else:
print('[Unsupported PIPELINE_HOST_CALL method: {}]'.format(method))
            self.logger.warning('Unsupported PIPELINE_HOST_CALL method: {}. Runspace: {}, Pipeline: {}, Destination: {}'
''.format(method, rpid, pipeline_id, destination))
def msg_pipeline_output(self, data, rpid, pipeline_id, destination):
if data == '':
self.logger.info('Empty message data in PIPELINE_OUTPUT message. Runspace: {}, Pipeline: {}, '
'Destination: {}'.format(rpid, pipeline_id, destination))
return
doc = ET.fromstring(data)
# If we're expecting an incoming prompt value to display, do so now
if self.prompt_incoming:
if doc.tag != 'S':
self.logger.warning('Unsupported type received for prompt: {}'.format(data))
print('[UNSUPPORTED TYPE RECEIVED FOR PROMPT]:\n{}'.format(data))
return
prompt = doc.text
if prompt is None:
return
prompt = PSRPParser.deserialize_string(prompt, htmldecode=True)
print(prompt, end='', flush=True)
self.logger.info('Runspace: {}, Pipeline: {}, Destination: {}, Prompt: {}'.format(rpid, pipeline_id,
destination, prompt))
self.prompt_incoming = False
else:
# NOTE: most complex types are not yet supported and will be output as raw CLIXML.
tn = doc.find('TN')
if tn is not None:
tns = list(tn)
if len(tns) > 0:
if tns[0].text == 'Selected.Microsoft.PowerShell.Commands.GenericMeasureInfo':
pass # Not believed to be relevant for a simple command trace
elif tns[0].text == 'Selected.System.Management.Automation.CmdletInfo':
pass # Not believed to be relevant for a simple command trace
elif tns[0].text == 'Selected.System.Management.ManagementObject':
self.output_management_object(doc, rpid, pipeline_id, destination)
else:
self.logger.warning('Unsupported type in PIPELINE_OUTPUT: {}'.format(data))
print('[UNSUPPORTED TYPE RECEIVED]:\n{}'.format(data))
else:
# Output any basic types we support. Primitive types are defined in [MS-PSRP] 2.2.5.1.
output = SimpleCommandTracer.deseiralize_element(doc)
if output is not None:
print(output)
self.logger.info('Runspace: {}, Pipeline: {}, Destination: {}, <{}> output: {}'
''.format(rpid, pipeline_id, destination, doc.tag, output))
# Note: Only basic types supported, and not yet fully.
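    # Example (a sketch): primitive CLIXML elements map roughly as follows
    #   deseiralize_element(ET.fromstring('<I32>42</I32>'))  -> '42'
    #   deseiralize_element(ET.fromstring('<Nil/>'))          -> None
    #   deseiralize_element(ET.fromstring('<S>hello</S>'))    -> 'hello' (after PSRP string decoding)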
@staticmethod
def deseiralize_element(elem):
output = None
if elem.tag == 'Nil':
output = None # Just ignore
elif elem.tag == 'S' or elem.tag == 'SBK' or elem.tag == 'Version' or elem.tag == 'URI':
if elem.text is None:
output = ''
else:
output = PSRPParser.deserialize_string(elem.text)
elif elem.tag == 'XD':
if elem.text is None:
output = ''
else:
output = PSRPParser.deserialize_string(elem.text, htmldecode=True)
elif elem.tag == 'GUID':
# Wrap output in curly brackets
output = '{{{}}}'.format(elem.text)
elif elem.tag == 'SecureString':
output = '[SecureString]{}'.format(elem.text)
elif (elem.tag == 'D' or elem.tag == 'Dd' or elem.tag == 'Sg' or elem.tag == 'I64'
or elem.tag == 'U64' or elem.tag == 'I32' or elem.tag == 'U32' or elem.tag == 'I16'
or elem.tag == 'U16' or elem.tag == 'DT' or elem.tag == 'B'):
if elem.text is None:
output = ''
else:
output = elem.text
elif elem.tag == 'C':
output = '[char_code]{}'.format(elem.text)
elif elem.tag == 'BA':
if elem.text is None:
output = "b''"
else:
byte_array = base64.b64decode(elem.text)
output = '{}'.format(byte_array)
elif elem.tag == 'SB':
output = '[signed_byte]{}'.format(elem.text)
elif elem.tag == 'By':
output = '[unsigned_byte]{}'.format(elem.text)
else:
# Types not yet supported fall into here
output = '[unsupported-{}-type]{}'.format(elem.tag, ET.tostring(elem).decode('utf-8'))
return output
def _pipeline_method_WriteLine2(self, doc, rpid, pipeline_id, destination):
output_lst = doc.find("MS/Obj[@N='mp']/LST")
if output_lst is None:
self.logger.debug('Runspace: {}, Pipeline: {}, Destination: {}, WriteLine2() called with no arguments'
''.format(rpid, pipeline_id, destination))
return
for elem in list(output_lst):
output = SimpleCommandTracer.deseiralize_element(elem)
if output is not None:
print(output)
self.logger.info('Runspace: {}, Pipeline: {}, Destination: {}, WriteLine2({})'
''.format(rpid, pipeline_id, destination, output.encode('utf-8')))
def _pipeline_method_Write2(self, doc, rpid, pipeline_id, destination):
self._pipeline_write_with_colours(doc, rpid, pipeline_id, destination, False, 'Write2')
def _pipeline_method_WriteLine3(self, doc, rpid, pipeline_id, destination):
self._pipeline_write_with_colours(doc, rpid, pipeline_id, destination, True, 'WriteLine3')
# Supports the workings of Write2 and WriteLine3, which operate the same except for whether a newline is output.
def _pipeline_write_with_colours(self, doc, rpid, pipeline_id, destination, newline_flag, method_name):
method_args = doc.find("MS/Obj[@N='mp']/LST")
if method_args is None:
self.logger.debug('Runspace: {}, Pipeline: {}, Destination: {}, {}() called with no arguments'
''.format(rpid, pipeline_id, destination, method_name))
return
method_args = list(method_args)
if len(method_args) < 3:
self.logger.error('Runspace: {}, Pipeline: {}, Destination: {}, {}() called with unexpected number of '
'arguments. Expected 3, got {}.'.format(rpid, pipeline_id, destination, method_name,
len(method_args)))
return
# The first 2 args are background and foreground colour, which we do not support here
elem = method_args[2]
output = SimpleCommandTracer.deseiralize_element(elem)
if output is not None:
if newline_flag:
print(output)
else:
print(output, end='', flush=True)
self.logger.info('Runspace: {}, Pipeline: {}, Destination: {}, {}({})'
''.format(rpid, pipeline_id, destination, method_name, output.encode('utf-8')))
def get_cmd_args(self, args_lst, final_cmd_str):
arg_objs = list(args_lst)
for arg_obj in arg_objs:
arg_obj_ms = arg_obj.find('MS')
if arg_obj_ms is None:
continue
for elem in arg_obj_ms:
if elem.tag == 'Nil':
pass # Can just ignore
elif elem.tag == 'S':
# This is just a simple string arg
arg_str = elem.text
if arg_str is not None:
arg_str = PSRPParser.deserialize_string(arg_str)
if ' ' in arg_str.strip():
final_cmd_str.append('"' + arg_str + '"')
else:
final_cmd_str.append(arg_str)
elif elem.tag == 'Obj':
# We may have an array of strings as the argument value
inner_lst = elem.find('LST')
if inner_lst is not None:
values = []
for item in list(inner_lst):
if item.tag == 'S':
item_val = item.text
if item_val is not None:
values.append(PSRPParser.deserialize_string(item_val))
if len(values) > 0:
# Join the argument array values together separated by comma
final_values = ','.join(values)
if ' ' in final_values.strip():
final_cmd_str.append('"' + final_values + '"')
                            else:
                                final_cmd_str.append(final_values)
else:
self.logger.warning('Unsupported type in args list of a cmd in CREATE_PIPELINE message: {}'
''.format(ET.tostring(elem).decode('utf-8')))
print('[UNSUPPORTED ARG TYPE RECEIVED]: {}'.format(ET.tostring(elem).decode('utf-8')))
def output_management_object(self, serialized_element, rpid, pipeline_id, destination):
# Output a set of Strings as property_name:value pairs
ms = serialized_element.find('MS')
if ms is None:
return
for item in list(ms):
if item.tag != 'S':
self.logger.warning('Unsupported type in PIPELINE_OUTPUT, in the <MS> element of a '
'Selected.System.Management.ManagementObject: {}'
''.format(ET.tostring(item).decode('utf-8')))
print('[UNSUPPORTED TYPE RECEIVED]: {}'.format(ET.tostring(item).decode('utf-8')))
continue
value = item.text
prop_name = SimpleCommandTracer.get_property_name(item)
if prop_name is not None:
print('{}: {}'.format(prop_name, value))
self.logger.info("Runspace: {}, Pipeline: {}, Destination: {}, Output: '{}:{}'"
"".format(rpid, pipeline_id, destination, prop_name, value))
else:
print(value)
self.logger.info(
'Runspace: {}, Pipeline: {}, Destination: {}, Output: {}'.format(rpid, pipeline_id, destination,
value))
# Return any property name from the 'N' attribute of an Element.
@staticmethod
def get_property_name(elem):
if 'N' not in elem.keys():
return None
prop_name = elem.get('N')
return PSRPParser.deserialize_string(prop_name)
|
443699
|
import pytest
import tempfile
import uuid
import os
import shutil
import responses
import click
from gigantumcli.server import ServerConfig
@pytest.fixture
def server_config():
"""Fixture to create a Build instance with a test image name that does not exist and cleanup after"""
unit_test_working_dir = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
os.mkdir(unit_test_working_dir)
os.makedirs(os.path.join(unit_test_working_dir, '.labmanager', 'identity'))
yield ServerConfig(working_dir=unit_test_working_dir)
shutil.rmtree(unit_test_working_dir)
class TestServerConfig(object):
@responses.activate
def test_server_discovery_fails(self, server_config):
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/discover.json',
json={},
status=404)
responses.add(responses.GET, 'https://test2.gigantum.com/.well-known/discover.json',
json={},
status=404)
with pytest.raises(click.UsageError):
server_config.add_server("test2.gigantum.com")
@responses.activate
def test_auth_discovery_fails(self, server_config):
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/discover.json',
json={},
status=404)
responses.add(responses.GET, 'https://test2.gigantum.com/.well-known/discover.json',
json={"id": 'another-server',
"name": "Another server",
"git_url": "https://test2.repo.gigantum.com/",
"git_server_type": "gitlab",
"hub_api_url": "https://test2.gigantum.com/api/v1/",
"object_service_url": "https://test2.api.gigantum.com/object-v1/",
"user_search_url": "https://user-search2.us-east-1.cloudsearch.amazonaws.com",
"lfs_enabled": True,
"auth_config_url": "https://test2.gigantum.com/.well-known/auth.json"},
status=200)
responses.add(responses.GET, 'https://test2.gigantum.com/.well-known/auth.json',
json={},
status=404)
with pytest.raises(click.UsageError):
server_config.add_server("https://test2.gigantum.com/")
with pytest.raises(click.UsageError):
server_config.add_server("https://thiswillneverwork.gigantum.com/")
@responses.activate
def test_add_server(self, server_config):
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/discover.json',
json={"id": 'another-server',
"name": "Another server",
"git_url": "https://test2.repo.gigantum.com/",
"git_server_type": "gitlab",
"hub_api_url": "https://test2.gigantum.com/api/v1/",
"object_service_url": "https://test2.api.gigantum.com/object-v1/",
"user_search_url": "https://user-search2.us-east-1.cloudsearch.amazonaws.com",
"lfs_enabled": True,
"auth_config_url": "https://test2.gigantum.com/gigantum/.well-known/auth.json"},
status=200)
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/auth.json',
json={"audience": "test2.api.gigantum.io",
"issuer": "https://test2-auth.gigantum.com",
"signing_algorithm": "RS256",
"public_key_url": "https://test2-auth.gigantum.com/.well-known/jwks.json",
"login_url": "https://test2.gigantum.com/client/login",
"login_type": "auth0",
"auth0_client_id": "0000000000000000"},
status=200)
server_id = server_config.add_server("https://test2.gigantum.com/")
assert server_id == 'another-server'
assert os.path.isfile(os.path.join(server_config.servers_dir, 'another-server.json'))
assert os.path.isdir(os.path.join(server_config.working_dir, 'servers', 'another-server'))
@responses.activate
def test_add_server_already_configured(self, server_config):
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/discover.json',
json={"id": 'another-server',
"name": "Another server",
"git_url": "https://test2.repo.gigantum.com/",
"git_server_type": "gitlab",
"hub_api_url": "https://test2.gigantum.com/api/v1/",
"object_service_url": "https://test2.api.gigantum.com/object-v1/",
"user_search_url": "https://user-search2.us-east-1.cloudsearch.amazonaws.com",
"lfs_enabled": True,
"auth_config_url": "https://test2.gigantum.com/gigantum/.well-known/auth.json"},
status=200)
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/auth.json',
json={"audience": "test2.api.gigantum.io",
"issuer": "https://test2-auth.gigantum.com",
"signing_algorithm": "RS256",
"public_key_url": "https://test2-auth.gigantum.com/.well-known/jwks.json",
"login_url": "https://test2.gigantum.com/client/login",
"login_type": "auth0",
"auth0_client_id": "0000000000000000"},
status=200)
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/discover.json',
json={"id": 'another-server',
"name": "Another server",
"git_url": "https://test2.repo.gigantum.com/",
"git_server_type": "gitlab",
"hub_api_url": "https://test2.gigantum.com/api/v1/",
"object_service_url": "https://test2.api.gigantum.com/object-v1/",
"user_search_url": "https://user-search2.us-east-1.cloudsearch.amazonaws.com",
"lfs_enabled": True,
"auth_config_url": "https://test2.gigantum.com/gigantum/.well-known/auth.json"},
status=200)
server_id = server_config.add_server("https://test2.gigantum.com/")
assert server_id == 'another-server'
assert os.path.isfile(os.path.join(server_config.servers_dir, 'another-server.json'))
assert os.path.isdir(os.path.join(server_config.working_dir, 'servers', 'another-server'))
with pytest.raises(ValueError):
server_config.add_server("https://test2.gigantum.com/")
@responses.activate
def test_list_servers(self, server_config):
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/discover.json',
json={"id": 'another-server',
"name": "Another server",
"git_url": "https://test2.repo.gigantum.com/",
"git_server_type": "gitlab",
"hub_api_url": "https://test2.gigantum.com/api/v1/",
"object_service_url": "https://test2.api.gigantum.com/object-v1/",
"user_search_url": "https://user-search2.us-east-1.cloudsearch.amazonaws.com",
"lfs_enabled": True,
"auth_config_url": "https://test2.gigantum.com/gigantum/.well-known/auth.json"},
status=200)
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/auth.json',
json={"audience": "test2.api.gigantum.io",
"issuer": "https://test2-auth.gigantum.com",
"signing_algorithm": "RS256",
"public_key_url": "https://test2-auth.gigantum.com/.well-known/jwks.json",
"login_url": "https://test2.gigantum.com/client/login",
"login_type": "auth0",
"auth0_client_id": "0000000000000000"},
status=200)
responses.add(responses.GET, 'https://test3.gigantum.com/gigantum/.well-known/discover.json',
json={"id": 'my-server',
"name": "My Server 1",
"git_url": "https://test3.repo.gigantum.com/",
"git_server_type": "gitlab",
"hub_api_url": "https://test3.gigantum.com/api/v1/",
"object_service_url": "https://test3.api.gigantum.com/object-v1/",
"user_search_url": "https://user-search3.us-east-1.cloudsearch.amazonaws.com",
"lfs_enabled": True,
"auth_config_url": "https://test3.gigantum.com/gigantum/.well-known/auth.json"},
status=200)
responses.add(responses.GET, 'https://test3.gigantum.com/gigantum/.well-known/auth.json',
json={"audience": "test3.api.gigantum.io",
"issuer": "https://test3-auth.gigantum.com",
"signing_algorithm": "RS256",
"public_key_url": "https://test3-auth.gigantum.com/.well-known/jwks.json",
"login_url": "https://test3.gigantum.com/client/login",
"login_type": "auth0",
"auth0_client_id": "0000000000000000"},
status=200)
server_id = server_config.add_server("https://test2.gigantum.com/")
assert server_id == 'another-server'
assert os.path.isfile(os.path.join(server_config.servers_dir, 'another-server.json'))
assert os.path.isdir(os.path.join(server_config.working_dir, 'servers', 'another-server'))
server_id = server_config.add_server("https://test3.gigantum.com/")
assert server_id == 'my-server'
assert os.path.isfile(os.path.join(server_config.servers_dir, 'my-server.json'))
assert os.path.isdir(os.path.join(server_config.working_dir, 'servers', 'my-server'))
server_list = server_config.list_servers(should_print=True)
assert len(server_list) == 2
@responses.activate
def test_remove_server_only_one(self, server_config):
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/discover.json',
json={"id": 'another-server',
"name": "Another server",
"git_url": "https://test2.repo.gigantum.com/",
"git_server_type": "gitlab",
"hub_api_url": "https://test2.gigantum.com/api/v1/",
"object_service_url": "https://test2.api.gigantum.com/object-v1/",
"user_search_url": "https://user-search2.us-east-1.cloudsearch.amazonaws.com",
"lfs_enabled": True,
"auth_config_url": "https://test2.gigantum.com/gigantum/.well-known/auth.json"},
status=200)
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/auth.json',
json={"audience": "test2.api.gigantum.io",
"issuer": "https://test2-auth.gigantum.com",
"signing_algorithm": "RS256",
"public_key_url": "https://test2-auth.gigantum.com/.well-known/jwks.json",
"login_url": "https://test2.gigantum.com/client/login",
"login_type": "auth0",
"auth0_client_id": "0000000000000000"},
status=200)
server_id = server_config.add_server("https://test2.gigantum.com/")
os.makedirs(os.path.join(server_config.working_dir, '.labmanager', 'servers'), exist_ok=True)
with open(os.path.join(server_config.working_dir, '.labmanager', 'servers', 'CURRENT'), 'wt') as cf:
cf.write("another-server")
assert server_id == 'another-server'
assert os.path.isfile(os.path.join(server_config.servers_dir, 'another-server.json'))
assert os.path.isdir(os.path.join(server_config.working_dir, 'servers', 'another-server'))
with pytest.raises(ValueError):
server_config.remove_server('another-server')
@responses.activate
def test_remove_server(self, server_config):
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/discover.json',
json={"id": 'another-server',
"name": "Another server",
"git_url": "https://test2.repo.gigantum.com/",
"git_server_type": "gitlab",
"hub_api_url": "https://test2.gigantum.com/api/v1/",
"object_service_url": "https://test2.api.gigantum.com/object-v1/",
"user_search_url": "https://user-search2.us-east-1.cloudsearch.amazonaws.com",
"lfs_enabled": True,
"auth_config_url": "https://test2.gigantum.com/gigantum/.well-known/auth.json"},
status=200)
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/auth.json',
json={"audience": "test2.api.gigantum.io",
"issuer": "https://test2-auth.gigantum.com",
"signing_algorithm": "RS256",
"public_key_url": "https://test2-auth.gigantum.com/.well-known/jwks.json",
"login_url": "https://test2.gigantum.com/client/login",
"login_type": "auth0",
"auth0_client_id": "0000000000000000"},
status=200)
responses.add(responses.GET, 'https://test3.gigantum.com/gigantum/.well-known/discover.json',
json={"id": 'my-server',
"name": "My Server 1",
"git_url": "https://test3.repo.gigantum.com/",
"git_server_type": "gitlab",
"hub_api_url": "https://test3.gigantum.com/api/v1/",
"object_service_url": "https://test3.api.gigantum.com/object-v1/",
"user_search_url": "https://user-search3.us-east-1.cloudsearch.amazonaws.com",
"lfs_enabled": True,
"auth_config_url": "https://test3.gigantum.com/gigantum/.well-known/auth.json"},
status=200)
responses.add(responses.GET, 'https://test3.gigantum.com/gigantum/.well-known/auth.json',
json={"audience": "test3.api.gigantum.io",
"issuer": "https://test3-auth.gigantum.com",
"signing_algorithm": "RS256",
"public_key_url": "https://test3-auth.gigantum.com/.well-known/jwks.json",
"login_url": "https://test3.gigantum.com/client/login",
"login_type": "auth0",
"auth0_client_id": "0000000000000000"},
status=200)
server_id = server_config.add_server("https://test2.gigantum.com/")
assert server_id == 'another-server'
server_id = server_config.add_server("https://test3.gigantum.com/")
assert server_id == 'my-server'
# mock some more stuff
server_file = os.path.join(server_config.servers_dir, "another-server.json")
with open(os.path.join(server_config.servers_dir, 'CURRENT'), 'wt') as cf:
cf.write("another-server")
cached_jwks = os.path.join(server_config.working_dir, '.labmanager', 'identity',
'another-server-jwks.json')
with open(cached_jwks, 'wt') as jf:
jf.write("FAKE DATA")
test_user_data = os.path.join(server_config.working_dir, 'servers', 'another-server',
'TEST_FILE')
with open(test_user_data, 'wt') as jf:
jf.write("FAKE DATA")
assert os.path.isfile(test_user_data)
assert os.path.isfile(cached_jwks)
assert os.path.isfile(server_file)
assert os.path.isdir(os.path.join(server_config.working_dir, 'servers', 'another-server'))
server_config.remove_server('another-server')
assert not os.path.isfile(test_user_data)
assert not os.path.isfile(cached_jwks)
assert not os.path.isfile(server_file)
assert not os.path.isdir(os.path.join(server_config.working_dir, 'servers', 'another-server'))
current_path = os.path.join(server_config.servers_dir, 'CURRENT')
with open(current_path, 'rt') as cf:
assert cf.read() == 'my-server'
|
443704
|
from math import inf
from typing import List


class Solution:
def maximumProduct(self, nums: List[int]) -> int:
min1 = min2 = inf
max1 = max2 = max3 = -inf
for n in nums:
if n < min1:
min2, min1 = min1, n
elif n < min2:
min2 = n
if n > max1:
max3, max2, max1 = max2, max1, n
elif n > max2:
max3, max2 = max2, n
elif n > max3:
max3 = n
return max(min1 * min2 * max1, max1 * max2 * max3)
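# The answer is either the product of the three largest values, or the product of the two
# smallest (possibly negative) values with the largest value. A quick sanity check (a sketch):
#   Solution().maximumProduct([1, 2, 3, 4])     -> 24
#   Solution().maximumProduct([-10, -10, 5, 2]) -> 500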
|
443833
|
from django.contrib import admin
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin
from django.db import models
from systems.models import UserProfile
class UserProfileInline(admin.StackedInline):
model = UserProfile
class InvUserAdmin(UserAdmin):
filter_horizontal = UserAdmin.filter_horizontal + ('groups',)
inlines = [UserProfileInline]
admin.site.unregister(User)
admin.site.register(User, InvUserAdmin)
admin.site.register(UserProfile)
|
443836
|
import urllib
import numpy as np
import torch
from archived.s3 import put_object
from archived.sync import merge_np_bytes
# lambda setting
tmp_bucket = "tmp-grads"
def handler(event, context):
bucket = event['Records'][0]['s3']['bucket']['name']
key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')
print('bucket = {}'.format(bucket))
print('key = {}'.format(key))
num_files = 5
for i in np.arange(num_files):
w = np.random.rand(2, 3).astype(np.float32)
print("the {}-th numpy array".format(i))
print(w)
put_object(tmp_bucket, "weight_" + str(i), w.tobytes())
arr = merge_np_bytes(tmp_bucket, num_files, np.float32, [2, 3])
t = torch.from_numpy(arr)
print(t)
|
443837
|
from importlib import import_module
from django.urls import reverse
from django.shortcuts import _get_queryset
def import_model(path_or_callable):
if hasattr(path_or_callable, '__call__'):
return path_or_callable
else:
assert isinstance(path_or_callable, str)
package, attr = path_or_callable.rsplit('.', 1)
return getattr(import_module(package), attr)
def get_image_model_path():
from django.conf import settings
return getattr(settings, 'WAGTAILIMAGES_IMAGE_MODEL', 'wagtailimages.Image')
def strip_prefix_and_ending_slash(path):
"""
If puput and wagtail are registered with a prefix, it needs to be removed
so the 'entry_page_serve_slug' or 'blog_page_feed_slug' resolutions can work.
    E.g., here with a dynamic (i18n_patterns()) prefix plus a static prefix:
urlpatterns += i18n_patterns(
url(r'^blah/', include('puput.urls')),
url(r'^blah/', include(wagtail_urls)),
)
The prefix is simply the root where Wagtail page are served.
https://github.com/torchbox/wagtail/blob/stable/1.8.x/wagtail/wagtailcore/urls.py#L36
"""
return path.replace(reverse('wagtail_serve', args=[""]), '', 1).rstrip("/")
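# Example (a sketch, assuming Wagtail pages are served under '/blah/'):
#   strip_prefix_and_ending_slash('/blah/blog/my-entry/')  -> 'blog/my-entry'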
def get_object_or_None(klass, *args, **kwargs):
"""
Uses get() to return an object or None if the object does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
Note: Like with get(), a MultipleObjectsReturned will be raised if more than one
object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
return None
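# Example (a sketch; `Entry` is a hypothetical model):
#   entry = get_object_or_None(Entry, slug='hello-world')  # None if no match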
|
443851
|
PROG_NAME = "MTSv"
VERSION = "2.0.0"
DEFAULT_CFG_FNAME = "mtsv.cfg"
DEFAULT_LOG_FNAME = "mtsv_{COMMAND}_{TIMESTAMP}.log"
CONFIG_STRING = """
# NOTE: Changes to the config file in the middle of the pipeline
# may force previous steps to be rerun.
#
# ===============================================================
#
# READPREP: {readprep_description}
#
# ===============================================================
# {fastq_pattern_description}
fastq_pattern: {fastq_pattern_default}
# {fastp_params_description}
fastp_params: {fastp_params_default}
# {kmer_size_description}
kmer_size: {kmer_size_default}
# ===============================================================
#
# BINNING: {binning_description}
#
# ===============================================================
# {database_config_description}
database_config: {database_path}
# {edits_description}
edits: {edits_default}
# {binning_mode_description}
binning_mode: {binning_mode_default}
# {max_hits_description}
max_hits: {max_hits_default}
# {seed_size_description}
# Uncomment (remove "#" before name) to modify this parameter
# This will override the value set for the BINNING_MODE.
# seed_size:
# {min_seeds_description}
# Uncomment (remove "#" before name) to modify this parameter
# This will override the value set for the BINNING_MODE.
# min_seeds:
# {seed_gap_description}
# Uncomment (remove "#" before name) to modify this parameter
# This will override the value set for the BINNING_MODE.
# seed_gap:
# ===============================================================
#
# ANALYSIS: {analyze_description}
#
# ===============================================================
# {filter_rule_description}
filter_rule: {filter_rule_default}
# {filter_value_description}
filter_value: {filter_value_default}
# {filter_column_description}
filter_column: {filter_column_default}
# {datastore_description}
datastore: {datastore}
# {sample_n_kmers_description}
sample_n_kmers: {sample_n_kmers_default}
# {alpha_description}
alpha: {alpha_default}
# {h_description}
h: {h_default}
# {figure_kwargs_description}
figure_kwargs: {figure_kwargs_default}
# ===============================================================
#
# EXTRACT: {extract_description}
#
# ===============================================================
# {extract_taxids_description}
extract_taxids: {extract_taxids_default}
"""
CLUSTER_CONFIG = """
__default__:
cpus: '{threads}'
mem: 5000
log: '{log}.cluster'
jobname: 'mtsv_{rule}'
time: "30:00"
fastp:
jobname: "fastp"
readprep:
jobname: "readprep"
binning:
jobname: "binning"
mem: 32000
cpus: 12
time: "2:00:00"
collapse:
jobname: "collapse"
init_taxdump:
jobname: "init_taxdump"
summary:
jobname: "summary"
time: "2:00:00"
mem: 30000
cpus: 1
filter_candidate_taxa:
jobname: "filter_candidate_taxa"
get_candidates_not_in_database:
jobname: "get_candidates_not_in_database"
random_kmers:
jobname: "random_kmers"
analyze_binning:
jobname: "analyze_binning"
mem: 30000
time: "1:00:00"
analyze_collapse:
jobname: "analyze_collapse"
update_datastore:
jobname: "update_datastore"
analysis:
jobname: "analysis"
analysis_figure:
jobname: "analysis_figure"
analysis_html:
jobname: "analysis_html"
extract:
jobname: "extract"
unaligned_queries:
jobname: "unaligned_queries"
mem: 8000
"""
|
443852
|
from aws_cdk import (
core,
aws_ec2 as ec2,
aws_ecs as ecs,
aws_events as events,
aws_events_targets as events_targets,
aws_ecs_patterns as ecs_patterns,
aws_logs as logs,
aws_cloudformation as cloudformation,
aws_cloudwatch as cw,
aws_applicationautoscaling as aas,
)
class CeleryDefaultServiceStack(cloudformation.NestedStack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
super().__init__(
scope, id, **kwargs,
)
self.celery_default_worker_task = ecs.FargateTaskDefinition(
self, "DefaultCeleryWorkerFargateTask"
)
self.celery_default_worker_task.add_container(
"DefaultCeleryWorkerContaienr",
image=scope.image,
logging=ecs.LogDrivers.aws_logs(
stream_prefix="CeleryDefaultWorkerContainer",
log_retention=logs.RetentionDays.ONE_WEEK,
),
environment=scope.variables.regular_variables,
secrets=scope.variables.secret_variables,
command=[
'celery',
'worker',
'-A',
'backend.celery_app:app',
'-l',
'info',
'-Q',
'default',
'-n',
'worker-default@%h',
],
)
self.celery_default_worker_service = ecs.FargateService(
self,
"DefaultCeleryWorkerService",
task_definition=self.celery_default_worker_task,
assign_public_ip=True,
cluster=scope.ecs.cluster,
desired_count=0,
security_group=ec2.SecurityGroup.from_security_group_id(
self,
"CeleryDefaultWorkerSG",
security_group_id=scope.vpc.vpc_default_security_group,
),
)
scope.backend_assets_bucket.grant_read_write(
self.celery_default_worker_service.task_definition.task_role
)
for secret in [scope.variables.django_secret_key, scope.rds.db_secret]:
secret.grant_read(
self.celery_default_worker_service.task_definition.task_role
)
self.default_celery_queue_cw_metric = cw.Metric(
namespace=scope.full_app_name, metric_name="default"
)
self.celery_default_queue_asg = self.celery_default_worker_service.auto_scale_task_count(
min_capacity=0, max_capacity=2
)
self.celery_default_queue_asg.scale_on_metric(
"CeleryDefaultQueueAutoscaling",
metric=self.default_celery_queue_cw_metric,
scaling_steps=[
aas.ScalingInterval(change=-1, lower=0),
aas.ScalingInterval(change=1, lower=1),
],
adjustment_type=aas.AdjustmentType.CHANGE_IN_CAPACITY,
)
|
443868
|
import fingerprint as fp
import db
import soundfile as sf
import taglib
import hashlib
import os
import glob
import sys
def convert_to_mono(sig):
if len(sig.shape) > 1:
return fp.np.mean(sig, axis=1)
return sig
def read_audiofile(filename):
# get tags
song = taglib.File(filename)
print(filename)
assert song.length < 800, "Maximum allowed song length is 13 minutes"
# get the signal
name, ext = os.path.splitext(filename)
os.system('ffmpeg -loglevel 8 -i "%s" "%s%s"' % (filename, name, '.wav'))
_filename = name + '.wav'
sig, fs = sf.read(_filename) # extract the signal
os.remove(_filename)
return os.path.basename(filename), song.tags, convert_to_mono(sig)
def hash_metadata(filename, tags, bits=51):
basename = os.path.basename(filename).encode('utf-8')
m = hashlib.md5(b'%s' % basename)
return int('0x' + m.hexdigest(), 16) >> (128 - bits)
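# The song ID is the top `bits` bits of the MD5 of the file's basename, e.g. (a sketch):
#   hash_metadata('song.mp3', {})  -> an integer that fits in 51 bits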
def learn_song(filename):
song_name, tags, sig = read_audiofile(filename)
addresses, times = fp.get_addresses(fp.get_filtered_spectrogram(sig))
basename = os.path.basename(filename)
name, ext = os.path.splitext(basename)
db.store(addresses, times, hash_metadata(filename, tags), name)
def learn_songs(_dir):
for filename in glob.glob(_dir + '/**/*.[Mm]p3', recursive=True):
song = taglib.File(filename)
if song.length < 800:
learn_song(filename)
else:
            print('Skipping %s (too long)' % filename)
def identify_clip(filename):
song_name, tags, sig = read_audiofile(filename)
addresses, times = fp.get_addresses(fp.get_filtered_spectrogram(sig))
return db.show_results(db.search(addresses, times), 3)
def identify_from_mic(time=20, sound_device=0):
print("Listening...")
filename = os.path.join(sys.path[0] + '/record.mp3')
os.system('ffmpeg -loglevel 8 -ar 44100 -f alsa -i hw:%d -t %d %s' %
(sound_device, time, filename))
results = identify_clip(filename)
os.remove(filename)
return results
|
443877
|
from django.db import models
class Spammable(models.Model):
spam_flag = models.ManyToManyField("SpammyPosting")
class Meta:
abstract = True
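# Example (a sketch): concrete models inherit the abstract base, and each gets its own
# many-to-many table to SpammyPosting, e.g.
#   class Comment(Spammable):
#       body = models.TextField()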
|
443985
|
from django.urls import include, path
from rest_framework import routers
from .views import Events, CMSDetail, CMSList, ChannelDeserializer
from .viewsets import ChannelViewset, ChatTypeViewset
router = routers.DefaultRouter()
router.register(r"channels", ChannelViewset, base_name="slackchat-channel")
router.register(
r"chat-types", ChatTypeViewset, base_name="slackchat-chat-types"
)
urlpatterns = [
path("api/cms/", ChannelDeserializer.as_view(), name="cms-api"),
path("api/", include(router.urls)),
path("events/", Events.as_view()),
path("cms/", CMSList.as_view(), name="cms-list"),
path("cms/new/", CMSDetail.as_view(), name="cms-detail-new"),
path("cms/<slug:id>/edit/", CMSDetail.as_view(), name="cms-detail-edit"),
]
|
444029
|
import gym
import pandas as pd
import datetime
from recogym.agents import OrganicUserEventCounterAgent, organic_user_count_args
from recogym import build_agent_init
from recogym.agents import Agent
from recogym import Configuration
from recogym import (
gather_agent_stats,
AgentStats
)
from recogym import env_1_args
def produce_agent_stats(
env,
std_env_args,
agent: Agent,
num_products: int,
num_organic_users_to_train: int,
num_users_to_train: int,
num_users_to_score: int,
random_seed: int,
agent_class,
agent_configs,
agent_name: str,
with_cache: bool,
):
stat_epochs = 1
stat_epochs_new_random_seed = True
training_data_samples = tuple([num_users_to_train])
testing_data_samples = num_users_to_score
time_start = datetime.datetime.now()
agent_stats = gather_agent_stats(
env,
std_env_args,
{
'agent': agent,
},
{
**build_agent_init(
agent_name,
agent_class,
{
**agent_configs,
'num_products': num_products,
}
),
},
training_data_samples,
testing_data_samples,
stat_epochs,
stat_epochs_new_random_seed,
num_organic_users_to_train,
with_cache
)
q0_025 = []
q0_500 = []
q0_975 = []
for agent_name in agent_stats[AgentStats.AGENTS]:
agent_values = agent_stats[AgentStats.AGENTS][agent_name]
q0_025.append(agent_values[AgentStats.Q0_025][0])
q0_500.append(agent_values[AgentStats.Q0_500][0])
q0_975.append(agent_values[AgentStats.Q0_975][0])
time_end = datetime.datetime.now()
seconds = (time_end - time_start).total_seconds()
return pd.DataFrame(
{
'q0.025': q0_025,
'q0.500': q0_500,
'q0.975': q0_975,
'time': [seconds],
}
)
def create_agent_and_env_sess_pop(
num_products: int,
num_organic_users_to_train: int,
num_users_to_train: int,
num_users_to_score: int,
random_seed: int,
latent_factor: int,
num_flips: int,
log_epsilon: float,
sigma_omega: float,
agent_class,
agent_configs,
agent_name: str,
with_cache: bool,
reverse_pop=False
):
std_env_args = {
**env_1_args,
'random_seed': random_seed,
'num_products': num_products,
'K': latent_factor,
'sigma_omega': sigma_omega,
'number_of_flips': num_flips
}
env = gym.make('reco-gym-v1')
sess_pop_agent = OrganicUserEventCounterAgent(Configuration({
**organic_user_count_args,
**std_env_args,
'select_randomly': True,
'epsilon': log_epsilon,
'num_products': num_products,
'reverse_pop': reverse_pop
}))
return env, std_env_args, sess_pop_agent
def eval_against_session_pop(
num_products: int,
num_organic_users_to_train: int,
num_users_to_train: int,
num_users_to_score: int,
random_seed: int,
latent_factor: int,
num_flips: int,
log_epsilon: float,
sigma_omega: float,
agent_class,
agent_configs,
agent_name: str,
with_cache: bool,
):
env, std_env_args, agent = create_agent_and_env_sess_pop(num_products,
num_organic_users_to_train,
num_users_to_train,
num_users_to_score,
random_seed,
latent_factor,
num_flips,
log_epsilon,
sigma_omega,
agent_class,
agent_configs,
agent_name,
with_cache,
)
return produce_agent_stats(env, std_env_args, agent, num_products, num_organic_users_to_train, num_users_to_train, num_users_to_score, random_seed, agent_class, agent_configs, agent_name, with_cache)
def first_element(sc, name):
sc['model'] = name
sc['q0.025'] = sc['q0.025'][0]
sc['q0.500'] = sc['q0.500'][0]
sc['q0.975'] = sc['q0.975'][0]
print(sc)
return sc
|
444056
|
def test_i8():
i: i8
i = 5
print(i)
def test_i16():
i: i16
i = 4
print(i)
def test_i32():
i: i32
i = 3
print(i)
def test_i64():
i: i64
i = 2
print(i)
|
444059
|
import numpy as np
import pygsti.algorithms.fiducialselection as fs
import pygsti.circuits as pc
from . import fixtures
from ..util import BaseCase
class FiducialSelectionUtilTester(BaseCase):
def test_build_bitvec_mx(self):
mx = fs.build_bitvec_mx(3, 1)
# TODO assert correctness
class FiducialSelectionStdModel(object):
def setUp(self):
super(FiducialSelectionStdModel, self).setUp()
self.model = fixtures.model.copy()
self.fiducials = fixtures.fiducials
class FiducialSelectionExtendedModel(FiducialSelectionStdModel):
def setUp(self):
super(FiducialSelectionExtendedModel, self).setUp()
self.fiducials = pc.list_all_circuits(fixtures.opLabels, 0, 2)
###
# _find_fiducials_integer_slack
#
class OptimizeIntegerFiducialsBase(object):
def setUp(self):
super(OptimizeIntegerFiducialsBase, self).setUp()
self.options = dict(
verbosity=4
)
def test_optimize_integer_fiducials_slack_frac(self):
fiducials = fs._find_fiducials_integer_slack(
self.model, self.fiducials, slack_frac=0.1, **self.options
)
# TODO assert correctness
def test_optimize_integer_fiducials_slack_fixed(self):
fiducials = fs._find_fiducials_integer_slack(
self.model, self.fiducials, fixed_slack=0.1, **self.options
)
# TODO assert correctness
def test_optimize_integer_fiducials_slack_initial_weights(self):
weights = np.ones(len(self.fiducials), 'i')
fiducials = fs._find_fiducials_integer_slack(
self.model, self.fiducials, fixed_slack=0.1,
initial_weights=weights, **self.options
)
# TODO assert correctness
def test_optimize_integer_fiducials_slack_return_all(self):
fiducials, weights, scores = fs._find_fiducials_integer_slack(
self.model, self.fiducials, slack_frac=0.1, return_all=True,
**self.options
)
# TODO assert correctness
def test_optimize_integer_fiducials_slack_worst_score_func(self):
fiducials = fs._find_fiducials_integer_slack(
self.model, self.fiducials, slack_frac=0.1,
score_func='worst', **self.options
)
# TODO assert correctness
def test_optimize_integer_fiducials_slack_fixed_num(self):
fiducials = fs._find_fiducials_integer_slack(
self.model, self.fiducials, slack_frac=0.1, fixed_num=4,
**self.options
)
# TODO assert correctness
def test_optimize_integer_fiducials_slack_force_empty(self):
fiducials = fs._find_fiducials_integer_slack(
self.model, self.fiducials, slack_frac=0.1, fixed_num=4,
force_empty=False, **self.options
)
# TODO assert correctness
def test_optimize_integer_fiducials_slack_low_max_iterations(self):
fiducials = fs._find_fiducials_integer_slack(
self.model, self.fiducials, slack_frac=0.1, max_iter=1,
**self.options
)
# TODO assert correctness
def test_optimize_integer_fiducials_slack_insufficient_fiducials(self):
insuff_fids = pc.to_circuits([('Gx',)])
weights = np.ones(len(insuff_fids), 'i')
fiducials = fs._find_fiducials_integer_slack(
self.model, insuff_fids, fixed_slack=0.1,
initial_weights=weights, **self.options
)
self.assertIsNone(fiducials)
def test_optimize_integer_fiducials_slack_raises_on_missing_slack_param(self):
with self.assertRaises(ValueError):
fs._find_fiducials_integer_slack(self.model, self.fiducials, **self.options)
class OptimizeIntegerFiducialsExceptionTester(FiducialSelectionStdModel, BaseCase):
def test_optimize_integer_fiducials_slack_raises_on_missing_method(self):
with self.assertRaises(Exception):
fs._find_fiducials_integer_slack(self.model, self.fiducials, fixed_slack=0.1)
class PrepOptimizeIntegerFiducialsStdModelTester(OptimizeIntegerFiducialsBase, FiducialSelectionStdModel, BaseCase):
def setUp(self):
super(PrepOptimizeIntegerFiducialsStdModelTester, self).setUp()
self.options.update(
prep_or_meas="prep"
)
class MeasOptimizeIntegerFiducialsStdModelTester(OptimizeIntegerFiducialsBase, FiducialSelectionStdModel, BaseCase):
def setUp(self):
super(MeasOptimizeIntegerFiducialsStdModelTester, self).setUp()
self.options.update(
prep_or_meas="meas"
)
# LOL explicit is better than implicit, right?
class PrepOptimizeIntegerFiducialsExtendedModelTester(
PrepOptimizeIntegerFiducialsStdModelTester, FiducialSelectionExtendedModel):
pass
class MeasOptimizeIntegerFiducialsExtendedModelTester(
MeasOptimizeIntegerFiducialsStdModelTester, FiducialSelectionExtendedModel):
pass
###
# test_fiducial_list
#
# XXX class names prefixed with "Test" will be picked up by nose
class _TestFiducialListBase(object):
def setUp(self):
super(_TestFiducialListBase, self).setUp()
self.fiducials_list = fs._find_fiducials_integer_slack(
self.model, self.fiducials,
prep_or_meas=self.prep_or_meas, slack_frac=0.1
)
def test_test_fiducial_list(self):
self.assertTrue(fs.test_fiducial_list(
self.model, self.fiducials_list, self.prep_or_meas
))
def test_test_fiducial_list_worst_score_func(self):
self.assertTrue(fs.test_fiducial_list(
self.model, self.fiducials_list, self.prep_or_meas,
score_func='worst'
))
def test_test_fiducial_list_return_all(self):
result, spectrum, score = fs.test_fiducial_list(
self.model, self.fiducials_list, self.prep_or_meas,
return_all=True
)
# TODO assert correctness
class PrepTestFiducialListTester(_TestFiducialListBase, FiducialSelectionStdModel, BaseCase):
prep_or_meas = 'prep'
class MeasTestFiducialListTester(_TestFiducialListBase, FiducialSelectionStdModel, BaseCase):
    prep_or_meas = 'meas'
class TestFiducialListExceptionTester(FiducialSelectionStdModel, BaseCase):
def test_test_fiducial_list_raises_on_bad_method(self):
with self.assertRaises(ValueError):
fs.test_fiducial_list(self.model, None, "foobar")
###
# _find_fiducials_grasp
#
class GraspFiducialOptimizationTester(FiducialSelectionStdModel, BaseCase):
def test_grasp_fiducial_optimization_prep(self):
fiducials = fs._find_fiducials_grasp(
self.model, self.fiducials, prep_or_meas="prep", alpha=0.5,
verbosity=4
)
# TODO assert correctness
def test_grasp_fiducial_optimization_meas(self):
fiducials = fs._find_fiducials_grasp(
self.model, self.fiducials, prep_or_meas="meas", alpha=0.5,
verbosity=4
)
# TODO assert correctness
def test_grasp_fiducial_optimization_raises_on_bad_method(self):
with self.assertRaises(ValueError):
fs._find_fiducials_grasp(
self.model, self.fiducials, prep_or_meas="foobar",
alpha=0.5, verbosity=4
)
|
444214
|
import os
import time
import argparse
import threading
import grpc
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
class Benchmark(object):
"""
num_requests: Number of requests.
max_concurrent: Maximum number of concurrent requests.
"""
def __init__(self, num_requests, max_concurrent):
self._num_requests = num_requests
self._max_concurrent = max_concurrent
self._done = 0
self._active = 0
self._condition = threading.Condition()
def inc_done(self):
with self._condition:
self._done += 1
self._condition.notify()
def dec_active(self):
with self._condition:
self._active -= 1
self._condition.notify()
def throttle(self):
with self._condition:
while self._active == self._max_concurrent:
self._condition.wait()
self._active += 1
def wait(self):
with self._condition:
while self._done < self._num_requests:
self._condition.wait()
def _create_rpc_callback(benchmark):
def _callback(result_future):
exception = result_future.exception()
if exception:
print(exception)
else:
result = result_future.result().outputs['outputs'].int_val
benchmark.inc_done()
benchmark.dec_active()
return _callback
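# A sketch of the flow: the main loop calls benchmark.throttle() before sending each request,
# so at most `max_concurrent` RPCs are in flight; on success the gRPC future's done-callback
# calls inc_done() and dec_active(), releasing a slot and advancing the count that wait() checks.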
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', default=os.getenv('MODEL_NAME', None))
parser.add_argument('--serving_host', default=os.getenv('SERVING_HOST', None))
parser.add_argument('--serving_port', default=os.getenv('SERVING_PORT', '8500'))
parser.add_argument('--num_requests', default=1000)
parser.add_argument('--max_concurrent', default=1)
args = parser.parse_args()
channel = grpc.insecure_channel('{}:{}'.format(args.serving_host, args.serving_port))
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
benchmark = Benchmark(int(args.num_requests), int(args.max_concurrent))
start_time = time.time()
for i in range(int(args.num_requests)):
request = predict_pb2.PredictRequest()
request.model_spec.name = args.model_name
request.model_spec.signature_name = tf.saved_model.signature_constants.PREDICT_METHOD_NAME
request.inputs[tf.saved_model.signature_constants.PREDICT_INPUTS].CopyFrom(
tf.contrib.util.make_tensor_proto([[i % 2**32]], shape=[1, 1]))
benchmark.throttle()
result = stub.Predict.future(request, 10)
result.add_done_callback(_create_rpc_callback(benchmark))
benchmark.wait()
end_time = time.time()
print()
print('{} requests ({} max concurrent)'.format(args.num_requests, args.max_concurrent))
print('{} requests/second'.format(int(args.num_requests)/(end_time-start_time)))
|
444217
|
from pathlib import Path
import cdms2
datapath = (
"/p/user_pub/e3sm/zhang40/analysis_data_e3sm_diags/TRMM/climatology_diurnal_cycle/"
)
for path in Path(datapath).rglob("*.nc"):
print(path.name)
print(path)
filename = path.name.split("/")[-1]
print(filename)
# filename ='TRMM-3B43v-7_3hr_ANN_199801_201312_climo.nc'
f_in = cdms2.open(datapath + filename)
var = f_in("pr")
var = var / 3600.0 * 1000 / 1000.0
var.id = "pr"
f_out = cdms2.open(datapath + "units_fix_" + filename, "w")
f_out.write(var)
att_keys = list(f_in.attributes.keys())
att_dic = {}
for i in range(len(att_keys)):
att_dic[i] = att_keys[i], f_in.attributes[att_keys[i]]
to_out = att_dic[i]
setattr(f_out, to_out[0], to_out[1])
print(var.mean())
f_in.close()
f_out.close()
|
444250
|
import numpy as np
import scipy.signal
def reclassify(array, class_dict):
"""Reclassifies values in a ndarray according to the rules provided in class_dict.
:param array: Array that holds categorical class values. (ndarray).
:param class_dict: Dictionary that maps input class values to output class values. (dict). \n
>>> class_dict = {
>>> "reclass_value_from":[0,1,2,3,4],
>>> "reclass_value_to":[0,1,0,0,0],
>>> }
:returns: Numpy array with binary [0,1] class values. (ndarray).
"""
array_rec = np.zeros((array.shape[0], array.shape[1], 1), dtype=np.uint8)
for i in range(len(class_dict["reclass_value_from"])):
array_rec[array == class_dict["reclass_value_from"][i]] = class_dict["reclass_value_to"][i]
return array_rec.astype(np.uint8)
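# Example (a sketch): map class 1 to 1 and everything else to 0 on a 2x2 array:
#   arr = np.array([[0, 1], [2, 3]])
#   reclassify(arr, {"reclass_value_from": [0, 1, 2, 3], "reclass_value_to": [0, 1, 0, 0]})
#   -> uint8 array of shape (2, 2, 1) with values [[[0], [1]], [[0], [0]]]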
def rolling_window(array, window=(0,), asteps=None, wsteps=None, axes=None, toend=True):
"""Applies a rolling (moving) window to a ndarray.
:param array: Array to which the rolling window is applied (array_like).
:param window: Either a single integer to create a window of only the last axis or a
tuple to create it for the last len(window) axes. 0 can be used as a to ignore a
dimension in the window (int or tuple).
:param asteps: Aligned at the last axis, new steps for the original array, ie. for
creation of non-overlapping windows (tuple).
:param wsteps: Steps for the added window dimensions. These can be 0 to repeat values
along the axis (int or tuple (same size as window)).
:param axes: If given, must have the same size as window. In this case window is
        interpreted as the size in the dimension given by axes. I.e., a window
of (2, 1) is equivalent to window=2 and axis=-2 (int or tuple)
:param toend: If False, the new dimensions are right after the corresponding original
dimension, instead of at the end of the array. Adding the new axes at the
end makes it easier to get the neighborhood, however toend=False will give
a more intuitive result if you view the whole array (bool).
    :returns: A view on `array` which is smaller so that it fits the windows, with the
        window dimensions appended (0s not counting), i.e. every point of `array` is
        an array of size window. (ndarray).
"""
array = np.asarray(array)
orig_shape = np.asarray(array.shape)
window = np.atleast_1d(window).astype(int)
if axes is not None:
axes = np.atleast_1d(axes)
w = np.zeros(array.ndim, dtype=int)
for axis, size in zip(axes, window):
w[axis] = size
window = w
# Check if window is legal:
if window.ndim > 1:
raise ValueError("`window` must be one-dimensional.")
if np.any(window < 0):
raise ValueError("All elements of `window` must be larger then 1.")
if len(array.shape) < len(window):
raise ValueError("`window` length must be less or equal `array` dimension.")
_asteps = np.ones_like(orig_shape)
if asteps is not None:
asteps = np.atleast_1d(asteps)
if asteps.ndim != 1:
raise ValueError("`asteps` must be either a scalar or one dimensional.")
if len(asteps) > array.ndim:
raise ValueError("`asteps` cannot be longer then the `array` dimension.")
# does not enforce alignment, so that steps can be same as window too.
_asteps[-len(asteps) :] = asteps
if np.any(asteps < 1):
raise ValueError("All elements of `asteps` must be larger then 1.")
asteps = _asteps
_wsteps = np.ones_like(window)
if wsteps is not None:
wsteps = np.atleast_1d(wsteps)
if wsteps.shape != window.shape:
raise ValueError("`wsteps` must have the same shape as `window`.")
if np.any(wsteps < 0):
raise ValueError("All elements of `wsteps` must be larger then 0.")
_wsteps[:] = wsteps
_wsteps[window == 0] = 1
wsteps = _wsteps
# Check that the window would not be larger then the original:
if np.any(orig_shape[-len(window) :] < window * wsteps):
raise ValueError("`window` * `wsteps` larger then `array` in at least one dimension.")
new_shape = orig_shape
# For calculating the new shape 0s must act like 1s:
_window = window.copy()
_window[_window == 0] = 1
new_shape[-len(window) :] += wsteps - _window * wsteps
new_shape = (new_shape + asteps - 1) // asteps
    # make sure the new_shape is at least 1 in any "old" dimension (i.e. steps
    # is too large), but we do not care.
new_shape[new_shape < 1] = 1
shape = new_shape
strides = np.asarray(array.strides)
strides *= asteps
new_strides = array.strides[-len(window) :] * wsteps
# The full new shape and strides:
if toend:
new_shape = np.concatenate((shape, window))
new_strides = np.concatenate((strides, new_strides))
else:
_ = np.zeros_like(shape)
_[-len(window) :] = window
_window = _.copy()
_[-len(window) :] = new_strides
_new_strides = _
new_shape = np.zeros(len(shape) * 2, dtype=int)
new_strides = np.zeros(len(shape) * 2, dtype=int)
new_shape[::2] = shape
new_strides[::2] = strides
new_shape[1::2] = _window
new_strides[1::2] = _new_strides
new_strides = new_strides[new_shape != 0]
new_shape = new_shape[new_shape != 0]
return np.lib.stride_tricks.as_strided(array, shape=new_shape, strides=new_strides)
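def _rolling_window_example():
    # Illustrative sketch, not part of the original module: a length-6 vector viewed
    # through a window of 3 with non-overlapping steps of 3 yields two windows.
    a = np.arange(6)
    windows = rolling_window(a, window=3, asteps=3)
    print(windows)
    # expected:
    # [[0 1 2]
    #  [3 4 5]]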
def tile_array(array, xsize=256, ysize=256, overlap=0.1):
"""Splits a ndarray into equally sized tiles with overlap.
:param array: Numpy array of shape (rows, cols, bands). (ndarray).
:param xsize: Xsize of tiles. (int).
:param ysize: Ysize of tiles. (int).
:param overlap: Overlap of tiles between 0.0 and 1.0. (float).
:returns: Numpy array of shape(tiles, rows, cols, bands). (ndarray).
"""
# get dtype and bands from first file
dtype = array.dtype
bands = array.shape[2] if array.ndim == 3 else 1
# get steps
xsteps = int(xsize - (xsize * overlap))
ysteps = int(ysize - (ysize * overlap))
# pad array on all sides to fit all tiles.
# replicate values here instead of filling with nan.
# nan padding would cause issues for standardization and classification later on.
ypad = ysize + 1
xpad = xsize + 1
array = np.pad(
array,
(
(int(ysize * overlap), ypad + int(ysize * overlap)),
(int(xsize * overlap), xpad + int(xsize * overlap)),
(0, 0),
),
mode="symmetric",
)
# tile the data into overlapping patches
# this skips any tile at the end of row and col that exceeds the shape of the input array
# therefore padding the input array is needed beforehand
x_ = rolling_window(array, (xsize, ysize, bands), asteps=(xsteps, ysteps, bands))
# access single tiles and write them to file and/or to ndarray of shape (tiles, rows, cols, bands)
x = []
for i in range(x_.shape[0]):
for j in range(x_.shape[1]):
x.append(x_[i, j, 0, :, :, :])
return np.asarray(x, dtype=dtype)
def untile_array(array_tiled, target_shape, overlap=0.1, smooth_blending=False):
"""Untiles an ndarray back into the original image size.
:param array_tiled: Numpy array of shape (tiles, rows, cols, bands). (ndarray).
:param target_shape: Target shape (rows, cols, bands). (list of int).
:param overlap: Overlap of tiles between 0.0 and 1.0. (float).
:param smooth_blending: Apply smooth tile blending. (bool).
:returns: Numpy array of shape(rows, cols, bands). (ndarray)
"""
# get rows, cols, bands and dtype from first file
dtype = array_tiled.dtype
rows = target_shape[0]
cols = target_shape[1]
bands = target_shape[2]
xsize = array_tiled.shape[1]
ysize = array_tiled.shape[2]
# use overlap to fit image size with fixed tile size
xsteps = int(xsize - (xsize * overlap))
ysteps = int(ysize - (ysize * overlap))
# create target array
# this needs to include any padding applied to the tiled array (same as in tile_array())
array_target = np.zeros(target_shape)
ypad = ysize + 1
xpad = xsize + 1
array_target = np.pad(
array_target,
(
(int(ysize * overlap), ypad + int(ysize * overlap)),
(int(xsize * overlap), xpad + int(xsize * overlap)),
(0, 0),
),
mode="symmetric",
)
# get xtiles and ytiles
x_ = rolling_window(array_target, (xsize, ysize, bands), asteps=(xsteps, ysteps, bands))
xtiles = int(x_.shape[0])
ytiles = int(x_.shape[1])
if smooth_blending:
if overlap > 0.5:
raise ValueError("overlap needs to be <=0.5 when using smooth blending.")
# define tapered cosine function (tukey) to be used for smooth blending
window1d = scipy.signal.tukey(M=xsize, alpha=overlap * 2)
window2d = np.expand_dims(np.expand_dims(window1d, axis=1), axis=2)
window2d = window2d * window2d.transpose(1, 0, 2)
# apply window spline 2d function to each tile
array_tiled = np.array([tile * window2d for tile in array_tiled])
# access single tiles and write them to target array
t = 0
xoffset = 0
for x in range(xtiles):
yoffset = 0
for y in range(ytiles):
array_target[
xoffset * xsteps : xoffset * xsteps + xsize, yoffset * ysteps : yoffset * ysteps + ysize, :
] = (
array_target[
xoffset * xsteps : xoffset * xsteps + xsize, yoffset * ysteps : yoffset * ysteps + ysize, :
]
+ array_tiled[t, :, :, :]
)
t += 1
yoffset += 1
xoffset += 1
else:
# access single tiles and write them to target array
t = 0
xoffset = 0
for x in range(xtiles):
yoffset = 0
for y in range(ytiles):
array_target[
xoffset * xsteps : xoffset * xsteps + xsize, yoffset * ysteps : yoffset * ysteps + ysize, :
] = array_tiled[t, :, :, :]
t += 1
yoffset += 1
xoffset += 1
# crop target array to target shape
# this removes any padding to the array
array_target = array_target[
int(ysize * overlap) : int(ysize * overlap) + rows, int(xsize * overlap) : int(xsize * overlap) + cols, :
]
return array_target.astype(dtype)
def cohen_kappa_score(y_true, y_pred):
"""Computes Cohens Kappa Score.
:param y_true: Array that holds true class values. (ndarray).
:param y_pred: Array that holds predicted class values. (ndarray).
:returns: Cohens Kappa Score. (Float).
"""
if y_true.shape != y_pred.shape:
raise TypeError("y_true.shape must match y_pred.shape")
po = (y_true == y_pred).astype(np.float32).mean()
classes = sorted(set(list(np.concatenate((y_true, y_pred), axis=0))))
mp = {}
for i, c in enumerate(classes):
mp[c] = i
k = len(mp)
sa = np.zeros(shape=(k,), dtype=np.int32)
sb = np.zeros(shape=(k,), dtype=np.int32)
n = y_true.shape[0]
for x, y in zip(list(y_true), list(y_pred)):
sa[mp[x]] += 1
sb[mp[y]] += 1
pe = 0
for i in range(k):
pe += (sa[i] / n) * (sb[i] / n)
kappa = (po - pe) / (1.0 - pe)
return kappa
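if __name__ == "__main__":
    # Minimal round-trip sketch, not part of the original module: tile a random
    # single-band image, untile it back to the original shape, and score two random
    # class vectors with Cohen's kappa.
    np.random.seed(0)
    img = np.random.rand(300, 300, 1).astype(np.float32)
    tiles = tile_array(img, xsize=128, ysize=128, overlap=0.1)
    restored = untile_array(tiles, img.shape, overlap=0.1)
    print(tiles.shape, restored.shape)  # (9, 128, 128, 1) (300, 300, 1)
    y_true = np.random.randint(0, 2, size=100)
    y_pred = np.random.randint(0, 2, size=100)
    print(cohen_kappa_score(y_true, y_pred))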
|
444295
|
import socket
from multiprocessing.pool import ThreadPool
from time import sleep
import six
from kombu import Connection, Consumer, Queue
from kombu.exceptions import MessageStateError
from kombu.utils import nested
from microservices.helpers.logs import InstanceLogger
from microservices.utils import get_logger
_logger = get_logger(__name__)
class HandlerError(Exception):
pass
class DeferredMessage(object):
_methods_for_callbacks = {
'ack', 'reject', 'requeue', 'reject_log_error',
'ack_log_error',
}
def __init__(self, message, deferred_callbacks):
self.message = message
self.deferred_callbacks = deferred_callbacks
@property
def with_deferred_callbacks(self):
return self.deferred_callbacks is not None
def __getattr__(self, item):
entity = getattr(self.message, item)
if self.with_deferred_callbacks:
if item in self._methods_for_callbacks:
return lambda *args, **kwargs: self.deferred_callbacks.append(
lambda: entity(*args, **kwargs)
)
else:
return entity
else:
return entity
@six.python_2_unicode_compatible
class Rule(object):
"""Rule"""
def __init__(self, name, handler, logger, autoack=True,
deferred_callbacks=None, pool=None,
**options):
"""Initialization
:param name: name of queue
:param handler: handle for queue
:param autoack: if true, call message.ack()
"""
self.handler = handler
self.name = name
self.options = options
self.autoack = autoack
self.logger = InstanceLogger(self, logger)
self._name = '<queue: {}>'.format(self.name)
self.deferred_callbacks = deferred_callbacks
self.pool = pool
def __str__(self):
return self._name
@property
def with_deferred_callbacks(self):
return self.deferred_callbacks is not None
def add_to_pool(self, handler):
self.pool.apply_async(handler)
def callback(self, body, message):
message = DeferredMessage(message, self.deferred_callbacks)
self.logger.debug('Data (len: %s) received', len(body))
def autoack():
try:
self.logger.debug('Ack message via autoack')
message.ack()
except ConnectionError as e: # pragma: no cover
self.logger.error('Connection error: %s when try message.ack',
e.strerror)
except MessageStateError:
self.logger.warning(
'ACK() was called in handler?')
def handler():
try:
self.logger.debug('Call handler...')
self.handler(body, HandlerContext(message, self))
except Exception:
self.logger.exception('Something happened in user handler')
raise HandlerError('Something happened in user handler')
if self.autoack:
autoack()
if self.with_deferred_callbacks:
self.logger.debug('Add handler to pool')
self.add_to_pool(handler)
else:
handler()
class HandlerContext(object):
"""Context for handler function"""
def __init__(self, message, rule):
"""Initialization
:param message: original message from kombu
:type message: kombu.Message
:param rule: rule object
:type rule: Rule
"""
self.message = message
self.rule = rule
@six.python_2_unicode_compatible
class Microservice(object):
"""Microservice for queues"""
connection = 'amqp:///'
def __init__(self, connection='amqp:///', logger=None, timeout=1, name=None,
workers=None, pool_factory=ThreadPool, reconnect_timeout=1,
reconnect_enable=True, workers_override_prefetch=True,
immediate_connect=True):
"""Initialization
:type pool_factory: callable object, pool should has property size
        :param pool_factory: the pool will be configured as pool_factory(workers)
:type workers: int
:param workers: count of workers in pool
:param connection: connection for queues broker
:type connection: str, None, dict or Connection
:param logger: logging instance
:type logger: Logger
        :param timeout: sleep interval for the event loop, default = 1
:type timeout: None, int or float
"""
if logger is None:
logger = _logger
self.logger = InstanceLogger(self, logger)
self.connection = self._get_connection(connection)
self.timeout = timeout
self.consumers = []
self.reconnect_timeout = reconnect_timeout
self.reconnect_enable = reconnect_enable
self.workers_override_prefetch = workers_override_prefetch
if name is None:
try:
name = '<microservice: {}>'.format(self.connection.as_uri())
except: # pragma no cover
name = '<microservice: {}>'.format(
self.connection.transport_cls) # pragma: no cover
self.name = name
self._stop = False
self._stopped = False
self.pool = None
self.workers = workers
self.deferred_callbacks = None
if workers:
self.deferred_callbacks = []
self.pool = pool_factory(workers)
if immediate_connect:
self.connect()
def __str__(self):
return self.name
@property
def with_pool(self):
return self.pool is not None
def _get_connection(self, connection):
"""Create connection strategy
:param connection: connection for broker
:type connection: str, None, kombu.connections.Connection, dict
:return: instance of kombu.connections.Connection
:rtype: Connection
"""
if not connection:
connection = self.connection # pragma: no cover
if isinstance(connection, str):
connection = {'hostname': connection}
if isinstance(connection, dict):
connection = Connection(**connection)
return connection
def add_queue_rule(self, handler, name, autoack=True, prefetch_size=0,
prefetch_count=0, **kwargs):
"""Add queue rule to Microservice
:param prefetch_count: count of messages for getting from mq
:param prefetch_size: size in bytes for getting data from mq
:param handler: function for handling messages
:param autoack: if True message.ack() after callback
:type handler: callable object
:param name: name of queue
:type name: str
"""
if self.with_pool:
if self.workers_override_prefetch:
prefetch_count = self.workers
rule = Rule(name, handler, self.logger, autoack=autoack,
deferred_callbacks=self.deferred_callbacks,
pool=self.pool, **kwargs)
else:
rule = Rule(name, handler, self.logger, autoack=autoack, **kwargs)
self.connect()
consumer = Consumer(self.connection, queues=[Queue(rule.name)],
callbacks=[rule.callback], auto_declare=True)
consumer.qos(prefetch_count=prefetch_count, prefetch_size=prefetch_size)
self.consumers.append(consumer)
self.logger.debug('Rule "%s" added!', rule.name)
def _start(self):
self._stopped = False
self._stop = False
self.connect()
def stop(self):
self._stop = True
self.logger.info('Try to stop microservice draining events')
def queue(self, name, autoack=True, prefetch_size=0, prefetch_count=0,
**kwargs):
"""Decorator for handler function
        >>> app = Microservice()
        >>>
        >>> @app.queue('queue')
        >>> def function(payload, context):
        >>>     pass
:param prefetch_count: count of messages for getting from mq
:param prefetch_size: size in bytes for getting data from mq
:param autoack: if True message.ack() after callback
:param name: name of queue
:type name: str
"""
def decorator(f):
self.add_queue_rule(f, name, autoack=autoack,
prefetch_size=prefetch_size,
prefetch_count=prefetch_count,
**kwargs)
return f
return decorator
def connect(self): # pragma no cover
"""Try connect to mq"""
while not self._stop and not self.connection.connected:
try:
self.connection.connect()
self.logger.info('Connected to mq broker')
break
except ConnectionError as e: # pragma: no cover
if self.reconnect_enable:
self.logger.error(
'Connection error, cause: %s. Reconnecting...',
e.strerror
)
else:
self.stop()
break
except Exception: # pragma: no cover
self.logger.exception(
'Error when try to connect') # pragma: no cover
sleep(self.reconnect_timeout)
def revive(self): # pragma no cover
def _revive():
for i, consumer in enumerate(self.consumers):
self.logger.debug('Try revive consumer: %s', i)
consumer.channel = self.connection
consumer.revive(consumer.channel)
self.logger.debug('Consumer: %s was revived', i)
while not self._stop:
try:
_revive()
break
except ConnectionError: # pragma: no cover
if self.reconnect_enable:
self.connect()
else:
self.stop()
break
except Exception: # pragma: no cover
self.logger.exception(
'Error when try to revive') # pragma: no cover
sleep(self.reconnect_timeout)
        self.logger.debug('All %s consumers were revived...', len(self.consumers))
@property
def stopped(self):
return self._stopped
def drain_results(self):
while self.deferred_callbacks:
callback = self.deferred_callbacks.pop()
try:
callback()
self.logger.debug('Called callback. All: %s',
len(self.deferred_callbacks))
except ConnectionError as e: # pragma: no cover
self.logger.error(
'Connection error when try callback: %s. Cause: %s. '
'Message will be handled on next iteration',
callback, e.strerror
)
except Exception: # pragma no cover
self.logger.exception(
'Unknown exception when try callback: %s', callback
)
def drain_events(self, infinity=True):
with nested(*self.consumers):
while not self._stop:
try:
self.connection.drain_events(timeout=self.timeout)
except socket.timeout:
if not infinity:
break
except ConnectionError as e: # pragma no cover
self.logger.error(
                        'Connection to mq has broken off because: %s. Trying to reconnect...',
e)
self.connect()
self.revive()
break
except HandlerError:
self.logger.exception('Handler error')
except Exception as e: # pragma no cover
if not self._stop:
self.logger.exception(
'Something wrong! Try to restart the loop')
self.revive()
break
else: # pragma: no cover
self.logger.exception(
'Something wrong! And stopping...')
break
if self.with_pool:
try:
self.drain_results()
except Exception: # pragma no cover
self.logger.exception('Unknown error when '
'draining results')
if self._stop:
if self.with_pool:
try:
self.pool.join()
self.drain_results() # pragma: no cover
except AssertionError:
pass
except Exception: # pragma: no cover
self.logger.exception(
'Unknown error when '
'draining results'
)
self._stopped = True
self.logger.info('Stopped draining events.')
def run(self, debug=False):
"""Run microservice in loop, where handle connections
:param debug: enable/disable debug mode
:type debug: bool
"""
if debug:
from microservices.utils import set_logging
set_logging('DEBUG')
def _run():
self._start()
self.drain_events(infinity=True)
while not self._stopped:
_run()
def read(self, count=1):
for x in range(count):
self.drain_events(infinity=False)
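if __name__ == '__main__':
    # Minimal usage sketch (assumes a local AMQP broker reachable at amqp:///):
    # register a handler for the "example" queue and run the service loop.
    app = Microservice('amqp:///', workers=2)

    @app.queue('example')
    def handle(payload, context):
        print(payload)

    app.run(debug=True)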
|
444344
|
from OpenNero import *
from random import seed
# add the key and mouse bindings
from inputConfig import *
# add network utils
from common import *
from module import getMod, delMod
### called from gui elements ############################
def toggle_ai_callback(pauseButton):
""" pause and resume all AI agents """
toggle_ai()
if pauseButton.text == 'Pause!':
disable_ai()
pauseButton.text = 'Resume!'
else:
pauseButton.text = 'Pause!'
reset_ai()
def toggle_bot_type(changeBotButton, botTypeBox):
if botTypeBox.text.lower().find('script') >= 0:
botTypeBox.text = 'rtNEAT'
changeBotButton.text = 'Select Script Bots'
else:
botTypeBox.text = 'Script'
changeBotButton.text = 'Select rtNEAT Bots'
def remove_bots_closure(removeBotsButton, addBotsButton):
def closure():
removeBotsButton.enabled = False
addBotsButton.enabled = True
getMod().remove_bots()
return closure
def add_bots_closure(removeBotsButton, addBotsButton, botTypeBox, numBotBox):
def closure():
removeBotsButton.enabled = True
addBotsButton.enabled = False
getMod().add_bots(botTypeBox.text, numBotBox.text)
return closure
def CreateGui(guiMan):
window_width = 250 # width
guiMan.setTransparency(1.0)
guiMan.setFont("data/gui/fonthaettenschweiler.bmp")
botTypeLabel = gui.create_text(guiMan, 'botTypeLabel', Pos2i(10,17), Pos2i(50,30), 'Bot Type:')
botTypeBox = gui.create_edit_box(guiMan, 'botType', Pos2i(60,10), Pos2i(60,30), 'Script')
numBotBox = gui.create_edit_box(guiMan, 'numBot', Pos2i(130,10), Pos2i(40,30), '5')
addBotButton = gui.create_button(guiMan, 'addBot', Pos2i(180,10), Pos2i(60,30), '')
addBotButton.text = "Add bots"
changeBotButton = gui.create_button(guiMan, 'changeBot', Pos2i(10,50), Pos2i(230,30), '')
changeBotButton.text = "Select rtNEAT Bots"
changeBotButton.OnMouseLeftClick = lambda:toggle_bot_type(changeBotButton, botTypeBox)
    w = (window_width - 40) // 3  # integer division so widget sizes stay integers
pauseButton = gui.create_button( guiMan, 'pause', Pos2i(10,90), Pos2i(w,30), '' )
pauseButton.text = 'Pause!'
pauseButton.OnMouseLeftClick = lambda:toggle_ai_callback(pauseButton)
removeBotButton = gui.create_button(guiMan, 'cleanBot', Pos2i(10 + (w + 10),90), Pos2i(w,30), '')
removeBotButton.text = "Remove bots"
exitButton = gui.create_button(guiMan, 'exit', Pos2i(10 + 2 * (w + 10),90), Pos2i(w,30), '')
exitButton.text = "Exit"
exitButton.OnMouseLeftClick = lambda: switchToHub()
addBotButton.OnMouseLeftClick = add_bots_closure(removeBotButton, addBotButton, botTypeBox, numBotBox)
removeBotButton.OnMouseLeftClick = remove_bots_closure(removeBotButton, addBotButton)
addBotButton.enabled = True
removeBotButton.enabled = False
AiWindow = gui.create_window( guiMan, 'AiWindow', Pos2i(530,20), Pos2i(window_width,150), 'AI Controls' )
AiWindow.addChild(botTypeLabel)
AiWindow.addChild(botTypeBox)
AiWindow.addChild(numBotBox)
AiWindow.addChild(changeBotButton)
AiWindow.addChild(addBotButton)
AiWindow.addChild(pauseButton)
AiWindow.addChild(removeBotButton)
AiWindow.addChild(exitButton)
def ClientMain():
# disable physics and AI updates at first
# disable_physics()
disable_ai()
# initialize random number generator with current time
seed()
# add a camera
camRotateSpeed = 100
camMoveSpeed = 1500
camZoomSpeed = 100
cam = getSimContext().addCamera(camRotateSpeed, camMoveSpeed, camZoomSpeed)
cam.setPosition(Vector3f(100, 100, 50))
cam.setTarget(Vector3f(1, 1, 1))
cam.setFarPlane(1000)
cam.setEdgeScroll(False)
getMod().setup_sandbox()
# add a light source
getSimContext().addLightSource(Vector3f(500,-500,1000), 1500)
# create the io map
getSimContext().setInputMapping(createInputMapping())
# setup the gui
CreateGui(getSimContext().getGuiManager())
|
444444
|
import json
from datetime import timedelta
from django.core import mail
from django.test import TestCase
from django.utils import timezone
from mock import Mock
from job_runner.apps.job_runner.management.commands.health_check import Command
from job_runner.apps.job_runner.models import Run, Worker
class CommandTestCase(TestCase):
"""
Tests for :class:`.Command`.
"""
fixtures = [
'test_auth',
'test_projects',
'test_workers',
'test_worker_pools',
'test_job_templates',
'test_jobs',
]
def test__mark_worker_runs_as_failed(self):
"""
Test :meth:`.Command._mark_worker_runs_as_failed`.
"""
worker = Worker.objects.get(pk=1)
Run.objects.filter(pk=1).update(worker=worker)
command = Command()
command.publisher = Mock()
self.assertEqual(None, Run.objects.get(pk=1).return_dts)
command._mark_worker_runs_as_failed(worker, 'Test mark as failed')
run = Run.objects.get(pk=1)
self.assertNotEqual(None, run.return_dts)
self.assertFalse(run.return_success)
command.publisher.send_multipart.assert_called_once_with([
'worker.event',
json.dumps({
'event': 'returned',
'run_id': 1,
'kind': 'run',
})
])
def test__find_unresponsive_workers_and_mark_runs_as_failed(self):
"""
Test ``_find_unresponsive_workers_and_mark_runs_as_failed``.
"""
Run.objects.filter(pk=1).update(worker=Worker.objects.get(pk=1))
Run.objects.filter(pk=2).update(worker=Worker.objects.get(pk=2))
# In testing.py:
# JOB_RUNNER_WORKER_PING_INTERVAL = 60 * 5
# JOB_RUNNER_WORKER_MARK_JOB_FAILED_AFTER_INTERVALS = 3
acceptable = timezone.now() - timedelta(seconds=(60 * 5 * 3))
unacceptable = acceptable - timedelta(seconds=15)
Worker.objects.filter(pk=1).update(ping_response_dts=acceptable)
Worker.objects.filter(pk=2).update(ping_response_dts=unacceptable)
command = Command()
command.publisher = Mock()
command._find_unresponsive_workers_and_mark_runs_as_failed()
runs = Run.objects.all()
# Run pk=1 was marked as failed
self.assertNotEqual(None, runs[0].return_dts)
self.assertFalse(runs[0].return_success)
# Run pk=2 was not touched
self.assertEqual(None, runs[1].return_success)
self.assertEqual(None, runs[1].return_dts)
def test__find_unresponsive_worker_pools(self):
"""
Test :meth:`.Command._find_unresponsive_worker_pools`.
In this case, we expect Pool 1 to be responsive and Pool 2 to be
unresponsive.
"""
Worker.objects.filter(pk=1).update(ping_response_dts=timezone.now())
# from settings in ``base.py``:
# 915 = (60 * 5 * 3) + 15
Worker.objects.filter(pk=2).update(
ping_response_dts=timezone.now() - timedelta(seconds=915))
command = Command()
command._find_unresponsive_worker_pools()
self.assertEqual(1, len(mail.outbox))
self.assertEqual(
'Worker-pool unresponsive: Pool 2', mail.outbox[0].subject)
|
444447
|
import unittest
from api.app import app
import json
class TestAPI(unittest.TestCase):
def test_vincinv(self):
query = {
'lat1': -37.57037203,
'lon1': 144.25295244,
'lat2': -37.39101561,
'lon2': 143.5535383,
'from_angle_type': 'dms',
'to_angle_type': 'dms'
}
expected_response = {
'ell_dist': 54972.289,
'azimuth1to2': 306.52053231124,
'azimuth2to1': 127.10250207968
}
response = app.test_client().get('/vincinv', query_string=query)
self.assertEqual(json.loads(response.data), expected_response)
def test_vincdir(self):
query = {
'lat1': -37.57037203,
'lon1': 144.25295244,
'azimuth1to2': 306.520537,
'ell_dist': 54972.271,
'from_angle_type': 'dms',
'to_angle_type': 'dms'
}
expected_response = {
'lat2': -37.3910156124268,
'lon2': 143.5535383883988,
'azimuth2to1': 127.10250671432
}
response = app.test_client().get('/vincdir', query_string=query)
self.assertEqual(json.loads(response.data), expected_response)
|
444497
|
import csv
class Parser:
def __init__(self, ):
self.clusters = {}
def read_expression_clusters(self, network_file, cluster_file):
"""
Reads hrr and hcca file from the original PlaNet pipeline
ids are based on line number starting with 0 !
:param network_file: path to the file with the network (hrr)
:param cluster_file: path to the file with the clusters (hcca)
:return:
"""
id_to_probe = {}
# the network file is required for the mapping from numeric ids to gene ids
with open(network_file) as csvfile:
reader = csv.reader(csvfile, delimiter='\t')
for probe_id, parts in enumerate(reader):
# probe_id in this case is the line number (starting from zero)
probe = parts[0]
gene_id = parts[1]
id_to_probe[probe_id] = {}
id_to_probe[probe_id]['probe'] = probe
id_to_probe[probe_id]['gene'] = gene_id
with open(cluster_file) as csvfile:
reader = csv.reader(csvfile, delimiter='\t')
for parts in reader:
probe_id = int(parts[0])
cluster_id = parts[1]
if cluster_id not in self.clusters:
self.clusters[cluster_id] = []
if probe_id in id_to_probe.keys():
self.clusters[cluster_id].append({'probe': id_to_probe[probe_id]['probe'],
'gene': id_to_probe[probe_id]['gene']})
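if __name__ == '__main__':
    # Minimal usage sketch (hypothetical file names): the .hrr file maps line numbers
    # to probe and gene ids, the .hcca file assigns those line numbers to clusters.
    parser = Parser()
    parser.read_expression_clusters("network.hrr", "clusters.hcca")
    print(parser.clusters)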
|
444527
|
import terrascript
import terrascript.provider
import terrascript.resource
config = terrascript.Terrascript()
# AWS provider
config += terrascript.provider.aws(region="us-east-1")
# Define Variable and add to config
v = terrascript.Variable("image_id", type="string")
config += v
# Define AWS EC2 instance and add to config
i = terrascript.resource.aws_instance("example", instance_type="t2.micro", ami=v)
config += i
# Output the instance's private IP
config += terrascript.Output(
"instance_ip_addr",
value=i.private_ip,
description="The private IP address of the instance.",
)
|
444529
|
from gbvision.utils.net import AsyncStreamReceiver
from .udp_stream_receiver import UDPStreamReceiver
class AsyncUDPStreamReceiver(AsyncStreamReceiver, UDPStreamReceiver):
def __init__(self, port, *args, **kwargs):
UDPStreamReceiver.__init__(self, port, *args, **kwargs)
AsyncStreamReceiver.__init__(self, *args, **kwargs)
def _read(self):
return UDPStreamReceiver.read(self)
|
444533
|
from torch import nn
import torch.nn.functional as F
class mini_xception(nn.Module):
def __init__(self):
super(mini_xception,self).__init__()
self.num_channels=1
self.image_size=48
self.num_labels=7
        # the input is a single-channel 48x48 image, so in_channels must be 1;
        # BatchNorm2d normalizes the 8 feature maps produced by each convolution
        self.conv2d_1 = nn.Conv2d(in_channels=1, out_channels=8, kernel_size=3, stride=1)
        self.batch_normalization_1 = nn.BatchNorm2d(8)
        self.conv2d_2 = nn.Conv2d(8, 8, kernel_size=3, stride=1)
        self.batch_normalization_2 = nn.BatchNorm2d(8)
        # module 1
    def forward(self, x):
        x = F.relu(self.batch_normalization_1(self.conv2d_1(x)))
        x = F.relu(self.batch_normalization_2(self.conv2d_2(x)))
        return x
if __name__ == '__main__':
print(mini_xception())
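    # Hedged smoke test for the stub above: two 3x3 convolutions shrink a 48x48
    # single-channel input to 44x44 with 8 feature maps.
    import torch
    out = mini_xception()(torch.randn(1, 1, 48, 48))
    print(out.shape)  # expected: torch.Size([1, 8, 44, 44])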
|
444583
|
import random
START = 0
STOP = 9999
max_num = 100
comparisons = 0
swaps = 0
def bubble_sort(numbers):
global comparisons
global swaps
n = len(numbers)
for i in range(n):
for j in range(0, n-i-1):
if numbers[j] > numbers[j+1] :
numbers[j], numbers[j+1] = numbers[j+1], numbers[j]
swaps += 1
comparisons += 1
def numbers_generator():
global max_num
return [random.randint(START, STOP) for i in range(0, max_num)]
if __name__ == '__main__':
numbers = numbers_generator()
print(" << ", end="")
print(*numbers, sep=" ")
bubble_sort(numbers)
print(" >> ", end="")
print(*numbers, sep=" ")
print(f"Total comparisons of {max_num} numbers = {comparisons} times / with {swaps} swaps.")
|
444661
|
from collections import Counter
from unittest import TestCase
from mab_ranking.bandits.bandits import BetaThompsonSampling
from mab_ranking.bandits.rank_bandits import IndependentBandits
class IndependentBanditsTest(TestCase):
def test_choose(self):
num_ranks = 3
rank_bandit = IndependentBandits(num_ranks, BetaThompsonSampling, num_arms=10)
selected_arms = rank_bandit.choose()
assert len(selected_arms) == num_ranks
assert len(selected_arms) == len(set(selected_arms))
def test_update(self):
rank_bandit = IndependentBandits(3, BetaThompsonSampling, num_arms=8)
selected_arms = [0, 1, 2]
rank_bandit.update(selected_arms=selected_arms, rewards=[1.0, 0.0, 0.0])
assert rank_bandit.rank_bandits[0].rewards == [2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
assert rank_bandit.rank_bandits[0].num_tries == [3.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]
def test_choose_and_update(self):
rank_bandit = IndependentBandits(2, BetaThompsonSampling, num_arms=4)
chosen_arm_in_first_position = []
for _ in range(500):
selected_arms = rank_bandit.choose()
first_chosen_arm = selected_arms[0]
chosen_arm_in_first_position.append(first_chosen_arm)
# Only arm with id 1 at 1st position receives a reward
reward = 1.0 if first_chosen_arm == 1 else 0.0
rank_bandit.update(
selected_arms=selected_arms,
rewards=[reward, 0.0]
)
assert Counter(chosen_arm_in_first_position).most_common(1)[0][0] == 1
|
444670
|
import abc
class ExtraDataAbstractMixin(object, metaclass=abc.ABCMeta):
"""
Ensure that backends define these methods. Used in pipeline to save extra data on the user model.
"""
@abc.abstractmethod
def save_extra_data(response, user):
return
@abc.abstractmethod
def get_profile_image(strategy, details, response, uid, user, social, is_new=False, *args, **kwargs):
return
class ExtraActionsAbstractMixin(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def post(user_social_auth, social_obj):
return
@abc.abstractmethod
def get_friends(user_social_auth):
return
|
444688
|
import unittest
from numpy import hstack, max, abs, ones, zeros, sum, sqrt
from cantera import Solution, one_atm, gas_constant
import numpy as np
from spitfire import ChemicalMechanismSpec
from os.path import join, abspath
from subprocess import getoutput
test_mech_directory = abspath(join('tests', 'test_mechanisms', 'old_xmls'))
mechs = [x.replace('.xml', '') for x in getoutput('ls ' + test_mech_directory + ' | grep .xml').split('\n')]
def validate_on_mechanism(mech, temperature, pressure, test_rhs=True, test_jac=True):
xml = join(test_mech_directory, mech + '.xml')
T = temperature
p = pressure
r = ChemicalMechanismSpec(xml, 'gas').griffon
gas = Solution(xml)
ns = gas.n_species
gas.TPX = T, p, ones(ns)
y = gas.Y
state = hstack((T, y[:-1]))
rhsGR = np.empty(ns)
r.reactor_rhs_isobaric(state, p, 0., np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, rhsGR)
if test_jac:
Tin, yin, tau = 0, np.ndarray(1), 0
rhsTmp = np.empty(ns)
jacGR = np.empty(ns * ns)
r.reactor_jac_isobaric(state, p, Tin, yin, tau, 0, 0, 0, 0, 0, 0, False, 0, 0, rhsTmp, jacGR)
jacGR = jacGR.reshape((ns, ns), order='F')
dT = 1.e-6
dY = 1.e-6
jacFD = np.empty((ns, ns))
rhsGR1, rhsGR2 = np.empty(ns), np.empty(ns)
state_m = hstack((T - dT, y[:-1]))
state_p = hstack((T + dT, y[:-1]))
r.reactor_rhs_isobaric(state_m, p, Tin, yin, tau, 0, 0, 0, 0, 0, 0, False, rhsGR1)
r.reactor_rhs_isobaric(state_p, p, Tin, yin, tau, 0, 0, 0, 0, 0, 0, False, rhsGR2)
jacFD[:, 0] = (- rhsGR1 + rhsGR2) / (2. * dT)
for i in range(ns - 1):
y_m1, y_p1 = np.copy(y), np.copy(y)
y_m1[i] += - dY
y_m1[-1] -= - dY
y_p1[i] += dY
y_p1[-1] -= dY
state_m = hstack((T, y_m1[:-1]))
state_p = hstack((T, y_p1[:-1]))
r.reactor_rhs_isobaric(state_m, p, Tin, yin, tau, 0, 0, 0, 0, 0, 0, False, rhsGR1)
r.reactor_rhs_isobaric(state_p, p, Tin, yin, tau, 0, 0, 0, 0, 0, 0, False, rhsGR2)
jacFD[:, 1 + i] = (- rhsGR1 + rhsGR2) / (2. * dY)
pass_jac = max(abs(jacGR - jacFD) / (abs(jacGR) + 1.)) < 1.e-2
if not pass_jac:
print('fd:')
for i in range(ns):
for j in range(ns):
print(f'{jacFD[i, j]:12.2e}', end=', ')
print('')
print('gr:')
for i in range(ns):
for j in range(ns):
print(f'{jacGR[i, j]:12.2e}', end=', ')
print('')
print('gr-fd:')
for i in range(ns):
for j in range(ns):
print(f'{(jacGR[i, j] - jacFD[i, j]) / (abs(jacFD[i, j]) + 1.0):12.2e}', end=', ')
print('')
print('')
w = gas.net_production_rates * gas.molecular_weights
h = gas.standard_enthalpies_RT * gas.T * gas_constant / gas.molecular_weights
rhsCN = zeros(ns)
rhsCN[1:] = w[:-1] / gas.density
rhsCN[0] = - sum(w * h) / gas.density / gas.cp_mass
if max(abs(rhsGR - rhsCN) / (abs(rhsCN) + 1.)) > 100. * sqrt(np.finfo(float).eps):
print(rhsGR, rhsCN)
pass_rhs = max(abs(rhsGR - rhsCN) / (abs(rhsCN) + 1.)) < 100. * sqrt(np.finfo(float).eps)
if test_rhs and test_jac:
return pass_rhs and pass_jac
if test_rhs:
return pass_rhs
if test_jac:
return pass_jac
def create_test(m, T, p, test_rhs, test_jac):
def test(self):
self.assertTrue(validate_on_mechanism(m, T, p, test_rhs, test_jac))
return test
class Accuracy(unittest.TestCase):
pass
temperature_dict = {'600K': 600., '1200K': 1200.}
pressure_dict = {'1atm': one_atm, '2atm': 2. * one_atm}
for mech in mechs:
for temperature in temperature_dict:
for pressure in pressure_dict:
rhsname = 'test_rhs_' + mech + '_' + temperature + '_' + pressure
jacname = 'test_jac_' + mech + '_' + temperature + '_' + pressure
setattr(Accuracy, rhsname, create_test(mech, temperature_dict[temperature], pressure_dict[pressure],
test_rhs=True, test_jac=False))
setattr(Accuracy, jacname, create_test(mech, temperature_dict[temperature], pressure_dict[pressure],
test_rhs=False, test_jac=True))
if __name__ == '__main__':
unittest.main()
|
444721
|
from .common import ImageSeriesTest, make_array, make_array_ims
class TestProperties(ImageSeriesTest):
def setUp(self):
self._a = make_array()
self._is_a = make_array_ims()
def test_prop_nframes(self):
self.assertEqual(self._a.shape[0], len(self._is_a))
def test_prop_shape(self):
self.assertEqual(self._a.shape[1:], self._is_a.shape)
def test_prop_dtype(self):
self.assertEqual(self._a.dtype, self._is_a.dtype)
|
444730
|
from rest_framework.pagination import LimitOffsetPagination
class MaxLimitPagination(LimitOffsetPagination):
max_limit = 8
|
444757
|
import asyncio
from functools import partial
def mark_done(future, result):
print(f'Set to: {result}')
future.set_result(result)
async def b1():
loop = asyncio.get_event_loop()
fut = asyncio.Future()
loop.call_soon(mark_done, fut, 'the result')
loop.call_soon(partial(print, 'Hello', flush=True))
loop.call_soon(partial(print, 'Greeting', flush=True))
print(f'Done: {fut.done()}')
await asyncio.sleep(0)
print(f'Done: {fut.done()}, Result: {fut.result()}')
async def b2():
loop = asyncio.get_event_loop()
fut = asyncio.Future()
loop.call_later(2, mark_done, fut, 'the result')
loop.call_later(1, partial(print, 'Hello'))
loop.call_later(1, partial(print, 'Greeting'))
print(f'Done: {fut.done()}')
await asyncio.sleep(2)
print(f'Done: {fut.done()}, Result: {fut.result()}')
async def b3():
loop = asyncio.get_event_loop()
now = loop.time()
fut = asyncio.Future()
loop.call_at(now + 2, mark_done, fut, 'the result')
loop.call_at(now + 1, partial(print, 'Hello', flush=True))
loop.call_at(now + 1, partial(print, 'Greeting', flush=True))
print(f'Done: {fut.done()}')
await asyncio.sleep(2)
print(f'Done: {fut.done()}, Result: {fut.result()}')
if __name__ == '__main__':
for f in (b1, b2, b3):
asyncio.run(f())
|
444762
|
import torch.nn as nn
from .heads.smpl_head_prediction import SMPLHeadPrediction
from .transformers import RelationTransformerModel
from yacs.config import CfgNode as CN
class Pose_transformer(nn.Module):
def __init__(self, opt):
super(Pose_transformer, self).__init__()
config = "utils/config.yaml"
with open(config, 'r') as f:
            cfg = CN.load_cfg(f)
            cfg.freeze()
self.cfg = cfg
self.relational = RelationTransformerModel(cfg.MODEL.TRANSFORMER)
self.smpl_head_prediction = SMPLHeadPrediction(cfg)
|
444819
|
import requests
import logging
from server import config
logger = logging.getLogger(__name__)
def predict(story_text):
if story_text is None: # maybe we didn't parse any text out?
return {}
url = "{}/predict.json".format(config.get('NYT_THEME_LABELLER_URL'))
try:
r = requests.post(url, json={'text': story_text})
return r.json()
except requests.exceptions.RequestException as e:
logger.exception(e)
return {}
|
444837
|
import os
import re
import subprocess
from time import sleep
from typing import Any, Dict, List, Optional
from semantic_version import Version
import src.cli.console as console
from src import settings
from src.local.providers.abstract_provider import AbstractK8sProvider
from src.local.providers.k3d.storage import K3dStorage
from src.local.providers.types import K8sProviderType
from src.local.system import CMDWrapper
class K3d(AbstractK8sProvider, CMDWrapper):
kubernetes_cluster_type = K8sProviderType.k3d
base_command = "k3d"
_cluster = []
def __init__(
self,
id,
name: str = None,
prefix: str = settings.K3D_CLUSTER_PREFIX,
_debug_output=False,
):
# storage
storage = K3dStorage(id=id)
# abstract kubernetes cluster
AbstractK8sProvider.__init__(
self,
id=id,
name=name,
storage=storage,
)
# CMDWrapper
self._debug_output = _debug_output
# cluster name
cluster_name = prefix + self.name.lower()
cluster_name = cluster_name.replace(" ", "-")
self.k3d_cluster_name = cluster_name
def _clusters(self) -> List[Dict[str, str]]:
if len(self._cluster) == 0:
arguments = ["cluster", "list", "--no-headers"]
process = self._execute(arguments)
list_output = process.stdout.read()
clusters = []
cluster_list = [item.strip() for item in list_output.split("\n")[:-1]]
for entry in cluster_list:
cluster = [item.strip() for item in entry.split(" ") if item != ""]
# todo handle this output
if len(cluster) != 4:
continue
clusters.append(
{
"name": cluster[0],
"servers": cluster[1],
"agents": cluster[2],
"loadbalancer": cluster[3] == "true",
}
)
self._cluster = clusters
return self._cluster
def get_kubeconfig(self, wait=10) -> Optional[str]:
arguments = ["kubeconfig", "get", self.k3d_cluster_name]
# this is a nasty busy wait, but we don't have another chance
for i in range(1, wait):
process = self._execute(arguments)
if process.returncode == 0:
break
else:
console.info(f"Waiting for the cluster to be ready ({i}/{wait}).")
sleep(2)
if process.returncode != 0:
console.error("Something went completely wrong with the cluster spin up (or we got a timeout).")
else:
            # we now need to write the kubeconfig to a file
config = process.stdout.read().strip()
if not os.path.isdir(os.path.join(settings.CLI_KUBECONFIG_DIRECTORY, self.k3d_cluster_name)):
os.mkdir(os.path.join(settings.CLI_KUBECONFIG_DIRECTORY, self.k3d_cluster_name))
config_path = os.path.join(
settings.CLI_KUBECONFIG_DIRECTORY,
self.k3d_cluster_name,
"kubeconfig.yaml",
)
file = open(config_path, "w+")
file.write(config)
file.close()
return config_path
@staticmethod
def _get_random_unused_port() -> int:
import socket
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp.bind(("", 0))
addr, port = tcp.getsockname()
tcp.close()
return port
def exists(self) -> bool:
for cluster in self._clusters():
if cluster["name"] == self.k3d_cluster_name:
return True
return False
def create(
self,
ingress_port=None,
workers=settings.K3D_DEFAULT_WORKERS,
):
v5plus = self.version().major >= 5
api_port = self._get_random_unused_port()
if not ingress_port:
publisher_port = self._get_random_unused_port()
else:
publisher_port = ingress_port
arguments = [
"cluster",
"create",
self.k3d_cluster_name,
"--agents",
str(workers),
"--api-port",
str(api_port),
"--port",
f"{publisher_port}:{settings.K3D_DEFAULT_INGRESS_PORT}@agent{':0' if v5plus else '[0]'}",
"--servers",
str(1),
"--wait",
"--timeout",
"120s",
]
self._execute(arguments)
data = self.storage.get()
data.name = self.k3d_cluster_name
data.api_port = api_port
data.publisher_port = publisher_port
data.kubeconfig_path = self.get_kubeconfig()
self.storage.set(data)
return True
def start(self):
arguments = ["cluster", "start", self.k3d_cluster_name]
self._execute(arguments)
data = self.storage.get()
data.kubeconfig_path = self.get_kubeconfig()
self.storage.set(data)
return True
def stop(self):
arguments = ["cluster", "stop", self.k3d_cluster_name]
self._execute(arguments)
return True
def delete(self):
arguments = ["cluster", "delete", self.k3d_cluster_name]
self._execute(arguments)
self.storage.delete()
return True
def version(self) -> Version:
process = subprocess.run([self.base_command, "--version"], capture_output=True, text=True)
output = str(process.stdout).strip()
version_str = re.search(r"(\d+\.\d+\.\d+)", output).group(1)
return Version(version_str)
class K3dBuilder:
def __init__(self):
self._instances = {}
def __call__(
self,
id,
name=None,
**_ignored,
):
# get instance from cache
instance = self._instances.get(id, None)
if instance:
return instance
# create instance
instance = K3d(
id,
name=name,
prefix=settings.K3D_CLUSTER_PREFIX,
)
self._instances[id] = instance
return instance
|
444920
|
import torch.nn as nn
import torch.nn.functional as F
class res_mlp_block(nn.Module):
def __init__(self,num_channel,use_2d=False):
super().__init__()
if use_2d==False:
self.mlp1=nn.Conv1d(num_channel,num_channel,kernel_size=1)
self.mlp2=nn.Conv1d(num_channel,num_channel,kernel_size=1)
else:
self.mlp1 = nn.Conv2d(num_channel, num_channel, kernel_size=1)
self.mlp2 = nn.Conv2d(num_channel, num_channel, kernel_size=1)
def forward(self,input):
net=input
net=self.mlp1(net)
net=self.mlp2(net)
net=F.relu(input+net)
return net
class res_mlp(nn.Module):
def __init__(self,num_channel,num_layer,use_2d=False):
super().__init__()
self.res_block_list=nn.ModuleList()
for i in range(num_layer):
self.res_block_list.append(res_mlp_block(num_channel,use_2d=use_2d))
def forward(self,input):
net=input
for i in range(len(self.res_block_list)):
net=self.res_block_list[i](net)
return net
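if __name__ == '__main__':
    # Minimal sketch, not part of the original module: run a random batch through a
    # 3-layer residual MLP over 1D feature maps and check that the shape is preserved.
    import torch
    model = res_mlp(num_channel=16, num_layer=3)
    x = torch.randn(2, 16, 32)  # (batch, channels, points)
    print(model(x).shape)       # expected: torch.Size([2, 16, 32])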
|
444938
|
from jwt.exceptions import MissingRequiredClaimError
def test_missing_required_claim_error_has_proper_str():
exc = MissingRequiredClaimError('abc')
assert str(exc) == 'Token is missing the "abc" claim'
|
444949
|
from typing import Generic, TypeVar
from .types import Realm, WellKnown, SecurityConsole, Client, Credential, ClientRegistration, \
OpenRedirect, NoneSign, FormPostXSS, Username, Password
from keycloak_scanner.utils import to_camel_case
SimpleType = TypeVar('SimpleType')
V = TypeVar('V')
class BadWrappedTypeException(Exception):
def __init__(self, t: type, value):
self.t = t
self.value = value
super().__init__(f'Wrapper error: value {value} not compatible with type {t.__name__}')
class WrapperType(Generic[SimpleType]):
def __init__(self, simple_type: type):
self.name = to_camel_case(simple_type.__name__)
self.simple_type = simple_type
def check(self, value):
if not isinstance(value, self.simple_type):
raise BadWrappedTypeException(self.simple_type, value)
class Wrapper(Generic[SimpleType]):
def __init__(self, wrapper_type: WrapperType[SimpleType], value: SimpleType):
self.wrapper_type: WrapperType[SimpleType] = wrapper_type
self.wrapper_type.check(value)
self.value_ = value
def value(self) -> SimpleType:
return self.value_
# TODO : map classes with wrapper types ?
class WrapperTypes:
REALM_TYPE = WrapperType(Realm)
WELL_KNOWN_TYPE = WrapperType(WellKnown)
CLIENT_TYPE = WrapperType(Client)
CREDENTIAL_TYPE = WrapperType(Credential)
CLIENT_REGISTRATION = WrapperType(ClientRegistration)
OPEN_REDIRECT = WrapperType(OpenRedirect)
SECURITY_CONSOLE = WrapperType(SecurityConsole)
NONE_SIGN = WrapperType(NoneSign)
FORM_POST_XSS = WrapperType(FormPostXSS)
USERNAME_TYPE = WrapperType(Username)
PASSWORD_TYPE = WrapperType(Password)
|
445040
|
import sys
sys.path.append('../../')
from spear.labeling import preprocessor
@preprocessor()
def convert_to_lower(x):
return x.lower().strip()
|
445041
|
from django.urls import path
from sql import views
app_name = "sql"
urlpatterns = [
path('sql.html', views.SqlDdl.as_view(), name='sql_ddl'),
path('sql-<str:pk>.html', views.SqlDdlQuery.as_view(), name='sql_query'),
]
|
445068
|
from typing import Hashable, Iterable, Optional, Union
import pandas_flavor as pf
import pandas as pd
from janitor.utils import deprecated_alias
@pf.register_dataframe_method
@deprecated_alias(columns="column_names")
def get_dupes(
df: pd.DataFrame,
column_names: Optional[Union[str, Iterable[str], Hashable]] = None,
) -> pd.DataFrame:
"""
Return all duplicate rows.
This method does not mutate the original DataFrame.
Functional usage syntax:
```python
df = pd.DataFrame(...)
df = get_dupes(df)
```
Method chaining syntax:
```python
import pandas as pd
import janitor
df = pd.DataFrame(...).get_dupes()
```
:param df: The pandas DataFrame object.
:param column_names: (optional) A column name or an iterable
(list or tuple) of column names. Following pandas API, this only
considers certain columns for identifying duplicates. Defaults to using
all columns.
:returns: The duplicate rows, as a pandas DataFrame.
"""
dupes = df.duplicated(subset=column_names, keep=False)
return df[dupes == True] # noqa: E712
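if __name__ == "__main__":
    # Minimal sketch, not part of janitor itself: the first two rows duplicate each
    # other, so get_dupes returns both of them.
    df = pd.DataFrame({"a": [1, 1, 2], "b": ["x", "x", "y"]})
    print(get_dupes(df))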
|
445102
|
import json
import os
from typing import Any
from typing import Dict
from typing import Optional
from boto3.session import Session
from botocore.exceptions import ClientError
class TokenProvider:
def __init__(self):
pass
def get_token(self) -> Optional[str]:
pass
class EnvironmentVariableTokenProvider(TokenProvider):
variable_name: str
def __init__(self, config_data: Dict[str, Any]):
self.variable_name = config_data["value"]
def get_token(self) -> Optional[str]:
return os.environ.get(self.variable_name)
class ConfigurationFileTokenProvider(TokenProvider):
def __init__(self, config_data: Dict[str, Any]):
self.token = config_data["value"]
def get_token(self) -> Optional[str]:
return self.token
class AwsSecretsManagerTokenRetrievalException(Exception):
def __init__(self, message):
self.message = message
def __repr__(self):
return self.message
class AwsSecretsManagerTokenProvider(TokenProvider):
def __init__(self, config_data: Dict[str, Any]):
self.secret_name = config_data["secret_name"]
self.region = config_data["region"]
self.secret_key = config_data["secret_key"]
def get_token(self) -> Optional[str]:
session = Session()
client = session.client(service_name="secretsmanager", region_name=self.region)
try:
get_secret_value_response = client.get_secret_value(SecretId=self.secret_name)
except ClientError as e:
if e.response["Error"]["Code"] == "ResourceNotFoundException":
raise AwsSecretsManagerTokenRetrievalException(f"The requested secret {self.secret_name} was not found")
elif e.response["Error"]["Code"] == "InvalidRequestException":
raise AwsSecretsManagerTokenRetrievalException("The request was invalid")
elif e.response["Error"]["Code"] == "InvalidParameterException":
raise AwsSecretsManagerTokenRetrievalException("The request had invalid params")
else:
if "SecretString" in get_secret_value_response:
secret = json.loads(get_secret_value_response["SecretString"])
try:
return secret[self.secret_key]
except KeyError:
raise AwsSecretsManagerTokenRetrievalException(f"Invalid secret_key parameter: {self.secret_key}")
else:
raise AwsSecretsManagerTokenRetrievalException(
"Invalid secret format. It should be a SecretString, instead of binary."
)
TYPE_NAME_TO_CLASS: Dict[str, type] = {  # maps provider names to TokenProvider subclasses
"ENVIRONMENT_VARIABLE": EnvironmentVariableTokenProvider,
"TOKEN": ConfigurationFileTokenProvider,
"AWS_SECRETS_MANAGER": AwsSecretsManagerTokenProvider,
}
class InvalidTokenProviderTypeException(Exception):
def __init__(self, name):
self.name = name
def __repr__(self):
return f"Invalid token provider type: {self.name}"
def get_token_provider_by_name(name: str) -> TokenProvider:
try:
return TYPE_NAME_TO_CLASS[name]
except KeyError:
raise InvalidTokenProviderTypeException(name)
class TokenNotFoundException(Exception):
def __repr__(self):
return "Token could not be found"
def get_token(token_config: Dict[str, Any]) -> str:
token: str
if type(token_config) == list:
for token_provider in token_config:
provider = get_token_provider_by_name(token_provider["type"])(token_provider)
token = provider.get_token()
if token:
return token
raise TokenNotFoundException()
else:
return os.environ.get("CACHET_TOKEN") or token_config
|
445107
|
import math
from typing import Dict, Tuple, List
from OpenGL.GL import *
from OpenGL.GL.shaders import compileProgram, compileShader
from opengl_helper.shader import BaseShader
from opengl_helper.texture import Texture
class ComputeShader(BaseShader):
def __init__(self, shader_src: str):
BaseShader.__init__(self)
self.shader_handle: int = compileProgram(compileShader(shader_src, GL_COMPUTE_SHADER))
self.textures: List[Tuple[Texture, str, int]] = []
self.uniform_cache: Dict[str, Tuple[int, any, any]] = dict()
self.max_workgroup_size: int = glGetIntegeri_v(GL_MAX_COMPUTE_WORK_GROUP_COUNT, 0)[0]
def compute(self, width: int, barrier: bool = False):
for i in range(math.ceil(width / self.max_workgroup_size)):
self.set_uniform_data(
[("work_group_offset", i * self.max_workgroup_size, "int")])
for texture, flag, image_position in self.textures:
texture.bind_as_image(flag, image_position)
glUseProgram(self.shader_handle)
for uniform_location, uniform_data, uniform_setter in self.uniform_cache.values():
uniform_setter(uniform_location, uniform_data)
if i == math.ceil(width / self.max_workgroup_size) - 1:
glDispatchCompute(width % self.max_workgroup_size, 1, 1)
else:
glDispatchCompute(self.max_workgroup_size, 1, 1)
if barrier:
self.barrier()
@staticmethod
def barrier():
glMemoryBarrier(GL_ALL_BARRIER_BITS)
|
445148
|
from gopro_overlay.models import KineticEnergyModel
from gopro_overlay.units import units
def test_kinetic():
speed = units.Quantity(15, units.mps)
mass = units.Quantity(10, units.kg)
model = KineticEnergyModel(mass)
result = model.evaluate(speed)
assert result == units.Quantity(1125, units.joules)
|
445161
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from scipy import misc
import tensorflow as tf
import numpy as np
import sys
import os
import argparse
import align.detect_face
import glob
from pdb import set_trace as bp
from six.moves import xrange
from dataset.dataset_helpers import *
import h5py
import torch
from torch.utils import data
from torchvision import transforms as T
import torchvision
from PIL import Image
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from helpers import *
"""
python3 app/export_embeddings.py \
--model_path ./data/pth/IR_50_MODEL_arcface_ms1celeb_epoch90_lfw9962.pth \
--data_dir ./data/dataset_got/dataset_lanister_raw/ \
--output_dir data/out_embeddings/ \
--model_type IR_50 \
--is_aligned 0 \
--with_demo_images 1 \
--image_size 112 \
--image_batch 5 \
--h5_name dataset_lanister.h5
"""
def writePersonMeanEmbeddingFile(h5_filename, person_name, mean_embedding):
'''
=====================================
*** Mean embedding h5 file structure:
person1_name
embedding [4.5, 2.1, 9.9]
person2_name
embedding [3.0, 41.1, 56.621]
=====================================
Parameters;
h5_filename='data/dataset.h5'
person_name='Alex'
mean_embedding=[-1.40146054e-02, 2.31648367e-02, -8.39150697e-02......]
'''
with h5py.File(h5_filename, 'a') as f:
person_grp = f.create_group(person_name)
person_grp.create_dataset('embedding', data=mean_embedding)
def writePersonTempFile(temp_h5_filename, person_name, image_temp_name, embedding):
'''
=====================================
*** temp h5 file structure:
person1_name
person1_subgroup_imagetempname_1
embedding [4.5, 2.1, 9.9]
person1_subgroup_imagetempname_2
embedding [84.5, 32.32, 10.1]
person2_name
person2_subgroup_imagetempname_1
embedding [1.1, 2.1, 2.9]
person2_subgroup_imagetempname_2
embedding [3.0, 41.1, 56.621]
=====================================
Parameters;
temp_h5_filename='data/temp_dataset.h5'
person_name='Alex'
image_temp_name='a1.jpg'
embedding=[-1.40146054e-02, 2.31648367e-02, -8.39150697e-02......]
'''
with h5py.File(temp_h5_filename, 'a') as f:
if person_name in f.keys():
person_subgroup = f[person_name].create_group(image_temp_name)
person_subgroup.create_dataset('embedding', data=embedding)
else:
person_grp = f.create_group(person_name)
person_subgroup = person_grp.create_group(image_temp_name)
person_subgroup.create_dataset('embedding', data=embedding)
class FacesDataset(data.Dataset):
def __init__(self, image_list, label_list, names_list, num_classes, is_aligned, image_size, margin, gpu_memory_fraction, demo_images_path=None):
self.image_list = image_list
self.label_list = label_list
self.names_list = names_list
self.num_classes = num_classes
self.is_aligned = is_aligned
self.demo_images_path = demo_images_path
self.image_size = image_size
self.margin = margin
self.gpu_memory_fraction = gpu_memory_fraction
def __getitem__(self, index):
img_path = self.image_list[index]
img = Image.open(img_path)
data = img.convert('RGB')
if self.is_aligned==1:
image_data_rgb = np.asarray(data) # (112, 112, 3)
else:
image_data_rgb = load_and_align_data(img_path, self.image_size, self.margin, self.gpu_memory_fraction)
ccropped, flipped = crop_and_flip(image_data_rgb, for_dataloader=True)
# bp()
# print("\n\n")
# print("### image_data_rgb shape: " + str(image_data_rgb.shape))
# print("### CCROPPED shape: " + str(ccropped.shape))
# print("### FLIPPED shape: " + str(flipped.shape))
# print("\n\n")
if self.demo_images_path is not None:
################################################
### SAVE Demo Images
image_name = str(self.names_list[index]) + '_' + str(os.path.basename(img_path))
## Save Matplotlib
im_da = np.asarray(image_data_rgb)
plt.imsave(self.demo_images_path + image_name, im_da)
## Save OpenCV
# image_BGR = cv2.cvtColor(image_data_rgb, cv2.COLOR_RGB2BGR)
# cv2.imwrite(self.demo_images_path + prefix + '.png', image_BGR)
################################################
# data = self.transforms(data)
label = self.label_list[index]
name = self.names_list[index]
        absolute_path = os.path.abspath(img_path)
        return ccropped, flipped, label, name, absolute_path
def __len__(self):
return len(self.image_list)
def main(ARGS):
# np.set_printoptions(threshold=sys.maxsize)
out_dir = ARGS.output_dir
if not os.path.isdir(out_dir): # Create the out directory if it doesn't exist
os.makedirs(out_dir)
else:
if os.path.exists(os.path.join(os.path.expanduser(out_dir), ARGS.h5_name)):
os.remove(os.path.join(os.path.expanduser(out_dir), ARGS.h5_name))
images_dir=None
if ARGS.with_demo_images==1:
images_dir = os.path.join(os.path.expanduser(out_dir), 'demo_images/')
if not os.path.isdir(images_dir): # Create the out directory if it doesn't exist
os.makedirs(images_dir)
train_set = get_dataset(ARGS.data_dir)
image_list, label_list, names_list = get_image_paths_and_labels(train_set)
faces_dataset = FacesDataset(image_list=image_list,
label_list=label_list,
names_list=names_list,
num_classes=len(train_set),
is_aligned=ARGS.is_aligned,
image_size=ARGS.image_size,
margin=ARGS.margin,
gpu_memory_fraction=ARGS.gpu_memory_fraction,
demo_images_path=images_dir)
loader = torch.utils.data.DataLoader(faces_dataset, batch_size=ARGS.image_batch,
shuffle=False, num_workers=ARGS.num_workers)
# fetch the classes (labels as strings) exactly as it's done in get_dataset
path_exp = os.path.expanduser(ARGS.data_dir)
classes = [path for path in os.listdir(path_exp) \
if os.path.isdir(os.path.join(path_exp, path))]
classes.sort()
# get the label strings
label_strings = [name for name in classes if \
os.path.isdir(os.path.join(path_exp, name))]
####### Device setup
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
####### Model setup
print("Use CUDA: " + str(use_cuda))
print('Model type: %s' % ARGS.model_type)
model = get_model(ARGS.model_type, ARGS.input_size)
if use_cuda:
model.load_state_dict(torch.load(ARGS.model_path))
else:
model.load_state_dict(torch.load(ARGS.model_path, map_location='cpu'))
model.to(device)
model.eval()
embedding_size = 512
start_time = time.time()
########################################
temp_file = out_dir+"temp_"+ARGS.h5_name
with torch.no_grad():
for i, (ccropped, flipped, label, name, absolute_paths) in enumerate(loader):
ccropped, flipped, label = ccropped.to(device), flipped.to(device), label.to(device)
feats = extract_norm_features(ccropped, flipped, model, device, tta = True)
emb = feats.cpu().numpy()
for j in range(len(ccropped)):
#params
person_embedding = emb[j, :]
person_name = name[j]
image_temp_name = os.path.basename(absolute_paths[j])
writePersonTempFile(temp_file, person_name, image_temp_name, person_embedding)
percent = round(100. * i / len(loader))
print('.completed {}% Run time: {}'.format(percent, timedelta(seconds=int(time.time() - start_time))), end='\r')
print('', end='\r')
total_time = timedelta(seconds=int(time.time() - start_time))
print(60*"=")
print('Extracting embeddings done. time: ' + str(total_time))
###########################################################
### Extracting MEAN embedding for each person
'''
=====================================
*** temp h5 file structure:
person1_name
person1_subgroup_imagetempname_1
embedding [4.5, 2.1, 9.9]
person1_subgroup_imagetempname_2
embedding [84.5, 32.32, 10.1]
person2_name
person2_subgroup_imagetempname_1
embedding [1.1, 2.1, 2.9]
person2_subgroup_imagetempname_2
embedding [3.0, 41.1, 56.621]
=====================================
'''
    if not os.path.isfile(temp_file):
        raise FileNotFoundError("temp h5 file does not exist: {}".format(temp_file))
print('Extracting mean embeddings...\n')
# Data for each person in temp file
with h5py.File(temp_file, 'r') as f:
for person in f.keys():
# print("\npersonName: " + str(person))
nrof_images = len(f[person].keys())
embedding_size = 512
embeddings_array = np.zeros((nrof_images, embedding_size))
# label_strings_array = []
print('For {} extracted {} embeddings'.format(person, nrof_images))
# print("\tembedding array shape: " + str(embeddings_array.shape))
# print("\tnumber of images: " + str(nrof_images) + " embedding size: " + str(embedding_size))
for i, subgroup in enumerate(f[person].keys()):
# print("\tlabel: " + str(i))
embeddings_array[i, :] = f[person][subgroup]['embedding'][:]
# label_strings_array.append(str(subgroup))
# print("\timage_name: " + str(subgroup))
# print("\tembedding: " + str(f[person][subgroup]['embedding'][:]))
mean_embedding = np.mean(embeddings_array, axis=0)
writePersonMeanEmbeddingFile(out_dir+ARGS.h5_name, person, mean_embedding)
print('\nExtracting mean embeddings done. time: ' + str(total_time))
if os.path.exists(temp_file):
os.remove(temp_file)
else:
        print("Temp h5 file {} not found; nothing to remove".format(temp_file))
print(60*"=")
print('All done. time: ' + str(total_time))
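# Minimal reader sketch (added helper, not called by this script) for the temp h5
# layout documented in main(): maps person name -> image name -> embedding array.
def read_temp_embeddings(temp_file):
    embeddings = {}
    with h5py.File(temp_file, 'r') as f:
        for person in f.keys():
            embeddings[person] = {image: f[person][image]['embedding'][:]
                                  for image in f[person].keys()}
    return embeddings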
def load_and_align_data(image_path, image_size, margin, gpu_memory_fraction):
minsize = 20 # minimum size of face
threshold = [ 0.6, 0.7, 0.7 ] # three steps's threshold
factor = 0.709 # scale factor
print('🎃 Creating networks and loading parameters')
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)
print(image_path)
img = misc.imread(os.path.expanduser(image_path))
img_size = np.asarray(img.shape)[0:2]
bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
det = np.squeeze(bounding_boxes[0,0:4])
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0]-margin/2, 0)
bb[1] = np.maximum(det[1]-margin/2, 0)
bb[2] = np.minimum(det[2]+margin/2, img_size[1])
bb[3] = np.minimum(det[3]+margin/2, img_size[0])
cropped = img[bb[1]:bb[3],bb[0]:bb[2],:]
aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
img = aligned
return img
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', type=str, help='pth model file')
    parser.add_argument('--data_dir', type=str, help='Directory containing images. If images are not already aligned and cropped, pass --is_aligned 0.')
parser.add_argument('--model_type', type=str, help='Model type to use for training.', default='IR_50')# support: ['ResNet_50', 'ResNet_101', 'ResNet_152', 'IR_50', 'IR_101', 'IR_152', 'IR_SE_50', 'IR_SE_101', 'IR_SE_152']
parser.add_argument('--input_size', type=str, help='support: [112, 112] and [224, 224]', default=[112, 112])
parser.add_argument('--output_dir', type=str, help='Dir where to save all embeddings and demo images', default='data/out_embeddings/')
parser.add_argument('--is_aligned', type=int, help='Is the data directory already aligned and cropped? 0:False 1:True', default=1)
    parser.add_argument('--with_demo_images', type=int, help='Also save demo images of the embedded faces. 0:False 1:True', default=1)
parser.add_argument('--image_size', type=int, help='Image size (height, width) in pixels.', default=112)
parser.add_argument('--margin', type=int, help='Margin for the crop around the bounding box (height, width) in pixels.', default=44)
parser.add_argument('--gpu_memory_fraction', type=float, help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
parser.add_argument('--image_batch', type=int, help='Number of images stored in memory at a time. Default 64.', default=64)
parser.add_argument('--num_workers', type=int, help='Number of threads to use for data pipeline.', default=8)
    # output h5 file name
    parser.add_argument('--h5_name', type=str, help='h5 file name', default='dataset.h5')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
|
445179
|
from itertools import product
import math
import torch
import torch.nn.functional as F
def im2toepidx(c, i, j, h, w):
return c*h*w + i*w + j
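# Quick sanity check (added for illustration) of the row-major flat indexing above:
# channel 1, row 2, col 3 on an 8x8 grid maps to 1*64 + 2*8 + 3 == 83.
assert im2toepidx(1, 2, 3, 8, 8) == 83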
def get_toeplitz_idxs(fshape, dshape, f_stride=(1,1), s_pad=(0,0)):
    """Return the (row, col) indices of the non-zero entries of the Toeplitz matrix
    that implements a 2D convolution with filters of shape fshape over data of shape
    dshape, plus the flat filter index that supplies the value of each entry."""
    assert fshape[1] == dshape[0], "data channels must match filters channels"
    fh, fw = fshape[-2:]
    ic, ih, iw = dshape
oh = int(math.floor((ih + 2 * s_pad[0] - fh) / f_stride[0]) + 1)
ow = int(math.floor((iw + 2 * s_pad[1] - fw) / f_stride[1]) + 1)
oc = fshape[0]
T_idxs = []
f_idxs = []
for outch, outh, outw in product(range(oc), range(oh), range(ow)):
for fi, fj in product(range(0-s_pad[0], fh-s_pad[0]), range(0-s_pad[1], fw-s_pad[1])):
readh, readw = (outh*f_stride[0]) + fi, (outw*f_stride[1]) + fj
if readh < 0 or readw < 0 or readh >= ih or readw >= iw:
# We don't want to go over the 'edges' of the image.
continue
for inch in range(ic):
Mj = im2toepidx(inch, readh, readw, ih, iw)
Mi = im2toepidx(outch, outh, outw, oh, ow)
T_idxs.append([Mi, Mj])
                f_flat_idx = outch*(ic*fh*fw) + inch*(fh*fw) + (fi+s_pad[0])*fw + (fj+s_pad[1])  # row-major index into the flattened (oc, ic, fh, fw) filter
f_idxs.append(f_flat_idx)
T_idxs = torch.LongTensor(T_idxs).t()
f_idxs = torch.LongTensor(f_idxs)
return (T_idxs, f_idxs)
def get_filter_vals(f, f_idxs):
vals = torch.gather(f.view(-1).to('cpu'), dim=0, index=f_idxs)
return vals
def get_sparse_toeplitz(f, dshape, T_idxs, f_idxs):
t_size = (T_idxs[0].max() + 1, torch.prod(torch.tensor(dshape)))
vals = get_filter_vals(f, f_idxs)
return torch.sparse.FloatTensor(T_idxs, vals, t_size)
def apply_toeplitz_deriv_F(T, f, T_idxs, f_idxs):
TF = torch.zeros_like(f)
TF_flat = TF.view(-1)
for f_i in range(f.view(-1).shape[0]):
T_idxs_for_fi = T_idxs[:, torch.where(f_idxs == f_i)[0]]
TF_flat[f_i] = torch.sum(T[T_idxs_for_fi[0], T_idxs_for_fi[1]])
return TF
def DTdw(T_idxs, f_idxs):
T0_max = (T_idxs[0].max().item() + 1)
T1_max = (T_idxs[1].max().item() + 1)
shape = (T0_max * T1_max,
f_idxs.max().item() + 1)
F = torch.zeros(shape).to('cuda')
for t, f in zip(T_idxs.T, f_idxs):
F[t[0]*T1_max + t[1], f] = 1.0
return F
def test_sparse_toeplitz():
# cin, cout, fh, fw, im_h, im_w, stride, pad
settings = [[1, 1, 3, 3, 8, 8, 1, 0],
[1, 1, 3, 3, 8, 8, 1, 1],
[1, 1, 3, 3, 8, 8, 2, 0],
[1, 3, 3, 3, 8, 8, 1, 0],
[3, 1, 3, 3, 8, 8, 1, 0],
[3, 3, 3, 3, 8, 8, 1, 0],
[1, 3, 1, 1, 8, 8, 1, 0],
[1, 3, 6, 6, 8, 8, 1, 0],
[1, 3, 3, 3, 8, 8, 2, 0],
[1, 3, 6, 6, 8, 8, 3, 0],
[1, 3, 3, 3, 8, 8, 1, 2],
[1, 3, 3, 3, 3, 3, 1, 0],
[1, 3, 3, 3, 3, 3, 1, 2]]
all_pass = True
for s in settings:
cin, cout, fh, fw, im_h, im_w, stride, pad = s
print(f"Setting: {s}")
filters = torch.randn(cout, cin, fh, fw)
X = torch.randn(1, cin, im_h, im_w)
conv_out = F.conv2d(X, filters, bias=None, stride=stride, padding=pad)
conv_out = conv_out.view(-1)
T_idxs, f_idxs = get_toeplitz_idxs(filters.shape, X.shape[1:],
f_stride=(stride, stride), s_pad=(pad, pad))
T_sparse = get_sparse_toeplitz(filters, X.shape[1:], T_idxs, f_idxs)
toeplitz_out = torch.mm(T_sparse.to_dense(), X.view(-1, 1)).view(-1)
close = torch.allclose(conv_out, toeplitz_out, atol=1e-5)
print("Toeplitz and Convolution close: ", close)
print("Maximum difference between conv and toeplitz:",
torch.abs(conv_out - toeplitz_out).max())
if not close:
all_pass = False
print("********"*50)
if all_pass:
print("All tests passed")
else:
print("Test failed")
if __name__ == '__main__':
test_sparse_toeplitz()
|
445239
|
import unittest
import pytest
from grapl_analyzerlib.comparators import (
Distance,
Not,
Has,
Eq,
)
PREDICATE: str = "pred"
VALUE: str = "value"
class TestComparators(unittest.TestCase):
def test_distance(self) -> None:
comparator = Distance(
predicate=PREDICATE,
value=VALUE,
distance=3,
)
assert comparator.to_filter() == "distance(pred, value, 3)"
comparator = Distance(
predicate=PREDICATE,
value=Not(VALUE),
distance=3,
)
assert comparator.to_filter() == "NOT distance(pred, value, 3)"
def test_has(self) -> None:
comparator = Has(
predicate=PREDICATE,
)
assert comparator.to_filter() == "has(pred)"
comparator = Has(
predicate=Not(PREDICATE),
)
assert comparator.to_filter() == "(NOT has(pred) )"
def test_eq__non_dgraph_type(self) -> None:
comparator = Eq(
predicate=PREDICATE,
value=VALUE,
)
assert comparator.to_filter() == "eq(pred, value)"
comparator = Eq(
predicate=PREDICATE,
value=Not(VALUE),
)
assert comparator.to_filter() == "(NOT eq(pred, value))"
def test_eq__dgraph_type(self) -> None:
comparator = Eq(
predicate="dgraph.type",
value=VALUE,
)
assert comparator.to_filter() == "type(value)"
comparator = Eq(
predicate="dgraph.type",
value=Not(VALUE),
)
assert comparator.to_filter() == "(NOT type(value))"
@pytest.mark.skip("TODO")
def test_gt(self) -> None:
pass
@pytest.mark.skip("TODO")
def test_ge(self) -> None:
pass
@pytest.mark.skip("TODO")
def test_lt(self) -> None:
pass
@pytest.mark.skip("TODO")
def test_le(self) -> None:
pass
@pytest.mark.skip("TODO")
def test_contains(self) -> None:
pass
@pytest.mark.skip("TODO")
def test_startswith(self) -> None:
pass
@pytest.mark.skip("TODO")
def test_endswith(self) -> None:
pass
@pytest.mark.skip("TODO")
def test_rex(self) -> None:
pass
|
445278
|
from google.appengine.ext import ndb
from endpoints_proto_datastore.ndb import EndpointsModel
from endpoints_proto_datastore.ndb import EndpointsAliasProperty
from protorpc import messages
class AccountGeoCode(EndpointsModel):
lat = ndb.FloatProperty()
lng = ndb.FloatProperty()
class Account(EndpointsModel):
_message_fields_schema = ('id', 'gplus_id', 'gplus_page', 'type',
'display_name', 'pic_url', 'geocode',
'real_name', 'location', 'region', 'email',
'country', 'ctry_filename', 'product_group',
'pg_filename', 'deleted', 'so_id')
_api_key = None
gplus_id = ndb.StringProperty()
gplus_page = ndb.StringProperty()
type = ndb.StringProperty()
display_name = ndb.StringProperty()
real_name = ndb.StringProperty()
email = ndb.StringProperty()
auth_email = ndb.StringProperty()
location = ndb.StringProperty()
region = ndb.StringProperty()
country = ndb.StringProperty()
ctry_filename = ndb.StringProperty()
geocode = ndb.StructuredProperty(AccountGeoCode)
product_group = ndb.StringProperty(repeated=True)
pg_filename = ndb.StringProperty()
deleted = ndb.BooleanProperty()
pic_url = ndb.StringProperty()
# Stack Overflow ID : issue #211 on github
so_id = ndb.StringProperty()
def ApiKeySet(self, value):
self._api_key = value
@EndpointsAliasProperty(setter=ApiKeySet, property_type=messages.StringField)
def api_key(self):
return self._api_key
def IdSet(self, value):
if not isinstance(value, basestring):
raise TypeError('ID must be a string.')
self.UpdateFromKey(ndb.Key(Account, value))
@EndpointsAliasProperty(setter=IdSet, required=True)
def id(self):
if self.key is not None:
return self.key.string_id()
|
445304
|
import pytest
import torch
from module import Embedding
class Config(object):
vocab_size = 10
word_dim = 10
pos_size = 12 # 2 * pos_limit + 2
pos_dim = 5
dim_strategy = 'cat' # [cat, sum]
config = Config()
x = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 3, 5, 0], [8, 4, 3, 0, 0]])
x_pos = torch.tensor([[1, 2, 3, 4, 5], [1, 2, 3, 4, 0], [1, 2, 3, 0, 0]])
def test_Embedding_cat():
embed = Embedding(config)
feature = embed((x, x_pos))
dim = config.word_dim + config.pos_dim
assert feature.shape == torch.Size((3, 5, dim))
def test_Embedding_sum():
config.dim_strategy = 'sum'
embed = Embedding(config)
feature = embed((x, x_pos))
dim = config.word_dim
assert feature.shape == torch.Size((3, 5, dim))
if __name__ == '__main__':
pytest.main()
|
445316
|
from .address_data import *
from .address_queries import *
from .address_summary import *
from .address_transactions import *
from .address_resolution import *
from .proxy_utils import *
|
445345
|
import ckan.plugins as plugins
import ckan.plugins.toolkit as toolkit
import ckanext.validate_links.cli as cli
import ckanext.validate_links.views as views
def admin_only(context, data_dict=None):
return {'success': False, 'msg': 'Access restricted to system administrators'}
class Validate_LinksPlugin(plugins.SingletonPlugin):
plugins.implements(plugins.IConfigurer)
plugins.implements(plugins.IBlueprint)
plugins.implements(plugins.IAuthFunctions)
plugins.implements(plugins.IClick)
# IConfigurer
def update_config(self, config):
toolkit.add_ckan_admin_tab(config, 'admin_broken_links.read', 'Broken links')
toolkit.add_template_directory(config, 'templates')
toolkit.add_public_directory(config, 'public')
toolkit.add_resource('fanstatic', 'validate_links')
# IBlueprint
def get_blueprint(self):
return views.get_blueprint()
# IAuthFunctions
def get_auth_functions(self):
return {'admin_broken_links': admin_only}
# IClick
def get_commands(self):
return cli.get_commands()
|
445349
|
import scrapy
from ..items import EconomycrawlerItem
from news.models import EHeadline
from economycrawler.spiders import economy_spider
from economycrawler import pipelines
class EconomySpider(scrapy.Spider):
name = "economy"
start_urls = [
'https://economictimes.indiatimes.com/markets/stocks/news'
]
def parse(self, response):
div_all_news = response.xpath("//div[@class='eachStory']")
i=0
for some in div_all_news:
items = EconomycrawlerItem()
title = some.xpath("//h3/a/meta/@content")[i].extract()
link = "https://economictimes.indiatimes.com" + some.xpath("//h3/a/@href")[i].extract()
img = some.xpath("//a/span[@class='imgContainer']/img/@data-original")[i].extract()
i+=1
items["title"] = title
items["image"] = img
items["url"] = link
items['source'] = 'Economic Times'
yield items
#if i==10:
# break
class ExpressSpider(scrapy.Spider):
name = "express"
start_urls = [
'https://indianexpress.com/section/business/economy/'
]
def parse(self, response):
div_all_news = response.xpath("//div[@class='articles']")
i=0
for some in div_all_news:
items = EconomycrawlerItem()
title = some.xpath("//h2/a/text()")[i].extract()
link = some.xpath("//h2/a/@href")[i].extract()
s = some.xpath("//div/a/noscript")[i].extract()
l = s.split('"')
img = l[5]
i+=1
l=[]
items["title"] = title
items["image"] = img
items["url"] = link
items["source"] = 'Indian Express'
yield items
#if i==10:
# break
|
445440
|
import sys
sys.path.insert(0, "../")
from opetopy.UnnamedOpetope import address, ProofTree
p = ProofTree({
address([], 2): {
address([], 1): {
address('*'): {} # {} represents the point
},
address(['*']): {
address('*'): {}
}
},
address([['*']]): {
None: {} # indicates a degeneracy
}})
print(p)
print()
print(p.eval())
|
445447
|
import pandas as pd
import os
import os.path as osp
def load_csv(working_dir,csv_path):
cache_folder_name = "cache/"
cache_folder_path = osp.join(working_dir,cache_folder_name)
os.makedirs(cache_folder_path,exist_ok=True)
pickle_file_name = os.path.basename(csv_path) + ".pkl"
csv_folder = os.path.dirname(csv_path)
    csv_folder = csv_folder[1:]  # drop the leading path separator so the path can be nested under the cache folder
cache_pickle_folder = osp.join(cache_folder_path,csv_folder)
os.makedirs(cache_pickle_folder,exist_ok=True)
pickle_path = osp.join(cache_pickle_folder,pickle_file_name)
if not os.path.isfile(pickle_path):
print("pkl not found: {}".format(pickle_path))
dataframe = pd.read_csv(csv_path)
dataframe.to_pickle(pickle_path)
else:
print("pkl found: {}".format(pickle_path))
dataframe = pd.read_pickle(pickle_path)
return dataframe
if __name__ == "__main__":
load_csv("/home/koehlp/Downloads/work_dirs","/net/merkur/storage/deeplearning/users/koehl/gta/GTA_Dataset_22.07.2019/test/cam_0/coords_cam_0.csv" )
|
445469
|
from torch2trt_dynamic.torch2trt_dynamic import (get_arg, tensorrt_converter,
trt_)
@tensorrt_converter('torch.take')
def convert_take(ctx):
input = ctx.method_args[0]
index = get_arg(ctx, 'index', pos=1, default=None)
input_trt = trt_(ctx.network, input)
index_trt = trt_(ctx.network, index)
output = ctx.method_return
# flatten input
layer = ctx.network.add_shuffle(input_trt)
layer.reshape_dims = (-1, )
flatten_input_trt = layer.get_output(0)
# flatten index
output_trt = ctx.network.add_gather(flatten_input_trt, index_trt,
0).get_output(0)
output._trt = output_trt
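# Illustration (added sketch, not part of the original converter) of the torch.take
# semantics this converter reproduces: the input is treated as 1-D, so flattening it
# and gathering on axis 0 matches torch.take element for element.
if __name__ == '__main__':
    import torch
    src = torch.tensor([[4, 3], [5, 6]])
    assert torch.take(src, torch.tensor([0, 3])).tolist() == [4, 6]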
|
445490
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
import imutils
def img_equal_clahe_yuv(img):
    """Apply histogram equalization and CLAHE to an image in the YUV color space.
    Args:
        img (image) : Input image
    Returns:
        img_eq (image): Image with histogram equalization applied
        img_clahe (image): Image with CLAHE applied
    """
img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
img_eq = img_yuv.copy()
img_eq[:,:,0] = cv2.equalizeHist(img_eq[:, :, 0])
img_eq = cv2.cvtColor(img_eq, cv2.COLOR_YUV2BGR)
img_clahe = img_yuv.copy()
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))  # create the CLAHE operator
    img_clahe[:, :, 0] = clahe.apply(img_clahe[:, :, 0])  # apply CLAHE to the luma channel
img_clahe = cv2.cvtColor(img_clahe, cv2.COLOR_YUV2BGR)
return img_eq,img_clahe
def img_normalize(img):
"""Normalize image
Args:
img (image) : Image
Returns:
img_norm (image): Image with normalize applied
"""
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_norm = cv2.normalize(img_gray, None, 0, 255, cv2.NORM_MINMAX)
return img_norm
def show_hist(img):
    """Plot the histogram of an image.
Args:
img (image) : Gray scale image
"""
plt.hist(img.flatten(), 256, [0, 256], color = 'r')
plt.xlim([0, 256])
plt.show()
plt.figure()
def img_clahe_luminus(img):
    """Apply histogram equalization and CLAHE to an image in the LAB color space.
    Args:
        img (image) : Input image
    Returns:
        img_eq (image): Image with histogram equalization applied
        img_clahe (image): Image with CLAHE applied
    """
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)  # convert BGR -> LAB to get the luminosity channel
    l, a, b = cv2.split(lab)  # split the channels
    el = cv2.equalizeHist(l)
    img_eq = cv2.merge((el, a, b))
    img_eq = cv2.cvtColor(img_eq, cv2.COLOR_LAB2BGR)
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))  # histogram equalization, see http://www.gisdeveloper.co.kr/?p=6652
    cl = clahe.apply(l)  # apply to the luminosity channel
    limg = cv2.merge((cl, a, b))  # merge the channels back
img_clahe = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
return img_eq, img_clahe
def remove_line(gray):
    """Remove long horizontal lines.
    Args:
        gray (image): Gray scale image
    Returns:
        255 - result: Binary image with long lines removed.
    """
h,w = gray.shape[:2] # h, w
    # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # gray channel
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]  # binarize with Otsu's method
kernel = np.ones((3, 3), np.uint8)
    dilation_image = cv2.dilate(thresh, kernel, iterations=1)  # dilate operation
    # kernel for detecting horizontal lines -> to find horizontal lines, x must be larger than y in (x, y)
    horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 1))
    # morphological operation (same family as the dilate above)
    detected_lines = cv2.morphologyEx(dilation_image, cv2.MORPH_OPEN, horizontal_kernel, iterations=2)
    # find the contours of the detected lines
cnts = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
temp_height = 10
    for c in cnts:  # for every contour
        temp = c.flatten()
        if max(temp[::2]) - min(temp[::2]) > w / 2:  # remove contours wider than half of the image width
            temp_height = max(temp[1::2]) - min(temp[1::2])
            cv2.drawContours(dilation_image, [c], -1, (0, 0, 0), -1)  # fill the contour interior with black
repair_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, (temp_height // 10) + 1))
result = cv2.morphologyEx(dilation_image, cv2.MORPH_CLOSE, repair_kernel, iterations=2)
return 255 - result
def global_threshold1(gray):
"""Binary image using global threshold
Args:
gray (image): Gray scale image
Returns:
        gray (image) : Resized gray scale image (same size as the returned binary image)
        255 - closing (image) : Binary image
"""
h, w = gray.shape[:2] # h, w
    if w > 1000 and h > 100:  # if width and height exceed the thresholds, downscale to 30%
        gray = cv2.resize(gray, dsize=(0, 0), fx=0.3, fy=0.3, interpolation=cv2.INTER_LINEAR)
    blurred = cv2.GaussianBlur(gray, (11, 11), 1)  # apply Gaussian blur
    thresh = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]  # binarize with Otsu's method
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))  # rectangular structuring element
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)  # morphological open
    closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)  # morphological close
return gray, 255 - closing
def remove_brightness(gray):
    """Apply binarization to an image with severe brightness deviation.
    Args:
        gray (image): Gray scale image
    Returns:
        gray (image) : Resized gray scale image (same size as the returned binary image)
        255 - result (image) : Binary image
    """
h, w = gray.shape[:2]
    if w > 1000 and h > 100:  # if width and height exceed the thresholds, downscale to 30%
        gray = cv2.resize(gray, dsize=(0, 0), fx=0.3, fy=0.3, interpolation=cv2.INTER_LINEAR)
    blurred = cv2.GaussianBlur(gray, (11, 11), 1)  # apply Gaussian blur
    thresh = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,  # binarize with adaptiveThreshold
                                   cv2.THRESH_BINARY, 15, 2)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))  # rectangular structuring element
    opening = cv2.morphologyEx(255 - thresh, cv2.MORPH_OPEN, kernel)  # morphological open
    repair_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 5))  # kernel for the morphological close
    result = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, repair_kernel, iterations=1)  # morphological close
return gray, 255 - result
def show_x_y_hist(img):
    """Plot the image's mean intensity along the x and y axes.
Args:
img (image) : Image
"""
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
h, w = gray.shape[:2]
temp = []
for i in range(w):
temp.append(gray[:, i].mean())
plt.title('x')
plt.plot(temp)
plt.figure()
temp = []
for i in range(h):
temp.append(gray[i, :].mean())
plt.title('y')
plt.plot(temp)
plt.figure()
plt.imshow(img)
def pyramid(image, scale=1.5, minSize=(30, 30)):
# yield the original image
yield image
# keep looping over the pyramid
while True:
# compute the new dimensions of the image and resize it
w = int(image.shape[1] / scale)
image = imutils.resize(image, width=w)
# if the resized image does not meet the supplied minimum
# size, then stop constructing the pyramid
if image.shape[0] < minSize[1] or image.shape[1] < minSize[0]:
break
# yield the next image in the pyramid
yield image
def sliding_window(image, stepSize, windowSize):
# slide a window across the image
for y in range(0, image.shape[0], stepSize):
for x in range(0, image.shape[1], stepSize):
# yield the current window
yield (x, y, image[y:y + windowSize[1], x:x + windowSize[0]])
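def demo_sliding_window(image):
    """Minimal usage sketch of the two generators above (added helper, not used
    elsewhere in this module): walk every pyramid level and yield the top-left
    corner and shape of each 64x64 window."""
    for resized in pyramid(image, scale=1.5, minSize=(64, 64)):
        for (x, y, window) in sliding_window(resized, stepSize=32, windowSize=(64, 64)):
            yield x, y, window.shape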
def sliding_window1(gray):
"""Check the image brightness deviation using the sliding window method.
Args:
gray (image): Gray scale image
Returns:
_max (float) : maximum brightness of image.
_min (float) : minimum brightness of image.
"""
h, w = gray.shape[:2]
    (w_width, w_height) = (w // 3, h // 3)  # sliding window size
    dead_line = min(w // 10, h // 10)  # stopping margin for the sliding window
_max = 0
_min = np.inf
    for x in range(0, gray.shape[1] - dead_line, w // 5):  # slide the window horizontally by w//5
        for y in range(0, gray.shape[0] - dead_line, h // 5):  # slide the window vertically by h//5
            t_x, t_y = x + w_width, y + w_height  # far corner of the current window
if x + w_width > gray.shape[1]:
t_x = gray.shape[1]
if y + w_height > gray.shape[0]:
t_y = gray.shape[0]
            window = gray[y:t_y, x:t_x]  # current window (rows first, then columns)
            _mean = window.mean()  # mean brightness of the current window
if _max <_mean:
_max = _mean
if _min > _mean:
_min = _mean
return _max, _min
def sliding_window2(gray):
"""Check the image brightness deviation using the sliding window method.
Args:
gray (image): Gray scale image
Returns:
_max (float) : maximum brightness of image.
_min (float) : minimum brightness of image.
"""
h, w = gray.shape[:2]
_max = 0
_min = np.inf
(winW, winH) = (w // 3, h // 3)
for resized in pyramid(gray, scale=1.5):
for (x, y, window) in sliding_window(resized, stepSize=max(w // 10, h // 10), windowSize=(winW, winH)):
if window.shape[0] != winH or window.shape[1] != winW:
continue
_mean = resized[y:y + winH, x:x + winW].mean()
if _max <_mean:
_max = _mean
if _min > _mean:
_min = _mean
return _max, _min
|
445510
|
import getopt
import os
import sys
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pylab
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import LinearLocator, LogLocator, MaxNLocator
from numpy import double
OPT_FONT_NAME = 'Helvetica'
TICK_FONT_SIZE = 24
LABEL_FONT_SIZE = 28
LEGEND_FONT_SIZE = 30
LABEL_FP = FontProperties(style='normal', size=LABEL_FONT_SIZE)
LEGEND_FP = FontProperties(style='normal', size=LEGEND_FONT_SIZE)
TICK_FP = FontProperties(style='normal', size=TICK_FONT_SIZE)
MARKERS = (['o', 's', 'v', "^", "h", "v", ">", "x", "d", "<", "|", "", "|", "_"])
# you may want to change the color map for different figures
COLOR_MAP = ('#B03A2E', '#2874A6', '#239B56', '#7D3C98', '#F1C40F', '#F5CBA7', '#82E0AA', '#AEB6BF', '#AA4499')
# you may want to change the patterns for different figures
PATTERNS = (["\\", "///", "o", "||", "\\\\", "\\\\", "//////", "//////", ".", "\\\\\\", "\\\\\\"])
LABEL_WEIGHT = 'bold'
LINE_COLORS = COLOR_MAP
LINE_WIDTH = 3.0
MARKER_SIZE = 0.0
MARKER_FREQUENCY = 1000
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['xtick.labelsize'] = TICK_FONT_SIZE
matplotlib.rcParams['ytick.labelsize'] = TICK_FONT_SIZE
matplotlib.rcParams['font.family'] = OPT_FONT_NAME
FIGURE_FOLDER = './results'
FILE_FOLER = '/home/shuhao/TStream/data/stats'
def ConvertEpsToPdf(dir_filename):
os.system("epstopdf --outfile " + dir_filename + ".pdf " + dir_filename + ".eps")
os.system("rm -rf " + dir_filename + ".eps")
# draw a bar chart
def DrawFigure(x_values, y_values, legend_labels, x_label, y_label, filename, allow_legend):
# you may change the figure size on your own.
fig = plt.figure(figsize=(10, 3))
figure = fig.add_subplot(111)
FIGURE_LABEL = legend_labels
if not os.path.exists(FIGURE_FOLDER):
os.makedirs(FIGURE_FOLDER)
# values in the x_xis
index = np.arange(len(x_values))
# the bar width.
# you may need to tune it to get the best figure.
width = 0.08
# draw the bars
bars = [None] * (len(FIGURE_LABEL))
for i in range(len(y_values)):
bars[i] = plt.bar(index + i * width + width / 2,
y_values[i], width,
hatch=PATTERNS[i],
color=LINE_COLORS[i],
label=FIGURE_LABEL[i], edgecolor='black', linewidth=3)
# # sometimes you may not want to draw legends.
# if allow_legend == True:
# plt.legend(bars, FIGURE_LABEL,
# prop=LEGEND_FP,
# ncol=5,
# loc='upper center',
# # mode='expand',
# shadow=False,
# bbox_to_anchor=(0.45, 1.6),
# columnspacing=0.1,
# handletextpad=0.2,
# # bbox_transform=ax.transAxes,
# # frameon=True,
# # columnspacing=5.5,
# # handlelength=2,
# )
# you may need to tune the xticks position to get the best figure.
plt.xticks(index + 2.5 * width, x_values)
# plt.ylim(0, 100)
plt.xlabel(x_label, fontproperties=LABEL_FP)
plt.ylabel(y_label, fontproperties=LABEL_FP)
plt.savefig(FIGURE_FOLDER + "/" + filename + ".pdf", bbox_inches='tight')
def ReadFile(threads, events):
    # read the measured throughput of each scheduling strategy for the given setting
    y = [[]]
    for algo in ['GS', 'BFS', 'DFS', 'OPGS', 'OPBFS', 'OPDFS']:
        path = FILE_FOLER + '/{}/threads = {}/totalEvents = {}'.format(algo, threads, events)
        lines = open(path).readlines()
        throughput = lines[0].split(": ")[1]
        y[0].append(float(throughput))
    print(y)
    return y
def ReadFileWithAbort(threads, events):
    # same as ReadFile, but for the variants that allow aborts
    y = [[]]
    for algo in ['GSA', 'BFSA', 'DFSA', 'OPGSA', 'OPBFSA', 'OPDFSA']:
        path = FILE_FOLER + '/{}/threads = {}/totalEvents = {}'.format(algo, threads, events)
        lines = open(path).readlines()
        throughput = lines[0].split(": ")[1]
        y[0].append(float(throughput))
    print(y)
    return y
if __name__ == '__main__':
for tthread in [1, 2, 4, 8, 16, 24]:
for batchInterval in [1024, 2048, 4096, 8192, 10240]:
totalEvents = tthread * batchInterval
y_values = ReadFile(tthread, totalEvents)
x_values = ["$GS_{OC}$", "$BFS_{OC}$", "$DFS_{OC}$", "$GS_{OP}$", "$BFS_{OP}$", "$DFS_{OP}$"]
legend_labels = ["throughput"]
legend = True
DrawFigure(x_values, y_values, legend_labels,
'', 'throughput', 'overview_t{}_b{}'.format(tthread, batchInterval), True)
y_values = ReadFileWithAbort(tthread, totalEvents)
x_values = ["$GSA_{OC}$", "$BFSA_{OC}$", "$DFSA_{OC}$", "$GSA_{OP}$", "$BFSA_{OP}$", "$DFSA_{OP}$"]
legend_labels = ["throughput"]
DrawFigure(x_values, y_values, legend_labels,
'', 'throughput', 'overview_with_abort_t{}_b{}'.format(tthread, batchInterval), True)
|
445513
|
import pprint
points = []
xs = []
ys = []
for i in range(2):
xs.append(int(input("Enter an x value: ")))
for i in range(2):
    ys.append(int(input("Enter a y value: ")))
points.append([xs[0], ys[0]])
points.append([xs[1], ys[0]])
points.append([xs[1], ys[1]])
points.append([xs[0], ys[1]])
pprint.pprint(points, indent=4, compact=False)
|
445560
|
from . import api
from flask import jsonify
def bad_request_error(message):
response = jsonify({'error': 'bad request', 'message': message})
response.status_code = 400
return response
def unauthorized_error(message):
response = jsonify({'error': 'unauthorized', 'message': message})
response.status_code = 401
return response
def forbidden_error(message):
response = jsonify({'error': 'forbidden', 'message': message})
response.status_code = 403
return response
def page_not_found_error(message):
response = jsonify({'error': 'page not found', 'message': message})
response.status_code = 404
return response
class ValidationError(ValueError):
pass
@api.errorhandler(ValidationError)
def validate_error(e):
return bad_request_error(e.args[0])
|
445569
|
from pythonforandroid.recipe import CompiledComponentsPythonRecipe
class PymunkRecipe(CompiledComponentsPythonRecipe):
name = "pymunk"
version = "6.0.0"
url = "https://pypi.python.org/packages/source/p/pymunk/pymunk-{version}.zip"
depends = ["cffi", "setuptools"]
call_hostpython_via_targetpython = False
def get_recipe_env(self, arch):
env = super().get_recipe_env(arch)
env["LDFLAGS"] += " -llog" # Used by Chipmunk cpMessage
env["LDFLAGS"] += " -lm" # For older versions of Android
return env
recipe = PymunkRecipe()
|
445576
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../../../"))
import unittest
import pandas as pd
from karura.core.insights.categorical_to_dummy_insight import CategoricalToDummyInsight
from karura.core.dataframe_extension import DataFrameExtension
class TestCategoricalToDummyInsight(unittest.TestCase):
def test_insight(self):
d = {
"category1": pd.Series(["a", "b", "c", "b", "c", "a", "a", "b"]),
"category2": pd.Series(["z", "z", "x", "y", "z", "z", "z", "x"]),
"numericals": pd.Series([1, 2, 3, 2, 1, 2, 2, 1]),
}
category_columns = ["category1", "category2"]
dfe = DataFrameExtension(pd.DataFrame(d), categoricals=category_columns)
insight = CategoricalToDummyInsight()
targets = insight.get_insight_targets(dfe)
self.assertEqual(len(category_columns), len(targets))
insight.adopt(dfe)
self.assertEqual("numericals", dfe.df.columns[0])
for c in category_columns:
categories = d[c].value_counts().index
dummy_columns = ["{}_{}".format(c, v) for v in categories] # default prefix
converted = dfe.df.columns[[v.startswith(c) for v in dfe.df.columns]]
for cv in converted:
self.assertTrue(cv in dummy_columns)
print(dfe.ftypes)
def test_get_transformer(self):
d = {
"category1": ["a", "b", "c", "d"],
"category2": [1, 2, 3, 4]
}
df = pd.DataFrame.from_dict(d)
dfe = DataFrameExtension(df, categoricals=("category1", "category2"))
insight = CategoricalToDummyInsight()
insight.adopt(dfe)
self.assertEqual(8, len(dfe.ftypes)) # expand to dummy
        dfe.df.drop(["category1_a", "category2_3"], axis=1, inplace=True)  # drop 2 columns (as if they were useless features)
transformer = insight.get_transformer(dfe)
df_t = transformer.transform(pd.DataFrame.from_dict(d))
print(df_t.head())
self.assertEqual(6, len(df_t.columns))
for c in ["category1_b", "category1_c", "category1_d"]:
self.assertTrue(c in df_t.columns)
self.assertEqual(1, len(df_t[df_t[c] == 1]))
for c in ["category2_1", "category2_2", "category2_4"]:
self.assertTrue(c in df_t.columns)
self.assertEqual(1, len(df_t[df_t[c] == 1]))
if __name__ == "__main__":
unittest.main()
|
445627
|
import sys
import os
import subprocess
from shutil import copyfile, rmtree
def find_and_split_inputs(input_env_data, output_dir, number_of_inputs):
## Generate the environment file (by splitting inputs)
## Find the input file name
input_env_lines = input_env_data.split('\n')
input_vars = [line.split('=')[1] for line in input_env_lines if line.startswith('IN=')]
if(len(input_vars) == 1):
input_file_name = input_vars[0]
# print(input_file_name)
## Split input files
new_input_files = split_inputs(output_dir, number_of_inputs, input_file_name)
else:
## If there is no input variable, then we don't split any files
new_input_files = []
return new_input_files
def split_inputs(output_dir, number_of_inputs, input_file_name):
## Make a directory to store the split parts (and delete what was previously there)
split_input_directory = os.path.join(output_dir, 'split_inputs')
if os.path.exists(split_input_directory):
rmtree(split_input_directory)
os.makedirs(split_input_directory)
## Split it into parts
# stream = os.popen('wc -l {}'.format(input_file_name))
# wc_output = stream.read()
# input_file_n_lines = wc_output.split()[0]
# print(input_file_n_lines)
split_file_prefix = os.path.join(split_input_directory, 'input-chunk-')
    stream = subprocess.run(['split',
                             '-n', 'l/{}'.format(number_of_inputs),
                             '-d',
                             os.path.expandvars(input_file_name),
                             split_file_prefix],
                            check=True)
new_input_files = [os.path.join(split_input_directory, f) for f in os.listdir(split_input_directory)
if os.path.isfile(os.path.join(split_input_directory, f))]
new_input_files.sort()
# print(new_input_files)
return new_input_files
def list_split_inputs(output_dir):
split_input_directory = os.path.join(output_dir, 'split_inputs')
assert(os.path.exists(split_input_directory))
new_input_files = [os.path.join(split_input_directory, f) for f in os.listdir(split_input_directory)
if os.path.isfile(os.path.join(split_input_directory, f))]
new_input_files.sort()
# print(new_input_files)
return new_input_files
def generate_env_file(input_env_data, output_env, new_input_files):
## Find the input file name
input_env_lines = input_env_data.split('\n')
## Save the new environment file accordingly
no_input_env_lines = [line for line in input_env_lines if not line.startswith('IN=')]
new_input_vars = ['IN{}={}'.format(i, in_file_name)
for i, in_file_name in enumerate(new_input_files)]
output_env_data = "\n".join(new_input_vars + no_input_env_lines)
with open(output_env, "w") as file:
file.write(output_env_data)
## Replace $IN with all different $INis (one for each input file)
def replace_in_variable(data, new_input_files):
new_data = data.replace(' $IN', ' ' + ' '.join(['$IN{}'.format(i)
for i in range(len(new_input_files))]))
return new_data
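def _example_replace_in_variable():
    # Added illustration (hypothetical command line, not part of the original script):
    # every ' $IN' occurrence is expanded into one positional variable per split file.
    assert replace_in_variable('cat $IN | grep foo', ['a.txt', 'b.txt']) == 'cat $IN0 $IN1 | grep foo'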
def generate_seq_script(input_script, output_script, new_input_files):
## Generate the sequential script
with open(input_script) as file:
input_script_data = file.read()
output_script_data = replace_in_variable(input_script_data, new_input_files)
with open(output_script, "w") as file:
file.write(output_script_data)
def main():
input_dir = sys.argv[1]
name_of_script = sys.argv[2]
number_of_inputs = int(sys.argv[3])
output_dir = sys.argv[4]
    try:
        env_suffix = "env_" + sys.argv[5]
        input_f_suffix = "_" + sys.argv[5]
    except IndexError:
        env_suffix = "env"
        input_f_suffix = ""
## This script takes a microbenchmark script as input, finds the $IN
## occurence in it and then generates an intermediary script with many
## $INs in its place.
input_script = os.path.join(input_dir, name_of_script + ".sh")
output_script = os.path.join(output_dir, '{}_{}_seq.sh'.format(name_of_script, number_of_inputs))
input_env = os.path.join(input_dir, name_of_script + "_{}.sh".format(env_suffix))
output_env = os.path.join(output_dir, '{}_{}_env.sh'.format(name_of_script, number_of_inputs))
input_funs = os.path.join(input_dir, name_of_script + "_funs.sh")
output_funs = os.path.join(output_dir, '{}_{}_funs.sh'.format(name_of_script, number_of_inputs))
input_in_f = os.path.join(input_dir, name_of_script + input_f_suffix + ".in")
output_in_f = os.path.join(output_dir, '{}_{}.in'.format(name_of_script, number_of_inputs, input_f_suffix))
## Read the input env file if it exists
try:
with open(input_env) as file:
input_env_data = file.read()
    except OSError:
print("Env file:", input_env, "could not be read.")
input_env_data = ""
## Find and split input files given the environment file
new_input_files = find_and_split_inputs(input_env_data, output_dir, number_of_inputs)
## Generate new environment file
generate_env_file(input_env_data, output_env, new_input_files)
## Copy the funs file (if it exists)
if os.path.exists(input_funs):
copyfile(input_funs, output_funs)
## Copy the standard input file (if it exists)
if os.path.exists(input_in_f):
copyfile(input_in_f, output_in_f)
generate_seq_script(input_script, output_script, new_input_files)
if __name__ == "__main__":
main()
|
445640
|
import numpy as np
import torch
import time
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
import matplotlib.pyplot as plt
import config.HyperConfig as Config
import Attention_LSTM as Model
import loss_function as LossFunc
import data
# from sklearn.model_selection import train_test_split
# from torch.utils.data import DataLoader, TensorDataset
def draw(acc_lst, loss_lst):
assert len(acc_lst) == len(loss_lst)
nb_epochs = len(acc_lst)
plt.subplot(211)
plt.plot(list(range(nb_epochs)), loss_lst, c='r', label='loss')
plt.legend(loc='best')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.subplot(212)
plt.plot(list(range(nb_epochs)), acc_lst, c='b', label='acc')
plt.legend(loc='best')
plt.xlabel('epoch')
plt.ylabel('acc')
plt.tight_layout()
plt.show()
# evaluate the model on the test set
def evaluate(test_data, classifier, vocab, config):
    # set training=False for this module and all sub-modules
classifier.eval()
total_acc = 0
total_loss = 0
loss_func = nn.CrossEntropyLoss()
# with torch.no_grad():
for batch_data in data.get_batch(test_data, config.batch_size):
# batch_data, seqs_len, _ = data.pad_batch(batch_data, config.max_len)
corpus_xb, wd2vec_xb, yb, _, seqs_len, _ = data.batch_data_variable(batch_data, vocab)
out, _ = classifier(corpus_xb, wd2vec_xb, seqs_len)
total_loss += loss_func(out, yb).item()
pred = torch.argmax(F.softmax(out, dim=1), dim=1)
# acc = (pred == yb).sum().item()
acc = torch.eq(pred, yb).sum().item()
total_acc += acc
print('test loss:', total_loss)
print('test acc:', float(total_acc) / len(test_data))
# train the model
def train(train_data, test_data, vocab, config):
    # loss_func = nn.NLLLoss()
    loss_func = nn.CrossEntropyLoss()  # labels must be in 0..n-1 and 1-dimensional
    att_loss = LossFunc.AttentionCrossEntropy()  # supervised attention loss
embed_weights = vocab.get_embedding_weights(config.embedding_path)
att_lstm = Model.Attention_LSTM(vocab, config, embed_weights)
optimizer = Adam(att_lstm.parameters(), lr=config.learning_rate, weight_decay=config.weight_decay)
    # 3. train the model
    acc_lst, loss_lst = [], []
    att_lstm.train()  # set training=True for this module and all sub-modules
t1 = time.time()
for eps in range(config.epochs):
print(' --Epoch %d' % (1 + eps))
total_loss = 0
total_acc = 0
        for batch_data in data.get_batch(train_data, config.batch_size):  # mini-batch training
            # batch_data, seqs_len, _ = data.pad_batch(batch_data, config.max_len)
            corpus_xb, wd2vec_xb, yb, att_ids, seqs_len, _ = data.batch_data_variable(batch_data, vocab)
            # 3.1 feed the batch to the model
            out, weights = att_lstm(corpus_xb, wd2vec_xb, seqs_len)
            # 3.2 reset the model gradients
att_lstm.zero_grad()
# optimizer.zero_grad()
# print(weights.shape, att_ids.shape, out.shape, yb.shape)
            # 3.3 compute the loss
            loss_cls = loss_func(out, yb)  # classification loss
            loss_att = att_loss(weights, att_ids)  # attention loss
            loss = loss_cls + config.theta * loss_att  # classification loss + supervised attention loss
            total_loss += loss.item()
            # compute the accuracy
pred = torch.argmax(F.softmax(out, dim=1), dim=1)
# acc = (pred == yb).sum()
acc = torch.eq(pred, yb).sum().item()
total_acc += acc
            # 3.4 backpropagate to compute the gradients
            loss.backward()
            # 3.5 update the model parameters (with the new gradients)
optimizer.step()
print('loss:', total_loss)
print('acc:', float(total_acc) / len(train_data))
loss_lst.append(total_loss)
acc_lst.append(float(total_acc) / len(train_data))
t2 = time.time()
    print('Total training time: %.2f min' % ((t2-t1) / 60))
    # plot the accuracy and loss curves
    draw(acc_lst, loss_lst)
    # save the whole model
    torch.save(att_lstm, config.save_model_path)
    # save only the model parameters
    # torch.save(att_lstm.state_dict(), config.save_model_path)
    # evaluate the model
evaluate(test_data, att_lstm, vocab, config)
if __name__ == '__main__':
np.random.seed(1314)
torch.manual_seed(3347)
torch.cuda.manual_seed(3347)
    # torch.backends.cudnn.deterministic = True  # helps reproducibility, but may hurt performance
    # torch.backends.cudnn.benchmark = False
    # torch.backends.cudnn.enabled = False  # cuDNN uses non-deterministic algorithms, which affects reproducibility
    print('GPU available:', torch.cuda.is_available())
    print('cuDNN available:', torch.backends.cudnn.enabled)
    print('GPUs:', torch.cuda.device_count())
    # torch.set_num_threads(4)  # number of OpenMP threads used for parallel CPU ops
config = Config.Config('config/hyper_param.cfg')
config.use_cuda = torch.cuda.is_available()
if config.use_cuda:
torch.cuda.set_device(0)
train_data = data.load_data_instance(config.train_data_path)
test_data = data.load_data_instance(config.test_data_path)
vocab = data.createVocab(corpus_path=config.train_data_path, lexicon_path=config.lexicon_path)
vocab.save(config.save_vocab_path)
train(train_data=train_data, test_data=test_data, vocab=vocab, config=config)
|
445668
|
import os.path
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.callbacks import ModelIntervalCheckpoint
from rl.core import Agent
from models import KickoffModes, AbstractConfiguration
def kickoff(configuration):
# type: (AbstractConfiguration) -> ()
dqn = DQNAgent(
model=configuration.create_cnn_model(),
nb_actions=configuration.environment.action_space.number_of_actions,
policy=configuration.policy,
memory=configuration.memory,
processor=configuration.processor,
nb_steps_warmup=configuration.warmup_steps,
gamma=configuration.gamma,
target_model_update=configuration.target_model_update,
train_interval=configuration.train_interval,
delta_clip=configuration.delta_clip)
dqn.compile(Adam(lr=configuration.learning_rate), metrics=configuration.metrics)
if configuration.mode == KickoffModes.train:
run_in_train_mode(dqn, configuration)
elif configuration.mode == KickoffModes.test:
run_in_test_mode(dqn, configuration)
def run_in_train_mode(dqn, configuration):
# type: (Agent, AbstractConfiguration) -> ()
checkpoint_weights_filename = configuration.checkpoint_weights_filename_base.format(step='')
if os.path.isfile(configuration.weights_filename):
dqn.load_weights(configuration.weights_filename)
configuration.environment.make()
callbacks = [ModelIntervalCheckpoint(checkpoint_weights_filename, interval=configuration.checkpoint_interval_steps)]
dqn.fit(configuration.environment, callbacks=callbacks, nb_steps=configuration.number_of_steps, nb_max_episode_steps=1000)
dqn.save_weights(configuration.weights_filename, overwrite=True)
configuration.environment.selenium_docker_wrapper.clean()
def run_in_test_mode(dqn, configuration):
# type: (Agent, AbstractConfiguration) -> ()
if not os.path.isfile(configuration.weights_filename):
raise ValueError('No Previous Weights Found')
configuration.environment.make()
dqn.load_weights(configuration.weights_filename)
dqn.test(configuration.environment, nb_episodes=configuration.number_test_episodes, nb_max_episode_steps=1000)
|
445679
|
import ray
import time
# A regular Python function.
def normal_function():
return 1
# By adding the `@ray.remote` decorator, a regular Python function
# becomes a Ray remote function.
@ray.remote
def my_function():
return 1
# To invoke this remote function, use the `remote` method.
# This will immediately return an object ref (a future) and then create
# a task that will be executed on a worker process.
obj_ref = my_function.remote()
# The result can be retrieved with ``ray.get``.
assert ray.get(obj_ref) == 1
@ray.remote
def slow_function():
time.sleep(10)
return 1
# Invocations of Ray remote functions happen in parallel.
# All computation is performed in the background, driven by Ray's internal event loop.
for _ in range(4):
# This doesn't block.
slow_function.remote()
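# To actually wait for the parallel tasks and collect their results, keep the object
# refs and pass the whole list to ray.get (added sketch; assumes the Ray runtime was
# started implicitly by the earlier .remote() calls).
obj_refs = [slow_function.remote() for _ in range(4)]
assert ray.get(obj_refs) == [1, 1, 1, 1]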
|
445703
|
import pytorch_lightning as pl
import torch
import torch.nn as nn
class OutputLayer(pl.LightningModule):
"""
Output Layer of the LegoFormer
Maps Transformer's output vector to decomposition factors.
"""
def __init__(self, dim_model, output_resolution=32):
"""
:param dim_model: Transformer model output dimensionality
:param output_resolution: Output voxel grid resolution (side length)
"""
super().__init__()
# Define output layers
self.linear_z = nn.Linear(dim_model, output_resolution)
self.linear_y = nn.Linear(dim_model, output_resolution)
self.linear_x = nn.Linear(dim_model, output_resolution)
# Initialize output layers
torch.nn.init.xavier_uniform_(self.linear_z.weight)
torch.nn.init.xavier_uniform_(self.linear_y.weight)
torch.nn.init.xavier_uniform_(self.linear_x.weight)
def forward(self, x):
"""
:param x: input with shape [BATCH_SIZE, NUM_FACTORS, DIM_MODEL]
:return: 3 vectors (decomposition factors), each with shape [BATCH_SIZE, NUM_FACTORS, NUM_VOXEL (32)]
"""
z_factors = self.linear_z(x).sigmoid()
y_factors = self.linear_y(x).sigmoid()
x_factors = self.linear_x(x).sigmoid()
return z_factors, y_factors, x_factors
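# Minimal usage sketch (added for illustration; the 768 model width is an arbitrary
# assumption, any positive dim_model behaves the same way).
if __name__ == "__main__":
    layer = OutputLayer(dim_model=768, output_resolution=32)
    dummy = torch.randn(2, 12, 768)  # [BATCH_SIZE, NUM_FACTORS, DIM_MODEL]
    z, y, x = layer(dummy)
    assert z.shape == y.shape == x.shape == torch.Size((2, 12, 32))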
|
445732
|
import logging
import re
from qark.issue import Severity, Issue
from qark.scanner.plugin import ManifestPlugin
log = logging.getLogger(__name__)
API_KEY_REGEX = re.compile(r'(?=.{20,})(?=.+\d)(?=.+[a-z])(?=.+[A-Z])')
SPECIAL_CHARACTER_REGEX = re.compile(r'(?=.+[!$%^~])')
HARDCODED_API_KEY_REGEX = re.compile(r'api_key|api|key', re.IGNORECASE)
API_KEY_DESCRIPTION = "Please confirm and investigate for potential API keys to determine severity."
class APIKeys(ManifestPlugin):
def __init__(self):
super(APIKeys, self).__init__(category="manifest", name="Potential API Key found",
description=API_KEY_DESCRIPTION)
self.severity = Severity.INFO
def run(self):
with open(self.manifest_path, "r") as manifest_file:
for line_number, line in enumerate(manifest_file):
# TODO: Fix API_KEY_REGEX, there are too many false positives
# if re.search(API_KEY_REGEX, line) and not re.search(SPECIAL_CHARACTER_REGEX, line):
# self.issues.append(Issue(
# category=self.category, severity=self.severity, name=self.name,
# description=self.description,
# file_object=self.manifest_path,
# line_number=line_number)
# )
if re.search(HARDCODED_API_KEY_REGEX, line):
self.issues.append(Issue(
category=self.category, severity=self.severity, name=self.name,
description=self.description,
file_object=self.manifest_path,
line_number=line_number)
)
plugin = APIKeys()
|
445746
|
from .dash import Dash, no_update # noqa: F401
from .views import BaseDashView # noqa: F401
from . import dependencies # noqa: F401
from . import development # noqa: F401
from . import exceptions # noqa: F401
from . import resources # noqa: F401
from .version import __version__ # noqa: F401
# from ._callback_context import CallbackContext as _CallbackContext
#
# callback_context = _CallbackContext()
|
445754
|
from .inference_models.hierarchical_model import infer_labels
from .utils.dataset import GogglesDataset
from .affinity_matrix_construction.construct import construct_image_affinity_matrices
__version__ = '0.1'
|
445760
|
from .base import BaseAPI, BaseEndpoint # noqa
from .client import api_client_factory # noqa
__all__ = ['BaseAPI', 'BaseEndpoint', 'api_client_factory']
|
445845
|
import datetime
import html
from typing import List
from urllib.parse import quote_plus
from .source import Source
from ...models import Chapter, Novel
class NovelPub(Source):
base_urls = ("https://www.novelpub.com",)
last_updated = datetime.date(2021, 10, 29)
search_viable = True
search_url_template = "https://www.novelpub.com/lnwsearchlive?inputContent={}"
def __init__(self, *args, **kwargs):
super(NovelPub, self).__init__(*args, **kwargs)
self.bad_tags += ["i"]
def search(self, keyword: str, *args, **kwargs) -> List[Novel]:
search_url = self.search_url_template.format(quote_plus(keyword))
response = self.http_gateway.get(search_url)
html_content = html.unescape(response.json().get("resultview"))
soup = self.make_soup(html_content)
novels = []
for a in soup.select(".novel-list .novel-item > a"):
novel = Novel(
title=a["title"].strip(),
url=self.to_absolute_url(a["href"]),
thumbnail_url=a.select_one("img")["src"],
)
novels.append(novel)
return novels
def novel(self, url: str) -> Novel:
soup = self.get_soup(url)
novel = Novel(
title=soup.select_one(".novel-title").text.strip(),
author=soup.select_one(".author a").text.strip(),
synopsis=[
p.text.strip()
for p in soup.select(".summary .content p")
if p.text.strip()
],
thumbnail_url=soup.select_one(".cover img")["data-src"],
url=url,
)
alternative_title = soup.select_one(".alternative-title")
if alternative_title and alternative_title.text.strip():
novel.add_metadata(
"title", alternative_title.text.strip(), others={"role": "alt"}
)
for li in soup.select(".categories > ul > li"):
novel.add_metadata("subject", li.text.strip())
for a in soup.select(".content .tag"):
novel.add_metadata("tag", a.text.strip())
for span in soup.select(".header-stats span"):
label = span.select_one("small").text.strip().lower()
if label == "status":
value = span.select_one("strong").text.strip()
novel.status = value
volume = novel.get_default_volume()
toc_url = url.rstrip("/") + "/chapters/page-{}"
soup = self.get_soup(toc_url.format(1))
self.extract_toc(soup, volume)
pages = soup.select(".pagenav .pagination > li:not(.PagedList-skipToNext)")
pages = (
range(
2, int(pages[-1].select_one("a")["href"].rsplit("-", 1)[-1].strip()) + 1
)
if len(pages) > 1
else range(0, 0)
)
for page in pages:
self.extract_toc(self.get_soup(toc_url.format(page)), volume)
return novel
def extract_toc(self, soup, volume):
for li in soup.select(".chapter-list > li"):
a = li.select_one("a")
updated = li.select_one("time").get("datetime", None)
chapter = Chapter(
index=int(li["data-orderno"]),
title=a.select_one(".chapter-title").text.strip(),
url=self.to_absolute_url(a["href"]),
updated=datetime.datetime.fromisoformat(updated) if updated else None,
)
volume.add(chapter)
def chapter(self, chapter: Chapter):
soup = self.get_soup(chapter.url)
content = soup.select_one("#chapter-container")
for element in content.select(".adsbox, adsbygoogle"):
element.extract()
self.clean_contents(content)
chapter.paragraphs = str(content)
|
445850
|
from itertools import product
from quantlib.instruments.option import EuropeanExercise
from quantlib.instruments.payoffs import PlainVanillaPayoff
from quantlib.instruments.option import Put, Call
from quantlib.instruments.asian_options import (
ContinuousAveragingAsianOption, DiscreteAveragingAsianOption, Geometric
)
from quantlib.pricingengines.asian.analyticcontgeomavprice import (
AnalyticContinuousGeometricAveragePriceAsianEngine
)
from quantlib.pricingengines.asian.analyticdiscrgeomavprice import (
AnalyticDiscreteGeometricAveragePriceAsianEngine
)
from quantlib.processes.black_scholes_process import BlackScholesMertonProcess
from quantlib.settings import Settings
from quantlib.time.api import Date, NullCalendar, June, Actual360, Years
from quantlib.termstructures.yields.flat_forward import FlatForward
from quantlib.quotes import SimpleQuote
from quantlib.termstructures.volatility.api import BlackConstantVol
from .unittest_tools import unittest
# taken from quantlib unit-test utils
def flat_rate(forward, daycounter):
return FlatForward(
forward=forward,
settlement_days=0,
calendar=NullCalendar(),
daycounter=daycounter
)
def relative_error(x1, x2, reference):
if reference:
result = abs(x1-x2)/reference
else:
# fall back to absolute error
result = abs(x1-x2)
return result
class AsianOptionTestCase(unittest.TestCase):
    """Base test for the cases related to Asian options.
    This test case is based on the QuantLib example EquityOption.cpp.
    """
def setUp(self):
self.settings = Settings()
self.calendar = NullCalendar()
self.today = Date(6, June, 2021)
self.settlement_date = self.today + 90
self.settings.evaluation_date = self.today
# options parameters
self.option_type = Put
self.underlying = 80.0
self.strike = 85.0
self.dividend_yield = -0.03
self.risk_free_rate = 0.05
self.volatility = 0.20
# self.maturity = Date(17, May, 1999)
self.daycounter = Actual360()
self.underlyingH = SimpleQuote(self.underlying)
# bootstrap the yield/dividend/vol curves
self.flat_term_structure = FlatForward(
reference_date=self.today,
forward=self.risk_free_rate,
daycounter=self.daycounter
)
self.flat_dividend_ts = FlatForward(
reference_date=self.today,
forward=self.dividend_yield,
daycounter=self.daycounter
)
self.flat_vol_ts = BlackConstantVol(
self.today,
self.calendar,
self.volatility,
self.daycounter
)
self.black_scholes_merton_process = BlackScholesMertonProcess(
self.underlyingH,
self.flat_dividend_ts,
self.flat_term_structure,
self.flat_vol_ts
)
self.payoff = PlainVanillaPayoff(self.option_type, self.strike)
    def test_analytic_cont_geom_av_price(self):
        """
        Testing analytic continuous geometric average-price Asians.
        Data from "Option Pricing Formulas", Haug, pp. 96-97.
        """
exercise = EuropeanExercise(self.settlement_date)
option = ContinuousAveragingAsianOption(Geometric, self.payoff, exercise)
engine = AnalyticContinuousGeometricAveragePriceAsianEngine(
self.black_scholes_merton_process
)
option.set_pricing_engine(engine)
tolerance = 1.0e-4
self.assertAlmostEqual(4.6922, option.net_present_value, delta=tolerance)
# trying to approximate the continuous version with the discrete version
running_accumulator = 1.0
past_fixings = 0
fixing_dates = [self.today + i for i in range(91)]
engine2 = AnalyticDiscreteGeometricAveragePriceAsianEngine(
self.black_scholes_merton_process
)
option2 = DiscreteAveragingAsianOption(
Geometric,
self.payoff,
exercise,
fixing_dates,
past_fixings=past_fixings,
running_accum=running_accumulator,
)
option2.set_pricing_engine(engine2)
        tolerance = 3.0e-3
        self.assertAlmostEqual(4.6922, option2.net_present_value, delta=tolerance)
def test_analytic_cont_geo_av_price_greeks(self):
tolerance = {}
tolerance["delta"] = 1.0e-5
tolerance["gamma"] = 1.0e-5
# tolerance["theta"] = 1.0e-5
tolerance["rho"] = 1.0e-5
tolerance["divRho"] = 1.0e-5
tolerance["vega"] = 1.0e-5
opt_types = [Call, Put]
underlyings = [100.0]
strikes = [90.0, 100.0, 110.0]
q_rates = [0.04, 0.05, 0.06]
r_rates = [0.01, 0.05, 0.15]
lengths = [1, 2]
vols = [0.11, 0.50, 1.20]
spot = SimpleQuote(0.0)
q_rate = SimpleQuote(0.0)
r_rate = SimpleQuote(0.0)
vol = SimpleQuote(0.0)
q_ts = flat_rate(q_rate, self.daycounter)
r_ts = flat_rate(r_rate, self.daycounter)
vol_ts = BlackConstantVol(self.today, self.calendar, vol, self.daycounter)
process = BlackScholesMertonProcess(spot, q_ts, r_ts, vol_ts)
calculated = {}
expected = {}
for opt_type, strike, length in product(opt_types, strikes, lengths):
maturity = EuropeanExercise(self.today + length*Years)
payoff = PlainVanillaPayoff(opt_type, strike)
engine = AnalyticContinuousGeometricAveragePriceAsianEngine(process)
option = ContinuousAveragingAsianOption(Geometric, payoff, maturity)
option.set_pricing_engine(engine)
            for u, q, r, v in product(underlyings, q_rates, r_rates, vols):
spot.value = u
q_rate.value = q
r_rate.value = r
vol.value = v
value = option.npv
calculated["delta"] = option.delta
calculated["gamma"] = option.gamma
# calculated["theta"] = option.theta
calculated["rho"] = option.rho
calculated["divRho"] = option.dividend_rho
calculated["vega"] = option.vega
if (value > spot.value*1.0e-5):
# perturb spot and get delta and gamma
du = u*1.0e-4
spot.value = u + du
value_p = option.npv
delta_p = option.delta
spot.value = u - du
value_m = option.npv
delta_m = option.delta
spot.value = u
expected["delta"] = (value_p - value_m)/(2*du)
expected["gamma"] = (delta_p - delta_m)/(2*du)
# perturb rates and get rho and dividend rho
dr = r*1.0e-4
r_rate.value = r + dr
value_p = option.npv
r_rate.value = r - dr
value_m = option.npv
r_rate.value = r
expected["rho"] = (value_p - value_m)/(2*dr)
dq = q*1.0e-4
q_rate.value = q + dq
value_p = option.npv
q_rate.value = q - dq
value_m = option.npv
q_rate.value = q
expected["divRho"] = (value_p - value_m)/(2*dq)
# perturb volatility and get vega
dv = v*1.0e-4
vol.value = v + dv
value_p = option.npv
vol.value = v - dv
value_m = option.npv
vol.value = v
expected["vega"] = (value_p - value_m)/(2*dv)
# perturb date and get theta
dt = self.daycounter.year_fraction(self.today - 1, self.today + 1)
self.settings.evaluation_date = self.today - 1
value_m = option.npv
self.settings.evaluation_date = self.today + 1
value_p = option.npv
self.settings.evaluation_date = self.today
expected["theta"] = (value_p - value_m)/dt
# compare
for greek, calcl in calculated.items():
expct = expected[greek]
tol = tolerance[greek]
error = relative_error(expct, calcl, u)
self.assertTrue(error < tol)
|
445858
|
import torch
from torch import nn
from torch.nn import Softplus, Module
from torch.distributions.normal import Normal
from .mixture_gaussian import GaussianDiagonalMixture
def transform_to_distribution_params(params, distr_dim=1, eps=1e-6):
"""Apply nonlinearities to unconstrained model outputs so
they can be represented as parameters of either
Normal or Normal-Wishart distributions"""
if len(params) > 3:
all_means, all_stds = [], []
for i in range(len(params) // 2):
all_means.append(params[i * 2].unsqueeze(0))
all_stds.append(Softplus()(params[i * 2 + 1].unsqueeze(0)) + eps)
return torch.cat(all_means, dim=0), torch.cat(all_stds, dim=0)
mean = params[0]
std = Softplus()(params[1]) + eps
if len(params) == 2:
return [mean, std]
elif len(params) == 3:
beta = Softplus()(params[2]) + eps
min_df = 3
#min_df = params[0].size(distr_dim) + 2 # !!!
kappa, nu = beta, beta + min_df
return [mean.unsqueeze(-1), std.unsqueeze(-1), kappa, nu]
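# Illustrative note (the shapes below are assumptions, not taken from the calling
# code): with two heads, e.g.
#     mean, std = transform_to_distribution_params([mu_raw, sigma_raw])
# the mean stays unconstrained while std = Softplus(sigma_raw) + eps is strictly
# positive, ready to be passed to Normal(mean, std); with three heads the extra
# output additionally parameterises kappa and nu of a Normal-Wishart.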
class ProbabilisticWrapper(Module):
def __init__(self, distribution_cls, model, distr_dim=1):
super(ProbabilisticWrapper, self).__init__()
self.distribution_cls = distribution_cls
self.model = model
self.distr_dim = distr_dim
def forward(self, x, mask=None):
out_params = self.model(x)
assert (len(out_params) in [2, 3]) or (
self.distribution_cls == GaussianDiagonalMixture
)
if mask is not None:
predicted_params = transform_to_distribution_params(
[p[mask] for p in out_params], self.distr_dim
)
else:
predicted_params = transform_to_distribution_params(
out_params, self.distr_dim
)
if not self.training:
predicted_params = [param.cpu() for param in predicted_params]
return self.distribution_cls(*predicted_params)
def state_dict(self):
return self.model.state_dict()
def load_state_dict(self, state_dict):
self.model.load_state_dict(state_dict)
class GaussianEnsembleWrapper(Module):
"""Wraps list of models to a Gaussian Mixture"""
def __init__(self, models):
super(GaussianEnsembleWrapper, self).__init__()
self.models = models
self.distribution_cls = GaussianDiagonalMixture
def forward(self, x, mask=None):
agg = []
for model in self.models:
agg.append(model(x))
assert len(agg[0]) == 2
if mask is not None:
all_predicted_params = [
transform_to_distribution_params([ps[mask] for ps in p])
for p in agg
]
else:
all_predicted_params = [
transform_to_distribution_params(p) for p in agg
]
        if not self.training:
            # Move parameters to CPU at eval time, mirroring ProbabilisticWrapper.
            all_predicted_params = [
                [p.cpu() for p in internal_params]
                for internal_params in all_predicted_params
            ]
return self.distribution_cls(
torch.cat(
[params[0].unsqueeze(0) for params in all_predicted_params]
),
torch.cat(
[params[1].unsqueeze(0) for params in all_predicted_params]
)
)
def load_state_dict(self, list_of_state_dicts):
assert len(list_of_state_dicts) == len(self.models)
for i, model in enumerate(self.models):
model.load_state_dict(list_of_state_dicts[i])
def eval(self):
super(GaussianEnsembleWrapper, self).eval()
for model in self.models:
model.eval()
return self
def cuda(self):
return self.to('cuda')
def cpu(self):
return self.to('cpu')
def to(self, device):
for model in self.models:
model.to(device)
return self
def make_dataparallel(self):
for i, model in enumerate(self.models):
self.models[i] = nn.DataParallel(model)
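if __name__ == "__main__":
    # Minimal smoke test (an illustrative sketch, not part of the original code):
    # wrap a hypothetical two-head backbone with ProbabilisticWrapper so its raw
    # outputs become a torch Normal distribution. The toy model and its sizes are
    # assumptions; run via ``python -m <package>.<module>`` so the relative import
    # of GaussianDiagonalMixture above still resolves.
    class _TwoHeadToyModel(Module):
        def __init__(self, in_features=8, out_features=4):
            super().__init__()
            self.mean_head = nn.Linear(in_features, out_features)
            self.raw_std_head = nn.Linear(in_features, out_features)
        def forward(self, x):
            # Unconstrained outputs; the wrapper applies Softplus to the std head.
            return self.mean_head(x), self.raw_std_head(x)
    wrapper = ProbabilisticWrapper(Normal, _TwoHeadToyModel()).eval()
    dist = wrapper(torch.randn(2, 8))
    print(dist.mean.shape, dist.stddev.shape)  # expected: torch.Size([2, 4]) twice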
|
445864
|
from Network.activity_net.video_hdf5_generator import process_video_dir
from Network.activity_net.API import ActivityNet_Dataloader, ActivityNet_Model
'''
You have to pre-process the videos before training.
This will consume a large amount of disk space and memory.
'''
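# Note (an assumption based on the module name video_hdf5_generator): the
# pre-processing step appears to cache each video as HDF5 data under the output
# path, which ActivityNet_Dataloader then reads back during training.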
process_video_dir(r"videos/folder/path", r"pre-process/output/path")
m = ActivityNet_Model()
d = ActivityNet_Dataloader(r"videos/folder/path",
                           r"training/annotation/path", r"testing/annotation/path",
                           r"pre-process/output/path")  # If you don't have a test annotation file, just reuse the training annotation file here.
m.set_dataset(d)
for i in range(200):
    train_info = m.train()
print(train_info)
    if i % 20 == 0:
        save_path = "model{}.h5".format(str(i).zfill(3))
        m.model.save(save_path)
|