| id | content |
|---|---|
1633651
|
def has_context(txt, context_list):
    # True if any of the given context strings appears in txt.
    return any(context in str(txt) for context in context_list)
|
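A minimal usage sketch for the helper above (the sample strings are invented for illustration):

assert has_context("error: disk full", ["disk", "network"])
assert not has_context(None, ["disk"])  # str(None) == "None", so no match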
1633656
|
import geohash
import json

coll = {"type": "FeatureCollection", "features": []}
h = geohash.encode(48.862004, 2.33734, precision=5)
cells = [h] + geohash.neighbors(h)
for cell in cells:
    b = geohash.bbox(cell)
    # Build a closed polygon ring (first point repeated last, per GeoJSON).
    coordinates = []
    coordinates.append([b['e'], b['s']])
    coordinates.append([b['e'], b['n']])
    coordinates.append([b['w'], b['n']])
    coordinates.append([b['w'], b['s']])
    coordinates.append([b['e'], b['s']])
    feat = {
        "type": "Feature",
        "properties": {"name": cell},
        "geometry": {
            "type": "Polygon",
            "coordinates": [coordinates]
        }
    }
    coll["features"].append(feat)
print(json.dumps(coll))
|
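To persist the FeatureCollection built above instead of printing it, a one-liner along these lines works (the output filename is hypothetical):

with open("geohash_cells.geojson", "w") as f:
    json.dump(coll, f)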
1633678
|
class MissingAttributeException(Exception):
    pass


class UnknownMarketException(Exception):
    pass


class UnknownPricesException(Exception):
    pass


class WrongTypeAttributeException(Exception):
    pass
|
1633682
|
import asyncio
import logging
import sys

import asynctnt

logging.basicConfig(level=logging.DEBUG)


async def main():
    c = asynctnt.Connection(
        host='localhost',
        port=3305,
        connect_timeout=5,
        request_timeout=5,
        reconnect_timeout=1 / 3,
    )
    async with c:
        while True:
            res = await c.eval(
                'local t = {}; for i=1,1000000 do t[i] = {i + 0.03} end; return t'
            )
            print(sys.getrefcount(res.body[0][-1]))


asyncio.run(main())
|
1633694
|
from nose import tools
import numpy as np
from scipy import stats

from . import models
from .. import basis_functions
from .. import solvers


def analytic_solution(t, k0, alpha, delta, g, n, s, **params):
    """Analytic solution for model with Cobb-Douglas production."""
    lmbda = (g + n + delta) * (1 - alpha)
    ks = (((s / (g + n + delta)) * (1 - np.exp(-lmbda * t)) +
           k0**(1 - alpha) * np.exp(-lmbda * t))**(1 / (1 - alpha)))
    return ks


def cobb_douglas_output(k, alpha, **params):
    """Intensive output has Cobb-Douglas functional form."""
    return k**alpha


def equilibrium_capital(alpha, delta, g, n, s, **params):
    """Equilibrium value of capital (per unit effective labor supply)."""
    return (s / (g + n + delta))**(1 / (1 - alpha))


def generate_random_params(scale, seed):
    np.random.seed(seed)
    # random g, n, delta such that the sum of these params is positive
    g, n = stats.norm.rvs(0.05, scale, size=2)
    delta, = stats.lognorm.rvs(scale, loc=g + n, size=1)
    assert g + n + delta > 0
    # s and alpha must be on (0, 1) (but lower values are more reasonable)
    s, alpha = stats.beta.rvs(a=1, b=3, size=2)
    # choose k0 so that it is not too far from equilibrium
    kstar = equilibrium_capital(alpha, delta, g, n, s)
    k0, = stats.uniform.rvs(0.5 * kstar, 1.5 * kstar, size=1)
    assert k0 > 0
    params = {'g': g, 'n': n, 'delta': delta, 's': s, 'alpha': alpha,
              'k0': k0}
    return params


def initial_mesh(t, T, num, problem):
    ts = np.linspace(t, T, num)
    kstar = equilibrium_capital(**problem.params)
    ks = kstar - (kstar - problem.params['k0']) * np.exp(-ts)
    return ts, ks


random_seed = np.random.randint(2147483647)
random_params = generate_random_params(0.1, random_seed)
test_problem = models.SolowModel(cobb_douglas_output, equilibrium_capital,
                                 random_params)
polynomial_basis = basis_functions.PolynomialBasis()
solver = solvers.Solver(polynomial_basis)


def _test_polynomial_collocation(basis_kwargs, boundary_points, num=1000):
    """Helper function for testing various kinds of polynomial collocation."""
    ts, ks = initial_mesh(*boundary_points, num=num, problem=test_problem)
    k_poly = polynomial_basis.fit(ts, ks, **basis_kwargs)
    initial_coefs = k_poly.coef
    nodes = polynomial_basis.roots(**basis_kwargs)
    solution = solver.solve(basis_kwargs, boundary_points, initial_coefs,
                            nodes, test_problem)
    # check that solver terminated successfully
    msg = "Solver failed!\nSeed: {}\nModel params: {}\n"
    tools.assert_true(solution.result.success,
                      msg=msg.format(random_seed, test_problem.params))
    # compute the residuals
    normed_residuals = solution.normalize_residuals(ts)
    # check that residuals are close to zero on average
    tools.assert_true(np.mean(normed_residuals) < 1e-6,
                      msg=msg.format(random_seed, test_problem.params))
    # check that the numerical and analytic solutions are close
    numeric_soln = solution.evaluate_solution(ts)
    analytic_soln = analytic_solution(ts, **test_problem.params)
    tools.assert_true(np.mean(numeric_soln - analytic_soln) < 1e-6)


def test_chebyshev_collocation():
    """Test collocation solver using Chebyshev polynomials for basis."""
    boundary_points = (0, 100)
    basis_kwargs = {'kind': 'Chebyshev', 'degree': 50, 'domain': boundary_points}
    _test_polynomial_collocation(basis_kwargs, boundary_points)


def test_legendre_collocation():
    """Test collocation solver using Legendre polynomials for basis."""
    boundary_points = (0, 100)
    basis_kwargs = {'kind': 'Legendre', 'degree': 50, 'domain': boundary_points}
    _test_polynomial_collocation(basis_kwargs, boundary_points)
|
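For reference, `analytic_solution` above implements the standard closed-form path of capital per unit of effective labor in the Solow model with Cobb-Douglas production:

$$ k(t) = \left[\frac{s}{g+n+\delta}\left(1 - e^{-\lambda t}\right) + k_0^{\,1-\alpha}\, e^{-\lambda t}\right]^{\frac{1}{1-\alpha}}, \qquad \lambda = (1-\alpha)(g+n+\delta) $$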
1633752
|
import pytest

from zquantum.core.interfaces.backend_test import QuantumBackendTests
from zquantum.core.interfaces.mock_objects import MockQuantumBackend


@pytest.fixture
def backend():
    return MockQuantumBackend()


class TestMockQuantumBackend(QuantumBackendTests):
    pass
|
1633769
|
import unittest

import pymysql.cursors


# Not a Python dev, this is probably pretty ugly
class TestDjConventions(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.conn = pymysql.connect(
            host="localhost",
            user="user",
            password="password",
            db="helium_from_dj"
        )

    @classmethod
    def tearDownClass(cls):
        cls.conn.close()

    def test_class_names(self):
        expected_names = [
            "sample_manual",
            "#sample_lookup",
            "_sample_imported",
            "__sample_computed",
            "sample_master",
            "sample_master__part1",
            "sample_master__part2"
        ]
        with TestDjConventions.conn.cursor() as cursor:
            cursor.execute("SHOW TABLES;")
            tables = [row[0] for row in cursor.fetchall()]
            for expected_name in expected_names:
                self.assertIn(expected_name, tables)


if __name__ == "__main__":
    unittest.main()
|
1633790
|
from __future__ import division
import sys
import random
import uuid

import numpy as np
from scipy.stats import mannwhitneyu
import pandas as pd


def load_list(infile):
    X = []
    with open(infile) as f:
        for line in f:
            X.append(line.rstrip())
    return X


def calc_DE_mannwhitneyu(X, names1, names2):
    pvalues = []
    medianA = []
    medianB = []
    for gene in X.index:
        A = X[names1].loc[gene]
        B = X[names2].loc[gene]
        if np.count_nonzero(A) == 0 and np.count_nonzero(B) == 0:
            pvalues.append(np.nan)
            medianA.append(0)
            medianB.append(0)
            continue
        _, pvalue = mannwhitneyu(A, B)
        pvalues.append(pvalue)
        medianA.append(np.median(A))
        medianB.append(np.median(B))
    df_DE = pd.DataFrame({"pvalue": pvalues, "medianA": medianA, "medianB": medianB},
                         index=X.index)
    df_DE.sort_values("pvalue", inplace=True)
    # Bonferroni correction: multiply each p-value by the number of tests.
    df_DE["pvalue_adj"] = df_DE["pvalue"] * df_DE["pvalue"].shape[0]
    return df_DE


if __name__ == "__main__":
    infile_df_expr = sys.argv[1]
    infile_names1 = sys.argv[2]
    infile_names2 = sys.argv[3]
    num_to_sample = int(sys.argv[4])
    outfile_basename = sys.argv[5]
    X = pd.read_csv(infile_df_expr, header=0, index_col=0)
    names1 = load_list(infile_names1)
    names2 = load_list(infile_names2)
    names1_sampled = np.random.choice(names1, size=num_to_sample, replace=False)
    names2_sampled = np.random.choice(names2, size=num_to_sample, replace=False)
    df_DE = calc_DE_mannwhitneyu(X, names1_sampled, names2_sampled)
    suffix = str(uuid.uuid4())
    outfile = outfile_basename + "." + suffix + ".csv"
    df_DE.to_csv(outfile)
    print("Done!!")
|
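One caveat about the snippet above: the Bonferroni product `pvalue * n_tests` can exceed 1. A common refinement (not in the original) is to clip the adjusted values:

df_DE["pvalue_adj"] = (df_DE["pvalue"] * len(df_DE)).clip(upper=1.0)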
1633819
|
class BaseExporter:
    @staticmethod
    def export(puzzle, include_blunder=True):
        """
        The method responsible for exporting a Puzzle object into the desired form.

        :return: string representation of the Puzzle object
        """
        pass
|
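A hypothetical subclass sketch showing how the interface above is meant to be filled in (the `Puzzle` attributes `moves` and `blunder` are assumptions, not part of the original):

class PlainTextExporter(BaseExporter):
    @staticmethod
    def export(puzzle, include_blunder=True):
        # Assumes a Puzzle exposes `moves` and `blunder` attributes.
        lines = [str(move) for move in puzzle.moves]
        if include_blunder:
            lines.append("blunder: " + str(puzzle.blunder))
        return "\n".join(lines)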
1633848
|
import os
import random
import string
import sys
import uuid
from statistics import mean

import tracery
import spacy
import pyocr
import pyocr.builders
from PIL import Image, ImageDraw, ImageFilter

BOUND_PADDING = 50
BOX_PADDING = 50  # 10
WOBBLE_MAX = 2

nlp = spacy.load('en')


def draw_vertical_lines(draw, boxes, doc_bounding_box, line_width):
    line_weight_factor = random.triangular(0.005, 1.2)
    current_x = doc_bounding_box[0] - line_width / 2
    color = get_color()
    while current_x < doc_bounding_box[2]:
        start_x = current_x
        start_y = doc_bounding_box[1] - line_width / 2
        end_x = start_x
        end_y = doc_bounding_box[3] - line_width / 2
        bx0 = start_x
        bx1 = start_x + line_width
        select_boxes = []
        for box in boxes:
            wx0 = box.position[0][0] - BOUND_PADDING
            wx1 = box.position[1][0] + BOUND_PADDING
            if bx0 < wx0 and wx1 < bx1 or \
               wx0 < bx1 and bx1 < wx1 or \
               wx0 < bx0 and bx0 < wx1:
                select_boxes.append(box)
        if select_boxes:
            y0 = start_y
            y1 = end_y
            for box in select_boxes:
                y1 = box.position[0][1] - BOX_PADDING
                draw_line(draw, [start_x, y0, end_x, y1], line_width=line_width, color=color,
                          line_weight_factor=line_weight_factor, dir='v')
                y0 = box.position[1][1] + BOX_PADDING
            draw_line(draw, [start_x, y0, end_x, end_y], line_width=line_width, color=color,
                      line_weight_factor=line_weight_factor, dir='v')
        else:
            draw_line(draw, [start_x, start_y, end_x, end_y], line_width=line_width, color=color,
                      line_weight_factor=line_weight_factor, dir='v')
        current_x = start_x + line_width


def get_color():
    if random.randint(0, 100) == 0:
        color = (179, 27, 27)
    else:
        color = (int(random.triangular(0, 10, 1)),
                 int(random.triangular(0, 10, 1)),
                 int(random.triangular(0, 10, 1)),
                 )
    return color


def draw_horizontal_lines(draw, boxes, doc_bounding_box, line_width):
    """Draw black horizontal lines across the page _except_ for that word"""
    line_weight_factor = random.triangular(0.005, 1.2)
    color = get_color()
    start_x = doc_bounding_box[0]
    current_y = doc_bounding_box[1]
    end_x = doc_bounding_box[2]
    end_y = doc_bounding_box[3] - line_width / 2
    while current_y < doc_bounding_box[3]:
        by0 = current_y
        by1 = current_y + line_width
        select_boxes = []
        for box in boxes:
            wy0 = box.position[0][1]
            wy1 = box.position[1][1]
            if by0 <= wy0 and wy1 <= by1 or \
               wy0 <= by1 and by1 <= wy1 or \
               wy0 <= by0 and by0 <= wy1:
                select_boxes.append(box)
        if select_boxes:
            x0 = start_x
            x1 = end_x
            for box in select_boxes:
                x1 = box.position[0][0] - BOX_PADDING
                draw_line(draw, [x0, current_y, x1, current_y],
                          line_width=line_width,
                          line_weight_factor=line_weight_factor, color=color,
                          dir="h")
                x0 = box.position[1][0] + BOX_PADDING
            draw_line(draw, [x0 + BOX_PADDING, current_y, end_x, current_y],
                      line_width=line_width, line_weight_factor=line_weight_factor, dir="h", color=color)
        else:
            draw_line(draw, [start_x, current_y, end_x, current_y],
                      line_width=line_width, color=color,
                      line_weight_factor=line_weight_factor,
                      dir="h")
        current_y = by1


def draw_line(draw, pos, line_width, dir="h", color=(0, 0, 0), line_weight_factor=1):
    # Draw a fuzzy line of randomish width `repeat` times
    repeat = random.randint(10, 20)
    width = int(line_width) * line_weight_factor
    default_padding = line_width / 3
    margin_extent = 20  # random.randint(1, 20)
    # Slide the center of the line down width/2 based on dir
    if dir == 'h':
        pos[1] += width / 2
        pos[3] += width / 2
        # Introduce some randomness into the margins
        pos[0] -= random.triangular(width / margin_extent, width * margin_extent)
        pos[2] += random.triangular(width / margin_extent, width * margin_extent)
    else:
        pos[0] -= width / 2
        pos[2] -= width / 2
        # Introduce some randomness into the margins
        pos[1] -= random.triangular(width / margin_extent, width * margin_extent)
        pos[3] += random.triangular(width / margin_extent, width * margin_extent)
    for i in range(0, repeat):
        width = int(random.uniform(line_width - default_padding, line_width))
        padding = default_padding * 4
        pos[0] = random.triangular(pos[0] - padding, pos[0] + padding)
        pos[1] = random.triangular(pos[1] - padding, pos[1] + padding)
        pos[2] = random.triangular(pos[2] - padding, pos[2] + padding)
        pos[3] = random.triangular(pos[3] - padding, pos[3] + padding)
        opacity = 240 + i
        width_factor = random.triangular(1, 10, 1)
        draw.line(pos, width=int(width / width_factor), fill=(*color, opacity))


def get_boxes(imagefile, tool):
    num_words = 5
    boxes = tool.image_to_string(
        Image.open(imagefile), lang="eng",
        builder=pyocr.builders.WordBoxBuilder()
    )
    return boxes


def image_filter(img):
    for i in range(10):
        img = img.filter(ImageFilter.SMOOTH_MORE)
    return img


def parse_words(boxes):
    words = []
    for box in boxes:
        word = box.content.strip()
        word = word.translate(str.maketrans({a: None for a in string.punctuation}))
        words.append({'text': word, 'box': box})
    sent = ' '.join([w['box'].content for w in words])
    doc = nlp(sent)
    for token in doc:
        for word in words:
            text = word['text']
            if token.text == text:
                word['token'] = token
                word['pos'] = token.pos_
    return words


def find_boxes_for_grammar(boxes):
    words = parse_words(boxes)
    grammars = [
        ['DET', 'NOUN', 'VERB', 'NOUN'],
        ['ADJ', 'NOUN', 'VERB', 'NOUN'],
        ['ADJ', 'NOUN', 'VERB', 'ADV'],
        ['DET', 'NOUN', 'VERB', 'NOUN', 'CONJ', 'NOUN'],
        ['VERB', 'DET', 'NOUN'],
        ['ADV', 'VERB', 'NOUN', 'CONJ', 'NOUN']
    ]
    grammar = random.choice(grammars)
    picks = []
    word_index = 0
    prev_word = None
    prev_pos = None
    for pos in grammar:
        while True:
            word = words[word_index]
            if len(picks) > 0:
                prev_word = picks[-1]
                prev_pos = prev_word['pos']
            pick_this = True
            if prev_pos == 'DET':
                if prev_word['text'] == 'a' or prev_word['text'] == 'an':
                    # Pick this if it's singular
                    pick_this = not is_plural(word)
                if prev_word['text'] == 'a':
                    # Pick this if it doesn't start with a vowel
                    pick_this = not starts_with_vowel(word) and pick_this
                if prev_word['text'] == 'an':
                    pick_this = starts_with_vowel(word) and pick_this
                if prev_word['text'] == 'this':
                    pick_this = not is_plural(word) and pick_this
                if prev_word['text'] == 'these':
                    pick_this = is_plural(word) and pick_this
            if prev_pos == 'NOUN':
                # If the previous noun was plural, the verb must be plural
                if is_plural(prev_word):
                    pick_this = is_plural_verb(word) and pick_this
                if not is_plural(prev_word):
                    pick_this = not is_plural_verb(word) and pick_this
            if prev_pos == 'VERB':
                # If the verb was plural, the noun must be
                if is_plural_verb(prev_word):
                    pick_this = is_plural(word) and pick_this
                if not is_plural_verb(prev_word):
                    pick_this = not is_plural(word) and pick_this
            if pos == 'VERB':
                # Don't pick auxiliary verbs as they won't have a helper
                if 'token' in word:
                    pick_this = word['token'].dep_ != 'aux' and pick_this
            if 'pos' in word and word['pos'] == pos and pick_this and random.randint(0, 30) == 0:
                # print("Picking ", word['text'], " ", word['token'].dep_)
                picks.append(word)
                prev_pos = pos
                word_index += 1
                break
            word_index += 1
    return [p['box'] for p in picks]


def is_plural(word):
    if word['text'] == 'men' or word['text'] == 'women':  # Special-case this since one comes up a lot
        return True
    return word['text'][-1] == 's'


def is_plural_verb(word):
    if word['text'] == 'have':
        return True
    return word['text'][-1] != 's'


def is_present(word):
    return word['text'][-1] == 's'


def starts_with_vowel(word):
    vowels = set(['a', 'e', 'i', 'o', 'u'])
    return word['text'][0] in vowels


def setup(imagefile):
    tool = pyocr.get_available_tools()[0]
    boxes = get_boxes(imagefile, tool)
    return boxes


def draw(imagefile, boxes):
    while True:
        try:
            select_boxes = find_boxes_for_grammar(boxes)
            break
        except IndexError:
            # print("Retrying...")
            pass
    # Get the line height by taking the average of all the box heights
    box_heights = []
    margin_lefts = []
    margin_rights = []
    margin_top = boxes[0].position[0][1]
    margin_bottom = boxes[-1].position[1][1]
    for box in boxes:
        margin_lefts.append(box.position[0][0])
        margin_rights.append(box.position[1][0])
        box_heights.append(box.position[1][1] - box.position[0][1])
    margin_left = min(margin_lefts)
    margin_right = max(margin_rights)
    line_width = mean(box_heights)
    line_spaces = [0]
    last_y_pos = boxes[0].position[1][1]
    src = Image.open(imagefile)
    src = src.convert('RGBA')
    img = Image.new('RGBA', (src.size[0], src.size[1]))
    draw = ImageDraw.Draw(img)
    doc_bounding_box = (margin_left, margin_top, margin_right, margin_bottom)
    line_choices = random.choice(('v', 'h', 'a'))
    line_choices = 'v'
    if line_choices == 'v':
        draw_vertical_lines(draw, select_boxes, doc_bounding_box=doc_bounding_box, line_width=line_width)
    elif line_choices == 'h':
        draw_horizontal_lines(draw, select_boxes,
                              doc_bounding_box=doc_bounding_box,
                              line_width=line_width)
    else:
        draw_vertical_lines(draw, select_boxes, doc_bounding_box=doc_bounding_box, line_width=line_width)
        draw_horizontal_lines(draw, select_boxes,
                              doc_bounding_box=doc_bounding_box,
                              line_width=line_width)
    img = image_filter(img)
    out = Image.alpha_composite(src, img)
    repeat = 10
    f = 10
    for box in select_boxes:
        pad = BOX_PADDING
        d = ImageDraw.Draw(out)
        p0 = [box.position[0][0] - pad, box.position[0][1] - pad]
        p1 = [box.position[1][0] + pad, box.position[0][1] - pad]
        p2 = [box.position[1][0] + pad, box.position[1][1] + pad]
        p3 = [box.position[0][0] - pad, box.position[1][1] + pad]
        b = (*p0, *p2)
        crop = src.crop(box=b)
        out.paste(crop, box=b)
        w = 10 + int(random.uniform(-5, 5))
        for i in range(0, repeat):
            fuzz = random.uniform(-f, f)
            p0 = [p + fuzz for p in p0]
            fuzz = random.uniform(-f, f)
            p1 = [p + fuzz for p in p1]
            fuzz = random.uniform(-f, f)
            p2 = [p + fuzz for p in p2]
            fuzz = random.uniform(-f, f)
            p3 = [p + fuzz for p in p3]
            d.line(p0 + p1, width=w, fill="black")
            d.line(p1 + p2, width=w, fill="black")
            d.line(p2 + p3, width=w, fill="black")
            d.line(p3 + p0, width=w, fill="black")
    final = Image.new('RGBA', (src.size[0], src.size[1]))
    canvas = ImageDraw.Draw(final)
    canvas.rectangle([0, 0, final.size[0], final.size[1]], fill='white')
    final = Image.alpha_composite(final, out)
    outfile = str(uuid.uuid4())[0:5] + '.png'  # os.path.basename(imagefile)
    final.save("build/" + outfile)


if __name__ == '__main__':
    path = sys.argv[1]
    pages = []
    for f in os.listdir(path):
        pages.append(f)
    num_generations_per_page = 100
    while True:
        f = random.choice(pages)
        imagefile = os.path.join(path, f)
        print("Processing " + imagefile)
        boxes = setup(imagefile)
        for i in range(0, num_generations_per_page):
            draw(imagefile, boxes)
|
1633861
|
import textwrap

import pytest

from _pytask.live import LiveExecution
from _pytask.live import LiveManager
from _pytask.nodes import PythonFunctionTask
from _pytask.report import ExecutionReport
from pytask import cli


@pytest.mark.end_to_end
@pytest.mark.parametrize("verbose", [False, True])
def test_verbose_mode_execution(tmp_path, runner, verbose):
    source = "def task_dummy(): pass"
    tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
    args = [tmp_path.as_posix()]
    if not verbose:
        args.append("-v 0")
    result = runner.invoke(cli, args)
    assert ("Task" in result.output) is verbose
    assert ("Outcome" in result.output) is verbose
    assert ("└──" in result.output) is verbose
    assert ("task_dummy.py::task_dummy" in result.output) is verbose


@pytest.mark.unit
def test_live_execution_sequentially(capsys, tmp_path):
    path = tmp_path.joinpath("task_dummy.py")
    task = PythonFunctionTask(
        "task_dummy", path.as_posix() + "::task_dummy", path, None
    )
    live_manager = LiveManager()
    live = LiveExecution(live_manager, [tmp_path], 1)
    live_manager.start()
    live.update_running_tasks(task)
    live_manager.pause()
    # Test pause removes the table.
    captured = capsys.readouterr()
    assert "Task" not in captured.out
    assert "Outcome" not in captured.out
    assert "task_dummy.py::task_dummy" not in captured.out
    assert "running" not in captured.out
    live_manager.resume()
    live_manager.start()
    live_manager.stop()
    # Test table with running task.
    captured = capsys.readouterr()
    assert "Task" in captured.out
    assert "Outcome" in captured.out
    assert "task_dummy.py::task_dummy" in captured.out
    assert "running" in captured.out
    live_manager.start()
    report = ExecutionReport(
        task=task, success=True, exc_info=None, symbol="new_symbol", color="black"
    )
    live_manager.resume()
    live.update_reports(report)
    live_manager.stop()
    # Test final table with reported outcome.
    captured = capsys.readouterr()
    assert "Task" in captured.out
    assert "Outcome" in captured.out
    assert "task_dummy.py::task_dummy" in captured.out
    assert "running" not in captured.out
    assert "new_symbol" in captured.out


@pytest.mark.unit
@pytest.mark.parametrize("verbose", [1, 2])
@pytest.mark.parametrize("symbol", ["s", "p", ".", "F"])
def test_live_execution_displays_skips_and_persists(capsys, tmp_path, verbose, symbol):
    path = tmp_path.joinpath("task_dummy.py")
    task = PythonFunctionTask(
        "task_dummy", path.as_posix() + "::task_dummy", path, None
    )
    live_manager = LiveManager()
    live = LiveExecution(live_manager, [tmp_path], verbose)
    live_manager.start()
    live.update_running_tasks(task)
    live_manager.pause()
    report = ExecutionReport(
        task=task, success=True, exc_info=None, symbol=symbol, color="black"
    )
    live_manager.resume()
    live.update_reports(report)
    live_manager.stop()
    # Test final table with reported outcome.
    captured = capsys.readouterr()
    assert "Task" in captured.out
    assert "Outcome" in captured.out
    if verbose < 2 and symbol in ("s", "p"):
        assert "task_dummy.py::task_dummy" not in captured.out
        assert f"│ {symbol}" not in captured.out
    else:
        assert "task_dummy.py::task_dummy" in captured.out
        assert f"│ {symbol}" in captured.out
    assert "running" not in captured.out
|
1633880
|
import sonnet as snt
import tensorflow as tf

# Types of RoI "pooling"
CROP = 'crop'
ROI_POOLING = 'roi_pooling'


class ROIPoolingLayer(snt.AbstractModule):
    """ROIPoolingLayer applies ROI Pooling (or tf.crop_and_resize).

    RoI pooling or RoI extraction is used to extract fixed-size features from
    a variable-sized feature map using variable-sized bounding boxes. Since we
    have proposals of different shapes and sizes, we need a way to transform
    them into a fixed-size Tensor for use with FC layers.

    There are two basic ways to do this; the original one in the Faster R-CNN
    paper is RoI Pooling, which, as the name suggests, max-pools directly from
    the region of interest, or proposal, into a fixed-size Tensor.

    The alternative way uses TensorFlow's image utility operation called
    `crop_and_resize`, which first crops a Tensor using a normalized proposal,
    and then applies interpolation to resize it to the desired size,
    generating a fixed-size Tensor.

    Since there isn't a standard supported implementation of RoI Pooling, we
    apply the easier but still proven alternative way.
    """

    def __init__(self, config, debug=False, name='roi_pooling'):
        super(ROIPoolingLayer, self).__init__(name=name)
        self._pooling_mode = config.pooling_mode.lower()
        self._pooled_width = config.pooled_width
        self._pooled_height = config.pooled_height
        self._pooled_padding = config.padding
        self._debug = debug

    def _get_bboxes(self, roi_proposals, im_shape):
        """
        Gets normalized coordinates for RoIs (between 0 and 1 for cropping)
        in TensorFlow's order (y1, x1, y2, x2).

        Args:
            roi_proposals: A Tensor with the bounding boxes of shape
                (total_proposals, 5), where the values for each proposal are
                (x_min, y_min, x_max, y_max).
            im_shape: A Tensor with the shape of the image (height, width).

        Returns:
            bboxes: A Tensor with normalized bounding boxes in TensorFlow's
                format order. Its shape is (total_proposals, 4).
        """
        with tf.name_scope('get_bboxes'):
            im_shape = tf.cast(im_shape, tf.float32)
            x1, y1, x2, y2 = tf.unstack(
                roi_proposals, axis=1
            )
            x1 = x1 / im_shape[1]
            y1 = y1 / im_shape[0]
            x2 = x2 / im_shape[1]
            y2 = y2 / im_shape[0]
            bboxes = tf.stack([y1, x1, y2, x2], axis=1)
            return bboxes

    def _roi_crop(self, roi_proposals, conv_feature_map, im_shape):
        # Get normalized bounding boxes.
        bboxes = self._get_bboxes(roi_proposals, im_shape)
        # Generate fake batch ids
        bboxes_shape = tf.shape(bboxes)
        batch_ids = tf.zeros((bboxes_shape[0],), dtype=tf.int32)
        # Apply crop and resize, extracting a crop double the desired size.
        crops = tf.image.crop_and_resize(
            conv_feature_map, bboxes, batch_ids,
            [self._pooled_width * 2, self._pooled_height * 2], name="crops"
        )
        # Apply max pool with a [2, 2] kernel to reduce the crops to half
        # their size, and thus produce the desired output.
        prediction_dict = {
            'roi_pool': tf.nn.max_pool(
                crops, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                padding=self._pooled_padding
            ),
        }
        if self._debug:
            prediction_dict['bboxes'] = bboxes
            prediction_dict['crops'] = crops
            prediction_dict['batch_ids'] = batch_ids
            prediction_dict['conv_feature_map'] = conv_feature_map
        return prediction_dict

    def _roi_pooling(self, roi_proposals, conv_feature_map, im_shape):
        raise NotImplementedError()

    def _build(self, roi_proposals, conv_feature_map, im_shape):
        if self._pooling_mode == CROP:
            return self._roi_crop(roi_proposals, conv_feature_map, im_shape)
        elif self._pooling_mode == ROI_POOLING:
            return self._roi_pooling(roi_proposals, conv_feature_map, im_shape)
        else:
            raise NotImplementedError(
                'Pooling mode {} does not exist.'.format(self._pooling_mode))
|
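The crop-then-pool trick used by `_roi_crop` can be reproduced standalone. A minimal TF2-style sketch, with made-up shapes and box values:

import tensorflow as tf

feature_map = tf.random.normal([1, 32, 32, 256])   # NHWC conv features
boxes = tf.constant([[0.1, 0.1, 0.6, 0.5]])        # normalized (y1, x1, y2, x2)
box_indices = tf.constant([0], dtype=tf.int32)     # every box maps to image 0
# Crop to double the target size, then max-pool 2x2 down to 7x7.
crops = tf.image.crop_and_resize(feature_map, boxes, box_indices, crop_size=[14, 14])
pooled = tf.nn.max_pool2d(crops, ksize=2, strides=2, padding="VALID")
print(pooled.shape)  # (1, 7, 7, 256)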
1633887
|
import ROOT
from ROOT import TGraphErrors
from array import array

from mva_tools.build_roc_simple import build_roc

if __name__ == "__main__":
    path = "/Users/musthero/Documents/Yura/Applications/tmva_local/BDT_score_distributions_muons.root"
    hsig_path = "histo_tmva_sig"
    hbkg_path = "histo_tmva_bkg"
    rootfile = ROOT.TFile.Open(path)
    if rootfile.IsZombie():
        print("Root file is corrupt")
    hSig = rootfile.Get(hsig_path)
    hBkg = rootfile.Get(hbkg_path)
    g = build_roc(hSig, hBkg, 1)
    ll = [g]
    g.SetLineColor(ROOT.kBlue)
    g.Draw("AL")  # draw TGraph with no marker dots
    # Draw rectangular cuts
    eff_rej = (0.905108, 0.987200, 0.000272, 0.000172)
    sig_eff_val = array('f', [eff_rej[0]])
    sig_eff_err = array('f', [eff_rej[1]])
    bkg_rej_val = array('f', [eff_rej[2]])
    bkg_rej_err = array('f', [eff_rej[3]])
    n = 1
    gr = TGraphErrors(n, sig_eff_val, sig_eff_err, bkg_rej_val, bkg_rej_err)
    print(sig_eff_val, sig_eff_err, bkg_rej_val, bkg_rej_err)
    # gr.Draw("AC*")
    gr.SetMarkerColor(2)
    gr.SetLineColor(2)
    gr.SetLineWidth(2)
    gr.Draw("psame")
|
1633973
|
import turtle


def item(length, level, color):
    if level <= 0:
        return
    for _ in range(5):  # 5
        turtle.color(colors[color])
        turtle.forward(length)
        item(length / 4, level - 1, color + 1)
        turtle.penup()  # no need to draw the same line again (and it can use a different color)
        turtle.backward(length)
        turtle.pendown()
        turtle.right(360 / 8)  # 8
    turtle.right(360 / 8 * 3)  # 3 = 8 - 5


colors = ["white", "green", "blue", "yellow", "red"]

turtle.tracer(0, 0)
turtle.hideturtle()
turtle.bgcolor("black")
item(200, 4, 0)
turtle.update()
turtle.mainloop()
|
1634018
|
import numpy as np

gauss_len = 100
gaussian_amp = 0.2


def gauss(amplitude, mu, sigma, delf, length):
    t = np.linspace(-length / 2, length / 2, length)
    gauss_wave = amplitude * np.exp(-((t - mu) ** 2) / (2 * sigma ** 2))
    # Detuning correction Eqn. (4) in Chen et al. PRL, 116, 020501 (2016)
    gauss_wave = gauss_wave * np.exp(2 * np.pi * delf * t)
    return [float(x) for x in gauss_wave]


def gauss_der(amplitude, mu, sigma, delf, length):
    t = np.linspace(-length / 2, length / 2, length)
    gauss_der_wave = (
        amplitude * (-2 * (t - mu)) * np.exp(-((t - mu) ** 2) / (2 * sigma ** 2))
    )
    # Detuning correction Eqn. (4) in Chen et al. PRL, 116, 020501 (2016)
    gauss_der_wave = gauss_der_wave * np.exp(2 * np.pi * delf * t)
    return [float(x) for x in gauss_der_wave]


def IQ_imbalance(g, phi):
    c = np.cos(phi)
    s = np.sin(phi)
    N = 1 / ((1 - g ** 2) * (2 * c ** 2 - 1))
    return [float(N * x) for x in [(1 - g) * c, (1 + g) * s, (1 - g) * s, (1 + g) * c]]


delf = 0.0  # Detuning frequency, e.g. [-25, -10] MHz
gauss_pulse = gauss(gaussian_amp, 0, 6, delf, gauss_len)
drag_gauss_pulse = gauss(gaussian_amp, 0, 6, delf, gauss_len)
alpha = 0.05
delta = 0.8 - 2 * np.pi * delf  # Below Eqn. (4) in Chen et al.
drag_gauss_der_pulse = gauss_der(alpha / delta * gaussian_amp, 0, 6, delf, gauss_len)
readout_len = 400
qubit_IF = 0
rr_IF = 0
qubit_LO = 6.345e9
rr_LO = 4.755e9

config = {
    "version": 1,
    "controllers": {
        "con1": {
            "type": "opx1",
            "analog_outputs": {
                1: {"offset": +0.0},  # qubit 1-I
                2: {"offset": +0.0},  # qubit 1-Q
                3: {"offset": +0.0},  # Readout resonator
                4: {"offset": +0.0},  # Readout resonator
            },
            "digital_outputs": {
                1: {},
            },
            "analog_inputs": {
                1: {"offset": +0.0},
                2: {"offset": +0.0},
            },
        }
    },
    "elements": {
        "qubit": {
            "mixInputs": {
                "I": ("con1", 1),
                "Q": ("con1", 2),
                "lo_frequency": qubit_LO,
                "mixer": "mixer_qubit",
            },
            "intermediate_frequency": qubit_IF,
            "operations": {
                "X/2": "DRAG_PULSE",
                "X": "DRAG_PULSE",
                "-X/2": "DRAG_PULSE",
                "Y/2": "DRAG_PULSE",
                "Y": "DRAG_PULSE",
                "-Y/2": "DRAG_PULSE",
            },
        },
        "rr": {
            "mixInputs": {
                "I": ("con1", 3),
                "Q": ("con1", 4),
                "lo_frequency": rr_LO,
                "mixer": "mixer_RR",
            },
            "intermediate_frequency": rr_IF,
            "operations": {
                "readout": "readout_pulse",
            },
            "outputs": {"out1": ("con1", 1)},
            "time_of_flight": 28,
            "smearing": 0,
        },
    },
    "pulses": {
        "XPulse": {
            "operation": "control",
            "length": gauss_len,
            "waveforms": {"I": "gauss_wf", "Q": "zero_wf"},
        },
        "YPulse": {
            "operation": "control",
            "length": gauss_len,
            "waveforms": {"I": "zero_wf", "Q": "gauss_wf"},
        },
        "DRAG_PULSE": {
            "operation": "control",
            "length": gauss_len,
            "waveforms": {"I": "DRAG_gauss_wf", "Q": "DRAG_gauss_der_wf"},
        },
        "readout_pulse": {
            "operation": "measurement",
            "length": gauss_len,
            "waveforms": {"I": "gauss_wf", "Q": "zero_wf"},
            "integration_weights": {
                "integW1": "integW1",
                "integW2": "integW2",
            },
            "digital_marker": "ON",
        },
    },
    "waveforms": {
        "zero_wf": {"type": "constant", "sample": 0.0},
        "gauss_wf": {"type": "arbitrary", "samples": gauss_pulse},
        "DRAG_gauss_wf": {"type": "arbitrary", "samples": drag_gauss_pulse},
        "DRAG_gauss_der_wf": {"type": "arbitrary", "samples": drag_gauss_der_pulse},
        "readout_wf": {"type": "constant", "sample": 0.3},
    },
    "digital_waveforms": {
        "ON": {"samples": [(1, 0)]},
    },
    "integration_weights": {
        "integW1": {
            "cosine": [1.0] * int(readout_len / 4),
            "sine": [0.0] * int(readout_len / 4),
        },
        "integW2": {
            "cosine": [0.0] * int(readout_len / 4),
            "sine": [1.0] * int(readout_len / 4),
        },
    },
    "mixers": {
        "mixer_qubit": [
            {
                "intermediate_frequency": qubit_IF,
                "lo_frequency": qubit_LO,
                "correction": IQ_imbalance(0.0, 0.0),
            }
        ],
        "mixer_RR": [
            {
                "intermediate_frequency": rr_IF,
                "lo_frequency": rr_LO,
                "correction": IQ_imbalance(0.0, 0.0),
            }
        ],
    },
}
|
1634107
|
import pickle

import six
import torch
from mmdet.models import ResNet

depth = 50
variant = 'd'
return_idx = [1, 2, 3]
dcn_v2_stages = [-1]
freeze_at = -1
freeze_norm = False
norm_decay = 0.

depth = 50
variant = 'd'
return_idx = [1, 2, 3]
dcn_v2_stages = [-1]
freeze_at = 2
freeze_norm = False
norm_decay = 0.

model = ResNet(depth=depth, variant=variant, return_idx=return_idx, dcn_v2_stages=dcn_v2_stages,
               freeze_at=freeze_at, freeze_norm=freeze_norm, norm_decay=norm_decay)
model.train()
model_std = model.state_dict()


def copy(name, w, std):
    if isinstance(w, dict):
        print()
    value2 = torch.Tensor(w)
    value = std[name]
    value.copy_(value2)
    std[name] = value


ckpt_file = '51_00.pdparams'
save_name = '51_00.pth'
with open(ckpt_file, 'rb') as f:
    model_dic = pickle.load(f) if six.PY2 else pickle.load(f, encoding='latin1')
for key in model_dic.keys():
    name2 = key
    w = model_dic[key]
    if 'StructuredToParameterName@@' in key:
        continue
    else:
        if '._mean' in key:
            name2 = name2.replace('._mean', '.running_mean')
        if '._variance' in key:
            name2 = name2.replace('._variance', '.running_var')
        copy(name2, w, model_std)
model.load_state_dict(model_std)
torch.save(model_std, save_name)
print(torch.__version__)

ckpt_file = '51_08.pdparams'
save_name = '51_08_paddle.pth'
with open(ckpt_file, 'rb') as f:
    model_dic = pickle.load(f) if six.PY2 else pickle.load(f, encoding='latin1')
for key in model_dic.keys():
    name2 = key
    w = model_dic[key]
    if 'StructuredToParameterName@@' in key:
        continue
    else:
        if '._mean' in key:
            name2 = name2.replace('._mean', '.running_mean')
        if '._variance' in key:
            name2 = name2.replace('._variance', '.running_var')
        copy(name2, w, model_std)
model.load_state_dict(model_std)
torch.save(model_std, save_name)
print(torch.__version__)
|
1634130
|
import asyncio
import random
import time


async def noop(ctx):
    return 1


async def sleeper(ctx):
    # 500 ms / 20 == avg 25 ms per job
    await asyncio.sleep(random.random() / 20)
    return 1


def sync_noop():
    return 1


def sync_sleeper():
    time.sleep(random.random() / 20)
    return 1
|
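A small, self-contained harness sketch for timing the job functions above (`bench` and its default count are invented for illustration):

import asyncio
import time

async def bench(n=100):
    t0 = time.perf_counter()
    # Run n sleeper jobs concurrently; wall time should be close to the
    # slowest single job rather than the sum of all of them.
    await asyncio.gather(*(sleeper(None) for _ in range(n)))
    print(f"{n} concurrent jobs in {time.perf_counter() - t0:.3f}s")

asyncio.run(bench())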
1634158
|
from enum import Enum


class WaterLoopComponent(object):
    def __init__(self):
        self._component_name = ''
        self._component_template = None
        self._function = WaterComponent.chiller


class WaterComponent(Enum):
    chiller = 1
    boiler = 2
    condenser = 3
    pump = 4
    heatexchanger = 5
|
1634185
|
class ImageCropper:
    def __init__(self, face_cropper, face_coordinates):
        self.face_cropper = face_cropper
        self.face_coordinates = face_coordinates

    def crop(self, img, output_path):
        self.face_cropper.crop_image_into_face(img, self.face_coordinates, output_path)
|
1634198
|
from collections import namedtuple, defaultdict

MenuItem = namedtuple("MenuItem", "section order label url")


def menu_order(item):
    return item.order


class MenuRegistry(object):
    def __init__(self):
        self.callbacks = []

    def register(self, func):
        self.callbacks.append(func)

    def get_menu_items(self, request):
        if request is None:
            return
        sections = defaultdict(list)
        for callback in self.callbacks:
            menu_item = callback(request)
            if menu_item is None:
                continue
            sections[menu_item.section].append(menu_item)
        for section in sections:
            sections[section] = sorted(sections[section], key=menu_order)
        return sections


menu_registry = MenuRegistry()
|
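A usage sketch for the registry above (the callback, section, and URL are invented):

def reports_menu(request):
    # Returning None would hide the entry for this request.
    return MenuItem(section="admin", order=10, label="Reports", url="/reports/")

menu_registry.register(reports_menu)
# menu_registry.get_menu_items(request) -> {"admin": [MenuItem(section="admin", ...)]}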
1634241
|
from __future__ import absolute_import, division, print_function
from cctbx import xray
from scitbx.math import tensor_rank_2_gradient_transform_matrix
from scitbx import matrix
from scitbx.array_family import flex
import cmath
import math
from six.moves import zip


def scatterer_as_list(self):
    if (self.flags.use_u_iso_only()):
        return list(self.site) + [self.u_iso, self.occupancy, self.fp, self.fdp]
    return list(self.site) + list(self.u_star) \
        + [self.occupancy, self.fp, self.fdp]


def scatterer_from_list(l):
    if (len(l) == 7):
        return xray.scatterer(
            site=l[:3],
            u=l[3],
            occupancy=l[4],
            scattering_type="const",
            fp=l[5],
            fdp=l[6])
    return xray.scatterer(
        site=l[:3],
        u=l[3:9],
        occupancy=l[9],
        scattering_type="const",
        fp=l[10],
        fdp=l[11])


class gradients:

    def __init__(self, site, u_iso, u_star, occupancy, fp, fdp):
        self.site = site
        self.u_iso = u_iso
        self.u_star = u_star
        self.flags = xray.scatterer_flags()
        self.flags.set_use_u(
            iso=(u_iso is not None),
            aniso=(u_star is not None))
        self.occupancy = occupancy
        self.fp = fp
        self.fdp = fdp


def pack_gradients(grads):
    result = []
    for g in grads:
        result.extend(scatterer_as_list(g))
    return result


mtps = -2 * math.pi**2


class structure_factor:

    def __init__(self, xray_structure, hkl):
        self.unit_cell = xray_structure.unit_cell()
        self.space_group = xray_structure.space_group()
        self.scatterers = xray_structure.scatterers()
        self.hkl = hkl
        self.d_star_sq = self.unit_cell.d_star_sq(hkl)

    def f(self):
        result = 0
        tphkl = 2 * math.pi * matrix.col(self.hkl)
        for scatterer in self.scatterers:
            assert scatterer.scattering_type == "const"
            w = scatterer.occupancy
            if (not scatterer.flags.use_u_aniso()):
                huh = scatterer.u_iso * self.d_star_sq
                dw = math.exp(mtps * huh)
            ffp = 1 + scatterer.fp
            fdp = scatterer.fdp
            ff = ffp + 1j * fdp
            for s in self.space_group:
                s_site = s * scatterer.site
                alpha = matrix.col(s_site).dot(tphkl)
                if (scatterer.flags.use_u_aniso()):
                    r = s.r().as_rational().as_float()
                    s_u_star_s = r * matrix.sym(sym_mat3=scatterer.u_star) * r.transpose()
                    huh = (matrix.row(self.hkl) * s_u_star_s).dot(matrix.col(self.hkl))
                    dw = math.exp(mtps * huh)
                e = cmath.exp(1j * alpha)
                result += w * dw * ff * e
        return result

    def df_d_params(self):
        result = []
        tphkl = 2 * math.pi * matrix.col(self.hkl)
        h, k, l = self.hkl
        d_exp_huh_d_u_star = matrix.col([h**2, k**2, l**2, 2*h*k, 2*h*l, 2*k*l])
        for scatterer in self.scatterers:
            assert scatterer.scattering_type == "const"
            w = scatterer.occupancy
            if (not scatterer.flags.use_u_aniso()):
                huh = scatterer.u_iso * self.d_star_sq
                dw = math.exp(mtps * huh)
            ffp = 1 + scatterer.fp
            fdp = scatterer.fdp
            ff = ffp + 1j * fdp
            d_site = matrix.col([0, 0, 0])
            if (not scatterer.flags.use_u_aniso()):
                d_u_iso = 0
                d_u_star = None
            else:
                d_u_iso = None
                d_u_star = matrix.col([0, 0, 0, 0, 0, 0])
            d_occ = 0
            d_fp = 0
            d_fdp = 0
            for s in self.space_group:
                r = s.r().as_rational().as_float()
                s_site = s * scatterer.site
                alpha = matrix.col(s_site).dot(tphkl)
                if (scatterer.flags.use_u_aniso()):
                    s_u_star_s = r * matrix.sym(sym_mat3=scatterer.u_star) * r.transpose()
                    huh = (matrix.row(self.hkl) * s_u_star_s).dot(matrix.col(self.hkl))
                    dw = math.exp(mtps * huh)
                e = cmath.exp(1j * alpha)
                site_gtmx = r.transpose()
                d_site += site_gtmx * (
                    w * dw * ff * e * 1j * tphkl)
                if (not scatterer.flags.use_u_aniso()):
                    d_u_iso += w * dw * ff * e * mtps * self.d_star_sq
                else:
                    u_star_gtmx = matrix.sqr(tensor_rank_2_gradient_transform_matrix(r))
                    d_u_star += u_star_gtmx * (
                        w * dw * ff * e * mtps * d_exp_huh_d_u_star)
                d_occ += dw * ff * e
                d_fp += w * dw * e
                d_fdp += w * dw * e * 1j
            result.append(gradients(
                site=d_site,
                u_iso=d_u_iso,
                u_star=d_u_star,
                occupancy=d_occ,
                fp=d_fp,
                fdp=d_fdp))
        return result

    def d2f_d_params(self):
        tphkl = 2 * math.pi * matrix.col(self.hkl)
        tphkl_outer = tphkl.outer_product()
        h, k, l = self.hkl
        d_exp_huh_d_u_star = matrix.col([h**2, k**2, l**2, 2*h*k, 2*h*l, 2*k*l])
        d2_exp_huh_d_u_star_u_star = d_exp_huh_d_u_star.outer_product()
        for scatterer in self.scatterers:
            assert scatterer.scattering_type == "const"
            w = scatterer.occupancy
            if (not scatterer.flags.use_u_aniso()):
                huh = scatterer.u_iso * self.d_star_sq
                dw = math.exp(mtps * huh)
            ffp = 1 + scatterer.fp
            fdp = scatterer.fdp
            ff = (ffp + 1j * fdp)
            d2_site_site = flex.complex_double(flex.grid(3, 3), 0j)
            if (not scatterer.flags.use_u_aniso()):
                d2_site_u_iso = flex.complex_double(flex.grid(3, 1), 0j)
                d2_site_u_star = None
            else:
                d2_site_u_iso = None
                d2_site_u_star = flex.complex_double(flex.grid(3, 6), 0j)
            d2_site_occ = flex.complex_double(flex.grid(3, 1), 0j)
            d2_site_fp = flex.complex_double(flex.grid(3, 1), 0j)
            d2_site_fdp = flex.complex_double(flex.grid(3, 1), 0j)
            if (not scatterer.flags.use_u_aniso()):
                d2_u_iso_u_iso = 0j
                d2_u_iso_occ = 0j
                d2_u_iso_fp = 0j
                d2_u_iso_fdp = 0j
            else:
                d2_u_star_u_star = flex.complex_double(flex.grid(6, 6), 0j)
                d2_u_star_occ = flex.complex_double(flex.grid(6, 1), 0j)
                d2_u_star_fp = flex.complex_double(flex.grid(6, 1), 0j)
                d2_u_star_fdp = flex.complex_double(flex.grid(6, 1), 0j)
            d2_occ_fp = 0j
            d2_occ_fdp = 0j
            for s in self.space_group:
                r = s.r().as_rational().as_float()
                s_site = s * scatterer.site
                alpha = matrix.col(s_site).dot(tphkl)
                if (scatterer.flags.use_u_aniso()):
                    s_u_star_s = r * matrix.sym(sym_mat3=scatterer.u_star) * r.transpose()
                    huh = (matrix.row(self.hkl) * s_u_star_s).dot(matrix.col(self.hkl))
                    dw = math.exp(mtps * huh)
                e = cmath.exp(1j * alpha)
                site_gtmx = r.transpose()
                d2_site_site += flex.complex_double(
                    site_gtmx *
                    (w * dw * ff * e * (-1) * tphkl_outer)
                    * site_gtmx.transpose())
                if (not scatterer.flags.use_u_aniso()):
                    d2_site_u_iso += flex.complex_double(site_gtmx * (
                        w * dw * ff * e * 1j * mtps * self.d_star_sq * tphkl))
                else:
                    u_star_gtmx = matrix.sqr(tensor_rank_2_gradient_transform_matrix(r))
                    d2_site_u_star += flex.complex_double(
                        site_gtmx
                        * ((w * dw * ff * e * 1j * tphkl).outer_product(
                            mtps * d_exp_huh_d_u_star))
                        * u_star_gtmx.transpose())
                d2_site_occ += flex.complex_double(site_gtmx * (
                    dw * ff * e * 1j * tphkl))
                d2_site_fp += flex.complex_double(site_gtmx * (
                    w * dw * e * 1j * tphkl))
                d2_site_fdp += flex.complex_double(site_gtmx * (
                    w * dw * e * (-1) * tphkl))
                if (not scatterer.flags.use_u_aniso()):
                    d2_u_iso_u_iso += w * dw * ff * e * (mtps * self.d_star_sq)**2
                    d2_u_iso_occ += dw * ff * e * mtps * self.d_star_sq
                    d2_u_iso_fp += w * dw * e * mtps * self.d_star_sq
                    d2_u_iso_fdp += 1j * w * dw * e * mtps * self.d_star_sq
                else:
                    d2_u_star_u_star += flex.complex_double(
                        u_star_gtmx
                        * (w * dw * ff * e * mtps**2 * d2_exp_huh_d_u_star_u_star)
                        * u_star_gtmx.transpose())
                    d2_u_star_occ += flex.complex_double(u_star_gtmx * (
                        dw * ff * e * mtps * d_exp_huh_d_u_star))
                    d2_u_star_fp += flex.complex_double(u_star_gtmx * (
                        w * dw * e * mtps * d_exp_huh_d_u_star))
                    d2_u_star_fdp += flex.complex_double(u_star_gtmx * (
                        w * dw * 1j * e * mtps * d_exp_huh_d_u_star))
                d2_occ_fp += dw * e
                d2_occ_fdp += dw * e * 1j
            if (not scatterer.flags.use_u_aniso()):
                i_occ, i_fp, i_fdp, np = 4, 5, 6, 7
            else:
                i_occ, i_fp, i_fdp, np = 9, 10, 11, 12
            dp = flex.complex_double(flex.grid(np, np), 0j)
            paste = dp.matrix_paste_block_in_place
            paste(d2_site_site, 0, 0)
            if (not scatterer.flags.use_u_aniso()):
                paste(d2_site_u_iso, 0, 3)
                paste(d2_site_u_iso.matrix_transpose(), 3, 0)
            else:
                paste(d2_site_u_star, 0, 3)
                paste(d2_site_u_star.matrix_transpose(), 3, 0)
            paste(d2_site_occ, 0, i_occ)
            paste(d2_site_occ.matrix_transpose(), i_occ, 0)
            paste(d2_site_fp, 0, i_fp)
            paste(d2_site_fp.matrix_transpose(), i_fp, 0)
            paste(d2_site_fdp, 0, i_fdp)
            paste(d2_site_fdp.matrix_transpose(), i_fdp, 0)
            if (not scatterer.flags.use_u_aniso()):
                dp[3*7+3] = d2_u_iso_u_iso
                dp[3*7+4] = d2_u_iso_occ
                dp[4*7+3] = d2_u_iso_occ
                dp[3*7+5] = d2_u_iso_fp
                dp[5*7+3] = d2_u_iso_fp
                dp[3*7+6] = d2_u_iso_fdp
                dp[6*7+3] = d2_u_iso_fdp
            else:
                paste(d2_u_star_u_star, 3, 3)
                paste(d2_u_star_occ, 3, 9)
                paste(d2_u_star_occ.matrix_transpose(), 9, 3)
                paste(d2_u_star_fp, 3, 10)
                paste(d2_u_star_fp.matrix_transpose(), 10, 3)
                paste(d2_u_star_fdp, 3, 11)
                paste(d2_u_star_fdp.matrix_transpose(), 11, 3)
            dp[i_occ*np+i_fp] = d2_occ_fp
            dp[i_fp*np+i_occ] = d2_occ_fp
            dp[i_occ*np+i_fdp] = d2_occ_fdp
            dp[i_fdp*np+i_occ] = d2_occ_fdp
            yield dp

    def d_target_d_params(self, target):
        da, db = target.da(), target.db()
        return flex.double([[da * d.real + db * d.imag
                             for d in scatterer_as_list(d_scatterer)]
                            for d_scatterer in self.df_d_params()])

    def d2_target_d_params(self, target):
        result = []
        da, db = target.da(), target.db()
        daa, dbb, dab = target.daa(), target.dbb(), target.dab()
        ds = self.df_d_params()
        d2s = self.d2f_d_params()
        for di0, d2i in zip(ds, d2s):
            d2ij_iter = iter(d2i)
            for di in scatterer_as_list(di0):
                row = []
                for dj0 in ds:
                    for dj in scatterer_as_list(dj0):
                        sum = daa * di.real * dj.real \
                            + dbb * di.imag * dj.imag \
                            + dab * (di.real * dj.imag + di.imag * dj.real)
                        if (di0 is dj0):
                            d2ij = next(d2ij_iter)
                            sum += da * d2ij.real + db * d2ij.imag
                        row.append(sum)
                result.append(row)
        return flex.double(result)


class structure_factors:

    def __init__(self, xray_structure, miller_set):
        assert xray_structure.is_similar_symmetry(miller_set)
        self.xray_structure = xray_structure
        self.miller_indices = miller_set.indices()
        np = 0
        for scatterer in xray_structure.scatterers():
            if (not scatterer.flags.use_u_aniso()):
                np += 7
            else:
                np += 12
        self.number_of_parameters = np

    def fs(self):
        result = flex.complex_double()
        for hkl in self.miller_indices:
            result.append(structure_factor(
                xray_structure=self.xray_structure, hkl=hkl).f())
        return result

    def f(self):
        return flex.sum(self.fs())

    def d_target_d_params(self, f_obs, target_type):
        result = flex.double(self.number_of_parameters, 0)
        for hkl, obs in zip(self.miller_indices, f_obs.data()):
            sf = structure_factor(xray_structure=self.xray_structure, hkl=hkl)
            target = target_type(obs=obs, calc=sf.f())
            result += sf.d_target_d_params(target=target)
        return result

    def d2_target_d_params(self, f_obs, target_type):
        np = self.number_of_parameters
        result = flex.double(flex.grid(np, np), 0)
        for hkl, obs in zip(self.miller_indices, f_obs.data()):
            sf = structure_factor(xray_structure=self.xray_structure, hkl=hkl)
            target = target_type(obs=obs, calc=sf.f())
            result += sf.d2_target_d_params(target=target)
        return result
|
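For reference, `structure_factor.f` above evaluates the usual symmetry-expanded structure-factor sum, with `mtps = -2*pi**2` supplying the Debye-Waller exponent (isotropic case: replace the quadratic form in the exponent by u_iso * d*^2):

$$ F(\mathbf{h}) = \sum_{j}\sum_{s}\, o_j \left(1 + f'_j + i f''_j\right) e^{-2\pi^2\,\mathbf{h}^{\mathsf T}\!\left(\mathbf{R}_s \mathbf{U}^{*}_j \mathbf{R}_s^{\mathsf T}\right)\mathbf{h}}\; e^{2\pi i\,\mathbf{h}\cdot\left(s\,\mathbf{x}_j\right)} $$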
1634247
|
import logging
import copy as cp
from datetime import datetime
from typing import (
    TypeVar,
    Dict,
    Any,
    Optional,
    Type,
    Union,
    ClassVar,
    Sequence,
)

import attr
import marshmallow
from marshmallow import fields
from cattr.converters import Converter

from simple_smartsheet import config
from simple_smartsheet import utils
from simple_smartsheet.types import IndexesType

logger = logging.getLogger(__name__)

converter = Converter()  # type: ignore
converter.register_structure_hook(datetime, lambda ts, _: ts)
converter.register_structure_hook(IndexesType, lambda x, _: x)
converter.register_structure_hook(Union[float, str, datetime, None], lambda ts, _: ts)


class Schema(marshmallow.Schema):
    class Meta:
        unknown = utils.get_unknown_field_handling(config.STRICT_VALIDATION)

    @marshmallow.post_dump
    def remove_none(self, data, many: bool, **kwargs):
        return {key: value for key, value in data.items() if value is not None}


class CoreSchema(Schema):
    id = fields.Int()
    name = fields.Str()


T = TypeVar("T", bound="Object")


@attr.s(auto_attribs=True, repr=False, kw_only=True)
class Object:
    _schema: ClassVar[Type[Schema]] = Schema

    @classmethod
    def load(
        cls: Type[T],
        data: Dict[str, Any],
        only: Optional[Sequence[str]] = None,
        exclude: Sequence[str] = (),
        **kwargs: Any,
    ) -> T:
        schema = cls._schema(only=only, exclude=exclude)
        normalized_data = schema.load(data)
        normalized_data.update(kwargs)
        obj = converter.structure(normalized_data, cls)
        return obj

    def dump(
        self, only: Optional[Sequence[str]] = None, exclude: Sequence[str] = ()
    ) -> Dict[str, Any]:
        schema = self._schema(only=only, exclude=exclude)
        result = schema.dump(self.unstructured)
        return result

    @property
    def unstructured(self) -> Dict[str, Any]:
        return converter.unstructure(self)

    def __repr__(self) -> str:
        if hasattr(self, "id") and hasattr(self, "name"):
            attrs = ["name", "id"]
        elif hasattr(self, "id"):
            attrs = ["id"]
        elif hasattr(self, "name"):
            attrs = ["name"]
        else:
            return super().__repr__()
        return utils.create_repr(self, attrs)

    def copy(self: T, deep: bool = True) -> T:
        if deep:
            return cp.deepcopy(self)
        else:
            return cp.copy(self)


@attr.s(auto_attribs=True, repr=False, kw_only=True)
class CoreObject(Object):
    name: str
    id: Optional[int] = None
    _schema: ClassVar[Type[CoreSchema]] = CoreSchema

    @property
    def _id(self) -> Optional[int]:
        return getattr(self, "id")

    @property
    def _name(self) -> str:
        return getattr(self, "name")
|
1634248
|
import sqlalchemy as sa

from .reverter import Reverter
from .utils import get_versioning_manager, is_internal_column, parent_class


class VersionClassBase(object):
    @property
    def previous(self):
        """
        Returns the previous version relative to this version in the version
        history. If the current version is the first version, this method
        returns None.
        """
        return (
            get_versioning_manager(self)
            .fetcher(parent_class(self.__class__))
            .previous(self)
        )

    @property
    def next(self):
        """
        Returns the next version relative to this version in the version
        history. If the current version is the last version, this method
        returns None.
        """
        return (
            get_versioning_manager(self)
            .fetcher(parent_class(self.__class__))
            .next(self)
        )

    @property
    def index(self):
        """
        Return the index of this version in the version history.
        """
        return (
            get_versioning_manager(self)
            .fetcher(parent_class(self.__class__))
            .index(self)
        )

    @property
    def changeset(self):
        """
        Return a dictionary of fields changed in this version, with field
        names as keys and two-element lists as values: the first element is
        the old field value and the second is the new value.
        """
        previous_version = self.previous
        data = {}
        for key in sa.inspect(self.__class__).columns.keys():
            if is_internal_column(self, key):
                continue
            if not previous_version:
                old = None
            else:
                old = getattr(previous_version, key)
            new = getattr(self, key)
            if old != new:
                data[key] = [old, new]
        manager = get_versioning_manager(self)
        manager.plugins.after_construct_changeset(self, data)
        return data

    def revert(self, relations=[]):
        return Reverter(self, relations=relations)()
|
1634263
|
import numpy as np
import copy


def calcbadness(xvals, validcolumns, stimix, results, sessionindicator):
    """
    badness = calcbadness(xvals,validcolumns,stimix,results,sessionindicator)

    Arguments:
    __________

    <xvals>:
        is a list vector of vectors of run indices

    <validcolumns>:
        is a list vector, each element is the vector of trial indices
        associated with the run

    <stimix>:
        is a list vector, each element is the vector of actual condition
        numbers occurring with a given run

    <results>:
        is a 1 x n with results. the first one is SPECIAL and is unregularized.

    <sessionindicator>:
        is 1 x RUNS with positive integers indicating run groupings for
        sessions. this is used only to perform the session-wise z-scoring for
        the purposes of hyperparameter evaluation.

    Returns
    __________

    <badness>:
        voxels x hyperparameters with the sum of the squared error from
        cross-validation.
        the testing data consists of the beta weights from results[0],
        i.e. unregularized beta weights.
        note that the squared error is expressed in the z-score units
        (given that we z-score the single-trial beta weights prior to
        evaluation of the different hyperparameters).

    note:
        the unregularized betas set the stage for the session-wise
        normalization: for each session, we determine a fixed mu and sigma
        that are applied to the session under all of the various
        regularization levels.
    """
    # initialize
    badness = np.zeros(
        (results[0].shape[0], len(results))
    )
    # calc
    alltheruns = np.arange(len(validcolumns))
    # z-score transform the single-trial beta weights
    if np.max(sessionindicator) == 1:
        sessions = [1]
    else:
        sessions = range(1, np.max(sessionindicator) + 1)
    for sess in sessions:
        wh = np.flatnonzero(np.array(sessionindicator) == sess)
        whcol = np.concatenate(np.asarray(validcolumns)[wh])
        # mean of unregularized case
        mn = np.mean(results[0][:, whcol], axis=1)
        # std dev of unregularized case
        sd = np.std(results[0][:, whcol], axis=1)
        resultsdm = copy.deepcopy(results)
        for runis in range(len(resultsdm)):
            rundemean = results[runis][:, whcol] - mn[:, np.newaxis]
            with np.errstate(divide="ignore", invalid="ignore"):
                resultsdm[runis][:, whcol] = rundemean / sd[:, np.newaxis]
    # do cross-validation
    for xx in range(len(xvals)):
        # calc
        # which runs are training, e.g. [1 2 5 6 7 8 9 10 11 12]
        testix = xvals[xx]
        trainix = np.setdiff1d(alltheruns, testix)
        # calc
        # vector of trial indices in the testing data
        testcols = np.asarray(validcolumns[testix])
        # vector of trial indices in the training data
        traincols = np.concatenate(
            np.asarray([validcolumns[tx] for tx in trainix])
        )
        # vector of condition-ids in the testing data
        testids = stimix[testix]
        # vector of condition-ids in the training data
        trainids = np.concatenate(
            np.asarray([stimix[tx] for tx in trainix])
        )
        # calculate cross-validation performance
        for pcr in range(len(results)):
            # hashrec = cell(1,max(testids));  # speed-up by caching results
            for trial in range(len(testids)):
                # which training trials match the current condition-id?
                haveix = np.flatnonzero(trainids == testids[trial])
                if haveix.size > 0:
                    # NOTE:
                    # testcols(trial) tells us which trial in the testing runs
                    # to pull betas for (these are 0-based trial numbers)
                    # traincols(haveix) tells us the corresponding trials
                    # (isolated within the training runs) to pull betas for
                    # (these are 1-based trial numbers)
                    # compute squared error of all training betas against the
                    # current testing beta, and accumulate!!
                    betas_1 = resultsdm[pcr][:, traincols[haveix]]
                    betas_2 = resultsdm[0][:, testcols[trial]]
                    badness[:, pcr] = badness[:, pcr] + np.sum(
                        (betas_1 - betas_2[:, np.newaxis])**2, axis=1)
                    # NOTICE the use of results(0)
    return badness


"""
# if isempty(hashrec{testids(ttt)})
#     hashrec{testids(ttt)} = \\
#         mean(results(ll).modelmd{2}(:,traincols(haveix)),2);  # voxels x 1
#     hashrec{testids(ttt)} = results(ll).modelmd{2}(:,traincols(haveix));
#     # voxels x instances
# end
"""
|
1634296
|
import shutil
import os

import magma as m
import magma.testing
import fault


def test_log(capsys):
    FF = m.define_from_verilog("""
module FF(input I, output reg O, input CLK, input CE);
always @(posedge CLK) begin
    if (CE) O <= I;
end
endmodule
""", type_map={"CLK": m.In(m.Clock), "CE": m.In(m.Enable)})[0]

    class TestLog(m.Circuit):
        io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit)) + m.ClockIO(has_enable=True)
        ff = FF()
        io.O @= ff(io.I)
        m.log.debug("ff.O=%d, ff.I=%d", ff.O, ff.I.value()).when(m.posedge(io.CLK))\
            .if_(io.CE)
        m.log.info("ff.O=%d, ff.I=%d", ff.O, ff.I.value()).when(m.posedge(io.CLK))\
            .if_(io.CE)
        m.log.warning("ff.O=%d, ff.I=%d", ff.O, ff.I.value()).when(m.posedge(io.CLK))\
            .if_(io.CE)
        m.log.error("ff.O=%d, ff.I=%d", ff.O, ff.I.value()).when(m.posedge(io.CLK))\
            .if_(io.CE)

    m.compile("build/TestLog", TestLog, inline=True)
    assert not os.system('cd tests/test_verilog/build && '
                         'verilator --lint-only TestLog.v '
                         '--top-module TestLog')
    assert m.testing.check_files_equal(__file__,
                                       f"build/TestLog.v",
                                       f"gold/TestLog.v")
    tester = fault.SynchronousTester(TestLog, TestLog.CLK)
    tester.poke(TestLog.CE, 1)
    tester.poke(TestLog.I, 1)
    tester.expect(TestLog.O, 0)
    tester.advance_cycle()
    tester.poke(TestLog.I, 0)
    tester.expect(TestLog.O, 1)
    tester.advance_cycle()
    tester.expect(TestLog.O, 0)
    tester.poke(TestLog.CE, 0)
    tester.poke(TestLog.I, 1)
    tester.advance_cycle()
    tester.expect(TestLog.O, 0)
    tester.poke(TestLog.I, 1)
    tester.advance_cycle()
    tester.expect(TestLog.O, 0)
    tester.poke(TestLog.CE, 1)
    tester.advance_cycle()
    tester.expect(TestLog.O, 1)
    tester.advance_cycle()
    directory = f"{os.path.abspath(os.path.dirname(__file__))}/build/"
    tester.compile_and_run(target="verilator", directory=directory,
                           flags=['-Wno-unused'], skip_compile=True,
                           disp_type="realtime")
    out, err = capsys.readouterr()
    # No debug
    assert f"""
[INFO] ff.O=0, ff.I=1
[WARNING] ff.O=0, ff.I=1
[ERROR] ff.O=0, ff.I=1
[INFO] ff.O=1, ff.I=0
[WARNING] ff.O=1, ff.I=0
[ERROR] ff.O=1, ff.I=0
[INFO] ff.O=0, ff.I=1
[WARNING] ff.O=0, ff.I=1
[ERROR] ff.O=0, ff.I=1
[INFO] ff.O=1, ff.I=1
[WARNING] ff.O=1, ff.I=1
[ERROR] ff.O=1, ff.I=1
""" in out, out
    # Force recompile for new define
    shutil.rmtree(os.path.join(directory, "obj_dir"))
    tester.compile_and_run(target="verilator", directory=directory,
                           flags=['-Wno-unused', '+define+MAGMA_LOG_LEVEL=2'],
                           skip_compile=True, disp_type="realtime")
    out, err = capsys.readouterr()
    # No Info
    assert f"""
[WARNING] ff.O=0, ff.I=1
[ERROR] ff.O=0, ff.I=1
[WARNING] ff.O=1, ff.I=0
[ERROR] ff.O=1, ff.I=0
[WARNING] ff.O=0, ff.I=1
[ERROR] ff.O=0, ff.I=1
[WARNING] ff.O=1, ff.I=1
[ERROR] ff.O=1, ff.I=1
""" in out, out
    # Force recompile for new define
    shutil.rmtree(os.path.join(directory, "obj_dir"))
    tester.compile_and_run(target="verilator", directory=directory,
                           flags=['-Wno-unused', '+define+MAGMA_LOG_LEVEL=3'],
                           skip_compile=True, disp_type="realtime")
    out, err = capsys.readouterr()
    # Only Error
    assert f"""
[ERROR] ff.O=0, ff.I=1
[ERROR] ff.O=1, ff.I=0
[ERROR] ff.O=0, ff.I=1
[ERROR] ff.O=1, ff.I=1
""" in out, out
    # Force recompile for new define
    shutil.rmtree(os.path.join(directory, "obj_dir"))
    tester.compile_and_run(target="verilator", directory=directory,
                           flags=['-Wno-unused', '+define+MAGMA_LOG_LEVEL=0'],
                           skip_compile=True, disp_type="realtime")
    out, err = capsys.readouterr()
    # All
    assert f"""
[DEBUG] ff.O=0, ff.I=1
[INFO] ff.O=0, ff.I=1
[WARNING] ff.O=0, ff.I=1
[ERROR] ff.O=0, ff.I=1
[DEBUG] ff.O=1, ff.I=0
[INFO] ff.O=1, ff.I=0
[WARNING] ff.O=1, ff.I=0
[ERROR] ff.O=1, ff.I=0
[DEBUG] ff.O=0, ff.I=1
[INFO] ff.O=0, ff.I=1
[WARNING] ff.O=0, ff.I=1
[ERROR] ff.O=0, ff.I=1
[DEBUG] ff.O=1, ff.I=1
[INFO] ff.O=1, ff.I=1
[WARNING] ff.O=1, ff.I=1
[ERROR] ff.O=1, ff.I=1
""" in out, out


def test_flog():
    FF = m.define_from_verilog("""
module FF(input I, output reg O, input CLK, input CE);
always @(posedge CLK) begin
    if (CE) O <= I;
end
endmodule
""", type_map={"CLK": m.In(m.Clock), "CE": m.In(m.Enable)})[0]

    class TestFLog(m.Circuit):
        io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit)) + m.ClockIO(has_enable=True)
        ff = FF()
        io.O @= ff(io.I)
        with m.File("test_flog.log", "a") as log_file:
            m.log.debug("ff.O=%d, ff.I=%d", ff.O, ff.I.value(), file=log_file)\
                .when(m.posedge(io.CLK))\
                .if_(io.CE)
            m.log.info("ff.O=%d, ff.I=%d", ff.O, ff.I.value(), file=log_file)\
                .when(m.posedge(io.CLK))\
                .if_(io.CE)
            m.log.warning("ff.O=%d, ff.I=%d", ff.O, ff.I.value(), file=log_file)\
                .when(m.posedge(io.CLK))\
                .if_(io.CE)
            m.log.error("ff.O=%d, ff.I=%d", ff.O, ff.I.value(), file=log_file)\
                .when(m.posedge(io.CLK))\
                .if_(io.CE)

    m.compile("build/TestFLog", TestFLog, inline=True)
    assert not os.system('cd tests/test_verilog/build && '
                         'verilator --lint-only TestFLog.v '
                         '--top-module TestFLog')
    assert m.testing.check_files_equal(__file__,
                                       f"build/TestFLog.v",
                                       f"gold/TestFLog.v")
    tester = fault.SynchronousTester(TestFLog, TestFLog.CLK)
    tester.poke(TestFLog.CE, 1)
    tester.poke(TestFLog.I, 1)
    tester.expect(TestFLog.O, 0)
    tester.advance_cycle()
    tester.poke(TestFLog.I, 0)
    tester.expect(TestFLog.O, 1)
    tester.advance_cycle()
    tester.expect(TestFLog.O, 0)
    tester.poke(TestFLog.CE, 0)
    tester.poke(TestFLog.I, 1)
    tester.advance_cycle()
    tester.expect(TestFLog.O, 0)
    tester.poke(TestFLog.I, 1)
    tester.advance_cycle()
    tester.expect(TestFLog.O, 0)
    tester.poke(TestFLog.CE, 1)
    tester.advance_cycle()
    tester.expect(TestFLog.O, 1)
    tester.advance_cycle()
    directory = f"{os.path.abspath(os.path.dirname(__file__))}/build/"
    tester.compile_and_run(target="verilator", directory=directory,
                           flags=['-Wno-unused'], skip_compile=True,
                           disp_type="realtime")
    # No debug
    with open(os.path.join(directory, "test_flog.log"), "r") as f:
        assert f"""\
[INFO] ff.O=0, ff.I=1
[WARNING] ff.O=0, ff.I=1
[ERROR] ff.O=0, ff.I=1
[INFO] ff.O=1, ff.I=0
[WARNING] ff.O=1, ff.I=0
[ERROR] ff.O=1, ff.I=0
[INFO] ff.O=0, ff.I=1
[WARNING] ff.O=0, ff.I=1
[ERROR] ff.O=0, ff.I=1
[INFO] ff.O=1, ff.I=1
[WARNING] ff.O=1, ff.I=1
[ERROR] ff.O=1, ff.I=1
""" in f.read()
|
1634345
|
from pip._internal.req import parse_requirements
from setuptools import setup, find_packages
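# NOTE: parse_requirements is pip's private API (pip._internal); its return
# type has changed across pip releases, and on newer versions each entry
# exposes `ir.requirement` (a string) instead of `ir.req`. If this breaks,
# reading requirements/production.txt directly is the robust alternative.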
raw_requirements = parse_requirements("requirements/production.txt", session=False)
requirements = [str(ir.req) for ir in raw_requirements]
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="python-chain",
version="1.0.4",
scripts=["bin/build_chain.py"],
author="QuintoAndar",
author_email="<EMAIL>",
description="An easy to use pattern of function chaining on Python.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/quintoandar/python-chain/",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=requirements,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
1634348
|
from utils.misc import isnotebook
if isnotebook():
from tqdm import tqdm_notebook as tqdm
else:
from tqdm import tqdm
import numpy as np
import torch
from torch_geometric.data import Data
from torch_geometric.utils import remove_self_loops
from utils.load_dataset import load_data, load_zinc_data, load_ogb_data, load_g6_graphs
from utils.utils_subgraphs import compute_degrees
import os
import torch_geometric.datasets as ptg_datasets
def unique_indices(num_unique, inverse):
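    """Return, for each unique value, the index of its first occurrence.

    `inverse` is the inverse-index tensor returned by torch.unique; flipping
    both `inverse` and the position tensor before scatter_ makes the earliest
    original position win for each unique entry.
    """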
perm = torch.arange(inverse.size(0), dtype=inverse.dtype, device=inverse.device)
inverse, perm = inverse.flip([0]), perm.flip([0])
perm = inverse.new_empty(num_unique).scatter_(0, inverse, perm)
return perm
def generate_dataset(data_path,
dataset_name,
directed):
### load and preprocess dataset
dataset_family = os.path.split(os.path.split(data_path)[0])[1]
if dataset_family == 'PPI':
dataset_type = 'ptg'
graphs = []
start = 0
if not os.path.exists(os.path.join(data_path, '10fold_idx')):
os.makedirs(os.path.join(data_path, '10fold_idx'))
for split in ['train', 'test', 'val']:
graphs_temp = getattr(ptg_datasets, dataset_family)(data_path, split)
graphs += [graphs_temp[i] for i in range(len(graphs_temp))]
end = len(graphs)
split_idx = list(range(start, end))
filename = os.path.join(data_path, '10fold_idx', split+'_idx-{}.txt'.format(0))
np.savetxt(filename, np.array(split_idx).astype(int), fmt='%d')
start = end
num_classes = graphs_temp.num_classes
num_node_type, num_edge_type = None, None
elif dataset_family == 'KarateClub':
dataset_type = 'ptg'
graphs = getattr(ptg_datasets, dataset_family)()
num_classes = graphs.num_classes
num_node_type, num_edge_type = None, None
elif dataset_family == 'TUDataset':
dataset_type = 'ptg'
graphs = getattr(ptg_datasets, dataset_family)(data_path, dataset_name, cleaned=True)
num_classes = graphs.num_classes
num_node_type, num_edge_type = None, None
elif dataset_family == 'Amazon':
dataset_type = 'ptg'
graphs = getattr(ptg_datasets, dataset_family)(data_path, dataset_name)
num_classes = graphs.num_classes
num_node_type, num_edge_type = None, None
elif dataset_family == 'Planetoid':
dataset_type = 'ptg'
graphs = getattr(ptg_datasets, dataset_family)(data_path, dataset_name)
num_classes = graphs.num_classes
num_node_type, num_edge_type = None, None
elif 'ogb' in data_path:
dataset_type = 'general'
graphs, num_classes = load_ogb_data(data_path, dataset_name, False)
num_node_type, num_edge_type = None, None
elif dataset_name == 'ZINC':
dataset_type = 'general'
graphs, num_classes, num_node_type, num_edge_type = load_zinc_data(data_path, dataset_name, False)
elif os.path.split(data_path)[-1] in ['SR_graphs', 'all_graphs']:
dataset_type = 'general'
graphs, num_classes = load_g6_graphs(data_path, dataset_name)
num_node_type, num_edge_type = None, None
else:
dataset_type = 'general'
graphs, num_classes = load_data(data_path, dataset_name, False)
num_node_type, num_edge_type = None, None
graphs_ptg = list()
for i, data in tqdm(enumerate(graphs)):
new_data = _prepare(data, directed, dataset_type, dataset_name)
graphs_ptg.append(new_data)
return graphs_ptg, num_classes, num_node_type, num_edge_type
# ------------------------------------------------------------------------
def _prepare(data, directed, dataset_type='ptg', dataset_name=None):
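    """Convert one raw graph into a torch_geometric Data object with node
    features, a de-duplicated edge_index (self-loops removed), optional edge
    features, and per-node degrees."""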
new_data = Data()
# nodes
if dataset_type == 'ptg':
if hasattr(data, 'x') and data.x is not None:
num_nodes = data.x.shape[0]
x = data.x
else:
num_nodes = data.num_nodes
x = torch.ones((num_nodes,1))
setattr(new_data, 'x', x)
else:
num_nodes = data.node_features.shape[0]
setattr(new_data, 'x', data.node_features)
setattr(new_data, 'graph_size', float(num_nodes))
# edges
if dataset_type == 'ptg':
num_edges = float(data.edge_index.shape[1]) if directed else data.edge_index.shape[1]/2
edge_index = data.edge_index
if hasattr(data, 'edge_attr') and data.edge_attr is not None:
edge_features = data.edge_attr
else:
edge_features = None
else:
num_edges = float(data.edge_mat.shape[1]) if directed else data.edge_mat.shape[1]/2
edge_index = data.edge_mat
if hasattr(data, 'edge_features') and data.edge_features is not None:
edge_features = data.edge_features
else:
edge_features = None
setattr(new_data, 'edge_size', num_edges)
# adjacency
    if edge_index.numel() != 0:
# multi-edge graphs not allowed
init_num_edges = edge_index.shape[1]
edge_index, inverse = torch.unique(edge_index, dim=1, sorted=True, return_inverse=True)
kept_inds = unique_indices(edge_index.shape[1], inverse)
# warning messages
        if init_num_edges != edge_index.shape[1]:
print('Warning: detected duplicate edges')
init_num_edges = edge_index.shape[1]
if edge_features is not None:
edge_features = edge_features[kept_inds]
edge_index, edge_features = remove_self_loops(edge_index, edge_features)
else:
edge_index, _ = remove_self_loops(edge_index, None)
# warning messages
        if init_num_edges != edge_index.shape[1]:
print('Warning: detected self loops')
setattr(new_data, 'edge_index', edge_index)
# edge features
if edge_features is not None:
setattr(new_data, 'edge_features', edge_features)
# degrees
degrees = compute_degrees(edge_index, num_nodes, directed)
setattr(new_data, 'degrees', degrees)
# if regression or dataset_name in {'ogbg-molpcba', 'ogbg-molhiv', 'ZINC'}:
# setattr(new_data, 'y', torch.tensor(data.label).unsqueeze(0).float())
# else:
# setattr(new_data, 'y', torch.tensor(data.label).unsqueeze(0).long())
return new_data
# --------------------------------------------------------------------------------------
|
1634352
|
from .context import assert_equal, x, y, _Add, _Mul, _Pow
import pytest
from sympy import Rational, Mod, sqrt, pi
def _Mod(*args):
return Mod(*args, evaluate=False)
def test_mod_usual():
assert_equal("128\\mod 3", _Mod(128, 3))
assert_equal("7\\mod 128", _Mod(7, 128))
assert_equal("5\\mod 10", _Mod(5, 10))
assert_equal("5\\mod 5", _Mod(5, 5))
assert_equal("3\\mod 2", _Mod(3, 2))
assert_equal("0 \\mod 6", _Mod(0, 6))
assert_equal("6109\\mod 28", _Mod(6109, 28))
assert_equal("4000000000\\mod 28791", _Mod(4000000000, 28791))
assert_equal("128\\times 10^{300}\\mod 876123", _Mod(Rational('128E300'), 876123))
assert_equal("876,123\\mod 128E300)", _Mod(876123, Rational('128E300')))
def test_mod_negative():
assert_equal("-1\\mod 2", _Mod(-1, 2))
assert_equal("-3\\mod 3", _Mod(-3, 3))
assert_equal("-12\\mod -12", _Mod(-12, -12))
assert_equal("-128\\mod 4", _Mod(-128, 4))
assert_equal("9\\mod -213", _Mod(9, -213))
assert_equal("123123\\mod -541", _Mod(123123, -541))
assert_equal("-123123\\mod 541", _Mod(-123123, 541))
assert_equal("-97E34\\mod 7", _Mod(Rational('-97E34'), 7))
def test_mod_fraction():
assert_equal("\\frac{1}{2} \\mod 3", _Mod(Rational(1, 2), 3))
assert_equal("\\frac{6}{2} \\mod 3", _Mod(Rational(6, 2), 3))
assert_equal("\\frac{-14}{2} \\mod 5", _Mod(Rational(-14, 2), 5))
assert_equal("123\\mod \\frac{42}{6}", _Mod(123, Rational(42, 6)))
assert_equal("431\\mod \\frac{2}{123}", _Mod(431, Rational(2, 123)))
assert_equal("\\frac{5}{5} \\mod \\frac{5}{5}", _Mod(Rational(5, 5), Rational(5, 5)))
assert_equal("\\frac{849}{-21}\\mod \\frac{92}{2}", _Mod(Rational(849, -21), Rational(92, 2)))
assert_equal("13\\times 10^9\\mod \\frac{21}{-2}", _Mod(Rational('13E9'), Rational(21, -2)))
def test_mod_float():
assert_equal("0.41\\mod 2", _Mod(Rational('0.41'), 2))
assert_equal("143E-13\\mod 21", _Mod(Rational('143E-13'), 21))
assert_equal("-9.80665\\mod 9.80665", _Mod(Rational('-9.80665'), Rational('9.80665')))
assert_equal("0.0000923423\\mod -8341.234802909", _Mod(Rational('0.0000923423'), Rational('-8341.234802909')))
assert_equal("\\sqrt{5}\\mod \\sqrt{2}", _Mod(sqrt(5, evaluate=False), sqrt(2, evaluate=False)))
assert_equal("987\\mod \\pi", _Mod(987, pi))
assert_equal("\\pi\\mod \\frac{1+\\sqrt{5}}{2})", _Mod(pi, _Mul(_Pow(2, -1), _Add(1, sqrt(5, evaluate=False)))))
assert_equal("1234\\mod 1E-29", _Mod(1234, Rational('1E-29')))
def test_mod_expr():
assert_equal("1+1\\mod 2", 1 + _Mod(1, 2))
assert_equal("876123\\mod 128\\times 10^{300}", _Mod(876123, Rational('128E300')))
assert_equal("141\\mod \\frac{9}{3}", _Mod(141, Rational(9, 3)))
assert_equal("\\frac{872}{12\\mod 9 * 4} * 2", _Mul(872, _Pow(_Mul(_Mod(12, 9), 4), -1), 2))
assert_equal("1E-32 * (1E29\\mod 74)", _Mul(Rational('1E-32'), _Mod(Rational('1E29'), 74)))
assert_equal("299,792,458\\mod 9.81", _Mod(299792458, Rational('9.81')))
def test_mod_symbol():
assert_equal("x\\mod y", _Mod(x, y))
assert_equal("2x\\mod y", _Mod(2 * x, y))
assert_equal("y + \\frac{3\\mod 2}{4}", _Add(y, _Mul(_Mod(3, 2), _Pow(4, -1))))
assert_equal("0.5x * 2 + \\sqrt{x}\\mod 8y", _Add(_Mul(Rational(1, 2), x, 2), _Mod(sqrt(x), _Mul(8, y))))
assert_equal("6.673E-11 * \\frac{(8.85418782E-12\\mod 9x) + 4}{2y}", _Mul(Rational('6.673E-11'), _Add(_Mod(Rational('8.85418782E-12'), _Mul(9, x)), 4), _Pow(_Mul(2, y), -1)))
|
1634378
|
import base64
import math
import os
import uuid
from datetime import datetime
from django.contrib.auth.models import User
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.db.models import Q
""" Project model."""
class Project(models.Model):
name = models.CharField(max_length=100)
introduction = models.TextField(blank=True, default="")
conclusion = models.TextField(blank=True, default="")
scope = models.TextField(blank=True, default="")
added = models.DateTimeField(default=datetime.now)
start_date = models.DateField(null=True)
end_date = models.DateField(null=True)
archived = models.BooleanField(default=False)
pentesters = models.ManyToManyField(User, related_name='%(class)s_pentesters')
viewers = models.ManyToManyField(User, related_name='%(class)s_viewers')
def __str__(self):
return self.name
def p1_hits(self):
"""Return all P1 hits for the project."""
return self.hits_by_severity(1)
def p2_hits(self):
"""Return all P2 hits for the project."""
return self.hits_by_severity(2)
def p3_hits(self):
"""Return all P3 hits for the project."""
return self.hits_by_severity(3)
def p4_hits(self):
"""Return all P4 hits for the project."""
return self.hits_by_severity(4)
def p5_hits(self):
"""Return all P5 hits for the project."""
return self.hits_by_severity(5)
def hits_by_severity(self, severity):
"""Filter hits by severity for the project."""
hits = []
for assessment in self.assessment_set.all() :
hits.extend(assessment.hits_by_severity(severity))
return hits
def get_viewable(user):
"""Returns all viewable & non-archived projects"""
return Project.objects.filter(Q(pentesters__in=[user]) | Q(viewers__in=[user])).filter(archived = False).distinct()
def get_archived_viewable(user):
"""Returns all viewable & non-archived projects"""
return Project.objects.filter(Q(pentesters__in=[user]) | Q(viewers__in=[user])).filter(archived = True).distinct()
def is_user_can_view(self, user):
"""Verify if the user have read access for this project"""
result = False
if user in self.pentesters.all() or user in self.viewers.all() :
result = True
return result
def is_user_can_edit(self, user):
"""Verify if the user have write access for this project"""
result = False
if user in self.pentesters.all() :
result = True
return result
def is_user_can_create(self, user):
"""Verify if the user can create this project"""
return True
class Meta:
ordering = ('name',)
"""Assesment model."""
class Assessment(models.Model):
name = models.CharField(max_length=100)
project = models.ForeignKey(Project, on_delete=models.CASCADE)
added = models.DateTimeField(default=datetime.now)
def __str__(self):
return self.name
def displayable_hits(self):
return self.hit_set.filter(displayable = True)
def p1_hits(self):
"""Return all P1 hits for the assessment."""
return self.hits_by_severity(1)
def p2_hits(self):
"""Return all P2 hits for the assessment."""
return self.hits_by_severity(2)
def p3_hits(self):
"""Return all P3 hits for the assessment."""
return self.hits_by_severity(3)
def p4_hits(self):
"""Return all P4 hits for the assessment."""
return self.hits_by_severity(4)
def p5_hits(self):
"""Return all P5 hits for the assessment."""
return self.hits_by_severity(5)
def hits_by_severity(self, severity):
"""Filter hits by severity for the assessment."""
hits = []
for hit in self.hit_set.filter(severity = severity).all() :
hits.append(hit)
return hits
def open_flags(self):
"""Return all open flags for the assessment."""
return self.flag_set.filter(done = False)
def get_viewable(user):
"""Returns all viewable assessments"""
return Assessment.objects.filter(project__in=Project.get_viewable(user))
def is_user_can_view(self, user):
"""Verify if the user have read access for this assessment"""
return self.project.is_user_can_view(user)
def is_user_can_edit(self, user):
"""Verify if the user have write access for this assessment"""
return self.project.is_user_can_edit(user)
def is_user_can_create(self, user):
"""Verify if the user can create this assessment"""
return self.project.is_user_can_edit(user)
class Meta:
ordering = ('name',)
"""Label model."""
class Label(models.Model):
title = models.CharField(max_length=200)
color = models.CharField(max_length=7)
deprecated = models.BooleanField(default=False)
def __str__(self):
return self.title
def get_viewable(user):
"""Returns all viewable labels"""
return Label.objects.all()
def get_not_deprecated(user):
"""Returns not deprecated labels"""
return Label.objects.filter(deprecated=False)
def is_user_can_view(self, user):
"""Verify if the user have read access for this label"""
return True
def is_user_can_edit(self, user):
"""Verify if the user have write access for this label"""
return user.is_staff
def is_user_can_create(self, user):
"""Verify if the user can create this label"""
return user.is_staff
class Meta:
ordering = ('pk',)
"""CvssV3.1 model"""
class Cvss(models.Model):
NALP_CHOICES = (
('N', 'Network'),
('A', 'Adjacent'),
('L', 'Local'),
('P', 'Physical')
)
LH_CHOICES = (
('L', 'Low'),
('H', 'High')
)
NLH_CHOICES = (
('N', 'None'),
('L', 'Low'),
('H', 'High')
)
NR_CHOICES = (
('N', 'None'),
('R', 'Required')
)
UC_CHOICES = (
('U', 'Unchanged'),
('C', 'Changed')
)
"""CVSS String values"""
attack_vector = models.CharField(max_length=1,choices=NALP_CHOICES)
attack_complexity = models.CharField(max_length=1,choices=LH_CHOICES)
privilege_required = models.CharField(max_length=1,choices=NLH_CHOICES)
user_interaction = models.CharField(max_length=1,choices=NR_CHOICES)
scope = models.CharField(max_length=1,choices=UC_CHOICES)
confidentiality = models.CharField(max_length=1,choices=NLH_CHOICES)
integrity = models.CharField(max_length=1,choices=NLH_CHOICES)
availability = models.CharField(max_length=1,choices=NLH_CHOICES)
"""Values for usage"""
decimal_value = models.DecimalField(max_digits=3, decimal_places=1, default=-1.0)
def __round_up(self, n, decimals=0):
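        """Round n up at `decimals` places (the CVSS 3.1 'roundup' helper)."""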
multiplier = 10 ** decimals
return math.ceil(n * multiplier) / multiplier
def __get_cia_value(self,value) :
if value == "H":
return 0.56
elif value == "L":
return 0.22
else :
return 0.0
def __get_confidentiality_value(self) :
return self.__get_cia_value(self.confidentiality)
def __get_integrity_value(self) :
return self.__get_cia_value(self.integrity)
def __get_availability_value(self) :
return self.__get_cia_value(self.availability)
def __get_attack_vector_value(self) :
if self.attack_vector == "N":
return 0.85
elif self.attack_vector == "A":
return 0.62
elif self.attack_vector == "L":
return 0.55
else :
return 0.2
def __get_attack_complexity_value(self) :
if self.attack_complexity == "L" :
return 0.77
else :
return 0.44
def __get_privilege_required_value(self) :
if self.privilege_required == "N" :
return 0.85
elif self.privilege_required == "L" :
if self.scope == "U" :
return 0.62
else :
return 0.68
else :
if self.scope == "U" :
return 0.27
else :
return 0.50
def __get_user_interaction_value(self) :
if self.user_interaction == "N" :
return 0.85
else :
return 0.62
def __get_exploitability(self) :
return 8.22 * self.__get_attack_vector_value() * self.__get_attack_complexity_value() * self.__get_privilege_required_value() * self.__get_user_interaction_value()
def __get_isc_base(self) :
return 1.0 - ((1.0 - self.__get_confidentiality_value()) * (1.0 - self.__get_integrity_value()) * (1.0 - self.__get_availability_value()))
def __get_isc(self, isc_base) :
if self.scope == "U" :
return 6.42 * isc_base
else :
return 7.52 * (isc_base - 0.029) - 3.25 * (isc_base - 0.02)**15
    def compute_cvss_value(self) :
        isc_base = self.__get_isc_base()
        isc = self.__get_isc(isc_base)
        exploitability = self.__get_exploitability()
        if isc > 0.0 :
            if self.scope == "U" :
                self.decimal_value = self.__round_up(min(isc + exploitability, 10.0), 1)
            else :
                self.decimal_value = self.__round_up(min(1.08 * (isc + exploitability), 10.0), 1)
        else :
            self.decimal_value = 0.0
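    # Illustrative check (not part of the model): for the vector
    # AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H, exploitability is
    # 8.22 * 0.85 * 0.77 * 0.85 * 0.85 ~= 3.887 and ISC is
    # 6.42 * (1 - 0.44**3) ~= 5.873, so compute_cvss_value() stores
    # round_up(3.887 + 5.873, 1) = 9.8, matching the official CVSS 3.1
    # calculator for that vector.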
class Meta:
ordering = ('decimal_value',)
"""Hit model."""
class Hit(models.Model):
title = models.CharField(max_length=200)
body = models.TextField(blank=True, default="")
remediation = models.TextField(blank=True, default="")
asset = models.CharField(blank=True, max_length=256, default="")
assessment = models.ForeignKey(Assessment, null=True, on_delete=models.CASCADE)
added = models.DateTimeField(default=datetime.now)
severity = models.IntegerField(default=5, validators=[MinValueValidator(0), MaxValueValidator(5)])
fix_complexity = models.IntegerField(default=0, validators=[MinValueValidator(0), MaxValueValidator(3)])
displayable = models.BooleanField(default=True)
cvss = models.OneToOneField(Cvss, null=True, on_delete=models.SET_NULL)
labels = models.ManyToManyField(Label)
def __str__(self):
return self.title
def get_viewable(user):
"""Returns all viewable hits"""
return Hit.objects.filter(assessment__in=Assessment.get_viewable(user))
def is_user_can_view(self, user):
"""Verify if the user have read access for this hit"""
return self.assessment.is_user_can_view(user)
def is_user_can_edit(self, user):
"""Verify if the user have write access for this hit"""
return self.assessment.is_user_can_edit(user)
def is_user_can_create(self, user):
"""Verify if the user can create this hit"""
return self.assessment.is_user_can_edit(user)
def get_unique_id(self):
"""Return a pretty value of the ID, ex: PTART-2022-<id>"""
return "PTART-" + str(self.added.year) + "-" + str(self.id).zfill(5)
def get_fix_complexity_str(self) :
value = "N/D"
if self.fix_complexity == 1 :
value = "Hard"
elif self.fix_complexity == 2 :
value = "Moderate"
elif self.fix_complexity == 3 :
value = "Easy"
return value
def get_cvss_value(self):
"""Return the decimal value of the cvss"""
if self.cvss is None :
return "---"
else :
return self.cvss.decimal_value
def get_cvss_string(self):
"""Return the string value of the cvss"""
if self.cvss is None :
return ""
else :
return "CVSS:3.1/AV:" + self.cvss.attack_vector + "/AC:" + self.cvss.attack_complexity + "/PR:" + self.cvss.privilege_required + "/UI:" + self.cvss.user_interaction + "/S:" + self.cvss.scope + "/C:" + self.cvss.confidentiality + "/I:" + self.cvss.integrity + "/A:" + self.cvss.availability
def delete(self, *args, **kwargs):
if self.cvss:
self.cvss.delete()
        return super().delete(*args, **kwargs)
class Meta:
ordering = ('severity', '-cvss', 'title',)
"""Comment model."""
class Comment(models.Model):
hit = models.ForeignKey(Hit, null=True, on_delete=models.CASCADE)
text = models.CharField(max_length=1000, default="")
author = models.ForeignKey(User, null=True, on_delete=models.PROTECT)
added = models.DateTimeField(default=datetime.now)
def get_viewable(user):
"""Returns all viewable comments"""
return Comment.objects.filter(hit__in=Hit.get_viewable(user))
def is_user_can_view(self, user):
"""Verify if the user have read access for this comment"""
return self.hit.is_user_can_view(user)
def is_user_can_edit(self, user):
"""Verify if the user have write access for this comment"""
return self.hit.is_user_can_edit(user)
def is_user_can_create(self, user):
"""Verify if the user can create this comment"""
return self.hit.is_user_can_edit(user)
def __str__(self):
return self.text
class Meta:
ordering = ('added',)
"""Screenshot model."""
class Screenshot(models.Model):
upload_folder = 'screenshots'
hit = models.ForeignKey(Hit, null=True, on_delete=models.CASCADE)
screenshot = models.ImageField(upload_to=upload_folder)
caption = models.CharField(blank=True, max_length=256, default="")
def get_data(self):
"""Get screenshot data in Base64"""
encoded_string = ''
        extension = os.path.splitext(self.screenshot.url)[1].lstrip('.')  # drop the dot so the data URI reads e.g. image/png
with open(self.screenshot.url, 'rb') as img_f:
encoded_string = base64.b64encode(img_f.read())
return 'data:image/%s;base64,%s' % (extension,encoded_string.decode("utf-8"))
def get_raw_data(self):
"""Get screenshot data in binary format"""
result = ''
with open(self.screenshot.url, 'rb') as img_f:
result = img_f.read()
return result
def delete(self):
"""Delete file related to the screenshot"""
os.remove(self.screenshot.url)
super(Screenshot, self).delete()
def get_viewable(user):
"""Returns all viewable screenshots"""
return Screenshot.objects.filter(hit__in=Hit.get_viewable(user))
def is_user_can_view(self, user):
"""Verify if the user have read access for this screenshot"""
return self.hit.is_user_can_view(user)
def is_user_can_edit(self, user):
"""Verify if the user have write access for this screenshot"""
return self.hit.is_user_can_edit(user)
def is_user_can_create(self, user):
"""Verify if the user can create this screenshot"""
return self.hit.is_user_can_edit(user)
def __str__(self):
        return str(self.screenshot)
"""Attachment model."""
class Attachment(models.Model):
upload_folder = 'attachments'
hit = models.ForeignKey(Hit, null=True, on_delete=models.CASCADE)
attachment_name = models.CharField(max_length=100, default="")
attachment = models.FileField(upload_to=upload_folder)
def get_data(self):
"""Get attachment data in Base64"""
encoded_string = ''
with open(self.attachment.url, 'rb') as file_f:
encoded_string = base64.b64encode(file_f.read())
return 'data:application/octet;base64,%s' % (encoded_string.decode("utf-8"))
def get_raw_data(self):
"""Get attachment data in binary format"""
result = ''
with open(self.attachment.url, 'rb') as file_f:
result = file_f.read()
return result
def delete(self):
"""Delete file related to the attachment"""
os.remove(self.attachment.url)
super(Attachment, self).delete()
def get_viewable(user):
"""Returns all viewable attachments"""
return Attachment.objects.filter(hit__in=Hit.get_viewable(user))
def is_user_can_view(self, user):
"""Verify if the user have read access for this attachment"""
return self.hit.is_user_can_view(user)
def is_user_can_edit(self, user):
"""Verify if the user have write access for this attachment"""
return self.hit.is_user_can_edit(user)
def is_user_can_create(self, user):
"""Verify if the user can create this attachment"""
return self.hit.is_user_can_edit(user)
def __str__(self):
        return str(self.attachment)
"""Flag model."""
class Flag(models.Model):
title = models.CharField(max_length=100)
note = models.TextField(blank=True, default="")
asset = models.CharField(blank=True, max_length=256, default="")
assessment = models.ForeignKey(Assessment, null=True, on_delete=models.CASCADE)
done = models.BooleanField(default=False)
added = models.DateTimeField(default=datetime.now)
assignee = models.ForeignKey(User, null=True, on_delete=models.PROTECT)
def __str__(self):
return self.title
def get_viewable(user):
"""Returns all viewable flags"""
return Flag.objects.filter(assessment__in=Assessment.get_viewable(user))
def is_user_can_view(self, user):
"""Verify if the user have read access for this flag"""
return self.assessment.is_user_can_view(user)
def is_user_can_edit(self, user):
"""Verify if the user have write access for this flag"""
return self.assessment.is_user_can_edit(user)
def is_user_can_create(self, user):
"""Verify if the user can create this flag"""
return self.assessment.is_user_can_edit(user)
class Meta:
ordering = ('title',)
"""Template model."""
class Template(models.Model):
name = models.CharField(max_length=100)
severity = models.IntegerField(default=5, validators=[MinValueValidator(0), MaxValueValidator(5)])
body = models.TextField(blank=True, default="")
remediation = models.TextField(blank=True, default="")
asset = models.CharField(blank=True, max_length=256, default="")
def __str__(self):
return self.name
def get_viewable(user):
"""Returns all viewable templates"""
return Template.objects.all()
def is_user_can_view(self, user):
"""Verify if the user have read access for this template"""
return True
def is_user_can_edit(self, user):
"""Verify if the user have write access for this template"""
return user.is_staff
def is_user_can_create(self, user):
"""Verify if the user can create this template"""
return user.is_staff
class Meta:
ordering = ('severity','name',)
"""Methodology model."""
class Methodology(models.Model):
name = models.CharField(max_length=100)
description = models.TextField(blank=True, default="")
def __str__(self):
return self.name
def get_viewable(user):
"""Returns all viewable methodologies"""
return Methodology.objects.all()
def is_user_can_view(self, user):
"""Verify if the user have read access for this methodology"""
return True
def is_user_can_edit(self, user):
"""Verify if the user have write access for this methodology"""
return user.is_staff
def is_user_can_create(self, user):
"""Verify if the user can create this methodology"""
return user.is_staff
class Meta:
ordering = ('name',)
"""Module model."""
class Module(models.Model):
name = models.CharField(max_length=100)
description = models.TextField(blank=True, default="")
methodology = models.ForeignKey(Methodology, on_delete=models.CASCADE, null=True, default=None)
def get_viewable(user):
"""Returns all viewable modules"""
return Module.objects.all()
def is_user_can_view(self, user):
"""Verify if the user have read access for this module"""
return True
def is_user_can_edit(self, user):
"""Verify if the user have write access for this module"""
return user.is_staff
def is_user_can_create(self, user):
"""Verify if the user can create this module"""
return user.is_staff
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
"""Case model."""
class Case(models.Model):
name = models.CharField(max_length=100)
module = models.ForeignKey(Module, on_delete=models.CASCADE)
description = models.TextField(blank=True, default="")
def get_viewable(user):
"""Returns all viewable cases"""
return Case.objects.all()
def is_user_can_view(self, user):
"""Verify if the user have read access for this case"""
return True
def is_user_can_edit(self, user):
"""Verify if the user have write access for this case"""
return user.is_staff
def is_user_can_create(self, user):
"""Verify if the user can create this case"""
return user.is_staff
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
"""Severity structure."""
class Severity():
values = [1,2,3,4,5]
#-----------------------------------------------------------------------------#
# ASSET MANAGEMENT #
#-----------------------------------------------------------------------------#
"""Host model."""
class Host(models.Model):
project = models.ForeignKey(Project, null=True, on_delete=models.CASCADE)
hostname = models.CharField(max_length=100, default="")
ip = models.CharField(max_length=100, default="")
os = models.CharField(max_length=100, default="")
notes = models.CharField(max_length=1000, default="")
def get_viewable(user):
"""Returns all viewable hosts"""
return Host.objects.filter(project__in=Project.get_viewable(user))
def is_user_can_view(self, user):
"""Verify if the user have read access for this host"""
return self.project.is_user_can_view(user)
def is_user_can_edit(self, user):
"""Verify if the user have write access for this host"""
return self.project.is_user_can_edit(user)
def is_user_can_create(self, user):
"""Verify if the user can create this host"""
return self.project.is_user_can_edit(user)
def __str__(self):
return self.hostname + " - " + self.ip
class Meta:
ordering = ('hostname',)
"""Service model."""
class Service(models.Model):
host = models.ForeignKey(Host, null=True, on_delete=models.CASCADE)
port = models.IntegerField(default=0)
protocol = models.CharField(max_length=200, default="")
name = models.CharField(max_length=200, default="")
version = models.CharField(max_length=100, default="")
banner = models.CharField(max_length=1000, default="")
def get_viewable(user):
"""Returns all viewable Services"""
        return Service.objects.filter(host__in=Host.get_viewable(user))
def is_user_can_view(self, user):
"""Verify if the user have read access for this host"""
return self.host.is_user_can_view(user)
def is_user_can_edit(self, user):
"""Verify if the user have write access for this host"""
return self.host.is_user_can_edit(user)
def is_user_can_create(self, user):
"""Verify if the user can create this host"""
return self.host.is_user_can_edit(user)
def __str__(self):
return self.hostname + " - " + self.ip
class Meta:
ordering = ('port',)
|
1634406
|
import utils
import os
import unittest
TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
utils.set_search_paths(TOPDIR)
import ihm.dataset
import ihm.location
import ihm.geometry
def _make_test_file(fname):
with open(fname, 'w') as fh:
fh.write('contents')
class Tests(unittest.TestCase):
def test_dataset(self):
"""Test Dataset base class"""
loc = ihm.location.PDBLocation('1abc', version='foo', details='bar')
d = ihm.dataset.Dataset(loc)
self.assertIsNone(d.details)
self.assertEqual(len(d.parents), 0)
l2 = ihm.location.PDBLocation('1xyz', version='foo', details='bar')
d2 = ihm.dataset.Dataset(l2, details='foo')
self.assertEqual(d2.details, 'foo')
d.parents.append(d2)
self.assertEqual(len(d.parents), 1)
self.assertNotEqual(d, d2)
l3 = ihm.location.PDBLocation('1cde', version='foo', details='bar')
d3 = ihm.dataset.Dataset(l3, details='bar')
t = ihm.geometry.Transformation(
rot_matrix=[[-0.64, 0.09, 0.77], [0.76, -0.12, 0.64],
[0.15, 0.99, 0.01]],
tr_vector=[1., 2., 3.])
td = ihm.dataset.TransformedDataset(d3, transform=t)
d.parents.append(td)
self.assertEqual(len(d.parents), 2)
def test_add_primary_no_parents(self):
"""Test add_primary() method, no parents"""
l1 = ihm.location.PDBLocation('1abc', version='foo', details='bar')
d1 = ihm.dataset.Dataset(l1)
l2 = ihm.location.PDBLocation('1xyz', version='foo', details='bar')
d2 = ihm.dataset.Dataset(l2)
d1.add_primary(d2)
self.assertEqual(d1.parents, [d2])
def test_add_primary_one_parent(self):
"""Test add_primary() method, one parent"""
l1 = ihm.location.PDBLocation('1abc', version='foo', details='bar')
d1 = ihm.dataset.Dataset(l1)
l2 = ihm.location.PDBLocation('1xyz', version='foo', details='bar')
d2 = ihm.dataset.Dataset(l2)
l3 = ihm.location.PDBLocation('2def', version='foo', details='bar')
d3 = ihm.dataset.Dataset(l3)
d1.parents.append(d2)
d1.add_primary(d3)
self.assertEqual(d1.parents, [d2])
self.assertEqual(d2.parents, [d3])
def test_add_primary_two_parents(self):
"""Test add_primary() method, two parents"""
l1 = ihm.location.PDBLocation('1abc', version='foo', details='bar')
d1 = ihm.dataset.Dataset(l1)
l2 = ihm.location.PDBLocation('1xyz', version='foo', details='bar')
d2 = ihm.dataset.Dataset(l2)
l3 = ihm.location.PDBLocation('2def', version='foo', details='bar')
d3 = ihm.dataset.Dataset(l3)
        l4 = ihm.location.PDBLocation('2ghi', version='foo', details='bar')
        d4 = ihm.dataset.Dataset(l4)
d1.parents.extend((d2, d3))
self.assertRaises(ValueError, d1.add_primary, d4)
def test_cxms_dataset(self):
"""Test CXMSDataset"""
loc = ihm.location.FileLocation(repo='mydoi', path='a')
d = ihm.dataset.CXMSDataset(loc)
self.assertEqual(d.data_type, 'CX-MS data')
def test_hdx_dataset(self):
"""Test HDXDataset"""
loc = ihm.location.FileLocation(repo='mydoi', path='a')
d = ihm.dataset.HDXDataset(loc)
self.assertEqual(d.data_type, 'H/D exchange data')
def test_mass_spec_dataset(self):
"""Test MassSpecDataset"""
loc = ihm.location.FileLocation(repo='mydoi', path='a')
d = ihm.dataset.MassSpecDataset(loc)
self.assertEqual(d.data_type, 'Mass Spectrometry data')
def test_em_density_dataset(self):
"""Test EMDensityDataset"""
loc = ihm.location.FileLocation(repo='mydoi', path='a')
d = ihm.dataset.EMDensityDataset(loc)
self.assertEqual(d.data_type, '3DEM volume')
def test_pdb_dataset(self):
"""Test PDBDataset"""
loc = ihm.location.FileLocation(repo='mydoi', path='a')
d = ihm.dataset.PDBDataset(loc)
self.assertEqual(d.data_type, 'Experimental model')
def test_comp_model_dataset(self):
"""Test ComparativeModelDataset"""
loc = ihm.location.FileLocation(repo='mydoi', path='a')
d = ihm.dataset.ComparativeModelDataset(loc)
self.assertEqual(d.data_type, 'Comparative model')
def test_int_model_dataset(self):
"""Test IntegrativeModelDataset"""
loc = ihm.location.FileLocation(repo='mydoi', path='a')
d = ihm.dataset.IntegrativeModelDataset(loc)
self.assertEqual(d.data_type, 'Integrative model')
def test_de_novo_model_dataset(self):
"""Test DeNovoModelDataset"""
loc = ihm.location.FileLocation(repo='mydoi', path='a')
d = ihm.dataset.DeNovoModelDataset(loc)
self.assertEqual(d.data_type, 'De Novo model')
def test_nmr_dataset(self):
"""Test NMRDataset"""
loc = ihm.location.FileLocation(repo='mydoi', path='a')
d = ihm.dataset.NMRDataset(loc)
self.assertEqual(d.data_type, 'NMR data')
def test_mutagenesis_dataset(self):
"""Test MutagenesisDataset"""
loc = ihm.location.FileLocation(repo='mydoi', path='a')
d = ihm.dataset.MutagenesisDataset(loc)
self.assertEqual(d.data_type, 'Mutagenesis data')
def test_em2d_class_dataset(self):
"""Test EM2DClassDataset"""
loc = ihm.location.FileLocation(repo='mydoi', path='a')
d = ihm.dataset.EM2DClassDataset(loc)
self.assertEqual(d.data_type, '2DEM class average')
def test_em_micrographs_dataset(self):
"""Test EMMicrographsDataset"""
loc = ihm.location.FileLocation(repo='mydoi', path='a')
d = ihm.dataset.EMMicrographsDataset(loc)
self.assertEqual(d.data_type, 'EM raw micrographs')
def test_sas_dataset(self):
"""Test SASDataset"""
loc = ihm.location.FileLocation(repo='mydoi', path='a')
d = ihm.dataset.SASDataset(loc)
self.assertEqual(d.data_type, 'SAS data')
def test_fret_dataset(self):
"""Test FRETDataset"""
loc = ihm.location.FileLocation(repo='mydoi', path='a')
d = ihm.dataset.FRETDataset(loc)
self.assertEqual(d.data_type, 'Single molecule FRET data')
def test_y2h_dataset(self):
"""Test YeastTwoHybridDataset"""
loc = ihm.location.FileLocation(repo='mydoi', path='a')
d = ihm.dataset.YeastTwoHybridDataset(loc)
self.assertEqual(d.data_type, 'Yeast two-hybrid screening data')
def test_genetic_dataset(self):
"""Test GeneticInteractionsDataset"""
loc = ihm.location.FileLocation(repo='mydoi', path='a')
d = ihm.dataset.GeneticInteractionsDataset(loc)
self.assertEqual(
d.data_type, 'Quantitative measurements of genetic interactions')
def test_duplicate_datasets_details(self):
"""Datasets with differing details should be considered duplicates"""
with utils.temporary_directory() as tmpdir:
fname = os.path.join(tmpdir, 'test.pdb')
_make_test_file(fname)
l1 = ihm.location.InputFileLocation(fname, details='test details')
_ = ihm.dataset.PDBDataset(l1)
l2 = ihm.location.InputFileLocation(fname, details='other details')
d2 = ihm.dataset.PDBDataset(l2)
self.assertEqual(l1, l2)
d3 = ihm.dataset.PDBDataset(l2, details='other dataset details')
self.assertEqual(d2, d3)
def test_duplicate_locations(self):
"""Datasets with same location should be considered duplicates"""
with utils.temporary_directory() as tmpdir:
fname1 = os.path.join(tmpdir, 'test1.pdb')
fname2 = os.path.join(tmpdir, 'test2.pdb')
_make_test_file(fname1)
_make_test_file(fname2)
loc1 = ihm.location.InputFileLocation(fname1)
loc2 = ihm.location.InputFileLocation(fname2)
# Identical datasets in the same location aren't duplicated
pdb1 = ihm.dataset.PDBDataset(loc1)
pdb2 = ihm.dataset.PDBDataset(loc1)
self.assertEqual(pdb1, pdb2)
# Datasets in different locations are OK
pdb1 = ihm.dataset.PDBDataset(loc1)
pdb2 = ihm.dataset.PDBDataset(loc2)
self.assertNotEqual(pdb1, pdb2)
if __name__ == '__main__':
unittest.main()
|
1634426
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
import arrow
import emission.net.usercache.formatters.common as fc
import emission.storage.decorations.local_date_queries as ecsdlq
import attrdict as ad
def format(entry):
formatted_entry = ad.AttrDict()
formatted_entry["_id"] = entry["_id"]
formatted_entry.user_id = entry.user_id
metadata = entry.metadata
if "time_zone" not in metadata:
metadata.time_zone = "America/Los_Angeles"
logging.debug("Timestamp conversion: %s -> %s done" % (entry.metadata.write_ts, metadata.write_ts))
fc.expand_metadata_times(metadata)
formatted_entry.metadata = metadata
data = entry.data
fc.expand_data_times(data, metadata)
formatted_entry.data = data
return formatted_entry
|
1634432
|
from .Config import Config
from PyQt5 import QtCore, QtGui, QtWidgets, QtWebEngineWidgets
class Ad(QtWidgets.QWidget):
def __init__(self, minimumSize, responsive=False):
super().__init__()
self.adView = AdView()
self.setLayout(QtWidgets.QHBoxLayout())
self.layout().addChildWidget(self.adView)
if responsive:
self.setMinimumSize(minimumSize)
else:
self.setFixedSize(minimumSize)
self.adView.getAd(minimumSize.width(), minimumSize.height())
def resizeEvent(self, event):
self.adView.resize(self.size())
return super().resizeEvent(event)
class AdView(QtWebEngineWidgets.QWebEngineView):
def __init__(self):
super().__init__()
self.currentAd = None
self.setSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Ignored)
self.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.setPage(Page(self))
self.loadFinished.connect(self.showAd)
self.hide()
def getAvailableAds(self, width, height):
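        """Return the ad sizes from Config.SIZE_LIST that fit within width x height."""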
availableAds = []
for adSize in Config.SIZE_LIST:
if adSize[0] > width or adSize[1] > height:
continue
availableAds.append(adSize)
return availableAds
def getAd(self, width, height):
availableAds = self.getAvailableAds(width, height)
if len(availableAds) == 0:
            if self.currentAd is not None:
self.currentAd = None
self.removeAd()
else:
if self.currentAd != availableAds[0]:
self.currentAd = availableAds[0]
self.loadAd(self.currentAd)
def removeAd(self):
self.stop()
self.hide()
def loadAd(self, size):
self.removeAd()
self.load(QtCore.QUrl("{}?{}".format(Config.SERVER, Config.URL_QUERY.format(width=size[0], height=size[1]))))
def showAd(self, success):
if success:
self.page().runJavaScript("document.body.style.webkitUserSelect = 'none';document.body.style.webkitUserDrag = 'none';")
self.show()
else:
self.hide()
def dropEvent(self, event):
return event.ignore()
class Page(QtWebEngineWidgets.QWebEnginePage):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
settings = self.settings()
settings.setAttribute(QtWebEngineWidgets.QWebEngineSettings.ShowScrollBars, False)
settings.setAttribute(QtWebEngineWidgets.QWebEngineSettings.ErrorPageEnabled, False)
self.setBackgroundColor(QtCore.Qt.transparent)
def createWindow(self, type):
page = Page(self)
page.urlChanged.connect(self.on_url_changed)
return page
def on_url_changed(self, url):
self.sender().deleteLater()
QtGui.QDesktopServices.openUrl(url)
|
1634555
|
from typing import List
from fastapi import FastAPI
from pydantic import BaseModel
app = FastAPI()
class Item(BaseModel):
name: str
description: str
items = [
{"name": "Foo", "description": "There comes my hero"},
{"name": "Red", "description": "It's my aeroplane"},
]
@app.get("/items/", response_model=List[Item])
async def read_items():
return items
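# A minimal sketch of exercising the endpoint (assumes this module is saved as
# main.py; TestClient ships with fastapi/starlette):
#
#     from fastapi.testclient import TestClient
#     client = TestClient(app)
#     assert client.get("/items/").json()[0]["name"] == "Foo"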
|
1634568
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestSparseToDenseMaskOperator(hu.HypothesisTestCase):
@given(n=st.integers(1, 10), k=st.integers(1, 5),
use_length=st.booleans(), **hu.gcs_cpu_only)
def test_sparse_to_dense_mask(self, n, k, use_length, gc, dc):
lengths = np.random.randint(k, size=n).astype(np.int32) + 1
N = sum(lengths)
indices = np.random.randint(5, size=N)
values = np.random.rand(N, 2).astype(np.float32)
default = np.random.rand(2).astype(np.float32)
mask = np.arange(3)
np.random.shuffle(mask)
input_str = ['indices', 'values', 'default']
input_data = [indices, values, default]
if use_length and n > 1:
input_str.append('lengths')
input_data.append(lengths)
output_str = ['output']
op = core.CreateOperator(
'SparseToDenseMask',
input_str,
output_str,
mask=mask,
)
# Check over multiple devices
self.assertDeviceChecks(
dc, op, input_data, [0])
# Gradient check for values
self.assertGradientChecks(
gc, op, input_data, 1, [0])
@given(n=st.integers(1, 10), k=st.integers(1, 5),
use_length=st.booleans(), **hu.gcs_cpu_only)
def test_sparse_to_dense_mask_with_int64(self, n, k, use_length, gc, dc):
lengths = np.random.randint(k, size=n).astype(np.int32) + 1
N = sum(lengths)
int64_mask = 10000000000
indices = np.random.randint(5, size=N) + int64_mask
values = np.random.rand(N, 2).astype(np.float32)
default = np.random.rand(2).astype(np.float32)
mask = np.arange(3) + int64_mask
np.random.shuffle(mask)
input_str = ['indices', 'values', 'default']
input_data = [indices, values, default]
if use_length and n > 1:
input_str.append('lengths')
input_data.append(lengths)
output_str = ['output']
op = core.CreateOperator(
'SparseToDenseMask',
input_str,
output_str,
mask=mask,
)
# Check over multiple devices
self.assertDeviceChecks(
dc, op, input_data, [0])
# Gradient check for values
self.assertGradientChecks(
gc, op, input_data, 1, [0])
@given(n=st.integers(1, 10), k=st.integers(1, 5),
dim=st.integers(1, 3), **hu.gcs_cpu_only)
def test_sparse_to_dense_mask_high_dim(self, n, k, dim, gc, dc):
lengths = np.random.randint(k, size=n).astype(np.int32) + 1
N = sum(lengths)
indices = np.random.randint(5, size=N)
shape = np.random.randint(5, size=dim).astype(np.int32) + 1
values = np.random.rand(*((N,) + tuple(shape))).astype(np.float32)
default = np.random.rand(*shape).astype(np.float32)
mask = np.arange(3)
np.random.shuffle(mask)
op = core.CreateOperator(
'SparseToDenseMask',
['indices', 'values', 'default', 'lengths'],
['output'],
mask=mask,
)
# Check over multiple devices
self.assertDeviceChecks(
dc, op, [indices, values, default, lengths], [0])
# Gradient check for values
self.assertGradientChecks(
gc, op, [indices, values, default, lengths], 1, [0])
if __name__ == "__main__":
import unittest
unittest.main()
|
1634570
|
from django.shortcuts import get_object_or_404, render
from curate.models import Author
def author_embed(request, *args, **kwargs):
slug = kwargs.get('slug')
# Return a 404 if no Author matching that slug is found
get_object_or_404(Author, slug=slug)
# Else return the embed script
return render(
request,
'author_embed.js',
{'slug': slug},
content_type='application/javascript'
)
|
1634600
|
import os
import unittest
import json
from doc2json.s2orc import load_s2orc
JSON_INPUT_DATA = os.path.join('tests', 'pdf', 'N18-3011.json')
class TestS2ORC(unittest.TestCase):
def test_read_write(self):
"""
Check loading current s2orc files
:return:
"""
with open(JSON_INPUT_DATA, 'r') as f:
data = json.load(f)
try1 = load_s2orc(data)
output1 = try1.release_json("pdf")
try2 = load_s2orc(data)
output2 = try2.release_json("pdf")
for key, value in output2.items():
if key == 'header':
assert value != output1[key]
else:
assert value == output1[key]
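# Allow running this test module directly, as the other test modules here do.
if __name__ == '__main__':
    unittest.main()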
|
1634645
|
from cstruct import CStructMeta, CStruct, BIG_ENDIAN
from typing import Optional, Any, Union, Dict
from .fields import eTRVField
from .utils import etrv_read_data, etrv_write_data
class eTRVProperty:
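    """Descriptor that lazily creates one data-struct object per device.

    The instance is cached in device.fields under the attribute name, so
    attribute reads delegate to retrieve() and writes to update().
    """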
def __init__(self, data_struct, **init_kwargs):
self.data_struct = data_struct
self.init_kwargs = init_kwargs
def __set_name__(self, owner, name):
self.name = name
def get_data_object(self, device):
if self.name not in device.fields:
device.fields[self.name] = self.data_struct(device=device, **self.init_kwargs)
return device.fields[self.name]
def __get__(self, device: 'eTRVDevice', instance_type=None):
return self.get_data_object(device).retrieve()
def __set__(self, device: 'eTRVDevice', value) -> None:
self.get_data_object(device).update(value)
class eTRVDataMeta(type):
def __new__(mcls, name, bases, attrs):
cls = super(eTRVDataMeta, mcls).__new__(mcls, name, bases, attrs)
for attr, obj in attrs.items():
if isinstance(obj, eTRVField):
obj.__set_name__(cls, attr)
return cls
class eTRVData(metaclass=eTRVDataMeta):
class Meta:
structure = None # type: Dict[int, str]
send_pin = True
use_encoding = True
read_only = False
def __init__(self, device):
self.device = device
# TODO Should we switch to frozendict?
self.raw_data = {}
for handler, struct in self.Meta.structure.items():
class RawDataStruct(CStruct):
__byte_order__ = BIG_ENDIAN
__struct__ = struct
is_populated = False
is_changed = False
self.raw_data[handler] = RawDataStruct()
def retrieve(self):
if not self.is_populated:
self.read()
return self.retrieve_object(self.device)
def retrieve_object(self, device):
return self
def update(self, data):
read_only = getattr(self.Meta, 'read_only', eTRVData.Meta.read_only)
if read_only:
raise AttributeError('this attribute is read-only')
self.update_object(self.device, data)
def update_object(self, device, data):
pass
@property
def is_populated(self):
return all(map(lambda obj: obj.is_populated, self.raw_data.values()))
@property
def is_changed(self):
        return any(map(lambda obj: obj.is_changed, self.raw_data.values()))
    def read(self, handlers=None):
        """Read raw data from the device; if handlers is None, read all of them."""
        send_pin = getattr(self.Meta, 'send_pin', eTRVData.Meta.send_pin)
        use_encoding = getattr(self.Meta, 'use_encoding', eTRVData.Meta.use_encoding)
        for handler, struct in self.raw_data.items():
            if handlers is not None and handler not in handlers:
                continue
            data = etrv_read_data(self.device, handler, send_pin, use_encoding)
            struct.unpack(data)
            struct.is_populated = True
            struct.is_changed = False
def save(self):
if getattr(self.Meta, 'read_only', eTRVData.Meta.read_only):
raise AttributeError('this attribute is read-only')
results = []
send_pin = getattr(self.Meta, 'send_pin', eTRVData.Meta.send_pin)
use_encoding = getattr(self.Meta, 'use_encoding', eTRVData.Meta.use_encoding)
for handler, struct in self.raw_data.items():
data = struct.pack()
            result = etrv_write_data(self.device, handler, data, send_pin, use_encoding)
if result:
struct.is_changed = False
results.append(result)
return all(results)
def invalidate(self):
for struct in self.raw_data.values():
struct.is_populated = False
struct.is_changed = False
class eTRVSingleData(eTRVData):
def get_direct_field(self):
direct_field = getattr(self.Meta, 'direct_field', None)
if direct_field is None:
raise AttributeError('Field "direct_field" should be defined or "get_direct_field" should be implemented')
return direct_field
def retrieve_object(self, device):
return getattr(self, self.get_direct_field())
def update_object(self, device, data):
return setattr(self, self.get_direct_field(), data)
class Meta:
direct_field = None
|
1634688
|
import datetime
from mongoengine import *
from flask_login import UserMixin
from .annotations import AnnotationModel
from .categories import CategoryModel
from .datasets import DatasetModel
from .images import ImageModel
class UserModel(DynamicDocument, UserMixin):
password = StringField(required=True)
username = StringField(max_length=25, required=True, unique=True)
email = StringField(max_length=30)
name = StringField()
online = BooleanField(default=False)
last_seen = DateTimeField()
is_admin = BooleanField(default=False)
preferences = DictField(default={})
    permissions = ListField(default=[])
# meta = {'allow_inheritance': True}
@property
def datasets(self):
self._update_last_seen()
if self.is_admin:
return DatasetModel.objects
return DatasetModel.objects(Q(owner=self.username) | Q(users__contains=self.username))
@property
def categories(self):
self._update_last_seen()
if self.is_admin:
return CategoryModel.objects
dataset_ids = self.datasets.distinct('categories')
return CategoryModel.objects(Q(id__in=dataset_ids) | Q(creator=self.username))
@property
def images(self):
self._update_last_seen()
if self.is_admin:
return ImageModel.objects
dataset_ids = self.datasets.distinct('id')
return ImageModel.objects(dataset_id__in=dataset_ids)
@property
def annotations(self):
self._update_last_seen()
if self.is_admin:
return AnnotationModel.objects
image_ids = self.images.distinct('id')
return AnnotationModel.objects(image_id__in=image_ids)
def can_view(self, model):
if model is None:
return False
return model.can_view(self)
def can_download(self, model):
if model is None:
return False
return model.can_download(self)
def can_delete(self, model):
if model is None:
return False
return model.can_delete(self)
def can_edit(self, model):
if model is None:
return False
return model.can_edit(self)
def _update_last_seen(self):
self.update(last_seen=datetime.datetime.utcnow())
__all__ = ["UserModel"]
|
1634705
|
import pytest
from river.adapters.progression_counter import InMemoryProgressionCounter
from river.adapters.topics import InMemoryTopicsManager
from river.topicleaner.service import clean
pytestmark = pytest.mark.django_db
def test_done_batch_is_cleaned(batch_factory, resource_factory):
r1, r2 = resource_factory.create_batch(2)
batch = batch_factory.create(resources=[r1, r2])
counters = InMemoryProgressionCounter(
counts={f"{batch.id}:{resource.id}": {"extracted": 10, "loaded": 10} for resource in batch.resources.all()}
)
topics = InMemoryTopicsManager(
topics=[f"{base_topic}.{batch.id}" for base_topic in ["batch", "extract", "transform", "load"]]
)
clean(counters, topics)
assert topics._topics == set()
def test_done_batch_is_cleaned_with_failed(batch_factory, resource_factory):
r1, r2 = resource_factory.create_batch(2)
batch = batch_factory.create(resources=[r1, r2])
counters = InMemoryProgressionCounter(
counts={
f"{batch.id}:{resource.id}": {"extracted": 10, "loaded": 6, "failed": 4}
for resource in batch.resources.all()
}
)
topics = InMemoryTopicsManager(
topics=[f"{base_topic}.{batch.id}" for base_topic in ["batch", "extract", "transform", "load"]]
)
clean(counters, topics)
assert topics._topics == set()
def test_ongoing_batch_is_not_cleaned(batch_factory, resource_factory):
r1, r2 = resource_factory.create_batch(2)
batch = batch_factory.create(resources=[r1, r2])
counters = InMemoryProgressionCounter(
counts={f"{batch.id}:{resource.id}": {"extracted": 10, "loaded": 9} for resource in batch.resources.all()}
)
topics = InMemoryTopicsManager(
topics=[f"{base_topic}.{batch.id}" for base_topic in ["batch", "extract", "transform", "load"]]
)
clean(counters, topics)
assert topics._topics != set()
def test_ongoing_batch_is_not_cleaned_with_failed(batch_factory, resource_factory):
r1, r2 = resource_factory.create_batch(2)
batch = batch_factory.create(resources=[r1, r2])
counters = InMemoryProgressionCounter(
counts={
f"{batch.id}:{resource.id}": {"extracted": 10, "loaded": 6, "failed": 2}
for resource in batch.resources.all()
}
)
topics = InMemoryTopicsManager(
topics=[f"{base_topic}.{batch.id}" for base_topic in ["batch", "extract", "transform", "load"]]
)
clean(counters, topics)
assert topics._topics != set()
def test_none_counter_prevents_cleaning(batch_factory, resource_factory):
r1, r2 = resource_factory.create_batch(2)
batch = batch_factory.create(resources=[r1, r2])
counters = InMemoryProgressionCounter(
counts={f"{batch.id}:{resource.id}": {"extracted": None, "loaded": 10} for resource in batch.resources.all()}
)
topics = InMemoryTopicsManager(
topics=[f"{base_topic}.{batch.id}" for base_topic in ["batch", "extract", "transform", "load"]]
)
clean(counters, topics)
assert topics._topics != set()
def test_missing_counter_prevents_cleaning(batch_factory, resource_factory):
r1, r2 = resource_factory.create_batch(2)
batch = batch_factory.create(resources=[r1, r2])
counters = InMemoryProgressionCounter(
counts={f"{batch.id}:{resource.id}": {"extracted": 10, "loaded": 10} for resource in batch.resources.all()[1:]}
)
topics = InMemoryTopicsManager(
topics=[f"{base_topic}.{batch.id}" for base_topic in ["batch", "extract", "transform", "load"]]
)
clean(counters, topics)
assert topics._topics != set()
|
1634706
|
from trex_stl_lib.api import *
import argparse
import os
# stream from a pcap file, transmitted continuously at 10 pps
# path_relative_to_profile = True
class STLS1(object):
def get_streams (self, direction, tunables, **kwargs):
parser = argparse.ArgumentParser(description='Argparser for {}'.format(os.path.basename(__file__)),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
args = parser.parse_args(tunables)
return [STLStream(packet = STLPktBuilder(pkt ="udp_64B_no_crc.pcap",
path_relative_to_profile = True), # path relative to profile and not to loader path
mode = STLTXCont(pps=10)) ] #rate continues, could be STLTXSingleBurst,STLTXMultiBurst
# dynamic load - used for trex console or simulator
def register():
return STLS1()
|
1634728
|
import torch
import torch.nn as nn
import torch.distributions as tdist
from iflow.densities import AngleNormal
class DynamicModel(nn.Module):
def __init__(self, dim, device=None, dt = 0.01, requires_grad=True):
super().__init__()
self.dim = dim
self.device = device
self.dt = dt
def forward(self, x, logp=None, reverse=False):
return x, logp
def backward(self, z, logp=None):
return z, logp
    def velocity(self, x):
        raise NotImplementedError('velocity must be implemented in the subclass')
    def first_Taylor_dyn(self, x):
        raise NotImplementedError('first_Taylor_dyn must be implemented in the subclass')
def step_forward(self, xt0, noise=False):
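        # One explicit Euler step: x_{t+1} = x_t + f(x_t) * dt; with noise=True
        # it becomes an Euler-Maruyama step with covariance self.var * dt.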
vel = self.velocity(xt0)
mu = vel * self.dt + xt0
        if noise:
var_step = self.var * self.dt
mv_dist = tdist.MultivariateNormal(mu, var_step)
xt1 = mv_dist.rsample()
else:
xt1 = mu
return xt1
def step_backwards(self, xt1, noise=False):
vel = -self.velocity(xt1)
mu_b = vel * self.dt + xt1
        if noise:
var_b = self.var * self.dt
mv_dist = tdist.MultivariateNormal(mu_b, var_b)
xt0 = mv_dist.rsample()
else:
xt0 = mu_b
return xt0
def evolve(self, xti, T=1, reverse=False, noise=False):
xt0 = xti
        if not reverse:
for i in range(T):
xt1 = self.step_forward(xt0, noise=noise)
xt0 = xt1
else:
for i in range(T):
xt1 = self.step_backwards(xt0, noise=noise)
xt0 = xt1
return xt0
def generate_trj(self, xti, T=1, reverse=False, noise=False):
xt0 = xti
trx = xt0[None, ...]
if not reverse:
for i in range(T - 1):
xt1 = self.step_forward(xt0, noise=noise)
xt0 = xt1
trx = torch.cat((trx, xt0[None, ...]))
else:
for i in range(T - 1):
xt1 = self.step_backwards(xt0, noise=noise)
xt0 = xt1
trx = torch.cat((trx, xt0[None, ...]))
return trx
def generate_trj_density(self, xti, T=1, reverse=False):
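        """Propagate the mean and covariance through the linearized dynamics.

        Per step: Ad = I + J(mu) * dt with J = first_Taylor_dyn(mu),
        mu <- mu + f(mu) * dt and var <- Ad var Ad + self.var * dt. Note the
        second Ad is not transposed, which equals the usual A var A^T update
        only when the Jacobian is symmetric.
        """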
_mu = xti
_var = torch.zeros(xti.shape[0], self.dim, self.dim).to(xti)
tr_mean = _mu[None, ...]
tr_var = _var[None, ...]
if not reverse:
for i in range(T - 1):
Ad = self.first_Taylor_dyn(_mu) * self.dt + torch.eye(self.dim).to(xti)
_mu = self.velocity(_mu) * self.dt + _mu
_var = torch.bmm(torch.bmm(Ad, _var), Ad) + self.var * self.dt
tr_mean = torch.cat((tr_mean, _mu[None, ...]))
tr_var = torch.cat((tr_var, _var[None, ...]))
else:
for i in range(T - 1):
Ad = -self.first_Taylor_dyn(_mu) * self.dt + torch.eye(self.dim).to(xti)
_mu = -self.velocity(_mu) * self.dt + _mu
_var = torch.bmm(torch.bmm(Ad, _var), Ad) + self.var * self.dt
tr_mean = torch.cat((tr_mean, _mu[None, ...]))
tr_var = torch.cat((tr_var, _var[None, ...]))
return tr_mean, tr_var
def conditional_distribution(self, xti, T=1, reverse=False):
_mu = xti
_var = torch.zeros(xti.shape[0], self.dim, self.dim).to(xti)
if not reverse:
for i in range(T):
Ad = self.first_Taylor_dyn(_mu) * self.dt + torch.eye(self.dim).to(xti)
_mu = self.velocity(_mu) * self.dt + _mu
_var = torch.bmm(torch.bmm(Ad, _var), Ad) + self.var * self.dt
else:
for i in range(T):
Ad = -self.first_Taylor_dyn(_mu) * self.dt + torch.eye(self.dim).to(xti)
_mu = -self.velocity(_mu) * self.dt + _mu
_var = torch.bmm(torch.bmm(Ad, _var), Ad) + self.var * self.dt
return tdist.MultivariateNormal(_mu, _var)
class LimitCycleDynamicModel(nn.Module):
def __init__(self, dim, device=None, dt=0.01, requires_grad=True):
super().__init__()
self.dim = dim
self.device = device
self.dt = dt
def forward(self, x, logpx=None, reverse=False):
raise NotImplementedError('Forward function has to be implemented')
def transform(self, x, reverse=False):
raise NotImplementedError('Implement transformation from cycle to cartesian')
def velocity(self, x):
raise NotImplementedError('Velocity must be Implemented in the inherited Method')
def first_Taylor_dyn(self, x):
raise NotImplementedError('first_Taylor_dyn must be Implemented in the inherited Method')
def step_forward(self, xt0, noise=False):
## Go To Polar
xt0 = self.transform(xt0, reverse=False)
##Evolve
vel = self.velocity(xt0)
mu = vel * self.dt + xt0
        if noise:
var_step = self.var * self.dt
mv_dist = tdist.MultivariateNormal(mu, var_step)
xt1 = mv_dist.rsample()
else:
xt1 = mu
## Go To Cartesian
xt1 = self.transform(xt1, reverse=True)
return xt1
def step_backwards(self, xt1, noise=False):
## Go To Polar
xt1 = self.transform(xt1, reverse=False)
##Evolve
vel = -self.velocity(xt1)
mu_b = vel * self.dt + xt1
        if noise:
var_b = self.var * self.dt
mv_dist = tdist.MultivariateNormal(mu_b, var_b)
xt0 = mv_dist.rsample()
else:
xt0 = mu_b
## Go To Cartesian
xt0 = self.transform(xt0, reverse=True)
return xt0
def evolve(self, xti, T=1, reverse=False, noise=False):
##Evolve
xt0 = xti
        if not reverse:
for i in range(T):
xt1 = self.step_forward(xt0, noise=noise)
xt0 = xt1
else:
for i in range(T):
xt1 = self.step_backwards(xt0, noise=noise)
xt0 = xt1
return xt0
def generate_trj(self, xti, T=1, reverse=False, noise=False):
xt0 = xti
trx = xt0[None, ...]
if not reverse:
for i in range(T - 1):
xt1 = self.step_forward(xt0, noise=noise)
xt0 = xt1
trx = torch.cat((trx, xt0[None, ...]))
else:
for i in range(T - 1):
xt1 = self.step_backwards(xt0, noise=noise)
xt0 = xt1
trx = torch.cat((trx, xt0[None, ...]))
return trx
def generate_trj_density(self, xti, T=1, reverse=False):
'''
Generate Trajectory Density will create a trajectory in the polar coordinate space.
'''
_mu = xti
_var = torch.zeros(xti.shape[0], self.dim, self.dim).to(xti)
tr_mean = _mu[None, ...]
tr_var = _var[None, ...]
if not reverse:
for i in range(T - 1):
Ad = self.first_Taylor_dyn(_mu) * self.dt + torch.eye(self.dim).to(xti)
_mu = self.velocity(_mu) * self.dt + _mu
_var = torch.bmm(torch.bmm(Ad, _var), Ad) + self.var * self.dt
tr_mean = torch.cat((tr_mean, _mu[None, ...]))
tr_var = torch.cat((tr_var, _var[None, ...]))
else:
for i in range(T - 1):
Ad = -self.first_Taylor_dyn(_mu) * self.dt + torch.eye(self.dim).to(xti)
_mu = -self.velocity(_mu) * self.dt + _mu
_var = torch.bmm(torch.bmm(Ad, _var), Ad) + self.var * self.dt
tr_mean = torch.cat((tr_mean, _mu[None, ...]))
tr_var = torch.cat((tr_var, _var[None, ...]))
return tr_mean, tr_var
def conditional_distribution(self, xti, T=1, reverse=False):
'''
Conditional Distribution will compute the distribution in the Polar Space
'''
_mu = xti
_var = torch.zeros(xti.shape[0], self.dim, self.dim).to(xti)
if not reverse:
for i in range(T):
Ad = self.first_Taylor_dyn(_mu) * self.dt + torch.eye(self.dim).to(xti)
_mu = self.velocity(_mu) * self.dt + _mu
_var = torch.bmm(torch.bmm(Ad, _var), Ad) + self.var * self.dt
else:
for i in range(T):
Ad = -self.first_Taylor_dyn(_mu) * self.dt + torch.eye(self.dim).to(xti)
_mu = -self.velocity(_mu) * self.dt + _mu
_var = torch.bmm(torch.bmm(Ad, _var), Ad) + self.var * self.dt
dists = []
dist_r = tdist.Normal(loc=_mu[:,0], scale=torch.sqrt(_var[:,0,0]))
dists.append(dist_r)
dist_w = AngleNormal(loc=_mu[:,1], scale=torch.sqrt(_var[:,1,1]))
dists.append(dist_w)
        if self.dim == 3:
            dist_z = tdist.Normal(loc=_mu[:, 2], scale=torch.sqrt(_var[:, 2, 2]))
            dists.append(dist_z)
        elif self.dim > 3:
            dist_z = tdist.MultivariateNormal(loc=_mu[:, 2:], covariance_matrix=_var[:, 2:, 2:])
            dists.append(dist_z)
return dists
def cartesian_conditional_distribution(self, xti, T=1, reverse=False):
_mu = xti
_var = torch.zeros(xti.shape[0], self.dim, self.dim).to(xti)
if not reverse:
_mu = self.evolve(_mu, T=T)
var = self.var*self.dt*T
_var = var
return tdist.MultivariateNormal(_mu, _var)
def cartesian_cond_log_prob(self, xt0, xt1, T=1, reverse=False):
'''
Compute the Cartesian Conditional Distribution given as input xt0 and xt1
'''
if not reverse:
dist = self.cartesian_conditional_distribution(xt0, T=T, reverse=reverse)
log_px = dist.log_prob(xt1)
else:
dist = self.cartesian_conditional_distribution(xt1, T=T, reverse=reverse)
log_px = dist.log_prob(xt0)
return log_px
def conditional_log_prob(self, xt0, xt1, T=1, reverse=False):
'''
Compute the Conditional Distribution given as input xt0 and xt1
'''
zeros = torch.zeros(xt0.shape[0],1).to(xt0)
z0, log_J0 = self.forward(xt0, zeros)
z1, log_J1 = self.forward(xt1, zeros)
if not reverse:
dists = self.conditional_distribution(z0, T=T, reverse=reverse)
log_px = log_J1.squeeze()
log_px += dists[0].log_prob(z1[:,0])
log_px += dists[1].log_prob(z1[:,1])
if self.dim == 3:
log_px += dists[2].log_prob(z1[:,2])
elif self.dim > 3:
log_px += dists[2].log_prob(z1[:,2:])
else:
dists = self.conditional_distribution(z1, T=T, reverse=reverse)
log_px = log_J0.squeeze()
log_px += dists[0].log_prob(z0[:, 0])
log_px += dists[1].log_prob(z0[:, 1])
if self.dim == 3:
log_px += dists[2].log_prob(z0[:, 2])
elif self.dim > 3:
log_px += dists[2].log_prob(z0[:, 2:])
return log_px
    def final_distribution(self, x, ref_phase=None):
        dists = []
        # radius final distribution
        _mu_r = self.r_des
        _var_r = 0.01 * torch.ones(1).to(x)
        dist_r = tdist.Normal(loc=_mu_r, scale=_var_r)
        dists.append(dist_r)
        return dists
def cartesian_final_distribution(self, x, ref_phase):
sin_x = torch.sin(ref_phase)
cos_x = torch.cos(ref_phase)
_mu = torch.cat([sin_x[:,None],cos_x[:,None]],1)
_var = 0.1*torch.eye(x.shape[1]).to(x)
if x.shape[1]>2:
_mu_z = torch.zeros(x.shape[0], x.shape[1]-2).to(x)
_mu = torch.cat([_mu, _mu_z],1)
dist = tdist.MultivariateNormal(loc=_mu, covariance_matrix=_var)
return dist
def stable_log_prob(self, x, ref_phase = None):
if ref_phase is not None:
dist = self.cartesian_final_distribution(x, ref_phase)
logpx = dist.log_prob(x)
return logpx
else:
zeros = torch.zeros(x.shape[0], 1).to(x)
z, log_J = self.forward(x, zeros)
dists = self.final_distribution(x, ref_phase)
log_px = log_J.squeeze()
log_px += dists[0].log_prob(z[:, 0])
return log_px
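# --- Usage sketch (minimal, illustrative): a concrete DynamicModel subclass. ---
# The base class assumes subclasses provide `velocity`, `first_Taylor_dyn`, and
# `self.var`; the linear stable dynamics dx/dt = -K x below are just an example.
class LinearStableDynamics(DynamicModel):
    def __init__(self, dim, gain=1.0, noise_std=0.1, dt=0.01):
        super().__init__(dim, dt=dt)
        self.K = gain * torch.eye(dim)
        self.var = (noise_std ** 2) * torch.eye(dim)  # process-noise covariance
    def velocity(self, x):
        # batched velocity field: v(x) = -x K^T (rows are batch elements)
        return -x @ self.K.t()
    def first_Taylor_dyn(self, x):
        # Jacobian of the velocity field, replicated per batch element
        return -self.K.expand(x.shape[0], self.dim, self.dim)
# usage:
#   x0 = torch.ones(4, 2)
#   trj = LinearStableDynamics(dim=2).generate_trj(x0, T=100)  # shape (100, 4, 2)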
|
1634736
|
while True:
    try:
        a = int(input("Enter your age: "))
        if a > 18:
            print("Adult")
        elif 10 < a <= 18:
            print("Teen")
        elif a <= 10:
            print("Child")
        break
    except ValueError:
        # re-prompt instead of exiting, so invalid input can be corrected
        print("Please enter a valid age.")
|
1634744
|
while True:
    num = int(input('Please enter an integer 0 through 9. '))
    if num in range(0, 10):
        print(num)
        break
    else:
        print('That\'s not in range. ')
|
1634747
|
from flask_jwt_extended import JWTManager
from app.dao.users_dao import get_user_by_id
from app.models import User
jwt = JWTManager()
@jwt.user_identity_loader
def transform_user_to_identity_for_jwt(user: User):
return {
'id': user.id,
'name': user.name,
'email_address': user.email_address,
'services': [service.serialize_for_user() for service in user.services if service.active],
'permissions': user.get_permissions()
}
@jwt.user_lookup_loader
def transform_jwt_to_user(_jwt_header, jwt_data) -> User:
sub = jwt_data['sub']
return get_user_by_id(user_id=sub['id'])
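# --- Usage sketch (illustrative; the app factory and user are placeholders) ---
# `init_app` and `create_access_token` are standard flask-jwt-extended calls.
#
#   from flask import Flask
#   from flask_jwt_extended import create_access_token
#
#   app = Flask(__name__)
#   app.config['JWT_SECRET_KEY'] = '...'  # placeholder
#   jwt.init_app(app)
#
#   with app.app_context():
#       token = create_access_token(identity=some_user)  # runs the loaders above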
|
1634763
|
import torch.nn as nn
class IntermediateSequential(nn.Sequential):
def __init__(self, *args, return_intermediate=True):
super().__init__(*args)
self.return_intermediate = return_intermediate
def forward(self, input):
if not self.return_intermediate:
return super().forward(input)
intermediate_outputs = {}
output = input
for name, module in self.named_children():
output = intermediate_outputs[name] = module(output)
return output, intermediate_outputs
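# --- Usage sketch: capturing per-layer activations (illustrative shapes) ---
# Child modules get auto-generated names ('0', '1', ...), which become the keys
# of `intermediate_outputs`.
#
#   import torch
#   model = IntermediateSequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 4))
#   out, feats = model(torch.randn(2, 8))
#   # out.shape == (2, 4); feats['1'] is the post-ReLU activation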
|
1634785
|
from typing import List
from geoalchemy2.shape import from_shape
from couchers.models import Cluster, ClusterRole, ClusterSubscription, Node, Page, PageType, PageVersion, Thread
DEFAULT_PAGE_CONTENT = "There is nothing here yet..."
DEFAULT_PAGE_TITLE_TEMPLATE = "Main page for the {name} {type}"
def create_node(session, geom, parent_node_id):
node = Node(geom=from_shape(geom), parent_node_id=parent_node_id)
session.add(node)
session.flush()
return node
def create_cluster(
session,
parent_node_id: int,
name: str,
description: str,
creator_user_id: int,
admin_ids: List,
is_community: bool,
):
type = "community" if is_community else "group"
cluster = Cluster(
name=name,
description=description,
parent_node_id=parent_node_id,
is_official_cluster=is_community,
)
session.add(cluster)
session.flush()
main_page = Page(
parent_node=cluster.parent_node,
creator_user_id=creator_user_id,
owner_cluster=cluster,
type=PageType.main_page,
thread=Thread(),
)
session.add(main_page)
session.flush()
page_version = PageVersion(
page=main_page,
editor_user_id=creator_user_id,
title=DEFAULT_PAGE_TITLE_TEMPLATE.format(name=name, type=type),
content=DEFAULT_PAGE_CONTENT,
)
session.add(page_version)
cluster.cluster_subscriptions.append(
ClusterSubscription(
user_id=creator_user_id,
role=ClusterRole.admin,
)
)
for admin_id in admin_ids:
cluster.cluster_subscriptions.append(
ClusterSubscription(
user_id=admin_id,
role=ClusterRole.admin,
)
)
return cluster
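# --- Usage sketch (illustrative ids; assumes a SQLAlchemy `session`) ---
# `from_shape` in create_node expects a shapely geometry.
#
#   from shapely.geometry import Point
#
#   node = create_node(session, Point(13.4, 52.5), parent_node_id=None)
#   cluster = create_cluster(
#       session,
#       parent_node_id=node.id,
#       name="Berlin",
#       description="Community for Berlin",
#       creator_user_id=1,
#       admin_ids=[2, 3],
#       is_community=True,
#   )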
|
1634820
|
import discord
from MuteAll import errors
async def do(ctx, task="mute", members=[]):
try:
for member in members:
match task:
case "mute":
await member.edit(mute=True)
case "unmute":
await member.edit(mute=False)
case "end":
await member.edit(mute=False)
case "deafen":
await member.edit(deafen=True)
case "undeafen":
await member.edit(deafen=False)
case "all":
await member.edit(mute=True)
await member.edit(deafen=True)
case "unall":
await member.edit(mute=False)
await member.edit(deafen=False)
except discord.Forbidden: # the bot doesn't have the permission to mute
return await errors.show_permission_error(ctx)
except Exception as e:
return await errors.show_common_error(ctx, e)
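# --- Usage sketch (illustrative command wrapper) ---
# `ctx.author.voice.channel.members` is the usual discord.py way to get the
# members in the caller's voice channel.
#
#   @bot.command()
#   async def muteall(ctx):
#       await do(ctx, task="mute", members=ctx.author.voice.channel.members)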
|
1634837
|
from functools import total_ordering
from operator import is_
import numpy as np
from typing import Callable, Iterable, List, Optional, Sequence, Tuple, Union
from numpy import random
from scipy.ndimage import rotate, map_coordinates, gaussian_filter
import h5py
from itertools import chain
from batchgenerators.augmentations.utils import resize_segmentation
import matplotlib.pyplot as plt
import torch
from .utils import generate_pos_neg_label_crop_centers, \
create_zero_centered_coordinate_mesh, \
elastic_deform_coordinates, \
interpolate_img, scale_coords,\
augment_gamma, augment_mirroring, is_positive, generate_spatial_bounding_box,\
Pad
from medical_seg.utils import resample_image_array_size
from .utils import resample_data_or_seg
RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD = 3
def get_do_separate_z(spacing, anisotropy_threshold=RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD):
do_separate_z = (np.max(spacing) / np.min(spacing)) > anisotropy_threshold
return do_separate_z
def get_lowres_axis(new_spacing):
axis = np.where(max(new_spacing) / np.array(new_spacing) == 1)[0] # find which axis is anisotropic
return axis
def resample_patient(data, seg, original_spacing, target_spacing, order_data=3, order_seg=0, force_separate_z=False,
order_z_data=0, order_z_seg=0,
separate_z_anisotropy_threshold=RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD):
"""
:param data:
:param seg:
:param original_spacing:
:param target_spacing:
:param order_data:
:param order_seg:
:param force_separate_z: if None then we dynamically decide how to resample along z, if True/False then always
/never resample along z separately
:param order_z_seg: only applies if do_separate_z is True
:param order_z_data: only applies if do_separate_z is True
:param separate_z_anisotropy_threshold: if max_spacing > separate_z_anisotropy_threshold * min_spacing (per axis)
then resample along lowres axis with order_z_data/order_z_seg instead of order_data/order_seg
:return:
"""
assert not ((data is None) and (seg is None))
if data is not None:
assert len(data.shape) == 4, "data must be c x y z"
if seg is not None:
if len(seg.shape) == 3:
seg = np.expand_dims(seg, axis=0)
assert len(seg.shape) == 4, "seg must be c x y z"
if data is not None:
shape = np.array(data[0].shape)
else:
shape = np.array(seg[0].shape)
new_shape = np.round(((np.array(original_spacing) / np.array(target_spacing)).astype(float) * shape)).astype(int)
if force_separate_z is not None:
do_separate_z = force_separate_z
if force_separate_z:
axis = get_lowres_axis(original_spacing)
else:
axis = None
else:
if get_do_separate_z(original_spacing, separate_z_anisotropy_threshold):
do_separate_z = True
axis = get_lowres_axis(original_spacing)
elif get_do_separate_z(target_spacing, separate_z_anisotropy_threshold):
do_separate_z = True
axis = get_lowres_axis(target_spacing)
else:
do_separate_z = False
axis = None
if axis is not None:
if len(axis) == 3:
            # all axes share the same spacing, so nothing is anisotropic and
            # there is no reason to resample along z separately
do_separate_z = False
elif len(axis) == 2:
# this happens for spacings like (0.24, 1.25, 1.25) for example. In that case we do not want to resample
# separately in the out of plane axis
do_separate_z = False
else:
pass
if data is not None:
data_reshaped = resample_data_or_seg(data, new_shape, False, axis, order_data, do_separate_z,
order_z=order_z_data)
else:
data_reshaped = None
if seg is not None:
seg_reshaped = resample_data_or_seg(seg, new_shape, True, axis, order_seg, do_separate_z, order_z=order_z_seg)
else:
seg_reshaped = None
    if seg_reshaped is not None and len(seg_reshaped.shape) == 4:
        seg_reshaped = np.squeeze(seg_reshaped, axis=0)
return data_reshaped, seg_reshaped
class ResampleImage:
def __init__(self, resample_size, order=[3, 0]) -> None:
self.rsize = resample_size
self.order = order
def __call__(self, image, label=None):
if len(image.shape) == 3:
image = np.expand_dims(image, axis=0)
c = image.shape[0]
image = resample_image_array_size(image, out_size=(c,) + self.rsize, order=self.order[0])
if label is not None:
label = resample_image_array_size(label, out_size=self.rsize, order=self.order[1])
return image, label
class CropForegroundImageLabel:
    def __init__(self,
                 select_fn: Callable = is_positive,
                 channel_indices = None,
                 margin = 0,
                 mode = ["constant", "constant"]  # [image mode, label mode]; both are indexed below
                 ):
        self.cropper = CropForeground(
            select_fn=select_fn, channel_indices=channel_indices, margin=margin
        )
        self.mode = mode
def __call__(self, image, label=None):
if len(image.shape) == 3:
image = np.expand_dims(image, axis=0)
box_start, box_end = self.cropper.compute_bounding_box(image)
print(box_start, box_end)
# d[self.start_coord_key] = box_start
# d[self.end_coord_key] = box_end
# for key, m in self.key_iterator(d, self.mode):
# self.push_transform(d, key, extra_info={"box_start": box_start, "box_end": box_end})
image = self.cropper.crop_pad(img=image, box_start=box_start, box_end=box_end, mode=self.mode[0])
if label is not None :
if len(label.shape) == 3:
label = np.expand_dims(label, axis=0)
label = self.cropper.crop_pad(img=label, box_start=box_start, box_end=box_end, mode=self.mode[1])
if len(label.shape) == 4:
label = np.squeeze(label, axis=0)
return image, label
class CropForeground():
"""
Crop an image using a bounding box. The bounding box is generated by selecting foreground using select_fn
at channels channel_indices. margin is added in each spatial dimension of the bounding box.
The typical usage is to help training and evaluation if the valid part is small in the whole medical image.
Users can define arbitrary function to select expected foreground from the whole image or specified channels.
And it can also add margin to every dim of the bounding box of foreground object.
For example:
.. code-block:: python
image = np.array(
[[[0, 0, 0, 0, 0],
[0, 1, 2, 1, 0],
[0, 1, 3, 2, 0],
[0, 1, 2, 1, 0],
[0, 0, 0, 0, 0]]]) # 1x5x5, single channel 5x5 image
def threshold_at_one(x):
# threshold at 1
return x > 1
cropper = CropForeground(select_fn=threshold_at_one, margin=0)
print(cropper(image))
[[[2, 1],
[3, 2],
[2, 1]]]
"""
def __init__(
self,
select_fn: Callable = is_positive,
channel_indices = None,
margin: Union[Sequence[int], int] = 0,
return_coords: bool = False,
mode: str = "constant",
**np_kwargs,
) -> None:
"""
Args:
select_fn: function to select expected foreground, default is to select values > 0.
channel_indices: if defined, select foreground only on the specified channels
of image. if None, select foreground on the whole image.
margin: add margin value to spatial dims of the bounding box, if only 1 value provided, use it for all dims.
return_coords: whether return the coordinates of spatial bounding box for foreground.
k_divisible: make each spatial dimension to be divisible by k, default to 1.
if `k_divisible` is an int, the same `k` be applied to all the input spatial dimensions.
mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
One of the listed string values or a user supplied function. Defaults to ``"constant"``.
See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
np_kwargs: other args for `np.pad` API, note that `np.pad` treats channel dimension as the first dimension.
more details: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
"""
self.select_fn = select_fn
self.channel_indices = channel_indices
self.margin = margin
self.return_coords = return_coords
self.mode = mode
self.np_kwargs = np_kwargs
def compute_bounding_box(self, img):
"""
Compute the start points and end points of bounding box to crop.
And adjust bounding box coords to be divisible by `k`.
"""
box_start, box_end = generate_spatial_bounding_box(img, self.select_fn, self.channel_indices, self.margin)
# box_start_, *_ = convert_data_type(box_start, output_type=np.ndarray, dtype=np.int16, wrap_sequence=True)
# box_end_, *_ = convert_data_type(box_end, output_type=np.ndarray, dtype=np.int16, wrap_sequence=True)
# print(box_start)
# print(box_end)
box_start = np.array(box_start)
box_end = np.array(box_end)
orig_spatial_size = box_end - box_start
# make the spatial size divisible by `k`
spatial_size = np.array(orig_spatial_size)
# spatial_size = np.asarray(compute_divisible_spatial_size(orig_spatial_size.tolist(), k=self.k_divisible))
# update box_start and box_end
box_start_ = box_start - np.floor_divide(np.asarray(spatial_size) - orig_spatial_size, 2)
box_end_ = box_start + spatial_size
return box_start_, box_end_
def crop_pad(
self,
img,
box_start: np.ndarray,
box_end: np.ndarray,
mode = None,
):
"""
Crop and pad based on the bounding box.
"""
cropped = SpatialCrop(roi_start=box_start, roi_end=box_end)(img)
pad_to_start = np.maximum(-box_start, 0)
pad_to_end = np.maximum(box_end - np.asarray(img.shape[1:]), 0)
pad = list(chain(*zip(pad_to_start.tolist(), pad_to_end.tolist())))
return BorderPad(spatial_border=pad, mode=mode or self.mode, **self.np_kwargs)(cropped)
def __call__(self, img, mode = None):
"""
Apply the transform to `img`, assuming `img` is channel-first and
slicing doesn't change the channel dim.
"""
box_start, box_end = self.compute_bounding_box(img)
cropped = self.crop_pad(img, box_start, box_end, mode)
if self.return_coords:
return cropped, box_start, box_end
return cropped
class Random:
def __init__(self, seed) -> None:
self.seed = seed
self.R = np.random.RandomState(seed)
def do_transform(self, prob):
        # draw a random number; apply the transform when it falls below prob
prob = min(max(prob, 0.0), 1.0)
return self.R.rand() < prob
class BorderPad:
"""
Pad the input data by adding specified borders to every dimension.
Args:
spatial_border: specified size for every spatial border. Any -ve values will be set to 0. It can be 3 shapes:
- single int number, pad all the borders with the same size.
- length equals the length of image shape, pad every spatial dimension separately.
for example, image shape(CHW) is [1, 4, 4], spatial_border is [2, 1],
pad every border of H dim with 2, pad every border of W dim with 1, result shape is [1, 8, 6].
- length equals 2 x (length of image shape), pad every border of every dimension separately.
for example, image shape(CHW) is [1, 4, 4], spatial_border is [1, 2, 3, 4], pad top of H dim with 1,
pad bottom of H dim with 2, pad left of W dim with 3, pad right of W dim with 4.
the result shape is [1, 7, 11].
mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
One of the listed string values or a user supplied function. Defaults to ``"constant"``.
See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
kwargs: other arguments for the `np.pad` or `torch.pad` function.
note that `np.pad` treats channel dimension as the first dimension.
"""
def __init__(
self,
spatial_border: Union[Sequence[int], int],
mode = "constant",
**kwargs,
) -> None:
self.spatial_border = spatial_border
self.mode = mode
self.kwargs = kwargs
def __call__(
self, img, mode = None
):
"""
Args:
img: data to be transformed, assuming `img` is channel-first and
padding doesn't apply to the channel dim.
mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
One of the listed string values or a user supplied function. Defaults to `self.mode`.
See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
Raises:
ValueError: When ``self.spatial_border`` does not contain ints.
ValueError: When ``self.spatial_border`` length is not one of
[1, len(spatial_shape), 2*len(spatial_shape)].
"""
spatial_shape = img.shape[1:]
spatial_border = self.spatial_border
if not all(isinstance(b, int) for b in spatial_border):
raise ValueError(f"self.spatial_border must contain only ints, got {spatial_border}.")
spatial_border = tuple(max(0, b) for b in spatial_border)
if len(spatial_border) == 1:
data_pad_width = [(spatial_border[0], spatial_border[0]) for _ in spatial_shape]
elif len(spatial_border) == len(spatial_shape):
data_pad_width = [(sp, sp) for sp in spatial_border[: len(spatial_shape)]]
elif len(spatial_border) == len(spatial_shape) * 2:
data_pad_width = [(spatial_border[2 * i], spatial_border[2 * i + 1]) for i in range(len(spatial_shape))]
else:
raise ValueError(
f"Unsupported spatial_border length: {len(spatial_border)}, available options are "
f"[1, len(spatial_shape)={len(spatial_shape)}, 2*len(spatial_shape)={2*len(spatial_shape)}]."
)
all_pad_width = [(0, 0)] + data_pad_width
padder = Pad(all_pad_width, mode or self.mode, **self.kwargs)
return padder(img)
def map_spatial_axes(
img_ndim: int,
spatial_axes=None,
channel_first=True,
) -> List[int]:
"""
Utility to map the spatial axes to real axes in channel first/last shape.
For example:
If `channel_first` is True, and `img` has 3 spatial dims, map spatial axes to real axes as below:
None -> [1, 2, 3]
[0, 1] -> [1, 2]
[0, -1] -> [1, -1]
If `channel_first` is False, and `img` has 3 spatial dims, map spatial axes to real axes as below:
None -> [0, 1, 2]
[0, 1] -> [0, 1]
[0, -1] -> [0, -2]
Args:
img_ndim: dimension number of the target image.
spatial_axes: spatial axes to be converted, default is None.
The default `None` will convert to all the spatial axes of the image.
If axis is negative it counts from the last to the first axis.
If axis is a tuple of ints.
channel_first: the image data is channel first or channel last, default to channel first.
"""
if spatial_axes is None:
spatial_axes_ = list(range(1, img_ndim) if channel_first else range(img_ndim - 1))
else:
spatial_axes_ = []
for a in spatial_axes:
if channel_first:
spatial_axes_.append(a if a < 0 else a + 1)
else:
spatial_axes_.append(a - 1 if a < 0 else a)
return spatial_axes_
class RandomFlip():
"""
Reverses the order of elements along the given spatial axis. Preserves shape.
Uses ``np.flip`` in practice. See numpy.flip for additional details:
https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html.
Args:
spatial_axis: spatial axes along which to flip over. Default is None.
The default `axis=None` will flip over all of the axes of the input array.
If axis is negative it counts from the last to the first axis.
If axis is a tuple of ints, flipping is performed on all of the axes
specified in the tuple.
"""
def __init__(self, random_state, spatial_axis = None, execution_probability=0.2):
self.spatial_axis = spatial_axis
self.random_state = random_state
self.execution_probability = execution_probability
def __call__(self, img: np.ndarray, label: np.ndarray = None) -> np.ndarray:
"""
Args:
img: channel first array, must have shape: (num_channels, H[, W, ..., ]),
"""
        if self.random_state.uniform() > self.execution_probability:
            # skip the transform
            return img, label
        result: np.ndarray = np.flip(img, map_spatial_axes(img.ndim, self.spatial_axis))
        if label is not None:
            if len(label.shape) == 3:
                # label has no channel dimension; add one temporarily
                label = np.expand_dims(label, axis=0)
                label = np.flip(label, map_spatial_axes(label.ndim, self.spatial_axis))
                label = np.squeeze(label, axis=0)
            elif len(label.shape) == 4:
                label = np.flip(label, map_spatial_axes(label.ndim, self.spatial_axis))
            else:
                raise ValueError("label must be 3D or 4D, got shape {}".format(label.shape))
            return result.astype(img.dtype), label.astype(label.dtype)
        return result.astype(img.dtype)
class RandomRotate90:
def __init__(self, random_state, execution_probability=0.2):
self.random_state = random_state
self.axis = (1, 2)
self.execution_probability = execution_probability
def __call__(self, m, label=None):
        # only 4D (CxDxHxW) input is supported: the per-channel rot90 below needs a channel axis
        assert m.ndim == 4, "input must be a 4D (CxDxHxW) array; the first dimension is the channel"
        k = self.random_state.randint(0, 4)
        # rotate k times around a given plane
if self.random_state.uniform() < self.execution_probability:
channels = [np.rot90(m[c], k, self.axis) for c in range(m.shape[0])]
m = np.stack(channels, axis=0)
if label is not None :
assert label.ndim == 3, "label shape 必须为三维"
label = np.rot90(label, k, self.axis)
return m, label
class RandomRotate:
"""
Rotate an array by a random degrees from taken from (-angle_spectrum, angle_spectrum) interval.
Rotation axis is picked at random from the list of provided axes.
"""
def __init__(self, random_state, angle_spectrum=30, axes=None, mode='reflect', order=0, execution_probability=0.2):
if axes is None:
            axes = [[2, 1]]  # rotate in the plane spanned by the last two dims; the first spatial dim is depth
self.random_state = random_state
self.angle_spectrum = angle_spectrum
self.axes = axes
self.execution_probability = execution_probability
self.mode = mode
self.order = order
def __call__(self, m, label=None):
if self.random_state.uniform() < self.execution_probability:
axis = self.axes[self.random_state.randint(len(self.axes))]
angle = self.random_state.randint(-self.angle_spectrum, self.angle_spectrum)
assert m.ndim == 4, "输入必须为3d图像,第一个维度为channel"
channels = [rotate(m[c], angle, axes=axis, reshape=False, order=self.order, mode=self.mode, cval=-1) for c
in range(m.shape[0])]
m = np.stack(channels, axis=0)
if label is not None :
assert label.ndim == 3, "label shape 必须为三维"
label = rotate(label, angle, axes=axis, reshape=False, order=self.order, mode="nearest", cval=-1)
return m, label
class Elatic:
def __init__(self, random_state, alpha=(0., 900.), sigma=(9., 13.), scale=(0.85, 1.25),
order_seg=1, order_data=3, border_mode_seg="constant",
border_cval_seg=0, execution_probability=0.2) -> None:
self.random_state = random_state
self.alpha = alpha
self.sigma = sigma
self.scale = scale
self.order_seg = order_seg
self.order_data = order_data
self.border_mode_seg = border_mode_seg
self.border_cval_seg = border_cval_seg
self.execution_probability = execution_probability
def _do_elastic(self, m, seg=None):
a = self.random_state.uniform(self.alpha[0], self.alpha[1])
s = self.random_state.uniform(self.sigma[0], self.sigma[1])
patch_size = m.shape[1:]
coords = create_zero_centered_coordinate_mesh(patch_size)
coords = elastic_deform_coordinates(coords, a, s, self.random_state)
dim = 3
seg_result = None
if seg is not None:
seg_result = np.zeros((patch_size[0], patch_size[1], patch_size[2]),
dtype=np.float32)
data_result = np.zeros((m.shape[0], patch_size[0], patch_size[1], patch_size[2]),
dtype=np.float32)
for d in range(dim):
ctr = m.shape[d + 1] / 2. - 0.5
coords[d] += ctr
if self.scale[0] < 1:
sc = self.random_state.uniform(self.scale[0], 1)
else :
sc = self.random_state.uniform(max(self.scale[0], 1), self.scale[1])
coords = scale_coords(coords, sc)
for channel_id in range(m.shape[0]):
data_result[channel_id] = interpolate_img(m[channel_id], coords, self.order_data,
cval=0.0, is_seg=False)
if seg is not None:
seg_result = interpolate_img(seg, coords, self.order_seg,
self.border_mode_seg,
cval=self.border_cval_seg,
is_seg=True)
return data_result, seg_result
def __call__(self, m, seg=None):
assert len(m.shape) == 4, "image dim 必须为4"
if self.random_state.uniform() < self.execution_probability:
m, seg = self._do_elastic(m, seg=seg)
if seg is not None :
return m, seg
else :
return m
class Standardize:
"""
    Linearly rescale the input so that the intensity window [a_min, a_max] maps to [0, 1],
    optionally clipping the result to [b_min, b_max].
"""
def __init__(self, a_min, a_max, b_min=0, b_max=1, eps=1e-6, clip=True):
self.a_min = a_min
self.a_max = a_max
self.b_min = b_min
self.b_max = b_max
self.eps = eps
self.clip = clip
def __call__(self, m):
img = (m - self.a_min) / (self.a_max - self.a_min)
if self.clip:
img = np.clip(img, self.b_min, self.b_max)
return img
class Normalization():
    def __init__(self, channel_wise=False):
        self.channel_wise = channel_wise
    def __call__(self, m):
        assert len(m.shape) == 4, "image must be 4D (channel-first)"
if not self.channel_wise:
m = (m - m.mean()) / m.std()
else :
for i, d in enumerate(m):
slices = d != 0
_sub = d[slices].mean()
_div = d[slices].std()
m[i][slices] = (m[i][slices] - _sub) / (_div+1e-8)
return m
class AdditiveGaussianNoise:
def __init__(self, random_state, scale=(0.0, 0.2), execution_probability=0.2):
self.execution_probability = execution_probability
self.random_state = random_state
self.scale = scale
def __call__(self, m):
if self.random_state.uniform() < self.execution_probability:
std = self.random_state.uniform(self.scale[0], self.scale[1])
gaussian_noise = self.random_state.normal(0, std, size=m.shape)
return m + gaussian_noise
return m
class AdditivePoissonNoise:
def __init__(self, random_state, lam=(0.0, 0.2), execution_probability=0.2):
self.execution_probability = execution_probability
self.random_state = random_state
self.lam = lam
def __call__(self, m):
if self.random_state.rand() < self.execution_probability:
lam = self.random_state.uniform(self.lam[0], self.lam[1])
poisson_noise = self.random_state.poisson(lam, size=m.shape)
return m + poisson_noise
return m
class SpatialCrop:
"""
General purpose cropper to produce sub-volume region of interest (ROI).
If a dimension of the expected ROI size is bigger than the input image size, will not crop that dimension.
So the cropped result may be smaller than the expected ROI, and the cropped results of several images may
not have exactly the same shape.
It can support to crop ND spatial (channel-first) data.
The cropped region can be parameterised in various ways:
- a list of slices for each spatial dimension (allows for use of -ve indexing and `None`)
- a spatial center and size
- the start and end coordinates of the ROI
"""
def __init__(
self,
roi_center: Union[Sequence[int], np.ndarray, None] = None,
roi_size: Union[Sequence[int], np.ndarray, None] = None,
roi_start: Union[Sequence[int], np.ndarray, None] = None,
roi_end: Union[Sequence[int], np.ndarray, None] = None,
) -> None:
"""
Args:
roi_center: voxel coordinates for center of the crop ROI.
roi_size: size of the crop ROI, if a dimension of ROI size is bigger than image size,
will not crop that dimension of the image.
roi_start: voxel coordinates for start of the crop ROI.
roi_end: voxel coordinates for end of the crop ROI, if a coordinate is out of image,
use the end coordinate of image.
roi_slices: list of slices for each of the spatial dimensions.
"""
if roi_center is not None and roi_size is not None:
roi_center = np.asarray(roi_center, dtype=np.int16)
roi_size = np.asarray(roi_size, dtype=np.int16)
roi_start_np = np.maximum(roi_center - np.floor_divide(roi_size, 2), 0)
roi_end_np = np.maximum(roi_start_np + roi_size, roi_start_np)
else:
if roi_start is None or roi_end is None:
raise ValueError("Please specify either roi_center, roi_size or roi_start, roi_end.")
roi_start_np = np.maximum(np.asarray(roi_start, dtype=np.int16), 0)
roi_end_np = np.maximum(np.asarray(roi_end, dtype=np.int16), roi_start_np)
# Allow for 1D by converting back to np.array (since np.maximum will convert to int)
roi_start_np = roi_start_np if isinstance(roi_start_np, np.ndarray) else np.array([roi_start_np])
roi_end_np = roi_end_np if isinstance(roi_end_np, np.ndarray) else np.array([roi_end_np])
# convert to slices
self.slices = [slice(s, e) for s, e in zip(roi_start_np, roi_end_np)]
def __call__(self, img: Union[np.ndarray, torch.Tensor]):
"""
Apply the transform to `img`, assuming `img` is channel-first and
slicing doesn't apply to the channel dim.
"""
sd = min(len(self.slices), len(img.shape[1:])) # spatial dims
slices = [slice(None)] + self.slices[:sd]
return img[tuple(slices)]
class CenterSpatialCrop:
"""
Crop at the center of image with specified ROI size.
If a dimension of the expected ROI size is bigger than the input image size, will not crop that dimension.
So the cropped result may be smaller than the expected ROI, and the cropped results of several images may
not have exactly the same shape.
Args:
roi_size: the spatial size of the crop region e.g. [224,224,128]
if a dimension of ROI size is bigger than image size, will not crop that dimension of the image.
If its components have non-positive values, the corresponding size of input image will be used.
for example: if the spatial size of input data is [40, 40, 40] and `roi_size=[32, 64, -1]`,
the spatial size of output data will be [32, 40, 40].
"""
def __init__(self, roi_size: Union[Sequence[int], int]) -> None:
self.roi_size = roi_size
def __call__(self, img: np.ndarray):
"""
Apply the transform to `img`, assuming `img` is channel-first and
slicing doesn't apply to the channel dim.
"""
assert img.ndim == 4, "img ndim 必须为4, (channel, W, H, D)"
center = [i // 2 for i in img.shape[1:]]
cropper = SpatialCrop(roi_center=center, roi_size=self.roi_size)
return cropper(img)
def map_binary_to_indices(
label: np.ndarray,
image: Optional[np.ndarray] = None,
image_threshold: float = 0.0,
) -> Tuple[np.ndarray, np.ndarray]:
"""
    Compute the foreground and background of input label data, return the indices after flattening.
For example:
``label = np.array([[[0, 1, 1], [1, 0, 1], [1, 1, 0]]])``
``foreground indices = np.array([1, 2, 3, 5, 6, 7])`` and ``background indices = np.array([0, 4, 8])``
Args:
label: use the label data to get the foreground/background information.
image: if image is not None, use ``label = 0 & image > image_threshold``
to define background. so the output items will not map to all the voxels in the label.
image_threshold: if enabled `image`, use ``image > image_threshold`` to
determine the valid image content area and select background only in this area.
"""
# Prepare fg/bg indices
if label.shape[0] > 1:
label = label[1:] # for One-Hot format data, remove the background channel
label_flat = np.any(label, axis=0).ravel() # in case label has multiple dimensions
fg_indices = np.nonzero(label_flat)[0]
if image is not None:
img_flat = np.any(image > image_threshold, axis=0).ravel()
bg_indices = np.nonzero(np.logical_and(img_flat, ~label_flat))[0]
else:
bg_indices = np.nonzero(~label_flat)[0]
return fg_indices, bg_indices
class RandCropByPosNegLabel:
"""
Crop random fixed sized regions with the center being a foreground or background voxel
based on the Pos Neg Ratio.
And will return a list of arrays for all the cropped images.
For example, crop two (3 x 3) arrays from (5 x 5) array with pos/neg=1::
[[[0, 0, 0, 0, 0],
[0, 1, 2, 1, 0], [[0, 1, 2], [[2, 1, 0],
[0, 1, 3, 0, 0], --> [0, 1, 3], [3, 0, 0],
[0, 0, 0, 0, 0], [0, 0, 0]] [0, 0, 0]]
[0, 0, 0, 0, 0]]]
If a dimension of the expected spatial size is bigger than the input image size,
will not crop that dimension. So the cropped result may be smaller than expected size, and the cropped
results of several images may not have exactly same shape.
Args:
spatial_size: the spatial size of the crop region e.g. [224, 224, 128].
if a dimension of ROI size is bigger than image size, will not crop that dimension of the image.
if its components have non-positive values, the corresponding size of `label` will be used.
for example: if the spatial size of input data is [40, 40, 40] and `spatial_size=[32, 64, -1]`,
the spatial size of output data will be [32, 40, 40].
label: the label image that is used for finding foreground/background, if None, must set at
`self.__call__`. Non-zero indicates foreground, zero indicates background.
pos: used with `neg` together to calculate the ratio ``pos / (pos + neg)`` for the probability
to pick a foreground voxel as a center rather than a background voxel.
neg: used with `pos` together to calculate the ratio ``pos / (pos + neg)`` for the probability
to pick a foreground voxel as a center rather than a background voxel.
num_samples: number of samples (crop regions) to take in each list.
image: optional image data to help select valid area, can be same as `img` or another image array.
if not None, use ``label == 0 & image > image_threshold`` to select the negative
sample (background) center. So the crop center will only come from the valid image areas.
image_threshold: if enabled `image`, use ``image > image_threshold`` to determine
the valid image content areas.
fg_indices: if provided pre-computed foreground indices of `label`, will ignore above `image` and
`image_threshold`, and randomly select crop centers based on them, need to provide `fg_indices`
and `bg_indices` together, expect to be 1 dim array of spatial indices after flattening.
a typical usage is to call `FgBgToIndices` transform first and cache the results.
bg_indices: if provided pre-computed background indices of `label`, will ignore above `image` and
`image_threshold`, and randomly select crop centers based on them, need to provide `fg_indices`
and `bg_indices` together, expect to be 1 dim array of spatial indices after flattening.
a typical usage is to call `FgBgToIndices` transform first and cache the results.
Raises:
ValueError: When ``pos`` or ``neg`` are negative.
ValueError: When ``pos=0`` and ``neg=0``. Incompatible values.
"""
def __init__(
self,
spatial_size: Union[Sequence[int], int],
label: Optional[np.ndarray] = None,
pos: float = 1.0,
neg: float = 1.0,
num_samples: int = 1,
image: Optional[np.ndarray] = None,
image_threshold: float = 0.0,
random_state: np.random.RandomState = None,
) -> None:
self.spatial_size = spatial_size
self.label = label
if pos < 0 or neg < 0:
raise ValueError(f"pos and neg must be nonnegative, got pos={pos} neg={neg}.")
if pos + neg == 0:
raise ValueError("Incompatible values: pos=0 and neg=0.")
self.pos_ratio = pos / (pos + neg)
self.num_samples = num_samples
self.image = image
self.image_threshold = image_threshold
self.centers: Optional[List[List[np.ndarray]]] = None
self.random_state = random_state
def randomize(
self,
label: np.ndarray,
image: Optional[np.ndarray] = None,
) -> None:
fg_indices_, bg_indices_ = map_binary_to_indices(label, image, self.image_threshold)
self.centers = generate_pos_neg_label_crop_centers(
self.spatial_size, self.num_samples, self.pos_ratio, label.shape[1:], fg_indices_, bg_indices_, rand_state=self.random_state
)
def __call__(
self,
img: np.ndarray,
label: Optional[np.ndarray] = None,
image: Optional[np.ndarray] = None,
is_label = False,
) -> List[np.ndarray]:
"""
Args:
img: input data to crop samples from based on the pos/neg ratio of `label` and `image`.
Assumes `img` is a channel-first array.
label: the label image that is used for finding foreground/background, if None, use `self.label`.
image: optional image data to help select valid area, can be same as `img` or another image array.
use ``label == 0 & image > image_threshold`` to select the negative sample(background) center.
so the crop center will only exist on valid image area. if None, use `self.image`.
fg_indices: foreground indices to randomly select crop centers,
need to provide `fg_indices` and `bg_indices` together.
bg_indices: background indices to randomly select crop centers,
need to provide `fg_indices` and `bg_indices` together.
"""
if label is None:
label = self.label
if label is None:
raise ValueError("label should be provided.")
if len(label.shape) == 3:
label = np.expand_dims(label, axis=0)
if image is None:
image = self.image
if not is_label:
self.randomize(label, image)
else :
if len(img.shape) == 3:
img = np.expand_dims(img, axis=0)
results: List[np.ndarray] = []
if self.centers is not None:
for center in self.centers:
cropper = SpatialCrop(roi_center=tuple(center), roi_size=self.spatial_size) # type: ignore
r = cropper(img)
if is_label:
if len(r.shape) == 4:
r = np.squeeze(r, axis=0)
results.append(r)
return results
class Normalize:
"""
    Apply simple min-max scaling to a given input tensor, i.e. rescale the values to the fixed range [-1, 1].
"""
def __init__(self, min_value, max_value):
assert max_value > min_value
self.min_value = min_value
self.value_range = max_value - min_value
def __call__(self, m):
norm_0_1 = (m - self.min_value) / self.value_range
return np.clip(2 * norm_0_1 - 1, -1, 1)
class GammaTransformer:
def __init__(self, random_state, gamma_range=(0.5, 2), epsilon=1e-7, per_channel=False,
retain_stats: Union[bool, Callable[[], bool]] = False, execution_probability=0.2) -> None:
self.gamma_range = gamma_range
self.epsilon = epsilon
self.per_channel = per_channel
self.retain_stats = retain_stats
self.execution_probability = execution_probability
self.random_state = random_state
def __call__(self, m):
if self.random_state.uniform() < self.execution_probability:
m = augment_gamma(m, gamma_range=self.gamma_range, epsilon=self.epsilon,
per_channel=self.per_channel, retain_stats=self.retain_stats)
return m
class MirrorTransform:
""" Randomly mirrors data along specified axes. Mirroring is evenly distributed. Probability of mirroring along
each axis is 0.5
Args:
axes (tuple of int): axes along which to mirror
"""
def __init__(self, random_state, axes=(0, 1, 2), execution_probability=0.2):
self.execution_probability = execution_probability
self.random_state = random_state
self.axes = axes
if max(axes) > 2:
raise ValueError("MirrorTransform now takes the axes as the spatial dimensions. What previously was "
"axes=(2, 3, 4) to mirror along all spatial dimensions of a 5d tensor (b, c, x, y, z) "
"is now axes=(0, 1, 2). Please adapt your scripts accordingly.")
def __call__(self, data, seg=None):
if self.random_state.uniform() < self.execution_probability:
ret_val = augment_mirroring(data, self.random_state, sample_seg=seg, axes=self.axes)
data = ret_val[0]
if seg is not None:
seg = ret_val[1]
return data, seg
# if __name__ == "__main__":
# print("数据增强函数测试")
# r = Random(seed=8)
# print(r.do_transform(0.5))
# print(r.do_transform(0.5))
# print(r.do_transform(0.5))
# print(r.do_transform(0.5))
# f = RandomFlip(r.R)
# image = h5py.File("./BAI_YUE_BIN_data.h5", "r")
# single_model_image = image["image"][:1]
# label = image["label"][0]
# print(f"label shape is {label.shape}")
# print(single_model_image.shape)
# sd = Standardize(a_min=single_model_image.min(), a_max=single_model_image.max())
# single_model_image = sd(single_model_image)
# print("归一化变换")
# plot_3d(single_model_image)
# plot_3d_label(label)
# # print("随机翻转变换")
# # single_model_image, label = f(single_model_image, label)
# # plot_3d(single_model_image)
# # plot_3d_label(label)
# # print("随机旋转变换")
# # ro = RandomRotate(random_state=r.R)
# # single_model_image, label = ro(single_model_image, label)
# # print(single_model_image.shape)
# # plot_3d(single_model_image)
# # plot_3d_label(label)
# # print("添加高斯噪声")
# # gn = AdditiveGaussianNoise(r.R)
# # single_model_image = gn(single_model_image)
# # plot_3d(single_model_image)
# print("添加柏松噪声")
# pn = AdditivePoissonNoise(r.R)
# single_model_image = pn(single_model_image)
# plot_3d(single_model_image)
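# --- Usage sketch: chaining the transforms above (illustrative shapes/params) ---
#   rng = Random(seed=42)
#   image = np.random.rand(1, 32, 64, 64).astype(np.float32)   # (C, D, H, W)
#   label = (np.random.rand(32, 64, 64) > 0.5).astype(np.float32)
#   image = Standardize(a_min=image.min(), a_max=image.max())(image)
#   image, label = RandomFlip(rng.R, spatial_axis=[0])(image, label)
#   image, label = RandomRotate(rng.R)(image, label)
#   image = AdditiveGaussianNoise(rng.R)(image)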
|
1634859
|
import serial
import rospy
from constants import constants
import numpy as np
from std_srvs.srv import SetBool, SetBoolResponse
class NoopSerial(serial.Serial):
'''
Inherits from serial.Serial, doing nothing for each function.
Allows super classes to implement custom behavior for simulating
serial devices.
'''
port = 'noop-serial'
    def __init__(self, *args, **kwargs):
pass
def open(self):
pass
@property
def in_waiting(self):
return 0
@property
def out_waiting(self):
return 0
def close(self):
pass
def __del__(self):
pass
def read(self, **kwargs):
pass
def write(self, *args):
pass
def flush(self):
pass
def flushInput(self):
pass
    def flushOutput(self):
pass
def reset_input_buffer(self):
pass
def reset_output_buffer(self):
pass
def send_break(self, *args, **kwargs):
pass
class SimulatedSerial(NoopSerial):
'''
Simulates a serial device, storing a buffer to be read in a program like a normal OS serial device.
    Intended to be extended by other classes, which should override the write function to receive writes to
the simulated device. These classes simply append to the buffer string which will be returned
on reads to the simulated device.
Note: NoopSerial and SimulatedSerial are generic and are candidates for mil_common.
'''
def __init__(self, *args, **kwargs):
self.buffer = ''
@property
def in_waiting(self):
return len(self.buffer)
def reset_input_buffer(self):
self.buffer = ''
def read(self, length):
b, self.buffer = self.buffer[0:length], self.buffer[length:]
return b
class SimulatedKillBoard(SimulatedSerial):
'''
Pretends to be NaviGator's kill board over serial, responding according to the protocol
to requests and sending current state periodically
'''
port = 'simulated-kill-board'
def __init__(self, *args, **kwargs):
super(SimulatedKillBoard, self).__init__()
self.last_ping = None
self.memory = {
'BUTTON_FRONT_PORT': False,
'BUTTON_AFT_PORT': False,
'BUTTON_FRONT_STARBOARD': False,
'BUTTON_AFT_STARBOARD': False,
'COMPUTER': False,
'HEARTBEAT_COMPUTER': False,
'BUTTON_REMOTE': False,
'HEARTBEAT_REMOTE': False,
}
for key in constants['KILLS']:
if key.find('BUTTON') == 0:
rospy.Service('~{}'.format(key), SetBool, lambda req, _button=key: self._set_button(_button, req.data))
self.killed = False
self.light = 'OFF'
rospy.Timer(rospy.Duration(0.2), self._timer_cb)
def _set_button(self, button, pressed):
self._set_kill(button, pressed)
return SetBoolResponse(success=True)
def _timer_cb(self, *args):
self._check_timeout()
def _check_timeout(self, *args):
if self.last_ping is None:
return
if (rospy.Time.now() - self.last_ping).to_sec() >= constants['TIMEOUT_SECONDS']:
self._set_kill('HEARTBEAT_COMPUTER', True)
else:
self._set_kill('HEARTBEAT_COMPUTER', False)
def _set_kill(self, name, on, update=True):
if self.memory[name] != on:
self.memory[name] = on
self.killed = np.any([self.memory[x] for x in self.memory])
if not update:
return
if on:
self.buffer += constants[name]['TRUE']
else:
self.buffer += constants[name]['FALSE']
if self.killed:
self.buffer += constants['OVERALL']['TRUE']
else:
self.buffer += constants['OVERALL']['FALSE']
def _set_light(self, status):
if self.light != status:
            print('setting lights: {}'.format(status))
self.light = status
def _get_status(self, byte):
def _res(boolean):
return constants['RESPONSE_TRUE'] if boolean else constants['RESPONSE_FALSE']
if byte == constants['OVERALL']['REQUEST']:
self.buffer = _res(self.killed) + self.buffer
return
for key in self.memory:
if byte == constants[key]['REQUEST']:
self.buffer = _res(self.memory[key]) + self.buffer
return
def _handle_sync(self, data):
        # Handle synchronous requests
if data == constants['PING']['REQUEST']:
self.last_ping = rospy.Time.now()
self.buffer = constants['PING']['RESPONSE'] + self.buffer
elif data == constants['COMPUTER']['KILL']['REQUEST']:
self._set_kill('COMPUTER', True)
self.buffer = constants['COMPUTER']['KILL']['RESPONSE'] + self.buffer
elif data == constants['COMPUTER']['CLEAR']['REQUEST']:
self._set_kill('COMPUTER', False)
self.buffer = constants['COMPUTER']['CLEAR']['RESPONSE'] + self.buffer
elif data == constants['LIGHTS']['OFF_REQUEST']:
self._set_light('OFF')
self.buffer = constants['LIGHTS']['OFF_RESPONSE'] + self.buffer
elif data == constants['LIGHTS']['YELLOW_REQUEST']:
self._set_light('YELLOW')
self.buffer = constants['LIGHTS']['YELLOW_RESPONSE'] + self.buffer
elif data == constants['LIGHTS']['GREEN_REQUEST']:
self._set_light('GREEN')
self.buffer = constants['LIGHTS']['GREEN_RESPONSE'] + self.buffer
else:
self._get_status(data)
def write(self, data):
        def s(data):
            '''
            Serialize a byte into a string of its hex code,
            e.g. '\x01' -> '0x1'. (Debugging helper; unused below.)
            '''
            return hex(ord(data))
self._check_timeout()
self._handle_sync(data)
return len(data)
|
1634873
|
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))+"/../sklite")
project = 'SkLite'
copyright = '2019, <NAME>'
author = '<NAME>'
release = '0.0.1'
extensions = [
'sphinx.ext.autodoc',
'numpydoc',
]
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
html_theme = 'alabaster'
html_static_path = ['_static']
numpydoc_show_class_members = False
numpydoc_show_inherited_class_members = False
numpydoc_use_blockquotes = False
master_doc = 'index'
|
1634876
|
from pyNastran.bdf.bdf import read_bdf
from pyNastran.op2.op2 import read_op2
bdf_filename = 'beam_modes.dat'
op2_filename = 'beam_modes_m2.op2'
# 2) Open an op2
op2_model = read_op2(op2_filename)
print(op2_model.get_op2_stats())
subcase = 1
# 3a) Load grid point locations
model = read_bdf(bdf_filename)
# I'm assuming you don't have SPOINTs/EPOINTs
xyz_cid0 = {}
for nid, node in model.nodes.items():
xyz_cid0[nid] = node.get_position()
# 3b) generalized mass, eigenvalues or frequencies, and mode shapes from a 103 solution
# it's rare you'd have more than 1 key, so we'll just grab the 0th key
eigenvalue_keys = list(op2_model.eigenvalues.keys())
title = eigenvalue_keys[0]
# grab the data
#self.mode = np.zeros(nmodes, dtype='int32')
#self.extraction_order = np.zeros(nmodes, dtype='int32')
#self.eigenvalues = np.zeros(nmodes, dtype='float32')
#self.radians = np.zeros(nmodes, dtype='float32')
#self.cycles = np.zeros(nmodes, dtype='float32')
#self.generalized_mass = np.zeros(nmodes, dtype='float32')
#self.generalized_stiffness = np.zeros(nmodes, dtype='float32')
eigenvalue_obj = op2_model.eigenvalues[title]
eigenvalues = eigenvalue_obj.eigenvalues
frequencies = eigenvalue_obj.cycles
generalized_mass = eigenvalue_obj.generalized_mass
#4) Put them into numpy arrays so I can do some math with them
eigenvector_obj = op2_model.eigenvectors[1]
# take your pick on which form, but probably eigenvectors2
eigenvectors1 = eigenvector_obj.data # (nmodes, nnodes, 6)
eigenvectors2 = eigenvector_obj.get_phi() # (ndof, nmodes)
#5) I will also need node ids
nids = eigenvector_obj.node_gridtype[:, 0]
print(eigenvalues, frequencies, generalized_mass)
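# --- Usage sketch: pairing mode shapes with node locations (illustrative) ---
#   import numpy as np
#   mode0 = eigenvectors1[0]                            # (nnodes, 6) DOFs of mode 1
#   coords = np.array([xyz_cid0[nid] for nid in nids])  # (nnodes, 3)
#   deformed = coords + 10.0 * mode0[:, :3]             # exaggerated mode shape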
|
1634882
|
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin, UserManager
from django.core.mail import send_mail
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from .validators import UsernameValidator
class User(AbstractBaseUser, PermissionsMixin):
username_validator = UsernameValidator()
username = models.CharField(
_('username'),
max_length=150,
unique=True,
help_text=_('Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.'),
validators=[username_validator],
error_messages={
'unique': _("A user with that username already exists."),
},
)
email = models.EmailField(_('email address'), blank=True)
display_name = models.CharField(_('display name'), max_length=30, blank=True)
profile_icon = models.ImageField(_('profile icon'), upload_to='profile_icons', null=True, blank=True)
self_introduction = models.CharField(_('self introduction'), max_length=512, blank=True)
is_admin = models.BooleanField(default=False)
is_staff = models.BooleanField(
_('staff status'),
default=False,
help_text=_('Designates whether the user can log into this admin site.'),
)
is_active = models.BooleanField(
_('active'),
default=True,
help_text=_(
'Designates whether this user should be treated as active. '
'Unselect this instead of deleting accounts.'
),
)
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
objects = UserManager()
EMAIL_FIELD = 'email'
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
db_table = 'users'
def clean(self):
super().clean()
self.email = self.__class__.objects.normalize_email(self.email)
def email_user(self, subject, message, from_email=None, **kwargs):
send_mail(subject, message, from_email, [self.email], **kwargs)
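# --- Usage sketch (placeholder values) ---
# `create_user` comes from the stock Django UserManager assigned above.
#
#   user = User.objects.create_user(username="alice", email="alice@example.com", password="...")
#   user.email_user("Welcome", "Thanks for signing up.")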
|
1634904
|
class Car():
    """A simple attempt to represent a car."""
    def __init__(self, manufacturer, model, year):
        """Initialize attributes to describe a car."""
        self.manufacturer = manufacturer
        self.model = model
        self.year = year
        self.odometer_reading = 0
    def get_descriptive_name(self):
        """Return a neatly formatted descriptive name."""
        long_name = str(self.year) + ' ' + self.manufacturer + ' ' + self.model
        return long_name.title()
    def read_odometer(self):
        """Print a statement showing the car's mileage."""
        print("This car has " + str(self.odometer_reading) + " miles on it.")
    def update_odometer(self, mileage):
        """
        Set the odometer reading to the given value.
        Reject the change if it attempts to roll the odometer back.
        """
        if mileage >= self.odometer_reading:
            self.odometer_reading = mileage
        else:
            print("You can't roll back an odometer!")
    def increment_odometer(self, miles):
        """Add the given amount to the odometer reading."""
        self.odometer_reading += miles
class Battery():
    """A simple attempt to model a battery for an electric car."""
    def __init__(self, battery_size=60):
        """Initialize the battery's attributes."""
        self.battery_size = battery_size
    def describe_battery(self):
        """Print a statement describing the battery size."""
        print("This car has a " + str(self.battery_size) + "-kWh battery.")
    def get_range(self):
        """Print a statement about the range this battery provides."""
        if self.battery_size == 60:
            range_miles = 140
        elif self.battery_size == 85:
            range_miles = 185
        else:
            range_miles = 0  # guard against unknown battery sizes
        message = "This car can go approximately " + str(range_miles)
        message += " miles on a full charge."
        print(message)
    def upgrade_battery(self):
        """Upgrade the battery if possible."""
        if self.battery_size == 60:
            self.battery_size = 85
            print("Upgraded the battery to 85 kWh.")
        else:
            print("The battery is already upgraded.")
class ElectricCar(Car):
    """Model aspects of a car that are specific to electric vehicles."""
    def __init__(self, manufacturer, model, year):
        """
        Initialize attributes of the parent class.
        Then initialize attributes specific to an electric car.
        """
        super().__init__(manufacturer, model, year)
        self.battery = Battery()
print("Make an electric car and check the battery:")
my_tesla = ElectricCar('tesla', 'model s', 2016)
my_tesla.battery.describe_battery()
print("\nUpgrade the battery and check it again:")
my_tesla.battery.upgrade_battery()
my_tesla.battery.describe_battery()
print("\nTry upgrading the battery a second time.")
my_tesla.battery.upgrade_battery()
my_tesla.battery.describe_battery()
|
1634930
|
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import sys  # needed for glutInit(sys.argv) in run()
from typing import Callable
from functools import wraps
class glpy:
"""Main class to initialize OpenGL and Glut functions
"""
def __init__(self, **kwargs):
"""initialize the class with the following parameters
Keyword arguments:
mode -- diplay mode (default GLUT_DOUBLE | GLUT_RGB)
size -- window size (default (500, 500))
position -- window position (default (0, 0))
title -- window title (default "glpy")
color -- background color (default (0.0, 0.0, 0.0))
range -- window range (default (-1.0, 1.0, -1.0, 1.0))
"""
self.mode = kwargs["mode"] if "mode" in kwargs else GLUT_RGBA
self.size = kwargs["size"] if "size" in kwargs else (500, 500)
self.position = kwargs["position"] if "position" in kwargs else (0, 0)
self.title = kwargs["title"]if "title" in kwargs else "new title"
self.color = kwargs["bgcolor"] if "bgcolor" in kwargs else (0, 0, 0, 1.0)
self.range = kwargs["axis_range"] if "axis_range" in kwargs else (-100, 100,-100, 100)
def run(self, cb: Callable):
"""
Run the main loop of the program to execute the callbacks
Keyword arguments:
function(ListCallable) : a list of callback functions that will be executed -required
"""
glutInit(sys.argv)
glutInitDisplayMode(self.mode)
glutInitWindowSize(*self.size)
glutInitWindowPosition(*self.position)
glutCreateWindow(self.title)
glutDisplayFunc(cb)
glClearColor(*self.color)
gluOrtho2D(*self.range)
glutMainLoop()
def point(size: float):
''' wraps a callback function allowing it to plot points '''
def decorate(func: Callable):
@wraps(func)
def wrapper(*args, **kwargs):
glPointSize(size) # setting the point size
glBegin(GL_POINTS)
func(*args, **kwargs)
glEnd()
glFlush()
return wrapper
return decorate
def line(width: float):
''' wraps a callback function allowing it to plot lines '''
def decorate(func: Callable):
@wraps(func)
def wrapper(*args, **kwargs):
glLineWidth(width) # setting the line width
glBegin(GL_LINES)
func(*args, **kwargs)
glEnd()
glFlush()
return wrapper
return decorate
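# Minimal usage sketch (an added example, not part of the library): draws a
# single white point at the origin using the decorator API above. Assumes a
# working GLUT installation.
if __name__ == '__main__':
    @point(size=5.0)
    def draw_points():
        glColor3f(1.0, 1.0, 1.0)
        glVertex2f(0.0, 0.0)

    app = glpy(title="demo", size=(400, 400))
    app.run(draw_points)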
|
1634942
|
import numpy as np
from somo.sm_manipulator_definition import SMManipulatorDefinition
from somo.sm_actuator_definition import SMActuatorDefinition
from somo.sm_link_definition import SMLinkDefinition
from somo.sm_joint_definition import SMJointDefinition
from somo.sm_continuum_manipulator import SMContinuumManipulator
# base_definition
# todo: minimize the content of this;
# todo have definitions saved in one file instead of in separate files
base_definition = None
joint_definition1 = {
"joint_type": "revolute",
"axis": [1, 0, 0],
"limits": [-3.141592, 3.141592, 100, 3],
"spring_stiffness": 100,
"joint_neutral_position": 0,
"neutral_axis_offset": [0.0, 0.05, 0.0, 0.0, 0.0, 0.0],
"joint_control_limit_force": 1.0,
}
joint_definition2 = {
"joint_type": "revolute",
"axis": [0, 1, 0],
"limits": [-3.141592, 3.141592, 100, 3],
"spring_stiffness": 100,
"joint_neutral_position": 0,
"joint_control_limit_force": 1.0,
}
link_definition = {
"shape_type": "stadium",
"dimensions": [0.2, 0.2, 0.2],
"mass": 0.350,
"inertial_values": [1, 0, 0, 1, 0, 1],
"material_color": [0.6, 0.0, 0.8, 1.0],
"material_name": "green",
}
tip_definition = None
actuator_definition = {
"actuator_length": 2,
"n_segments": 10,
"link_definition": link_definition,
"joint_definitions": [joint_definition1, joint_definition2],
"planar_flag": 0,
}
manipulator_definition = {
"n_act": 1,
"base_definition": base_definition,
"actuator_definitions": [actuator_definition],
"tip_definition": tip_definition,
"manipulator_name": "finger",
"urdf_filename": "finger.urdf",
}
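# Hypothetical instantiation sketch (an assumption: SMManipulatorDefinition
# accepts these dict entries as keyword arguments, as in the somo examples):
# manipulator_def = SMManipulatorDefinition(**manipulator_definition)
# manipulator = SMContinuumManipulator(manipulator_def)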
|
1634946
|
import torch
import torch.nn as nn
from net import MLP, StateTransition
class GNN(nn.Module):
def __init__(self, config, state_net=None, out_net=None):
super(GNN, self).__init__()
self.config = config
# hyperparameters and general properties
self.convergence_threshold = config.convergence_threshold
self.max_iterations = config.max_iterations
self.n_nodes = config.n_nodes
self.state_dim = config.state_dim
self.label_dim = config.label_dim
self.output_dim = config.output_dim
self.state_transition_hidden_dims = config.state_transition_hidden_dims
self.output_function_hidden_dims = config.output_function_hidden_dims
# node state initialization
# self.node_state = torch.zeros(*[self.n_nodes, self.state_dim]).to(self.config.device) # (n,d_n)
self.node_state = torch.rand(*[self.n_nodes, self.state_dim]).to(self.config.device) # (n,d_n)
self.converged_states = torch.zeros(*[self.n_nodes, self.state_dim]).to(self.config.device)
# state and output transition functions
if state_net is None:
self.state_transition_function = StateTransition(self.state_dim, self.label_dim,
mlp_hidden_dim=self.state_transition_hidden_dims,
activation_function=config.activation)
else:
self.state_transition_function = state_net
if out_net is None:
self.output_function = MLP(self.state_dim, self.output_function_hidden_dims, self.output_dim)
else:
self.output_function = out_net
self.graph_based = self.config.graph_based
def reset_parameters(self):
self.state_transition_function.mlp.init()
self.output_function.init()
def forward(self,
edges,
agg_matrix,
node_labels,
node_states=None,
graph_agg=None
):
n_iterations = 0
# convergence loop
# state initialization
node_states = self.node_state if node_states is None else node_states
while n_iterations < self.max_iterations:
new_state = self.state_transition_function(node_states, node_labels, edges, agg_matrix)
n_iterations += 1
# convergence condition
            with torch.no_grad():
                # per-node L2 distance between successive states
                distance = torch.norm(new_state - node_states, dim=1)
check_min = distance < self.convergence_threshold
node_states = new_state
if check_min.all():
break
states = node_states
self.converged_states = states
if self.graph_based:
states = torch.matmul(graph_agg, node_states)
output = self.output_function(states)
return output, n_iterations
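# Hypothetical smoke test (every config value below is an illustrative
# assumption; the real config object comes from the surrounding project):
# from types import SimpleNamespace
# config = SimpleNamespace(
#     convergence_threshold=1e-4, max_iterations=50, n_nodes=10, state_dim=8,
#     label_dim=3, output_dim=2, state_transition_hidden_dims=[16],
#     output_function_hidden_dims=[16], activation='tanh', device='cpu',
#     graph_based=False)
# model = GNN(config)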
|
1634979
|
import pytest
from rig.bitfield import BitField
from nengo_spinnaker.utils import keyspaces
def test_get_derived_keyspaces():
"""Test creation of derived keyspaces."""
ks = BitField()
ks.add_field("index")
ks.add_field("spam")
# General usage
kss = keyspaces.get_derived_keyspaces(ks, (slice(5), 5, 6, 7))
for i, x in enumerate(kss):
assert x.index == i
# Specify a field
kss = keyspaces.get_derived_keyspaces(ks, slice(1, 3),
field_identifier="spam")
for x, i in zip(kss, (1, 2)):
assert x.spam == i
# Fail when no maximum is specified
with pytest.raises(ValueError):
list(keyspaces.get_derived_keyspaces(ks, (slice(None))))
def test_Keyspaces_and_is_nengo_keyspace():
"""Test the dictionary-like getter for keyspaces."""
kss = keyspaces.KeyspaceContainer()
default_ks = kss["nengo"]
default_ks(connection_id=0, cluster=0, index=0)
other_ks = kss["other"]
assert kss.routing_tag is not None
assert kss.filter_routing_tag is not None
# Can easily determine what is and isn't a default keyspace
assert keyspaces.is_nengo_keyspace(default_ks)
assert not keyspaces.is_nengo_keyspace(other_ks)
# Assigning fields fixes sizing and positioning
with pytest.raises(Exception):
other_ks.get_mask()
kss.assign_fields()
other_ks.get_mask()
|
1635027
|
import matplotlib.pyplot as plt
# Measurements from block-size.data
bsize = [
('HS-b100',[
(15.680,11.21),
(28.095,11.75),
(52.799,12.84),
(54.457,22.74),
(54.378,26.32),
(53.818,40.95)
], '-o', 'coral'),
('HS-b400',[
(20.0,11.58),
(40.0,12.69),
(60.0,14.16),
(79.8,16.47),
(99.89,20.28),
(122.2,35.67),
(125.4,49.9),
], '-^', 'coral'),
('HS-b800',[
(20.0,13.5),
(59.7,14.2),
(99.7,17.4),
(132.4, 23.1),
(154.3, 31.7),
(162.4, 37.9),
(164.1,41.8),
(164.0,45.8)
], '-*', 'coral'),
('2CHS-b100',[
(19.630,9.85),
(36.824,10.16),
(54.214,12.08),
(54.325,19.24),
(53.214,26.85),
(53.057,41.31),
], '-p', 'darkseagreen'),
('2CHS-b400',[
(19.9, 10.1),
(39.98, 10.92),
(59.9, 12.56),
(79.8, 14.63),
(99.7, 18.27),
(112.1, 21.25),
(118.4, 29.43),
(125.6, 48.1),
], '-v', 'darkseagreen'),
('2CHS-b800',[
(19.9,10.05),
(59.9,12.2),
(99.7,15.2),
(131.8,20.0),
(152.2,27.9),
(161.1,36.0),
(162.2,42.1)
], '-d', 'darkseagreen'),
('SL-b100',[
(20.0,14.9),
(29.9,24.2),
(31.9,32.8),
(33.9,42.8),
(34.9,50.8),
], '-h', 'steelblue'),
('SL-b400',[
(20.0,13.3),
(40.0,15.1),
(59.9,19.2),
(79.9,30.52),
(89.8,45.88),
(91.8,55.1)
], '-s', 'steelblue'),
('SL-b800',[
(20.0, 13.3),
(59.9, 17.7),
(99.4, 29.2),
(108.9, 35.0),
(117.2, 45.7),
(118.2, 56.3)
], '->', 'steelblue'),
('OHS-b100',[
(9.700,10.194),
(19.799,12.205),
(33.7,11.409),
(38.760,10.0),
(48.00,19.0),
(48.00,42.0),
], '-8', 'darkmagenta'),
('OHS-b800',[
(17.966,12.14),
(58.966,12.52),
(131.544,13.07),
(141.544,14.07),
(151.544,15.07),
(169.542,18.3),
(172.564,22.4),
(176.649,37.4),
], '-<', 'darkmagenta')]
def do_plot():
f = plt.figure(1, figsize=(7,5))
plt.clf()
ax = f.add_subplot(1, 1, 1)
for name, entries, style, color in bsize:
throughput = []
latency = []
for t, l in entries:
throughput.append(t)
latency.append(l)
ax.plot(throughput, latency, style, color=color, label='%s' % name, markersize=8, alpha=0.8)
    plt.legend(fancybox=True, frameon=False, framealpha=0.8, ncol=3, loc='upper center')
plt.grid(linestyle='--', alpha=0.3)
plt.ylim([0,90])
plt.ylabel('Latency (ms)')
plt.xlabel('Throughput (KTx/s)')
plt.tight_layout()
plt.savefig('block-size.pdf', format='pdf')
plt.show()
if __name__ == '__main__':
do_plot()
|
1635035
|
import argparse
import logging
import timeit
import random
import cv2
from util.frame_convert import pretty_depth_cv
from robot.base import get_remote_robot
from learner.games import ObstacleAvoidanceGameEnvironment
def main(args):
print('preparing ...')
robot = get_remote_robot(args.agent, args.host, args.port)
with ObstacleAvoidanceGameEnvironment(robot) as game:
if not game:
print('could not create game')
return
actions = game.actions
game.step(actions[0]) # perform a single step to ensure everything is set up before measuring
n_iter = args.n_iter
print('performing %d iterations ...' % n_iter)
start = timeit.default_timer()
        for i in range(n_iter):
frame, reward, terminal, lives = game.step(random.choice(actions))
if terminal:
print('resetting game ...')
game.reset()
continue
# frame *= 2047.
# cv2.cv.SaveImage('/Users/matze/Desktop/test/out_%.3d.png' % i, pretty_depth_cv(frame))
# print reward, terminal
duration = timeit.default_timer() - start
fps = int(float(n_iter) / duration)
print('total time: %fs (%dfps)' % (duration, fps))
def get_parser():
parser = argparse.ArgumentParser(description='Benchmark for PiBot.')
parser.add_argument('--host', help='host of the robot, e.g. 192.168.1.2', type=str, default=None)
parser.add_argument('--port', help='port of the robot, e.g. 9090', type=int, default=9090)
parser.add_argument('--n-iter', help='number of iterations to perform', type=int, default=100)
parser.add_argument('agent', help='name of the robot')
return parser
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main(get_parser().parse_args())
|
1635040
|
from datetime import datetime
from time import time
from typing import TYPE_CHECKING
import pytest
from grouper.entities.group import GroupJoinPolicy
from grouper.graph import NoSuchGroup, NoSuchUser
from grouper.plugin.base import BasePlugin
if TYPE_CHECKING:
from tests.setup import SetupTest
def build_test_graph(setup):
# type: (SetupTest) -> None
"""Build a relatively complex test graph.
+-----------------------+
| |
| team-sre |
| * gary (o) +---------------------------------+
| * zay | |
| * zorkian | |
| * service (s) | +-----------v-----------+
| | | |
+-----------------------+ | serving-team |
+-----------------------+ +---------> * zorkian (o) |
| | | | |
| tech-ops | | +-----------+-----------+
| * zay (o) | | |
| * gary +-----------+ |
| * figurehead (np) | |
| | |
+-----------------------+ |
+-----------------------+ +-----------v-----------+
| | | |
| security-team | | team-infra |
| * oliver (o) +---------------------> * gary (o) |
| * figurehead | | |
| | +-----------+-----------+
+-----------------------+ |
+-----------------------+ +-----------v-----------+
| | | |
| sad-team | | all-teams |
| * zorkian (o) | | * testuser (o) |
| * oliver | | |
| | +-----------------------+
+-----------------------+
Arrows denote that the group at the tail of the arrow is a member of the group at the head of
the arrow. (o) for owners, (np) for non-permissioned owners, (s) for service accounts.
"""
with setup.transaction():
setup.add_user_to_group("<EMAIL>", "all-teams", role="owner")
setup.add_user_to_group("<EMAIL>", "team-infra", role="owner")
setup.add_group_to_group("team-infra", "all-teams")
setup.grant_permission_to_group("sudo", "shell", "team-infra")
setup.add_user_to_group("<EMAIL>", "serving-team", role="owner")
setup.add_group_to_group("serving-team", "team-infra")
setup.create_permission("audited", "An audited permission", audited=True)
setup.grant_permission_to_group("audited", "", "serving-team")
setup.add_user_to_group("<EMAIL>", "team-sre", role="owner")
setup.add_user_to_group("<EMAIL>", "team-sre")
setup.add_user_to_group("<EMAIL>", "team-sre")
setup.create_service_account(
"<EMAIL>", "team-sre", "Some service account", "owner=team-sre"
)
setup.add_group_to_group("team-sre", "serving-team")
setup.grant_permission_to_group("ssh", "*", "team-sre")
setup.grant_permission_to_group("team-sre", "*", "team-sre")
setup.grant_permission_to_service_account("team-sre", "*", "<EMAIL>")
setup.add_user_to_group("<EMAIL>", "tech-ops", role="owner")
setup.add_user_to_group("<EMAIL>", "tech-ops")
setup.add_user_to_group("<EMAIL>", "tech-ops", role="np-owner")
setup.add_group_to_group("tech-ops", "serving-team")
setup.grant_permission_to_group("ssh", "shell", "tech-ops")
setup.add_user_to_group("<EMAIL>", "security-team", role="owner")
setup.add_user_to_group("<EMAIL>", "security-team")
setup.add_group_to_group("security-team", "team-infra")
setup.add_user_to_group("<EMAIL>", "sad-team", role="owner")
setup.add_user_to_group("<EMAIL>", "sad-team")
setup.grant_permission_to_group("owner", "sad-team", "sad-team")
def test_get_permissions(setup):
# type: (SetupTest) -> None
build_test_graph(setup)
permissions = setup.graph.get_permissions()
permission_names = [p.name for p in permissions]
assert sorted(permission_names) == ["audited", "owner", "ssh", "sudo", "team-sre"]
permissions = setup.graph.get_permissions(audited=True)
    assert all(p.audited for p in permissions)
permission_names = [p.name for p in permissions]
assert sorted(permission_names) == ["audited"]
def test_get_permissions_data(setup):
# type: (SetupTest) -> None
"""Test some of the other permission fields not exercised by the sample graph."""
early_date = datetime.utcfromtimestamp(1)
now = datetime.utcfromtimestamp(int(time()))
with setup.transaction():
setup.create_permission("one", "Description", created_on=early_date)
setup.create_permission("disabled", "", enabled=False, created_on=now)
setup.create_permission("audited", "Audited permission", audited=True, created_on=now)
permission = {p.name: p for p in setup.graph.get_permissions()}
assert "disabled" not in permission
assert permission["one"].description == "Description"
assert not permission["one"].audited
assert permission["one"].created_on == early_date
assert permission["audited"].description == "Audited permission"
assert permission["audited"].audited
assert permission["audited"].created_on == now
permissions = [p.name for p in setup.graph.get_permissions(audited=True)]
assert permissions == ["audited"]
def test_get_permission_details(setup):
# type: (SetupTest) -> None
build_test_graph(setup)
details = setup.graph.get_permission_details("sudo")
assert isinstance(details["groups"], dict)
groups_with_sudo = details["groups"].keys()
assert sorted(groups_with_sudo) == [
"security-team",
"serving-team",
"team-infra",
"team-sre",
"tech-ops",
]
for group in groups_with_sudo:
for permission_data in details["groups"][group]["permissions"]:
assert permission_data["permission"] == "sudo"
assert permission_data["argument"] == "shell"
assert permission_data["audited"] == False
assert not details["service_accounts"]
details = setup.graph.get_permission_details("team-sre")
assert isinstance(details["service_accounts"], dict)
service_accounts_with_sudo = details["service_accounts"].keys()
assert sorted(service_accounts_with_sudo) == ["service@svc.localhost"]
for service in service_accounts_with_sudo:
for permission_data in details["service_accounts"][service]["permissions"]:
assert permission_data["permission"] == "team-sre"
assert permission_data["argument"] == "*"
details = setup.graph.get_permission_details("audited")
assert isinstance(details["audited"], bool)
assert details["audited"] == True
def test_get_disabled_groups(setup):
# type: (SetupTest) -> None
with setup.transaction():
setup.create_group("sad-team", "Some group", join_policy=GroupJoinPolicy.CAN_JOIN)
assert setup.graph.get_disabled_groups() == []
with setup.transaction():
setup.disable_group("sad-team")
disabled_groups = setup.graph.get_disabled_groups()
assert len(disabled_groups) == 1
disabled_group = disabled_groups[0]
assert disabled_group.name == "sad-team"
assert disabled_group.description == "Some group"
assert disabled_group.join_policy == GroupJoinPolicy.CAN_JOIN
assert not disabled_group.enabled
assert not disabled_group.is_role_user
def test_get_groups(setup):
# type: (SetupTest) -> None
build_test_graph(setup)
groups = setup.graph.get_groups()
group_names = [g.name for g in groups]
assert sorted(group_names) == [
"all-teams",
"sad-team",
"security-team",
"serving-team",
"team-infra",
"team-sre",
"tech-ops",
]
for group in groups:
assert group.description == ""
assert group.join_policy == GroupJoinPolicy.CAN_ASK
assert group.enabled
assert not group.is_role_user
groups = setup.graph.get_groups(audited=True)
group_names = [g.name for g in groups]
assert sorted(group_names) == ["serving-team", "team-sre", "tech-ops"]
groups = setup.graph.get_groups(directly_audited=True)
group_names = [g.name for g in groups]
assert sorted(group_names) == ["serving-team"]
def test_get_groups_role_user(setup):
# type: (SetupTest) -> None
with setup.transaction():
setup.create_group("some-group", "")
setup.create_role_user("<EMAIL>")
setup.create_group("not-<EMAIL>")
setup.create_user("<EMAIL>")
groups = {g.name: g for g in setup.graph.get_groups()}
assert not groups["some-group"].is_role_user
assert groups["<EMAIL>"].is_role_user
assert not groups["<EMAIL>"].is_role_user
def test_get_group_details(setup):
# type: (SetupTest) -> None
build_test_graph(setup)
with pytest.raises(NoSuchGroup):
setup.graph.get_group_details("nonexistent")
details = setup.graph.get_group_details("serving-team")
assert sorted(details["groups"].keys()) == ["all-teams", "team-infra"]
assert sorted(details["subgroups"].keys()) == ["team-sre", "tech-ops"]
assert sorted(details["users"].keys()) == [
"<EMAIL>",
"<EMAIL>",
"<EMAIL>",
"<EMAIL>",
]
    perms_audited = list(filter(lambda p: p["audited"], details["permissions"]))
assert len(perms_audited) == 1
assert perms_audited[0]["permission"] == "audited"
assert details["audited"]
user_details = details["users"]["<EMAIL>"]
assert user_details["name"] == "<EMAIL>"
assert user_details["distance"] == 1
assert user_details["rolename"] == "owner"
user_details = details["users"]["<EMAIL>"]
assert user_details["name"] == "<EMAIL>"
assert user_details["distance"] == 2
assert user_details["rolename"] == "member"
permissions = [(p["permission"], p["argument"]) for p in details["permissions"]]
assert sorted(permissions) == [("audited", ""), ("sudo", "shell")]
details = setup.graph.get_group_details("sad-team")
assert sorted(details["groups"].keys()) == []
assert sorted(details["subgroups"].keys()) == []
assert sorted(details["users"].keys()) == ["<EMAIL>", "<EMAIL>"]
permissions = [(p["permission"], p["argument"]) for p in details["permissions"]]
assert sorted(permissions) == [("owner", "sad-team")]
def test_get_user_details(setup):
# type: (SetupTest) -> None
build_test_graph(setup)
with pytest.raises(NoSuchUser):
setup.graph.get_user_details("<EMAIL>")
details = setup.graph.get_user_details("<EMAIL>")
assert sorted(details["groups"].keys()) == [
"all-teams",
"security-team",
"team-infra",
"tech-ops",
]
group_details = details["groups"]["tech-ops"]
assert group_details["distance"] == 1
assert group_details["rolename"] == "np-owner"
group_details = details["groups"]["all-teams"]
assert group_details["distance"] == 3
assert group_details["rolename"] == "member"
permissions = [(p["permission"], p["argument"]) for p in details["permissions"]]
assert sorted(permissions) == [("sudo", "shell")]
details = setup.graph.get_user_details("<EMAIL>")
assert not details["groups"]
permissions = [(p["permission"], p["argument"]) for p in details["permissions"]]
assert sorted(permissions) == [("team-sre", "*")]
class MockStats(BasePlugin):
def __init__(self):
# type: () -> None
self.update_ms = 0.0
def log_graph_update_duration(self, duration_ms):
# type: (int) -> None
self.update_ms = duration_ms
def test_graph_update_stats(setup):
# type: (SetupTest) -> None
"""Test that update timings are logged by a graph update."""
mock_stats = MockStats()
setup.plugins.add_plugin(mock_stats)
# Create a user and a group, which will trigger a graph update.
with setup.transaction():
setup.add_user_to_group("<EMAIL>", "some-group")
assert mock_stats.update_ms > 0.0
|
1635052
|
import os
import unittest
import sys
import tempfile
sys.path.append('../fightchurn')
sys.path.append('../datagen')
from fightchurn import run_churn_listing
from fightchurn.datagen import churndb
class TestFightChurnWithData(unittest.TestCase):
    def test_run_entire_book(self):
        database = username = password = '<PASSWORD>'
        test_output_dir = os.path.join(tempfile.gettempdir(), 'fightchurn_test_output')
        print(f'TestFightChurnWithData writing to temporary output directory {test_output_dir}')
        run_churn_listing.set_churn_environment(database, username, password)
        churndb.drop_test_schema()
        self.assertEqual(True, run_churn_listing.run_everything(database, username, password,
                                                                schema='test', output_dir=test_output_dir))
if __name__ == '__main__':
unittest.main()
|
1635086
|
from eventsourcing.popo import (
Factory,
POPOAggregateRecorder,
POPOApplicationRecorder,
POPOProcessRecorder,
)
from eventsourcing.tests.base_aggregate_recorder_tests import (
AggregateRecorderTestCase,
)
from eventsourcing.tests.base_application_recorder_tests import (
ApplicationRecorderTestCase,
)
from eventsourcing.tests.base_infrastructure_tests import (
InfrastructureFactoryTestCase,
)
from eventsourcing.tests.base_process_recorder_tests import (
ProcessRecorderTestCase,
)
from eventsourcing.utils import Environment
class TestPOPOAggregateRecorder(AggregateRecorderTestCase):
def create_recorder(self):
return POPOAggregateRecorder()
class TestPOPOApplicationRecorder(ApplicationRecorderTestCase):
def create_recorder(self):
return POPOApplicationRecorder()
class TestPOPOProcessRecorder(ProcessRecorderTestCase):
def create_recorder(self):
return POPOProcessRecorder()
def test_performance(self):
super().test_performance()
class TestPOPOInfrastructureFactory(InfrastructureFactoryTestCase):
def setUp(self) -> None:
self.env = Environment("TestCase")
super().setUp()
def expected_factory_class(self):
return Factory
def expected_aggregate_recorder_class(self):
return POPOAggregateRecorder
def expected_application_recorder_class(self):
return POPOApplicationRecorder
def expected_process_recorder_class(self):
return POPOProcessRecorder
del AggregateRecorderTestCase
del ApplicationRecorderTestCase
del ProcessRecorderTestCase
del InfrastructureFactoryTestCase
|
1635198
|
from unittest import TestCase
from niaaml import ParameterDefinition, MinMax, OptimizationStats, get_bin_index
import numpy as np
class UtilitiesTestCase(TestCase):
def test_get_bin_index_works_fine(self):
self.assertEqual(get_bin_index(0.0, 4), 0)
self.assertEqual(get_bin_index(0.24, 4), 0)
self.assertEqual(get_bin_index(0.25, 4), 1)
self.assertEqual(get_bin_index(0.49, 4), 1)
self.assertEqual(get_bin_index(0.5, 4), 2)
self.assertEqual(get_bin_index(0.74, 4), 2)
self.assertEqual(get_bin_index(0.75, 4), 3)
self.assertEqual(get_bin_index(1.0, 4), 3)
class ParameterDefinitionTestCase(TestCase):
def test_works_fine(self):
parameter_definition = ParameterDefinition(MinMax(0.0, 5.9), float)
self.assertIsInstance(parameter_definition.value, MinMax)
self.assertEqual(parameter_definition.param_type, float)
class OptimizationStatsTestCase(TestCase):
def setUp(self):
y = np.array(
[
"Class 1",
"Class 1",
"Class 1",
"Class 2",
"Class 1",
"Class 2",
"Class 2",
"Class 2",
"Class 2",
"Class 1",
"Class 1",
"Class 2",
"Class 1",
"Class 2",
"Class 1",
"Class 1",
"Class 1",
"Class 1",
"Class 2",
"Class 1",
]
)
predicted = np.array(
[
"Class 1",
"Class 1",
"Class 1",
"Class 2",
"Class 2",
"Class 2",
"Class 1",
"Class 1",
"Class 1",
"Class 2",
"Class 1",
"Class 1",
"Class 2",
"Class 2",
"Class 1",
"Class 2",
"Class 1",
"Class 2",
"Class 2",
"Class 2",
]
)
self.__stats = OptimizationStats(predicted, y)
def test_works_fine(self):
self.assertEqual(self.__stats._accuracy, 0.5)
self.assertEqual(self.__stats._precision, 0.5199999999999999)
self.assertEqual(self.__stats._cohen_kappa, 0.0)
self.assertEqual(self.__stats._f1_score, 0.505050505050505)
class MinMaxTestCase(TestCase):
def test_works_fine(self):
minmax = MinMax(0.0, 5.9)
self.assertEqual(minmax.min, 0.0)
self.assertEqual(minmax.max, 5.9)
|
1635258
|
import unittest
from generativepy.nparray import make_nparray, make_nparray_frame
from generativepy.movie import save_frame
from image_test_helper import run_image_test
import numpy as np
"""
Test each function of the nparray module, with 1, 3 and 4 channel output
"""
def draw4(array, pixel_width, pixel_height, frame_no, frame_count):
"""
Draw a transparent blue rectangle on a brown background
:param array:
:param pixel_width:
:param pixel_height:
:param frame_no:
:param frame_count:
:return:
"""
array[:,:] = [128, 64, 0, 255]
array[50:350, 100:500] = [0, 128, 196, 64]
def draw3(array, pixel_width, pixel_height, frame_no, frame_count):
"""
Draw a blue rectangle on a brown background
:param array:
:param pixel_width:
:param pixel_height:
:param frame_no:
:param frame_count:
:return:
"""
array[:,:] = [128, 64, 0]
array[50:350, 100:500] = [0, 128, 196]
def draw1(array, pixel_width, pixel_height, frame_no, frame_count):
"""
    Draw a dark grey rectangle on a light grey background
:param array:
:param pixel_width:
:param pixel_height:
:param frame_no:
:param frame_count:
:return:
"""
array[:,:] = [196]
array[50:350, 100:500] = [64]
def draw3_nofill(array, pixel_width, pixel_height, frame_no, frame_count):
"""
Draw a blue rectangle with no background
:param array:
:param pixel_width:
:param pixel_height:
:param frame_no:
:param frame_count:
:return:
"""
array[50:350, 100:500] = [0, 128, 196]
class TestNparrayModule(unittest.TestCase):
def test_make_nparray_rgba(self):
def creator(file):
make_nparray(file, draw4, 600, 400, channels=4)
self.assertTrue(run_image_test('test_make_nparray_rgba.png', creator))
def test_make_nparray_rgb(self):
def creator(file):
make_nparray(file, draw3, 600, 400, channels=3)
self.assertTrue(run_image_test('test_make_nparray_rgb.png', creator))
def test_make_nparray_gray(self):
def creator(file):
make_nparray(file, draw1, 600, 400, channels=1)
self.assertTrue(run_image_test('test_make_nparray_gray.png', creator))
    def test_make_nparray_frame_rgba(self):
def creator(file):
frame = make_nparray_frame(draw4, 600, 400, channels=4)
save_frame(file, frame)
self.assertTrue(run_image_test('test_make_nparray_frame_rgba.png', creator))
def test_make_nparray_frame_rgb(self):
def creator(file):
frame = make_nparray_frame(draw3, 600, 400, channels=3)
save_frame(file, frame)
self.assertTrue(run_image_test('test_make_nparray_frame_rgb.png', creator))
def test_make_nparray_frame_gray(self):
def creator(file):
frame = make_nparray_frame(draw1, 600, 400, channels=1)
save_frame(file, frame)
self.assertTrue(run_image_test('test_make_nparray_frame_gray.png', creator))
def test_make_nparray_frame_with_output_rgb(self):
def creator(file):
            out = np.full((400, 600, 3), 128, dtype=np.uint8)
out[25:100, 50:550] = [0, 0, 0]
frame = make_nparray_frame(draw3_nofill, 600, 400, out=out)
save_frame(file, frame)
self.assertTrue(run_image_test('test_make_nparray_frame_with_output_rgb.png', creator))
|
1635272
|
from io import BytesIO, SEEK_END
import attr
from PIL import Image
MAX_EDGE_PIXELS = 1024
QUALITY = 80
SUPPORTED_FORMATS = ('JPEG', 'PNG', 'GIF')
MAX_SIZE_IN_BYTES_AFTER_PROCESSING = 1024 * 1024
MIN_AREA_TRACKING_PIXEL = 10
@attr.s
class ImageProcessingResult:
size_in_bytes: int = attr.ib()
width: int = attr.ib()
height: int = attr.ib()
image_format: str = attr.ib()
data: BytesIO = attr.ib()
class ImageProcessingError(Exception):
pass
def process_image_data(data: bytes) -> ImageProcessingResult:
try:
image = Image.open(BytesIO(data))
except OSError as e:
raise ImageProcessingError('Cannot open image: {}'.format(e))
with image:
if image.format not in SUPPORTED_FORMATS:
raise ImageProcessingError(
'Unsupported format {}'.format(image.format)
)
width, height = image.size
if (width * height) < MIN_AREA_TRACKING_PIXEL:
raise ImageProcessingError('Tracking pixel')
if image.format == 'GIF':
# Gif are weird, saving them often fails and the result after
# compression is sometimes bigger than the original file.
# Let's just keep the original file.
data = BytesIO(data)
else:
data = BytesIO()
try:
image.thumbnail((MAX_EDGE_PIXELS, MAX_EDGE_PIXELS))
width, height = image.size
image.save(data, image.format, quality=QUALITY,
optimize=True, progressive=True)
except (OSError, EOFError) as e:
raise ImageProcessingError('Cannot resize image: {}'.format(e))
size_in_bytes = data.seek(0, SEEK_END)
data.seek(0)
if size_in_bytes > MAX_SIZE_IN_BYTES_AFTER_PROCESSING:
raise ImageProcessingError(
'Resulting file too big: {} bytes'.format(size_in_bytes)
)
return ImageProcessingResult(
size_in_bytes, width, height, image.format, data
)
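# Example usage (an added sketch; 'photo.jpg' is a hypothetical input file):
if __name__ == '__main__':
    with open('photo.jpg', 'rb') as f:
        result = process_image_data(f.read())
    print(result.image_format, result.width, result.height,
          result.size_in_bytes)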
|
1635282
|
import tensorflow as tf
from tensorflow.keras import layers, Sequential, Model
class BasicConv2D(layers.Layer):
def __init__(self, kernels, kernel_size=(3, 3), strides=1, padding='valid'):
        super(BasicConv2D, self).__init__()
self.conv = layers.Conv2D(kernels,
kernel_size,
strides=strides,
padding=padding,
use_bias=False)
self.bn = layers.BatchNormalization()
self.relu = layers.ReLU()
def call(self, x, training=False):
x = self.conv(x)
x = self.bn(x, training=training)
x = self.relu(x)
return x
class InceptionA(layers.Layer):
def __init__(self, pool_features):
super(InceptionA, self).__init__()
self.branch1x1 = BasicConv2D(64, (1, 1))
self.branch5x5 = Sequential([
BasicConv2D(48, (1, 1)),
BasicConv2D(64, (5, 5), padding='same')
])
self.branch3x3 = Sequential([
BasicConv2D(64, (1, 1)),
BasicConv2D(96, (3, 3), padding='same'),
BasicConv2D(96, (3, 3), padding='same')
])
self.branchpool = Sequential([
layers.AveragePooling2D((3, 3), strides=1, padding='same'),
BasicConv2D(pool_features, (3, 3), padding='same')
])
def call(self, x, training=False):
branch1x1 = self.branch1x1(x, training=training)
branch5x5 = self.branch5x5(x, training=training)
branch3x3 = self.branch3x3(x, training=training)
branchpool = self.branchpool(x, training=training)
outputs = [branch1x1, branch5x5, branch3x3, branchpool]
        return tf.concat(outputs, axis=-1)  # concatenate along the channel axis (channels-last)
class InceptionB(layers.Layer):
def __init__(self):
super(InceptionB, self).__init__()
self.branch3x3 = BasicConv2D(384, (3, 3), strides=2)
self.branch3x3stack = Sequential([
BasicConv2D(64, (1, 1)),
BasicConv2D(96, (3, 3), padding='same'),
BasicConv2D(96, (3, 3), strides=2)
])
self.branchpool = layers.MaxPooling2D((3, 3), strides=2)
def call(self, x, training=False):
branch3x3 = self.branch3x3(x, training=training)
branch3x3stack = self.branch3x3stack(x, training=training)
branchpool = self.branchpool(x, training=training)
outputs = [branch3x3, branch3x3stack, branchpool]
return tf.concat(outputs, axis=-1)
class InceptionC(layers.Layer):
def __init__(self, channels_7x7):
super(InceptionC, self).__init__()
self.branch1x1 = BasicConv2D(192, (1, 1))
c7 = channels_7x7
self.branch7x7 = Sequential([
BasicConv2D(c7, (1, 1)),
layers.ZeroPadding2D((3, 0)),
BasicConv2D(c7, (7, 1)),
layers.ZeroPadding2D((0, 3)),
BasicConv2D(192, (1, 7))
])
self.branch7x7stack = Sequential([
BasicConv2D(c7, (1, 1)),
layers.ZeroPadding2D((3, 0)),
BasicConv2D(c7, (7, 1)),
layers.ZeroPadding2D((0, 3)),
BasicConv2D(c7, (1, 7)),
layers.ZeroPadding2D((3, 0)),
BasicConv2D(c7, (7, 1)),
layers.ZeroPadding2D((0, 3)),
BasicConv2D(192, (1, 7)),
])
self.branchpool = Sequential([
layers.AveragePooling2D((3, 3), strides=1, padding='same'),
BasicConv2D(192, (1, 1))
])
def call(self, x, training=False):
branch1x1 = self.branch1x1(x, training=training)
branch7x7 = self.branch7x7(x, training=training)
branch7x7stack = self.branch7x7stack(x, training=training)
branchpool = self.branchpool(x, training=training)
outputs = [branch1x1, branch7x7, branch7x7stack, branchpool]
        return tf.concat(outputs, axis=-1)
class InceptionD(layers.Layer):
def __init__(self):
super(InceptionD, self).__init__()
self.branch3x3 = Sequential([
BasicConv2D(192, (1, 1)),
BasicConv2D(320, (3, 3), strides=2)
])
self.branch7x7 = Sequential([
BasicConv2D(192, (1, 1)),
layers.ZeroPadding2D((0, 3)),
BasicConv2D(192, (1, 7)),
layers.ZeroPadding2D((3, 0)),
BasicConv2D(192, (7, 1)),
BasicConv2D(192, (3, 3), strides=2)
])
self.branchpool = layers.AveragePooling2D((3, 3), strides=2)
def call(self, x, training=False):
branch3x3 = self.branch3x3(x, training=training)
branch7x7 = self.branch7x7(x, training=training)
branchpool = self.branchpool(x, training=training)
outputs = [branch3x3, branch7x7, branchpool]
return tf.concat(outputs, axis=-1)
class InceptionE(layers.Layer):
def __init__(self):
super(InceptionE, self).__init__()
self.branch1x1 = BasicConv2D(320, (1, 1))
self.branch3x3_1 = BasicConv2D(384, (1, 1))
self.branch3x3_2a = Sequential([
layers.ZeroPadding2D((0, 1)),
BasicConv2D(384, (1, 3))
])
self.branch3x3_2b = Sequential([
layers.ZeroPadding2D((1, 0)),
BasicConv2D(384, (3, 1))
])
self.branch3x3stack_1 = BasicConv2D(448, (1, 1))
self.branch3x3stack_2 = BasicConv2D(384, (3, 3), padding='same')
self.branch3x3stack_3a = Sequential([
layers.ZeroPadding2D((0, 1)),
BasicConv2D(384, (1, 3))
])
self.branch3x3stack_3b = Sequential([
layers.ZeroPadding2D((1, 0)),
BasicConv2D(384, (3, 1))
])
self.branchpool = Sequential([
layers.AveragePooling2D((3, 3), strides=1, padding='same'),
BasicConv2D(192, (1, 1))
])
def call(self, x, training=False):
branch1x1 = self.branch1x1(x, training=training)
branch3x3 = self.branch3x3_1(x, training=training)
branch3x3 = [
self.branch3x3_2a(branch3x3, training=training),
self.branch3x3_2b(branch3x3, training=training)
]
branch3x3 = tf.concat(branch3x3, axis=-1)
branch3x3stack = self.branch3x3stack_1(x, training=training)
branch3x3stack = self.branch3x3stack_2(
branch3x3stack, training=training)
branch3x3stack = [
self.branch3x3stack_3a(branch3x3stack, training=training),
self.branch3x3stack_3b(branch3x3stack, training=training)
]
branch3x3stack = tf.concat(branch3x3stack, axis=-1)
branchpool = self.branchpool(x, training=training)
outputs = [branch1x1, branch3x3, branch3x3stack, branchpool]
return tf.concat(outputs, axis=-1)
class InceptionV3(Model):
def __init__(self, num_classes, input_shape=(32, 32, 3)):
super(InceptionV3, self).__init__()
self.conv1 = Sequential([
layers.Input(input_shape),
BasicConv2D(32, (3, 3), padding='same'),
BasicConv2D(32, (3, 3), padding='same'),
BasicConv2D(64, (3, 3), padding='same'),
BasicConv2D(80, (1, 1)),
BasicConv2D(192, (3, 3))
])
self.conv2 = Sequential([
InceptionA(32),
InceptionA(64),
InceptionA(64)
])
self.conv3 = InceptionB()
self.conv4 = Sequential([
InceptionC(channels_7x7=128),
InceptionC(channels_7x7=160),
InceptionC(channels_7x7=160),
InceptionC(channels_7x7=192)
])
self.conv5 = InceptionD()
self.conv6 = Sequential([
InceptionE(),
InceptionE()
])
self.avgpool = layers.GlobalAveragePooling2D()
self.dropout = layers.Dropout(0.5)
self.fc = layers.Dense(num_classes, activation='softmax')
def call(self, inputs, training=False):
x = self.conv1(inputs, training=training)
x = self.conv2(x, training=training)
x = self.conv3(x, training=training)
x = self.conv4(x, training=training)
x = self.conv5(x, training=training)
x = self.conv6(x, training=training)
x = self.avgpool(x)
x = self.dropout(x, training=training)
x = self.fc(x)
return x
def inceptionv3(num_classes):
return InceptionV3(num_classes)
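# Quick shape check (an added example; CIFAR-10-sized inputs assumed):
if __name__ == '__main__':
    model = inceptionv3(num_classes=10)
    logits = model(tf.random.uniform((2, 32, 32, 3)))
    print(logits.shape)  # expected: (2, 10)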
|
1635322
|
from typing import Iterable, Mapping
from chaoslib import Configuration, Secrets
from logzero import logger
from chaosazure import init_compute_management_client
from chaosazure.common import cleanse
from chaosazure.common.compute import command
from chaosazure.vmss.fetcher import fetch_vmss, fetch_instances
from chaosazure.vmss.records import Records
__all__ = [
"delete_vmss", "restart_vmss", "stop_vmss", "deallocate_vmss",
"burn_io", "fill_disk", "network_latency", "stress_vmss_instance_cpu"
]
def delete_vmss(filter: str = None,
instance_criteria: Iterable[Mapping[str, any]] = None,
configuration: Configuration = None,
secrets: Secrets = None):
"""
Delete a virtual machine scale set instance at random.
**Be aware**: Deleting a VMSS instance is an invasive action. You will not
be able to recover the VMSS instance once you deleted it.
Parameters
----------
filter : str
Filter the virtual machine scale set. If the filter is omitted all
virtual machine scale sets in the subscription will be selected as
potential chaos candidates.
Filtering example:
'where resourceGroup=="myresourcegroup" and name="myresourcename"'
"""
logger.debug(
"Starting delete_vmss: configuration='{}', filter='{}'".format(
configuration, filter))
vmss = fetch_vmss(filter, configuration, secrets)
vmss_records = Records()
for scale_set in vmss:
instances_records = Records()
instances = fetch_instances(scale_set, instance_criteria,
configuration, secrets)
for instance in instances:
logger.debug(
"Deleting instance: {}".format(instance['name']))
client = init_compute_management_client(secrets, configuration)
client.virtual_machine_scale_set_vms.begin_delete(
scale_set['resourceGroup'],
scale_set['name'],
instance['instance_id'])
instances_records.add(cleanse.vmss_instance(instance))
scale_set['virtualMachines'] = instances_records.output()
vmss_records.add(cleanse.vmss(scale_set))
return vmss_records.output_as_dict('resources')
def restart_vmss(filter: str = None,
instance_criteria: Iterable[Mapping[str, any]] = None,
configuration: Configuration = None,
secrets: Secrets = None):
"""
Restart a virtual machine scale set instance at random.
Parameters
----------
filter : str
Filter the virtual machine scale set. If the filter is omitted all
virtual machine scale sets in the subscription will be selected as
potential chaos candidates.
Filtering example:
'where resourceGroup=="myresourcegroup" and name="myresourcename"'
"""
logger.debug(
"Starting restart_vmss: configuration='{}', filter='{}'".format(
configuration, filter))
vmss = fetch_vmss(filter, configuration, secrets)
vmss_records = Records()
for scale_set in vmss:
instances_records = Records()
instances = fetch_instances(scale_set, instance_criteria,
configuration, secrets)
for instance in instances:
logger.debug(
"Restarting instance: {}".format(instance['name']))
client = init_compute_management_client(secrets, configuration)
client.virtual_machine_scale_set_vms.begin_restart(
scale_set['resourceGroup'],
scale_set['name'],
instance['instance_id'])
instances_records.add(cleanse.vmss_instance(instance))
scale_set['virtualMachines'] = instances_records.output()
vmss_records.add(cleanse.vmss(scale_set))
return vmss_records.output_as_dict('resources')
def stop_vmss(filter: str = None,
instance_criteria: Iterable[Mapping[str, any]] = None,
configuration: Configuration = None,
secrets: Secrets = None):
"""
Stops instances from the filtered scale set either at random or by
a defined instance criteria.
Parameters
----------
filter : str
Filter the virtual machine scale set. If the filter is omitted all
virtual machine scale sets in the subscription will be selected as
potential chaos candidates.
Filtering example:
'where resourceGroup=="myresourcegroup" and name="myresourcename"'
instance_criteria : Iterable[Mapping[str, any]]
Allows specification of criteria for selection of a given virtual
machine scale set instance. If the instance_criteria is omitted,
an instance will be chosen at random. All of the criteria within each
item of the Iterable must match, i.e. AND logic is applied.
The first item with all matching criterion will be used to select the
instance.
Criteria example:
        [
            {"name": "myVMSSInstance1"},
            {
                "name": "myVMSSInstance2",
                "instanceId": "2"
            },
            {"instanceId": "3"}
        ]
        Suppose the scale set has two instances: one with name myVMSSInstance4
        and instanceId 2, the other with name myVMSSInstance2 and instanceId 3.
        The first two criteria fail to match (no instance is named
        myVMSSInstance1, and the instance named myVMSSInstance2 has
        instanceId 3, not 2), so {"instanceId": "3"} is the first matching
        criterion and selects that instance.
"""
logger.debug(
"Starting stop_vmss: configuration='{}', filter='{}'".format(
configuration, filter))
vmss = fetch_vmss(filter, configuration, secrets)
vmss_records = Records()
for scale_set in vmss:
instances_records = Records()
instances = fetch_instances(scale_set, instance_criteria,
configuration, secrets)
for instance in instances:
logger.debug(
"Stopping instance: {}".format(instance['name']))
client = init_compute_management_client(secrets, configuration)
client.virtual_machine_scale_set_vms.begin_power_off(
scale_set['resourceGroup'],
scale_set['name'],
instance['instance_id'])
instances_records.add(cleanse.vmss_instance(instance))
scale_set['virtualMachines'] = instances_records.output()
vmss_records.add(cleanse.vmss(scale_set))
return vmss_records.output_as_dict('resources')
def deallocate_vmss(filter: str = None,
instance_criteria: Iterable[Mapping[str, any]] = None,
configuration: Configuration = None,
secrets: Secrets = None):
"""
Deallocate a virtual machine scale set instance at random.
Parameters
----------
filter : str
Filter the virtual machine scale set. If the filter is omitted all
virtual machine scale sets in the subscription will be selected as
potential chaos candidates.
Filtering example:
'where resourceGroup=="myresourcegroup" and name="myresourcename"'
"""
logger.debug(
"Starting deallocate_vmss: configuration='{}', filter='{}'".format(
configuration, filter))
vmss = fetch_vmss(filter, configuration, secrets)
vmss_records = Records()
for scale_set in vmss:
instances_records = Records()
instances = fetch_instances(scale_set, instance_criteria,
configuration, secrets)
for instance in instances:
logger.debug(
"Deallocating instance: {}".format(instance['name']))
client = init_compute_management_client(secrets, configuration)
client.virtual_machine_scale_set_vms.begin_deallocate(
scale_set['resourceGroup'],
scale_set['name'],
instance['instance_id'])
instances_records.add(cleanse.vmss_instance(instance))
scale_set['virtualMachines'] = instances_records.output()
vmss_records.add(cleanse.vmss(scale_set))
return vmss_records.output_as_dict('resources')
def stress_vmss_instance_cpu(
filter: str = None,
duration: int = 120,
timeout: int = 60,
instance_criteria: Iterable[Mapping[str, any]] = None,
configuration: Configuration = None,
secrets: Secrets = None):
logger.warning(
"Deprecated usage of activity 'stress_vmss_instance_cpu'."
" Please use activity 'stress_cpu' in favor since this"
" activity will be removed in a future release.")
return stress_cpu(
filter, duration, timeout, instance_criteria, configuration, secrets)
def stress_cpu(filter: str = None,
duration: int = 120,
timeout: int = 60,
instance_criteria: Iterable[Mapping[str, any]] = None,
configuration: Configuration = None,
secrets: Secrets = None):
"""
Stresses the CPU of a random VMSS instances in your selected VMSS.
Similar to the stress_cpu action of the machine.actions module.
Parameters
----------
filter : str, optional
Filter the VMSS. If the filter is omitted all VMSS in
the subscription will be selected as potential chaos candidates.
duration : int, optional
Duration of the stress test (in seconds) that generates high CPU usage.
Defaults to 120 seconds.
timeout : int
Additional wait time (in seconds) for stress operation to be completed.
Getting and sending data from/to Azure may take some time so it's not
recommended to set this value to less than 30s. Defaults to 60 seconds.
"""
logger.debug(
"Starting stress_vmss_instance_cpu:"
" configuration='{}', filter='{}',"
" duration='{}', timeout='{}'".format(
configuration, filter, duration, timeout))
vmss_records = Records()
vmss = fetch_vmss(filter, configuration, secrets)
for scale_set in vmss:
instances_records = Records()
instances = fetch_instances(scale_set, instance_criteria,
configuration, secrets)
for instance in instances:
command_id, script_content = command.prepare(instance,
'cpu_stress_test')
parameters = {
'command_id': command_id,
'script': [script_content],
'parameters': [
{'name': "duration", 'value': duration}
]
}
logger.debug(
"Stressing CPU of VMSS instance: '{}'".format(
instance['instance_id']))
_timeout = duration + timeout
command.run(
scale_set['resourceGroup'], instance, _timeout, parameters,
secrets, configuration)
instances_records.add(cleanse.vmss_instance(instance))
scale_set['virtualMachines'] = instances_records.output()
vmss_records.add(cleanse.vmss(scale_set))
return vmss_records.output_as_dict('resources')
def burn_io(filter: str = None,
duration: int = 60,
timeout: int = 60,
instance_criteria: Iterable[Mapping[str, any]] = None,
configuration: Configuration = None,
secrets: Secrets = None):
"""
Increases the Disk I/O operations per second of the VMSS machine.
Similar to the burn_io action of the machine.actions module.
"""
logger.debug(
"Starting burn_io: configuration='{}', filter='{}', duration='{}',"
" timeout='{}'".format(configuration, filter, duration, timeout))
vmss = fetch_vmss(filter, configuration, secrets)
vmss_records = Records()
for scale_set in vmss:
instances_records = Records()
instances = fetch_instances(scale_set, instance_criteria,
configuration, secrets)
for instance in instances:
command_id, script_content = command.prepare(instance, 'burn_io')
parameters = {
'command_id': command_id,
'script': [script_content],
'parameters': [
{'name': "duration", 'value': duration}
]
}
logger.debug(
"Burning IO of VMSS instance: '{}'".format(instance['name']))
_timeout = duration + timeout
command.run(
scale_set['resourceGroup'], instance, _timeout, parameters,
secrets, configuration)
instances_records.add(cleanse.vmss_instance(instance))
scale_set['virtualMachines'] = instances_records.output()
vmss_records.add(cleanse.vmss(scale_set))
return vmss_records.output_as_dict('resources')
def fill_disk(filter: str = None,
duration: int = 120,
timeout: int = 60,
size: int = 1000,
path: str = None,
instance_criteria: Iterable[Mapping[str, any]] = None,
configuration: Configuration = None,
secrets: Secrets = None):
"""
Fill the VMSS machine disk with random data. Similar to
the fill_disk action of the machine.actions module.
"""
logger.debug(
"Starting fill_disk: configuration='{}', filter='{}',"
" duration='{}', size='{}', path='{}', timeout='{}'".format(
configuration, filter, duration, size, path, timeout))
vmss = fetch_vmss(filter, configuration, secrets)
vmss_records = Records()
for scale_set in vmss:
instances_records = Records()
instances = fetch_instances(scale_set, instance_criteria,
configuration, secrets)
for instance in instances:
command_id, script_content = command.prepare(instance,
'fill_disk')
fill_path = command.prepare_path(instance, path)
parameters = {
'command_id': command_id,
'script': [script_content],
'parameters': [
{'name': "duration", 'value': duration},
{'name': "size", 'value': size},
{'name': "path", 'value': fill_path}
]
}
logger.debug(
"Filling disk of VMSS instance: '{}'".format(
instance['name']))
_timeout = duration + timeout
command.run(
scale_set['resourceGroup'], instance, _timeout, parameters,
secrets, configuration)
instances_records.add(cleanse.vmss_instance(instance))
scale_set['virtualMachines'] = instances_records.output()
vmss_records.add(cleanse.vmss(scale_set))
return vmss_records.output_as_dict('resources')
def network_latency(filter: str = None,
duration: int = 60,
delay: int = 200,
jitter: int = 50,
timeout: int = 60,
instance_criteria: Iterable[Mapping[str, any]] = None,
configuration: Configuration = None,
secrets: Secrets = None):
"""
Increases the response time of the virtual machine. Similar to
the network_latency action of the machine.actions module.
"""
logger.debug(
"Starting network_latency: configuration='{}', filter='{}',"
" duration='{}', delay='{}', jitter='{}', timeout='{}'".format(
configuration, filter, duration, delay, jitter, timeout))
vmss = fetch_vmss(filter, configuration, secrets)
vmss_records = Records()
for scale_set in vmss:
instances_records = Records()
instances = fetch_instances(scale_set, instance_criteria,
configuration, secrets)
for instance in instances:
command_id, script_content = command.prepare(
instance, 'network_latency')
parameters = {
'command_id': command_id,
'script': [script_content],
'parameters': [
{'name': "duration", 'value': duration},
{'name': "delay", 'value': delay},
{'name': "jitter", 'value': jitter}
]
}
logger.debug(
"Increasing the latency of VMSS instance: '{}'".format(
instance['name']))
_timeout = duration + timeout
command.run(
scale_set['resourceGroup'], instance, _timeout, parameters,
secrets, configuration)
instances_records.add(cleanse.vmss_instance(instance))
scale_set['virtualMachines'] = instances_records.output()
vmss_records.add(cleanse.vmss(scale_set))
return vmss_records.output_as_dict('resources')
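# Illustrative invocation sketch (an added example; the filter string and the
# configuration/secrets objects are hypothetical and would normally be
# supplied by a chaostoolkit experiment):
# stop_vmss(filter='where resourceGroup=="myresourcegroup"',
#           instance_criteria=[{"instanceId": "0"}],
#           configuration=my_configuration, secrets=my_secrets)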
|
1635332
|
from asmdot import * # pylint: disable=W0614
from typing import Tuple
header = '''// Automatically generated file.
package {}
import (
\t"bytes"
\t"encoding/binary"
\t"errors"
\t"io"
)
// Bypass unused module error if we don't have assertions.
var _ = errors.New
var (
\tinterbuf = [8]byte{{}}
\tbyteOrder = binary.LittleEndian
\tswappedByteOrder = binary.BigEndian
)
func write16(w io.Writer, x uint16) error {{
\tbyteOrder.PutUint16(interbuf[:], x)
\t_, err := w.Write(interbuf[:2])
\treturn err
}}
func writeSwapped16(w io.Writer, x uint16) error {{
\tswappedByteOrder.PutUint16(interbuf[:], x)
\t_, err := w.Write(interbuf[:2])
\treturn err
}}
func write32(w io.Writer, x uint32) error {{
\tbyteOrder.PutUint32(interbuf[:], x)
\t_, err := w.Write(interbuf[:4])
\treturn err
}}
func writeSwapped32(w io.Writer, x uint32) error {{
\tswappedByteOrder.PutUint32(interbuf[:], x)
\t_, err := w.Write(interbuf[:4])
\treturn err
}}
func write64(w io.Writer, x uint64) error {{
\tbyteOrder.PutUint64(interbuf[:], x)
\t_, err := w.Write(interbuf[:])
\treturn err
}}
func writeSwapped64(w io.Writer, x uint64) error {{
\tswappedByteOrder.PutUint64(interbuf[:], x)
\t_, err := w.Write(interbuf[:])
\treturn err
}}
'''
header_x86 = '''
func getPrefix16(r *Reg16) byte {
if uint8(*r) < 8 {
return byte(*r)
}
*r = Reg16(uint8(*r) - 8)
return 1
}
func getPrefix32(r *Reg32) byte {
if uint8(*r) < 8 {
return byte(*r)
}
*r = Reg32(uint8(*r) - 8)
return 1
}
func getPrefix64(r *Reg64) byte {
if uint8(*r) < 8 {
return byte(*r)
}
*r = Reg64(uint8(*r) - 8)
return 1
}
'''
def _camel_case(s: str) -> str:
return s[0] + s.title().replace('_', '')[1:]
def _pascal_case(s: str) -> str:
return s.title().replace('_', '')
@handle_command_line()
class GoEmitter(Emitter):
modified_list: List[str] = []
var_map: Dict[str, Tuple[IrType, IrType]] = {}
@property
def language(self):
return 'go'
@property
def filename(self):
return f'{self.arch}/{self.arch}.go'
@property
def test_filename(self):
return f'{self.arch}/{self.arch}_test.go'
def get_function_name(self, function: Function) -> str:
return _pascal_case(function.fullname)
def get_operator(self, op: Operator) -> str:
if op == OP_BITWISE_XOR:
return '!='
else:
return op.op
def get_builtin_name(self, builtin: Builtin) -> str:
if builtin == BUILTIN_X86_PREFIX:
return 'getPrefix'
else:
return builtin.name
def __init__(self, args: Namespace, arch: str) -> None:
super().__init__(args, arch)
self.indent = Indent('\t')
def write_header(self):
self.write(header.format(self.arch))
if self.arch == 'x86':
self.write(header_x86)
def write_separator(self):
self.writeline()
def write_expr(self, expr: Expression):
if isinstance(expr, Binary):
self.write('(', expr.l, ' ', expr.op, ' ', expr.r, ')')
elif isinstance(expr, Unary):
self.write(expr.op, expr.v)
elif isinstance(expr, Ternary):
self.write('(func() { if ', expr.condition, ' { return ', expr.consequence, ' } else { return ', expr.alternative, ' })()')
elif isinstance(expr, Var):
name = _camel_case(expr.name)
if name in self.modified_list:
name = name + '_'
else:
name = f'{self.var_map[expr.name][1]}({name})'
self.write(name)
elif isinstance(expr, Call):
if self.var_map[expr.args[0].name][0].id == 'Reg16':
self.write(expr.builtin, '16(&', expr.args[0].name, ')')
elif self.var_map[expr.args[0].name][0].id == 'Reg32':
self.write(expr.builtin, '32(&', expr.args[0].name, ')')
elif self.var_map[expr.args[0].name][0].id == 'Reg64':
self.write(expr.builtin, '64(&', expr.args[0].name, ')')
elif isinstance(expr, Literal):
self.write(expr.value)
else:
raise UnsupportedExpression(expr)
def write_stmt(self, stmt: Statement):
if isinstance(stmt, Assign):
self.writelinei(stmt.variable, ' = ', stmt.value)
elif isinstance(stmt, Conditional):
self.writelinei('if ', stmt.condition, ' {')
with self.indent.further():
self.write_stmt(stmt.consequence)
if stmt.alternative:
self.writelinei('} else {')
with self.indent.further():
self.write_stmt(stmt.alternative)
self.writelinei('}')
elif isinstance(stmt, Block):
for s in stmt.statements:
self.write_stmt(s)
elif isinstance(stmt, Set):
if stmt.type.under in (TYPE_U8, TYPE_I8):
self.writelinei('if err := w.WriteByte(byte(', stmt.value, ')); err != nil {')
else:
if self.bigendian:
write = f'writeSwapped{stmt.type.under.size * 8}'
else:
write = f'write{stmt.type.under.size * 8}'
self.writelinei('if err := ', write, '(w, uint', stmt.type.under.size * 8, '(', stmt.value, ')); err != nil {')
self.writelinei('\treturn err')
self.writelinei('}')
elif isinstance(stmt, Define):
self.writelinei(f'{stmt.name} := ', stmt.value)
else:
raise UnsupportedStatement(stmt)
def write_function(self, fun: Function):
self.modified_list.clear()
self.write(f'func {fun.name}(w *bytes.Buffer')
for name, typ, usagetyp in fun.params:
self.write(f', {_camel_case(name)} {typ}')
self.var_map[name] = typ, usagetyp
self.write(') error {\n')
self.indent += 1
for name, typ, usagetyp in fun.params:
if typ is TYPE_BOOL and usagetyp is not TYPE_BOOL:
name = _camel_case(name)
self.writelinei(f'var {name}_ {usagetyp} = 0')
self.writelinei(f'if {name} {{')
self.writelinei(f'\t{name}_ = 1')
self.writelinei( '}')
self.modified_list.append(name)
for condition in fun.conditions:
self.writelinei('if !', condition, ' {')
self.writelinei('\treturn errors.New("Failed precondition: ', condition, '.")')
self.writelinei('}')
for stmt in fun.body:
self.write_stmt(stmt)
self.writelinei('return nil')
self.write('}\n\n')
self.indent -= 1
def write_decl(self, decl: Declaration):
if isinstance(decl, Enumeration):
self.writeline('// ', decl.descr)
self.writeline('type ', decl.type, ' ', decl.type.underlying, '\n')
self.writeline('const (')
for _, value, descr, fullname in decl.members + decl.additional_members:
self.writeline('\t// ', descr)
self.writeline('\t', fullname, ' ', decl.type, ' = ', value)
self.writeline(')')
elif isinstance(decl, DistinctType):
self.writeline('// ', decl.descr)
self.writeline('type ', decl.type, ' ', decl.type.underlying, '\n')
self.writeline('const (')
for name, value in decl.constants:
self.writeline('\t', name.upper(), ' ', decl.type, ' = ', value)
self.writeline(')')
else:
raise UnsupportedDeclaration(decl)
self.writeline()
def write_test_header(self):
self.writeline('package ', self.arch, '\n')
self.writeline('import (\n\t"bytes"\n\t"testing"\n)\n')
def write_test(self, test: TestCase):
self.write('func Test', _pascal_case(test.name.replace(' ', '_')), '(t *testing.T) {\n')
self.indent += 1
self.writelinei('buf := new(bytes.Buffer)\n')
def arg_str(arg: TestCaseArgument):
if isinstance(arg, ArgConstant):
return f'{arg.const.name.upper()}'
if isinstance(arg, ArgEnumMember):
return arg.member.fullname
elif isinstance(arg, ArgInteger):
return str(arg.value)
else:
raise UnsupportedTestArgument(arg)
for func, args in test.calls:
self.writei(func.name, '(buf')
for arg in args:
self.write(', ', arg_str(arg))
self.write(')\n')
self.writeline()
self.writelinei('if buf.Len() != ', len(test.expected), ' {')
self.writelinei('\tt.Errorf("buf.Len() = %d; want ', len(test.expected), '", buf.Len())')
self.writelinei('}')
self.writelinei('if !bytes.Equal(buf.Bytes(), []byte{', test.expected_bytes, '}) {')
self.writelinei('\tt.Errorf("buf.Bytes() is not valid")')
self.writelinei('}')
self.indent -= 1
self.write('}\n\n')
|
1635365
|
import re
from unidecode import unidecode
from ..utils import squeeze, translation, check_str, check_empty
from .phonetic_algorithm import PhoneticAlgorithm
class Lein(PhoneticAlgorithm):
"""
The Lein name coding procedure.
[Reference]: http://naldc.nal.usda.gov/download/27833/PDF
"""
def __init__(self):
super().__init__()
self.translations = translation(
'DTMNLRBFPVCJKGQSXZ',
'112233444455555555'
)
self.pad = lambda code: '{}0000'.format(code)[:4]
def phonetics(self, word):
check_str(word)
check_empty(word)
word = unidecode(word).upper()
        word = re.sub(r'[^A-Z]', r'', word)
# Keep the 1st letter
first, code = word[0], word[1:]
# Drop vowels and Y, W & H
code = re.sub(r'[AEIOUYWH]', r'', code)
# Drop consecutive duplicates and truncate to 4 chars
code = squeeze(code)[0: 4]
# Translations
code = ''.join(self.translations.get(char, char) for char in code)
return self.pad(first + code)
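# A minimal usage sketch (hedged): with the rules above, 'Dubois' keeps the
# leading 'D', drops the vowels from the remainder ('UBOIS' -> 'BS'), maps
# B -> 4 and S -> 5, then zero-pads to four characters:
#
#     >>> Lein().phonetics('Dubois')
#     'D450'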
|
1635384
|
from multiprocessing import Pool, Process
import dataio as dio
import time
def get_model(seqLen=100, numBalls=2):
time.sleep(3)
ds = dio.DataSaver(wThick=30, isRect=True, mnSeqLen=seqLen, mxSeqLen=seqLen,
numBalls=numBalls, mnBallSz=25, mxBallSz=25)
ims = ds.fetch()
return ims
def run_parallel(numW=4, numJobs=10):
p = Pool(numW)
resPool = []
for i in range(numJobs):
ds = dio.DataSaver(wThick=30, isRect=True,
mnSeqLen=100, mxSeqLen=100,
numBalls=2, mnBallSz=25, mxBallSz=25)
#resPool.append(p.apply_async(get_model))
resPool.append(p.apply_async(ds.fetch))
print('All Processes launched')
res = []
for r in resPool:
res.append(r.get())
return res
def run_process(numJobs=10):
    prcs = []
    for i in range(numJobs):
        p = Process(target=get_model)
        p.start()
        prcs.append(p)
    print('All Processes launched')
    # Process targets cannot return values directly; just wait for completion.
    for p in prcs:
        p.join()
def run_serial():
    res = []
    for i in range(100):
        res.append(get_model())
    return res
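# A hedged usage sketch: the __main__ guard is required on platforms that
# spawn rather than fork worker processes (e.g. Windows, macOS).
if __name__ == '__main__':
    results = run_parallel(numW=4, numJobs=10)
    print('collected {} results'.format(len(results)))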
|
1635447
|
import os
import warnings
import sklearn.decomposition
import numpy as np
from .openl3_exceptions import OpenL3Error
with warnings.catch_warnings():
# Suppress TF and Keras warnings when importing
warnings.simplefilter("ignore")
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras import Model
from tensorflow.keras.layers import (
Input, Conv2D, Permute, BatchNormalization, MaxPooling2D,
Flatten, Activation, Lambda)
import tensorflow.keras.regularizers as regularizers
VALID_FRONTENDS = ("librosa", "kapre")
VALID_INPUT_REPRS = ("linear", "mel128", "mel256")
VALID_CONTENT_TYPES = ("music", "env")
VALID_AUDIO_EMBEDDING_SIZES = (6144, 512)
VALID_IMAGE_EMBEDDING_SIZES = (8192, 512)
def _log10(x):
'''log10 tensorflow function.'''
return tf.math.log(x) / tf.math.log(tf.constant(10, dtype=x.dtype))
def kapre_v0_1_4_magnitude_to_decibel(x, ref_value=1.0, amin=1e-10, dynamic_range=80.0):
    '''Magnitude-to-decibel conversion matching kapre v0.1.4 behavior.'''
amin = tf.cast(amin or 1e-10, dtype=x.dtype)
max_axis = tuple(range(K.ndim(x))[1:]) or None
log_spec = 10. * _log10(K.maximum(x, amin))
return K.maximum(
log_spec - K.max(log_spec, axis=max_axis, keepdims=True),
-dynamic_range)
def __fix_kapre_spec(func):
    '''Wraps the kapre composed layer interface to revert to the kapre v0.1.4
    behavior (decibel scaling and (freq, time, channel) output ordering).'''
def get_spectrogram(*a, return_decibel=False, **kw):
seq = func(*a, return_decibel=False, **kw)
if return_decibel:
seq.add(Lambda(kapre_v0_1_4_magnitude_to_decibel))
seq.add(Permute((2, 1, 3))) # the output is (None, t, f, ch) instead of (None, f, t, ch), so gotta fix that
return seq
return get_spectrogram
def _validate_audio_frontend(frontend='kapre', input_repr=None, model=None):
'''Make sure that the audio frontend matches the model and input_repr.'''
ndims = len(model.input_shape) if model is not None else None
# if frontend == 'infer': # detect which frontend to use
# if model is None: # default
# frontend = 'kapre'
# elif ndims == 3: # shape: [batch, channel, samples]
# frontend = 'kapre'
# elif ndims == 4: # shape: [batch, frequency, time, channel]
# frontend = 'librosa'
# else:
# raise OpenL3Error(
# 'Invalid model input shape: {}. Expected a model '
# 'with either a 3 or 4 dimensional input, got {}.'.format(model.input_shape, ndims))
if frontend not in VALID_FRONTENDS:
raise OpenL3Error('Invalid frontend "{}". Must be one of {}'.format(frontend, VALID_FRONTENDS))
# validate that our model shape matches our frontend.
if ndims is not None:
if frontend == 'kapre' and ndims != 3:
raise OpenL3Error('Invalid model input shape: {}. Expected 3 dims got {}.'.format(model.input_shape, ndims))
if frontend == 'librosa' and ndims != 4:
raise OpenL3Error('Invalid model input shape: {}. Expected 4 dims got {}.'.format(model.input_shape, ndims))
if input_repr is None:
if frontend == 'librosa':
raise OpenL3Error('You must specify input_repr for a librosa frontend.')
else:
input_repr = 'mel256'
if str(input_repr) not in VALID_INPUT_REPRS:
raise OpenL3Error('Invalid input representation "{}". Must be one of {}'.format(input_repr, VALID_INPUT_REPRS))
return frontend, input_repr
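# The pooling sizes below map each embedding size to the pool that produces it:
# the final conv block outputs 512 channels, so e.g. the mel256 feature map of
# 32x24x512 pooled with (8, 8) flattens to 4*3*512 = 6144 values, while global
# (32, 24) pooling flattens to 512.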
AUDIO_POOLING_SIZES = {
'linear': {
6144: (8, 8),
512: (32, 24),
},
'mel128': {
6144: (4, 8),
512: (16, 24),
},
'mel256': {
6144: (8, 8),
512: (32, 24),
}
}
IMAGE_POOLING_SIZES = {
8192: (7, 7),
512: (28, 28),
}
def load_audio_embedding_model(input_repr, content_type, embedding_size, frontend='kapre'):
"""
Returns a model with the given characteristics. Loads the model
if the model has not been loaded yet.
Parameters
----------
input_repr : "linear", "mel128", or "mel256"
Spectrogram representation used for audio model.
content_type : "music" or "env"
Type of content used to train embedding.
embedding_size : 6144 or 512
Embedding dimensionality.
frontend : "kapre" or "librosa"
The audio frontend to use. If frontend == 'kapre', then the kapre frontend will
be included. Otherwise no frontend will be added inside the keras model.
Returns
-------
model : tf.keras.Model
Model object.
"""
model_path = get_audio_embedding_model_path(input_repr, content_type)
return load_audio_embedding_model_from_path(model_path, input_repr, embedding_size, frontend=frontend)
def load_audio_embedding_model_from_path(model_path, input_repr, embedding_size, frontend='kapre'):
"""
Loads a model with weights at the given path.
Parameters
----------
model_path : str
        Path to model weights HDF5 (.h5) file.
input_repr : "linear", "mel128", or "mel256"
Spectrogram representation used for audio model.
embedding_size : 6144 or 512
Embedding dimensionality.
frontend : "kapre" or "librosa"
The audio frontend to use. If frontend == 'kapre', then the kapre frontend will
be included. Otherwise no frontend will be added inside the keras model.
Returns
-------
model : tf.keras.Model
Model object.
"""
frontend, input_repr = _validate_audio_frontend(frontend, input_repr)
# Construct embedding model and load model weights
with warnings.catch_warnings():
warnings.simplefilter("ignore")
m = AUDIO_MODELS[input_repr](include_frontend=frontend == 'kapre')
m.load_weights(model_path)
# Pooling for final output embedding size
pool_size = AUDIO_POOLING_SIZES[input_repr][embedding_size]
y_a = MaxPooling2D(pool_size=pool_size, padding='same')(m.output)
y_a = Flatten()(y_a)
m = Model(inputs=m.input, outputs=y_a)
m.frontend = frontend
return m
def get_audio_embedding_model_path(input_repr, content_type):
"""
Returns the local path to the model weights file for the model
with the given characteristics
Parameters
----------
input_repr : "linear", "mel128", or "mel256"
Spectrogram representation used for model.
content_type : "music" or "env"
Type of content used to train embedding.
Returns
-------
output_path : str
Path to given model object
"""
return os.path.join(os.path.dirname(__file__),
'openl3_audio_{}_{}.h5'.format(input_repr, content_type))
def load_image_embedding_model(input_repr, content_type, embedding_size):
"""
Returns a model with the given characteristics. Loads the model
if the model has not been loaded yet.
Parameters
----------
input_repr : "linear", "mel128", or "mel256"
Spectrogram representation used for audio model.
content_type : "music" or "env"
Type of content used to train embedding.
embedding_size : 8192 or 512
Embedding dimensionality.
Returns
-------
model : tf.keras.Model
Model object.
"""
model_path = get_image_embedding_model_path(input_repr, content_type)
return load_image_embedding_model_from_path(model_path, embedding_size)
def load_image_embedding_model_from_path(model_path, embedding_size):
"""
Loads a model with weights at the given path.
Parameters
----------
model_path : str
Path to model weights HDF5 (.h5) file.
    embedding_size : 8192 or 512
        Embedding dimensionality.
Returns
-------
model : tf.keras.Model
Model object.
"""
# Construct embedding model and load model weights
with warnings.catch_warnings():
warnings.simplefilter("ignore")
m = _construct_image_network()
m.load_weights(model_path)
# Pooling for final output embedding size
pool_size = IMAGE_POOLING_SIZES[embedding_size]
y_i = MaxPooling2D(pool_size=pool_size, padding='same')(m.output)
y_i = Flatten()(y_i)
m = Model(inputs=m.input, outputs=y_i)
return m
def get_image_embedding_model_path(input_repr, content_type):
"""
Returns the local path to the model weights file for the model
with the given characteristics
Parameters
----------
input_repr : "linear", "mel128", or "mel256"
Spectrogram representation used for model.
content_type : "music" or "env"
Type of content used to train embedding.
Returns
-------
output_path : str
Path to given model object
"""
return os.path.join(os.path.dirname(__file__),
'openl3_image_{}_{}.h5'.format(input_repr, content_type))
def _construct_linear_audio_network(include_frontend=True):
"""
Returns an uninitialized model object for an audio network with a linear
    spectrogram input (with 257 frequency bins).
Returns
-------
model : tf.keras.Model
Model object.
"""
weight_decay = 1e-5
n_dft = 512
n_hop = 242
asr = 48000
audio_window_dur = 1
if include_frontend:
# INPUT
input_shape = (1, asr * audio_window_dur)
x_a = Input(shape=input_shape, dtype='float32')
# SPECTROGRAM PREPROCESSING
# 257 x 197 x 1
from kapre.composed import get_stft_magnitude_layer
spec = __fix_kapre_spec(get_stft_magnitude_layer)(
input_shape=input_shape,
n_fft=n_dft, hop_length=n_hop, return_decibel=True,
input_data_format='channels_first',
output_data_format='channels_last')
y_a = spec(x_a)
else: # NOTE: asr - n_dft because we're not padding (I think?)
input_shape = (n_dft // 2 + 1, int(np.ceil((asr - n_dft) * audio_window_dur / n_hop)), 1)
x_a = y_a = Input(shape=input_shape, dtype='float32')
y_a = BatchNormalization()(y_a)
# CONV BLOCK 1
n_filter_a_1 = 64
filt_size_a_1 = (3, 3)
pool_size_a_1 = (2, 2)
y_a = Conv2D(n_filter_a_1, filt_size_a_1, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = Conv2D(n_filter_a_1, filt_size_a_1, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = MaxPooling2D(pool_size=pool_size_a_1, strides=2)(y_a)
# CONV BLOCK 2
n_filter_a_2 = 128
filt_size_a_2 = (3, 3)
pool_size_a_2 = (2, 2)
y_a = Conv2D(n_filter_a_2, filt_size_a_2, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = Conv2D(n_filter_a_2, filt_size_a_2, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = MaxPooling2D(pool_size=pool_size_a_2, strides=2)(y_a)
# CONV BLOCK 3
n_filter_a_3 = 256
filt_size_a_3 = (3, 3)
pool_size_a_3 = (2, 2)
y_a = Conv2D(n_filter_a_3, filt_size_a_3, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = Conv2D(n_filter_a_3, filt_size_a_3, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = MaxPooling2D(pool_size=pool_size_a_3, strides=2)(y_a)
# CONV BLOCK 4
n_filter_a_4 = 512
filt_size_a_4 = (3, 3)
y_a = Conv2D(n_filter_a_4, filt_size_a_4, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = Conv2D(n_filter_a_4, filt_size_a_4,
kernel_initializer='he_normal',
name='audio_embedding_layer', padding='same',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
m = Model(inputs=x_a, outputs=y_a)
return m
def _construct_mel128_audio_network(include_frontend=True):
"""
Returns an uninitialized model object for an audio network with a Mel
spectrogram input (with 128 frequency bins).
Returns
-------
model : tf.keras.Model
Model object.
"""
weight_decay = 1e-5
n_dft = 2048
n_mels = 128
n_hop = 242
asr = 48000
audio_window_dur = 1
if include_frontend:
# INPUT
input_shape = (1, asr * audio_window_dur)
x_a = Input(shape=input_shape, dtype='float32')
# MELSPECTROGRAM PREPROCESSING
# 128 x 199 x 1
from kapre.composed import get_melspectrogram_layer
spec = __fix_kapre_spec(get_melspectrogram_layer)(
input_shape=input_shape,
n_fft=n_dft, hop_length=n_hop, n_mels=n_mels,
sample_rate=asr, return_decibel=True, pad_end=True,
input_data_format='channels_first',
output_data_format='channels_last')
y_a = spec(x_a)
else:
input_shape = (n_mels, int(np.ceil(asr * audio_window_dur / n_hop)), 1)
x_a = y_a = Input(shape=input_shape, dtype='float32')
y_a = BatchNormalization()(y_a)
# CONV BLOCK 1
n_filter_a_1 = 64
filt_size_a_1 = (3, 3)
pool_size_a_1 = (2, 2)
y_a = Conv2D(n_filter_a_1, filt_size_a_1, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = Conv2D(n_filter_a_1, filt_size_a_1, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = MaxPooling2D(pool_size=pool_size_a_1, strides=2)(y_a)
# CONV BLOCK 2
n_filter_a_2 = 128
filt_size_a_2 = (3, 3)
pool_size_a_2 = (2, 2)
y_a = Conv2D(n_filter_a_2, filt_size_a_2, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = Conv2D(n_filter_a_2, filt_size_a_2, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = MaxPooling2D(pool_size=pool_size_a_2, strides=2)(y_a)
# CONV BLOCK 3
n_filter_a_3 = 256
filt_size_a_3 = (3, 3)
pool_size_a_3 = (2, 2)
y_a = Conv2D(n_filter_a_3, filt_size_a_3, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = Conv2D(n_filter_a_3, filt_size_a_3, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = MaxPooling2D(pool_size=pool_size_a_3, strides=2)(y_a)
# CONV BLOCK 4
n_filter_a_4 = 512
filt_size_a_4 = (3, 3)
pool_size_a_4 = (16, 24)
y_a = Conv2D(n_filter_a_4, filt_size_a_4, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = Conv2D(n_filter_a_4, filt_size_a_4,
kernel_initializer='he_normal',
name='audio_embedding_layer', padding='same',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
m = Model(inputs=x_a, outputs=y_a)
return m
def _construct_mel256_audio_network(include_frontend=True):
"""
Returns an uninitialized model object for an audio network with a Mel
spectrogram input (with 256 frequency bins).
Returns
-------
model : tf.keras.Model
Model object.
"""
weight_decay = 1e-5
n_dft = 2048
n_mels = 256
n_hop = 242
asr = 48000
audio_window_dur = 1
if include_frontend:
# INPUT
input_shape = (1, asr * audio_window_dur)
x_a = Input(shape=input_shape, dtype='float32')
# MELSPECTROGRAM PREPROCESSING
# 256 x 199 x 1
from kapre.composed import get_melspectrogram_layer
spec = __fix_kapre_spec(get_melspectrogram_layer)(
input_shape=input_shape,
n_fft=n_dft, hop_length=n_hop, n_mels=n_mels,
sample_rate=asr, return_decibel=True, pad_end=True,
input_data_format='channels_first',
output_data_format='channels_last')
y_a = spec(x_a)
else:
input_shape = (n_mels, int(np.ceil(asr * audio_window_dur / n_hop)), 1)
x_a = y_a = Input(shape=input_shape, dtype='float32')
y_a = BatchNormalization()(y_a)
# CONV BLOCK 1
n_filter_a_1 = 64
filt_size_a_1 = (3, 3)
pool_size_a_1 = (2, 2)
y_a = Conv2D(n_filter_a_1, filt_size_a_1, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = Conv2D(n_filter_a_1, filt_size_a_1, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = MaxPooling2D(pool_size=pool_size_a_1, strides=2)(y_a)
# CONV BLOCK 2
n_filter_a_2 = 128
filt_size_a_2 = (3, 3)
pool_size_a_2 = (2, 2)
y_a = Conv2D(n_filter_a_2, filt_size_a_2, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = Conv2D(n_filter_a_2, filt_size_a_2, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = MaxPooling2D(pool_size=pool_size_a_2, strides=2)(y_a)
# CONV BLOCK 3
n_filter_a_3 = 256
filt_size_a_3 = (3, 3)
pool_size_a_3 = (2, 2)
y_a = Conv2D(n_filter_a_3, filt_size_a_3, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = Conv2D(n_filter_a_3, filt_size_a_3, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = MaxPooling2D(pool_size=pool_size_a_3, strides=2)(y_a)
# CONV BLOCK 4
n_filter_a_4 = 512
filt_size_a_4 = (3, 3)
y_a = Conv2D(n_filter_a_4, filt_size_a_4, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
y_a = BatchNormalization()(y_a)
y_a = Activation('relu')(y_a)
y_a = Conv2D(n_filter_a_4, filt_size_a_4,
kernel_initializer='he_normal',
name='audio_embedding_layer', padding='same',
kernel_regularizer=regularizers.l2(weight_decay))(y_a)
m = Model(inputs=x_a, outputs=y_a)
return m
def _construct_image_network():
"""
    Returns an uninitialized model object for an image network.
Returns
-------
model : tf.keras.Model
Model object.
"""
weight_decay = 1e-5
im_height = 224
im_width = 224
num_channels = 3
x_i = Input(shape=(im_height, im_width, num_channels), dtype='float32')
y_i = BatchNormalization()(x_i)
# CONV BLOCK 1
n_filter_i_1 = 64
filt_size_i_1 = (3, 3)
pool_size_i_1 = (2, 2)
y_i = Conv2D(n_filter_i_1, filt_size_i_1, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_i)
y_i = BatchNormalization()(y_i)
y_i = Activation('relu')(y_i)
y_i = Conv2D(n_filter_i_1, filt_size_i_1, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_i)
y_i = Activation('relu')(y_i)
y_i = BatchNormalization()(y_i)
y_i = MaxPooling2D(pool_size=pool_size_i_1, strides=2, padding='same')(y_i)
# CONV BLOCK 2
n_filter_i_2 = 128
filt_size_i_2 = (3, 3)
pool_size_i_2 = (2, 2)
y_i = Conv2D(n_filter_i_2, filt_size_i_2, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_i)
y_i = BatchNormalization()(y_i)
y_i = Activation('relu')(y_i)
y_i = Conv2D(n_filter_i_2, filt_size_i_2, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_i)
y_i = BatchNormalization()(y_i)
y_i = Activation('relu')(y_i)
y_i = MaxPooling2D(pool_size=pool_size_i_2, strides=2, padding='same')(y_i)
# CONV BLOCK 3
n_filter_i_3 = 256
filt_size_i_3 = (3, 3)
pool_size_i_3 = (2, 2)
y_i = Conv2D(n_filter_i_3, filt_size_i_3, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_i)
y_i = BatchNormalization()(y_i)
y_i = Activation('relu')(y_i)
y_i = Conv2D(n_filter_i_3, filt_size_i_3, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_i)
y_i = BatchNormalization()(y_i)
y_i = Activation('relu')(y_i)
y_i = MaxPooling2D(pool_size=pool_size_i_3, strides=2, padding='same')(y_i)
# CONV BLOCK 4
n_filter_i_4 = 512
filt_size_i_4 = (3, 3)
pool_size_i_4 = (28, 28)
y_i = Conv2D(n_filter_i_4, filt_size_i_4, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_i)
y_i = BatchNormalization()(y_i)
y_i = Activation('relu')(y_i)
y_i = Conv2D(n_filter_i_4, filt_size_i_4,
name='vision_embedding_layer', padding='same',
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(weight_decay))(y_i)
m = Model(inputs=x_i, outputs=y_i)
return m
AUDIO_MODELS = {
'linear': _construct_linear_audio_network,
'mel128': _construct_mel128_audio_network,
'mel256': _construct_mel256_audio_network
}
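# A minimal sketch (hedged; no pretrained weights involved, and the shapes
# follow from the conv/pool arithmetic above):
#
#     >>> net = AUDIO_MODELS['mel256'](include_frontend=False)
#     >>> net.input_shape
#     (None, 256, 199, 1)
#     >>> net.output_shape
#     (None, 32, 24, 512)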
|
1635459
|
from unittest import TestCase
from .common import link_test_file, html_lines
class LinkerTest(TestCase):
def test_link_install(self):
self.assertEqual(link_test_file('install.rst'), html_lines('Installation', [
'<h1>Installation</h1>',
]))
def test_link_tutorial(self):
self.assertEqual(link_test_file('tutorial'), html_lines('Beginners Tutorial', [
'<h1>Beginners Tutorial</h1>',
'<p>Welcome to the project tutorial!</p>',
'<p>This text will take you through the basic of ...</p>',
'<a name="hello_world"/>',
'<h2>Hello, World</h2>',
'<a name="adding_logging"/>',
'<h2>Adding Logging</h2>',
]))
def test_link_api(self):
self.assertEqual(link_test_file('api'), html_lines('API Reference', [
'<h1>API Reference</h1>',
'<p>',
'Before reading this, try reading our ',
'<a href="tutorial.html>',
'Beginners Tutorial',
'</a>',
'!',
'</p>',
'<a name="handy_functions"/>',
'<h2>Handy Functions</h2>',
'<a name="obscure_classes"/>',
'<h2>Obscure Classes</h2>',
]))
def test_link_index(self):
self.assertEqual(link_test_file('index.rst'), html_lines('Table of Contents', [
'<h1>Table of Contents</h1>',
'<ul>',
'<ul>',
'<li><a class="toc-h1" href="install.html">Installation</a></li>',
'</ul>',
'<ul>',
'<li>',
'<a class="toc-h1" href="tutorial.html">Beginners Tutorial</a>',
'<ul>',
'<li><a class="toc-h2" href="tutorial.html#hello_world">Hello, World</a></li>',
'<li><a class="toc-h2" href="tutorial.html#adding_logging">Adding Logging</a></li>',
'</ul>',
'</li>',
'</ul>',
'<ul>',
'<li>',
'<a class="toc-h1" href="api.html">API Reference</a>',
'<ul>',
'<li><a class="toc-h2" href="api.html#handy_functions">Handy Functions</a></li>',
'<li><a class="toc-h2" href="api.html#obscure_classes">Obscure Classes</a></li>',
'</ul>',
'</li>',
'</ul>',
'</ul>',
'<p>This is the main text.</p>',
]))
|
1635473
|
from abc import ABC, abstractmethod
from typing import Any, Awaitable, Callable, Dict, Optional, Type
AsyncFunc = Callable[..., Awaitable[Any]]
class ABCErrorHandler(ABC):
error_handlers: Dict[Type[BaseException], AsyncFunc]
undefined_error_handler: Optional[AsyncFunc]
@abstractmethod
def register_error_handler(
self, *error_types: Type[BaseException]
) -> Callable[[AsyncFunc], AsyncFunc]:
pass
@abstractmethod
def register_undefined_error_handler(self, handler: AsyncFunc) -> AsyncFunc:
pass
@abstractmethod
async def handle(self, error: BaseException) -> Any:
pass
@abstractmethod
def catch(self, func: AsyncFunc) -> AsyncFunc:
pass
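# A minimal concrete sketch (hedged: illustrative only, not a shipped
# implementation of this interface): dispatch to the first registered handler
# whose error type matches, falling back to the undefined-error handler.
class SimpleErrorHandler(ABCErrorHandler):
    def __init__(self) -> None:
        self.error_handlers = {}
        self.undefined_error_handler = None

    def register_error_handler(
        self, *error_types: Type[BaseException]
    ) -> Callable[[AsyncFunc], AsyncFunc]:
        def decorator(func: AsyncFunc) -> AsyncFunc:
            for error_type in error_types:
                self.error_handlers[error_type] = func
            return func
        return decorator

    def register_undefined_error_handler(self, handler: AsyncFunc) -> AsyncFunc:
        self.undefined_error_handler = handler
        return handler

    async def handle(self, error: BaseException) -> Any:
        for error_type, handler in self.error_handlers.items():
            if isinstance(error, error_type):
                return await handler(error)
        if self.undefined_error_handler is not None:
            return await self.undefined_error_handler(error)
        raise error

    def catch(self, func: AsyncFunc) -> AsyncFunc:
        async def wrapper(*args: Any, **kwargs: Any) -> Any:
            try:
                return await func(*args, **kwargs)
            except BaseException as error:
                return await self.handle(error)
        return wrapper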
|
1635479
|
import sys
from unittest.mock import call, create_autospec
import pytest
try:
from rich._win32_console import LegacyWindowsTerm, WindowsCoordinates
from rich._windows_renderer import legacy_windows_render
except ImportError:
# These modules can only be imported on Windows
pass
from rich.segment import ControlType, Segment
from rich.style import Style
pytestmark = pytest.mark.skipif(sys.platform != "win32", reason="windows only")
@pytest.fixture
def legacy_term_mock():
return create_autospec(LegacyWindowsTerm)
def test_text_only(legacy_term_mock):
text = "Hello, world!"
buffer = [Segment(text)]
legacy_windows_render(buffer, legacy_term_mock)
legacy_term_mock.write_text.assert_called_once_with(text)
def test_text_multiple_segments(legacy_term_mock):
buffer = [Segment("Hello, "), Segment("world!")]
legacy_windows_render(buffer, legacy_term_mock)
assert legacy_term_mock.write_text.call_args_list == [
call("Hello, "),
call("world!"),
]
def test_text_with_style(legacy_term_mock):
text = "Hello, world!"
style = Style.parse("black on red")
buffer = [Segment(text, style)]
legacy_windows_render(buffer, legacy_term_mock)
legacy_term_mock.write_styled.assert_called_once_with(text, style)
def test_control_cursor_move_to(legacy_term_mock):
buffer = [Segment("", None, [(ControlType.CURSOR_MOVE_TO, 20, 30)])]
legacy_windows_render(buffer, legacy_term_mock)
legacy_term_mock.move_cursor_to.assert_called_once_with(
WindowsCoordinates(row=29, col=19)
)
def test_control_carriage_return(legacy_term_mock):
buffer = [Segment("", None, [(ControlType.CARRIAGE_RETURN,)])]
legacy_windows_render(buffer, legacy_term_mock)
legacy_term_mock.write_text.assert_called_once_with("\r")
def test_control_home(legacy_term_mock):
buffer = [Segment("", None, [(ControlType.HOME,)])]
legacy_windows_render(buffer, legacy_term_mock)
legacy_term_mock.move_cursor_to.assert_called_once_with(WindowsCoordinates(0, 0))
@pytest.mark.parametrize(
"control_type, method_name",
[
(ControlType.CURSOR_UP, "move_cursor_up"),
(ControlType.CURSOR_DOWN, "move_cursor_down"),
(ControlType.CURSOR_FORWARD, "move_cursor_forward"),
(ControlType.CURSOR_BACKWARD, "move_cursor_backward"),
],
)
def test_control_cursor_single_cell_movement(
legacy_term_mock, control_type, method_name
):
buffer = [Segment("", None, [(control_type,)])]
legacy_windows_render(buffer, legacy_term_mock)
getattr(legacy_term_mock, method_name).assert_called_once_with()
@pytest.mark.parametrize(
"erase_mode, method_name",
[
(0, "erase_end_of_line"),
(1, "erase_start_of_line"),
(2, "erase_line"),
],
)
def test_control_erase_line(legacy_term_mock, erase_mode, method_name):
buffer = [Segment("", None, [(ControlType.ERASE_IN_LINE, erase_mode)])]
legacy_windows_render(buffer, legacy_term_mock)
getattr(legacy_term_mock, method_name).assert_called_once_with()
def test_control_show_cursor(legacy_term_mock):
buffer = [Segment("", None, [(ControlType.SHOW_CURSOR,)])]
legacy_windows_render(buffer, legacy_term_mock)
legacy_term_mock.show_cursor.assert_called_once_with()
def test_control_hide_cursor(legacy_term_mock):
buffer = [Segment("", None, [(ControlType.HIDE_CURSOR,)])]
legacy_windows_render(buffer, legacy_term_mock)
legacy_term_mock.hide_cursor.assert_called_once_with()
def test_control_cursor_move_to_column(legacy_term_mock):
buffer = [Segment("", None, [(ControlType.CURSOR_MOVE_TO_COLUMN, 3)])]
legacy_windows_render(buffer, legacy_term_mock)
legacy_term_mock.move_cursor_to_column.assert_called_once_with(2)
def test_control_set_terminal_window_title(legacy_term_mock):
buffer = [Segment("", None, [(ControlType.SET_WINDOW_TITLE, "Hello, world!")])]
legacy_windows_render(buffer, legacy_term_mock)
legacy_term_mock.set_title.assert_called_once_with("Hello, world!")
|
1635505
|
from typing import List
from .docutils import LATEX_TAG_BEGIN_FORMAT, LATEX_TAG_END_FORMAT
from .snippetExceptions import SnippetException
class Snippet:
BEGIN_TEMPLATE = LATEX_TAG_BEGIN_FORMAT
END_TEMPLATE = LATEX_TAG_END_FORMAT
INDENT = " "
    def __init__(self, name: str, tags=None):
        self.__name = name
        self.content = ""
        self._add_tags(self.BEGIN_TEMPLATE, tags or [])
        self.open = True
@property
def name(self) -> str:
"""
name of snippet
:return: str
"""
return self.__name
def add_content(self, content_to_add: str) -> None:
"""
add new line to content
:param content_to_add: content added
:return: None
"""
if self.is_close():
raise SnippetException(f"Snippet {self.name} already closed.")
self.content += content_to_add
def _add_tags(self, template: str, tags: List[str], reverse: bool = False):
indent_func = lambda i: self.INDENT * i
if reverse:
max_i = len(tags) - 1
indent_func = lambda i: self.INDENT * (max_i - i)
tags = reversed(tags)
self.content += "\n".join(indent_func(i) + template.format(tag=tag) for i, tag in enumerate(tags)) + "\n"
def close(self, tags: List[str]) -> None:
"""
close snippet
"""
self._add_tags(self.END_TEMPLATE, tags, reverse=True)
self.open = False
def is_close(self) -> bool:
"""
True if Snippet is closed
:return: bool
"""
return not self.open
def __str__(self) -> str:
return self.content
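# A usage sketch (hedged: the exact markup depends on LATEX_TAG_BEGIN_FORMAT
# and LATEX_TAG_END_FORMAT from .docutils):
#
#     snippet = Snippet("intro", tags=["doc", "intro"])
#     snippet.add_content("Some LaTeX content\n")
#     snippet.close(tags=["doc", "intro"])
#     assert snippet.is_close()
#     print(snippet)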
|
1635621
|
def rate_password(username, password):
lc = uc = num = spc = oth = usn = 0
for n in password:
if n.islower(): lc = 2
if n.isupper(): uc = 3
if n.isdigit(): num = 5
if n.isspace(): spc = 5
if not n.isalnum(): oth = 10
    if password.find(username) > -1: usn = -15
score = len(password) + lc + uc + num + spc + oth + usn
if score < 0: score = 0
return score
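# Worked examples (the scores follow directly from the rules above):
#   rate_password('alice', 'alice123')    -> 8 + 2 (lower) + 5 (digit) - 15 (username) = 0
#   rate_password('alice', 'S3cure pa$$') -> 11 + 2 + 3 + 5 + 5 (space) + 10 (symbol) = 36
if __name__ == '__main__':
    assert rate_password('alice', 'alice123') == 0
    assert rate_password('alice', 'S3cure pa$$') == 36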
|
1635634
|
import logging
logging.basicConfig(level=logging.DEBUG)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
fgp = __import__('FaST-GP')
ds = __import__('data_simulation')
def get_coords(index):
coords = pd.DataFrame(index=index)
coords['x'] = index.str.split('x').str.get(0).map(float)
coords['y'] = index.str.split('x').str.get(1).map(float)
return coords
def main():
df = pd.read_csv('data/Rep12_MOB_1.csv', index_col=0)
sample_info = get_coords(df.index)
# Run workflow
X = sample_info[['x', 'y']]
dfm = np.log10(df + 1)
l = 10
results = fgp.dyn_de(X, dfm, lengthscale=l, num=32)
plt.scatter(results['max_delta'], results['max_ll'], c='k')
plt.xscale('log')
plt.xlim(np.exp(-11), np.exp(11))
    plt.xlabel(r'$\delta$')
plt.ylabel('Maximum Log Likelihood')
plt.title('lengthscale: {}'.format(l))
plt.savefig('fastgp-fits.png', bbox_inches='tight')
print(results.sort_values('max_delta').head(20))
def plot_LL_curves():
# df = pd.read_csv('data/Rep12_MOB_3.csv', index_col=0)
# sample_info = get_coords(df.index)
# X = sample_info[['x', 'y']]
# dfm = np.log10(df + 1).sample(10, axis=1)
l = 10
X, dfm, true_vals = ds.make_ls_data(l, 250, 10)
true_vals['delta'] = true_vals['s2_e'] / true_vals['s2_t']
K = fgp.SE_kernel(X, l)
U, S = fgp.factor(K)
UT1 = fgp.get_UT1(U)
n, G = dfm.shape
for g in range(G):
y = dfm.iloc[:, g]
UTy = fgp.get_UTy(U, y)
LLs = []
delta_range = np.logspace(base=np.e, start=-10, stop=10, num=32)
max_ll = -np.inf
max_delta = np.nan
for delta in delta_range:
cur_ll = fgp.LL(delta, UTy, UT1, S, n)
LLs.append(cur_ll)
if cur_ll > max_ll:
max_ll = cur_ll
max_delta = delta
        plt.subplot(int(np.ceil(G / 2.)), 2, g + 1)
plt.plot(delta_range, LLs, marker='o', markeredgecolor='w', markersize=2, markeredgewidth=1, c='k')
plt.scatter([max_delta], [max_ll], marker='v', c='r', edgecolor='none', zorder=5)
plt.title(dfm.columns[g])
plt.axvline(true_vals.iloc[g, -1], color='r')
plt.xscale('log')
plt.xlim(np.exp(-11), np.exp(11))
plt.savefig('example_grids.png')
def opt_simulation():
l = 10
logging.info('Sampling ground truth data...')
X, dfm, true_vals = ds.make_ls_data(10, 500, 500)
logging.info('Done')
results = fgp.dyn_de(X, dfm, lengthscale=l, num=32)
true_vals['delta'] = true_vals['s2_e'] / true_vals['s2_t']
plt.subplot(3, 1, 1)
plt.scatter(results['max_delta'], true_vals['delta'], c='k', label=None)
plt.xscale('log')
plt.xlim(np.exp(-11.), np.exp(11.))
plt.yscale('log')
plt.ylim(np.exp(-11.), np.exp(11.))
plt.plot([1e-4, 1e4], [1e-4, 1e4], c='r', label='$ x = y $ line')
plt.legend(loc='upper left')
    plt.ylabel(r'Ground truth $ \delta $')
plt.subplot(3, 1, 2)
plt.scatter(results['max_s2_t_hat'], true_vals['s2_t'], c='k')
plt.xscale('log')
plt.xlim(np.exp(-6.), np.exp(6.))
plt.yscale('log')
plt.ylim(np.exp(-6.), np.exp(6.))
plt.plot([1e-2, 1e2], [1e-2, 1e2], c='r')
    plt.ylabel(r'Ground truth $ \sigma_t^2 $')
plt.subplot(3, 1, 3)
plt.scatter(results['max_mu_hat'], true_vals['mu'], c='k')
plt.xlim(-1, 6)
plt.ylim(-1, 6)
plt.plot([0, 5], [0, 5], c='r')
    plt.ylabel(r'Ground truth $ \mu $')
plt.xlabel('Inferred Value')
plt.savefig('simulation_accuracy.png')
if __name__ == '__main__':
opt_simulation()
# plot_LL_curves()
# main()
|
1635640
|
from random import randint, seed
seed(10) # Set random seed to make examples reproducible
random_dictionary = {i: randint(1, 10) for i in range(5)}
print(random_dictionary) # {0: 10, 1: 1, 2: 7, 3: 8, 4: 10}
|
1635660
|
from . import views
def register_in(router):
router.register(r'slurm-jobs', views.JobViewSet, basename='slurm-job')
|
1635718
|
import taichi as ti
from utils.tools import Pair
from pcg_method import PCG_Solver
import numpy as np
@ti.data_oriented
class Thin_Flame:
def __init__(self , resolution = 512 ) :
shape = (resolution , resolution)
self._sd_cur = ti.var(dt = ti.f32 , shape= shape)
self._sd_nxt = ti.var(dt = ti.f32 , shape= shape)
self._velocity_cur = ti.Vector(2 ,dt = ti.f32 , shape= shape)
self._velocity_nxt = ti.Vector(2 ,dt = ti.f32 , shape= shape)
self._pressure_cur = ti.var(dt = ti.f32 , shape= shape)
self._pressure_nxt = ti.var(dt = ti.f32 , shape= shape)
self.velocity_div = ti.var(dt = ti.f32 , shape= shape)
self.pixel = ti.Vector( 3 , dt = ti.f32 , shape= shape)
self.density_burnt = 1.2
self.density_fuel = 1.0
self.sign_dis = Pair(self._sd_cur , self._sd_nxt)
self.pressure = Pair(self._pressure_cur , self._pressure_nxt)
self.velocity = Pair(self._velocity_cur , self._velocity_nxt)
self.resolution = resolution
self.RK = 3
self.dx = 1.0
self.dt = 0.04
        self.speed = 1.0  # flame front propagation speed
self.direction = ti.Vector([0.0 , 1.0])
self.source_pos_x = range(int(resolution /2) - 10 ,int( resolution/2) + 10)
self.out_momentum = ti.Vector([0.0, 5000.0])
self.dcolor = ti.Vector(list(np.random.rand(3) * 0.7 + 0.3) )
self.clamp_sampler = Thin_Flame.Clamping_Sampler(resolution)
self.extra_sampler = Thin_Flame.Extrapolation_Sampler(resolution)
@ti.data_oriented
class Clamping_Sampler:
def __init__(self , res ):
self.resolution = res
@ti.func
def sample(self , field , u , v):
i = max(0, min(self.resolution - 1, int(u)))
j = max(0, min(self.resolution - 1, int(v)))
return field[i, j]
@ti.data_oriented
class Extrapolation_Sampler:
def __init__(self , res):
self.resolution = res
@ti.func
def sample(self, field , u , v):
i = max(0, min(self.resolution - 1, int(u)))
j = max(0, min(self.resolution - 1, int(v)))
            return field[i,j] - ti.abs(v - j) if int(v) < 0 else field[i,j]
@ti.func
def density(self , u , v):
return self.density_burnt if self.sign_dis.curr[u ,v] <= 0 else self.density_fuel
@ti.func
def lerp(self , v1 , v2 , frac):
return v1 + frac * (v2 - v1)
@ti.func
def sample(self , field , u , v):
i = max(0, min(self.resolution - 1, int(u)))
j = max(0, min(self.resolution - 1, int(v)))
return field[i, j]
@ti.func
def bilinear_interpolate(self , field , u ,v , sampler) :
s, t = u - 0.5, v - 0.5
iu, iv = int(s), int(t)
fu, fv = s - iu, t - iv
a = sampler.sample(field, iu + 0.5, iv + 0.5)
b = sampler.sample(field, iu + 1.5, iv + 0.5)
c = sampler.sample(field, iu + 0.5, iv + 1.5)
d = sampler.sample(field, iu + 1.5, iv + 1.5)
return self.lerp(self.lerp(a, b, fu), self.lerp(c, d, fu), fv)
@ti.func
def backtrace(self , vf , u,v , dt ):
p = ti.Vector([u,v]) + 0.5
if ti.static(self.RK == 1) :
p -= dt * vf[u,v] #RK1
elif ti.static(self.RK == 2):
mid = p - 0.5 * dt * vf[u,v]
p -= dt * self.sample(vf, mid[0] , mid[1])
elif ti.static(self.RK == 3) :
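            # Ralston's third-order scheme: sample the velocity at three points
            # along the characteristic and combine with weights 2/9, 1/3, 4/9.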
v1 = vf[u,v]
p1 = p - 0.5 * dt * v1
v2 = self.sample(vf , p1[0] , p1[1])
            p2 = p - 0.75 * dt * v2
v3 = self.sample(vf , p2[0] , p2[1])
p -= dt * ( 2/9 * v1 + 1/3 * v2 + 4/9 * v3 )
else :
ti.static_print(f"unsupported order for RK{self.RK}")
return p
@ti.func
def semi_lagrange(self , vf , field , next_field , dt , sampler):
for i , j in vf :
p = self.backtrace( vf , i , j , dt )
next_field[i,j] = self.bilinear_interpolate(field , p[0], p[1] , sampler)
@ti.kernel
# @ti.func
def advection(self , vf: ti.template() , field : ti.template() , sampler: ti.template()):
self.semi_lagrange(vf, field.curr , field.next, self.dt , sampler)
@ti.kernel
def momentum(self, vf : ti.template()):
# TODO effect velocity by density div on flame front
# for i , j in self.sign_dis.curr:
# vf[i , j] = vf
# inv_r = ti.static(4.0 / self.resolution)
res = ti.static(int(self.resolution/2))
# source
for i , j in ti.ndrange((res - 10 , res + 10 ) , (0 , 20)):
# dir_v = ti.Vector([ res - i, 30]).normalized()
vf[i,j] += self.dt * self.out_momentum
# for j in range(self.resolution - 1):
# for i in ti.static(self.source_pos_x) :
# vf[i, j] += self.dt * self.out_momentum * ti.exp( - j * inv_r)
@ti.kernel
def divergence_vel(self , field:ti.template()):
half_inv_dx = ti.static(0.5 / self.dx)
for i , j in field:
vl = self.sample(field, i - 1, j)[0]
vr = self.sample(field, i + 1, j)[0]
vb = self.sample(field, i, j - 1)[1]
vt = self.sample(field, i, j + 1)[1]
vc = self.sample(field, i, j)
# edge check
if i == 0:
vl = -vc[0]
elif i == self.resolution - 1:
vr = -vc[0]
if j == 0:
vb = -vc[1]
elif j == self.resolution - 1:
vt = -vc[1]
# div_v
div = (vr - vl + vt - vb) * half_inv_dx
self.velocity_div[i, j] = div
# @ti.kernel
def projection(self , v_cur : ti.template(), p : ti.template() ):
self.divergence_vel(v_cur)
self.jacobi(p)
# @ti.kernel
def jacobi(self , p:ti.template()) :
for _ in ti.static(range(200)):
self.jacobi_step(p.curr , p.next)
p.swap()
@ti.kernel
def jacobi_step(self , p_cur:ti.template() , p_nxt:ti.template()):
dx_sqr = ti.static(self.dx * self.dx)
for i , j in p_cur :
# pl = p_cur[i - 1 , j]
# pr = p_cur[i + 1 , j]
# pt = p_cur[i , j + 1]
# pb = p_cur[i , j - 1]
pl = self.sample(p_cur , i - 1 , j)#p_cur[i-1, j]#self.sample(p_cur , i - 1 , j)
pr = self.sample(p_cur , i + 1 , j)#p_cur[i+1 ,j]#
pt = self.sample(p_cur , i , j + 1)#p_cur[i, j+1]#
pb = self.sample(p_cur , i , j - 1)#p_cur[i ,j-1]#
p_nxt[i,j] = 0.25 * (pl + pr + pt + pb - dx_sqr * self.velocity_div[i,j])
@ti.kernel
def update_v(self , vf : ti.template() , pf:ti.template()):
half_inv_dx = ti.static( 0.5 / self.dx )
for i,j in vf :
pl = self.sample(pf, i - 1, j)
pr = self.sample(pf, i + 1, j)
pb = self.sample(pf, i, j - 1)
pt = self.sample(pf, i, j + 1)
vf[i, j] = self.sample(vf, i, j) - half_inv_dx * ti.Vector([pr - pl, pt - pb])
@ti.kernel
def update_distance(self, sdf : ti.template() , vf : ti.template()):
# inv_r = ti.static(4.0 / (self.resolution / 20.0)**2)
res = ti.static(int(self.resolution/2))
for i ,j in ti.ndrange((res - 10 , res + 10) , (0 , 20)) :
# dx , dy = self.resolution / 2 - i , j
# d2 = dx * dx + dy * dy
sdf[i , j] = -1.0 #ti.exp(- d2 * inv_r) * -10.0
for i, j in sdf:
# dx , dy = self.resolution / 2 - i , j
# d2 = dx * dx + dy * dy
# sdf[i , j] -= ti.exp(- d2 * inv_r) * 10.0
# sdf[i , j] += self
sdf[i , j] += self.dt * self.speed #(self.speed - vf[i,j].norm())
@ti.kernel
def init_level_set(self):
sdf = ti.static(self.sign_dis.curr)
inv_r = ti.static(4.0 / (self.resolution / 20.0)**2)
for i, j in sdf:
dx , dy = self.resolution / 2 - i , j
d2 = dx * dx + dy * dy
sdf[i , 0] = ti.exp(- d2 * inv_r) * 10.0
# @ti.kernel
def init(self):
self.velocity.curr.fill([0.0,0.0])
self.pressure.curr.fill(0.0)
self.sign_dis.curr.fill(10.0)
self.pixel.fill([0.0 , 0.0 , 0.0 ])
self.init_level_set()
@ti.kernel
def render(self , sdf: ti.template()):
zero = ti.Vector([0.0 , 0.0 , 0.0])
for indices in ti.grouped(sdf):
self.pixel[indices] = self.dcolor * ti.exp(1.0/ (sdf[indices] - 0.01)) if sdf[indices] < 0.0 else zero
def step(self):
# advection
self.advection(self.velocity.curr , self.velocity , self.clamp_sampler)
self.advection(self.velocity.curr , self.sign_dis , self.clamp_sampler)
self.velocity.swap()
self.sign_dis.swap()
# externel force
self.momentum(self.velocity.curr)
# projection
self.projection(self.velocity.curr , self.pressure)
# update
self.update_v(self.velocity.curr , self.pressure.curr)
self.update_distance(self.sign_dis.curr , self.velocity.curr)
self.render(self.sign_dis.curr)
def main():
resolution = 512
ti.init(arch=ti.gpu , kernel_profiler = True)
gui = ti.GUI("Thin Flame" , res= resolution)
solver = Thin_Flame(resolution)
solver.init()
while gui.running:
solver.step()
gui.set_image(solver.pixel)
gui.show()
if __name__ == '__main__':
main()
|
1635723
|
from mal import Anime
from bs4 import BeautifulSoup
import numpy
import requests
def animerec():
p = numpy.random.randint(16000,size=1)
id = int(p[0])
# for i in range(id,16000):
try:
anime = Anime(id)
title = str(anime.title)
titlef = title.replace(' ','_')
titlef = titlef.replace(':','_')
url = 'https://myanimelist.net/anime/'+str(id)+'/'+titlef+'?q=cow&cat=anime'
get = requests.get(url)
site = get.text
soup = BeautifulSoup(site, 'html.parser')
#animeimage
img_tags = soup.find("div",attrs = {'style' : 'text-align: center;'})
x = img_tags.find('a')
y = x.findAll('img')
        fin = []
        # only the first image is used
        link = y[0]['data-src']
#animesynopsis
syn_tags= soup.find('p',attrs ={'itemprop' : 'description'}).text
fin.append(link)
fin.append(title)
fin.append(syn_tags)
return fin
except ValueError:
        return animerec()
animerec()
|
1635739
|
def extractBorahae7WordpressCom(item):
'''
Parser for 'borahae7.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Prophet', 'The Strongest Prophet Who Had Trained 100 Heroes is Admired By His Apprentices Around The World Even As An Adventurer', 'translated'),
('fbcbtr', 'Fiancee Be Chosen By The Ring', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
if item['tags'] == ['Uncategorized']:
titlemap = [
('FIANCEE BE CHOSEN BY THE RING — CHAPTER ', 'Fiancee Be Chosen By The Ring', 'translated'),
('THE STRONGEST PROPHET — CH', 'The Strongest Prophet Who Has Trained 100 Heroes is Admired By His Apprentices Around The World Even As An Adventurer', 'translated'),
('Tensei Shoujo no Rirekisho', 'Tensei Shoujo no Rirekisho', 'translated'),
('Master of Dungeon', 'Master of Dungeon', 'oel'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
1635755
|
class Meta(type):
def __add__(self, x: str) -> str:
return 'a' + x
class C(metaclass=Meta):
...
print(C + 'x') # Okay
print(C + 1) # error: Unsupported operand types for + ("Type[C]" and "int")
|
1635809
|
from numpy import sign
def comp_rot_dir(self):
"""Compute the rotation direction of the fundamental magnetic field induced by the winding
WARNING: rot_dir = -1 to have positive rotor rotating direction, i.e. rotor position moves towards positive angle
Parameters
----------
self : LamSlotMultiWind
A LamSlotMultiWind object
Returns
-------
rot_dir : int
-1 or +1
"""
p = self.get_pole_pair_number()
# Compute unit mmf
MMF, _ = self.comp_mmf_unit(
Nt=20 * p, Na=20 * p
) # 20 points per pole over time and space is enough to capture rotating direction of fundamental mmf
# Extract fundamental from unit mmf
result_p = MMF.get_harmonics(1, "freqs", "wavenumber=" + str(p))
result_n = MMF.get_harmonics(1, "freqs", "wavenumber=" + str(-p))
if result_p["Magnitude"][0] > result_n["Magnitude"][0]:
result = result_p
else:
result = result_n
# Get frequency and wavenumber of fundamental
f = result["freqs"][0]
r = result["wavenumber"][0]
# Rotating direction is the sign of the mechanical speed of the magnetic field fundamental, i.e frequency over wavenumber
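    # e.g. a fundamental found at f = 50 Hz with wavenumber r = +p gives
    # rot_dir = +1, while the same frequency at r = -p gives rot_dir = -1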
rot_dir = int(sign(f / r))
return rot_dir
|
1635830
|
import numpy as np
import pytest
import random
import torch
import densetorch as dt
NUMBER_OF_PARAMETERS_WITH_21_CLASSES = {
"152": 61993301,
"101": 46349653,
"50": 27357525,
"mbv2": 3284565,
}
NUMBER_OF_ENCODER_DECODER_LAYERS = {
"152": (465, 28),
"101": (312, 28),
"50": (159, 28),
"mbv2": (156, 27),
}
def get_dummy_input_tensor(height, width, channels=3, batch=4):
input_tensor = torch.FloatTensor(batch, channels, height, width).float()
return input_tensor
def get_network_output_shape(h, w, output_stride=4):
return np.ceil(h / output_stride), np.ceil(w / output_stride)
@pytest.fixture()
def num_classes():
return random.randint(1, 40)
@pytest.fixture()
def num_channels():
return random.randint(1, 40)
@pytest.fixture()
def input_height():
return random.randint(33, 320)
@pytest.fixture()
def input_width():
return random.randint(33, 320)
@pytest.mark.parametrize(
"enc_fn", [dt.nn.xception65, dt.nn.mobilenetv2, dt.nn.resnet18]
)
def test_encoders(enc_fn, input_height, input_width):
device = "cuda" if torch.cuda.is_available() else "cpu"
encoder = enc_fn(pretrained=False, return_idx=0).to(device)
with torch.no_grad():
input_tensor = get_dummy_input_tensor(
height=input_height, width=input_width
).to(device)
output = encoder(input_tensor)
assert len(output) == 1, f"Expected a single output, got {len(output):d}"
assert output[0].size(0) == input_tensor.size(
0
), f"Batch size mismatch, got {output[0].size(0):d}, expected {input_tensor.size(0):d}"
assert isinstance(output[0], torch.Tensor), "Expected a torch.Tensor as output"
@pytest.mark.parametrize(
"dec_fn", [dt.nn.DLv3plus, dt.nn.MTLWRefineNet, dt.nn.LWRefineNet]
)
def test_decoders(dec_fn, input_height, input_width, num_classes, num_channels):
device = "cuda" if torch.cuda.is_available() else "cpu"
decoder = dec_fn(
input_sizes=num_channels, num_classes=num_classes, collapse_ind=0
).to(device)
with torch.no_grad():
input_tensor = get_dummy_input_tensor(
height=input_height, width=input_width, channels=num_channels,
).to(device)
output = decoder(input_tensor)
if isinstance(output, list):
assert len(output) == 1, f"Expected a single output, got {len(output):d}"
output = output[0]
assert isinstance(output, torch.Tensor), "Expected a torch.Tensor as output"
    assert output.size(0) == input_tensor.size(
        0
    ), f"Batch size mismatch, got {output.size(0):d}, expected {input_tensor.size(0):d}"
assert (
output.size(1) == num_classes
), f"Channel size mismatch, got {output.size(1):d}, expected {num_classes:d}"
assert output.size(2) == input_tensor.size(
2
), f"Height size mismatch, got {output.size(2):d}, expected {input_tensor.size(2):d}"
assert output.size(3) == input_tensor.size(
3
), f"Width size mismatch, got {output.size(3):d}, expected {input_tensor.size(3):d}"
|
1635843
|
import numpy as np
from pyecsca.sca import (
align_correlation,
align_peaks,
align_sad,
align_dtw_scale,
align_dtw,
Trace,
InspectorTraceSet,
)
from .utils import Plottable, slow
class AlignTests(Plottable):
def test_align(self):
first_arr = np.array(
[10, 64, 120, 64, 10, 10, 10, 10, 10], dtype=np.dtype("i1")
)
second_arr = np.array([10, 10, 10, 10, 50, 80, 50, 20], dtype=np.dtype("i1"))
third_arr = np.array([70, 30, 42, 35, 28, 21, 15, 10, 5], dtype=np.dtype("i1"))
a = Trace(first_arr)
b = Trace(second_arr)
c = Trace(third_arr)
result, offsets = align_correlation(
a,
b,
c,
reference_offset=1,
reference_length=3,
max_offset=4,
min_correlation=0.65,
)
self.assertIsNotNone(result)
self.assertEqual(len(result), 2)
np.testing.assert_equal(result[0].samples, first_arr)
np.testing.assert_equal(
result[1].samples,
np.array([10, 50, 80, 50, 20, 0, 0, 0], dtype=np.dtype("i1")),
)
self.assertEqual(len(offsets), 2)
self.assertEqual(offsets[0], 0)
self.assertEqual(offsets[1], 3)
@slow
def test_large_align(self):
example = InspectorTraceSet.read("test/data/example.trs")
result, _ = align_correlation(
*example, reference_offset=100000, reference_length=20000, max_offset=15000
)
self.assertIsNotNone(result)
@slow
def test_large_dtw_align(self):
example = InspectorTraceSet.read("test/data/example.trs")
result = align_dtw(*example[:5])
self.assertIsNotNone(result)
def test_peak_align(self):
first_arr = np.array(
[10, 64, 14, 120, 15, 30, 10, 15, 20, 15, 15, 10, 10], dtype=np.dtype("i1")
)
second_arr = np.array(
[10, 10, 10, 10, 90, 40, 50, 20, 10, 17, 16, 10], dtype=np.dtype("i1")
)
a = Trace(first_arr)
b = Trace(second_arr)
result, _ = align_peaks(
a, b, reference_offset=2, reference_length=5, max_offset=3
)
self.assertEqual(np.argmax(result[0].samples), np.argmax(result[1].samples))
def test_sad_align(self):
first_arr = np.array(
[10, 64, 14, 120, 15, 30, 10, 15, 20, 15, 15, 10, 10], dtype=np.dtype("i1")
)
second_arr = np.array(
[10, 10, 90, 40, 50, 20, 10, 17, 16, 10, 10], dtype=np.dtype("i1")
)
a = Trace(first_arr)
b = Trace(second_arr)
result, _ = align_sad(
a, b, reference_offset=2, reference_length=5, max_offset=3
)
self.assertEqual(len(result), 2)
def test_dtw_align_scale(self):
first_arr = np.array(
[10, 64, 14, 120, 15, 30, 10, 15, 20, 15, 15, 10, 10, 8, 10, 12, 10, 13, 9],
dtype=np.dtype("f2"),
)
second_arr = np.array(
[10, 10, 60, 40, 90, 20, 10, 17, 16, 10, 10, 10, 10, 10, 17, 12, 10],
dtype=np.dtype("f2"),
)
third_arr = np.array(
[10, 30, 20, 21, 15, 8, 10, 37, 21, 77, 20, 28, 25, 10, 9, 10, 15, 9, 10],
dtype=np.dtype("f2"),
)
a = Trace(first_arr)
b = Trace(second_arr)
c = Trace(third_arr)
result = align_dtw_scale(a, b, c)
self.assertEqual(np.argmax(result[0].samples), np.argmax(result[1].samples))
self.assertEqual(np.argmax(result[1].samples), np.argmax(result[2].samples))
self.plot(*result)
result_other = align_dtw_scale(a, b, c, fast=False)
self.assertEqual(
np.argmax(result_other[0].samples), np.argmax(result_other[1].samples)
)
self.assertEqual(
np.argmax(result_other[1].samples), np.argmax(result_other[2].samples)
)
self.plot(*result_other)
def test_dtw_align(self):
first_arr = np.array(
[10, 64, 14, 120, 15, 30, 10, 15, 20, 15, 15, 10, 10, 8, 10, 12, 10, 13, 9],
dtype=np.dtype("i1"),
)
second_arr = np.array(
[10, 10, 60, 40, 90, 20, 10, 17, 16, 10, 10, 10, 10, 10, 17, 12, 10],
dtype=np.dtype("i1"),
)
third_arr = np.array(
[10, 30, 20, 21, 15, 8, 10, 47, 21, 77, 20, 28, 25, 10, 9, 10, 15, 9, 10],
dtype=np.dtype("i1"),
)
a = Trace(first_arr)
b = Trace(second_arr)
c = Trace(third_arr)
result = align_dtw(a, b, c)
self.assertEqual(np.argmax(result[0].samples), np.argmax(result[1].samples))
self.assertEqual(np.argmax(result[1].samples), np.argmax(result[2].samples))
self.plot(*result)
result_other = align_dtw(a, b, c, fast=False)
self.assertEqual(
np.argmax(result_other[0].samples), np.argmax(result_other[1].samples)
)
self.assertEqual(
np.argmax(result_other[1].samples), np.argmax(result_other[2].samples)
)
self.plot(*result_other)
|
1635845
|
from matplotlib.testing import setup
import numpy as np
import numpy.testing as npt
import matplotlib.pyplot as plt
import matplotlib as mpl
import packaging.version
import pytest
import animatplot as amp
from tests.tools import animation_compare
from animatplot.blocks import Block, Title
setup()
class TestTitleBlock:
def test_list_of_str(self):
labels = ['timestep 0', 'timestep 1']
result = Title(labels)
assert labels == result.titles
assert len(result) == 2
def test_invalid_input(self):
with pytest.raises(TypeError):
Title(0)
with pytest.raises(TypeError):
Title([6, 7])
def test_format_str(self):
actual = Title('timestep {num}', num=[1, 2]).titles
assert actual == ['timestep 1', 'timestep 2']
actual = Title('timestep {num}', num=[1]).titles
assert actual == ['timestep 1']
def test_no_replacements(self):
actual = Title('Name').titles
assert actual == ['Name']
def test_multiple_replacements(self):
actual = Title('timestep {num}, max density {n}',
num=[1, 2], n=[500, 10]).titles
expected = ['timestep {num}, max density {n}'.format(num=1, n=500),
'timestep {num}, max density {n}'.format(num=2, n=10)]
assert actual == expected
def test_string_formatting(self):
actual = Title('timestep {values:.2f}', values=[5e7]).titles
assert actual == ['timestep 50000000.00']
def test_format_str_numpy_arrays(self):
actual = Title('timestep {num}', num=np.array([1, 2])).titles
assert actual == ['timestep 1', 'timestep 2']
# Hypothesis test that the strings are always formatted correctly?
def test_text(self):
# TODO test that the right type of object is produced?
title_block = Title('timestep {num}', num=[1, 2])
ax = plt.gca()
assert ax.get_title() == 'timestep 1'
title_block._update(1)
assert ax.get_title() == 'timestep 2'
plt.close('all')
def test_mpl_kwargs(self):
expected = {'loc': 'left', 'fontstyle': 'italic'}
actual = Title('timestep {num}', num=[1, 2], **expected)
assert actual._mpl_kwargs == expected
def assert_jagged_arrays_equal(x, y):
    for xi, yi in zip(x, y):
        npt.assert_equal(xi, yi)
class TestLineBlock:
def test_2d_inputs(self):
x = np.linspace(0, 1, 10)
t = np.linspace(0, 1, 5)
x_grid, t_grid = np.meshgrid(x, t)
y_data = np.sin(2 * np.pi * (x_grid + t_grid))
line_block = amp.blocks.Line(x_grid, y_data)
assert isinstance(line_block, amp.blocks.Line)
npt.assert_equal(line_block.x, x_grid)
npt.assert_equal(line_block.y, y_data)
assert len(line_block) == len(t)
assert isinstance(line_block.line, mpl.lines.Line2D)
xdata, ydata = line_block.line.get_data()
npt.assert_equal(xdata, x)
npt.assert_equal(ydata, y_data[0, :])
def test_update(self):
x = np.linspace(0, 1, 10)
t = np.linspace(0, 1, 5)
x_grid, t_grid = np.meshgrid(x, t)
y_data = np.sin(2 * np.pi * (x_grid + t_grid))
line_block = amp.blocks.Line(x_grid, y_data)
line_block._update(frame=1)
npt.assert_equal(line_block.line.get_xdata(), x)
npt.assert_equal(line_block.line.get_ydata(), y_data[1, :])
def test_constant_x(self):
x = np.linspace(0, 1, 10)
t = np.linspace(0, 1, 5)
x_grid, t_grid = np.meshgrid(x, t)
y_data = np.sin(2 * np.pi * (x_grid + t_grid))
line_block = amp.blocks.Line(x, y_data)
npt.assert_equal(line_block.line.get_xdata(), x)
npt.assert_equal(line_block.x[-1], x)
def test_no_x_input(self):
x = np.linspace(0, 1, 10)
t = np.linspace(0, 1, 5)
x_grid, t_grid = np.meshgrid(x, t)
y_data = np.sin(2 * np.pi * (x_grid + t_grid))
line_block = amp.blocks.Line(y_data)
expected_x = np.arange(10)
npt.assert_equal(line_block.line.get_xdata(), expected_x)
def test_list_input(self):
x_data = [np.array([1, 2, 3]), np.array([1, 2, 3])]
y_data = [np.array([5, 6, 7]), np.array([4, 2, 9])]
line_block = amp.blocks.Line(x_data, y_data)
npt.assert_equal(line_block.y, np.array([[5, 6, 7], [4, 2, 9]]))
npt.assert_equal(line_block.x, np.array([[1, 2, 3], [1, 2, 3]]))
def test_ragged_list_input(self):
x_data = [np.array([1, 2, 3]), np.array([1, 2, 3, 4])]
y_data = [np.array([5, 6, 7]), np.array([4, 2, 9, 10])]
with pytest.raises(ValueError) as err:
line_block = amp.blocks.Line(y_data)
assert "Must specify x data explicitly" in str(err)
line_block = amp.blocks.Line(x_data, y_data)
assert_jagged_arrays_equal(line_block.x, np.array(x_data))
assert_jagged_arrays_equal(line_block.y, np.array(y_data))
def test_bad_ragged_list_input(self):
        x_data = np.array([np.array([1, 2, 3]), np.array([1, 2, 3, 4])], dtype=object)
        y_data = np.array([np.array([5, 6, 7]), np.array([4, 2, 9, 10, 11])], dtype=object)
with pytest.raises(ValueError) as err:
line_block = amp.blocks.Line(x_data, y_data)
assert "x & y data must match" in str(err)
def test_bad_input(self):
# incorrect number of args
with pytest.raises(ValueError) as err:
amp.blocks.Line(1, 2, 3)
assert 'Invalid data arguments' in str(err.value)
with pytest.raises(ValueError) as err:
amp.blocks.Line()
assert 'Invalid data arguments' in str(err.value)
# No y data
with pytest.raises(ValueError) as err:
amp.blocks.Line(np.arange(5), None)
assert 'Must supply y data' in str(err.value)
with pytest.raises(ValueError) as err:
amp.blocks.Line(None)
assert 'Must supply y data' in str(err.value)
# y data not 2d
with pytest.raises(ValueError) as err:
amp.blocks.Line(np.arange(5), np.random.randn(5, 2, 2))
assert 'y data must be 2-dimensional' in str(err.value)
# 1d x doesn't match y
with pytest.raises(ValueError) as err:
amp.blocks.Line(np.arange(5), np.random.randn(4, 2))
assert 'dimensions of x must be compatible' in str(err.value)
# 2d x doesn't match y
with pytest.raises(ValueError) as err:
x = np.array([np.arange(5), np.arange(5)])
amp.blocks.Line(x, np.random.randn(4, 2), t_axis=1)
assert 'dimensions of x must be compatible' in str(err.value)
def test_kwarg_throughput(self):
x = np.array([np.arange(5), np.arange(5)])
line_block = amp.blocks.Line(x, np.random.randn(2, 5), t_axis=1,
alpha=0.5)
assert line_block.line.get_alpha() == 0.5
class TestComparisons:
@animation_compare(baseline_images='Blocks/Line', nframes=5)
def test_Line(self):
x = np.linspace(0, 2*np.pi, 20)
t = np.linspace(0, 2*np.pi, 5)
X, T = np.meshgrid(x, t)
Y = np.sin(X+T)
block = amp.blocks.Line(X, Y)
return amp.Animation([block])
@animation_compare(baseline_images='Blocks/Pcolormesh', nframes=3)
def test_Pcolormesh(self):
x = np.linspace(-2*np.pi, 2*np.pi, 100)
t = np.linspace(0, 2*np.pi, 3)
X, Y, T = np.meshgrid(x, x, t)
Z = np.sin(X**2+Y**2-T)
block = amp.blocks.Pcolormesh(X[:, :, 0], Y[:, :, 0], Z, t_axis=2)
return amp.Animation([block])
@animation_compare(baseline_images='Blocks/Pcolormesh_corner', nframes=3)
def test_Pcolormesh_corner_positions(self):
# Test with size of Z being (nx-1)*(ny-1) like matplotlib expects for 'flat'
# shading
x = np.linspace(-2*np.pi, 2*np.pi, 10)
t = np.linspace(0, 2*np.pi, 3)
X, Y, T = np.meshgrid(x, x, t)
Z = np.sin(X**2+Y**2-T)[:-1, :-1, :]
block = amp.blocks.Pcolormesh(X[:, :, 0], Y[:, :, 0], Z, t_axis=2)
return amp.Animation([block])
@pytest.mark.skipif(
packaging.version.parse(mpl.__version__) < packaging.version.parse("3.3.0"),
reason="matplotlib version too low - does not have shading='nearest'"
)
@animation_compare(baseline_images='Blocks/Pcolormesh_nearest', nframes=3)
def test_Pcolormesh_nearest(self):
x = np.linspace(-2*np.pi, 2*np.pi, 100)
t = np.linspace(0, 2*np.pi, 3)
X, Y, T = np.meshgrid(x, x, t)
Z = np.sin(X**2+Y**2-T)
block = amp.blocks.Pcolormesh(
X[:, :, 0], Y[:, :, 0], Z, t_axis=2, shading="nearest"
)
return amp.Animation([block])
@pytest.mark.skipif(
packaging.version.parse(mpl.__version__) < packaging.version.parse("3.3.0"),
reason="matplotlib version too low - does not have shading='nearest'"
)
@animation_compare(baseline_images='Blocks/Pcolormesh_auto', nframes=3)
    def test_Pcolormesh_auto(self):
x = np.linspace(-2*np.pi, 2*np.pi, 10)
t = np.linspace(0, 2*np.pi, 3)
X, Y, T = np.meshgrid(x, x, t)
Z = np.sin(X**2+Y**2-T)
block = amp.blocks.Pcolormesh(
X[:, :, 0], Y[:, :, 0], Z, t_axis=2, shading="auto"
)
return amp.Animation([block])
@pytest.mark.skipif(
packaging.version.parse(mpl.__version__) < packaging.version.parse("3.3.0"),
reason="matplotlib version too low - shading='gouraud' does not work before 3.3"
)
@animation_compare(baseline_images='Blocks/Pcolormesh_gouraud', nframes=1)
def test_Pcolormesh_gouraud(self):
x = np.linspace(-2*np.pi, 2*np.pi, 100)
t = np.linspace(0, 2*np.pi, 1)
X, Y, T = np.meshgrid(x, x, t)
Z = np.sin(X**2+Y**2-T)
block = amp.blocks.Pcolormesh(
X[:, :, 0], Y[:, :, 0], Z, t_axis=2, shading="gouraud"
)
return amp.Animation([block])
@animation_compare(baseline_images='Blocks/Imshow', nframes=3)
def test_Imshow(self):
x = np.linspace(0, 1, 10)
X, Y = np.meshgrid(x, x)
U = []
for i in range(3):
U.append(X**2+Y**2+i)
block = amp.blocks.Imshow(U)
return amp.Animation([block])
@animation_compare(baseline_images='Blocks/Quiver', nframes=4)
def test_Quiver(self):
x = np.linspace(0, 1, 10)
X, Y = np.meshgrid(x, x)
U, V = [], []
for i in range(4):
U.append(X**2+Y**2+i)
V.append(X**2+Y**2+i)
block = amp.blocks.Quiver(X, Y, U, V)
return amp.Animation([block])
@animation_compare(baseline_images='Blocks/Nuke', nframes=3)
def test_Nuke(self):
ax = plt.gca()
sizes = []
def animate(i):
sizes.append(i+1)
ax.set_aspect("equal")
ax.pie(sizes)
block = amp.blocks.Nuke(animate, length=3, ax=ax)
return amp.Animation([block])
|
1635876
|
import FWCore.ParameterSet.Config as cms
from Configuration.ProcessModifiers.enableSonicTriton_cff import enableSonicTriton
# collect all SonicTriton-related process modifiers here
allSonicTriton = cms.ModifierChain(enableSonicTriton)
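# A minimal usage sketch, kept as comments: in the standard CMSSW pattern, a
# ModifierChain is applied when the Process object is constructed. The process
# name and import path below are placeholders, not taken from this config.
#
#   from Configuration.ProcessModifiers.allSonicTriton_cff import allSonicTriton
#   process = cms.Process("TEST", allSonicTriton)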
|
1635893
|
class EditorAttribute(Attribute,_Attribute):
"""
Specifies the editor to use to change a property. This class cannot be inherited.
EditorAttribute()
EditorAttribute(typeName: str,baseTypeName: str)
EditorAttribute(typeName: str,baseType: Type)
EditorAttribute(type: Type,baseType: Type)
"""
def Equals(self,obj):
"""
Equals(self: EditorAttribute,obj: object) -> bool
Returns whether the value of the given object is equal to the current
System.ComponentModel.EditorAttribute.
obj: The object to test the value equality of.
Returns: true if the value of the given object is equal to that of the current object; otherwise,false.
"""
pass
def GetHashCode(self):
""" GetHashCode(self: EditorAttribute) -> int """
pass
def __eq__(self,*args):
""" x.__eq__(y) <==> x==y """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type)
__new__(cls: type,typeName: str,baseTypeName: str)
__new__(cls: type,typeName: str,baseType: Type)
__new__(cls: type,type: Type,baseType: Type)
"""
pass
    def __ne__(self,*args):
        """ x.__ne__(y) <==> x!=y """
        pass
EditorBaseTypeName=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the name of the base class or interface serving as a lookup key for this editor.
Get: EditorBaseTypeName(self: EditorAttribute) -> str
"""
EditorTypeName=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the name of the editor class in the System.Type.AssemblyQualifiedName format.
Get: EditorTypeName(self: EditorAttribute) -> str
"""
TypeId=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a unique ID for this attribute type.
Get: TypeId(self: EditorAttribute) -> object
"""
|
1635935
|
import multiprocessing
from typing import List
import torch
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
from video_transforms import (GroupRandomHorizontalFlip, GroupOverSample,
GroupMultiScaleCrop, GroupScale, GroupCenterCrop, GroupRandomCrop,
GroupNormalize, Stack, ToTorchFormatTensor, GroupRandomScale)
def get_augmentor(is_train: bool, image_size: int, mean: List[float] = None,
std: List[float] = None, disable_scaleup: bool = False,
                  threed_data: bool = False, version: str = 'v1', scale_range: List[int] = None,
modality: str = 'rgb', num_clips: int = 1, num_crops: int = 1, dataset: str = ''):
mean = [0.485, 0.456, 0.406] if mean is None else mean
std = [0.229, 0.224, 0.225] if std is None else std
scale_range = [256, 320] if scale_range is None else scale_range
if modality == 'sound':
augments = [
Stack(threed_data=threed_data),
ToTorchFormatTensor(div=False, num_clips_crops=num_clips * num_crops)
]
else:
augments = []
if is_train:
if version == 'v1':
augments += [
GroupMultiScaleCrop(image_size, [1, .875, .75, .66])
]
elif version == 'v2':
augments += [
GroupRandomScale(scale_range),
GroupRandomCrop(image_size),
]
if not (dataset.startswith('ststv') or 'jester' in dataset or 'mini_ststv' in dataset):
augments += [GroupRandomHorizontalFlip(is_flow=(modality == 'flow'))]
else:
scaled_size = image_size if disable_scaleup else int(image_size / 0.875 + 0.5)
if num_crops == 1:
augments += [
GroupScale(scaled_size),
GroupCenterCrop(image_size)
]
else:
flip = True if num_crops == 10 else False
augments += [
GroupOverSample(image_size, scaled_size, num_crops=num_crops, flip=flip),
]
augments += [
Stack(threed_data=threed_data),
ToTorchFormatTensor(num_clips_crops=num_clips * num_crops),
GroupNormalize(mean=mean, std=std, threed_data=threed_data)
]
augmentor = transforms.Compose(augments)
return augmentor
def build_dataflow(dataset, is_train, batch_size, workers=36, is_distributed=False):
workers = min(workers, multiprocessing.cpu_count())
shuffle = False
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if is_distributed else None
if is_train:
shuffle = sampler is None
data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle,
num_workers=workers, pin_memory=True, sampler=sampler)
return data_loader
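# Minimal usage sketch, assuming an image size of 224 and a torch Dataset
# instance named `dataset` (both are placeholders, not defined in this module).
if __name__ == '__main__':
    train_augmentor = get_augmentor(is_train=True, image_size=224,
                                    version='v2', modality='rgb')
    print(train_augmentor)
    # A video dataset class would apply `train_augmentor` to each clip; with
    # one in hand, the loader would be built roughly like this:
    # loader = build_dataflow(dataset, is_train=True, batch_size=8, workers=4)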
|
1636013
|
import logging
import faiss
import torch
import torch.nn as nn
from sklearn.manifold import TSNE
def manifold_learning(x, nbit):
tsne = TSNE(nbit, init='pca', method='exact')
y = tsne.fit_transform(x)
return y
class IMHLoss(nn.Module):
def __init__(self, nbit, kmeans_iters=200, m=400, k=5, bandwidth=512., **kwargs):
super(IMHLoss, self).__init__()
self.built = False
self.nbit = nbit
self.kmeans_iters = kmeans_iters
self.m = m # base set size
self.k = k # knn size
self.bandwidth = bandwidth
self.kmeans = None
self.knn_index = None
self.base_set = None
self.losses = {}
def compute_embeddings(self, query):
"""
:param query: (n, d)
:param centroids: (m, d)
:return:
"""
        try:
            query = query.cpu().numpy()
        except AttributeError:
            # query is already a numpy array
            pass
distances, neighbors = self.kmeans.index.search(query, self.k)
gaussianw = torch.exp(- torch.from_numpy(distances) / self.bandwidth)
gaussianw = gaussianw / gaussianw.sum(dim=1, keepdim=True) # (qn, k)
base_neighbors = self.base_set[neighbors] # (qn, k, nbit)
y = (gaussianw.unsqueeze(2) * base_neighbors).sum(dim=1) # (qn, k, nbit) -> (qn, nbit)
return y
def forward(self, x):
"""
:param x: should be full dataset
:return:
"""
if self.training:
assert not self.built, 'please switch to eval mode'
device = x.device
logging.info('Kmeans Learning')
dim = x.size(1)
self.kmeans = faiss.Kmeans(d=dim, k=self.m, niter=self.kmeans_iters)
self.kmeans.train(x.cpu().numpy())
logging.info('Manifold Learning')
self.base_set = manifold_learning(self.kmeans.centroids, self.nbit)
logging.info('Computing Embedding')
v = self.compute_embeddings(x.cpu().numpy())
v = v.to(device)
quan_error = (1 - torch.cosine_similarity(v, v.sign())).mean()
self.losses['quan'] = quan_error
self.built = True
return v, quan_error
else:
assert self.built, 'please perform training'
return self.compute_embeddings(x.cpu().numpy())
def state_dict(self, destination=None, prefix='', keep_vars=False):
""" Overrides state_dict() to save also theta value"""
original_dict = super().state_dict(destination, prefix, keep_vars)
original_dict['centroids'] = self.kmeans.centroids
original_dict['base_set'] = self.base_set
original_dict['built'] = self.built
original_dict['bandwidth'] = self.bandwidth
return original_dict
def load_state_dict(self, state_dict, strict=True):
""" Overrides state_dict() to load also theta value"""
centroids = state_dict.pop('centroids')
base_set = state_dict.pop('base_set')
built = state_dict.pop('built')
bandwidth = state_dict.pop('bandwidth')
dim = centroids.shape[1]
self.kmeans = faiss.Kmeans(d=dim, k=self.m, niter=self.kmeans_iters)
self.kmeans.centroids = centroids
self.kmeans.index = faiss.IndexFlatL2(dim)
self.kmeans.index.reset()
self.kmeans.index.add(centroids)
self.built = built
self.base_set = base_set
self.bandwidth = bandwidth
super().load_state_dict(state_dict, strict)
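# Hedged usage sketch (shapes are illustrative only; with m=400 centroids the
# dataset should be large enough for kmeans to be meaningful):
if __name__ == '__main__':
    x = torch.randn(20000, 64)
    criterion = IMHLoss(nbit=16)
    criterion.train()
    v, quan_error = criterion(x)   # fits kmeans + t-SNE base set once
    criterion.eval()
    codes = criterion(x[:10])      # (10, nbit) out-of-sample embeddings
    binary = codes.sign()          # sign() binarizes the embedding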
|
1636020
|
import asyncio
import json
import logging
import os.path
import threading
from logging import Formatter
from time import sleep
from typing import Optional
from getmac import get_mac_address as gma
from sqlalchemy.exc import OperationalError
from tornado.platform.asyncio import AnyThreadEventLoopPolicy
from angrmanagement.config import Conf
try:
from slacrs import Slacrs
from slacrs.model import Poi
except ImportError as _:
Slacrs = None
Poi = None
l = logging.getLogger(__name__)
l.setLevel('INFO')
def _init_logger():
user_dir = os.path.expanduser('~')
log_dir = os.path.join(user_dir, 'am-logging')
os.makedirs(log_dir, exist_ok=True)
log_file = os.path.join(log_dir, 'poi_diagnose.log')
fh = logging.FileHandler(log_file)
fh.setLevel('INFO')
formatter: Formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
l.addHandler(fh)
class DiagnoseHandler:
"""
Handling POI records in slacrs
"""
def __init__(self, project_name=None, project_md5=None):
_init_logger()
self.project_name = project_name
self.project_md5 = project_md5
self._log_list = [ ]
self.workspace = None
self.slacrs_thread = None
self.user = gma()
if Slacrs is None or Poi is None:
self._active = False
else:
self._active = True
def init(self, workspace):
l.debug("workspace initing")
self.workspace = workspace
self._active = True
self.slacrs_thread = threading.Thread(target=self._commit_pois)
        self.slacrs_thread.daemon = True
self.slacrs_thread.start()
def get_image_id(self) -> Optional[str]:
connector = self.workspace.plugins.get_plugin_instance_by_name("ChessConnector")
if connector is None:
return None
try:
return connector.target_image_id
except (ValueError, AttributeError):
return None
def submit_updated_poi(self, poi_id: str, poi_json):
# reference: https://github.com/checrs/slacrs7/blob/master/slacrs/plugins/arbiter.py#L81
image_id = self.get_image_id()
if image_id is None:
l.warning("Cannot submit POI %s since the current target ID is unknown.",
poi_id)
return
poi = Poi()
poi.plugin = self.user
poi.target_image_id = image_id
poi.id = poi_id
poi.poi = json.dumps(poi_json)
# Additional fields according to Slacrs's Base and Poi classes.
poi.source = self.user # https://github.com/checrs/slacrs7/blob/master/slacrs/model/poi.py#L13
poi.created_by = self.user # https://github.com/checrs/slacrs7/blob/master/slacrs/model/base.py#L17
l.debug('adding poi: %s', poi)
        l.info('adding poi: %s, poi.id: %s, poi_id: %s', poi, poi.id, poi_id)
self._log_list.append(poi)
l.debug('current log list: %s', self._log_list)
def get_pois(self):
if not Conf.checrs_backend_str:
return None
try:
connector = self.workspace.plugins.get_plugin_instance_by_name("ChessConnector")
if connector is None:
# chess connector does not exist
return None
slacrs_instance = connector.slacrs_instance()
if slacrs_instance is None:
# slacrs does not exist. continue
return None
session = slacrs_instance.session()
except OperationalError:
# Cannot connect
return None
image_id = self.get_image_id()
if image_id is not None:
result = session.query(Poi).filter(Poi.target_image_id==image_id).all()
else:
result = session.query(Poi).all()
session.close()
l.debug('result: %s', result)
return result
def deactivate(self):
self._active = False
def _commit_pois(self):
l.debug("database: %s", Conf.checrs_backend_str)
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
while self._active:
sleep(3)
if self._log_list:
# we have things to submit!
try:
connector = self.workspace.plugins.get_plugin_instance_by_name("ChessConnector")
if connector is None:
# chess connector does not exist
continue
slacrs_instance = connector.slacrs_instance()
if slacrs_instance is None:
# slacrs does not exist. continue
continue
session = slacrs_instance.session()
except OperationalError:
l.error("Failed to CHECRS backend. Try again later...")
continue
with session.no_autoflush:
while self._log_list:
poi = self._log_list.pop()
# query first to see if the poi id already exist
result = session.query(Poi).filter(Poi.id == poi.id).first()
if result is None:
l.info('Adding poi %s to slacrs', poi)
session.add(poi)
else:
l.info('Updating poi %s to slacrs', poi)
result.poi = poi.poi
l.debug('log_list: %s', self._log_list)
session.commit()
session.close()
|
1636027
|
from seabreeze.pyseabreeze.features._base import SeaBreezeFeature
# Definition
# ==========
#
# TODO: This feature needs to be implemented for pyseabreeze
#
class SeaBreezeWifiConfigurationFeature(SeaBreezeFeature):
identifier = "wifi_configuration"
def get_wifi_mode(self, interface_index):
raise NotImplementedError("implement in derived class")
def set_wifi_mode(self, interface_index, wifi_mode):
raise NotImplementedError("implement in derived class")
def get_wifi_security_type(self, interface_index):
raise NotImplementedError("implement in derived class")
def set_wifi_security_type(self, interface_index, security_type):
raise NotImplementedError("implement in derived class")
def get_wifi_ssid(self, interface_index):
raise NotImplementedError("implement in derived class")
def set_wifi_ssid(self, interface_index, ssid):
raise NotImplementedError("implement in derived class")
def set_wifi_pass_phrase(self, interface_index, pass_phrase):
raise NotImplementedError("implement in derived class")
|
1636036
|
from .base import ResourceBase
from .archive import Archive
from .multivector import Multivector
from .instance import Instance
from .rawdata import RawData
from .vector import Vector
from .bound_resource import BoundResource
|
1636087
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Dict, Union
from yacs.config import CfgNode
from .jenson_shannon_divergence import jensen_shannon_divergence
from .build import LOSS_REGISTRY
class LabelSmoothingCrossEntropy(torch.nn.Module):
"""
Code copied from fastai2
"""
y_int = True
    def __init__(self, loss_cfg: CfgNode, eps: float, reduction):
super(LabelSmoothingCrossEntropy, self).__init__()
self.eps = loss_cfg.EPS
self.reduction = loss_cfg.REDUCTION
    def forward(self, output, target, weights=None):  # weights kept for API compatibility; unused here
c = output.size()[-1]
log_preds = F.log_softmax(output, dim=-1)
        if self.reduction == 'sum':
            loss = -log_preds.sum()
        else:
            # we divide by c at the return line, hence sum rather than mean
            loss = -log_preds.sum(dim=-1)
            if self.reduction == 'mean':
                loss = loss.mean()
return loss*self.eps/c + (1-self.eps) * F.nll_loss(log_preds, target.long(), reduction=self.reduction)
@LOSS_REGISTRY.register('label_smoothing_ce')
class LabSmoothSoftmaxCE(torch.nn.Module):
"""
Normal softmax cross entropy with added functionality
"""
def __init__(self, loss_cfg: CfgNode, do_mixup: bool, weights: List, eps: float, reduction, **kwargs):
"""
:param weights: class weights
"""
super(LabSmoothSoftmaxCE, self).__init__()
self.ohem_rate = loss_cfg.OHEM_RATE
self.do_mixup = do_mixup
self.eps = loss_cfg.EPS
self.reduction = loss_cfg.REDUCTION
self.weights = weights
self.loss_fn = LabelSmoothingCrossEntropy(loss_cfg, self.eps, self.reduction)
#self.loss_fn = CrossEntropy()
def forward(self, logits, labels, js_divergence=False):
if js_divergence:
logits, logits_aug1, logits_aug2 = torch.chunk(logits, 3, dim=0)
loss = jensen_shannon_divergence(logits, logits_aug1, logits_aug2)
else:
loss = 0
preds = torch.argmax(logits.float(), dim=1)
if self.do_mixup:
losses = self.compute_mixup_loss(logits, labels)
corrects = (labels[0] == preds)
else:
losses = self.loss_fn(logits, labels, self.weights)
corrects = (labels == preds)
if self.ohem_rate < 1:
loss += self.compute_ohem_loss(losses)
else:
loss += losses.mean()
acc = torch.sum(corrects) / (len(corrects) + 0.0)
return loss, acc
def compute_mixup_loss(self, logits: torch.Tensor, mixedup_labels_data: tuple):
"""
:param logits: computed logits
:param mixedup_labels_data:
:return:
"""
labels, shuffled_labels, lam = mixedup_labels_data
if self.weights is not None:
loss = lam * self.loss_fn(logits, labels, torch.tensor(self.weights)) + (1 - lam) * self.loss_fn(logits, shuffled_labels, torch.tensor(self.weights))
else:
loss = lam * self.loss_fn(logits, labels) + (1 - lam) * self.loss_fn(logits, shuffled_labels)
return loss
def compute_ohem_loss(self, losses: torch.Tensor):
N = losses.shape[0]
keep_size = int(N * self.ohem_rate)
ohem_losses, _ = losses.topk(keep_size)
loss = ohem_losses.mean()
return loss
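# Hedged usage sketch (assumes a yacs CfgNode carrying EPS/REDUCTION/OHEM_RATE;
# the values are illustrative; kept as comments since this module uses relative
# imports and cannot run standalone):
#
#   cfg = CfgNode({'EPS': 0.1, 'REDUCTION': 'none', 'OHEM_RATE': 1.0})
#   criterion = LabSmoothSoftmaxCE(cfg, do_mixup=False, weights=None,
#                                  eps=cfg.EPS, reduction=cfg.REDUCTION)
#   loss, acc = criterion(torch.randn(8, 10), torch.randint(0, 10, (8,)))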
|
1636115
|
import numpy as np
class ReplayBuffer:
"""Experience Replay Buffer para DQNs."""
def __init__(self, max_length, observation_space):
"""Cria um Replay Buffer.
Parâmetros
----------
max_length: int
Tamanho máximo do Replay Buffer.
observation_space: int
Tamanho do espaço de observação.
"""
self.index, self.size, self.max_length = 0, 0, max_length
self.states = np.zeros((max_length, observation_space), dtype=np.float32)
self.actions = np.zeros((max_length), dtype=np.int32)
self.rewards = np.zeros((max_length), dtype=np.float32)
self.next_states = np.zeros((max_length, observation_space), dtype=np.float32)
self.dones = np.zeros((max_length), dtype=np.float32)
def __len__(self):
"""Retorna o tamanho do buffer."""
return self.size
def update(self, state, action, reward, next_state, done):
"""Adiciona uma experiência ao Replay Buffer.
Parâmetros
----------
state: np.array
Estado da transição.
action: int
Ação tomada.
reward: float
Recompensa recebida.
state: np.array
Estado seguinte.
done: int
Flag indicando se o episódio acabou.
"""
self.states[self.index] = state
self.actions[self.index] = action
self.rewards[self.index] = reward
self.next_states[self.index] = next_state
self.dones[self.index] = done
self.index = (self.index + 1) % self.max_length
if self.size < self.max_length:
self.size += 1
def sample(self, batch_size):
"""Retorna um batch de experiências.
Parâmetros
----------
batch_size: int
Tamanho do batch de experiências.
Retorna
-------
states: np.array
Batch de estados.
actions: np.array
Batch de ações.
rewards: np.array
Batch de recompensas.
next_states: np.array
Batch de estados seguintes.
dones: np.array
Batch de flags indicando se o episódio acabou.
"""
# Escolhe índices aleatoriamente do Replay Buffer
idxs = np.random.randint(0, self.size, size=batch_size)
return (self.states[idxs], self.actions[idxs], self.rewards[idxs], self.next_states[idxs], self.dones[idxs])
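# Usage sketch (sizes are illustrative only):
if __name__ == '__main__':
    buffer = ReplayBuffer(max_length=10000, observation_space=4)
    s = np.zeros(4, dtype=np.float32)
    buffer.update(s, action=1, reward=0.5, next_state=s, done=0)
    states, actions, rewards, next_states, dones = buffer.sample(batch_size=1)
    print(len(buffer), states.shape)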
|
1636119
|
import datetime
import pytest
from prisma import Client
# TODO: add tests for every database provider we support
@pytest.mark.asyncio
async def test_precision_loss(client: Client) -> None:
"""https://github.com/RobertCraigie/prisma-client-py/issues/129"""
date = datetime.datetime.utcnow()
post = await client.post.create(
data={
'title': 'My first post',
'published': False,
'created_at': date,
},
)
found = await client.post.find_first(
where={
'created_at': date,
},
)
assert found is not None
found = await client.post.find_first(
where={
'created_at': post.created_at,
},
)
assert found is not None
|
1636127
|
import warnings
from django.test import SimpleTestCase
from .models import Person
class HasAutoFieldTests(SimpleTestCase):
def test_get_warns(self):
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
Person._meta.has_auto_field
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns[0].message),
'Model._meta.has_auto_field is deprecated in favor of checking if '
'Model._meta.auto_field is not None.',
)
def test_set_does_nothing(self):
Person._meta.has_auto_field = True
|
1636129
|
from io import BytesIO
import asyncio
import asyncpg
from fixtures.zenith_fixtures import ZenithEnv, Postgres
from fixtures.log_helper import log
from fixtures.benchmark_fixture import MetricReport, ZenithBenchmarker
pytest_plugins = ("fixtures.zenith_fixtures", "fixtures.benchmark_fixture")
async def repeat_bytes(buf, repetitions: int):
for i in range(repetitions):
yield buf
async def copy_test_data_to_table(pg: Postgres, worker_id: int, table_name: str):
buf = BytesIO()
for i in range(1000):
buf.write(
f"{i}\tLoaded by worker {worker_id}. Long string to consume some space.\n".encode())
buf.seek(0)
copy_input = repeat_bytes(buf.read(), 5000)
pg_conn = await pg.connect_async()
await pg_conn.copy_to_table(table_name, source=copy_input)
async def parallel_load_different_tables(pg: Postgres, n_parallel: int):
workers = []
for worker_id in range(n_parallel):
worker = copy_test_data_to_table(pg, worker_id, f'copytest_{worker_id}')
workers.append(asyncio.create_task(worker))
# await all workers
await asyncio.gather(*workers)
# Load 5 different tables in parallel with COPY TO
def test_parallel_copy_different_tables(zenith_simple_env: ZenithEnv,
zenbenchmark: ZenithBenchmarker,
n_parallel=5):
env = zenith_simple_env
# Create a branch for us
env.zenith_cli(["branch", "test_parallel_copy_different_tables", "empty"])
pg = env.postgres.create_start('test_parallel_copy_different_tables')
log.info("postgres is running on 'test_parallel_copy_different_tables' branch")
# Open a connection directly to the page server that we'll use to force
# flushing the layers to disk
psconn = env.pageserver.connect()
pscur = psconn.cursor()
# Get the timeline ID of our branch. We need it for the 'do_gc' command
conn = pg.connect()
cur = conn.cursor()
cur.execute("SHOW zenith.zenith_timeline")
timeline = cur.fetchone()[0]
for worker_id in range(n_parallel):
cur.execute(f'CREATE TABLE copytest_{worker_id} (i int, t text)')
with zenbenchmark.record_pageserver_writes(env.pageserver, 'pageserver_writes'):
with zenbenchmark.record_duration('load'):
asyncio.run(parallel_load_different_tables(pg, n_parallel))
# Flush the layers from memory to disk. This is included in the reported
# time and I/O
pscur.execute(f"do_gc {env.initial_tenant} {timeline} 0")
# Record peak memory usage
zenbenchmark.record("peak_mem",
zenbenchmark.get_peak_mem(env.pageserver) / 1024,
'MB',
report=MetricReport.LOWER_IS_BETTER)
# Report disk space used by the repository
timeline_size = zenbenchmark.get_timeline_size(env.repo_dir, env.initial_tenant, timeline)
zenbenchmark.record('size',
timeline_size / (1024 * 1024),
'MB',
report=MetricReport.LOWER_IS_BETTER)
async def parallel_load_same_table(pg: Postgres, n_parallel: int):
workers = []
for worker_id in range(n_parallel):
        worker = copy_test_data_to_table(pg, worker_id, 'copytest')
workers.append(asyncio.create_task(worker))
# await all workers
await asyncio.gather(*workers)
# Load data into one table with COPY TO from 5 parallel connections
def test_parallel_copy_same_table(zenith_simple_env: ZenithEnv,
zenbenchmark: ZenithBenchmarker,
n_parallel=5):
env = zenith_simple_env
# Create a branch for us
env.zenith_cli(["branch", "test_parallel_copy_same_table", "empty"])
pg = env.postgres.create_start('test_parallel_copy_same_table')
log.info("postgres is running on 'test_parallel_copy_same_table' branch")
# Open a connection directly to the page server that we'll use to force
# flushing the layers to disk
psconn = env.pageserver.connect()
pscur = psconn.cursor()
# Get the timeline ID of our branch. We need it for the 'do_gc' command
conn = pg.connect()
cur = conn.cursor()
cur.execute("SHOW zenith.zenith_timeline")
timeline = cur.fetchone()[0]
    cur.execute('CREATE TABLE copytest (i int, t text)')
with zenbenchmark.record_pageserver_writes(env.pageserver, 'pageserver_writes'):
with zenbenchmark.record_duration('load'):
asyncio.run(parallel_load_same_table(pg, n_parallel))
# Flush the layers from memory to disk. This is included in the reported
# time and I/O
pscur.execute(f"do_gc {env.initial_tenant} {timeline} 0")
# Record peak memory usage
zenbenchmark.record("peak_mem",
zenbenchmark.get_peak_mem(env.pageserver) / 1024,
'MB',
report=MetricReport.LOWER_IS_BETTER)
# Report disk space used by the repository
timeline_size = zenbenchmark.get_timeline_size(env.repo_dir, env.initial_tenant, timeline)
zenbenchmark.record('size',
timeline_size / (1024 * 1024),
'MB',
report=MetricReport.LOWER_IS_BETTER)
|
1636181
|
import subprocess as sub
#import sys
import argparse
import datasetConfig
parser = argparse.ArgumentParser('Tasas for CER')
parser.add_argument('epochs', type=int, help='how many epochs')
parser.add_argument('flag', type=str, help='si/no with/without testing')
#parser.add_argument('folder', type=str, help='pred_logs_layer_1 or others')
args = parser.parse_args()
epochs = args.epochs
flag = args.flag
#base = args.folder + '/'
baseDir = datasetConfig.baseDir_word
base = 'pred_logs/'
#if len(sys.argv) != 3:
# print('USAGE: python3 pytasas.py <epochs> <flag: with text or not, si: with, no: not>')
# exit()
#base = 'pred_logs/'
f_cer = open(base+'wer_train.log', 'w')
f_cer_v = open(base+'wer_valid.log', 'w')
if flag == 'si':
f_cer_t = open(base+'wer_test.log', 'w')
for i in range(epochs):
gt_tr = baseDir + 'RWTH.iam_word_gt_final.train.thresh'
gt_va = baseDir + 'RWTH.iam_word_gt_final.valid.thresh'
gt_te = baseDir + 'RWTH.iam_word_gt_final.test.thresh'
decoded = base+'train_predict_seq.'+str(i)+'.log'
decoded_v = base+'valid_predict_seq.'+str(i)+'.log'
if flag == 'si':
decoded_t = base+'test_predict_seq.'+str(i)+'.log'
res_cer = sub.Popen(['./tasas_wer.sh', gt_tr, decoded], stdout=sub.PIPE)
res_cer_v = sub.Popen(['./tasas_wer.sh', gt_va, decoded_v], stdout=sub.PIPE)
if flag == 'si':
res_cer_t = sub.Popen(['./tasas_wer.sh', gt_te, decoded_t], stdout=sub.PIPE)
res_cer = res_cer.stdout.read().decode('utf8')
res_cer_v = res_cer_v.stdout.read().decode('utf8')
if flag == 'si':
res_cer_t = res_cer_t.stdout.read().decode('utf8')
res_cer = float(res_cer)/100
res_cer_v = float(res_cer_v)/100
if flag == 'si':
res_cer_t = float(res_cer_t)/100
f_cer.write(str(res_cer))
f_cer.write(' ')
f_cer_v.write(str(res_cer_v))
f_cer_v.write(' ')
if flag == 'si':
f_cer_t.write(str(res_cer_t))
f_cer_t.write(' ')
print(i)
f_cer.close()
f_cer_v.close()
if flag == 'si':
f_cer_t.close()
|