id (stringlengths 3-8) | content (stringlengths 100-981k)

1602675
from Redy.Opt import *
from dis import dis
@feature(goto)
def loop1():
task1: label
task2: label
end: label
with task1:
print('task1')
x = input('where do you want to goto?[1, 2, end]')
if x == 'end':
print('jump to end')
end.jump()
elif x == '1':
print('jump to task1')
task1.jump()
elif x == '2':
print('jump to task2')
task2.jump()
raise ValueError
task2.mark()
print('task2| then turn to task1.')
task1.jump()
end.mark()
print('good bye')
macro = Macro()
@feature(macro)
def macro_example(x):
@macro.stmt
def just_return(v):
return v
just_return(1)
@macro.stmt
def print_some_and_return_1(s):
print(s)
return 1
@feature(macro)
def macro_example2():
print_some_and_return_1("abcdefg")
dis(macro_example)
print(macro_example(1))
macro_example2()
# dis(loop1)
# loop1()
macro.stmt('def m(): print("string macro")')
#
@feature(macro)
def test_string_macro():
m()
test_string_macro()
1602738
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.card import card
def test_card():
"""Test module card.py by downloading
card.csv and testing shape of
extracted data has 3010 rows and 34 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = card(test_path)
try:
assert x_train.shape == (3010, 34)
except:
shutil.rmtree(test_path)
raise
1602782
import numpy as np
import matplotlib.pyplot as plt
# Generate data
n = 500
t = np.linspace(0,20.0*np.pi,n)
X = np.sin(t) # X is already between -1 and 1, so extra scaling is not normally needed
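# A minimal sketch (not part of the original script): if the signal were not
# already bounded in [-1, 1], a min-max rescale like the one below could be
# applied; X_scaled and the plot call are illustrative only.
X_scaled = 2.0 * (X - X.min()) / (X.max() - X.min()) - 1.0  # rescale to [-1, 1]
plt.plot(t, X_scaled)
plt.show()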
1602783
import os
import shutil
from django.core.files import File
from django.conf import settings
from service_catalog.maintenance_jobs import cleanup_ghost_docs_images
from service_catalog.models import Doc
from tests.test_service_catalog.base import BaseTest
class TestMaintenanceJob(BaseTest):
def setUp(self):
super(TestMaintenanceJob, self).setUp()
self.test_root = os.path.abspath(os.path.dirname(__file__))
self._old_MEDIA_ROOT = settings.MEDIA_ROOT
# override MEDIA_ROOT for this test
settings.MEDIA_ROOT = os.path.join(self.test_root, 'media')
def tearDown(self):
# Cleanup path
if os.path.isdir(settings.MEDIA_ROOT):
shutil.rmtree(settings.MEDIA_ROOT)
# reset MEDIA_ROOT
settings.MEDIA_ROOT = self._old_MEDIA_ROOT
def test_cleanup_ghost_docs_images(self):
doc_image_path = settings.MEDIA_ROOT + os.sep + settings.MARTOR_UPLOAD_PATH
from pathlib import Path
path = Path(doc_image_path)
path.mkdir(parents=True, exist_ok=True)
# create a media
with open(doc_image_path + "/to_be_kept.jpg", 'w') as f:
to_be_kept = File(f)
to_be_kept.write("to_be_kept")
with open(doc_image_path + "/to_be_deleted.jpg", 'w') as f:
to_be_deleted = File(f)
to_be_deleted.write("to_be_deleted")
content = """
# Delete Images test

"""
# create a doc
Doc.objects.create(title="test doc", content=content)
deleted_media_list = cleanup_ghost_docs_images(image_path=doc_image_path)
expected_list_of_deleted_files = ["to_be_deleted.jpg"]
self.assertEqual(deleted_media_list, expected_list_of_deleted_files)
1602792
from learntools.core import *
class WorseHeuristic(ThoughtExperiment):
_hint = ("The first heuristic assigns a score of 0 to column 2, and a score of -99 to "
"column 3. What scores do you get with the second heuristic?")
_solution = ("The first heuristic is guaranteed to select column 2 to block "
"the opponent from winning. The second heuristic selects either "
"column 2 or column 3 (where it selects each with 50% probability). "
"Thus, for this game board, the first heuristic is better. In general, "
"we can expect that the first heuristic is a better heuristic, "
"since we cannot trust the second heuristic to block the opponent "
"from winning.")
class NumLeaves(EqualityCheckProblem):
_var = "num_leaves"
_expected = 7**3
_hint = "Try drawing the game tree. How many moves (columns) are possible at each turn?"
_solution = CS("num_leaves = 7*7*7")
class WhichMove(CodingProblem):
_var = "selected_move"
_hint = "For each potential move, how will the opponent respond?"
_solution = CS("selected_move = 3")
def check(self, ans):
try:
move = int(ans)
except (TypeError, ValueError):
    assert False, "Your answer should be one of `1`, `2`, or `3`."
assert move in [1, 2, 3], "Your answer should be one of `1`, `2`, or `3`."
assert move == 3, "{} is incorrect. Please try again.".format(move)
class Assumptions(ThoughtExperiment):
_hint = "What happened in the tutorial when the minimax agent played against a random opponent?"
_solution = ("We can still expect the minimax agent to perform well. On a high level, "
"assuming an optimal opponent simply overestimates the opponent, but does not "
"break the algorithm. The effect of overestimating the opponent is merely that "
"the minimax agent will take longer to win, than if it had a more accurate understanding "
"of its opponent. For instance, the minimax agent is highly unlikely to select the same column "
"three times in "
"its first three moves (since it assumes an optimal opponent that will certainly block the "
"winning play in the next move), but this is not a bad initial strategy for playing against an agent that "
"selects columns completely at random.")
class JustSubmitEx3(CodingProblem):
_hint = "Follow the instructions to create an agent."
_solution = "Follow the instructions to create an agent."
_congrats = "Thank you for creating an agent!"
_correct_message = ""
def check(self):
pass
qvars = bind_exercises(globals(), [
WorseHeuristic,
NumLeaves,
WhichMove,
Assumptions,
JustSubmitEx3
],
var_format='q_{n}',
)
__all__ = list(qvars)
1602873
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
#data = np.loadtxt("data.txt").reshape((2048,2048))
data = np.memmap("DenseOffset.off", dtype=np.float32, mode='r', shape=(100,100,2))
data1=data[:,:,0]
print(data1)
plt.imshow(data1, cmap=cm.coolwarm)
plt.show()
#plt.ylim([-2.2,0.2])
#plt.plot(data1[0,0:500])
#plt.show()
#pdata= data[:,:,1]
#nx, ny = 300, 300
#x = range(nx)
#y = range(ny)
#hf = plt.figure()
#ha = hf.add_subplot(111, projection='3d')
#X, Y = np.meshgrid(x, y) # `plot_surface` expects `x` and `y` data to be 2D
#ha.plot_surface(X, Y, pdata)
#plt.show()
1602883
import os
def parseargs(p):
"""
Add arguments and `func` to `p`.
:param p: ArgumentParser
:return: ArgumentParser
"""
p.set_defaults(func=func)
p.description = "print name of current/working directory"
p.add_argument(
"-L",
"--logical",
action="store_true",
dest="logical",
help="use PWD from environment, even if it contains symlinks",
)
p.add_argument(
"-P",
"--physical",
action="store_true",
dest="physical",
help="avoid all symlinks",
)
return p
def func(args):
if args.logical:
print(os.getenv('PWD'))
elif args.physical:
print(os.path.realpath(os.getcwd()))
else:
print(os.getcwd())
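# A small usage sketch (assumption: the module is run directly; the names
# `parser` and `cli_args` are illustrative). It wires parseargs() into a fresh
# ArgumentParser and dispatches to the configured func(), the way a
# coreutils-style driver might.
if __name__ == "__main__":
    import argparse

    parser = parseargs(argparse.ArgumentParser())
    cli_args = parser.parse_args()
    cli_args.func(cli_args)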
1602908
import numpy as np
class MotionFeatureExtractor:
""" Functor for extracting motion features from a detection.
"""
def __init__(self, stats):
""" Constructor.
"""
## Dict containing mean and std of motion features.
self.stats = stats
def __call__(self, det, last=None):
""" Extracts motion features from a detection.
Inputs:
det -- Current detection.
last -- Previous detection.
"""
width = float(det["w"])
height = float(det["h"])
if last is None:
features = np.array([0.0, 0.0, width, height])
else:
x_diff = float(det["center_x"]) - float(last["center_x"])
y_diff = float(det["center_y"]) - float(last["center_y"])
frame_diff = float(det["frame"]) - float(last["frame"])
if frame_diff == 0.0:
features = np.array([0.0, 0.0, width, height])
else:
features = np.array([
x_diff / frame_diff,
y_diff / frame_diff,
width,
height])
features = features - self.stats["mean"]
features = np.divide(features, self.stats["std"])
return features
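# A minimal usage sketch: the stats values and detection dicts below are made up
# for illustration; real code would compute the mean/std from training data.
if __name__ == "__main__":
    stats = {"mean": np.zeros(4), "std": np.ones(4)}
    extractor = MotionFeatureExtractor(stats)
    prev = {"center_x": 10, "center_y": 20, "w": 5, "h": 8, "frame": 1}
    curr = {"center_x": 13, "center_y": 24, "w": 5, "h": 8, "frame": 2}
    print(extractor(curr, last=prev))  # velocity (3.0, 4.0) plus width and height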
1602953
import unittest
import time
from mu.integration_tests import int_test
from mu.sims.bms_carrier import BmsCarrier
from mu.sims.sub_sims.mcp23008 import NUM_MCP23008_PINS, MCP23008_KEY
class TestMcp23008(int_test.IntTest):
def setUp(self):
super().setUp()
self.board = self.manager.start(BmsCarrier, proj_name='smoke_mcp23008')
def test_mcp23008(self):
time.sleep(0.1)
for x in range(NUM_MCP23008_PINS): # test all pins init'd
self.board.sub_sim('mcp23008').assert_pin_state(MCP23008_KEY, x, 1)
time.sleep(0.1)
for x in range(NUM_MCP23008_PINS): # test all pins toggled
self.board.sub_sim('mcp23008').assert_pin_state(MCP23008_KEY, x, 0)
if __name__ == '__main__':
unittest.main()
1602961
import importlib
from mu.protogen import stores_pb2
MODULE_NAME_FORMAT = 'mu.protogen.{}_pb2'
STORE_TYPE_NAME_FORMAT = 'Mu{}Store'
def store_from_name(type_name):
type_name = type_name.lower()
# get class from module via introspection
type_module = importlib.import_module(MODULE_NAME_FORMAT.format(type_name))
store_class = getattr(type_module, STORE_TYPE_NAME_FORMAT.format(type_name.capitalize()))
return store_class()
def store_from_enum(store_enum):
# grab enum name from enum value for introspection
type_name = stores_pb2.MuStoreType.Name(store_enum).lower()
return store_from_name(type_name)
def decode_store_info(msg):
store_info = stores_pb2.MuStoreInfo()
store_info.ParseFromString(msg)
return store_info
def decode_store(store_info):
store = store_from_enum(store_info.type)
store.ParseFromString(store_info.msg)
return store
def full_mask(store):
mask = store.__class__()
for descriptor, val in store.ListFields():
if hasattr(val, '__len__'):
getattr(mask, descriptor.name).extend([1] * len(val))
else:
setattr(mask, descriptor.name, 1)
return mask
1602985
import numpy as np
from py.forest import Node
class Cube():
def __init__(self, node):
assert isinstance(node, Node)
self.start = node.start
self.end = node.end
self.dim = node.dim
self.id_string = node.id_string
self.split_axis = node.split_axis
self.split_vals = node.split_vals
self.vol = 1
for i in range(len(self.start)):
self.vol = self.vol*(self.end[i] - self.start[i])
self.frac = 0
self.child = [Cube(child_node) for child_node in node.child]
def est_density(self, pts, total):
self.frac = np.shape(pts)[1]/total  # pts has shape (dim, num_pts)
if self.split_axis != -1:
split_pts = self.split_points(pts)
for i in range(len(self.child)):
self.child[i].est_density(split_pts[i], total)
def split_points(self, pts):
_, num_pts = np.shape(pts)
indices = [[] for _ in range(len(self.split_vals) + 1)]
list_vals = [self.start[self.split_axis]]
list_vals.extend(self.split_vals)  # split_vals is a sequence; extend keeps the boundary list flat
list_vals.append(self.end[self.split_axis])
for i in range(num_pts):
for j in range(len(list_vals) -1):
if (pts[self.split_axis][i] >= list_vals[j]) and\
(pts[self.split_axis][i] < list_vals[j+1]):
indices[j].append(i)
split_pts = []
for j in range(len(list_vals) -1):
split_pts.append(pts[:, indices[j]])
return split_pts
def __str__(self):
str_val = "Cube ID: " + str(self.id_string) + "\n"
str_val += "Boundary: "
for i in range(self.dim):
str_val += " [" + str(self.start[i]) + ", " + str(self.end[i]) + "]"
if i < self.dim -1:
str_val += " x"
else:
str_val += "\n"
if self.split_axis != -1:
str_val += "Axis: " + str(self.split_axis) + ", "
str_val += "Split Values: " + str(self.split_vals)
return str_val
def print_cube(self):
print_list = [self]
while print_list:
cube = print_list.pop(0)
print(str(cube))
print_list.extend(cube.child)
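# A tiny self-contained sketch: _FakeNode is a hypothetical stand-in that only
# carries the attributes Cube actually reads; the real Node comes from py.forest.
class _FakeNode:
    def __init__(self):
        self.start = [0.0, 0.0]
        self.end = [1.0, 1.0]
        self.dim = 2
        self.id_string = "root"
        self.split_axis = -1  # leaf node: no further splits
        self.split_vals = []
        self.child = []

if __name__ == "__main__":
    cube = Cube(_FakeNode())
    pts = np.array([[0.1, 0.5, 0.9], [0.2, 0.4, 0.8]])  # shape (dim, num_pts)
    cube.est_density(pts, total=3)
    cube.print_cube()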
1602994
import unittest
from securityheaders.checkers.other import XWebKitCSPDeprecatedChecker
class XWebKitCSPDeprecatedCheckerTest(unittest.TestCase):
def setUp(self):
self.x = XWebKitCSPDeprecatedChecker()
def test_checkNoHeader(self):
nox = dict()
nox['test'] = 'value'
self.assertEqual(self.x.check(nox), [])
def test_checkNone(self):
nonex = None
self.assertEqual(self.x.check(nonex), [])
def test_checkNone2(self):
hasx = dict()
hasx['x-webkit-csp'] = None
result = self.x.check(hasx)
self.assertIsNotNone(result)
self.assertEqual(len(result), 1)
def test_checkValid(self):
hasx5 = dict()
hasx5['x-webkit-csp'] = "default-src: 'none'"
result = self.x.check(hasx5)
self.assertIsNotNone(result)
self.assertEqual(len(result), 1)
def test_checkOther(self):
hasx5 = dict()
hasx5['content-security-policy'] = "default-src: 'none'"
self.assertEqual(self.x.check(hasx5), [])
if __name__ == '__main__':
unittest.main()
1603003
import multiprocessing as mp
from pathlib import Path
import subprocess
from unittest.mock import MagicMock
from libmuscle.mcp.tcp_server import TcpServer
from libmuscle.mcp.message import Message
from ymmsl import Reference, Settings
from .conftest import skip_if_python_only
def tcp_server_process(control_pipe):
control_pipe[0].close()
settings = Settings({'test_setting': 42})
data = {'test1': 10, 'test2': [None, True, 'testing']}
receiver = Reference('test_receiver.test_port2')
message = Message(
Reference('test_sender.test_port'),
receiver,
10, 1.0, 2.0, settings, data).encoded()
def get_message(receiver):
assert receiver == 'test_receiver.test_port2'
return message
post_office = MagicMock()
post_office.done = False
post_office.get_message = get_message
sender_instance_id = Reference('test_sender')
server = TcpServer(sender_instance_id, post_office)
control_pipe[1].send(server.get_location())
control_pipe[1].recv()
control_pipe[1].close()
server.close()
@skip_if_python_only
def test_cpp_tcp_client(log_file_in_tmpdir):
# create server process
server_pipe = mp.Pipe()
server_process = mp.Process(target=tcp_server_process, args=(server_pipe,))
server_process.start()
server_pipe[1].close()
server_loc = server_pipe[0].recv()
# create C++ client
# it receives and checks settings, and sends a log message
# see libmuscle/cpp/src/libmuscle/tests/mmp_client_test.cpp
cpp_build_dir = Path(__file__).parents[1] / 'libmuscle' / 'cpp' / 'build'
lib_paths = [
cpp_build_dir / 'grpc' / 'c-ares' / 'c-ares' / 'lib',
cpp_build_dir / 'grpc' / 'zlib' / 'zlib' / 'lib',
cpp_build_dir / 'grpc' / 'openssl' / 'openssl' / 'lib',
cpp_build_dir / 'protobuf' / 'protobuf' / 'lib',
cpp_build_dir / 'grpc' / 'grpc' / 'lib',
cpp_build_dir / 'msgpack' / 'msgpack' / 'lib']
env = {
'LD_LIBRARY_PATH': ':'.join(map(str, lib_paths))}
cpp_test_dir = cpp_build_dir / 'libmuscle' / 'tests'
cpp_test_client = cpp_test_dir / 'tcp_client_test'
result = subprocess.run([str(cpp_test_client), server_loc], env=env)
server_pipe[0].send(None)
server_pipe[0].close()
server_process.join()
assert result.returncode == 0
assert server_process.exitcode == 0
1603026
import unittest2 as unittest
import pymongo
import time
import random
import threading
from oplogreplay import OplogReplayer
SOURCE_HOST = '127.0.0.1:27017'
DEST_HOST = '127.0.0.1:27018'
TESTDB = 'testdb'
# Inherit from OplogReplayer to count the number of process_op method calls.
class CountingOplogReplayer(OplogReplayer):
count = 0
def process_op(self, ns, raw):
OplogReplayer.process_op(self, ns, raw)
CountingOplogReplayer.count += 1
class TestOplogReplayer(unittest.TestCase):
""" TestCase for the OplogReplayer.
Each test performs the following (see setUp and tearDown for more details):
* delete test databases
* start an OplogReplayer
* perform some actions (inserts, etc.)
* wait for the OplogReplayer to finish replaying ops
* assertions
* stop the OplogReplayer
"""
@classmethod
def setUpClass(cls):
# Create connections to both test databases.
cls.source = pymongo.Connection(SOURCE_HOST)
cls.dest = pymongo.Connection(DEST_HOST)
def _start_replay(self, **kwargs):
# Stop the OplogReplayer before starting a new one.
self._stop_replay()
#if getattr(self, 'oplogreplayer', None):
# self._stop_replay()
# Init & start OplogReplayer, in a separate thread.
self.oplogreplayer = CountingOplogReplayer(
SOURCE_HOST, DEST_HOST, poll_time=0.1, **kwargs)
self.thread = threading.Thread(target=self.oplogreplayer.start)
self.thread.start()
def _stop_replay(self):
# Stop OplogReplayer & join its thread.
if getattr(self, 'oplogreplayer', None):
self.oplogreplayer.stop()
if getattr(self, 'thread', None):
self.thread.join()
# Delete oplogreplayer & thread.
self.oplogreplayer = None
self.thread = None
def setUp(self):
# Drop test databases.
self.source.drop_database(TESTDB)
self.dest.drop_database(TESTDB)
self.dest.drop_database('oplogreplay')
# Sleep a little to allow drop database operations to complete.
time.sleep(0.05)
# Remember Database objects.
self.sourcedb = self.source.testdb
self.destdb = self.dest.testdb
# Stop replay, in case it was still running from a previous test.
self._stop_replay()
# Reset global counter & start OplogReplayer.
CountingOplogReplayer.count = 0
self._start_replay()
def tearDown(self):
self._stop_replay()
def _synchronous_wait(self, target, timeout=3.0):
""" Synchronously wait for the oplogreplay to finish.
Waits until the oplog's retry_count hits target, but at most
timeout seconds.
"""
wait_until = time.time() + timeout
while time.time() < wait_until:
if CountingOplogReplayer.count == target:
return
time.sleep(0.05)
# Synchronous waiting timed out - raise so the failure is visible.
raise Exception('retry_count was only %s/%s after a %.2fsec wait' % \
(CountingOplogReplayer.count, target, timeout))
def assertCollectionEqual(self, coll1, coll2):
self.assertEqual(coll1.count(), coll2.count(),
msg='Collections have different count.')
for obj1 in coll1.find():
obj2 = coll2.find_one(obj1)
self.assertEqual(obj1, obj2)
def assertDatabaseEqual(self, db1, db2):
self.assertListEqual(db1.collection_names(), db2.collection_names(),
msg='Databases have different collections.')
for coll in db1.collection_names():
self.assertCollectionEqual(db1[coll], db2[coll])
def test_writes(self):
self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 1})
self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 2})
self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 3})
self.sourcedb.testcoll.remove({'nr': 3})
self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 4})
self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 5})
self.sourcedb.testcoll.insert({'content': '...', 'nr': 6})
self.sourcedb.testcoll.update({'nr': 6}, {'$set': {'content': 'newContent'}})
self.sourcedb.testcoll.update({'nr': 97}, {'$set': {'content': 'newContent'}})
self.sourcedb.testcoll.update({'nr': 8}, {'$set': {'content': 'newContent'}}, upsert=True)
self.sourcedb.testcoll.remove({'nr': 99})
self.sourcedb.testcoll.remove({'nr': 3})
self.sourcedb.testcoll.remove({'nr': 4})
self.sourcedb.testcoll.insert({'content': 'new content', 'nr': 3})
self.sourcedb.testcoll.insert({'content': 'new content', 'nr': 4})
# Removes and updates that don't do anything will not hit the oplog:
self._synchronous_wait(12)
# Test that the 2 test databases are identical.
self.assertDatabaseEqual(self.sourcedb, self.destdb)
def _perform_bulk_inserts(self, nr=100):
for i in xrange(nr):
obj = { 'content': '%s' % random.random(),
'nr': random.randrange(100000) }
self.sourcedb.testcoll.insert(obj)
def test_bulk_inserts(self):
self._perform_bulk_inserts(1000)
self._synchronous_wait(1000)
# Test that the 2 test databases are identical.
self.assertDatabaseEqual(self.sourcedb, self.destdb)
def test_discontinued_replay(self):
self._perform_bulk_inserts(200)
self._stop_replay()
self._perform_bulk_inserts(150)
self._start_replay()
self._perform_bulk_inserts(100)
self._synchronous_wait(450)
# Test that the 2 test databases are identical.
self.assertDatabaseEqual(self.sourcedb, self.destdb)
# Test that no operation was replayed twice.
self.assertEqual(CountingOplogReplayer.count, 450)
def test_index_operations(self):
# Create an index, then test that it was created on the destination.
index = self.sourcedb.testidx.ensure_index('idxfield')
self._synchronous_wait(1)
self.assertIn(index, self.destdb.testidx.index_information())
# Delete the index, and test that it was deleted from destination.
self.sourcedb.testidx.drop_index(index)
self._synchronous_wait(2)
self.assertNotIn(index, self.destdb.testidx.index_information())
def test_replay_indexes(self):
# Create index1 on source + dest.
index1 = self.sourcedb.testidx.ensure_index('idxfield1')
# Restart OplogReplayer, without replaying indexes.
self._start_replay(replay_indexes=False)
# Create index2 on source only.
index2 = self.sourcedb.testidx.ensure_index('idxfield2')
# Delete index1 from source only.
self.sourcedb.testidx.drop_index(index1)
self._synchronous_wait(3)
# Test indexes on source and destination.
source_indexes = self.sourcedb.testidx.index_information()
self.assertNotIn(index1, source_indexes)
self.assertIn(index2, source_indexes)
dest_indexes = self.destdb.testidx.index_information()
self.assertIn(index1, dest_indexes)
self.assertNotIn(index2, dest_indexes)
def test_start_from_ts(self):
self._stop_replay()
# Should not be replayed:
self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 1})
# Get last timestamp.
obj = self.source.local.oplog.rs.find().sort('$natural', -1).limit(1)[0]
lastts = obj['ts']
# Should be replayed.
self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 1})
self._start_replay(ts=lastts)
self._synchronous_wait(1)
self.assertEqual(self.destdb.testcoll.count(), 1)
1603028
from appJar import gui
def show(btn): app.showSubWindow("sub")
def hide(btn): app.hideSubWindow("sub")
app=gui()
app.addLabel("l1", "Sub Window Demo")
app.addButton("PRESS", show)
app.startSubWindow("sub")
app.addLabel("s1", "sub")
app.addButton("Stop", hide)
app.stopSubWindow()
app.setSubWindowLocation("sub", 400,400)
app.go()
1603058
from django.db.models import Value, F, TextField
from django.db.models.functions import Concat
from sphinxql import indexes, fields
from .models import Document
class DocumentIndex(indexes.Index):
name = fields.Text(Concat(F('type__name'), Value(' '), F('number'),
output_field=TextField()))
summary = fields.Text('summary')
text = fields.Text('text')
class Meta:
model = Document
query = Document.objects.exclude(dr_series='II')
range_step = 10000
1603200
import torch
import torch.nn.functional as F
from layers import Linear, LayerNorm
from .multi_head_attention import MultiHeadAttention, AttentionMask
from typing import Optional, Callable, Dict
from dataclasses import dataclass
# This file is based on PyTorch's internal implementation
ActivationFunction = Callable[[torch.Tensor], torch.Tensor]
class TransformerEncoderLayer(torch.nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = MultiHeadAttention(d_model, nhead, dropout=dropout)
self.linear1 = Linear(d_model, dim_feedforward)
self.dropout = torch.nn.Dropout(dropout)
self.linear2 = Linear(dim_feedforward, d_model)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.dropout1 = torch.nn.Dropout(dropout)
self.dropout2 = torch.nn.Dropout(dropout)
self.activation = activation
self.reset_parameters()
def forward(self, src: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
src2 = self.self_attn(src, src, AttentionMask(mask, None))
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu') \
if self.activation is F.relu else 1.0)
torch.nn.init.xavier_uniform_(self.linear2.weight)
class TransformerDecoderLayer(torch.nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu):
super(TransformerDecoderLayer, self).__init__()
self.self_attn = MultiHeadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = MultiHeadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = Linear(d_model, dim_feedforward)
self.dropout = torch.nn.Dropout(dropout)
self.linear2 = Linear(dim_feedforward, d_model)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.norm3 = LayerNorm(d_model)
self.dropout1 = torch.nn.Dropout(dropout)
self.dropout2 = torch.nn.Dropout(dropout)
self.dropout3 = torch.nn.Dropout(dropout)
self.activation = activation
self.reset_parameters()
def forward(self, tgt: torch.Tensor, memory: torch.Tensor, tgt_mask: Optional[torch.Tensor] = None,
memory_key_padding_mask: Optional[torch.Tensor] = None,
full_target: Optional[torch.Tensor] = None) -> torch.Tensor:
tgt2 = self.self_attn(tgt, tgt if full_target is None else full_target, mask=AttentionMask(None, tgt_mask))
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(tgt, memory, mask=AttentionMask(memory_key_padding_mask, None))
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu') \
if self.activation is F.relu else 1.0)
torch.nn.init.xavier_uniform_(self.linear2.weight)
class TransformerEncoder(torch.nn.Module):
def __init__(self, layer, n_layers: int, *args, **kwargs):
super().__init__()
self.layers = torch.nn.ModuleList([layer(*args, **kwargs) for _ in range(n_layers)])
def forward(self, data: torch.Tensor, *args, **kwargs):
for l in self.layers:
data = l(data, *args, **kwargs)
return data
class TransformerDecoder(torch.nn.Module):
@dataclass
class State:
step: int
state: Dict[int, torch.Tensor]
def __init__(self, layer, n_layers: int, d_model: int, *args, **kwargs):
super().__init__()
self.d_model = d_model
self.layers = torch.nn.ModuleList([layer(d_model, *args, **kwargs) for _ in range(n_layers)])
def forward(self, data: torch.Tensor, *args, **kwargs):
for l in self.layers:
data = l(data, *args, **kwargs)
return data
def create_state(self, batch_size: int, max_length: int, device: torch.device) -> State:
return self.State(0, {i: torch.empty([batch_size, max_length, self.d_model], device=device)
for i in range(len(self.layers))})
def one_step_forward(self, state: State, data: torch.Tensor, *args, **kwargs):
assert data.shape[1] == 1, f"For one-step forward should have one timesteps, but shape is {data.shape}"
assert state.step < state.state[0].shape[1]
for i, l in enumerate(self.layers):
state.state[i][:, state.step:state.step+1] = data
data = l(data, *args, **kwargs, full_target=state.state[i][:, :state.step+1])
state.step += 1
return data
class Transformer(torch.nn.Module):
def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6,
num_decoder_layers: int = 6, dim_feedforward: int = 2048, dropout: float = 0.1,
activation: ActivationFunction = F.relu, encoder_layer=TransformerEncoderLayer,
decoder_layer = TransformerDecoderLayer):
super().__init__()
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, d_model, nhead, dim_feedforward,
dropout, activation)
self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, d_model, nhead, dim_feedforward,
dropout, activation)
def forward(self, src: torch.Tensor, tgt: torch.Tensor, tgt_mask: Optional[torch.Tensor] = None,
src_length_mask: Optional[torch.Tensor] = None):
memory = self.encoder(src, src_length_mask)
return self.decoder(tgt, memory, tgt_mask, src_length_mask)
def generate_square_subsequent_mask(self, sz: int, device: torch.device) -> torch.Tensor:
return torch.triu(torch.ones(sz, sz, dtype=torch.bool, device=device), diagonal=1)
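# A small standalone illustration (it does not instantiate the model, since the
# layers/multi_head_attention modules above are project-local): the subsequent
# mask is an upper-triangular boolean matrix, True where attention is blocked.
if __name__ == "__main__":
    sz = 4
    mask = torch.triu(torch.ones(sz, sz, dtype=torch.bool), diagonal=1)
    print(mask)
    # tensor([[False,  True,  True,  True],
    #         [False, False,  True,  True],
    #         [False, False, False,  True],
    #         [False, False, False, False]])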
1603209
import tensorflow as tf
import time
def activation_function(act,act_input):
act_func = None
if act == "sigmoid":
act_func = tf.nn.sigmoid(act_input)
elif act == "tanh":
act_func = tf.nn.tanh(act_input)
elif act == "relu":
act_func = tf.nn.relu(act_input)
elif act == "elu":
act_func = tf.nn.elu(act_input)
elif act == "identity":
act_func = tf.identity(act_input)
else:
raise NotImplementedError("unsupported activation function: %s" % act)
return act_func
def getlocaltime():
date = time.strftime('%y-%m-%d', time.localtime())
current_time = time.strftime('%H:%M:%S', time.localtime())
return date, current_time
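# A minimal usage sketch (assumes a TF2/eager runtime; the tensor values are
# illustrative only).
if __name__ == "__main__":
    x = tf.constant([-1.0, 0.0, 2.0])
    print(activation_function("relu", x))  # relu -> [0., 0., 2.]
    print(getlocaltime())                  # -> ('yy-mm-dd', 'HH:MM:SS')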
1603219
import itertools
import logging
from typing import Any, Dict, List, Optional
import torch
from torch.utils.data import DataLoader
from probnmn.config import Config
from probnmn.data.datasets import JointTrainingDataset
from probnmn.data.samplers import SupervisionWeightedRandomSampler
from probnmn.models import (
ProgramPrior,
ProgramGenerator,
QuestionReconstructor,
NeuralModuleNetwork,
)
from probnmn.modules.elbo import JointTrainingElbo
from probnmn.utils.checkpointing import CheckpointManager
from ._trainer import _Trainer
logger: logging.Logger = logging.getLogger(__name__)
class JointTrainingTrainer(_Trainer):
r"""
Performs training for ``joint_training`` phase, using batches of training examples from
:class:`~probnmn.data.datasets.JointTrainingDataset`.
Parameters
----------
config: Config
A :class:`~probnmn.Config` object with all the relevant configuration parameters.
serialization_dir: str
Path to a directory for tensorboard logging and serializing checkpoints.
gpu_ids: List[int], optional (default=[0])
List of GPU IDs to use for evaluation, ``[-1]`` - use CPU.
cpu_workers: int, optional (default = 0)
Number of CPU workers to use for fetching batch examples in dataloader.
Examples
--------
>>> config = Config("config.yaml") # PHASE must be "joint_training"
>>> trainer = JointTrainingTrainer(config, serialization_dir="/tmp")
>>> evaluator = JointTrainingEvaluator(config, trainer.models)
>>> for iteration in range(100):
>>> trainer.step()
>>> # validation every 100 steps
>>> if iteration % 100 == 0:
>>> val_metrics = evaluator.evaluate()
>>> trainer.after_validation(val_metrics, iteration)
"""
def __init__(
self,
config: Config,
serialization_dir: str,
gpu_ids: List[int] = [0],
cpu_workers: int = 0,
):
self._C = config
if self._C.PHASE != "joint_training":
raise ValueError(
f"Trying to initialize a JointTrainingTrainer, expected config PHASE to be "
f"joint_training, found {self._C.PHASE}"
)
# Initialize dataloader and model.
dataset = JointTrainingDataset(
self._C.DATA.TRAIN_TOKENS,
self._C.DATA.TRAIN_FEATURES,
num_supervision=self._C.SUPERVISION,
supervision_question_max_length=self._C.SUPERVISION_QUESTION_MAX_LENGTH,
)
sampler = SupervisionWeightedRandomSampler(dataset)
dataloader = DataLoader(
dataset, batch_size=self._C.OPTIM.BATCH_SIZE, sampler=sampler, num_workers=cpu_workers
)
program_generator = ProgramGenerator.from_config(self._C)
question_reconstructor = QuestionReconstructor.from_config(self._C)
nmn = NeuralModuleNetwork.from_config(self._C)
# Load checkpoints from question_coding and module_training phases.
CheckpointManager(
program_generator=program_generator, question_reconstructor=question_reconstructor
).load(self._C.CHECKPOINTS.QUESTION_CODING)
CheckpointManager(nmn=nmn).load(self._C.CHECKPOINTS.MODULE_TRAINING)
super().__init__(
config=config,
dataloader=dataloader,
models={
"program_generator": program_generator,
"question_reconstructor": question_reconstructor,
"nmn": nmn,
},
serialization_dir=serialization_dir,
gpu_ids=gpu_ids,
)
# These will be a part of `self._models`, keep these handles for convenience.
self._program_generator = self._models["program_generator"]
self._question_reconstructor = self._models["question_reconstructor"]
self._nmn = self._models["nmn"]
# Load program prior from checkpoint, this will be frozen during question coding.
self._program_prior = ProgramPrior.from_config(self._C).to(self._device)
CheckpointManager(program_prior=self._program_prior).load(
self._C.CHECKPOINTS.PROGRAM_PRIOR
)
self._program_prior.eval()
# Instantiate an elbo module to compute evidence lower bound during `_do_iteration`.
self._elbo = JointTrainingElbo(
program_generator=self._program_generator,
question_reconstructor=self._question_reconstructor,
nmn=self._nmn,
program_prior=self._program_prior,
beta=self._C.BETA,
gamma=self._C.GAMMA,
baseline_decay=self._C.DELTA,
objective=self._C.OBJECTIVE,
)
def _do_iteration(self, batch: Dict[str, Any]) -> Dict[str, Any]:
# Separate out examples with supervision and without supervision, these two lists will be
# mutually exclusive.
supervision_indices = batch["supervision"].nonzero().squeeze()
no_supervision_indices = (1 - batch["supervision"]).nonzero().squeeze()
# Pick a subset of questions without (GT) program supervision, sample programs and pass
# through the neural module network.
question_tokens_no_supervision = batch["question"][no_supervision_indices]
image_features_no_supervision = batch["image"][no_supervision_indices]
answer_tokens_no_supervision = batch["answer"][no_supervision_indices]
# keys: {"reconstruction_likelihood", "kl_divergence", "elbo", "reinforce_reward",
# "nmn_loss"}
elbo_output_dict = self._elbo(
question_tokens_no_supervision,
image_features_no_supervision,
answer_tokens_no_supervision,
)
nmn_loss = elbo_output_dict.pop("nmn_loss")
loss_objective = self._C.GAMMA * nmn_loss - elbo_output_dict["elbo"]
if self._C.OBJECTIVE == "ours":
# ------------------------------------------------------------------------------------
# Supervision loss (program generator + question reconstructor):
# Ignore question reconstructor for "baseline" objective; its gradients do not
# interfere with the program generator anyway.
# \alpha * ( \log{q_\phi (z'|x')} + \log{p_\theta (x'|z')} )
program_tokens_supervision = batch["program"][supervision_indices]
question_tokens_supervision = batch["question"][supervision_indices]
# keys: {"predictions", "loss"}
pg_output_dict_supervision = self._program_generator(
question_tokens_supervision,
program_tokens_supervision,
decoding_strategy="sampling",
)
qr_output_dict_supervision = self._question_reconstructor(
program_tokens_supervision,
question_tokens_supervision,
decoding_strategy="sampling",
)
program_generation_loss_supervision = pg_output_dict_supervision["loss"].mean()
question_reconstruction_loss_supervision = qr_output_dict_supervision["loss"].mean()
# ------------------------------------------------------------------------------------
loss_objective = loss_objective + self._C.ALPHA * (
program_generation_loss_supervision + question_reconstruction_loss_supervision
)
loss_objective.backward()
# Clamp all gradients between (-5, 5)
for parameter in itertools.chain(
self._program_generator.parameters(),
self._question_reconstructor.parameters(),
self._nmn.parameters(),
):
if parameter.grad is not None:
parameter.grad.clamp_(min=-5, max=5)
iteration_output_dict = {"loss": {"nmn": nmn_loss}, "elbo": elbo_output_dict}
if self._C.OBJECTIVE == "ours":
iteration_output_dict["loss"].update(
{
"question_reconstruction_gt": question_reconstruction_loss_supervision,
"program_generation_gt": program_generation_loss_supervision,
}
)
return iteration_output_dict
def after_validation(self, val_metrics: Dict[str, Any], iteration: Optional[int] = None):
r"""
Set ``"metric"`` key in ``val_metrics``, this governs learning rate scheduling and keeping
track of best checkpoint (in ``super`` method). This metric will be answer accuracy.
Super method will perform learning rate scheduling, serialize checkpoint, and log all
the validation metrics to tensorboard.
Parameters
----------
val_metrics: Dict[str, Any]
Validation metrics of :class:`~probnmn.models.nmn.NeuralModuleNetwork`.
Returned by ``evaluate`` method of
:class:`~probnmn.evaluators.joint_training_evaluator.JointTrainingEvaluator`.
iteration: int, optional (default = None)
Iteration number. If ``None``, use the internal :attr:`self._iteration` counter.
"""
val_metrics["metric"] = val_metrics["nmn"]["answer_accuracy"]
super().after_validation(val_metrics, iteration)
1603226
from unittest.mock import (
Mock,
patch,
)
import pytest
from auctions.application.use_cases import PlacingBidUseCase
from auctions.application.use_cases.placing_bid import PlacingBidInputDto, PlacingBidOutputDto
from auctions.domain.entities import (
Auction,
Bid,
)
from auctions.domain.factories import get_dollars
from auctions.domain.value_objects import Money
@pytest.fixture()
def bidder_id() -> int:
return 1
@pytest.fixture()
def amount() -> Money:
return get_dollars('10.00')
@pytest.fixture()
def input_dto(exemplary_auction_id: int, bidder_id: int, amount: Money) -> PlacingBidInputDto:
return PlacingBidInputDto(bidder_id, exemplary_auction_id, amount)
def test_loads_auction_using_id(
exemplary_auction_id: int,
auctions_repo_mock: Mock,
input_dto: PlacingBidInputDto
) -> None:
PlacingBidUseCase().execute(input_dto)
auctions_repo_mock.get.assert_called_once_with(exemplary_auction_id)
def test_makes_an_expected_bid(
input_dto: PlacingBidInputDto,
auction: Auction
) -> None:
with patch.object(Auction, 'make_a_bid', wraps=auction.make_a_bid) as make_a_bid_mock:
PlacingBidUseCase().execute(input_dto)
make_a_bid_mock.assert_called_once_with(
Bid(id=None, amount=input_dto.amount, bidder_id=input_dto.bidder_id)
)
def test_saves_auction(
auctions_repo_mock: Mock,
auction: Auction,
input_dto: PlacingBidInputDto
) -> None:
PlacingBidUseCase().execute(input_dto)
auctions_repo_mock.save.assert_called_once_with(auction)
def test_notifies_winner(
email_gateway_mock: Mock,
auction: Auction,
input_dto: PlacingBidInputDto
) -> None:
PlacingBidUseCase().execute(input_dto)
email_gateway_mock.notify_about_winning_auction.assert_called_once_with(input_dto.auction_id, input_dto.bidder_id)
def test_presents_output_dto(
input_dto: PlacingBidInputDto,
placing_bid_output_boundary_mock: Mock,
auction: Auction,
) -> None:
PlacingBidUseCase().execute(input_dto)
desired_output_dto = PlacingBidOutputDto(is_winner=True, current_price=input_dto.amount)
placing_bid_output_boundary_mock.present.assert_called_once_with(desired_output_dto)
1603238
from unittest import mock
from django.contrib.auth.models import User
from django.urls import reverse
import pytest
from .. import settings
from ..forms import KeyRegistrationForm
from ..models import WebAuthnKey
def test_list_webauthn_keys(admin_client):
response = admin_client.get(reverse("kagi:webauthn-keys"))
assert list(response.context_data["webauthnkey_list"]) == []
assert response.status_code == 200
def test_webauthn_keys_str_return_the_username(admin_client):
user = User.objects.get(pk=1)
key = user.webauthn_keys.create(key_name="SoloKey", sign_count=0)
assert str(key) == "admin - SoloKey"
def test_add_webauthn_key(admin_client):
response = admin_client.get(reverse("kagi:add-webauthn-key"))
assert response.status_code == 200
assert isinstance(response.context_data["form"], KeyRegistrationForm)
def test_totp_device_deletion_works(admin_client):
user = User.objects.get(pk=1)
key = user.webauthn_keys.create(key_name="SoloKey", sign_count=0)
response = admin_client.get(reverse("kagi:webauthn-keys"))
assert response.status_code == 200
assert len(response.context_data["webauthnkey_list"]) == 1
response = admin_client.post(
reverse("kagi:webauthn-keys"), {"delete": "checked", "key_id": key.pk}
)
assert response.status_code == 302
assert response.url == reverse("kagi:webauthn-keys")
assert WebAuthnKey.objects.count() == 0
# Testing view begin activate
def test_begin_activate_return_user_credential_options(admin_client):
ukey = "<KEY>"
with mock.patch("kagi.views.api.util.generate_ukey", return_value=ukey):
response = admin_client.post(
reverse("kagi:begin-activate"), {"key_name": "SoloKey"}
)
assert response.status_code == 200
credential_options = response.json()
assert "challenge" in credential_options
assert credential_options["rp"] == {
"name": settings.RELYING_PARTY_NAME,
"id": settings.RELYING_PARTY_ID,
}
assert credential_options["user"] == {
"id": ukey,
"name": "admin",
"displayName": "",
"icon": settings.WEBAUTHN_ICON_URL,
}
assert "pubKeyCredParams" in credential_options
assert credential_options["extensions"] == {"webauthn.loc": True}
def test_begin_activate_fails_if_key_name_is_missing(admin_client):
response = admin_client.post(reverse("kagi:begin-activate"), {"key_name": ""})
assert response.status_code == 400
assert response.json() == {"errors": {"key_name": ["This field is required."]}}
# Testing view verify credential info
def test_webauthn_verify_credential_info(admin_client):
# Setup the session
response = admin_client.post(
reverse("kagi:begin-activate"), {"key_name": "SoloKey"}
)
credential_options = response.json()
challenge = credential_options["challenge"]
trusted_attestation_cert_required = (
settings.WEBAUTHN_TRUSTED_ATTESTATION_CERT_REQUIRED
)
self_attestation_permitted = settings.WEBAUTHN_SELF_ATTESTATION_PERMITTED
none_attestation_permitted = settings.WEBAUTHN_NONE_ATTESTATION_PERMITTED
with mock.patch("kagi.views.api.webauthn") as mocked_webauthn:
webauthn_registration_response = (
mocked_webauthn.WebAuthnRegistrationResponse.return_value
)
verify = webauthn_registration_response.verify.return_value
verify.public_key.decode.return_value = "public-key"
verify.credential_id.decode.return_value = "credential-id"
verify.sign_count = 0
response = admin_client.post(
reverse("kagi:verify-credential-info"), {"registration": "payload"}
)
mocked_webauthn.WebAuthnRegistrationResponse.assert_called_with(
settings.RELYING_PARTY_ID,
"http://testserver",
{"registration": ["payload"]},
challenge,
settings.WEBAUTHN_TRUSTED_CERTIFICATES,
trusted_attestation_cert_required,
self_attestation_permitted,
none_attestation_permitted,
uv_required=False,  # user verification not required
)
webauthn_registration_response.verify.assert_called_once()
assert response.status_code == 200
assert response.json() == {"success": "User successfully registered."}
def test_webauthn_verify_credential_info_fails_if_registration_is_invalid(admin_client):
# Setup the session
response = admin_client.post(
reverse("kagi:begin-activate"), {"key_name": "SoloKey"}
)
with mock.patch("kagi.views.api.webauthn") as mocked_webauthn:
webauthn_registration_response = (
mocked_webauthn.WebAuthnRegistrationResponse.return_value
)
verify = webauthn_registration_response.verify
verify.side_effect = ValueError("An error occurred")
response = admin_client.post(
reverse("kagi:verify-credential-info"), {"registration": "payload"}
)
assert response.status_code == 400
assert response.json() == {"fail": "Registration failed. Error: An error occurred"}
def test_webauthn_verify_credential_info_fails_if_credential_id_already_exists(
admin_client,
):
# Setup the session
response = admin_client.post(
reverse("kagi:begin-activate"), {"key_name": "SoloKey"}
)
# Create the WebAuthnKey
user = User.objects.get(pk=1)
user.webauthn_keys.create(
key_name="SoloKey", sign_count=0, credential_id="credential-id"
)
with mock.patch("kagi.views.api.webauthn") as mocked_webauthn:
webauthn_registration_response = (
mocked_webauthn.WebAuthnRegistrationResponse.return_value
)
verify = webauthn_registration_response.verify.return_value
verify.credential_id.decode.return_value = "credential-id"
response = admin_client.post(
reverse("kagi:verify-credential-info"), {"registration": "payload"}
)
assert response.status_code == 400
assert response.json() == {"fail": "Credential ID already exists."}
# Testing view begin assertion
@pytest.mark.django_db
def test_begin_assertion_return_user_credential_options(client):
# We need to create a couple of WebAuthnKey for our user.
user = User.objects.create_user("admin", "<EMAIL>", "admin")
user.webauthn_keys.create(
key_name="SoloKey 1",
sign_count=0,
credential_id="credential-id-1",
ukey="abcd",
public_key="pubkey1",
)
user.webauthn_keys.create(
key_name="SoloKey 2",
sign_count=0,
credential_id="credential-id-2",
ukey="efgh",
public_key="pubkey2",
)
ukey = "<KEY>"
challenge = "k31d65xGDFb0VUq4MEMXmWpuWkzPs889"
with mock.patch("kagi.views.api.util.generate_ukey", return_value=ukey):
with mock.patch(
"kagi.views.api.util.generate_challenge", return_value=challenge
):
# We authenticate with username/password
response = client.post(
reverse("kagi:login"), {"username": "admin", "password": "<PASSWORD>"}
)
assert response.status_code == 302
assert response.url == reverse("kagi:verify-second-factor")
with mock.patch("kagi.views.api.webauthn") as mocked_webauthn:
assertion_dict = {
"challenge": "tOOk7MPjGWlezrP6o6tGOXSH0ZesUREO",
"allowCredentials": [
{
"type": "public-key",
"id": "ePqP9Mi...512GSYg",
"transports": ["usb", "nfc", "ble", "internal"],
},
{
"type": "public-key",
"id": "qhibXokRKbPA...O1WW7nF",
"transports": ["usb", "nfc", "ble", "internal"],
},
],
"rpId": "localhost",
"timeout": 60000,
}
mocked_webauthn.WebAuthnAssertionOptions.return_value.assertion_dict = (
assertion_dict
)
response = client.post(reverse("kagi:begin-assertion"))
assert response.status_code == 200
assert response.json() == assertion_dict
# Testing view verify assertion
@pytest.mark.django_db
def test_verify_assertion_validates_the_user_webauthn_key(client):
# We need to create a couple of WebAuthnKey for our user.
user = User.objects.create_user("admin", "<EMAIL>", "admin")
user.webauthn_keys.create(
key_name="SoloKey",
sign_count=0,
credential_id="credential-id",
ukey="abcd",
public_key="pubkey",
)
response = client.post(
reverse("kagi:login"), {"username": "admin", "password": "<PASSWORD>"}
)
assert response.status_code == 302
assert response.url == reverse("kagi:verify-second-factor")
# We authenticate with username/password
challenge = "k31d65xGDFb0VUq4MEMXmWpuWkzPs889"
with mock.patch("kagi.views.api.util.generate_challenge", return_value=challenge):
response = client.post(reverse("kagi:begin-assertion"))
with mock.patch("kagi.views.api.webauthn") as mocked_webauthn:
webauthn_assertion_response = (
mocked_webauthn.WebAuthnAssertionResponse.return_value
)
verify = webauthn_assertion_response.verify
verify.return_value = 1
response = client.post(
reverse("kagi:verify-assertion"),
{"id": "credential-id", "assertion": "payload"},
)
mocked_webauthn.WebAuthnUser.assert_called_with(
"abcd",
"admin",
"",
settings.WEBAUTHN_ICON_URL,
"credential-id",
"pubkey",
0,
settings.RELYING_PARTY_ID,
)
webauthn_user = mocked_webauthn.WebAuthnUser.return_value
webauthn_assertion_response = mocked_webauthn.WebAuthnAssertionResponse
webauthn_assertion_response.assert_called_with(
webauthn_user,
{"id": ["credential-id"], "assertion": ["payload"]},
challenge,
"http://testserver",
uv_required=False,
)
assert response.status_code == 200
assert response.json() == {
"success": "Successfully authenticated as admin",
"redirect_to": reverse("kagi:two-factor-settings"),
}
# Are we truly logged in?
response = client.get(reverse("kagi:two-factor-settings"))
assert response.status_code == 200
# Testing view verify assertion
@pytest.mark.django_db
def test_verify_assertion_fails_if_missing_user_webauthn_key(client):
# We need to create a couple of WebAuthnKey for our user.
user = User.objects.create_user("admin", "<EMAIL>", "admin")
user.webauthn_keys.create(
key_name="SoloKey",
sign_count=0,
credential_id="wrong-id",
ukey="abcd",
public_key="pubkey",
)
response = client.post(
reverse("kagi:login"), {"username": "admin", "password": "<PASSWORD>"}
)
assert response.status_code == 302
assert response.url == reverse("kagi:verify-second-factor")
# We authenticate with username/password
challenge = "k31d65xGDFb0VUq4MEMXmWpuWkzPs889"
with mock.patch("kagi.views.api.util.generate_challenge", return_value=challenge):
response = client.post(reverse("kagi:begin-assertion"))
with mock.patch("kagi.views.api.webauthn") as mocked_webauthn:
webauthn_assertion_response = (
mocked_webauthn.WebAuthnAssertionResponse.return_value
)
verify = webauthn_assertion_response.verify
verify.return_value = 1
response = client.post(
reverse("kagi:verify-assertion"),
{"id": "credential-id", "assertion": "payload"},
)
assert response.status_code == 400
assert response.json() == {"fail": "Key does not exist."}
@pytest.mark.django_db
def test_verify_assertion_validates_the_assertion(client):
# We need to create a couple of WebAuthnKey for our user.
user = User.objects.create_user("admin", "<EMAIL>", "admin")
user.webauthn_keys.create(
key_name="SoloKey",
sign_count=0,
credential_id="credential-id",
ukey="abcd",
public_key="pubkey",
)
response = client.post(
reverse("kagi:login"), {"username": "admin", "password": "<PASSWORD>"}
)
assert response.status_code == 302
assert response.url == reverse("kagi:verify-second-factor")
# We authenticate with username/password
challenge = "k31d65xGDFb0VUq4MEMXmWpuWkzPs889"
response = client.get(reverse("kagi:verify-second-factor"))
assert response.status_code == 200
with mock.patch("kagi.views.api.util.generate_challenge", return_value=challenge):
response = client.post(reverse("kagi:begin-assertion"))
with mock.patch("kagi.views.api.webauthn") as mocked_webauthn:
webauthn_assertion_response = (
mocked_webauthn.WebAuthnAssertionResponse.return_value
)
verify = webauthn_assertion_response.verify
verify.side_effect = ValueError("An error occurred")
response = client.post(
reverse("kagi:verify-assertion"),
{"id": "credential-id", "assertion": "payload"},
)
assert response.status_code == 400
assert response.json() == {"fail": "Assertion failed. Error: An error occurred"}
1603281
import ipaddress
from collections import OrderedDict
import tldextract
import validators
def valid_domain(item):
if validators.domain(item) is not True:
return False
x = tldextract.extract(item)
return x.subdomain == ''
def valid_fqdn(item):
if validators.domain(item) is not True:
return False
x = tldextract.extract(item)
return x.subdomain != ''
def valid_ipv4_address(item):
try:
ipaddress.IPv4Address(item)
return True
except ValueError:
return False
def valid_ipv4_network(item):
try:
ipaddress.IPv4Network(item)
return True
except ValueError:
return False
def valid_ipv6_address(item):
try:
ipaddress.IPv6Address(item)
return True
except ValueError:
return False
def valid_ipv6_network(item):
try:
ipaddress.IPv6Network(item)
return True
except ValueError:
return False
validator_functions = OrderedDict()
validator_functions['ipv4_address'] = valid_ipv4_address
validator_functions['ipv4_network'] = valid_ipv4_network
validator_functions['ipv6_address'] = valid_ipv6_address
validator_functions['ipv6_network'] = valid_ipv6_network
validator_functions['url'] = validators.url
validator_functions['fqdn'] = valid_fqdn
validator_functions['domain'] = valid_domain
validator_functions['email'] = validators.email
validator_functions['mac_address'] = validators.mac_address
validator_functions['md5'] = validators.md5
validator_functions['sha1'] = validators.sha1
validator_functions['sha224'] = validators.sha224
validator_functions['sha256'] = validators.sha256
validator_functions['sha512'] = validators.sha512
validator_functions['pan'] = validators.card.card_number
def guess_item_type(item):
for item_type, function in validator_functions.items():
if function(item):
return item_type
return None
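# A short usage sketch; the expected labels reflect the ordered checks above and
# may vary with the installed validators/tldextract versions.
if __name__ == "__main__":
    for sample in ("192.168.0.1", "10.0.0.0/8", "example.com", "mail.example.com"):
        print(sample, "->", guess_item_type(sample))
    # roughly expected: ipv4_address, ipv4_network, domain, fqdn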
1603288
from PIL import Image, ImageDraw, ImageFont
from .conf import COLOR
from .util import getTemplatePath, comma, getrgb, get_scale
class COORDINATES():
MIN_BAR_SIZE = 3
MIN_AXIS_LABEL_WIDTH = 180
GAP_LABEL_AND_BAR = 1
def __init__(self, chrom, spos, epos, xscale, w, h, debug=False):
self.chrom = chrom
self.spos = spos
self.epos = epos
self.glen = self.epos - self.spos
self.font = None
self.font_size = 12
self.axisloc = "bottom" # or "top" or "middle"
self.bgcolor = "FFFFFF"
self.axiscolor = "000000"
self.labelcolor = "000000"
self.w = w
self.h = h
self.im = None
self.axis_pos_list = []
self.bar_size = 3
self.single_font_size = None
self.xscale = xscale
self.debug = debug
if self.debug:
self.h += 20
def set_font(self, font_size=12):
self.font_size = font_size
self.font = ImageFont.truetype(getTemplatePath('VeraMono.ttf'), font_size)
def resize_height(self):
self.single_font_size = self.font.getsize('C')
if self.axisloc == "middle":
self.h = max(self.h, self.single_font_size[1] + self.MIN_BAR_SIZE * 2)
self.bar_size = int((self.h - self.single_font_size[1]) / 2) - self.GAP_LABEL_AND_BAR * 2
else:
self.h = max(self.h, self.single_font_size[1] + self.MIN_BAR_SIZE)
self.bar_size = self.h - self.single_font_size[1] - self.GAP_LABEL_AND_BAR
def cal_axis(self):
min_base_number = int(self.MIN_AXIS_LABEL_WIDTH / (self.w/self.glen))
for k in [1, 5, 10, 20, 50, 100, 150, 200, 300, 400, 500, 800, 1000, 1500, 2000, 3000, 5000, 10000, 20000, 50000, 100000]:
if min_base_number <= k:
axis_base_unit = k
break
unitlen = 10 ** (len(str(axis_base_unit)) - 1)
axis_spos = int(self.spos/unitlen) * unitlen + axis_base_unit
self.axis_pos_list = range(axis_spos, self.epos, axis_base_unit)
self.axis_x_list = []
for posi in self.axis_pos_list:
self.axis_x_list.append(self.xscale.xmap[posi]['cpos'])
# self.axis_x_list.append(round((posi - self.spos) * self.scale_x - axis_base_unit/2, 0))
def draw(self, dr):
self.resize_height()
self.cal_axis()
x1 = 0
x2 = self.w
yi = 0
if self.axisloc == "top" or self.axisloc == "middle":
dr.line([(x1, yi), (x2, yi)], fill=getrgb(self.axiscolor), width=1)
for xi in self.axis_x_list:
dr.line([(xi, yi), (xi, yi + self.bar_size)], fill=getrgb(self.axiscolor), width=1)
yi += self.bar_size
for i, posi in enumerate(self.axis_pos_list):
pos1 = comma(posi)
xi = self.axis_x_list[i]
d = int(len(pos1) * self.single_font_size[0]) / 2
if self.axisloc == "middle":
dr.text((xi - d, yi), pos1, font=self.font, fill=getrgb(self.labelcolor))
else:
dr.text((xi - d, yi), pos1, font=self.font, fill=getrgb(self.labelcolor))
if self.debug:
for posi in range(self.spos, self.epos+1):
d = int(self.single_font_size[0]/2)
xi = self.xscale.xmap[posi]['cpos']
last_digit = str(posi)[-1]
dr.text((xi - d, yi + 20), last_digit, font=self.font, fill=getrgb(self.labelcolor))
pos_str = self.chrom
dr.text((1, yi), pos_str, font=self.font, fill=getrgb(self.labelcolor))
if self.axisloc == "bottom" or self.axisloc == "middle":
yi = self.h - 1
dr.line([(x1, yi), (x2, yi)], fill=getrgb(self.axiscolor), width=1)
for xi in self.axis_x_list:
dr.line([(xi, yi), (xi, yi - self.bar_size)], fill=getrgb(self.axiscolor), width=1)
def get_image(self):
if self.im is None:
if self.font is None:
self.set_font()
self.im = Image.new('RGBA', (self.w, self.h), getrgb(self.bgcolor))
dr = ImageDraw.Draw(self.im)
self.draw(dr)
return self.im
1603302
import logging
import os
from dart.client.python.dart_client import Dart
from dart.config.config import configuration
from dart.engine.emr.metadata import EmrActionTypes
from dart.model.action import Action, ActionData, ActionState
from dart.model.graph import SubGraphDefinition, EntityType, Relationship, Ref, SubGraphDefinitionData
from dart.model.subscription import Subscription, SubscriptionData
from dart.model.trigger import Trigger, TriggerData
from dart.model.workflow import Workflow, WorkflowData
from dart.trigger.subscription import subscription_batch_trigger
_logger = logging.getLogger(__name__)
def add_emr_engine_sub_graphs(config):
engine_config = config['engines']['emr_engine']
opts = engine_config['options']
dart = Dart(opts['dart_host'], opts['dart_port'], opts['dart_api_version'])
assert isinstance(dart, Dart)
_logger.info('saving emr_engine sub_graphs')
engine_id = None
for e in dart.get_engines():
if e.data.name == 'emr_engine':
engine_id = e.id
if not engine_id:
raise ValueError('emr_engine not found among registered engines')
subgraph_definitions = [
SubGraphDefinition(data=SubGraphDefinitionData(
name='consume_subscription_workflow',
description='Add to a datastore to create entities for loading a dataset on an ongoing basis',
engine_name='emr_engine',
related_type=EntityType.datastore,
related_is_a=Relationship.PARENT,
workflows=[
Workflow(id=Ref.workflow(1), data=WorkflowData(
name='emr-workflow-consume_subscription',
datastore_id=Ref.parent(),
engine_name='emr_engine',
)),
],
subscriptions=[
Subscription(id=Ref.subscription(1), data=SubscriptionData(
name='emr-subscription',
dataset_id=''
)),
],
triggers=[
Trigger(id=Ref.trigger(1), data=TriggerData(
name='emr-trigger-subscription-1G-batch',
trigger_type_name=subscription_batch_trigger.name,
workflow_ids=[Ref.workflow(1)],
args={
'subscription_id': Ref.subscription(1),
'unconsumed_data_size_in_bytes': 1000*1000*1000
}
)),
],
actions=[
Action(id=Ref.action(1), data=ActionData(
name='emr-action-consume_subscription',
action_type_name=EmrActionTypes.consume_subscription.name,
engine_name='emr_engine',
workflow_id=Ref.workflow(1),
state=ActionState.TEMPLATE,
args={'subscription_id': Ref.subscription(1)}
)),
]
))
]
for e in subgraph_definitions:
s = dart.save_subgraph_definition(e, engine_id)
_logger.info('created subgraph_definition: %s' % s.id)
if __name__ == '__main__':
add_emr_engine_sub_graphs(configuration(os.environ['DART_CONFIG']))
1603333
from typing import List
from .clip import Clip
from .follow import Follow
from .game import Game
from .model import Model
from .stream import Stream
from .user import User
from .video import Video
__all__: List[str] = [
    "Clip",
    "Follow",
    "Game",
    "Model",
    "Stream",
    "User",
    "Video",
]
1603380
from __future__ import division
import os
import sys
import cv2
import argparse
import glob
import math
import numpy as np
import matplotlib.pyplot as plt
from skimage import draw, transform
from scipy.optimize import minimize
from PIL import Image
import objs
import utils
#fp is in cam-ceil normal, height is in cam-floor normal
def data2scene(fp_points, height):
# cam-ceiling / cam-floor
scale = (height - 1.6) / 1.6
#layout_fp, fp_points = fit_layout(fp, scale=None, max_cor=12)
size = 512
ratio = 20/size
fp_points = fp_points.astype(float)
fp_points[0] -= size/2
fp_points[1] -= size/2
fp_points *= scale
fp_points[0] += size/2
fp_points[1] += size/2
fp_points = fp_points.astype(int)
scene = objs.Scene()
scene.cameraHeight = 1.6
scene.layoutHeight = float(height)
scene.layoutPoints = []
for i in range(fp_points.shape[1]):
fp_xy = (fp_points[:,i] - size/2) * ratio
xyz = (fp_xy[1], 0, fp_xy[0])
scene.layoutPoints.append(objs.GeoPoint(scene, None, xyz))
scene.genLayoutWallsByPoints(scene.layoutPoints)
scene.updateLayoutGeometry()
return scene
def f1_score(pred, gt):
TP = np.zeros(gt.shape); FP = np.zeros(gt.shape)
FN = np.zeros(gt.shape); TN = np.zeros(gt.shape)
TP[(pred==gt) & (pred == 1)] = 1
FP[(pred!=gt) & (pred == 1)] = 1
FN[(pred!=gt) & (gt == 1)] = 1
TN[(pred==gt) & (pred == 0)] = 1
TP = np.sum(TP); FP = np.sum(FP)
FN = np.sum(FN); TN = np.sum(TN)
precision = TP / (TP + FP)
recall = TP / (TP + FN)
accuracy = (TP + TN) / (gt.shape[0]*gt.shape[1])
f1_score = 2 / ((1 / precision) + (1 / recall))
return f1_score
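# Quick sanity check (not part of the original pipeline): two 2x2 masks that agree on
# 3 of 4 pixels give precision=1.0 and recall=0.5, hence an F1 of 2/3:
#   f1_score(np.array([[1, 0], [0, 0]]), np.array([[1, 1], [0, 0]]))  # ~0.667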
def fit_layout(data, max_cor=12):
ret, data_thresh = cv2.threshold(data, 0.5, 1,0)
data_thresh = np.uint8(data_thresh)
#data_img, data_cnt, data_heri = cv2.findContours(data_thresh, 1, 2)
data_cnt, data_heri = cv2.findContours(data_thresh, 1, 2)
data_cnt.sort(key=lambda x: cv2.contourArea(x), reverse=True)
sub_x, sub_y, w, h = cv2.boundingRect(data_cnt[0])
data_sub = data_thresh[sub_y:sub_y+h,sub_x:sub_x+w]
#data_img, data_cnt, data_heri = cv2.findContours(data_sub, 1, 2)
data_cnt, data_heri = cv2.findContours(data_sub, 1, 2)
data_cnt.sort(key=lambda x: cv2.contourArea(x), reverse=True)
data_cnt = data_cnt[0]
epsilon = 0.005*cv2.arcLength(data_cnt,True)
approx = cv2.approxPolyDP(data_cnt, epsilon,True)
x_lst = [0,]
y_lst = [0,]
for i in range(len(approx)):
p1 = approx[i][0]
p2 = approx[(i+1)%len(approx)][0]
if (p2[0]-p1[0]) == 0:
slope = 10
else:
slope = abs((p2[1]-p1[1]) / (p2[0]-p1[0]))
if slope <= 1:
s = int((p1[1] + p2[1])/2)
y_lst.append(s)
elif slope > 1:
s = int((p1[0] + p2[0])/2)
x_lst.append(s)
x_lst.append(data_sub.shape[1])
y_lst.append(data_sub.shape[0])
x_lst.sort()
y_lst.sort()
diag = math.sqrt(math.pow(data_sub.shape[1],2) + math.pow(data_sub.shape[0],2))
def merge_near(lst):
group = [[0,]]
for i in range(1, len(lst)):
if lst[i] - np.mean(group[-1]) < diag * 0.05:
group[-1].append(lst[i])
else:
group.append([lst[i],])
group = [int(np.mean(x)) for x in group]
return group
x_lst = merge_near(x_lst)
y_lst = merge_near(y_lst)
#print(x_lst)
#print(y_lst)
img = np.zeros((data_sub.shape[0],data_sub.shape[1],3))
for x in x_lst:
cv2.line(img,(x,0), (x,data_sub.shape[0]),(0,255,0),1)
for y in y_lst:
cv2.line(img,(0,y), (data_sub.shape[1],y),(255,0,0),1)
ans = np.zeros((data_sub.shape[0],data_sub.shape[1]))
for i in range(len(x_lst)-1):
for j in range(len(y_lst)-1):
sample = data_sub[y_lst[j]:y_lst[j+1] , x_lst[i]:x_lst[i+1]]
score = sample.mean()
if score >= 0.5:
ans[y_lst[j]:y_lst[j+1] , x_lst[i]:x_lst[i+1]] = 1
pred = np.uint8(ans)
#pred_img, pred_cnt, pred_heri = cv2.findContours(pred, 1, 3)
pred_cnt, pred_heri = cv2.findContours(pred, 1, 3)
polygon = [(p[0][1], p[0][0]) for p in pred_cnt[0][::-1]]
Y = np.array([p[0]+sub_y for p in polygon])
X = np.array([p[1]+sub_x for p in polygon])
fp_points = np.concatenate( (Y[np.newaxis,:],X[np.newaxis,:]), axis=0)
layout_fp = np.zeros(data.shape)
rr, cc = draw.polygon(fp_points[0], fp_points[1])
rr = np.clip(rr, 0, data.shape[0]-1)
cc = np.clip(cc, 0, data.shape[1]-1)
layout_fp[rr,cc] = 1
if False:
img = np.zeros((data_sub.shape[0],data_sub.shape[1],3))
for i in range(len(approx)):
p1 = approx[i][0]
p2 = approx[(i+1)%len(approx)][0]
slope = abs((p2[1]-p1[1]) / (p2[0]-p1[0]))
if slope <= 1:
cv2.line(img,(p1[0], p1[1]), (p2[0], p2[1]),(255,0,0),1)
elif slope > 1:
cv2.line(img,(p1[0], p1[1]), (p2[0], p2[1]),(0,255,0),1)
        #cv2.drawContours(img, [approx], 0, (0,255,0), 1)
fig = plt.figure()
plt.axis('off')
plt.imshow(img)
#plt.show()
fig.savefig('D:/CVPR/figure/post/002/contour2', bbox_inches='tight',transparent=True, pad_inches=0)
if False:
fig = plt.figure()
plt.axis('off')
plt.imshow(layout_fp)
fig.savefig('D:/CVPR/figure/post/002/layout_fp', bbox_inches='tight',transparent=True, pad_inches=0)
#plt.show()
if False:
fig = plt.figure()
plt.axis('off')
ax1 = fig.add_subplot(2,3,1)
ax1.imshow(data)
ax2 = fig.add_subplot(2,3,2)
ax2.imshow(data_thresh)
ax3 = fig.add_subplot(2,3,3)
ax3.imshow(data_sub)
ax4 = fig.add_subplot(2,3,4)
#data_sub = data_sub[:,:,np.newaxis]
#ax4.imshow(img + np.concatenate( (data_sub,data_sub,data_sub),axis=2) * 0.25)
ax4.imshow(img)
ax5 = fig.add_subplot(2,3,5)
ax5.imshow(ans)
ax6 = fig.add_subplot(2,3,6)
ax6.imshow(layout_fp)
plt.show()
return layout_fp, fp_points
'''
def fit_layout_old(data, max_cor=12):
#find max connective component
ret, data_thresh = cv2.threshold(data, 0.5, 1,0)
data_thresh = np.uint8(data_thresh)
data_img, data_cnt, data_heri = cv2.findContours(data_thresh, 1, 2)
data_cnt.sort(key=lambda x: cv2.contourArea(x), reverse=True)
# crop data sub as f1 true
sub_x, sub_y, w, h = cv2.boundingRect(data_cnt[0])
data_sub = data_thresh[sub_y:sub_y+h,sub_x:sub_x+w]
pred = np.ones(data_sub.shape)
min_score = 0.05
def optim(corid):
def loss(x):
h_, w_ = int(x[0]),int(x[1])
box = [[0,0,h_,w_], [0,w_,h_,w], [h_,w_,h,w], [h_,0,h,w_]]
sample = pred.copy()
sample[box[corid][0]:box[corid][2],
box[corid][1]:box[corid][3]] = 0
return -f1_score(sample, data_sub)
res_lst = []
for st in [0.1, 0.25]:
stp = [[h*st, w*st],[h*st, w*(1-st)],[h*(1-st), w*(1-st)],[h*(1-st), w*st]]
res = minimize(loss, np.array(stp[corid]), method='nelder-mead',
options={'xtol': 1e-8, 'disp': False})
res_lst.append(res)
res_lst.sort(key=lambda x: x.fun, reverse=False)
return res_lst[0]
######
res = optim(0)
ul = res.x.astype(int)
res = optim(1)
ur = res.x.astype(int)
res = optim(2)
dr = res.x.astype(int)
res = optim(3)
dl = res.x.astype(int)
print([ul, ur, dr, dl])
s_ul = ul[0]*ul[1] / (w*h)
s_ur = ur[0]*(w-ur[1]) / (w*h)
s_dr = (h-dr[0])*(w-dr[1]) / (w*h)
s_dl = (h-dl[0])*dl[1] / (w*h)
print([s_ul, s_ur, s_dr, s_dl])
sort_idx = list(np.argsort([s_ul, s_ur, s_dr, s_dl])[::-1])
assert max_cor in [4, 6, 8, 10, 12]
max_idx = (max_cor-4)/2
if s_ul > min_score and (sort_idx.index(0) < max_idx):
pred[0:int(ul[0]), 0:int(ul[1])] = 0
if s_ur > min_score and (sort_idx.index(1) < max_idx):
pred[0:int(ur[0]), int(ur[1]):w] = 0
if s_dr > min_score and (sort_idx.index(2) < max_idx):
pred[int(dr[0]):h, int(dr[1]):w] = 0
if s_dl > min_score and (sort_idx.index(3) < max_idx):
pred[int(dl[0]):h, 0:int(dl[1])] = 0
pred = np.uint8(pred)
pred_img, pred_cnt, pred_heri = cv2.findContours(pred, 1, 3)
polygon = [(p[0][1], p[0][0]) for p in pred_cnt[0][::-1]]
Y = np.array([p[0]+sub_y for p in polygon])
X = np.array([p[1]+sub_x for p in polygon])
fp_points = np.concatenate( (Y[np.newaxis,:],X[np.newaxis,:]), axis=0)
layout_fp = np.zeros(data.shape)
rr, cc = draw.polygon(fp_points[0], fp_points[1])
rr = np.clip(rr, 0, data.shape[0]-1)
cc = np.clip(cc, 0, data.shape[1]-1)
layout_fp[rr,cc] = 1
if False:
fig = plt.figure()
ax1 = fig.add_subplot(1,2,1)
ax1.imshow(data_sub)
ax2 = fig.add_subplot(1,2,2)
ax2.imshow(pred)
plt.show()
return layout_fp, fp_points
'''
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--i', required=True)
args = parser.parse_args()
data_path = args.i
#for filepath in glob.iglob(data_path + '/*.npy'):
#for i in range(404):
#for i in [91, 104, 145, 159, 167, 194, 215, 223, 253, 256, 261, 266, 300, 304, 357, 358]:
for i in [261]:
filepath = os.path.join(data_path, '{0}.npy'.format(i))
print(filepath)
data = np.load(filepath, encoding = 'bytes')[()]
#color = data['color']
#fp_floor = data['fp_floor']
fp_pred = data['pred_fp_merge']
layout_fp, fp_points = fit_layout(fp_pred)
#fit_layout(fp_pred)
#print(fp_points)
|
1603428
|
from ipwhois import IPWhois
from metadata import MetadataPlugin
class WhoIsPlugin(MetadataPlugin):
def __init__(self):
MetadataPlugin.__init__(self)
self.name = "WhoIs"
def run(self):
try:
ip_whois = IPWhois(self._dst_ip)
raw_res = ip_whois.lookup()
res = []
            for k, v in raw_res.items():
                if v is not None:
                    res.append("%s: %s" % (k, v))
            return ",".join(res)
        except Exception:
            return ""
|
1603471
|
import unittest
from prestans import exception
from prestans.parser import AttributeFilter
from prestans import types
class ModelUnitTest(unittest.TestCase):
def test_required(self):
class MyModel(types.Model):
pass
required_default = MyModel()
self.assertTrue(required_default._required)
required_true = MyModel(required=True)
self.assertTrue(required_true._required)
required_false = MyModel(required=False)
self.assertFalse(required_false._required)
def test_default(self):
pass
def test_description(self):
class MyModel(types.Model):
pass
description_default = MyModel()
self.assertIsNone(description_default._description)
description_value = MyModel(description="description")
self.assertEqual(description_value._description, "description")
def test_attribute_count(self):
class EmptyModel(types.Model):
pass
self.assertEqual(EmptyModel().attribute_count(), 0)
class BasicTypesOnly(types.Model):
name = types.String()
age = types.Integer()
self.assertEqual(BasicTypesOnly().attribute_count(), 2)
class ModelWithArray(types.Model):
name = types.String()
age = types.Integer()
tags = types.Array(element_template=types.String())
self.assertEqual(ModelWithArray().attribute_count(), 3)
class SubModel(types.Model):
pass
class ModelWithSub(types.Model):
name = types.String()
age = types.Integer()
sub = SubModel()
self.assertEqual(ModelWithSub().attribute_count(), 3)
class ModelWithSubAndArray(types.Model):
name = types.String()
age = types.Integer()
tags = types.Array(element_template=types.String())
sub = SubModel()
self.assertEqual(ModelWithSubAndArray().attribute_count(), 4)
def test_blueprint(self):
class MyModel(types.Model):
nick_name = types.String(required=True)
first_name = types.String(required=True)
last_name = types.String(required=False)
blueprint = MyModel(required=False, description="description").blueprint()
self.assertEqual(blueprint["type"], "model")
self.assertEqual(blueprint["constraints"]["required"], False)
self.assertEqual(blueprint["constraints"]["description"], "description")
self.assertEqual(blueprint["fields"]["nick_name"], MyModel.nick_name.blueprint())
self.assertEqual(blueprint["fields"]["first_name"], MyModel.first_name.blueprint())
self.assertEqual(blueprint["fields"]["last_name"], MyModel.last_name.blueprint())
def test_blueprint_bad_attribute(self):
class ModelWithBadAttribute(types.Model):
name = "string"
self.assertRaises(TypeError, ModelWithBadAttribute().blueprint)
def test_setattr(self):
class SubModel(types.Model):
string = types.String()
class MyModel(types.Model):
boolean = types.Boolean()
float = types.Float()
age = types.Integer(maximum=120)
name = types.String()
sub_model = SubModel()
sub_model = SubModel()
sub_model.string = "string"
self.assertEqual(sub_model.string, "string")
my_model = MyModel()
my_model.boolean = True
my_model.name = "name"
my_model.age = 21
my_model.sub_model = sub_model
self.assertEqual(my_model.boolean, True)
self.assertEqual(my_model.name, "name")
self.assertEqual(my_model.age, 21)
self.assertEqual(my_model.sub_model, sub_model)
self.assertEqual(my_model.sub_model.string, "string")
self.assertRaises(KeyError, my_model.__setattr__, "missing", "missing")
self.assertRaises(exception.ValidationError, my_model.__setattr__, "age", 121)
def test_create_instance_attributes(self):
class MyModel(types.Model):
string = types.String(default="default")
nothing = None
my_model = MyModel()
self.assertEqual(my_model.string, "default")
my_model = MyModel(string="string")
self.assertEqual(my_model.string, "string")
self.assertEqual(my_model.nothing, None)
def test_get_attribute_keys(self):
class MyModel(types.Model):
name = types.String()
tags = types.Array(element_template=types.String())
my_model = MyModel()
self.assertEqual(my_model.get_attribute_keys(), ["name", "tags"])
def test_get_attribute_filter_base(self):
attribute_filter = types.Model().get_attribute_filter()
self.assertEqual(attribute_filter.keys(), [])
def test_get_attribute_filter(self):
class SubModel(types.Model):
colour = types.String()
class MyModel(types.Model):
name = types.String()
sub = SubModel()
my_model = MyModel()
attribute_filter = my_model.get_attribute_filter(default_value=True)
self.assertTrue(attribute_filter.name)
self.assertTrue(attribute_filter.sub)
self.assertTrue(attribute_filter.sub.colour)
self.assertEqual(attribute_filter.keys(), ["name", "sub"])
def test_attribute_rewrite_map(self):
class MyModel(types.Model):
name = types.String()
first_name = types.String()
last_name = types.String()
rewrite_map = {
"first_name": "a_c",
"last_name": "b_c",
"name": "c"
}
my_model = MyModel()
self.assertEqual(my_model.attribute_rewrite_map(), rewrite_map)
def test_attribute_rewrite_reverse_map(self):
class MyModel(types.Model):
name = types.String()
first_name = types.String()
last_name = types.String()
reverse_map = {
"a_c": "first_name",
"b_c": "last_name",
"c": "name"
}
my_model = MyModel()
self.assertEqual(my_model.attribute_rewrite_reverse_map(), reverse_map)
def test_contains(self):
class SubModel(types.Model):
pass
# check if key can be found in model
class MyModel(types.Model):
name = types.String()
birthday = types.Date()
tags = types.Array(element_template=types.String())
sub = SubModel()
sub_array = types.Array(element_template=SubModel())
my_model = MyModel()
self.assertTrue("name" in my_model)
self.assertTrue("birthday" in my_model)
self.assertTrue("tags" in my_model)
self.assertTrue("sub" in my_model)
self.assertTrue("sub_array" in my_model)
self.assertFalse("missing"in my_model)
# check if keys can be found in model and base class
class ModelWithSingleBase(MyModel):
extra = types.String()
single_base = ModelWithSingleBase()
self.assertTrue("name" in single_base)
self.assertTrue("birthday" in single_base)
self.assertTrue("tags" in single_base)
self.assertTrue("sub" in single_base)
self.assertTrue("sub_array" in single_base)
self.assertTrue("extra" in single_base)
self.assertFalse("missing" in single_base)
class ModelWithMultiBase(ModelWithSingleBase):
another = types.String()
multi_base = ModelWithMultiBase()
self.assertTrue("name" in multi_base)
self.assertTrue("birthday" in multi_base)
self.assertTrue("tags" in multi_base)
self.assertTrue("sub" in multi_base)
self.assertTrue("sub_array" in multi_base)
self.assertTrue("extra" in multi_base)
self.assertTrue("another" in multi_base)
self.assertFalse("missing" in multi_base)
def test_generate_attribute_token_rewrite_map(self):
class MyModel(types.Model):
boolean = types.Boolean()
float = types.Float()
integer = types.Integer()
string = types.String()
my_model = MyModel()
rewrite_map = my_model.generate_attribute_token_rewrite_map()
self.assertEqual(
rewrite_map,
{
"boolean": "a",
"float": "b",
"integer": "c",
"string": "d"
}
)
def test_generate_attribute_tokens(self):
class MyModel(types.Model):
boolean = types.Boolean()
float = types.Float()
integer = types.Integer()
string = types.String()
my_model = MyModel()
tokens = my_model.generate_attribute_tokens()
self.assertEqual(tokens, ["boolean", "float", "integer", "string"])
def test_generate_minified_keys(self):
self.assertEqual(types.Model.generate_minified_keys(3), ["a", "b", "c"])
self.assertEqual(types.Model.generate_minified_keys(5), ["a", "b", "c", "d", "e"])
self.assertEqual(types.Model.generate_minified_keys(3, "_"), ["_a", "_b", "_c"])
self.assertEqual(types.Model.generate_minified_keys(5, "_"), ["_a", "_b", "_c", "_d", "_e"])
self.assertEqual(types.Model.generate_minified_keys(29), [
"a", "b", "c", "d", "e", "f", "g", "h", "i",
"j", "k", "l", "m", "n", "o", "p", "q", "r",
"s", "t", "u", "v", "w", "x", "y", "z",
"aa", "ab", "ac"
])
self.assertEqual(types.Model.generate_minified_keys(55), [
"a", "b", "c", "d", "e", "f", "g", "h", "i",
"j", "k", "l", "m", "n", "o", "p", "q", "r",
"s", "t", "u", "v", "w", "x", "y", "z",
"aa", "ab", "ac", "ad", "ae", "af", "ag", "ah", "ai",
"aj", "ak", "al", "am", "an", "ao", "ap", "aq", "ar",
"as", "at", "au", "av", "aw", "ax", "ay", "az",
"ba", "bb", "bc"
])
def test__generate_attribute_key(self):
self.assertEqual(types.Model.generate_attribute_key(0), "a")
self.assertEqual(types.Model.generate_attribute_key(1), "b")
self.assertEqual(types.Model.generate_attribute_key(25), "z")
self.assertEqual(types.Model.generate_attribute_key(26), "aa")
self.assertEqual(types.Model.generate_attribute_key(27), "bb")
self.assertEqual(types.Model.generate_attribute_key(51), "zz")
self.assertEqual(types.Model.generate_attribute_key(52), "aaa")
self.assertEqual(types.Model.generate_attribute_key(54), "ccc")
self.assertEqual(types.Model.generate_attribute_key(77), "zzz")
class ModelAsSerializable(unittest.TestCase):
def test_as_serializable(self):
from datetime import date
from datetime import datetime
from datetime import time
class SubModel(types.Model):
name = types.String()
class MyModel(types.Model):
boolean = types.Boolean()
float = types.Float()
integer = types.Integer()
string = types.String()
date = types.Date()
datetime = types.DateTime()
time = types.Time()
sub = SubModel()
my_model = MyModel()
my_model.boolean = True
my_model.float = 33.3
my_model.integer = 22
my_model.string = "string"
my_model.date = date(2018, 1, 18)
my_model.datetime = datetime(2018, 1, 18, 13, 14, 15)
my_model.time = time(12, 13, 14)
my_model.sub.name = "name"
serialized = my_model.as_serializable()
self.assertTrue(isinstance(serialized, dict))
self.assertEqual(serialized["boolean"], True)
self.assertEqual(serialized["float"], 33.3)
self.assertEqual(serialized["integer"], 22)
self.assertEqual(serialized["string"], "string")
self.assertEqual(serialized["date"], "2018-01-18")
self.assertEqual(serialized["datetime"], "2018-01-18 13:14:15")
self.assertEqual(serialized["time"], "12:13:14")
self.assertEqual(serialized["sub"]["name"], "name")
def test_as_serializable_minified(self):
from datetime import date
from datetime import datetime
from datetime import time
class SubModel(types.Model):
name = types.String()
class MyModel(types.Model):
boolean = types.Boolean()
date = types.Date()
datetime = types.DateTime()
float = types.Float()
integer = types.Integer()
string = types.String()
sub = SubModel()
time = types.Time()
my_model = MyModel()
my_model.boolean = True
my_model.float = 33.3
my_model.integer = 22
my_model.string = "string"
my_model.date = date(2018, 1, 18)
my_model.datetime = datetime(2018, 1, 18, 13, 14, 15)
my_model.time = time(12, 13, 14)
my_model.sub.name = "name"
serialized = my_model.as_serializable(minified=True)
self.assertTrue(isinstance(serialized, dict))
self.assertEqual(serialized["a"], True)
self.assertEqual(serialized["b"], "2018-01-18")
self.assertEqual(serialized["c"], "2018-01-18 13:14:15")
self.assertEqual(serialized["d"], 33.3)
self.assertEqual(serialized["e"], 22)
self.assertEqual(serialized["f"], "string")
self.assertEqual(serialized["g"]["a"], "name")
self.assertEqual(serialized["h"], "12:13:14")
def test_as_serializable_filtered_default_true(self):
from datetime import date
from datetime import datetime
from datetime import time
from prestans.parser import AttributeFilter
class SubModel(types.Model):
name = types.String()
class MyModel(types.Model):
boolean = types.Boolean()
date = types.Date()
datetime = types.DateTime()
float = types.Float()
integer = types.Integer()
string = types.String()
sub = SubModel()
time = types.Time()
my_model = MyModel()
my_model.boolean = True
my_model.float = 33.3
my_model.integer = 22
my_model.string = "string"
my_model.date = date(2018, 1, 18)
my_model.datetime = datetime(2018, 1, 18, 13, 14, 15)
my_model.time = time(12, 13, 14)
my_model.sub.name = "name"
attribute_filter = AttributeFilter.from_model(MyModel(), True)
attribute_filter.float = False
attribute_filter.string = False
serialized = my_model.as_serializable(attribute_filter=attribute_filter)
self.assertTrue(isinstance(serialized, dict))
self.assertEqual(serialized["boolean"], True)
self.assertTrue("float" not in serialized)
self.assertEqual(serialized["integer"], 22)
self.assertTrue("string" not in serialized)
self.assertEqual(serialized["date"], "2018-01-18")
self.assertEqual(serialized["datetime"], "2018-01-18 13:14:15")
self.assertEqual(serialized["time"], "12:13:14")
self.assertEqual(serialized["sub"]["name"], "name")
def test_as_serializable_filtered_default_false(self):
from datetime import date
from datetime import datetime
from datetime import time
from prestans.parser import AttributeFilter
class SubModel(types.Model):
name = types.String()
class MyModel(types.Model):
boolean = types.Boolean()
date = types.Date()
datetime = types.DateTime()
float = types.Float()
integer = types.Integer()
string = types.String()
sub = SubModel()
time = types.Time()
my_model = MyModel()
my_model.boolean = True
my_model.float = 33.3
my_model.integer = 22
my_model.string = "string"
my_model.date = date(2018, 1, 18)
my_model.datetime = datetime(2018, 1, 18, 13, 14, 15)
my_model.time = time(12, 13, 14)
my_model.sub.name = "name"
attribute_filter = AttributeFilter.from_model(MyModel(), False)
attribute_filter.float = True
attribute_filter.string = True
serialized = my_model.as_serializable(attribute_filter=attribute_filter)
self.assertEqual(serialized, {"float": 33.3, "string": "string"})
attribute_filter = AttributeFilter.from_model(MyModel(), False)
attribute_filter.sub.name = True
serialized = my_model.as_serializable(attribute_filter=attribute_filter)
self.assertEqual(serialized, {"sub": {"name": "name"}})
def test_as_serializable_filtered_only_child_of_type_model(self):
from prestans.parser import AttributeFilter
class SubModel(types.Model):
name = types.String()
class ParentModel(types.Model):
sub = SubModel()
attribute_filter = AttributeFilter.from_model(ParentModel(), False)
attribute_filter.sub.name = True
parent_model = ParentModel()
parent_model.sub.name = "james"
serialized = parent_model.as_serializable(attribute_filter=attribute_filter)
self.assertEqual(serialized, {"sub": {"name": "james"}})
def test_none_attributes_skips_further_checks(self):
class Person(types.Model):
first_name = types.String(required=True)
last_name = types.String(required=False)
person = Person(first_name="Carol")
serialized = person.as_serializable()
self.assertEqual(serialized["first_name"], "Carol")
self.assertEqual(serialized["last_name"], None)
class ModelValidate(unittest.TestCase):
def test_required_rejects_none(self):
class MyModel(types.Model):
pass
self.assertRaises(exception.RequiredAttributeError, MyModel(required=True).validate, None)
def test_required_rejects_non_dict_type(self):
class MyModel(types.Model):
pass
self.assertRaises(exception.RequiredAttributeError, MyModel(required=True).validate, False)
self.assertRaises(exception.RequiredAttributeError, MyModel(required=True).validate, 3)
self.assertRaises(exception.RequiredAttributeError, MyModel(required=True).validate, 3.33)
self.assertRaises(exception.RequiredAttributeError, MyModel(required=True).validate, "string")
def test_not_required_accepts_none(self):
class MyModel(types.Model):
pass
self.assertEqual(MyModel(required=False).validate(None), None)
def test_sets_none_for_invisible_attributes(self):
class MyModel(types.Model):
visible = types.String(default="visible")
invisible = types.String(default="invisible")
my_model = MyModel()
import logging
logging.error(my_model.visible)
logging.error(my_model.invisible)
self.assertEqual(my_model.visible, "visible")
self.assertEqual(my_model.invisible, "invisible")
attribute_filter = AttributeFilter.from_model(MyModel(), default_value=False)
attribute_filter.visible = True
validated = my_model.validate({}, attribute_filter)
self.assertEqual(validated.visible, "visible")
self.assertEqual(validated.invisible, None)
attribute_filter.visible = False
attribute_filter.invisible = True
validated = my_model.validate({}, attribute_filter)
self.assertIsNone(validated.visible)
self.assertEqual(validated.invisible, "invisible")
@unittest.skip(reason="these are ignored instead of raising TypeError since prestans 2.5.0")
def test_rejects_bad_attribute_type(self):
class MyModel(types.Model):
bad_attribute_type = "string"
self.assertRaises(TypeError, MyModel().validate, {})
def test_child_data_collection(self):
class ChildModel(types.Model):
age = types.Integer()
class ParentModel(types.Model):
name = types.String()
child = ChildModel()
parent_model = ParentModel()
parent_model.name = "Nathan"
parent_model.child.age = 30
validated = ParentModel().validate(parent_model.as_serializable())
self.assertEqual(validated.name, "Nathan")
self.assertEqual(validated.child.age, 30)
def test_child_model_filtered(self):
class ChildModel(types.Model):
child_name = types.String()
child_age = types.Integer()
class ParentModel(types.Model):
parent_name = types.String()
parent_percent = types.Float()
child = ChildModel()
parent_model = ParentModel()
parent_model.parent_name = "Nathan"
parent_model.parent_percent = 33.3
parent_model.child.child_name = "Steve"
parent_model.child.child_age = 30
parent_filter = AttributeFilter.from_model(ParentModel(default=False))
parent_filter.parent_name = True
parent_filter.parent_percent = True
parent_filter.child.child_name = True
parent_filter.child.child_age = True
validated = ParentModel().validate(parent_model.as_serializable(attribute_filter=parent_filter))
self.assertEqual(validated.parent_name, "Nathan")
self.assertEqual(validated.parent_percent, 33.3)
self.assertEqual(validated.child.child_name, "Steve")
self.assertEqual(validated.child.child_age, 30)
parent_filter.parent_name = False
parent_filter.child.child_name = False
validated = ParentModel().validate(
parent_model.as_serializable(attribute_filter=parent_filter),
attribute_filter=parent_filter
)
self.assertEqual(validated.parent_name, None)
self.assertEqual(validated.parent_percent, 33.3)
self.assertEqual(validated.child.child_name, None)
self.assertEqual(validated.child.child_age, 30)
def test_child_array_filtered(self):
class ChildModel(types.Model):
child_name = types.String()
child_age = types.Integer()
class ParentModel(types.Model):
parent_name = types.String()
parent_percent = types.Float()
children = types.Array(element_template=ChildModel())
parent_model = ParentModel()
parent_model.parent_name = "Nathan"
parent_model.parent_percent = 33.3
child_model = ChildModel()
child_model.child_name = "Steve"
child_model.child_age = 30
parent_model.children.append(child_model)
parent_filter = AttributeFilter.from_model(ParentModel(default=False))
parent_filter.parent_name = True
parent_filter.parent_percent = True
parent_filter.children.child_name = True
parent_filter.children.child_age = True
validated = ParentModel().validate(parent_model.as_serializable(attribute_filter=parent_filter))
self.assertEqual(validated.parent_name, "Nathan")
self.assertEqual(validated.parent_percent, 33.3)
self.assertEqual(validated.children[0].child_name, "Steve")
self.assertEqual(validated.children[0].child_age, 30)
parent_filter.parent_name = False
parent_filter.children.child_name = False
validated = ParentModel().validate(
parent_model.as_serializable(attribute_filter=parent_filter),
attribute_filter=parent_filter
)
self.assertEqual(validated.parent_name, None)
self.assertEqual(validated.parent_percent, 33.3)
self.assertEqual(validated.children[0].child_name, None)
self.assertEqual(validated.children[0].child_age, 30)
def test_multi_levels_of_array_filtered(self):
class ChildB(types.Model):
child_b_name = types.String()
child_b_age = types.Integer()
class ChildA(types.Model):
child_a_name = types.String()
child_a_age = types.Integer()
children = types.Array(element_template=ChildB())
class ParentModel(types.Model):
parent_name = types.String()
parent_percent = types.Float()
children = types.Array(element_template=ChildA())
parent_model = ParentModel()
parent_model.parent_name = "Nathan"
parent_model.parent_percent = 33.3
child_model_a = ChildA()
child_model_a.child_a_name = "Steve"
child_model_a.child_a_age = 30
child_model_b = ChildB()
child_model_b.child_b_name = "Betty"
child_model_b.child_b_age = 54
child_model_a.children.append(child_model_b)
parent_model.children.append(child_model_a)
parent_filter = AttributeFilter.from_model(ParentModel(default=False))
parent_filter.parent_name = True
parent_filter.parent_percent = True
parent_filter.children.child_a_name = True
parent_filter.children.child_a_age = True
parent_filter.children.children.child_b_name = True
parent_filter.children.children.child_b_age = True
validated = ParentModel().validate(parent_model.as_serializable(attribute_filter=parent_filter))
self.assertEqual(validated.parent_name, "Nathan")
self.assertEqual(validated.parent_percent, 33.3)
self.assertEqual(validated.children[0].child_a_name, "Steve")
self.assertEqual(validated.children[0].child_a_age, 30)
self.assertEqual(validated.children[0].children[0].child_b_name, "Betty")
self.assertEqual(validated.children[0].children[0].child_b_age, 54)
parent_filter.parent_name = False
parent_filter.children.child_a_name = False
parent_filter.children.children.child_b_name = False
validated = ParentModel().validate(
parent_model.as_serializable(attribute_filter=parent_filter),
attribute_filter=parent_filter
)
self.assertEqual(validated.parent_name, None)
self.assertEqual(validated.parent_percent, 33.3)
self.assertEqual(validated.children[0].child_a_name, None)
self.assertEqual(validated.children[0].child_a_age, 30)
self.assertEqual(validated.children[0].children[0].child_b_name, None)
self.assertEqual(validated.children[0].children[0].child_b_age, 54)
def test_minified_true(self):
class Person(types.Model):
first_name = types.String()
last_name = types.String()
person = Person(first_name="john", last_name="smith")
person_validated = person.validate(person.as_serializable(minified=True), minified=True)
self.assertEqual(person_validated.as_serializable(), {"first_name": "john", "last_name": "smith"})
self.assertEqual(person_validated.as_serializable(minified=True), {"a_c": "john", "b_c": "smith"})
def test_child_failing_to_validate_raises_validation_error(self):
class Person(types.Model):
first_name = types.String(required=True)
last_name = types.String(required=True)
person = Person(first_name="john")
self.assertRaises(exception.ValidationError, Person().validate, person.as_serializable())
|
1603562
|
import os
import torch
import logging
from model import DeepSpeech
class Observer(object):
'''
Train Observer base class.
'''
def __init__(self, logger):
self.logger = logger
def on_epoch_start(self, model, epoch): pass
def on_epoch_end(self, model, optimizer, epoch, loss_results, wer_results, cer_results): pass
def on_batch_start(self, model, epoch, batch_no): pass
def on_batch_end(self, model, optimizer, epoch, batch_no, loss_results, wer_results, cer_results, avg_loss): pass
def to_np(x):
return x.data.cpu().numpy()
class TensorboardWriter(Observer):
"""
Update Tensorboard at the end of each epoch
"""
def __init__(self, id, log_dir, log_params):
super().__init__(logging.getLogger('TensorboardWriter'))
os.makedirs(log_dir, exist_ok=True)
from tensorboardX import SummaryWriter
self.id = id
self.log_params = log_params
self.tensorboard_writer = SummaryWriter(log_dir)
def on_epoch_end(self, model, optimizer, epoch, loss_results, wer_results, cer_results):
self.logger.debug("Updating tensorboard for epoch {} {}".format(epoch + 1, loss_results))
values = {
'Avg Train Loss': loss_results[epoch],
'Avg WER': wer_results[epoch],
'Avg CER': cer_results[epoch],
}
self.tensorboard_writer.add_scalars(self.id, values, epoch + 1)
if self.log_params:
for tag, value in model.named_parameters():
tag = tag.replace('.', '/')
self.tensorboard_writer.add_histogram(tag, to_np(value), epoch + 1)
if value.grad is not None:
self.tensorboard_writer.add_histogram(tag + '/grad', to_np(value.grad), epoch + 1)
class CheckpointWriter(Observer):
"""
Save model checkpoint at the end of epoch
"""
def __init__(self, save_folder):
super().__init__(logging.getLogger('CheckpointWriter'))
self.logger.debug("CheckpointWriter")
self.save_folder = save_folder
os.makedirs(save_folder, exist_ok=True)
def on_epoch_end(self, model, optimizer, epoch, loss_results, wer_results, cer_results):
self.logger.debug("Saving checkpoint {}".format(epoch + 1))
file_path = '%s/deepspeech_%d.pth' % (self.save_folder, epoch + 1)
torch.save(DeepSpeech.serialize(model, optimizer=optimizer, epoch=epoch, loss_results=loss_results,
wer_results=wer_results, cer_results=cer_results),
file_path)
class CheckpointBatchWriter(Observer):
"""
Save model checkpoint every number of mini-batches
"""
def __init__(self, save_folder, checkpoint_per_batch):
super().__init__(logging.getLogger('CheckpointBatchWriter'))
self.logger.debug("CheckpointBatchWriter")
self.save_folder = save_folder
self.checkpoint_per_batch = checkpoint_per_batch
os.makedirs(save_folder, exist_ok=True)
def on_batch_end(self, model, optimizer, epoch, batch_no, loss_results, wer_results, cer_results, avg_loss):
if batch_no > 0 and (batch_no + 1) % self.checkpoint_per_batch == 0:
file_path = '%s/deepspeech_checkpoint_epoch_%d_iter_%d.pth' % (self.save_folder, epoch + 1, batch_no + 1)
self.logger.debug("Saving checkpoint model to %s" % file_path)
torch.save(DeepSpeech.serialize(model, optimizer=optimizer, epoch=epoch, iteration=batch_no,
loss_results=loss_results,
wer_results=wer_results, cer_results=cer_results, avg_loss=avg_loss),
file_path)
class VisdomWriter(Observer):
def __init__(self, id, epochs):
super().__init__(logging.getLogger('VisdomWriter'))
from visdom import Visdom
self.viz = Visdom()
self.opts = dict(title=id, ylabel='', xlabel='Epoch', legend=['Loss', 'WER', 'CER'])
self.viz_window = None
self.epochs = torch.arange(1, epochs + 1)
def on_epoch_end(self, model, optimizer, epoch, loss_results, wer_results, cer_results):
self.logger.debug('Updating Visdom')
x_axis = self.epochs[0:epoch + 1]
y_axis = torch.stack(
(loss_results[0:epoch + 1], wer_results[0:epoch + 1], cer_results[0:epoch + 1]), dim=1)
if self.viz_window is None:
self.viz_window = self.viz.line(
X=x_axis,
Y=y_axis,
opts=self.opts,
)
else:
self.viz.line(
X=x_axis.unsqueeze(0).expand(y_axis.size(1), x_axis.size(0)).transpose(0, 1), # Visdom fix
Y=y_axis,
win=self.viz_window,
update='replace',
)
|
1603585
|
from plugin.models.m_sync.result import SyncResult, SyncResultError, SyncResultException
from plugin.models.m_sync.status import SyncStatus
|
1603602
|
from backend.common.sitevars.sitevar import Sitevar
class NotificationsEnable(Sitevar[bool]):
@staticmethod
def key() -> str:
return "notifications.enable"
@staticmethod
def description() -> str:
return "For enabling/disabling all notifications"
@staticmethod
def default_value() -> bool:
return True
@classmethod
def notifications_enabled(cls) -> bool:
return cls.get()
@classmethod
def enable_notifications(cls, enable: bool) -> None:
cls.update(
should_update=lambda v: v is not enable,
update_f=lambda _: enable,
)
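# A minimal usage sketch based only on the classmethods above (not part of the original
# module); `send_push` stands in for whatever notification call a caller would make:
#
#   if NotificationsEnable.notifications_enabled():
#       send_push(...)
#   NotificationsEnable.enable_notifications(False)  # turn all notifications off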
|
1603618
|
from tortoise import Tortoise
from app.core import settings
async def init_db() -> None:
await Tortoise.init(
db_url=str(settings.DATABASE_URL), modules={"models": settings.APP_MODELS}
)
await Tortoise.generate_schemas()
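# A minimal sketch of running this coroutine from a plain asyncio entry point; an ASGI
# framework would normally call it from its own startup hook instead:
#
#   import asyncio
#   asyncio.run(init_db())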
|
1603620
|
import numpy as np
import tensorflow as tf
from util import xavier_init
class SparseAutoencoder(object):
def __init__(self, num_input, num_hidden, transfer_function=tf.nn.softplus, optimizer=tf.train.AdamOptimizer(),
scale=0.1):
self.num_input = num_input
self.num_hidden = num_hidden
self.transfer = transfer_function
self.scale = tf.placeholder(tf.float32)
self.training_scale = scale
network_weights = self._initialize_weights()
self.weights = network_weights
self.sparsity_level = np.repeat([0.05], self.num_hidden).astype(np.float32)
self.sparse_reg = 0.0
# model
self.x = tf.placeholder(tf.float32, [None, self.num_input])
        self.hidden_layer = self.transfer(tf.add(
            tf.matmul(self.x + self.scale * tf.random_normal((num_input,)), self.weights['w1']),
            self.weights['b1']))
self.reconstruction = tf.add(tf.matmul(self.hidden_layer, self.weights['w2']), self.weights['b2'])
# cost
self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0)) + self.sparse_reg \
* self.kl_divergence(
self.sparsity_level, self.hidden_layer)
self.optimizer = optimizer.minimize(self.cost)
init = tf.global_variables_initializer()
self.session = tf.Session()
self.session.run(init)
def _initialize_weights(self):
all_weights = dict()
all_weights['w1'] = tf.Variable(xavier_init(self.num_input, self.num_hidden))
all_weights['b1'] = tf.Variable(tf.zeros([self.num_hidden], dtype = tf.float32))
all_weights['w2'] = tf.Variable(tf.zeros([self.num_hidden, self.num_input], dtype = tf.float32))
all_weights['b2'] = tf.Variable(tf.zeros([self.num_input], dtype = tf.float32))
return all_weights
def partial_fit(self, X):
cost, opt = self.session.run((self.cost, self.optimizer), feed_dict = {self.x: X,
self.scale: self.training_scale
})
return cost
def kl_divergence_old(self, p, p_hat):
return tf.reduce_mean(p * tf.log(p) - p * tf.log(p_hat) + (1 - p) * tf.log(1 - p) - (1 - p) * tf.log(1 - p_hat))
    def kl_divergence(self, p, p_hat):
        # Bernoulli KL(p || p_hat); clip p_hat into (0, 1) so the logs stay finite.
        p_hat = tf.clip_by_value(p_hat, 1e-8, 1.0 - 1e-8)
        return tf.reduce_mean(p * tf.log(p / p_hat) + (1 - p) * tf.log((1 - p) / (1 - p_hat)))
def calculate_total_cost(self, X):
return self.session.run(self.cost, feed_dict = {self.x: X,
self.scale: self.training_scale
})
def transform(self, X):
return self.session.run(self.hidden_layer, feed_dict = {self.x: X,
self.scale: self.training_scale
})
def generate(self, hidden = None):
if hidden is None:
            hidden = np.random.normal(size=(1, self.num_hidden))
return self.session.run(self.reconstruction, feed_dict = {self.hidden_layer: hidden})
def reconstruct(self, X):
return self.session.run(self.reconstruction, feed_dict = {self.x: X,
self.scale: self.training_scale
})
def get_weights(self):
return self.session.run(self.weights['w1'])
def get_biases(self):
return self.session.run(self.weights['b1'])
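# A minimal smoke-test sketch, assuming TensorFlow 1.x (matching the graph-mode API used
# above) and purely synthetic data; the 784/200 layer sizes are arbitrary example values.
# Note that sparse_reg is hard-coded to 0.0 above, so the sparsity term is effectively off.
if __name__ == '__main__':
    sae = SparseAutoencoder(num_input=784, num_hidden=200)
    X = np.random.rand(64, 784).astype(np.float32)  # one synthetic mini-batch
    for _ in range(10):
        cost = sae.partial_fit(X)
    print('final batch cost:', sae.calculate_total_cost(X))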
|
1603634
|
from os.path import join, isdir
import glob
from subprocess import call
import numpy as np
from rastervision.common.utils import _makedirs
from rastervision.common.settings import VALIDATION
from rastervision.semseg.tasks.utils import (
make_prediction_img, plot_prediction, predict_x)
from rastervision.semseg.models.factory import SemsegModelFactory
MAKE_VIDEOS = 'make_videos'
def make_videos(run_path, options, generator):
model_factory = SemsegModelFactory()
videos_path = join(run_path, 'videos')
_makedirs(videos_path)
checkpoints_path = join(run_path, 'delta_model_checkpoints')
if not isdir(checkpoints_path):
print('Cannot make videos without delta_model_checkpoints.')
return
model_paths = glob.glob(join(checkpoints_path, '*.h5'))
model_paths.sort()
models = []
for model_path in model_paths:
model = model_factory.make_model(options, generator)
model.load_weights(model_path, by_name=True)
models.append(model)
split_gen = generator.make_split_generator(
VALIDATION, target_size=options.eval_target_size,
batch_size=1, shuffle=False, augment_methods=None, normalize=True,
only_xy=False)
for video_ind, batch in \
enumerate(split_gen):
x = np.squeeze(batch.x, axis=0)
y = np.squeeze(batch.y, axis=0)
display_y = generator.dataset.one_hot_to_rgb_batch(y)
all_x = np.squeeze(batch.all_x, axis=0)
make_video(
x, display_y, all_x, models, videos_path, video_ind,
options, generator)
if video_ind == options.nb_videos - 1:
break
def make_video(x, y, all_x, models, videos_path, video_ind, options,
generator):
video_path = join(videos_path, str(video_ind))
_makedirs(video_path)
for frame_ind, model in enumerate(models):
y_pred = make_prediction_img(
x, options.target_size[0],
lambda x: generator.dataset.one_hot_to_rgb_batch(
predict_x(x, model)))
print(video_ind)
print(frame_ind)
frame_path = join(
video_path, 'frame_{:0>4}.png'.format(frame_ind))
plot_prediction(generator, all_x, y, y_pred, frame_path)
frames_path = join(video_path, 'frame_%04d.png')
video_path = join(videos_path, '{}.mp4'.format(video_ind))
call(['avconv',
'-r', '2',
'-i', frames_path,
'-vf', 'scale=trunc(in_w/2)*2:trunc(in_h/2)*2',
video_path])
|
1603635
|
from PIL import Image
import pytest
from yoga.image.encoders import png
class Test_big_endian_uint32_bytes_to_python_int(object):
def test_uint32_value(self):
assert (
png.big_endian_uint32_bytes_to_python_int(b"\xAA\xBB\xCC\xDD")
== 0xAABBCCDD
)
class Test_python_int_to_big_endian_uint32_bytes(object):
def test_python_int_value(self):
assert (
png.python_int_to_big_endian_uint32_bytes(0xAABBCCDD)
== b"\xAA\xBB\xCC\xDD"
)
class Test_get_png_structure(object):
@pytest.fixture
def png_image(self):
return open("test/images/alpha.png", "rb").read()
def test_png_structure(self, png_image):
png_structure = png.get_png_structure(png_image)
assert png_structure["size"] == 2832
assert len(png_structure["chunks"]) == 6
assert png_structure["chunks"][0]["type"] == "IHDR"
assert png_structure["chunks"][1]["type"] == "sBIT"
assert png_structure["chunks"][2]["type"] == "pHYs"
assert png_structure["chunks"][3]["type"] == "tEXt"
assert png_structure["chunks"][4]["type"] == "IDAT"
assert png_structure["chunks"][5]["type"] == "IEND"
class Test_get_IHDR_info(object):
def test_width(self):
ihdr_info = png.get_IHDR_info(
b"\x00\x00\xC0\xFE\x00\x00\x00\xEE\x08\x00\x00\x00\x00"
# |width |height |bit|clr|cmp|flt|interlace
)
assert ihdr_info["width"] == 49406
def test_height(self):
ihdr_info = png.get_IHDR_info(
b"\x00\x00\x00\xFF\x00\x00\x00\xEE\x08\x00\x00\x00\x00"
# |width |height |bit|clr|cmp|flt|interlace
)
assert ihdr_info["height"] == 238
def test_bit_depth(self):
ihdr_info = png.get_IHDR_info(
b"\x00\x00\x00\xFF\x00\x00\x00\xEE\x08\x00\x00\x00\x00"
# |width |height |bit|clr|cmp|flt|interlace
)
assert ihdr_info["bit_depth"] == 8
@pytest.mark.parametrize(
"ihdr, id_,name",
[
# fmt: off
# .|width |height |bit|clr|cmp|flt|interlace
(b"\x00\x00\x00\xFF\x00\x00\x00\xEE\x08\x00\x00\x00\x00", 0, "grayscale"),
(b"\x00\x00\x00\xFF\x00\x00\x00\xEE\x08\x02\x00\x00\x00", 2, "truecolour"),
(b"\x00\x00\x00\xFF\x00\x00\x00\xEE\x08\x03\x00\x00\x00", 3, "indexed-colour"),
(b"\x00\x00\x00\xFF\x00\x00\x00\xEE\x08\x04\x00\x00\x00", 4, "grayscale-alpha"),
(b"\x00\x00\x00\xFF\x00\x00\x00\xEE\x08\x06\x00\x00\x00", 6, "truecolour-alpha"),
# fmt: on
],
)
def test_colour_type(self, ihdr, id_, name):
ihdr_info = png.get_IHDR_info(ihdr)
assert ihdr_info["colour_type"] == id_
assert ihdr_info["colour_type_str"] == name
def test_compression_method(self):
ihdr_info = png.get_IHDR_info(
b"\x00\x00\x00\xFF\x00\x00\x00\xEE\x08\x00\x00\x00\x00"
# |width |height |bit|clr|cmp|flt|interlace
)
assert ihdr_info["compression_method"] == 0
assert ihdr_info["compression_method_str"] == "deflate"
def test_filter_method(self):
ihdr_info = png.get_IHDR_info(
b"\x00\x00\x00\xFF\x00\x00\x00\xEE\x08\x00\x00\x00\x00"
# |width |height |bit|clr|cmp|flt|interlace
)
assert ihdr_info["filter_method"] == 0
assert ihdr_info["filter_method_str"] == "adaptative"
@pytest.mark.parametrize(
"ihdr, id_,name",
[
# fmt: off
# .|width |height |bit|clr|cmp|flt|interlace
(b"\x00\x00\x00\xFF\x00\x00\x00\xEE\x08\x00\x00\x00\x00", 0, "no-interlace"),
(b"\x00\x00\x00\xFF\x00\x00\x00\xEE\x08\x00\x00\x00\x01", 1, "Adam7"),
# fmt: on
],
)
def test_interlace_method(self, ihdr, id_, name):
ihdr_info = png.get_IHDR_info(ihdr)
assert ihdr_info["interlace_method"] == id_
assert ihdr_info["interlace_method_str"] == name
class Test_assemble_png_from_chunks(object):
def test_assemble_png_from_chunks(self):
chunks = [
{
"type": "IHDR",
"data": b"\x00\x00\x00\x78\x00\x00\x00\x78\x08\x06\x00\x00\x00",
},
{
"type": "tEXt",
"data": b"Foo\0Bar",
},
{
"type": "IDAT",
"data": b"\xAA\xBB",
},
{
"type": "IEND",
"data": b"",
},
]
expected_png = b"\x89PNG\r\n\x1A\n"
# IHDR
expected_png += b"\x00\x00\x00\x0D" # length
expected_png += b"IHDR" # type
expected_png += b"\x00\x00\x00\x78\x00\x00\x00\x78\x08\x06\x00\x00\x00"
expected_png += b"\x39\x64\x36\xD2" # CRC
# tEXt
expected_png += b"\x00\x00\x00\x07" # length
expected_png += b"tEXt" # type
expected_png += b"Foo\0Bar" # data
expected_png += b"\xC8\x97\x2E\x75" # CRC
# IDAT
expected_png += b"\x00\x00\x00\x02" # length
expected_png += b"IDAT" # type
expected_png += b"\xAA\xBB" # data
expected_png += b"\x74\xA0\x83\xDD" # CRC
# IEND
expected_png += b"\x00\x00\x00\x00" # length
expected_png += b"IEND" # type
expected_png += b"\xAE\x42\x60\x82" # CRC
assert png.assemble_png_from_chunks(chunks) == expected_png
class Test_is_png(object):
def test_png_file(self):
with open("test/images/alpha.png", "rb") as image_file:
image_data = image_file.read()
assert png.is_png(image_data) is True
def test_jpeg_file(self):
with open("test/images/image1.jpg", "rb") as image_file:
image_data = image_file.read()
assert png.is_png(image_data) is False
class Test_optimize_png(object):
@pytest.mark.parametrize(
"filename",
[
"test/images/edgecases/calibre-gui.png",
"test/images/edgecases/keepassxc.png",
"test/images/edgecases/vlc.png",
],
)
def test_output_png_never_larger_than_input_png(self, filename):
with open(filename, "rb") as image_file:
image_data = image_file.read()
image_file.seek(0)
image = Image.open(image_file)
output = png.optimize_png(image, image_data)
assert len(output) <= len(image_data)
|
1603660
|
from collections import OrderedDict
from .utility import Utility
from .base import OperatorLayerBase
class Dropout(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
assert (mod == "torch.nn.functional")
assert (op == "dropout")
#assert (len(args) == 1)
self.shape = args[0]['shape']
self.type = args[0]['dtype']
self.dir = d.dir
return
def params(self):
p = OrderedDict([('T', self.shape), ('type', self.type)])
return p
def op(self):
return self.op_
def mod(self):
return self.mod_
def tc(self):
return "-"
def elems(self):
return Utility.numElems(self.shape)
def bytes(self):
#Ignoring the cost of writing and reading the mask
return Utility.typeToBytes(self.type) * self.elems() * 2
def flops(self):
# Note: This is approximate and depends on the RNG
return 5*self.elems()
|
1603694
|
from django.test import Client, TestCase
from slurpee.models import ExternalData
from slurpee.constants import P_OVERLAY
from systems.tests.utils import create_fake_host
import simplejson as json
class ExternalDataTests(TestCase):
def setUp(self):
serial = 'asdf'
self.external_serial = serial + 'asdf'
self.s = create_fake_host(
hostname='fakehost.mozilla.com', serial=serial
)
ExternalData.objects.create(
system=self.s,
name='serial',
source_name='serial',
data=self.external_serial, # conflict data
source='foo-source',
policy=P_OVERLAY
)
self.c = Client()
def test_conflicts_page(self):
"""Animals that can speak are correctly identified"""
resp = self.c.get(
"/slurpee/conflicts/?search={0}".format(self.s.hostname),
follow=True
)
self.assertEqual(200, resp.status_code)
def test_sync(self):
"""Animals that can speak are correctly identified"""
resp = self.c.post("/en-US/systems/sync_external_data/", {
'attr': 'serial',
'source': 'foo-source',
'system_pk': self.s.pk
})
self.assertEqual(200, resp.status_code, json.loads(resp.content))
# Refresh the object cache
s = self.s.__class__.objects.get(pk=self.s.pk)
self.assertEqual(self.external_serial, s.serial)
|
1603736
|
import os
from backend.settings import logging, logger, static_path
ACL_MODEL = 'standard'
logger = logging.getLogger('.'.join([logger.name, 'acl']))
controller_static_path = os.path.join(static_path, "controller")
interfaces_path = os.path.join(controller_static_path, "interfaces")
acl_path = os.path.join(controller_static_path, "acl")
RESERVED_ACL_NAMES = ['dont_touch']
|
1603773
|
from collections import deque
d = deque()
N = int(input())
for _ in range(N):
cmd, *args = input().split()
getattr(d, cmd)(*args)
print(*d)
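# Example session (hypothetical input in the expected "<method> [arg]" format):
#   input : 3, then "append 1", "appendleft 2", "pop"
#   output: 2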
|
1603789
|
import numpy as np
# pythran export calculate_z(int, complex[], complex[], int[])
def calculate_z(maxiter, zs, cs, output):
"""Calculate output list using Julia update rule"""
# omp parallel for schedule(guided)
for i in range(len(zs)):
n = 0
z = zs[i]
c = cs[i]
# while n < maxiter and abs(z) < 2:
while n < maxiter and (z.real * z.real + z.imag * z.imag) < 4:
z = z * z + c
n += 1
output[i] = n
return output
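# A minimal usage sketch (works in plain CPython too, before compiling with pythran);
# the grid points and the Julia constant below are arbitrary example values:
#   zs = np.array([0 + 0j, 0.3 + 0.3j, 1 + 1j, 2 + 2j])
#   cs = np.full_like(zs, -0.62772 - 0.42193j)
#   counts = calculate_z(300, zs, cs, np.zeros(len(zs), dtype=np.int32))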
|
1603807
|
def extractYurikatransWordpressCom(item):
'''
Parser for 'yurikatrans.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
titlemap = [
('Arms Otome Ch', 'Arms Otome', 'translated'),
('Levelmaker', 'Levelmaker -Raising Levels While Living in Another World-', 'translated'),
('The Strongest Fairy', 'Is the strongest in another world a hero? a demon lord? No! it’s a fairy desu!', 'translated'),
('Eiyuu no Musume Chapter ', 'Eiyuu no Musume to Shite Umarekawatta Eiyuu wa Futatabi Eiyuu o Mezasu (WN) ', 'translated'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
tagmap = [
('The Pebble', 'I am just a〈Former〉Pebble! ~The Healing Golem and Monster User~', 'translated'),
('The Strongest Fairy', 'Is the strongest in another world a hero? a demon lord? No! it’s a fairy desu!', 'translated'),
('Levelmaker', 'Levelmaker -Raising Levels While Living in Another World-', 'translated'),
('I Reincarnated', 'I Was Reincarnated but I Don\'t Know Why', 'translated'),
('<NAME>', 'Vampire Yukine\'s Otherworld Journey', 'translated'),
('Fief Strengthening', 'Fief Strengthening', 'translated'),
('<NAME>', 'Vamp<NAME>', 'translated'),
('Arms Otome', 'Arms Otome', 'translated'),
('Eiyuu Musume', 'Eiyuu no Musume to Shite Umarekawatta Eiyuu wa Futatabi Eiyuu o Mezasu (WN)', 'translated'),
('eiyuu no musume', 'Eiyuu no Musume to Shite Umarekawatta Eiyuu wa Futatabi Eiyuu o Mezasu (WN)', 'translated'),
('Goddess', 'Being Recognized as an Evil God, I Changed My Job to Guardian Deity of the Beastmen Country', 'translated'),
('dungeon master', 'I Became a 《Dungeon Master》 In a Different World', 'translated'),
('Daybreak Summoner', 'Daybreak Summoner ~I will protect that girl who summoned me into this world with everything I’ve got~!', 'translated'),
('Luggage Carrier Dragon Slayer', 'Luggage Carrier Dragon Slayer!', 'translated'),
('Kansutoppu', 'Kansutoppu!', 'translated'),
('saint mari', 'Emblem of the Incarnated Saint and the Dragon ~The Airheaded Goddess aims to be a top Adventurer~', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
1603808
|
import numpy as np
def softmax_func(x):
"""
Numerically stable softmax function. For more details
about numerically calculations please refer:
http://www.deeplearningbook.org/slides/04_numerical.pdf
:param x:
:return:
"""
stable_values = x - np.max(x, axis=1, keepdims=True)
return np.exp(stable_values) / np.sum(np.exp(stable_values), axis=1, keepdims=True)
def log_sum_exp(x):
"""
log_sum_exp is a very useful function in machine learning.
It can be seen in many places including cross-entropy error.
However, the naive implementation is numerically unstable.
Therefore, we use the following implementation. For more details
please refer: http://www.deeplearningbook.org/slides/04_numerical.pdf
:param x:
:return:
"""
mx = np.max(x, axis=1, keepdims=True)
safe = x - mx
return mx + np.log(np.sum(np.exp(safe), axis=1, keepdims=True))
# Following two methods were used in the initial version of the convolution operations.
# Later we introduced fast Cython versions of `im2col` and `col2im` implementations.
# Hence, these two methods are obsolete.
def im2col(image, filter_size=(3, 3), padding=(0, 0), stride=(1, 1)):
M, C, h, w, = image.shape
filter_height = filter_size[0]
filter_width = filter_size[1]
padding_height = padding[0]
padding_width = padding[1]
stride_height = stride[0]
stride_width = stride[1]
x_padded = np.pad(image, ((0, 0),
(0, 0),
(padding_height, padding_height),
(padding_width, padding_width)),
mode='constant')
h_new = int((h - filter_height + 2 * padding_height) / stride_height + 1)
w_new = int((w - filter_width + 2 * padding_width) / stride_width + 1)
out = np.zeros((filter_width * filter_height * C, M * h_new * w_new), dtype=image.dtype)
itr = 0
for i in range(h_new):
for j in range(w_new):
for m in range(M):
                start_i = stride_height * i
                end_i = stride_height * i + filter_height
                start_j = stride_width * j
                end_j = stride_width * j + filter_width
out[:, itr] = x_padded[m, :, start_i:end_i, start_j:end_j].ravel()
itr += 1
return out
def col2im(cols, x_shape, filter_size=(3, 3), padding=(0, 0), stride=(1, 1)):
N, C, H, W = x_shape
filter_height = filter_size[0]
filter_width = filter_size[1]
padding_height = padding[0]
padding_width = padding[1]
stride_height = stride[0]
stride_width = stride[1]
H_padded, W_padded = H + 2 * padding_height, W + 2 * padding_width
x_padded = np.zeros((N, C, H_padded, W_padded), dtype=cols.dtype)
idx = 0
for i in range(0, H_padded - filter_height + 1, stride_height):
for j in range(0, W_padded - filter_width + 1, stride_width):
for m in range(N):
col = cols[:, idx]
col = col.reshape((C, filter_height, filter_width))
x_padded[m, :, i:i + filter_height, j:j + filter_width] += col
idx += 1
    # Strip the padding; a zero pad keeps the full extent of that axis.
    h_slice = slice(padding_height, -padding_height if padding_height else None)
    w_slice = slice(padding_width, -padding_width if padding_width else None)
    return x_padded[:, :, h_slice, w_slice]
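# A small sanity check (not part of the original module): the stabilised versions stay
# finite on large inputs where the naive formulas would overflow.
if __name__ == '__main__':
    x = np.array([[1.0, 2.0, 3.0], [1000.0, 1000.0, 1000.0]])
    print(softmax_func(x).sum(axis=1))  # each row sums to 1
    print(log_sum_exp(x))               # second row is ~1000 + log(3), not inf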
|
1603828
|
import logging
from rich.logging import RichHandler
from telethon import TelegramClient
from tgpy.api import API
from tgpy.app_config import Config
from tgpy.console import console
from tgpy.context import Context
__version__ = "0.4.1"
logging.basicConfig(
level=logging.INFO, format='%(message)s', datefmt="[%X]", handlers=[RichHandler()]
)
class App:
config: Config = None
client: TelegramClient = None
api: API = None
ctx: Context
def __init__(self):
self.ctx = Context()
self.api = API()
app = App()
|
1603861
|
def get_provider_info():
return {
"package-name": "airflow-provider-fivetran",
"name": "Fivetran Provider",
"description": "A Fivetran provider for Apache Airflow.",
"hook-class-names": ["fivetran_provider.hooks.fivetran.FivetranHook"],
"extra-links":["fivetran_provider.operators.fivetran.RegistryLink"],
"versions": ["1.0.1"]
}
|
1603869
|
from ctypes import *
libupper = CDLL("/Users/below/lib/libup.dylib")
libupper.mytoupper.argtypes = [c_char_p, c_char_p]
libupper.mytoupper.restype = c_int
inStr = create_string_buffer(b"This is a test!")
outStr = create_string_buffer(250)
length = libupper.mytoupper(inStr, outStr)
print(inStr.value)
print(outStr.value)
print(length)
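# For reference, the C prototype this script assumes (inferred from the argtypes and
# restype set above, not from the library itself) is roughly:
#   int mytoupper(const char *in, char *out);
# The dylib path is machine-specific and would need to be adjusted locally.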
|
1603886
|
import os
import json
import time
import jsonpatch
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from basicauth import encode
from retrying import retry
from dart.model.action import Action, ActionState
from dart.model.dataset import Dataset
from dart.model.datastore import Datastore
from dart.model.engine import Engine, ActionContext
from dart.model.event import Event
from dart.model.exception import DartRequestException
from dart.model.graph import Graph, SubGraphDefinition
from dart.model.query import Filter
from dart.model.query import Operator
from dart.model.subscription import Subscription, SubscriptionElementStats, SubscriptionState, SubscriptionElement
from dart.model.trigger import Trigger, TriggerType
from dart.model.workflow import Workflow, WorkflowInstance, WorkflowInstanceState
from dart.config.config import configuration
from dart.util.nudge_requests import make_nudge_request
config_path = os.environ['DART_CONFIG']
config = configuration(config_path)
auth_config = config['auth']
# Disable 'InsecureRequestWarning: Unverified HTTPS request is being made...' warnings in local dev mode.
if auth_config.get('use_auth') and (auth_config.get('dart_server') == 'https://localhost:5000'):
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class Dart(object):
def __init__(self, host, port=80, api_version=1):
self._host = host
self._port = port
self._api_version = api_version
self._base_url = '://%s:%s/api/%s' % (self._host, self._port, self._api_version)
# We cannot access the user-id from the session (since no session exists).
        # As a result we cannot access the apikey/secret for the user in the db.
        # This makes sense since the client should have already retrieved its keys from the database.
# We will expose the key/secret via config variables.
# These dart_client_key/dart_client_secret should be used by the dart_client only.
# These values will be set during web worker startup (call to server.py) in user/apikey tables.
if auth_config.get('use_auth'):
if auth_config.get('dart_client_key') and auth_config.get('dart_client_secret'):
self._credential = auth_config.get('dart_client_key')
self._secret = auth_config.get('dart_client_secret')
self._base_url = 'https' + self._base_url
else:
raise DartRequestException("dart_client_key and dart_client_secret must both exist.")
else:
            # The credential/secret default values are set in order to prevent an exception while calculating the hmac.
# The base url is http since no https end point will exist (local dev only)
self._credential = "cred"
self._secret = "secret"
self._base_url = 'http' + self._base_url
def save_engine(self, engine):
""" :type engine: dart.model.engine.Engine
:rtype: dart.model.engine.Engine """
if engine.id:
return self._request('put', '/engine/%s' % engine.id, data=engine.to_dict(), model_class=Engine)
return self._request('post', '/engine', data=engine.to_dict(), model_class=Engine)
def get_engine(self, engine_id):
""" :type engine_id: str
:rtype: dart.model.engine.Engine """
return self._request('get', '/engine/%s' % engine_id, model_class=Engine)
def get_engines(self):
""" :rtype: list[dart.model.engine.Engine] """
return self._request_list('get', '/engine', model_class=Engine)
def save_subgraph_definition(self, subgraph_definition, engine_id):
""" :type engine_id: str
:type subgraph_definition: dart.model.graph.SubGraphDefinition
:rtype: dart.model.graph.SubGraphDefinition """
return self._request('post', '/engine/%s/subgraph_definition' % engine_id, data=subgraph_definition.to_dict(),
model_class=SubGraphDefinition)
def get_subgraph_definition(self, subgraph_definition_id):
""" :type subgraph_definition_id: str
        :rtype: dart.model.graph.SubGraphDefinition """
return self._request('get', '/subgraph_definition/%s' % subgraph_definition_id, model_class=SubGraphDefinition)
def get_subgraph_definitions(self, engine_id):
""" :rtype: list[dart.model.graph.SubGraphDefinition] """
return self._request_list('get', '/engine/%s/subgraph_definition' % engine_id, model_class=SubGraphDefinition)
@retry(wait_exponential_multiplier=1000, wait_exponential_max=1000*10, stop_max_delay=1000*60*2)
def engine_action_checkout(self, action_id):
""" :type action_id: str
:rtype: dart.model.engine.ActionContext """
assert action_id is not None, 'action_id must be provided'
return self._request('put', '/engine/action/%s/checkout' % action_id, None, model_class=ActionContext)
@retry(wait_exponential_multiplier=1000, wait_exponential_max=1000*10, stop_max_delay=1000*60*2)
def engine_action_checkin(self, action_id, action_result):
""" :type action_id: str
:type action_result: dart.model.engine.ActionResult
:rtype: dict """
return self._get_response_data('put', '/engine/action/%s/checkin' % action_id, data=action_result.to_dict())
def delete_engine(self, engine_id):
""" :type engine_id: str """
self._get_response_data('delete', '/engine/%s' % engine_id)
def save_dataset(self, dataset):
""" :type dataset: dart.model.dataset.Dataset
:rtype: dart.model.dataset.Dataset """
if dataset.id:
return self._request('put', '/dataset/%s' % dataset.id, data=dataset.to_dict(), model_class=Dataset)
return self._request('post', '/dataset', data=dataset.to_dict(), model_class=Dataset)
def get_dataset(self, dataset_id):
""" :type dataset_id: str
:rtype: dart.model.dataset.Dataset """
return self._request('get', '/dataset/%s' % dataset_id, model_class=Dataset)
def delete_dataset(self, dataset_id):
""" :type dataset_id: str """
self._get_response_data('delete', '/dataset/%s' % dataset_id)
def save_datastore(self, datastore):
""" :type datastore: dart.model.datastore.Datastore
:rtype: dart.model.datastore.Datastore """
if datastore.id:
return self._request('put', '/datastore/%s' % datastore.id, data=datastore.to_dict(), model_class=Datastore)
return self._request('post', '/datastore', data=datastore.to_dict(), model_class=Datastore)
def get_datastore(self, datastore_id):
""" :type datastore_id: str
:rtype: dart.model.datastore.Datastore """
return self._request('get', '/datastore/%s' % datastore_id, model_class=Datastore)
def patch_datastore(self, datastore, **data_properties):
""" :type action: dart.model.datastore.Datastore
:rtype: dart.model.datastore.Datastore """
p = self._get_patch(datastore, data_properties)
return self._request('patch', '/datastore/%s' % datastore.id, data=p.patch, model_class=Datastore)
def delete_datastore(self, datastore_id):
""" :type datastore_id: str """
self._get_response_data('delete', '/datastore/%s' % datastore_id)
def patch_action(self, action, **data_properties):
""" :type action: dart.model.action.Action
:rtype: dart.model.action.Action """
p = self._get_patch(action, data_properties)
return self._request('patch', '/action/%s' % action.id, data=p.patch, model_class=Action)
@staticmethod
def _get_patch(model, data_properties):
updated_model_dict = model.to_dict()
for k, v in data_properties.iteritems():
updated_model_dict['data'][k] = v
return jsonpatch.make_patch(model.to_dict(), updated_model_dict)
def save_actions(self, actions, datastore_id=None, workflow_id=None):
""" :type actions: list[dart.model.action.Action]
:rtype: list[dart.model.action.Action] """
# the ^ operator on two bool values is an xor
assert bool(datastore_id) ^ bool(workflow_id), 'please pass either datastore_id or workflow_id'
for a in actions:
assert not a.id, 'updating an action is not supported - action has id: %s' % a.id
data = [a.to_dict() for a in actions]
if datastore_id:
return self._request_list('post', '/datastore/%s/action' % datastore_id, data=data, model_class=Action)
if workflow_id:
return self._request_list('post', '/workflow/%s/action' % workflow_id, data=data, model_class=Action)
def await_action_completion(self, action_id, timeout_seconds=2):
""" :type action_id: str
:rtype: dart.model.action.Action """
finished_states = [ActionState.COMPLETED, ActionState.FAILED]
while True:
action = self.get_action(action_id)
if action.data.state in finished_states:
return action
time.sleep(timeout_seconds)
def get_action(self, action_id):
""" :type action_id: str
:rtype: dart.model.action.Action """
return self._request('get', '/action/%s' % action_id, model_class=Action)
def find_actions(self, filters=None):
""" :type filters: list[dart.model.query.Filter]
:rtype: list[dart.model.action.Action] """
limit = 20
offset = 0
while True:
fs_string = json.dumps([' '.join([f.key, f.operator, f.value]) for f in filters or []])
params = {'limit': limit, 'offset': offset, 'filters': fs_string}
results = self._request_list('get', '/action', params=params, model_class=Action)
if len(results) == 0:
break
for e in results:
yield e
offset += limit
def get_actions(self, datastore_id=None, workflow_id=None):
""" :type datastore_id: str
:type workflow_id: str
:rtype: list[dart.model.action.Action] """
assert datastore_id or workflow_id, 'datastore_id and/or workflow_id must be provided'
filters = []
if datastore_id:
filters.append(Filter('datastore_id', Operator.EQ, datastore_id))
if workflow_id:
filters.append(Filter('workflow_id', Operator.EQ, workflow_id))
return self.find_actions(filters)
def delete_action(self, action_id):
""" :type action_id: str """
self._get_response_data('delete', '/action/%s' % action_id)
def save_workflow(self, workflow, datastore_id=None):
""" :type workflow: dart.model.workflow.Workflow
:type datastore_id: str
:rtype: dart.model.workflow.Workflow """
if workflow.id:
return self._request('put', '/workflow/%s' % workflow.id, data=workflow.to_dict(), model_class=Workflow)
datastore_id = datastore_id or workflow.data.datastore_id
assert datastore_id, 'datastore_id must be provided to save a new workflow'
return self._request('post', '/datastore/%s/workflow' % datastore_id, data=workflow.to_dict(),
model_class=Workflow)
def patch_workflow(self, workflow, **data_properties):
""" :type workflow: dart.model.workflow.Workflow
:rtype: dart.model.workflow.Workflow """
p = self._get_patch(workflow, data_properties)
return self._request('patch', '/workflow/%s' % workflow.id, data=p.patch, model_class=Workflow)
def get_workflow(self, workflow_id):
""" :type workflow_id: str
:rtype: dart.model.workflow.Workflow """
return self._request('get', '/workflow/%s' % workflow_id, model_class=Workflow)
def manually_trigger_workflow(self, workflow_id):
""" :type workflow_id: str """
self._get_response_data('post', '/workflow/%s/do-manual-trigger' % workflow_id)
def await_workflow_completion(self, workflow_id, num_instances=1, timeout_seconds=2):
""" :type workflow_id: str
:type num_instances: int
:rtype: list[dart.model.workflow.WorkflowInstance] """
finished_states = [WorkflowInstanceState.COMPLETED, WorkflowInstanceState.FAILED]
while True:
wfis = self.get_workflow_instances(workflow_id)
num_finished = sum([1 for wfi in wfis if wfi.data.state in finished_states])
if num_finished >= num_instances:
return wfis
time.sleep(timeout_seconds)
def get_workflow_instances(self, workflow_id):
""" :type workflow_id: str
:rtype: list[dart.model.workflow.WorkflowInstance] """
return self._request_list('get', '/workflow/%s/instance' % workflow_id, model_class=WorkflowInstance)
def delete_workflow(self, workflow_id):
""" :type workflow_id: str """
self._get_response_data('delete', '/workflow/%s' % workflow_id)
def delete_workflow_instances(self, workflow_id):
""" :type workflow_id: str """
self._get_response_data('delete', '/workflow/%s/instance' % workflow_id)
def save_subscription(self, subscription, dataset_id=None):
""" :type subscription: dart.model.subscription.Subscription
:type dataset_id: str
:rtype: dart.model.subscription.Subscription """
if subscription.id:
return self._request('put', '/subscription/%s' % subscription.id, data=subscription.to_dict(),
model_class=Subscription)
dataset_id = dataset_id or subscription.data.dataset_id
assert dataset_id, 'dataset_id must be provided to save a new subscription'
        return self._request('post', '/dataset/%s/subscription' % dataset_id,
data=subscription.to_dict(), model_class=Subscription)
def patch_subscription(self, subscription, **data_properties):
""" :type subscription: dart.model.subscription.Subscription
:rtype: dart.model.subscription.Subscription """
p = self._get_patch(subscription, data_properties)
return self._request('patch', '/subscription/%s' % subscription.id, data=p.patch, model_class=Subscription)
def await_subscription_generation(self, subscription_id, timeout_seconds=2):
""" :type subscription_id: str
:rtype: dart.model.subscription.Subscription """
while True:
subscription = self.get_subscription(subscription_id)
if subscription.data.state not in [SubscriptionState.QUEUED, SubscriptionState.GENERATING]:
return subscription
time.sleep(timeout_seconds)
def get_subscription(self, subscription_id):
""" :type subscription_id: str
:rtype: dart.model.subscription.Subscription """
return self._request('get', '/subscription/%s' % subscription_id, model_class=Subscription)
def assign_subscription_elements(self, action_id):
""" :type action_id: str """
return self._request('get', '/action/%s/subscription/assign' % action_id)
def wait_for_nudge_activation(self, nudge_sub_id, sleep_time=10, retries=9):
"""Wait for a nudge subscription to become active.
:type sleep_time: int
:type retries: int
        :type nudge_sub_id: str
:rtype bool
:returns True if the subscription has activated
"""
for _ in xrange(retries+1):
response = make_nudge_request(
url='{}/GetSubscription'.format(config.get('nudge').get('host_url')),
json={'SubscriptionId': nudge_sub_id},
)
state = response.json()['State']
if state == 'INACTIVE':
raise Exception('Nudge subscription has been deactivated')
if state == 'ACTIVE':
return True
time.sleep(sleep_time)
return False
@staticmethod
def create_nudge_batch(nudge_subscription_id):
""" :type nudge_subscription_id: str
:rtype dict"""
host_url = config.get('nudge').get('host_url')
json_body = {
'SubscriptionId': nudge_subscription_id
}
return make_nudge_request(url='%s/CreateBatch' % host_url,
json=json_body).json()
@staticmethod
def get_nudge_batch_elements(nudge_subscription_id, batch_id):
""" :type nudge_subscription_id: str
:type batch_id: str
:rtype Nudge.SubscriptionElement"""
limit = 10000
offset = 0
host_url = config.get('nudge').get('host_url')
while True:
response = make_nudge_request(
url='%s/GetBatchElements' % host_url,
json={
'SubscriptionId': nudge_subscription_id,
'BatchId': batch_id,
'Limit': limit,
'Offset': offset
},
)
elements = response.json()['Elements']
if len(elements) == 0:
break
for e in elements:
yield e
offset += limit
@staticmethod
def get_latest_nudge_batches(nudge_subscription_id, prev_batch_id=None):
""" :type nudge_subscription_id: str
:type prev_batch_id: str
:rtype list[str]"""
host_url = config.get('nudge').get('host_url')
json_body = {
'SubscriptionId': nudge_subscription_id,
}
if prev_batch_id:
json_body['PreviousBatchId'] = prev_batch_id
return make_nudge_request(url='%s/GetSubscriptionBatches' % host_url,
json=json_body).json()['Batches']
@staticmethod
def ack_nudge_elements(nudge_subscription_id, batch_id):
""" :type nudge_subscription_id: str
:type batch_id: str
:rtype dict"""
host_url = config.get('nudge').get('host_url')
json_body = {
'SubscriptionId': nudge_subscription_id,
'BatchId': batch_id,
}
return make_nudge_request(url='%s/Consume' % host_url,
json=json_body).json()
def get_subscription_elements(self, action_id):
""" :type action_id: str
:rtype: list[dart.model.subscription.SubscriptionElement] """
limit = 10000
offset = 0
while True:
params = {'limit': limit, 'offset': offset}
results = self._request_list('get', '/action/%s/subscription/elements' % action_id, params=params,
model_class=SubscriptionElement)
if len(results) == 0:
break
for e in results:
yield e
offset += limit
def find_subscription_elements(self, subscription_id, state=None, processed_after_s3_path=None):
""" :type subscription_id: str
:type state: str
:type processed_after_s3_path: str
:rtype: list[dart.model.subscription.SubscriptionElement] """
limit = 10000
offset = 0
while True:
params = {
'limit': limit,
'offset': offset,
'state': state,
'processed_after_s3_path': processed_after_s3_path if processed_after_s3_path else None
}
results = self._request_list('get', '/subscription/%s/elements' % subscription_id, params=params,
model_class=SubscriptionElement)
if len(results) == 0:
break
for e in results:
yield e
offset += limit
def get_subscription_element_stats(self, subscription_id):
""" :type subscription_id: str
:rtype: list[dart.model.subscription.SubscriptionElementStats] """
return self._request_list('get', '/subscription/%s/element_stats' % subscription_id,
model_class=SubscriptionElementStats)
def delete_subscription(self, subscription_id):
""" :type subscription_id: str """
self._get_response_data('delete', '/subscription/%s' % subscription_id)
def save_trigger(self, trigger):
""" :type trigger: dart.model.trigger.Trigger
:rtype: dart.model.trigger.Trigger """
return self._request('post', '/trigger', data=trigger.to_dict(), model_class=Trigger)
def patch_trigger(self, trigger, **data_properties):
""" :type trigger: dart.model.trigger.Trigger
:rtype: dart.model.trigger.Trigger """
p = self._get_patch(trigger, data_properties)
return self._request('patch', '/trigger/%s' % trigger.id, data=p.patch, model_class=Trigger)
def get_trigger(self, trigger_id):
""" :type trigger_id: str
:rtype: dart.model.trigger.Trigger """
return self._request('get', '/trigger/%s' % trigger_id, model_class=Trigger)
def get_trigger_types(self):
""" :rtype: dart.model.trigger.TriggerType """
return self._request_list('get', '/trigger_type', model_class=TriggerType)
def delete_trigger(self, trigger_id):
""" :type trigger_id: str """
self._get_response_data('delete', '/trigger/%s' % trigger_id)
def save_event(self, event):
""" :type event: dart.model.event.Event
:rtype: dart.model.event.Event """
if event.id:
return self._request('put', '/event/%s' % event.id, data=event.to_dict(), model_class=Event)
return self._request('post', '/event', data=event.to_dict(), model_class=Event)
def patch_event(self, event, **data_properties):
""" :type event: dart.model.event.Event
:rtype: dart.model.event.Event """
p = self._get_patch(event, data_properties)
return self._request('patch', '/event/%s' % event.id, data=p.patch, model_class=Event)
def get_event(self, event_id):
""" :type event_id: str
:rtype: dart.model.event.Event """
return self._request('get', '/event/%s' % event_id, model_class=Event)
def delete_event(self, event_id):
""" :type event_id: str """
self._get_response_data('delete', '/event/%s' % event_id)
def get_entity_graph(self, entity_type, entity_id):
""" :type entity_type: str
:type entity_id: str
:rtype: dart.model.graph.Graph """
return self._request('get', '/graph/%s/%s' % (entity_type, entity_id), model_class=Graph)
def _get_response_data(self, method, url_prefix, data=None, params=None):
basic_auth_signature = encode(self._credential, self._secret)
headers = {
'Authorization': basic_auth_signature
}
response = requests.request(method, self._base_url + '/' + url_prefix.lstrip('/'), headers=headers, json=data,
params=params, verify=False)
try:
data = response.json()
if data['results'] == 'ERROR':
raise
return data['results']
except:
raise DartRequestException(response)
def _request(self, method, url_prefix=None, data=None, params=None, model_class=None):
response_data = self._get_response_data(method, url_prefix, data, params)
return None if not model_class else model_class.from_dict(response_data)
def _request_list(self, method, url_prefix=None, data=None, params=None, model_class=None):
elements = self._get_response_data(method, url_prefix, data, params)
return [model_class.from_dict(e) for e in elements]
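# Minimal usage sketch (hypothetical host/engine/datastore values; assumes DART_CONFIG points at a valid config):
#
#   dart = Dart('localhost', port=5000)
#   engine = dart.get_engine('my-engine-id')
#   for action in dart.get_actions(datastore_id='my-datastore-id'):
#       print action.id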
|
1603889
|
import ee
from ee_plugin import Map
# Computed area filter.
# Find US counties smaller than 3k square kilometers in area.
# Load counties from TIGER boundaries table
counties = ee.FeatureCollection('TIGER/2016/Counties')
# Map a function over the counties to set the area of each.
def func_blc(f):
# Compute area in square meters. Convert to hectares.
areaHa = f.area().divide(100 * 100)
# A new property called 'area' will be set on each feature.
return f.set({'area': areaHa})
countiesWithArea = counties.map(func_blc)
# Filter to get only smaller counties.
smallCounties = countiesWithArea.filter(ee.Filter.lt('area', 3e5))
Map.addLayer(smallCounties, {'color': '900000'})
Map.setCenter(-119.7, 38.26, 7)
|
1603899
|
from django.urls import reverse
from django.test import TestCase
from ..models import Badge
from .mixins import BadgeFixturesMixin
class BadgeDetailViewTestCase(TestCase):
"""
BadgeDetail view test case.
"""
def setUp(self):
self.badge = Badge.objects.create(
name='Djangonaut',
slug='djangonaut',
description='Django Developer',
image=u'')
self.badge_with_hyphen = Badge.objects.create(
name='<NAME>',
slug='super-djangonaut',
description='Super Django Developer',
image=u'')
def test_view(self):
for badge in [self.badge, self.badge_with_hyphen]:
res = self.client.get(badge.get_absolute_url())
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'badgify/badge_detail.html')
def test_view_404(self):
res = self.client.get(reverse('badge_detail', kwargs={'slug': 'foobar'}))
self.assertEqual(res.status_code, 404)
class BadgeListViewTestCase(TestCase, BadgeFixturesMixin):
"""
BadgeList view test case.
"""
def setUp(self):
self.badges = []
badges, slugs = self.get_dummy_badges()
for badge in badges:
badge.save()
self.badges.append(badge)
def test_view(self):
res = self.client.get(reverse('badge_list'))
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'badgify/badge_list.html')
|
1603932
|
from typing import List
class Solution:
def canPlaceFlowers(self, flowerbed: List[int], n: int) -> bool:
if n == 0: return True
i, l1, l2 = 0, len(flowerbed), len(flowerbed) - 1
while n > 0 and i < l1:
if flowerbed[i] == 1:
i += 2
else:
if i == l2 or flowerbed[i + 1] == 0:
flowerbed[i] = 1
n -= 1
i += 2
else:
i += 1
return n == 0
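# Example (hypothetical input): Solution().canPlaceFlowers([1, 0, 0, 0, 1], 1) is expected to return True.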
|
1603933
|
import warnings
from itertools import tee, starmap
from operator import gt
from copy import copy
import numpy as np
import pandas as pd
import bioframe
def assign_view_paired(
features,
view_df,
cols_paired=["chrom1", "start1", "end1", "chrom2", "start2", "end2"],
cols_view=["chrom", "start", "end"],
features_view_cols=["region1", "region2"],
view_name_col="name",
drop_unassigned=False,
):
"""Assign region names from the view to each feature
Assigns a regular 1D view independently to each side of a bedpe-style dataframe.
Will add two columns with region names (`features_view_cols`)
Parameters
----------
features : pd.DataFrame
bedpe-style dataframe
view_df : pandas.DataFrame
ViewFrame specifying region start and ends for assignment. Attempts to
convert dictionary and pd.Series formats to viewFrames.
cols_paired : list of str
        The names of the six columns containing the chromosomes, starts and ends of the
        paired genomic intervals. The default values are 'chrom1', 'start1', 'end1', 'chrom2', 'start2', 'end2'.
cols_view : list of str
The names of columns containing the chromosome, start and end of the
genomic intervals in the view. The default values are 'chrom', 'start', 'end'.
features_view_cols : list of str
Names of the columns where to save the assigned region names
view_name_col : str
Column of ``view_df`` with region names. Default 'name'.
drop_unassigned : bool
If True, drop intervals in df that do not overlap a region in the view.
Default False.
"""
features = features.copy()
features.reset_index(inplace=True, drop=True)
cols_left = cols_paired[:3]
cols_right = cols_paired[3:]
bioframe.core.checks.is_bedframe(features, raise_errors=True, cols=cols_left)
bioframe.core.checks.is_bedframe(features, raise_errors=True, cols=cols_right)
view_df = bioframe.core.construction.make_viewframe(
view_df, view_name_col=view_name_col, cols=cols_view
)
features = bioframe.assign_view(
features,
view_df,
drop_unassigned=drop_unassigned,
df_view_col=features_view_cols[0],
view_name_col=view_name_col,
cols=cols_left,
cols_view=cols_view,
)
features[cols_right[1:]] = features[cols_right[1:]].astype(
int
) # gets cast to float above...
features = bioframe.assign_view(
features,
view_df,
drop_unassigned=drop_unassigned,
df_view_col=features_view_cols[1],
view_name_col=view_name_col,
cols=cols_right,
cols_view=cols_view,
)
return features
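# Minimal usage sketch (hypothetical frames), assuming pandas/bioframe are available:
#
#   view = pd.DataFrame({"chrom": ["chr1"], "start": [0], "end": [1000], "name": ["regA"]})
#   pairs = pd.DataFrame({"chrom1": ["chr1"], "start1": [10], "end1": [20],
#                         "chrom2": ["chr1"], "start2": [500], "end2": [510]})
#   assign_view_paired(pairs, view)  # adds 'region1' and 'region2' columns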
def assign_regions(features, supports):
"""
    DEPRECATED. Will be removed in future versions and replaced with bioframe.overlap()
    For each feature in the features dataframe, assign the genomic region (support)
    that overlaps with it. If a feature overlaps multiple supports, the
    region with the largest overlap is reported.
"""
index_name = features.index.name # Store the name of index
features = (
features.copy()
.reset_index()
.rename({"index" if index_name is None else index_name: "native_order"}, axis=1)
) # Store the original features' order as a column with original index
if "chrom" in features.columns:
overlap = bioframe.overlap(
features,
supports,
how="left",
cols1=["chrom", "start", "end"],
cols2=["chrom", "start", "end"],
keep_order=True,
return_overlap=True,
suffixes=("_1", "_2"),
)
overlap_columns = overlap.columns # To filter out duplicates later
overlap["overlap_length"] = overlap["overlap_end"] - overlap["overlap_start"]
# Filter out overlaps with multiple regions:
overlap = (
overlap.sort_values("overlap_length", ascending=False)
.drop_duplicates(overlap_columns, keep="first")
.sort_index()
)
# Copy single column with overlapping region name:
features["region"] = overlap["name_2"]
if "chrom1" in features.columns:
for idx in ("1", "2"):
overlap = bioframe.overlap(
features,
supports,
how="left",
cols1=[f"chrom{idx}", f"start{idx}", f"end{idx}"],
cols2=[f"chrom", f"start", f"end"],
keep_order=True,
return_overlap=True,
suffixes=("_1", "_2"),
)
overlap_columns = overlap.columns # To filter out duplicates later
overlap[f"overlap_length{idx}"] = (
overlap[f"overlap_end{idx}"] - overlap[f"overlap_start{idx}"]
)
# Filter out overlaps with multiple regions:
overlap = (
overlap.sort_values(f"overlap_length{idx}", ascending=False)
.drop_duplicates(overlap_columns, keep="first")
.sort_index()
)
# Copy single column with overlapping region name:
features[f"region{idx}"] = overlap["name_2"]
# Form a single column with region names where region1 == region2, and np.nan in other cases:
features["region"] = np.where(
features["region1"] == features["region2"], features["region1"], np.nan
)
features = features.drop(
["region1", "region2"], axis=1
) # Remove unnecessary columns
features = features.set_index("native_order") # Restore the original index
features.index.name = index_name # Restore original index title
return features
def assign_supports(features, supports, labels=False, suffix=""):
"""
Assign support regions to a table of genomic intervals.
Obsolete, replaced by assign_regions now.
Parameters
----------
features : DataFrame
Dataframe with columns `chrom`, `start`, `end`
or `chrom1`, `start1`, `end1`, `chrom2`, `start2`, `end2`
supports : array-like
Support areas
"""
features = features.copy()
supp_col = pd.Series(index=features.index, data=np.nan)
c = "chrom" + suffix
s = "start" + suffix
e = "end" + suffix
for col in (c, s, e):
if col not in features.columns:
raise ValueError(
'Column "{}" not found in features data frame.'.format(col)
)
for i, region in enumerate(supports):
# single-region support
if len(region) in [3, 4]:
sel = (features[c] == region[0]) & (features[e] > region[1])
if region[2] is not None:
sel &= features[s] < region[2]
# paired-region support
elif len(region) == 2:
region1, region2 = region
sel1 = (features[c] == region1[0]) & (features[e] > region1[1])
if region1[2] is not None:
sel1 &= features[s] < region1[2]
sel2 = (features[c] == region2[0]) & (features[e] > region2[1])
if region2[2] is not None:
sel2 &= features[s] < region2[2]
sel = sel1 | sel2
supp_col.loc[sel] = i
if labels:
supp_col = supp_col.map(lambda i: supports[int(i)], na_action="ignore")
return supp_col
def assign_regions_to_bins(bin_ids, regions_span):
regions_binsorted = (
regions_span[(regions_span["bin_start"] >= 0) & (regions_span["bin_end"] >= 0)]
.sort_values(["bin_start", "bin_end"])
.reset_index()
)
bin_reg_idx_lo = regions_span["bin_start"].searchsorted(bin_ids, "right") - 1
bin_reg_idx_hi = regions_span["bin_end"].searchsorted(bin_ids, "right")
mask_assigned = (bin_reg_idx_lo == bin_reg_idx_hi) & (bin_reg_idx_lo >= 0)
region_ids = pd.array([pd.NA] * len(bin_ids))
region_ids[mask_assigned] = regions_span["name"][bin_reg_idx_lo[mask_assigned]]
return region_ids
def make_cooler_view(clr, ucsc_names=False):
"""
Generate a full chromosome viewframe
using cooler's chromsizes
Parameters
----------
clr : cooler
cooler-object to extract chromsizes
ucsc_names : bool
Use full UCSC formatted names instead
of short chromosome names.
Returns
-------
cooler_view : viewframe
full chromosome viewframe
"""
cooler_view = bioframe.make_viewframe(clr.chromsizes)
if ucsc_names:
# UCSC formatted names
return cooler_view
else:
# rename back to short chromnames
cooler_view["name"] = cooler_view["chrom"]
return cooler_view
def view_from_track(track_df):
bioframe.core.checks._verify_columns(track_df, ["chrom", "start", "end"])
return bioframe.make_viewframe(
[
(chrom, df.start.min(), df.end.max())
for chrom, df in track_df.groupby("chrom")
]
)
def mask_cooler_bad_bins(track, bintable):
"""
Mask (set to NaN) values in track where bin is masked in bintable.
Currently used in `cli.get_saddle()`.
TODO: determine if this should be used elsewhere.
Parameters
----------
track : tuple of (DataFrame, str)
bedGraph-like dataframe along with the name of the value column.
bintable : tuple of (DataFrame, str)
bedGraph-like dataframe along with the name of the weight column.
Returns
-------
track : DataFrame
New bedGraph-like dataframe with bad bins masked in the value column
"""
# TODO: update to new track format
track, name = track
bintable, clr_weight_name = bintable
track = pd.merge(
track[["chrom", "start", "end", name]], bintable, on=["chrom", "start", "end"]
)
track.loc[~np.isfinite(track[clr_weight_name]), name] = np.nan
track = track[["chrom", "start", "end", name]]
return track
def align_track_with_cooler(
track, clr, view_df=None, clr_weight_name="weight", mask_bad_bins=True
):
"""
Sync a track dataframe with a cooler bintable.
Checks that bin sizes match between a track and a cooler,
merges the cooler bintable with the track, and
propagates masked regions from a cooler bintable to a track.
Parameters
----------
track : pd.DataFrame
bedGraph-like track DataFrame to check
clr : cooler
cooler object to check against
view_df : bioframe.viewframe or None
Optional viewframe of regions to check for their number of bins with assigned track values.
If None, constructs a view_df from cooler chromsizes.
clr_weight_name : str
Name of the column in the bin table with weight
mask_bad_bins : bool
Whether to propagate null bins from cooler bintable column clr_weight_name
to the 'value' column of the output clr_track. Default True.
Returns
-------
clr_track
track dataframe that has been aligned with the cooler bintable
and has columns ['chrom','start','end','value']
"""
from .checks import is_track, is_cooler_balanced
try:
is_track(track, raise_errors=True)
except Exception as e:
raise ValueError("invalid input track") from e
# since tracks are currently allowed to have flexible column names
c, s, e, v = track.columns[:4]
# using median to allow for shorter / longer last bin on any chromosome
track_bin_width = int((track[e] - track[s]).median())
if not (track_bin_width == clr.binsize):
raise ValueError(
"mismatch between track and cooler bin size, check track resolution"
)
clr_track = (
(clr.bins()[:])
.copy()
.merge(
track.rename(columns={c: "chrom", s: "start", e: "end", v: "value"}),
how="left",
on=["chrom", "start"],
suffixes=("", "_"),
)
)
if clr_weight_name:
try:
is_cooler_balanced(clr, clr_weight_name=clr_weight_name, raise_errors=True)
except Exception as e:
raise ValueError(
f"no column {clr_weight_name} detected in input cooler bintable"
) from e
else:
clr_track[clr_weight_name] = 1.0
valid_bins = clr_track[clr_weight_name].notna()
num_valid_bins = valid_bins.sum()
num_assigned_bins = (clr_track["value"][valid_bins].notna()).sum()
if num_assigned_bins == 0:
raise ValueError("no track values assigned to cooler bintable")
elif num_assigned_bins < 0.5 * np.sum(valid_bins):
warnings.warn("less than 50% of valid bins have been assigned a value")
view_df = make_cooler_view(clr) if view_df is None else view_df
for region in view_df.itertuples(index=False):
track_region = bioframe.select(clr_track, region)
num_assigned_region_bins = track_region["value"].notna().sum()
if num_assigned_region_bins == 0:
raise ValueError(
f"no track values assigned to region {bioframe.to_ucsc_string(region)}"
)
if mask_bad_bins:
clr_track.loc[~valid_bins, "value"] = np.nan
return clr_track[["chrom", "start", "end", "value"]]
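# Minimal usage sketch (hypothetical paths/columns), assuming the ``cooler`` package and a bedGraph-like track file:
#
#   import cooler
#   clr = cooler.Cooler("example.mcool::/resolutions/10000")
#   track = pd.read_csv("track.tsv", sep="\t", names=["chrom", "start", "end", "value"])
#   clr_track = align_track_with_cooler(track, clr)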
|
1603937
|
from typing import List
class Solution:
def sumEvenAfterQueries(self, A: List[int], queries: List[List[int]]) -> List[int]:
evenSum = sum(num for num in A if num % 2 == 0)
result = []
for val, index in queries:
if A[index] % 2 == 0:
evenSum -= A[index]
A[index] += val
if A[index] % 2 == 0:
evenSum += A[index]
result.append(evenSum)
return result
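# Example (hypothetical input): Solution().sumEvenAfterQueries([1, 2, 3, 4], [[1, 0], [-3, 1], [-4, 0], [2, 3]])
# is expected to return [8, 6, 2, 4].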
|
1603975
|
from honeygrove.config import Config
from honeygrove.core.ServiceController import ServiceController
from honeygrove.services.ListenService import ListenService
import twisted.internet.reactor
import unittest
class ListenServiceTest(unittest.TestCase):
listen = None
Controller = None
@classmethod
def setUpClass(cls):
Config.listenServicePorts = [9991, 9992]
def setUp(self):
ListenServiceTest.listen = ListenService()
ListenServiceTest.Controller = ServiceController()
ListenServiceTest.Controller.listen = ListenServiceTest.listen
def tearDown(self):
ListenServiceTest.listen.stopService()
twisted.internet.reactor.callFromThread(twisted.internet.reactor.stop)
def testInit(self):
"""
        Test if all ports are initialized
"""
self.assertEqual(ListenServiceTest.listen._port, [9991, 9992])
self.assertEqual(ListenServiceTest.listen._stop, True)
self.assertEqual(ListenServiceTest.listen._transport, dict([]))
def testStart(self):
"""
Tests if the service is active after start
"""
self.assertRaises(KeyError, lambda: ListenServiceTest.listen._transport[9991])
self.assertRaises(KeyError, lambda: ListenServiceTest.listen._transport[9992])
ListenServiceTest.listen.startService()
self.assertNotEqual(ListenServiceTest.listen._transport[9991], None)
self.assertNotEqual(ListenServiceTest.listen._transport[9992], None)
def testStopOnPort(self):
"""
        Tests if a specific service can start on a port used by ListenService
"""
ListenServiceTest.listen.startService()
self.assertNotEqual(ListenServiceTest.listen._transport[9991], None)
self.assertNotEqual(ListenServiceTest.listen._transport[9992], None)
ListenServiceTest.Controller.startService("serviceControllerTestService")
self.assertRaises(KeyError, lambda: ListenServiceTest.listen._transport[9991])
def testStartOnPort(self):
"""
        Test if the service will start automatically after a service stops on the port
"""
ListenServiceTest.Controller.startService("serviceControllerTestService")
ListenServiceTest.listen.startService()
ListenServiceTest.listen.stopOnPort(9991)
self.assertNotEqual(ListenServiceTest.listen._transport[9992], None)
ListenServiceTest.Controller.stopService("serviceControllerTestService")
self.assertNotEqual(ListenServiceTest.listen._transport[9991], None)
|
1603976
|
import arcade
import imgui
import imgui.core
from imdemo.page import Page
class WindowMenu(Page):
def draw(self):
flags = imgui.WINDOW_MENU_BAR
imgui.begin("Child Window - File Browser", flags=flags)
if imgui.begin_menu_bar():
if imgui.begin_menu('File'):
imgui.menu_item('Close')
imgui.end_menu()
imgui.end_menu_bar()
imgui.end()
def install(app):
app.add_page(WindowMenu, "windowmenu", "Window Menu")
|
1604022
|
import random
class QA:
def __init__(self, question, correctAnswer, otherAnswers):
self.question = question
self.corrAnsw = correctAnswer
self.otherAnsw = otherAnswers
qaList = [QA("Where is New Delhi?", "in India", ["in Pakistan", "in China"]),
QA("What is the capital of Angola?", "Luanda", ["Sydney", "New York", "Angola doesn't exist"]),
QA("Which of the following is not in Europe?", "South Africa", ["England", "Spain", "Germany"]),
QA("Which of the following is not a capital of South Africa?", "Durban", ["Pretoria", "Cape Town"]),
QA("Which of the following is not an African country?", "Malaysia", ["Madagascar", "Djibouti", "South Africa", "Zimbabwe"])]
corrCount = 0
random.shuffle(qaList)
for qaItem in qaList:
print(qaItem.question)
print("Possible answers are:")
possible = qaItem.otherAnsw + [qaItem.corrAnsw]
random.shuffle(possible)
count = 0
while count < len(possible):
print(str(count+1) + ": " + possible[count])
count += 1
print("Please enter the number of your answer:")
userAnsw = input()
while not userAnsw.isdigit():
print("That was not a number. Please enter the number of your answer:")
userAnsw = input()
userAnsw = int(userAnsw)
while not (userAnsw > 0 and userAnsw <= len(possible)):
print("That number doesn't correspond to any answer. Please enter the number of your answer:")
userAnsw = input()
if possible[userAnsw-1] == qaItem.corrAnsw:
print("Your answer was correct.")
corrCount += 1
else:
print("Your answer was wrong.")
print("Correct answer was: " + qaItem.corrAnsw)
print("")
print("You answered " + str(corrCount) + " of " + str(len(qaList)) + " questions correctly.")
|
1604027
|
import os
from ....utils.common import redirected_stdio
from bloom.generators.debian.generator import em
from bloom.generators.debian.generator import get_changelogs
from bloom.generators.debian.generator import format_description
from catkin_pkg.packages import find_packages
test_data_dir = os.path.join(os.path.dirname(__file__), 'test_generator_data')
def test_get_changelogs():
with redirected_stdio():
packages = dict([(pkg.name, pkg) for path, pkg in find_packages(test_data_dir).items()])
assert 'bad_changelog_pkg' in packages
get_changelogs(packages['bad_changelog_pkg'])
def test_unicode_templating():
with redirected_stdio():
packages = dict([(pkg.name, pkg) for path, pkg in find_packages(test_data_dir).items()])
assert 'bad_changelog_pkg' in packages
chlogs = get_changelogs(packages['bad_changelog_pkg'])
template = "@(changelog)"
em.expand(template, {'changelog': chlogs[0][2]})
def test_format_description():
assert '' == format_description('')
assert '.' == format_description('.')
assert 'Word.' == format_description('Word.')
assert 'Word' == format_description('Word')
assert '.' == format_description(' .')
assert '.' == format_description(' . ')
assert 'Word.\n Other words.' == format_description('Word. Other words.')
assert 'The first sentence, or synopsis.\n The second sentence. Part of the long description, but all in a single paragraph.' == format_description('The first sentence, or synopsis. The second sentence. Part of the long description, but all in a single paragraph.')
assert '..' == format_description('..')
assert 'The my_package package' == format_description('The my_package package')
assert 'First sentence with a version nr: 2.4.5, some other text.\n And then some other text.' == format_description('First sentence with a version nr: 2.4.5, some other text. And then some other text.')
assert 'More punctuation! This will split here.\n And the rest.' == format_description('More punctuation! This will split here. And the rest.')
assert 'v1.2.3 with v5.3.7 and ! Split after this.\n Long description here.' == format_description('v1.2.3 with v5.3.7 and ! Split after this. Long description here.\n\n')
# no whitespace between <p>'s, no split
assert 'some embedded html markup.the other sentence.' == format_description('<h1>some embedded</h1>\n<p>html markup.</p><p>the other sentence.</p>')
|
1604036
|
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
help = "Returns a list of the SQL statements required to return all tables in the database to the state they were in just after they were installed."
output_transaction = True
def handle_noargs(self, **options):
from django.core.management.sql import sql_flush
return u'\n'.join(sql_flush(self.style, only_django=True)).encode('utf-8')
|
1604074
|
def abs(x):
if x > 0:
return x
else:
return -x
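# Newton's method for square roots: repeatedly update r <- r - (r**2 - x) / (2*r) until |r**2 - x| < eps.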
def sqrt(x):
eps = 1e-10
x = float(x)
r = x/2
residual = r**2 - x
while abs(residual) > eps:
r_d = -residual/(2*r)
r += r_d
residual = r**2 - x
return r
def p(x):
prec = 11
l = list(iter(str(x)))[:prec]
if not "." in l:
l.append(".")
l.append("0")
while len(l) < prec:
l.append("0")
s = ""
for c in l:
s += c
return s
print p(sqrt(1))
print p(sqrt(2))
print p(sqrt(3))
print p(sqrt(4))
print p(sqrt(5))
print p(sqrt(6))
print p(sqrt(7000))
|
1604078
|
import pytest
from homework.models import Answer
pytestmark = [
pytest.mark.django_db,
pytest.mark.usefixtures('purchase'),
]
@pytest.fixture
def _no_purchase(purchase):
purchase.setattr_and_save('paid', None)
def get_answer():
return Answer.objects.last()
def test_creation(api, question, another_answer):
api.post('/api/v2/homework/answers/', {
'text': 'Горите в аду!',
'question': question.slug,
'parent': another_answer.slug,
})
created = get_answer()
assert created.question == question
assert created.parent == another_answer
assert created.author == api.user
assert created.text == 'Горите в аду!'
def test_without_parent(api, question):
api.post('/api/v2/homework/answers/', {
'question': question.slug,
'text': 'Верните деньги!',
})
created = get_answer()
assert created.parent is None
def test_empty_parent(api, question):
api.post('/api/v2/homework/answers/', {
'parent': None,
'question': question.slug,
'text': 'Верните деньги!',
})
created = get_answer()
assert created.parent is None
@pytest.mark.xfail(reason='WIP: will add per-course permissions later')
@pytest.mark.usefixtures('_no_purchase')
def test_403_for_not_purchased_users(api, question):
api.post('/api/v2/homework/answers/', {
'question': question.slug,
'text': 'Верните деньги!',
}, expected_status_code=403)
@pytest.mark.usefixtures('_no_purchase')
def test_ok_for_users_with_permission(api, question):
api.user.add_perm('homework.question.see_all_questions')
api.post('/api/v2/homework/answers/', {
'question': question.slug,
'text': 'Верните деньги!',
}, expected_status_code=201)
@pytest.mark.usefixtures('_no_purchase')
def test_ok_for_superusers(api, question):
api.user.is_superuser = True
api.user.save()
api.post('/api/v2/homework/answers/', {
'question': question.slug,
'text': 'Верните деньги!',
}, expected_status_code=201)
|
1604114
|
import logging
from nltk.tokenize import sent_tokenize
from pynsett.auxiliary.names_modifier import SentenceNamesModifier, assign_proper_index_to_nodes_names, \
DiscourseNamesModifier
from pynsett.discourse.anaphora import AllenCoreferenceVisitorsFactory
from pynsett.discourse.global_graph_visitors import GraphJoinerVisitor, CoreferenceJoinerVisitor
from pynsett.discourse.paragraphs import SimpleParagraphTokenizer
from pynsett.discourse.single_tokens_visitors import HeadTokenVisitor
from ..drt import Drs
_logger = logging.getLogger(__name__)
class DiscourseBase:
_sentences_list = []
_drs_list = []
_discourse = Drs.create_empty()
@property
def drs_list(self):
return self._drs_list
@property
def connected_components(self):
from igraph import WEAK
g_list = self._discourse._g.clusters(mode=WEAK).subgraphs()
return [Drs(g) for g in g_list]
def __getitem__(self, item):
return self._sentences_list[item], self._drs_list[item]
def __len__(self):
return len(self._sentences_list)
def get_discourse_drs(self):
return self._discourse
def apply(self, function):
return self._discourse.apply(function)
class Paragraph(DiscourseBase):
def __init__(self, text):
self._discourse = Drs.create_empty()
self._drs_list = []
self._sentences_list = sent_tokenize(text)
word_nodes = []
for sentence_index, sentence in enumerate(self._sentences_list):
try:
if sentence == '.':
continue
drs = Drs.create_from_natural_language(sentence)
word_nodes += assign_proper_index_to_nodes_names(drs.word_nodes, sentence_index)
drs.apply(HeadTokenVisitor(sentence_index))
drs.apply(SentenceNamesModifier(sentence_index))
self._drs_list.append(drs)
except Exception as e:
_logger.warning('Exception caught in Discourse: ' + str(e))
coreference_visitor_factory = AllenCoreferenceVisitorsFactory(word_nodes)
for i, drs in enumerate(self._drs_list):
drs.apply(coreference_visitor_factory.create())
self.__create_discourse_graph()
def __create_discourse_graph(self):
if len(self._drs_list) == 1:
self._discourse = self._drs_list[0]
return
for drs in self._drs_list:
self._discourse.apply(GraphJoinerVisitor(drs))
self._discourse.apply(CoreferenceJoinerVisitor())
class Discourse(DiscourseBase):
def __init__(self, text):
paragraphs = self.__divide_into_paragraphs(self.__sanitize_text(text))
if len(paragraphs) > 1:
            for i, paragraph in enumerate(paragraphs):
                paragraph._discourse.apply(DiscourseNamesModifier(i))
self._sentences_list = self.__aggregate_sentence_list_from_paragrahs(paragraphs)
self._drs_list = self.__aggregate_drs_list_from_paragrahs(paragraphs)
self._discourse = self.__aggregate_discourse_from_paragrahs(paragraphs)
def __aggregate_discourse_from_paragrahs(self, paragraphs):
return Drs.create_union_from_list_of_drs([paragraph.get_discourse_drs() for paragraph in paragraphs])
def __aggregate_sentence_list_from_paragrahs(self, paragraphs):
sentence_list = []
for paragraph in paragraphs:
sentence_list += paragraph._sentences_list
return sentence_list
def __aggregate_drs_list_from_paragrahs(self, paragraphs):
drs_list = []
for paragraph in paragraphs:
drs_list += paragraph.drs_list
return drs_list
def __divide_into_paragraphs(self, text):
paragraphs_texts = SimpleParagraphTokenizer().get_paragraphs(text)
paragraphs = []
for text in paragraphs_texts:
paragraphs.append(Paragraph(text))
return paragraphs
def __sanitize_text(self, text):
text = text.replace('\n', '.\n')
text = text.replace('.[', '. [')
text = text.replace('...', '.')
text = text.replace('..', '.')
text = text.replace('\n.', '\n')
return text
|
1604174
|
from setuptools import setup
setup(
name='rpp',
version='0.4',
install_requires=[
'ply',
'attrs',
],
)
|
1604198
|
def minimise(on_set, off_set, used_columns=set()):
"""Minimise a set of keys and masks.
Parameters
----------
on_set : {(key, mask), ...}
Set of keys and masks to minimise.
off_set : {(key, mask), ...}
Set of keys and masks which should *not* be covered by the minimised
version of the "on-set".
    Yields
    ------
    (key, mask)
        Key and mask pairs which together cover all the terms in the on-set
        while covering none of the terms in the off-set.
Uses the "Critical Column First" algorithm presented by:
<NAME>, and <NAME>. "An efficient flow monitoring algorithm
using a flexible match structure." High Performance Switching and
Routing (HPSR), 2016 IEEE 17th International Conference on. IEEE, 2016.
"""
# Copy the set of columns that have been chosen already
used_columns = {x for x in used_columns}
if len(off_set) == 0:
# If there is no off-set then yield a key and mask combination which
# will match everything in the on-set.
any_ones = 0x00000000 # Bits which are 1 in any entry
all_ones = 0xffffffff # Bits which are 1 in all entries
all_selected = 0xffffffff # Bits which are 1 in all masks
# Determine which bits to set to 0, 1 and X
for key, mask in on_set:
any_ones |= key
all_ones &= key
all_selected &= mask
any_zeros = ~all_ones
new_xs = any_ones ^ any_zeros
mask = new_xs & all_selected # Combine new Xs with existing Xs
key = all_ones & mask
yield key, mask
else:
# Otherwise determine a column that can be used to break the on- and
# off-sets apart.
on_xs, on_zeros, on_ones = _count_bits(on_set)
off_xs, off_zeros, off_ones = _count_bits(off_set)
no_xs = tuple(not(a or b) for a, b in zip(on_xs, off_xs))
zeros = tuple(a - b for a, b in zip(on_zeros, off_zeros))
ones = tuple(a - b for a, b in zip(on_ones, off_ones))
scores = tuple(max(p0, p1) for p0, p1 in zip(zeros, ones))
# Get the best column
best_column = None
for i, (score, valid) in enumerate(zip(scores, no_xs)):
if valid and i not in used_columns:
if best_column is None or scores[best_column] < score:
best_column = i
# Break the entries apart based on the value of this column
new_on_set_zeros, new_on_set_ones = _break_set(on_set, best_column)
new_off_set_zeros, new_off_set_ones = _break_set(off_set, best_column)
used_columns.add(best_column) # Mark the column as used
if len(new_on_set_zeros) > 0:
for entry in minimise(new_on_set_zeros, new_off_set_zeros,
used_columns):
yield entry
if len(new_on_set_ones) > 0:
for entry in minimise(new_on_set_ones, new_off_set_ones,
used_columns):
yield entry
def _count_bits(entries):
xs = [False for _ in range(32)]
zeros = [0 for _ in range(32)]
ones = [0 for _ in range(32)]
for key, mask in entries:
for i in range(32):
bit = 1 << i
if mask & bit:
if not key & bit:
zeros[i] += 1
else:
ones[i] += 1
else:
xs[i] = True
return tuple(xs), tuple(zeros), tuple(ones)
def _break_set(entries, bit):
zeros = set()
ones = set()
bit = 1 << bit # Select the bit
for key, mask in entries:
assert mask & bit
if not key & bit:
zeros.add((key, mask))
else:
ones.add((key, mask))
return tuple(zeros), tuple(ones)
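# Minimal usage sketch (hypothetical keys/masks): minimise() is a generator, so collect its output:
#
#   on_set = {(0b00, 0xffffffff), (0b01, 0xffffffff)}
#   off_set = {(0b11, 0xffffffff)}
#   routes = list(minimise(on_set, off_set))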
|
1604219
|
from .base_fetcher import BaseFetcher
from integrations.models import Artist, Release
from spotipy.oauth2 import SpotifyOAuth
import datetime
import os
from dateutil.parser import parse
class SpotifyFetcher(BaseFetcher):
def integration_identifier(self):
return('spotify')
def activate_integration(self):
client_id = os.environ.get('SPOTIFY_KEY', '')
client_secret = os.environ.get('SPOTIFY_SECRET', '')
sp_oauth = SpotifyOAuth(client_id, client_secret, None)
token_info = sp_oauth.refresh_access_token(self.integration.refresh_token)
self.integration.access_token = token_info['access_token']
self.integration.refresh_token = token_info['refresh_token']
self.integration.save()
def fetch_artists(self):
token = self.integration.access_token
limit = 50
url = f"https://api.spotify.com/v1/me/following?type=artist&limit={limit}&access_token={token}"
return(
self.fetch_data(url,
path_to_data=['artists', 'items'],
path_to_next=['artists', 'next']
)
)
def update_or_create_artists(self, artists):
for artist in artists:
find_by = {"integration": self.integration, "integration_artist_id": artist["id"]}
update = {"name": artist["name"]}
Artist.objects.update_or_create(**find_by, defaults=update)
return(self.integration.artist_set.all())
def fetch_artist_releases(self, artist):
artist_id = artist.integration_artist_id
token = self.integration.access_token
limit = 50
url = f"https://api.spotify.com/v1/artists/{artist_id}/albums?limit={limit}&access_token={token}"
return(
self.fetch_data(url,
path_to_data=['items'],
path_to_next=['next']
)
)
def update_or_create_artist_releases(self, artist, releases):
for release in releases:
find_by = {"artist": artist, "integration_release_id": release["id"]}
try:
release_date = parse(release['release_date'])
except ValueError:
release_date = str(datetime.date.today())
cover_url = release['images']
if len(cover_url) > 0:
cover_url = max(release['images'], key=lambda image: image['width'])['url']
else:
cover_url = ''
update = {
"title": release["name"],
"cover_url": cover_url,
"date": release_date,
"release_type": release["album_type"],
"integration_url": release['external_urls']['spotify'],
}
Release.objects.update_or_create(**find_by, defaults=update)
|
1604248
|
from selenium import webdriver
#browser exposes an executable file
#Through Selenium test we need to invoke the executable file which will then invoke actual browser
#driver = webdriver.Chrome(executable_path="C:\\chromedriver.exe")
#driver=webdriver.Firefox(executable_path="C:\\geckodriver.exe")
driver = webdriver.Ie(executable_path="C:\\IEDriverServer.exe")
driver.maximize_window()
driver.get("https://rahulshettyacademy.com/") #get method to hit url on browser
print(driver.title)
print(driver.current_url)
driver.get("https://rahulshettyacademy.com/AutomationPractice/")
driver.minimize_window()
driver.back()
driver.refresh()
driver.close()
|
1604302
|
import logging
from flask import request
from flask_restplus import Resource
from biolink.datamodel.serializers import compact_association_set, association_results
from ontobio.golr.golr_associations import search_associations, GolrFields
from biolink.api.restplus import api
from biolink import USER_AGENT
MAX_ROWS=10000
log = logging.getLogger(__name__)
parser = api.parser()
parser.add_argument('subject', action='append', help='Entity ids to be examined, e.g. NCBIGene:9342, NCBIGene:7227, NCBIGene:8131, NCBIGene:157570, NCBIGene:51164, NCBIGene:6689, NCBIGene:6387')
class EntitySetHomologs(Resource):
@api.expect(parser)
@api.marshal_list_with(association_results)
def get(self):
"""
Returns homology associations for a given input set of genes
"""
args = parser.parse_args()
subjects = args['subject']
del args['subject']
M=GolrFields()
rel = 'RO:0002434' # TODO; allow other types
results = search_associations(
subjects=subjects,
select_fields=[M.SUBJECT, M.RELATION, M.OBJECT],
use_compact_associations=True,
relation=rel,
rows=MAX_ROWS,
facet_fields=[],
user_agent=USER_AGENT,
**args
)
return results
|
1604374
|
import os
import copy
import scipy
import numpy as np
import matplotlib.pyplot as plt
from astropy import wcs
from astropy.io import fits
from astropy.table import Table, Column
import astropy.units as u
from astropy.coordinates import SkyCoord
from .display import display_single, SEG_CMAP
from .utils import img_cutout
from .imtools import imshift, imdelete, magnify, blkavg
class Celestial(object):
'''
Class for ``Celestial`` object.
This class is basically a celestial body from observational perspective.
It has its image, header, WCS. The mask which masks out contaminations can also be stored as an attribute.
    Then this ``Celestial`` object can be saved to a FITS file, and can be shifted, resized, rotated, etc.
    What's more, the user can check the image/mask/masked image simply by invoking ``Celestial.display_image()``.
This class can also be inherited to make other classes.
'''
def __init__(self, img, mask=None, header=None, dataset='Dragonfly', scale_bar_length=5):
'''
Initialize ``Celestial`` object.
Please note that all WCS information is derived from header!
We operate on header directly instead of wcs.
Parameters:
img (numpy 2-D array): image array.
mask (numpy 2-D array, optional): mask array. 1 means the pixel will be masked.
header: header of image, containing WCS information.
Typically it is ``astropy.io.fits.header`` object. If ``header=None``, it will create a default WCS.
dataset (str): The description of the input data.
scale_bar_length (float): Scale bar length when displaying.
Returns:
None
'''
self.shape = img.shape # in ndarray format
self.dataset = dataset
hdu = fits.PrimaryHDU(img, header=header)
self._image = hdu.data
if mask is not None:
self._mask = mask
# Sky position
ny, nx = img.shape
self.ny = ny
self.nx = nx
self.header = hdu.header
self.wcs = wcs.WCS(header)
if header is not None:
try:
self.pixel_scale = abs(header['CD1_1'] * 3600)
except:
self.pixel_scale = abs(header['PC1_1'] * 3600)
self.ra_cen, self.dec_cen = list(map(float, self.wcs.wcs_pix2world(ny // 2, nx // 2, 1)))
# This follows lower-left, lower-right, upper-right, upper-left.
self.ra_bounds, self.dec_bounds = self.wcs.wcs_pix2world([0, img.shape[1], img.shape[1], 0],
[0, 0, img.shape[0], img.shape[0]], 1)
self.sky_bounds = np.append(self.ra_bounds[2:], self.dec_bounds[1:3])
c1 = SkyCoord(ra=self.sky_bounds[0], dec=self.sky_bounds[2], unit='deg')
c2 = SkyCoord(ra=self.sky_bounds[1], dec=self.sky_bounds[3], unit='deg')
self.diag_radius = c1.separation(c2) / 2
else:
self.pixel_scale = 1
# initial length for scale bar when displaying
self.scale_bar_length = scale_bar_length
@property
def image(self):
return self._image
@image.setter
def image(self, img_array):
self._image = img_array
@property
def mask(self):
return self._mask
@mask.setter
def mask(self, mask_array):
self._mask = mask_array
@property
def hscmask(self):
return self._mask
@hscmask.setter
def hscmask(self, mask_array):
self._hscmask = mask_array
@property
def variance(self):
return self._variance
@variance.setter
def variance(self, variance_array):
self._variance = variance_array
# Save 2-D numpy array to ``fits``
def save_to_fits(self, fits_file_name, data='image', overwrite=True):
"""
Save image or mask of this ``Celestial`` object to ``fits`` file.
We operate wcs directly on header!
Parameters:
fits_file_name (str): File name of ``fits`` file
data (str): can be 'image' or 'mask'
overwrite (bool): Default is True
Returns:
None
"""
if data == 'image':
data_use = self.image
elif data == 'mask':
data_use = self.mask
else:
raise ValueError('Data can only be "image" or "mask".')
img_hdu = fits.PrimaryHDU(data_use, header=self.header)
if os.path.islink(fits_file_name):
os.unlink(fits_file_name)
img_hdu.writeto(fits_file_name, overwrite=overwrite)
return img_hdu
# Shift image/mask
def shift_image(self, dx, dy, method='spline', order=3, cval=0.0):
'''Shift the image of Celestial object. The WCS of image will also be changed.
Parameters:
dx (float): shift distance (in pixel) along x (horizontal).
                Note that elements in one row have the same y but different x.
Example: dx = 2 is to shift the image "RIGHT" (as seen in DS9), dy = 3 is to shift the image "UP".
dy (float): shift distance (in pixel) along y (vertical).
                Note that elements in one row have the same y but different x.
Example: dx = 2 is to shift the image "RIGHT" (as seen in DS9), dy = 3 is to shift the image "UP".
            method (str): interpolation method. Use 'spline', 'lanczos' or 'iraf'.
                If using 'iraf', the default interpolation is 'poly3'. 'lanczos' requires ``GalSim`` to be installed.
order (int): the order of Spline or Lanczos interpolation (>0).
cval (float): value to fill the edges. Default is 0.
Returns:
shift_image (ndarray): shifted image, the "image" attribute of ``Celestial`` class will also be changed accordingly.
'''
ny, nx = self.image.shape
        if abs(dx) > nx or abs(dy) > ny:
raise ValueError('# Shift distance is beyond the image size.')
if method == 'lanczos' or method == 'cubic' or method == 'quintic':
try: # try to import galsim
from galsim import degrees, Angle
from galsim.interpolant import Lanczos
from galsim import Image, InterpolatedImage
from galsim.fitswcs import AstropyWCS
except:
raise ImportError('# Import ``galsim`` failed! Please check if ``galsim`` is installed!')
# Begin shift
            assert (order > 0) and isinstance(order, int), 'order of ' + method + ' must be a positive integer.'
if method == 'lanczos':
galimg = InterpolatedImage(Image(self.image, dtype=float),
scale=self.pixel_scale, x_interpolant=Lanczos(order))
else:
galimg = InterpolatedImage(Image(self.image, dtype=float),
scale=self.pixel_scale, x_interpolant=method)
galimg = galimg.shift(dx=dx * self.pixel_scale, dy=dy * self.pixel_scale)
result = galimg.drawImage(scale=self.pixel_scale, nx=nx, ny=ny)#, wcs=AstropyWCS(self.wcs))
self._image = result.array
# Change the WCS of image
hdr = copy.deepcopy(self.header)
if 'CRPIX1' in hdr:
hdr['CRPIX1'] += dx
hdr['CRPIX2'] += dy
self.header = hdr
self.wcs = wcs.WCS(self.header)
return result.array
elif method == 'iraf':
self.save_to_fits('./_temp.fits', 'image')
imshift('./_temp.fits', './_shift_temp.fits', dx, dy, interp_type='poly3', boundary_type='constant')
try:
hdu = fits.open('./_shift_temp.fits')
except Exception as e:
                raise ValueError('Interpolation using IRAF failed with error "{}". \n Please try another interpolation method!'.format(e))
self.image = hdu[0].data
self.shape = hdu[0].data.shape
self.header = hdu[0].header
self.wcs = wcs.WCS(self.header)
hdu.close()
imdelete('./*temp.fits')
return self.image
elif method == 'spline':
from scipy.ndimage.interpolation import shift
assert 0 < order <= 5 and isinstance(order, int), 'order of ' + method + ' must be within 0-5.'
result = shift(self.image, [dy, dx], order=order, mode='constant', cval=cval)
self._image = result
# Change the WCS of image
hdr = copy.deepcopy(self.header)
if 'CRPIX1' in hdr:
hdr['CRPIX1'] += dx
hdr['CRPIX2'] += dy
self.header = hdr
self.wcs = wcs.WCS(self.header)
return result
else:
raise ValueError("# Not supported interpolation method. Use 'lanczos' or 'iraf'.")
def shift_mask(self, dx, dy, method='spline', order=3, cval=0.0):
'''Shift the mask of Celestial object.
Parameters:
dx (float): shift distance (in pixel) along x (horizontal).
Note that elements in one row has the same y but different x.
Example: dx = 2 is to shift the mask "RIGHT" (as seen in DS9), dy = 3 is to shift the image "UP".
dy (float): shift distance (in pixel) along y (vertical).
Note that elements in one row has the same y but different x.
Example: dx = 2 is to shift the mask "RIGHT" (as seen in DS9), dy = 3 is to shift the image "UP".
            method (str): interpolation method. Use 'spline', 'lanczos' or 'iraf'.
                If using 'iraf', the default interpolation is 'poly3'. 'lanczos' requires ``GalSim`` installed.
order (int): the order of Spline or Lanczos interpolation (>0).
cval (float): value to fill the edges. Default is 0.
Returns:
shift_mask (ndarray): shifted mask. The "mask" attribute of ``Celestial`` class will also be changed accordingly.
'''
ny, nx = self.mask.shape
        if abs(dx) > nx or abs(dy) > ny:
raise ValueError('# Shift distance is beyond the image size.')
if method == 'lanczos' or method == 'cubic' or method == 'quintic':
try: # try to import galsim
from galsim import degrees, Angle
from galsim.interpolant import Lanczos
from galsim import Image, InterpolatedImage
from galsim.fitswcs import AstropyWCS
except:
raise ImportError('# Import ``galsim`` failed! Please check if ``galsim`` is installed!')
# Begin shift
            assert (order > 0) and isinstance(order, int), 'order of ' + method + ' must be a positive integer.'
if method == 'lanczos':
                galimg = InterpolatedImage(Image(self.mask, dtype=float),
scale=self.pixel_scale, x_interpolant=Lanczos(order))
else:
                galimg = InterpolatedImage(Image(self.mask, dtype=float),
scale=self.pixel_scale, x_interpolant=method)
galimg = galimg.shift(dx=dx * self.pixel_scale, dy=dy * self.pixel_scale)
result = galimg.drawImage(scale=self.pixel_scale, nx=nx, ny=ny)#, wcs=AstropyWCS(self.wcs))
self._mask = result.array
# Change the WCS of image
hdr = copy.deepcopy(self.header)
if 'CRPIX1' in hdr:
hdr['CRPIX1'] += dx
hdr['CRPIX2'] += dy
self.header = hdr
self.wcs = wcs.WCS(self.header)
return result.array
elif method == 'iraf':
self.save_to_fits('./_temp.fits', 'mask')
imshift('./_temp.fits', './_shift_temp.fits', dx, dy, interp_type='poly3', boundary_type='constant')
try:
hdu = fits.open('./_shift_temp.fits')
except Exception as e:
                raise ValueError('Interpolation using IRAF failed with error "{}". \n Please try another interpolation method!'.format(e))
self.mask = hdu[0].data
self.shape = hdu[0].data.shape
self.header = hdu[0].header
self.wcs = wcs.WCS(self.header)
hdu.close()
imdelete('./*temp.fits')
return self.mask
elif method == 'spline':
from scipy.ndimage.interpolation import shift
assert 0 < order <= 5 and isinstance(order, int), 'order of ' + method + ' must be within 0-5.'
result = shift(self.mask, [dy, dx], order=order, mode='constant', cval=cval)
self._mask = result
# Change the WCS of image
hdr = copy.deepcopy(self.header)
if 'CRPIX1' in hdr:
hdr['CRPIX1'] += dx
hdr['CRPIX2'] += dy
self.header = hdr
self.wcs = wcs.WCS(self.header)
return result
else:
raise ValueError("# Not supported interpolation method. Use 'lanczos' or 'iraf'.")
def shift_Celestial(self, dx, dy, method='spline', order=3, cval=0.0):
'''Shift the Celestial object, including image and mask.
Parameters:
dx (float): shift distance (in pixel) along x (horizontal).
Note that elements in one row has the same y but different x.
Example: dx = 2 is to shift the image "RIGHT" (as seen in DS9), dy = 3 is to shift the image "UP".
dy (float): shift distance (in pixel) along y (vertical).
Note that elements in one row has the same y but different x.
Example: dx = 2 is to shift the image "RIGHT" (as seen in DS9), dy = 3 is to shift the image "UP".
            method (str): interpolation method. Use 'spline', 'lanczos' or 'iraf'.
                If using 'iraf', the default interpolation is 'poly3'. 'lanczos' requires ``GalSim`` installed.
order (int): the order of Lanczos interpolation (>0).
cval (float): value to fill the edges. Default is 0.
Returns:
None
'''
self.shift_image(dx, dy, method=method, order=order, cval=cval)
if hasattr(self, 'mask'):
if abs(np.sum(self.mask)) > 1e-5:
self.shift_mask(dx, dy, method=method, order=order, cval=cval)
def _resize_header_wcs(self, f):
hdr = copy.deepcopy(self.header)
w = wcs.WCS(hdr)
if f > 1:
hdr['CRPIX1'] = hdr['CRPIX1'] * f # + (1 - f * 1)
# (1 - f * x1), where x1=1 is the starting index
hdr['CRPIX2'] = hdr['CRPIX2'] * f # + (1 - f * 1)
# Delete "CDELT"
if "CDELT1" in hdr or "CDELT2" in hdr:
for i in hdr['CDELT*'].keys():
del hdr[i]
if "PC1_1" in hdr or "PC2_2" in hdr:
for i in hdr['PC?_?'].keys():
del hdr[i]
if "LTV1" in hdr:
for i in hdr['LTV*'].keys():
del hdr[i]
for i in hdr['LTM*'].keys():
del hdr[i]
hdr['CD1_1'] /= f
hdr['CD2_2'] /= f
if "CD1_2" in hdr:
hdr['CD1_2'] /= f
if "CD2_1" in hdr:
hdr['CD2_1'] /= f
else:
b = round(1 / f)
hdr['CRPIX1'] = hdr['CRPIX1'] / b
hdr['CRPIX2'] = hdr['CRPIX2'] / b
# Delete "CDELT"
if "CDELT1" in hdr or "CDELT2" in hdr:
for i in hdr['CDELT*'].keys():
del hdr[i]
if "PC1_1" in hdr or "PC2_2" in hdr:
for i in hdr['PC?_?'].keys():
del hdr[i]
if "LTV1" in hdr:
for i in hdr['LTV*'].keys():
del hdr[i]
for i in hdr['LTM*'].keys():
del hdr[i]
hdr['CD1_1'] *= b
hdr['CD2_2'] *= b
if "CD1_2" in hdr:
hdr['CD1_2'] *= b
if "CD2_1" in hdr:
hdr['CD2_1'] *= b
return hdr
def resize_image(self, f, method='cubic', order=3, cval=0.0):
'''
Zoom/Resize the image of Celestial object.
f > 1 means the image will be resampled (finer)! f < 1 means the image will be degraded.
Parameters:
f (float): the positive factor of zoom. If 0 < f < 1, the image will be resized to smaller one.
            method (str): interpolation method. Use 'spline', 'iraf', or 'lanczos', 'cubic', 'quintic'.
                We recommend using 'spline' or 'iraf'. The last three methods require ``GalSim`` installed.
                The GalSim-based methods now give results consistent with 'iraf'.
            order (int): the order of Spline or Lanczos interpolation (>0).
cval (float): value to fill the edges. Default is 0.
Returns:
resize_image (ndarray): resized image. The "image" attribute of ``Celestial`` class will also be changed accordingly.
'''
if method == 'lanczos' or method == 'cubic' or method == 'quintic':
try: # try to import galsim
from galsim import degrees, Angle
from galsim.interpolant import Lanczos
from galsim import Image, InterpolatedImage
from galsim.fitswcs import AstropyWCS
except:
raise ImportError('# Import `galsim` failed! Please check if `galsim` is installed!')
            assert (order > 0) and isinstance(order, int), 'order of ' + method + ' must be a positive integer.'
if method == 'lanczos':
galimg = InterpolatedImage(Image(self.image, dtype=float),
scale=self.pixel_scale, x_interpolant=Lanczos(order))
else:
galimg = InterpolatedImage(Image(self.image, dtype=float),
scale=self.pixel_scale, x_interpolant=method)
ny, nx = self.image.shape
if f > 1:
result = galimg.drawImage(scale=self.pixel_scale / f,
nx=int((nx -1) * f + 1), ny=int((ny - 1)* f + 1))
self.header = self._resize_header_wcs(f)
self.header['CRPIX1'] += (1 - f * 1)
self.header['CRPIX2'] += (1 - f * 1)
self._image = result.array
self.shape = self.image.shape
self.header['NAXIS1'] = result.array.shape[1]
self.header['NAXIS2'] = result.array.shape[0]
self.pixel_scale /= f
self.wcs = wcs.WCS(self.header)
#### Cautious! The following block could be wrong! ####
## Probably you'll need extra shift of image
dshift = 2 * (1 - f * 1) % 0.5
self.shift_image(dshift, dshift, method='spline')
# We don't want to shift wcs.
self.header['CRPIX1'] -= dshift
self.header['CRPIX2'] -= dshift
self.wcs = wcs.WCS(self.header)
#### Cautious! The above block could be wrong! ####
else:
from math import ceil
b = round(1 / f)
nxout = ceil(nx / b)
nyout = ceil(ny / b)
result = galimg.drawImage(scale=self.pixel_scale * b,
nx=nxout, ny=nyout)
self.header = self._resize_header_wcs(f)
self.header['CRPIX1'] += 0.5 - 1 / b / 2
self.header['CRPIX2'] += 0.5 - 1 / b / 2
self._image = result.array
self.shape = self.image.shape
self.header['NAXIS1'] = result.array.shape[1]
self.header['NAXIS2'] = result.array.shape[0]
self.pixel_scale *= b
self.wcs = wcs.WCS(self.header)
#### Cautious! The following block could be wrong! ####
## Probably you'll need extra shift of image
dshift = 0.5 - 1 / b / 2
self.shift_image(-dshift, -dshift, method='spline')
# We don't want to shift wcs.
self.header['CRPIX1'] -= dshift
self.header['CRPIX2'] -= dshift
self.wcs = wcs.WCS(self.header)
#### Cautious! The above block could be wrong! ####
return self.image
elif method == 'iraf':
self.save_to_fits('./_temp.fits', 'image')
if f > 1:
magnify('./_temp.fits', './_resize_temp.fits', f, f)
else:
blkavg('./_temp.fits', './_resize_temp.fits',
round(1/f), round(1/f), option='sum')
try:
hdu = fits.open('./_resize_temp.fits')
except Exception as e:
                raise ValueError('Interpolation using IRAF failed with error "{}". \n Please try another interpolation method!'.format(e))
self.image = hdu[0].data
self.shape = hdu[0].data.shape
self.header = hdu[0].header
#### Remove redundant PC keywords ###
for i in self.header['PC*'].keys():
del self.header[i]
#####################################
self.wcs = wcs.WCS(self.header)
self.pixel_scale /= f
hdu.close()
imdelete('./*temp.fits')
return self.image
elif method == 'spline':
ny, nx = self.image.shape
if f > 1:
from scipy import ndimage
assert 0 < order <= 5 and isinstance(order, int), 'order of ' + method + ' must be within 0-5.'
nx_zoomed = (nx - 1) * f + 1
f_eff = nx_zoomed / nx
result = ndimage.zoom(self.image, f_eff, order=order)
result *= 1/(f_eff**2) # Multiplying by this factor to conserve flux
self.header = self._resize_header_wcs(f)
#self.header['CRPIX1'] += (1 - f * 1)
#self.header['CRPIX2'] += (1 - f * 1)
self._image = result
self.shape = self.image.shape
self.header['NAXIS1'] = result.shape[1]
self.header['NAXIS2'] = result.shape[0]
self.pixel_scale /= f
self.wcs = wcs.WCS(self.header)
#### Cautious! The following block could be wrong! ####
## Probably you'll need extra shift of image
dshift = 2 * (1 - f * 1) % 0.5
self.shift_image(dshift, dshift, method='spline')
# We don't want to shift wcs.
self.header['CRPIX1'] -= dshift
self.header['CRPIX2'] -= dshift
self.wcs = wcs.WCS(self.header)
#### Cautious! The above block could be wrong! ####
else:
b = round(1 / f)
ny_bin = int( ny / b )
nx_bin = int( nx / b )
shape = (ny_bin, b, nx_bin, b)
x_crop = int( nx_bin * b )
y_crop = int( ny_bin * b )
result = self.image[0:y_crop, 0:x_crop].reshape(shape).sum(3).sum(1)
self.header = self._resize_header_wcs(f)
self.header['CRPIX1'] += 0.5 - 1 / b / 2
self.header['CRPIX2'] += 0.5 - 1 / b / 2
self._image = result
self.shape = self.image.shape
self.header['NAXIS1'] = result.shape[1]
self.header['NAXIS2'] = result.shape[0]
self.pixel_scale *= b
self.wcs = wcs.WCS(self.header)
else:
raise ValueError("# Not supported interpolation method. Use 'lanczos', 'spline' or 'iraf'.")
def resize_mask(self, f, method='cubic', order=5, cval=0.0):
'''
Zoom/Resize the mask of Celestial object.
f > 1 means the mask will be resampled (finer)! f < 1 means the mask will be degraded.
Parameters:
f (float): the positive factor of zoom. If 0 < f < 1, the mask will be resized to smaller one.
            method (str): interpolation method. Use 'spline', 'iraf', or 'lanczos', 'cubic', 'quintic'.
                We recommend using 'spline' or 'iraf'. The last three methods require ``GalSim`` installed.
                The GalSim-based methods now give results consistent with 'iraf'.
            order (int): the order of Spline or Lanczos interpolation (>0).
cval (float): value to fill the edges. Default is 0.
Returns:
resize_mask (ndarray): resized image. The "mask" attribute of ``Celestial`` class will also be changed accordingly.
'''
if not hasattr(self, 'mask'):
raise ValueError("This object doesn't have mask yet!")
if method == 'lanczos' or method == 'cubic' or method == 'quintic':
try: # try to import galsim
from galsim import degrees, Angle
from galsim.interpolant import Lanczos
from galsim import Image, InterpolatedImage
from galsim.fitswcs import AstropyWCS
except:
raise ImportError('# Import `galsim` failed! Please check if `galsim` is installed!')
            assert (order > 0) and isinstance(order, int), 'order of ' + method + ' must be a positive integer.'
if method == 'lanczos':
galimg = InterpolatedImage(Image(self.mask, dtype=float),
scale=self.pixel_scale, x_interpolant=Lanczos(order))
else:
galimg = InterpolatedImage(Image(self.mask, dtype=float),
scale=self.pixel_scale, x_interpolant=method)
ny, nx = self.mask.shape
if f > 1:
result = galimg.drawImage(scale=self.pixel_scale / f,
nx=int((nx -1) * f + 1), ny=int((ny - 1)* f + 1))
                self.header = self._resize_header_wcs(f)
self.header['CRPIX1'] += (1 - f * 1)
self.header['CRPIX2'] += (1 - f * 1)
self._mask = result.array
self.shape = self.mask.shape
self.header['NAXIS1'] = result.array.shape[1]
self.header['NAXIS2'] = result.array.shape[0]
self.pixel_scale /= f
self.wcs = wcs.WCS(self.header)
#### Cautious! The following block could be wrong! ####
## Probably you'll need extra shift of image
dshift = 2 * (1 - f * 1) % 0.5
self.shift_mask(dshift, dshift, method='spline')
# We don't want to shift wcs.
self.header['CRPIX1'] -= dshift
self.header['CRPIX2'] -= dshift
self.wcs = wcs.WCS(self.header)
#### Cautious! The above block could be wrong! ####
else:
from math import ceil
b = round(1 / f)
nxout = ceil(nx / b)
nyout = ceil(ny / b)
result = galimg.drawImage(scale=self.pixel_scale * b,
nx=nxout, ny=nyout)
                self.header = self._resize_header_wcs(f)
self.header['CRPIX1'] += 0.5 - 1 / b / 2
self.header['CRPIX2'] += 0.5 - 1 / b / 2
self._mask = result.array
                self.shape = self.mask.shape
self.header['NAXIS1'] = result.array.shape[1]
self.header['NAXIS2'] = result.array.shape[0]
self.pixel_scale *= b
self.wcs = wcs.WCS(self.header)
#### Cautious! The following block could be wrong! ####
## Probably you'll need extra shift of image
dshift = 0.5 - 1 / b / 2
                self.shift_mask(-dshift, -dshift, method='spline')
# We don't want to shift wcs.
self.header['CRPIX1'] -= dshift
self.header['CRPIX2'] -= dshift
self.wcs = wcs.WCS(self.header)
#### Cautious! The above block could be wrong! ####
return self.mask
elif method == 'iraf':
self.save_to_fits('./_temp.fits', 'mask')
if f > 1:
magnify('./_temp.fits', './_resize_temp.fits', f, f)
else:
blkavg('./_temp.fits', './_resize_temp.fits',
round(1/f), round(1/f), option='sum')
try:
hdu = fits.open('./_resize_temp.fits')
except Exception as e:
                raise ValueError('Interpolation using IRAF failed with error "{}". \n Please try another interpolation method!'.format(e))
self.mask = hdu[0].data
self.shape = hdu[0].data.shape
self.header = hdu[0].header
self.wcs = wcs.WCS(self.header)
self.pixel_scale /= f
hdu.close()
imdelete('./*temp.fits')
return self.mask
elif method == 'spline':
ny, nx = self.mask.shape
if f > 1:
from scipy import ndimage
assert 0 < order <= 5 and isinstance(order, int), 'order of ' + method + ' must be within 0-5.'
nx_zoomed = (nx - 1) * f + 1
f_eff = nx_zoomed / nx
                result = ndimage.zoom(self.mask, f_eff, order=order)
                result *= 1/(f_eff**2) # Multiplying by this factor to conserve flux
                self.header = self._resize_header_wcs(f)
self.header['CRPIX1'] += (1 - f * 1)
self.header['CRPIX2'] += (1 - f * 1)
self._mask = result
self.shape = self.mask.shape
self.header['NAXIS1'] = result.shape[1]
self.header['NAXIS2'] = result.shape[0]
self.pixel_scale /= f
self.wcs = wcs.WCS(self.header)
#### Cautious! The following block could be wrong! ####
## Probably you'll need extra shift of image
dshift = 2 * (1 - f * 1) % 0.5
                self.shift_mask(dshift, dshift, method='spline')
# We don't want to shift wcs.
self.header['CRPIX1'] -= dshift
self.header['CRPIX2'] -= dshift
self.wcs = wcs.WCS(self.header)
#### Cautious! The above block could be wrong! ####
else:
b = round(1 / f)
ny_bin = int( ny / b )
nx_bin = int( nx / b )
shape = (ny_bin, b, nx_bin, b)
x_crop = int( nx_bin * b )
y_crop = int( ny_bin * b )
result = self.mask[0:y_crop, 0:x_crop].reshape(shape).sum(3).sum(1)
                self.header = self._resize_header_wcs(f)
self.header['CRPIX1'] += 0.5 - 1 / b / 2
self.header['CRPIX2'] += 0.5 - 1 / b / 2
self._mask = result
                self.shape = self.mask.shape
self.header['NAXIS1'] = result.shape[1]
self.header['NAXIS2'] = result.shape[0]
self.pixel_scale *= b
self.wcs = wcs.WCS(self.header)
else:
raise ValueError("# Not supported interpolation method. Use 'lanczos', 'spline' or 'iraf'.")
def resize_Celestial(self, f, method='cubic', order=5, cval=0.0):
'''
Resize the Celestial object, including both image and mask.
f > 1 means the image/mask will be resampled! f < 1 means the image/mask will be degraded.
Parameters:
f (float): the positive factor of zoom. If 0 < f < 1, the mask will be resized to smaller one.
method (str): interpolation method. Use 'lanczos' or 'spline' or 'iraf'. 'Lanczos' requires ``GalSim`` installed.
            order (int): the order of Lanczos interpolation (>0).
cval (float): value to fill the edges. Default is 0.
Returns:
None
'''
self.resize_image(f, method=method, order=order, cval=cval)
if hasattr(self, 'mask'):
self.resize_mask(f, method=method, order=order, cval=cval)
# Display image/mask
def display_image(self, **kwargs):
"""
        Take a peek at the image, using "zscale", "arcsinh" stretching and "viridis" colormap. You can change them by adding ``**kwargs``.
Parameters:
``**kwargs``: arguments in ``mrf.display.display_single``.
Returns:
None
"""
display_single(self.image, pixel_scale=self.pixel_scale,
scale_bar_length=self.scale_bar_length, **kwargs)
def display_mask(self, **kwargs):
"""
Take a peek at the mask.
Parameters:
``**kwargs``: arguments in ``mrf.display.display_single``.
Returns:
None
"""
display_single(self.mask, scale='linear', pixel_scale=self.pixel_scale,
cmap=SEG_CMAP, scale_bar_length=self.scale_bar_length, **kwargs)
def display_Celestial(self, **kwargs):
"""
        Take a peek at the masked image, using "zscale", "arcsinh" stretching and "viridis" colormap. You can change them by adding ``**kwargs``.
Parameters:
``**kwargs``: arguments in ``mrf.display.display_single``.
Returns:
None
"""
if hasattr(self, 'mask'):
display_single(self.image * (~self.mask.astype(bool)), pixel_scale=self.pixel_scale,
scale_bar_length=self.scale_bar_length, **kwargs)
else:
self.display_image()
"""
elif method == 'spline':
## This only works for ZOOM! NEED BKGAVG!
from scipy.ndimage import zoom
assert 0 < order <= 5 and isinstance(order, int), 'order of ' + method + ' must be within 0-5.'
ny, nx = self.image.shape
print(ny, nx, f)
result = zoom(self.image, float(f), order=order, mode='constant', cval=cval)
result /= f**2 # preserve total flux
self.header = self._resize_header_wcs(self.image, f)
self._image = result
self.shape = self.image.shape
self.header['NAXIS1'] = result.shape[1]
self.header['NAXIS2'] = result.shape[0]
self.pixel_scale /= f
self.wcs = wcs.WCS(self.header)
#### Cautious! The following block could be wrong! ####
## Probably you'll need extra shift of image
#dshift = 2 * (1 - f * 1) % 0.5
#self.shift_image(dshift, dshift, method=method)
# We don't want to shift wcs.
#self.header['CRPIX1'] -= dshift
#self.header['CRPIX2'] -= dshift
#self.wcs = wcs.WCS(self.header)
#### Cautious! The above block could be wrong! ####
print(result.shape[1], result.shape[0])
dx = int((nx - 1) * f + 1) - result.shape[1]
dy = int((ny - 1) * f + 1) - result.shape[0]
print(dx, dy)
result = self.image
# Pad the image to fit the shape of `iraf` results
if dy != 0:
if dy < 0:
result = result[-dy:, :]
if dx != 0:
if dx < 0:
result = result[:, -dx:]
#result = np.append(result, np.zeros(result.shape[0], dx), axis=1)
self._image = result
#return result
"""
class Star(Celestial):
"""
This ``Star`` class is the inheritance of ``Celestial`` class.
It represents a small cutout, which is typically a star.
Other than the functions inherited from ``Celestial``, ``Star`` object has extra functions such as ``centralize``, ``mask_out_contam``.
"""
def __init__(self, img, header, starobj, colnames=['x', 'y'], halosize=40,
padsize=50, mask=None, hscmask=None):
"""
Initialize ``Star`` object.
Parameters:
img (numpy 2-D array): the image from which the cutout of star is made.
header: header of image, containing WCS information. Typically it is ``astropy.io.fits.header`` object.
            starobj: A row of ``astropy.table.Table``, containing basic information of the star, such as ``ra``, ``dec`` and magnitudes.
colnames (list of str): indicating the columns which contains position of the star. It could be ['x', 'y'] or ['ra', 'dec'].
            halosize (float): the radial size of the cutout. If ``halosize=40``, the square cutout will be (2 * 40 + 1) = 81 pixels on a side.
padsize (float): The image will be padded in order to make cutout of stars near the edge of input image.
``padsize`` should be equal to or larger than ``halosize``.
mask (numpy 2-D array): the mask of input big image.
hscmask (numpy 2-D array): the hscmask of input image.
Returns:
None
"""
Celestial.__init__(self, img, mask, header=header)
if hscmask is not None:
self.hscmask = hscmask
self.name = 'star'
self.scale_bar_length = 3
# Trim the image to star size
# starobj should at least contain x, y, (or ra, dec)
if 'x' in colnames or 'y' in colnames:
# Position of a star, in numpy convention
x_int = int(starobj['x'])
y_int = int(starobj['y'])
dx = -1.0 * (starobj['x'] - x_int)
dy = -1.0 * (starobj['y'] - y_int)
elif 'ra' in colnames or 'dec' in colnames:
w = self.wcs
x, y = w.wcs_world2pix(starobj['ra'], starobj['dec'], 0)
x_int = int(x)
y_int = int(y)
dx = -1.0 * (x - x_int)
dy = -1.0 * (y - y_int)
halosize = int(halosize)
# Make padded image to deal with stars near the edges
padsize = int(padsize)
ny, nx = self.image.shape
im_padded = np.zeros((ny + 2 * padsize, nx + 2 * padsize))
im_padded[padsize: ny + padsize, padsize: nx + padsize] = self.image
# Star itself, but no shift here.
halo = im_padded[y_int + padsize - halosize: y_int + padsize + halosize + 1,
x_int + padsize - halosize: x_int + padsize + halosize + 1]
self._image = halo
self.shape = halo.shape
self.cen_xy = [x_int, y_int]
self.dx = dx
self.dy = dy
try:
# FLux
self.flux = starobj['flux']
self.fluxann = starobj['flux_ann']
self.fluxauto = starobj['flux_auto']
except:
pass #raise Warning('No flux assigned to the star!')
if hasattr(self, 'mask'):
im_padded = np.zeros((ny + 2 * padsize, nx + 2 * padsize))
im_padded[padsize: ny + padsize, padsize: nx + padsize] = self.mask
# Mask itself, but no shift here.
halo = (im_padded[y_int + padsize - halosize: y_int + padsize + halosize + 1,
x_int + padsize - halosize: x_int + padsize + halosize + 1])
self._mask = halo
if hasattr(self, 'hscmask'):
im_padded = np.zeros((ny + 2 * padsize, nx + 2 * padsize))
im_padded[padsize: ny + padsize, padsize: nx + padsize] = self.hscmask
# Mask itself, but no shift here.
halo = (im_padded[y_int + padsize - halosize: y_int + padsize + halosize + 1,
x_int + padsize - halosize: x_int + padsize + halosize + 1])
self.hscmask = halo
def centralize(self, method='spline', order=5, cval=0.0):
"""
Shift the cutout to the true position of the star using interpolation.
Parameters:
            method (str): interpolation method. Options are "spline", "iraf" and "lanczos". "lanczos" requires ``GalSim`` installed.
order (int): the order of Lanczos interpolation (>0).
cval (float): value to fill the edges. Default is 0.
Returns:
None
"""
self.shift_Celestial(self.dx, self.dy, method=method, order=order, cval=cval)
def sub_bkg(self, sigma=4.5, deblend_cont=0.0001, verbose=True):
"""
Subtract the locally-measured background of ``Star`` object. The sky is measured by masking out objects using ``sep``.
Be cautious and be aware what you do when using this function.
Parameters:
sigma (float): The sigma in ``SExtractor``.
deblend_cont (float): Deblending parameter.
verbose (bool): Whether print out background value.
Returns:
None
"""
        # Actually this should be estimated in larger cutouts.
# So make another cutout (larger)!
from astropy.convolution import convolve, Box2DKernel
from .image import extract_obj, seg_remove_cen_obj
from sep import Background
img_blur = convolve(abs(self.image), Box2DKernel(2))
img_objects, img_segmap = extract_obj(abs(img_blur), b=10, f=4, sigma=sigma, minarea=2, pixel_scale=self.pixel_scale,
deblend_nthresh=32, deblend_cont=deblend_cont,
sky_subtract=False, show_fig=False, verbose=False)
bk = Background(self.image, img_segmap != 0)
glbbck = bk.globalback
self.globalback = glbbck
if verbose:
print('# Global background: ', glbbck)
self.image -= glbbck
def get_masked_image(self, cval=np.nan):
"""
Mask image according to the mask.
Parameter:
cval: value to fill the void. Default is NaN, but sometimes NaN is problematic.
Return:
imgcp (numpy 2-D array): masked image.
"""
if not hasattr(self, 'mask'):
print("This ``Star`` object doesn't have a ``mask``!")
return self.image
else:
imgcp = copy.copy(self.image)
imgcp[self.mask.astype(bool)] = cval
return imgcp
def mask_out_contam(self, sigma=4.5, deblend_cont=0.0001, blowup=True,
show_fig=True, verbose=True):
"""
Mask out contamination in the cutout of star. Contamination may be stars, galaxies or artifacts.
This function uses ``sep`` to identify and mask contamination.
        ** DO THIS AFTER CENTRALIZING! **
Parameters:
sigma (float): The sigma in ``SExtractor``. Default is 4.5.
            deblend_cont (float): Deblending parameter. Default is 0.0001.
blowup (bool): Whether blow up the segmentation mask by convolving a 1.5 pixel Gaussian kernel.
show_fig (bool): Whether show the figure.
verbose (bool): Whether print out results.
Returns:
None
"""
from astropy.convolution import convolve, Box2DKernel
from .utils import extract_obj, seg_remove_cen_obj
img_blur = convolve(abs(self.image), Box2DKernel(2))
img_objects, img_segmap = extract_obj(abs(img_blur), b=10, f=3, sigma=sigma, minarea=1, pixel_scale=self.pixel_scale,
deblend_nthresh=72, deblend_cont=deblend_cont, flux_aper=None,
sky_subtract=True, show_fig=show_fig, verbose=verbose)
# remove central object from segmap
cen_obj = img_objects[img_segmap[img_segmap.shape[1]//2, img_segmap.shape[0]//2] - 1]
img_segmap = seg_remove_cen_obj(img_segmap)
detect_mask = (img_segmap != 0).astype(float)
if blowup is True:
from astropy.convolution import convolve, Gaussian2DKernel
cv = convolve(1e3 * detect_mask / np.nansum(detect_mask), Gaussian2DKernel(1.5))
detect_mask = (cv > 0.5).astype(float)
self.mask = detect_mask
#imgcp = copy.copy(self.image)
#imgcp[detect_mask.astype(bool)] = cval
#self.image = imgcp
# Shift mask will be very horrible!!! Hence we still don't use self.mask.
# Instead we directly mask out on the image.
return
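# A minimal usage sketch for ``Star`` (illustrative only): ``img`` and ``header`` are a
# parent image and its header, and ``row`` is one row of a SExtractor-like catalog
# containing 'x' and 'y' columns.
#
#     star = Star(img, header, row, colnames=['x', 'y'], halosize=40)
#     star.centralize(method='spline')
#     star.mask_out_contam(show_fig=False, verbose=False)
#     clean_cutout = star.get_masked_image(cval=0.0)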
|
1604403
|
import logging
import sys
format_str = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(format_str)
def get_verbose_logger(module_name):
"""Gets a logger that writes to the console using a StreamHandler
Args:
**module_name (str)**: Name of the module requesting the logger
Returns:
A logger using a StreamHandler
"""
logger = logging.getLogger(module_name)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def get_file_logger(module_name):
"""Gets a logger that writes to a file using a FileHandler
Args:
**module_name (str)**: Name of the module requesting the logger
Returns:
A logger using a FileHandler
"""
logger = logging.getLogger(module_name)
logger.setLevel(logging.INFO)
handler = logging.FileHandler('obscurepy.log')
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def get_null_logger(module_name):
"""Gets a logger with a NullHandler that actually does no logging
Args:
**module_name (str)**: Name of the module requesting the logger
Returns:
A logger using a NullHandler
"""
logger = logging.getLogger(module_name)
logger.setLevel(logging.WARNING)
handler = logging.NullHandler()
handler.setLevel(logging.WARNING)
logger.addHandler(handler)
return logger
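if __name__ == '__main__':
    # Minimal self-test sketch (not part of the public API): emit one message through
    # each handler type. The file logger appends to 'obscurepy.log' in the working directory.
    get_verbose_logger('demo.verbose').info('hello from the stream logger')
    get_file_logger('demo.file').info('hello from the file logger')
    get_null_logger('demo.null').warning('this message is handled by the NullHandler only')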
|
1604411
|
import os
import matplotlib.pyplot as plt
def create_dir(path):
if not os.path.exists(path):
os.makedirs(path)
def make_image2(xy, img_folder, prefix):
    if not os.path.exists(img_folder):
        os.makedirs(img_folder)
    fig_num = len(xy)
    mydpi = 100
    for i in range(fig_num):
        #fig = plt.figure(figsize=(32/mydpi,32/mydpi))
        fig = plt.figure(figsize=(128 / mydpi, 128 / mydpi))
        plt.xlim(-200, 200)
        plt.ylim(-200, 200)
        plt.axis('off')
        color = ['r', 'b', 'g', 'k', 'y', 'm', 'c']
        for j in range(len(xy[0])):
            #plt.scatter(xy[i,j,1],xy[i,j,0],c=color[j%len(color)],s=0.5);
            plt.scatter(xy[i, j, 1], xy[i, j, 0], c=color[j % len(color)], s=5)
        fig.savefig(img_folder + prefix + "_" + str(i) + ".png", dpi=mydpi)
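# Illustrative usage sketch (assumes numpy is available; not part of the original module).
# ``xy`` is expected to be indexable as xy[frame, object, coordinate] with shape
# (num_frames, num_objects, 2), e.g.:
#
#     import numpy as np
#     xy = np.random.uniform(-200, 200, size=(5, 3, 2))
#     make_image2(xy, 'frames/', 'demo')   # writes frames/demo_0.png ... frames/demo_4.png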
|
1604444
|
from functools import lru_cache
from typing import Optional
import pydantic
SETTINGS_ENV_FILE = "~/.planetarycomputer/settings.env"
SETTINGS_ENV_PREFIX = "PC_SDK_"
DEFAULT_SAS_TOKEN_ENDPOINT = "https://planetarycomputer.microsoft.com/api/sas/v1/token"
class Settings(pydantic.BaseSettings):
"""PC SDK configuration settings
    Settings defined here are read from two sources, in this order:
    * environment variables
    * environment file: ~/.planetarycomputer/settings.env
    That is, any setting defined via an environment variable takes precedence over
    the same setting defined in the environment file, and can be used to override it.
All settings are prefixed with `PC_SDK_`
"""
# PC_SDK_SUBSCRIPTION_KEY: subscription key to send along with token
# requests. If present, allows less restricted rate limiting.
subscription_key: Optional[str] = None
# PC_SDK_SAS_URL: The planetary computer SAS endpoint URL.
# This will default to the main planetary computer endpoint.
sas_url: str = DEFAULT_SAS_TOKEN_ENDPOINT
class Config:
env_file = SETTINGS_ENV_FILE
env_prefix = SETTINGS_ENV_PREFIX
@staticmethod
@lru_cache(maxsize=1)
def get() -> "Settings":
return Settings()
def set_subscription_key(key: str) -> None:
"""Sets the Planetary Computer API subscription key to use
    within the process that loaded this module. This does not write
to the settings file.
Args:
key: The Planetary Computer API subscription key to use
for methods inside this library that can utilize the key,
such as SAS token generation.
"""
Settings.get().subscription_key = key
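# Illustrative usage sketch (the key below is a placeholder, not a real subscription key):
#
#     set_subscription_key("<your-subscription-key>")
#     settings = Settings.get()
#     print(settings.sas_url, settings.subscription_key)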
|
1604447
|
import logging
import random
import pandas as pd
from atpy.portfolio.portfolio_manager import PortfolioManager, MarketOrder, Type
from pyevents.events import EventFilter
class RandomStrategy:
"""Random buy/sell on each step"""
def __init__(self, listeners, bar_event_stream, portfolio_manager: PortfolioManager, max_buys_per_step=1, max_sells_per_step=1):
"""
:param listeners: listeners environment
:param bar_event_stream: bar events
:param portfolio_manager: Portfolio manager
:param max_buys_per_step: maximum buy orders per time step (one bar)
:param max_sells_per_step: maximum sell orders per time step (one bar)
"""
self.listeners = listeners
bar_event_stream += self.on_bar_event
self.portfolio_manager = portfolio_manager
self.max_buys_per_step = max_buys_per_step
self.max_sells_per_step = max_sells_per_step
def on_bar_event(self, data):
buys = random.randint(0, min(len(data.index.get_level_values('symbol')), self.max_buys_per_step))
for _ in range(buys):
symbol = data.sample().index.get_level_values('symbol')[0]
volume = random.randint(1, data.loc[pd.IndexSlice[:, symbol], :].iloc[-1]['volume'])
o = MarketOrder(Type.BUY, symbol, volume)
logging.getLogger(__name__).debug('Placing new order ' + str(o))
self.listeners({'type': 'order_request', 'data': o})
quantities = self.portfolio_manager.quantity()
sells = random.randint(0, min(len(quantities), self.max_sells_per_step))
selected_symbols = set()
orders = list()
for _ in range(sells):
symbol, volume = random.choice(list(quantities.items()))
while symbol in selected_symbols:
symbol, volume = random.choice(list(quantities.items()))
selected_symbols.add(symbol)
orders.append(MarketOrder(Type.SELL, symbol, random.randint(1, min(self.portfolio_manager.quantity(symbol), volume))))
for o in orders:
logging.getLogger(__name__).debug('Placing new order ' + str(o))
self.listeners({'type': 'order_request', 'data': o})
def order_requests_stream(self):
return EventFilter(listeners=self.listeners,
event_filter=lambda e: True if ('type' in e and e['type'] == 'order_request') else False,
event_transformer=lambda e: (e['data'],))
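# Illustrative wiring sketch (names are placeholders; assumes a ``listeners`` event hub,
# a bar event stream and a configured PortfolioManager from the surrounding framework):
#
#     strategy = RandomStrategy(listeners, bar_events, portfolio_manager,
#                               max_buys_per_step=1, max_sells_per_step=1)
#     order_stream = strategy.order_requests_stream()   # only 'order_request' events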
|
1604457
|
from qpylib import qpylib
def test_submodules_imported():
assert qpylib.app_qpylib is not None
assert qpylib.asset_qpylib is not None
assert qpylib.json_qpylib is not None
assert qpylib.log_qpylib is not None
assert qpylib.offense_qpylib is not None
assert qpylib.rest_qpylib is not None
assert qpylib.util_qpylib is not None
|
1604491
|
import hashlib, json, os, gzip, sys, re
from unidecode import unidecode
def tab_file(fname, cols):
for line in file(fname).readlines():
vals = line.strip().split("\t")
yield dict(zip(cols, vals))
def _id(uri):
return hashlib.md5(uri).hexdigest()[:16]
has_unicode = re.compile(r'[^\x00-\x7f]')
def transliterate(alt_name):
if has_unicode.search(alt_name["name"]):
try:
xlit = unidecode(alt_name["name"].decode("utf8"))
except (UnicodeDecodeError, UnicodeEncodeError):
try:
xlit = unidecode(alt_name["name"].decode("latin1"))
            except (UnicodeDecodeError, UnicodeEncodeError):
return
if xlit != alt_name["name"]:
addl_name = alt_name.copy()
addl_name["lang"] = alt_name["lang"] + ":ascii"
addl_name["name"] = xlit
return addl_name
def Result(cursor, arraysize=10000):
'An iterator that uses fetchmany to keep memory usage down'
while True:
results = cursor.fetchmany(arraysize)
if not results:
break
cols = [c.name for c in cursor.description]
for result in results:
yield dict(zip(cols, result))
class Dump(object):
def __init__(self, template, max_rows=1000):
self.max_rows = max_rows
self.rows = 0
self.content = ""
self.template = template
path = os.path.dirname(template)
if not os.path.exists(path): os.makedirs(path)
def write_bulk(self, index, doc_type, doc_id, place):
self.content += json.dumps({"index": {"_id": doc_id, "_index":index, "_type": doc_type}})
self.write_place(place)
def write(self, uri, place):
self.content += json.dumps({"index": {"_id":_id(uri)}})
try:
self.write_place(place)
except(UnicodeDecodeError):
print place
def write_place(self, place):
self.content += "\n" + json.dumps(place, sort_keys=True) + "\n"
self.rows += 1
if self.rows % (long(self.max_rows) / 10) == 0L: print >>sys.stderr, "\r% 9d" % self.rows,
if self.rows % long(self.max_rows) == 0L: self.flush()
def flush(self, final=0):
fname = self.template % (int(self.rows/long(self.max_rows))+final)
print >>sys.stderr, " ", fname
out = gzip.open(fname, "wb")
out.write(self.content)
out.close()
self.content = ""
def close(self):
self.flush(final=1)
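# Illustrative usage sketch (file name template and document are placeholders):
#
#     dump = Dump("out/places-%04d.json.gz", max_rows=1000)
#     dump.write("http://example.org/place/1", {"name": "Example", "lang": "en"})
#     dump.close()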
|
1604517
|
from aspen import log
from gratipay.application import Application
log('Instantiating Application from gunicorn_entrypoint')
website = Application().website
|
1604519
|
import math
from textblob import TextBlob
from nlp_profiler.constants import NOT_APPLICABLE, NaN
### Sentiment analysis
def sentiment_polarity_summarised(polarity: str) -> str:
if (not polarity) or (polarity == NOT_APPLICABLE):
return NOT_APPLICABLE
if 'negative' in polarity.lower():
return 'Negative'
if 'positive' in polarity.lower():
return 'Positive'
return polarity
# Docs: https://textblob.readthedocs.io/en/dev/quickstart.html
### See https://en.wikipedia.org/wiki/Words_of_estimative_probability
### The General Area of Possibility
sentiment_polarity_to_words_mapping = [
["Very positive", 99, 100], # Certain: 100%: Give or take 0%
["Quite positive", 87, 99], # Almost Certain: 93%: Give or take 6%
["Pretty positive", 51, 87], # Probable: 75%: Give or take about 12%
["Neutral", 49, 51], # Chances About Even: 50%: Give or take about 10%
["Pretty negative", 12, 49], # Probably Not: 30%: Give or take about 10%
["Quite negative", 2, 12], # Almost Certainly Not 7%: Give or take about 5%
["Very negative", 0, 2] # Impossible 0%: Give or take 0%
]
def sentiment_polarity(score: float) -> str:
if math.isnan(score):
return NOT_APPLICABLE
score = float(score)
score = (score + 1) / 2 # see https://stats.stackexchange.com/questions/70801/how-to-normalize-data-to-0-1-range
score *= 100
for _, each_slab in enumerate(sentiment_polarity_to_words_mapping): # pragma: no cover
# pragma: no cover => early termination leads to loss of test coverage info
if (score >= each_slab[1]) and (score <= each_slab[2]):
return each_slab[0]
def sentiment_polarity_score(text: str) -> float:
if (not isinstance(text, str)) or (len(text.strip()) == 0):
return NaN
return TextBlob(text).sentiment.polarity
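if __name__ == '__main__':
    # Minimal demo sketch (assumes TextBlob and its bundled sentiment data are available).
    score = sentiment_polarity_score("I really enjoy using this library")
    print(score, '->', sentiment_polarity(score))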
|
1604584
|
from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.functional.linear')
def convert_Linear(ctx):
input = ctx.method_args[0]
weight = get_arg(ctx, 'weight', 1, None)
bias = get_arg(ctx, 'bias', 2, None)
input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
output = ctx.method_return
# reshape to ...xNx1x1
layer = ctx.network.add_shuffle(input_trt)
layer.reshape_dims = tuple(input_trt.shape) + (1, 1)
bias_trt = trt.Weights(torch_dtype_to_trt(weight.dtype))
if bias is not None:
bias_trt = bias.detach().cpu().numpy()
# add fully connected
layer = ctx.network.add_fully_connected(
input=layer.get_output(0),
num_outputs=int(weight.shape[0]),
kernel=weight.detach().cpu().numpy(),
bias=bias_trt)
# reshape back to N
layer = ctx.network.add_shuffle(layer.get_output(0))
layer.reshape_dims = tuple(output.shape[1:])
output._trt = layer.get_output(0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 10)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 10)])
def test_Linear_basic():
return torch.nn.Linear(10, 5)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 10)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 10)])
def test_Linear_no_bias():
return torch.nn.Linear(10, 5, bias=False)
|
1604601
|
from typing import Union
class TensorAdapter:
def zeros(self, size: int, dtype: str):
        raise NotImplementedError()
    def argmax(self, arr):
        raise NotImplementedError()
    def get(self, tensor, pos):
        raise NotImplementedError()
try:
import numpy as np
class NumpyAdapter(TensorAdapter):
def zeros(self, size: int, dtype: Union[str, 'np.dtype']):
return np.zeros(size, dtype=dtype)
def argmax(self, arr):
return np.argmax(arr)
def get(self, arr, pos):
return arr[pos]
except ImportError:
class NumpyAdapter(TensorAdapter):
        def zeros(self, _size: int, _dtype: str):
raise RuntimeError("numpy library is not installed")
def argmax(self, _arr):
raise RuntimeError("numpy library is not installed")
def get(self, _arr, _pos):
raise RuntimeError("numpy library is not installed")
_numpy_adapter = NumpyAdapter()
try:
import torch
class PyTorchAdapter(TensorAdapter):
def zeros(self, size: int, dtype: Union[str, 'torch.dtype']):
if isinstance(dtype, str):
dtype = torch.__getattribute__(dtype)
return torch.zeros(size, dtype=dtype)
def argmax(self, arr):
return torch.argmax(arr)
def get(self, arr, pos):
return arr[pos].item()
except ImportError:
class PyTorchAdapter(TensorAdapter):
        def zeros(self, _size: int, _dtype: str):
raise RuntimeError("torch library is not installed")
def argmax(self, _arr):
raise RuntimeError("torch library is not installed")
def get(self, _arr, _pos):
raise RuntimeError("torch library is not installed")
_pytorch_adapter = PyTorchAdapter()
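if __name__ == '__main__':
    # Minimal demo sketch using the numpy backend; raises RuntimeError if numpy is missing.
    arr = _numpy_adapter.zeros(4, 'int64')
    print(_numpy_adapter.argmax(arr), _numpy_adapter.get(arr, 0))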
|
1604635
|
import torch
from torch import nn
import torch.nn.functional as F
from backbones.unet import Encoder as unet_encoder, Decoder as unet_decoder
from backbones.resnet import resnet50 as resnet_encoder, Decoder as resnet_decoder
from utils.topology import get_circle
import neural_renderer as nr
__all__ = ['CircleNet']
class CircleNet(nn.Module):
def __init__(self,
args):
super(CircleNet, self).__init__()
        self.num_nodes = args.num_nodes
self.dec_dim = args.dec_dim
self.dec_size = args.dec_size
self.image_size = args.image_size
self.stages = args.stages
if args.arch == 'resnet':
kwargs = {'stages': self.stages}
res_dims = [256, 512, 1024, 2048]
self.backbone = resnet_encoder(pretrained=True, **kwargs)
dec_skip_dims = [res_dims[i] for i in self.stages][::-1]
self.disp = resnet_decoder(dec_skip_dims, 2, self.dec_dim, self.dec_size, drop=args.drop)
elif args.arch == 'unet':
self.backbone = unet_encoder(args.enc_dim, drop=args.drop)
self.disp = unet_decoder(self.backbone.dims, drop=args.drop)
self.texture_size = 2
self.camera_distance = 1
self.elevation = 0
self.azimuth = 0
self.renderer = nr.Renderer(camera_mode='look_at', image_size=self.image_size, light_intensity_ambient=1,
light_intensity_directional=1, perspective=False)
def forward(self, x, iter=3):
features = self.backbone(x)
        nodes, faces = get_circle(x.shape[0], self.dec_size, self.num_nodes, x.device)
output_masks = []
output_points = []
disp = torch.tanh(self.disp(features))
for i in range(iter):
if disp.shape[2:] != x.shape[2:]:
disp = F.interpolate(disp, size=x.shape[2:], mode='bilinear')
nodes[..., 1] = nodes[..., 1] * -1
# Sample and move
Pxx = F.grid_sample(disp[:, 0:1], nodes).transpose(3, 2)
Pyy = F.grid_sample(disp[:, 1:2], nodes).transpose(3, 2)
dP = torch.cat((Pxx, Pyy), -1)
nodes = nodes + dP
nodes[..., 1] = nodes[..., 1] * -1
# Render mask
z = torch.ones((nodes.shape[0], 1, nodes.shape[2], 1)).to(nodes.device)
P3d = torch.cat((nodes, z), 3)
P3d = torch.squeeze(P3d, dim=1)
faces = torch.squeeze(faces, dim=1).to(nodes.device)
mask = self.renderer(P3d, faces, mode='silhouettes').unsqueeze(1)
# Stack outputs
output_masks.append(mask)
output_points.append(nodes)
if self.training:
return output_masks, output_points
else:
return output_masks[-1]
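# Illustrative construction sketch (``args`` is assumed to be an argparse-style namespace
# carrying the fields read above: num_nodes, dec_dim, dec_size, image_size, stages,
# arch, enc_dim and drop):
#
#     model = CircleNet(args)
#     masks, points = model(images, iter=3)   # in training mode, outputs for every iteration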
|
1604664
|
import numpy as np
from scipy.fftpack import ss_diff
import torch
from torch import optim
class SPSA(object):
def __init__(self, model,norm, device, eps, learning_rate, delta, spsa_samples, sample_per_draw,
nb_iter, data_name, early_stop_loss_threshold=None, IsTargeted=None):
self.model = model
self.device = device
self.IsTargeted = IsTargeted
self.eps = eps #0.05
self.learning_rate = learning_rate #0.01
self.delta = delta #0.01
spsa_samples = spsa_samples if spsa_samples else sample_per_draw
self.spsa_samples = (spsa_samples // 2) *2
self.sample_per_draw = (sample_per_draw // self.spsa_samples) * self.spsa_samples
self.nb_iter = nb_iter #20
self.norm = norm # np.inf
self.data_name = data_name
self.early_stop_loss_threshold = early_stop_loss_threshold
self.clip_min = 0
self.clip_max = 1
if self.data_name=="cifar10" and self.IsTargeted:
            raise AssertionError('cifar10 does not support targeted attacks')
def clip_eta(self, batchsize, eta, norm, eps):
if norm == np.inf:
eta = torch.clamp(eta, -eps, eps)
elif norm == 2:
            normVal = torch.norm(eta.view(batchsize, -1), self.norm, 1)  # compute the norm of each row
mask = normVal<=eps
scaling = eps/normVal
scaling[mask] = 1
eta = eta*scaling.view(batchsize, 1, 1, 1)
else:
raise NotImplementedError
return eta
def _get_batch_sizes(self, n, max_batch_size):
batches = [max_batch_size for _ in range(n // max_batch_size)]
if n % max_batch_size > 0:
batches.append(n % max_batch_size)
return batches
def _compute_spsa_gradient(self, loss_fn, x, delta, nb_sample, max_batch_size):
grad = torch.zeros_like(x)
x = x.unsqueeze(0)
x = x.expand(max_batch_size, *x.shape[1:]).contiguous()
v = torch.empty_like(x[:, :1, ...])
for batch_size in self._get_batch_sizes(nb_sample, max_batch_size):
x_ = x[:batch_size]
vb = v[:batch_size]
vb = vb.bernoulli_().mul_(2.0).sub_(1.0)
v_ = vb.expand_as(x_).contiguous()
x_shape = x_.shape
x_ = x_.view(-1, *x.shape[2:])
v_ = v_.view(-1, *v.shape[2:])
df = loss_fn(delta * v_) - loss_fn(- delta * v_)
df = df.view(-1, *[1 for _ in v_.shape[1:]])
grad_ = df / (2. * delta * v_)
grad_ = grad_.view(x_shape)
grad_ = grad_.sum(dim=0, keepdim=False)
grad += grad_
grad /= nb_sample
return grad
def _is_adversarial(self,x, y, y_target):
output = torch.argmax(self.model(x), dim=1)
if self.IsTargeted:
return output == y_target
else:
return output != y
def _margin_logit_loss(self, logits, labels, target_label):
if self.IsTargeted:
correct_logits = logits.gather(1, target_label[:, None]).squeeze(1)
logit_indices = torch.arange(logits.size()[1], dtype=target_label.dtype, device=target_label.device)[None, :].expand(target_label.size()[0], -1)
incorrect_logits = torch.where(logit_indices == target_label[:, None], torch.full_like(logits, float("-inf")), logits)
max_incorrect_logits, _ = torch.max(incorrect_logits, 1)
return max_incorrect_logits -correct_logits
else:
correct_logits = logits.gather(1, labels[:, None]).squeeze(1)
logit_indices = torch.arange(logits.size()[1], dtype=labels.dtype, device=labels.device)[None, :].expand(labels.size()[0], -1)
incorrect_logits = torch.where(logit_indices == labels[:, None], torch.full_like(logits, float("-inf")), logits)
max_incorrect_logits, _ = torch.max(incorrect_logits, 1)
return -(max_incorrect_logits-correct_logits)
def spsa(self,x, y, y_target):
device = self.device
eps = self.eps
batchsize = x.shape[0]
learning_rate = self.learning_rate
delta = self.delta
spsa_samples = self.spsa_samples
nb_iter = self.nb_iter
v_x = x.to(device)
v_y = y.to(device)
if self._is_adversarial(v_x, v_y, y_target):
self.detail['queries'] = 0
self.detail['success'] = True
return v_x
perturbation = (torch.rand_like(v_x) * 2 - 1) * eps
optimizer = optim.Adam([perturbation], lr=learning_rate)
self.detail['success'] = False
queries = 0
while queries+self.sample_per_draw <= nb_iter:
queries += self.sample_per_draw
def loss_fn(pert):
input1 = v_x + pert
input1 = torch.clamp(input1, self.clip_min, self.clip_max)
logits = self.model(input1)
return self._margin_logit_loss(logits, v_y.expand(len(pert)), y_target.expand(len(pert))) if self.IsTargeted else self._margin_logit_loss(logits, v_y.expand(len(pert)), None)
spsa_grad = self._compute_spsa_gradient(loss_fn, v_x, delta=delta, nb_sample=spsa_samples, max_batch_size=self.sample_per_draw)
perturbation.grad = spsa_grad
optimizer.step()
clip_perturbation = self.clip_eta(batchsize, perturbation, self.norm, eps)
adv_image = torch.clamp(v_x + clip_perturbation, self.clip_min, self.clip_max)
perturbation.add_((adv_image - v_x) - perturbation)
loss = loss_fn(perturbation).item()
if (self.early_stop_loss_threshold is not None and loss < self.early_stop_loss_threshold):
break
if self._is_adversarial(adv_image, v_y, y_target):
self.detail['success'] = True
break
self.detail['queries'] = queries
return adv_image
def forward(self, xs, ys, ys_target):
adv_xs = []
self.detail = {}
for i in range(len(xs)):
print(i + 1, end=' ')
if self.data_name=='cifar10':
adv_x = self.spsa(xs[i].unsqueeze(0), ys[i].unsqueeze(0), None)
else:
adv_x = self.spsa(xs[i].unsqueeze(0), ys[i].unsqueeze(0), ys_target[i].unsqueeze(0))
if self.norm==np.inf:
distortion = torch.mean((adv_x - xs[i].unsqueeze(0))**2) / ((1-0)**2) #mean_square_distance
else:
distortion = torch.mean((adv_x - xs[i].unsqueeze(0))**2) / ((1-0)**2)
print(distortion.item(), end=' ')
print(self.detail)
adv_xs.append(adv_x)
adv_xs = torch.cat(adv_xs, 0)
return adv_xs
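# Illustrative usage sketch (values are placeholders; ``model`` is a classifier returning logits):
#
#     attack = SPSA(model, norm=np.inf, device='cuda', eps=0.05, learning_rate=0.01,
#                   delta=0.01, spsa_samples=32, sample_per_draw=128, nb_iter=2048,
#                   data_name='cifar10', IsTargeted=False)
#     adv_images = attack.forward(images, labels, None)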
|
1604679
|
import update_index
def test_iter_plugins(mocker):
client = mocker.MagicMock()
client.list_packages.return_value = ["pytest-plugin-a", "pytest-plugin-b"]
client.package_releases.return_value = ["1.0"]
client.browse.return_value = [("pytest-plugin-c", "2.0")]
client.release_data = lambda name, version: dict(name=name, version=version, summary="")
results = update_index.iter_plugins(client, {"pytest-plugin-a"})
assert list(results) == [("pytest-plugin-b", "1.0", ""), ("pytest-plugin-c", "2.0", "")]
results = update_index.iter_plugins(client, {"pytest-plugin-a"}, consider_classifier=False)
assert list(results) == [("pytest-plugin-b", "1.0", "")]
|
1604681
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import hashlib
from typing import Dict, Tuple, Optional
class MypyFileCache(object):
def __init__(self):
# type: () -> None
self._cache = {} # type: Dict[str, Tuple[str, str]]
def lookup(self, filename_hash, file_hash):
        # type: (str, str) -> Optional[str]
result = self._cache.get(filename_hash)
if result is None:
return None
if result[0] != file_hash:
return None
return result[1]
def store(self, filename, file_hash, output):
# type: (str, str, str) -> None
self._cache[hashlib.md5(filename.encode('utf-8')).hexdigest()] = (file_hash, output)
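if __name__ == '__main__':
    # Minimal demo sketch: store mypy output for a file, then look it up again using
    # the md5 of the file name as the key (mirroring how ``store`` builds its keys).
    cache = MypyFileCache()
    filename = 'example.py'
    file_hash = hashlib.md5(b'print("hi")\n').hexdigest()
    cache.store(filename, file_hash, 'Success: no issues found in 1 source file')
    key = hashlib.md5(filename.encode('utf-8')).hexdigest()
    print(cache.lookup(key, file_hash))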
|
1604697
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim.nets import resnet_v1
import numpy as np
class MultiModal(object):
def __init__(self, mode, learning_rate=0.0001):
self.mode = mode
self.learning_rate = learning_rate
self.hidden_repr_size = 128
self.no_classes = 19
def modDrop(self, layer, is_training, p_mod=.9, keep_prob=.8):
'''
As in Neverova et al. 'ModDrop': std dropout + modality dropping on the input
'''
layer = slim.dropout(layer, keep_prob=keep_prob,
is_training=is_training)
on = tf.cast(tf.random_uniform([1]) - p_mod < 0, tf.float32)
return tf.cond(is_training, lambda: on * layer, lambda: layer)
def single_stream(self, images, modality, is_training, reuse=False):
with tf.variable_scope(modality, reuse=reuse):
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
_, end_points = resnet_v1.resnet_v1_50(
images, self.no_classes, is_training=is_training, reuse=reuse)
# last bottleneck before logits
net = end_points[modality + '/resnet_v1_50/block4']
if 'autoencoder' in self.mode:
return net
with tf.variable_scope(modality + '/resnet_v1_50', reuse=reuse):
bottleneck = slim.conv2d(net, self.hidden_repr_size, [
7, 7], padding='VALID', activation_fn=tf.nn.relu, scope='f_repr')
net = slim.conv2d(bottleneck, self.no_classes, [
1, 1], activation_fn=None, scope='_logits_')
if ('train_hallucination' in self.mode or 'test_disc' in self.mode or 'train_eccv' in self.mode):
return net, bottleneck
return net
def D(self, features, reuse=False):
with tf.variable_scope('discriminator', reuse=reuse):
with slim.arg_scope([slim.fully_connected], weights_initializer=tf.contrib.layers.xavier_initializer(), biases_initializer=tf.constant_initializer(0.0)):
net = slim.fully_connected(
features, 1024, activation_fn=tf.nn.relu, scope='disc_fc1')
# ~ if self.mode == 'train_hallucination_p2':
res = slim.fully_connected(
net, 1024, activation_fn=None, scope='disc_res1')
net = tf.nn.relu(res + net)
res = slim.fully_connected(
net, 1024, activation_fn=None, scope='disc_res2')
net = tf.nn.relu(res + net)
net = slim.fully_connected(
net, 2048, activation_fn=tf.nn.relu, scope='disc_fc2')
net = slim.fully_connected(
net, 3076, activation_fn=tf.nn.relu, scope='disc_fc3')
if self.mode == 'train_hallucination_p2':
net = slim.fully_connected(
net, self.no_classes + 1, activation_fn=None, scope='disc_prob')
elif self.mode == 'train_hallucination':
net = slim.fully_connected(
net, 1, activation_fn=tf.sigmoid, scope='disc_prob')
else:
print('Unrecognized mode')
return net
def decoder(self, features, is_training, reuse=False):
# input features from the resnet should be (batch_size, 7, 7, 2048)
with tf.variable_scope('decoder', reuse=reuse):
with slim.arg_scope([slim.conv2d_transpose], padding='SAME', activation_fn=None,
stride=2, weights_initializer=tf.contrib.layers.xavier_initializer()):
with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True,
activation_fn=tf.nn.relu, is_training=is_training):
# (batch_size, 14, 14, channels)
net = slim.conv2d_transpose(
features, 1024, [3, 3], scope='conv_transpose1')
net = slim.batch_norm(net, scope='bn1')
# (batch_size, 28, 28, channels)
net = slim.conv2d_transpose(
net, 512, [3, 3], scope='conv_transpose2')
net = slim.batch_norm(net, scope='bn2')
# (batch_size, 56, 56, channels)
net = slim.conv2d_transpose(
net, 256, [5, 5], scope='conv_transpose3')
net = slim.batch_norm(net, scope='bn3')
# (batch_size, 112, 112, channels)
net = slim.conv2d_transpose(
net, 128, [5, 5], scope='conv_transpose4')
net = slim.batch_norm(net, scope='bn4')
net = slim.conv2d_transpose(net, 3, [
5, 5], activation_fn=tf.nn.tanh, scope='conv_transpose_out') # (batch_size, 224, 224, 3)
# normalize output
RGB_MEAN = tf.constant([123.68, 116.779, 103.939],
dtype=tf.float32, name='rgb_mean')
net = 255 * net - RGB_MEAN
return net
def build_model(self):
if '_rgb' in self.mode or '_depth' in self.mode:
modality = self.mode.split('_')[-1]
self.images = tf.placeholder(
tf.float32, [None, 224, 224, 3], modality + '_images')
self.labels = tf.placeholder(tf.int64, [None], 'labels')
self.is_training = tf.placeholder(tf.bool, name='is_training')
self.logits = self.single_stream(
self.images, modality=modality, is_training=self.is_training)
self.pred = tf.argmax(tf.squeeze(self.logits), 1)
self.correct_pred = tf.equal(self.pred, self.labels)
self.accuracy = tf.reduce_mean(
tf.cast(self.correct_pred, tf.float32))
if 'train_' in self.mode:
# training stuff
t_vars = tf.trainable_variables()
train_vars = t_vars
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=self.logits, labels=tf.one_hot(self.labels, self.no_classes)))
gradients = tf.gradients(self.loss, train_vars)
gradients = list(zip(gradients, train_vars))
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
# ~ self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
self.train_op = self.optimizer.apply_gradients(
grads_and_vars=gradients)
# summary stuff
loss_summary = tf.summary.scalar(
'classification_loss', self.loss)
accuracy_summary = tf.summary.scalar('accuracy', self.accuracy)
self.summary_op = tf.summary.merge(
[loss_summary, accuracy_summary])
elif 'train_double_stream' in self.mode:
self.depth_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'depth_images')
self.rgb_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'rgb_images')
self.labels = tf.placeholder(tf.int64, [None], 'labels')
self.is_training = tf.placeholder(tf.bool, name='is_training')
if self.mode == 'train_double_stream_moddrop':
self.depth_images = self.modDrop(
self.depth_images, is_training=self.is_training)
self.rgb_images = self.modDrop(
self.rgb_images, is_training=self.is_training)
self.depth_logits = self.single_stream(
self.depth_images, modality='depth', is_training=self.is_training)
self.rgb_logits = self.single_stream(
self.rgb_images, modality='rgb', is_training=self.is_training)
self.logits = (self.depth_logits + self.rgb_logits) / 2.
self.pred = tf.argmax(tf.squeeze(self.logits), 1)
self.correct_pred = tf.equal(self.pred, self.labels)
self.accuracy = tf.reduce_mean(
tf.cast(self.correct_pred, tf.float32))
# training stuff
t_vars = tf.trainable_variables()
train_vars = t_vars
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=self.logits, labels=tf.one_hot(self.labels, self.no_classes)))
gradients = tf.gradients(self.loss, train_vars)
gradients = list(zip(gradients, train_vars))
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
# ~ self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
self.train_op = self.optimizer.apply_gradients(
grads_and_vars=gradients)
# summary stuff
loss_summary = tf.summary.scalar('classification_loss', self.loss)
accuracy_summary = tf.summary.scalar('accuracy', self.accuracy)
self.summary_op = tf.summary.merge(
[loss_summary, accuracy_summary])
elif self.mode == 'test_ensemble_baseline':
# not used, just to recycle eval function
self.depth_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'depth_images')
self.rgb_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'rgb_images')
self.labels = tf.placeholder(tf.int64, [None], 'labels')
self.is_training = tf.placeholder(tf.bool, name='is_training')
self.rgb1_logits = self.single_stream(
self.rgb_images, modality='rgb1', is_training=self.is_training)
self.rgb_logits = self.single_stream(
self.rgb_images, modality='rgb', is_training=self.is_training)
self.logits = (self.rgb1_logits + self.rgb_logits) / 2.
self.pred = tf.argmax(tf.squeeze(self.logits), 1)
self.correct_pred = tf.equal(self.pred, self.labels)
self.accuracy = tf.reduce_mean(
tf.cast(self.correct_pred, tf.float32))
elif 'train_hallucination' in self.mode:
# depth & hall streams
self.depth_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'depth_images')
self.rgb_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'rgb_images')
self.labels = tf.placeholder(tf.int64, [None], 'labels')
self.is_training = tf.placeholder(tf.bool, name='is_training')
self.depth_logits, self.depth_features = self.single_stream(
self.depth_images, modality='depth', is_training=self.is_training)
self.hall_logits, self.hall_features = self.single_stream(
self.rgb_images, modality='hall', is_training=self.is_training)
# overall acc_hall
self.pred = tf.argmax(tf.squeeze(self.hall_logits), 1)
self.correct_pred = tf.equal(self.pred, self.labels)
self.accuracy = tf.reduce_mean(
tf.cast(self.correct_pred, tf.float32))
# ~ #hall_acc
# ~ self.hall_pred = tf.argmax(tf.squeeze(self.hall_logits), 1)
# ~ self.hall_correct_pred = tf.equal(self.hall_pred, self.labels)
# ~ self.hall_accuracy = tf.reduce_mean(tf.cast(self.hall_correct_pred, tf.float32))
# ~ #depth_acc
# ~ self.depth_pred = tf.argmax(tf.squeeze(self.depth_logits), 1)
# ~ self.depth_correct_pred = tf.equal(self.depth_pred, self.labels)
# ~ self.depth_accuracy = tf.reduce_mean(tf.cast(self.depth_correct_pred, tf.float32))
# discriminator
self.logits_real = self.D(self.depth_features, reuse=False)
self.logits_fake = self.D(self.hall_features, reuse=True)
# losses
if self.mode == 'train_hallucination':
self.d_loss_real = tf.reduce_mean(
tf.square(self.logits_real - tf.ones_like(self.logits_real)))
self.d_loss_fake = tf.reduce_mean(
tf.square(self.logits_fake - tf.zeros_like(self.logits_fake)))
self.d_loss = self.d_loss_real + self.d_loss_fake
self.g_loss = tf.reduce_mean(
tf.square(self.logits_fake - tf.ones_like(self.logits_fake)))
# ~ self.d_optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
# ~ self.g_optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
elif self.mode == 'train_hallucination_p2':
                # every sample gets the extra "fake" class index (self.no_classes); the
                # expression below just broadcasts that index to the shape of the labels
                fake_labels = self.labels + self.no_classes - self.labels
self.d_loss_real = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits_real,
labels=tf.one_hot(self.labels, self.no_classes + 1)))
self.d_loss_fake = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits_fake,
labels=tf.one_hot(fake_labels, self.no_classes + 1)))
self.d_loss = self.d_loss_real + self.d_loss_fake
self.g_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits_fake,
labels=tf.one_hot(self.labels, self.no_classes + 1)))
else:
print('Error building model')
self.d_optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.g_optimizer = tf.train.AdamOptimizer(self.learning_rate)
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if 'discriminator' in var.name]
g_vars = [var for var in t_vars if 'hall' in var.name]
# train ops
with tf.variable_scope('train_op', reuse=False):
self.d_train_op = slim.learning.create_train_op(
self.d_loss, self.d_optimizer, variables_to_train=d_vars)
self.g_train_op = slim.learning.create_train_op(
self.g_loss, self.g_optimizer, variables_to_train=g_vars)
# summaries
d_loss_summary = tf.summary.scalar('d_loss', self.d_loss)
g_loss_summary = tf.summary.scalar('g_loss', self.g_loss)
# hall_acc_summary = tf.summary.scalar('hall_acc', self.accuracy)
self.summary_op = tf.summary.merge(
[d_loss_summary, g_loss_summary])
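            # The external training loop is expected to alternate the two ops above
            # (one d_train_op step, then one g_train_op step) on matched RGB/depth
            # batches; summary_op only tracks the two adversarial losses.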
elif self.mode == 'finetune_hallucination':
# depth & hall streams
# not used, just to recycle eval function
self.depth_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'depth_images')
self.rgb_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'rgb_images')
self.labels = tf.placeholder(tf.int64, [None], 'labels')
self.is_training = tf.placeholder(tf.bool, name='is_training')
self.rgb_logits = self.single_stream(
self.rgb_images, modality='rgb', is_training=self.is_training)
self.hall_logits = self.single_stream(
self.rgb_images, modality='hall', is_training=self.is_training)
self.logits = (self.rgb_logits + self.hall_logits) / 2.
# overall acc_hall
self.pred = tf.argmax(tf.squeeze(self.logits), 1)
self.correct_pred = tf.equal(self.pred, self.labels)
self.accuracy = tf.reduce_mean(
tf.cast(self.correct_pred, tf.float32))
# ~ #hall_acc
# ~ self.hall_pred = tf.argmax(tf.squeeze(self.hall_logits), 1)
# ~ self.hall_correct_pred = tf.equal(self.hall_pred, self.labels)
# ~ self.hall_accuracy = tf.reduce_mean(tf.cast(self.hall_correct_pred, tf.float32))
# ~ #rgb_acc
# ~ self.rgb_pred = tf.argmax(tf.squeeze(self.rgb_logits), 1)
# ~ self.rgb_correct_pred = tf.equal(self.rgb_pred, self.labels)
# ~ self.rgb_accuracy = tf.reduce_mean(tf.cast(self.rgb_correct_pred, tf.float32))
# training stuff
t_vars = tf.trainable_variables()
train_vars = t_vars
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=self.logits, labels=tf.one_hot(self.labels, self.no_classes)))
gradients = tf.gradients(self.loss, train_vars)
gradients = list(zip(gradients, train_vars))
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
# ~ self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
self.train_op = self.optimizer.apply_gradients(
grads_and_vars=gradients)
# summary stuff
loss_summary = tf.summary.scalar('classification_loss', self.loss)
accuracy_summary = tf.summary.scalar('accuracy', self.accuracy)
self.summary_op = tf.summary.merge(
[loss_summary, accuracy_summary])
elif self.mode == 'test_moddrop':
# rgb & blank depth streams
self.depth_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'depth_images')
# bad trick to blank out depth....
self.blank_depth = self.depth_images - self.depth_images
self.rgb_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'rgb_images')
self.labels = tf.placeholder(tf.int64, [None], 'labels')
self.is_training = tf.placeholder(tf.bool, name='is_training')
self.rgb_logits = self.single_stream(
self.rgb_images, modality='rgb', is_training=self.is_training)
# swap between the two
self.depth_logits = self.single_stream(
self.depth_images, modality='depth', is_training=self.is_training)
# ~ self.depth_logits = self.single_stream(self.blank_depth, modality='depth', is_training=self.is_training)
# overall acc
# swap between the two
self.logits = (self.rgb_logits + self.depth_logits) / 2.
# ~ self.logits = self.rgb_logits
self.pred = tf.argmax(tf.squeeze(self.logits), 1)
self.correct_pred = tf.equal(self.pred, self.labels)
self.accuracy = tf.reduce_mean(
tf.cast(self.correct_pred, tf.float32))
elif self.mode == 'test_hallucination':
# rgb & hall streams
# not used, just to recycle eval function
self.depth_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'depth_images')
self.rgb_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'rgb_images')
self.labels = tf.placeholder(tf.int64, [None], 'labels')
self.is_training = tf.placeholder(tf.bool, name='is_training')
self.rgb_logits = self.single_stream(
self.rgb_images, modality='rgb', is_training=self.is_training)
self.hall_logits = self.single_stream(
self.rgb_images, modality='hall', is_training=self.is_training)
# overall acc
self.logits = (self.rgb_logits + self.hall_logits) / 2.
self.pred = tf.argmax(tf.squeeze(self.logits), 1)
self.correct_pred = tf.equal(self.pred, self.labels)
self.accuracy = tf.reduce_mean(
tf.cast(self.correct_pred, tf.float32))
# hall_acc
# ~ self.hall_pred = tf.argmax(tf.squeeze(self.hall_logits), 1)
# ~ self.hall_correct_pred = tf.equal(self.hall_pred, self.labels)
# ~ self.hall_accuracy = tf.reduce_mean(tf.cast(self.hall_correct_pred, tf.float32))
# ~ #rgb_acc
# ~ self.rgb_pred = tf.argmax(tf.squeeze(self.rgb_logits), 1)
# ~ self.rgb_correct_pred = tf.equal(self.rgb_pred, self.labels)
# ~ self.rgb_accuracy = tf.reduce_mean(tf.cast(self.rgb_correct_pred, tf.float32))
elif self.mode == 'train_autoencoder' or self.mode == 'test_autoencoder':
self.depth_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'depth_images')
self.rgb_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'rgb_images')
self.is_training = tf.placeholder(tf.bool, name='is_training')
self.rgb_features = self.single_stream(
self.rgb_images, modality='rgb', is_training=self.is_training)
self.reconstructed_depth = self.decoder(
self.rgb_features, is_training=self.is_training)
self.loss = tf.reduce_mean(
tf.square(self.depth_images - self.reconstructed_depth))
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.train_op = slim.learning.create_train_op(
self.loss, self.optimizer)
loss_summary = tf.summary.scalar('reconstruction_loss', self.loss)
rec_depth_summary = tf.summary.image(
'reconstructed', self.reconstructed_depth)
depth_image_summary = tf.summary.image('depth', self.depth_images)
self.summary_op = tf.summary.merge(
[loss_summary, rec_depth_summary, depth_image_summary])
elif 'test_double_stream' in self.mode:
# to load precomputed reconstructed images
self.depth_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'depth_images')
self.rgb_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'rgb_images')
self.is_training = tf.placeholder(tf.bool, name='is_training')
self.labels = tf.placeholder(tf.int64, [None], 'labels')
self.depth_logits = self.single_stream(
self.depth_images, modality='depth', is_training=self.is_training)
self.rgb_logits = self.single_stream(
self.rgb_images, modality='rgb', is_training=self.is_training)
self.logits = (self.depth_logits + self.rgb_logits) / 2.
self.pred = tf.argmax(tf.squeeze(self.logits), 1)
self.correct_pred = tf.equal(self.pred, self.labels)
self.accuracy = tf.reduce_mean(
tf.cast(self.correct_pred, tf.float32))
elif self.mode == 'test_disc':
# depth & rgb streams
self.depth_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'depth_images')
self.rgb_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'rgb_images')
self.labels = tf.placeholder(tf.int64, [None], 'labels')
self.is_training = tf.placeholder(tf.bool, name='is_training')
self.rgb_logits, self.rgb_features = self.single_stream(
self.rgb_images, modality='rgb', is_training=self.is_training)
self.depth_logits, self.depth_features = self.single_stream(
self.depth_images, modality='depth', is_training=self.is_training)
            # overall acc (average of the rgb and depth logits)
            self.logits = (self.rgb_logits + self.depth_logits) / 2.
self.pred = tf.argmax(tf.squeeze(self.logits), 1)
self.correct_pred = tf.equal(self.pred, self.labels)
self.accuracy = tf.reduce_mean(
tf.cast(self.correct_pred, tf.float32))
# depth_acc
self.depth_pred = tf.argmax(tf.squeeze(self.depth_logits), 1)
self.depth_correct_pred = tf.equal(self.depth_pred, self.labels)
self.depth_accuracy = tf.reduce_mean(
tf.cast(self.depth_correct_pred, tf.float32))
# rgb_acc
self.rgb_pred = tf.argmax(tf.squeeze(self.rgb_logits), 1)
self.rgb_correct_pred = tf.equal(self.rgb_pred, self.labels)
self.rgb_accuracy = tf.reduce_mean(
tf.cast(self.rgb_correct_pred, tf.float32))
# discriminator
self.logits_preds = tf.nn.softmax(
self.D(self.rgb_features, reuse=False))
self.logits_preds = tf.nn.softmax(
self.D(self.depth_features, reuse=True))
elif 'train_eccv' in self.mode:
# depth & hall streams
self.depth_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'depth_images')
self.rgb_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'rgb_images')
self.labels = tf.placeholder(tf.int64, [None], 'labels')
self.is_training = tf.placeholder(tf.bool, name='is_training')
self.depth_logits, self.depth_features = self.single_stream(
self.depth_images, modality='depth', is_training=self.is_training)
self.hall_logits, self.hall_features = self.single_stream(
self.rgb_images, modality='hall', is_training=self.is_training)
# overall acc_hall
self.pred = tf.argmax(tf.squeeze(self.hall_logits), 1)
self.correct_pred = tf.equal(self.pred, self.labels)
self.accuracy = tf.reduce_mean(
tf.cast(self.correct_pred, tf.float32))
# ~ #hall_acc
# ~ self.hall_pred = tf.argmax(tf.squeeze(self.hall_logits), 1)
# ~ self.hall_correct_pred = tf.equal(self.hall_pred, self.labels)
# ~ self.hall_accuracy = tf.reduce_mean(tf.cast(self.hall_correct_pred, tf.float32))
# ~ #depth_acc
# ~ self.depth_pred = tf.argmax(tf.squeeze(self.depth_logits), 1)
# ~ self.depth_correct_pred = tf.equal(self.depth_pred, self.labels)
# ~ self.depth_accuracy = tf.reduce_mean(tf.cast(self.depth_correct_pred, tf.float32))
# losses
loss_hall_class = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=self.hall_logits, labels=tf.one_hot(self.labels, self.no_classes)))
# loss_hall_distill = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
# logits=self.hall_logits, labels=tf.nn.softmax(self.depth_logits / 1)))
T = 10
teacher_softmax_depth = tf.nn.softmax(self.depth_logits / T)
loss_distill_depth = tf.nn.softmax_cross_entropy_with_logits(
labels=teacher_softmax_depth, logits=self.hall_logits / T)
loss_distill_depth = tf.reduce_mean(loss_distill_depth)
loss_distill_depth_scaled = loss_distill_depth * T * T
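            # Multiplying by T*T compensates for the 1/T^2 scaling that the softened
            # targets introduce in the gradients, keeping the distillation term on a
            # comparable scale to the hard-label classification loss (as in standard
            # knowledge distillation).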
loss_hall_distill = loss_distill_depth_scaled
loss_hall_rect_static = tf.reduce_sum(tf.square(tf.subtract(tf.sigmoid(
self.depth_features), tf.sigmoid(self.hall_features))))
loss_hall_rect_static2 = tf.losses.mean_squared_error(
self.depth_features, self.hall_features, reduction=tf.losses.Reduction.SUM)
self.loss_hall_class = loss_hall_class
self.loss_hall_distill = loss_hall_distill
self.loss_hall_rect_static = loss_hall_rect_static
self.loss_hall_rect_static2 = loss_hall_rect_static2
self.loss = loss_hall_class + loss_hall_distill + loss_hall_rect_static * 0.01
t_vars = tf.trainable_variables()
train_vars = [var for var in t_vars if 'hall' in var.name]
gradients = tf.gradients(self.loss, train_vars)
gradients = list(zip(gradients, train_vars))
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
# ~ self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
self.train_op = self.optimizer.apply_gradients(
grads_and_vars=gradients)
# summary stuff
loss_summary = tf.summary.scalar('classification_loss', self.loss)
accuracy_summary = tf.summary.scalar('accuracy', self.accuracy)
self.summary_op = tf.summary.merge(
[loss_summary, accuracy_summary])
|
1604771
|
import pytest
@pytest.mark.parametrize(
"file, result, expected",
(
("src/dafny/utils/MathHelpers.dfy", "passed", "passed"),
("src/dafny/utils/Helpers.dfy", "failed", "failed"),
),
)
def test_proof_result(file, result, expected):
assert file.endswith(".dfy")
assert result == expected
@pytest.mark.parametrize(
"file2, result, expected",
(
("src/dafny/utils/MathHelpers.dfy", "passed", "passed"),
("src/dafny/utils/Helpers.dfy", "failed", "failed"),
),
)
def test_proof_resultfailing(file2, result, expected):
assert file2.endswith(".dfy")
assert result == expected
|
1604778
|
from ml_tutor.model import BaseModelRegression
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
class LinearRegression(BaseModelRegression):
def __init__(self, learning_rate=0.0001, num_iter=100000, tol=0.00001, visual_training=True):
"""
Creates the Linear Regression algorithm.
        :param learning_rate: Learning rate controls the size of the step gradient descent takes at each iteration
        :param num_iter: Number of times to go through the dataset to train the model.
        :param tol: If the difference between the old and new values of the model parameters is less than this number, training stops.
:param visual_training: If True - the training process will be visualized [NOTE: only in Jupyter Notebook and Google Colab]
"""
super(BaseModelRegression, self).__init__()
self.learning_rate = learning_rate
self.num_iter = num_iter
self.tol = tol
self.visual_training = visual_training
if not super().__is_visual_on__():
self.visual_training = False
print("Visualization is only supported in Jupyter Notebook and Google Colab.")
self.randn_id = None
# Gradient descent params
self.starting_b = 0
self.starting_m = 0
self.b_history = []
self.m_history = []
print("If your dataset is sparse for visual training, random feature will be selected to match required shape.")
print("Required shape for this algorithm is: [N, 1].")
def fit(self, X, y):
"""
Train the model using features (X) as training data and y as target values.
:param X: Features from a dataset
:param y: Target values (This is what you want to predict)
"""
self.X = X
self.y = y
if len(self.y.shape) < 2:
self.y = np.expand_dims(self.y, axis=1)
if len(self.X.shape) < 2:
self.X = np.expand_dims(self.X, axis=1)
if self.X.shape[1] > 1:
if self.visual_training:
print("The dataset is sparse for visual training. This algorithm works only on shape [N, 1].")
print("Random feature selected to match required size.")
print("Current shape of your data: {}".format(self.X.shape))
self.randn_id = np.random.randint(0, self.X.shape[1])
print("Column selected on id: {}".format(self.randn_id))
self.X = self.X[:, self.randn_id]
if len(self.X.shape) < 2:
self.X = np.expand_dims(self.X, axis=1)
print("New shape of your data: {}".format(self.X.shape))
        # call the gradient descent function; its output is the best possible (for this dataset) M and B
self.__gradient_descent__(self.starting_b, self.starting_m)
def __gradient_descent__(self, b, m):
"""
main function for the gradient descent
:param b: Bias or constant
:param m: coefficient for X
"""
self.new_b = b
self.new_m = m
for i in range(self.num_iter):
candidate_m, candidate_b = self.__gradient_descent_step__(self.new_b, self.new_m)
if all(np.abs(candidate_m - self.new_m) <= self.tol) and \
all(np.abs(candidate_b - self.new_b) <= self.tol):
break
self.new_m = candidate_m
self.new_b = candidate_b
if i % 1000 == 0:
self.b_history.append(self.new_b)
self.m_history.append(self.new_m)
if self.visual_training:
self.__visual_training__()
def __visual_training__(self):
"""
        Helper function used to create a real-time visualization of the training process.
"""
# Import only relevant libraries for Jupyter Notebook if needed
from IPython import display
for i in range(len(self.b_history)):
plt.close()
plt.clf()
plt.figure(figsize=(12, 10))
plt.scatter(self.X, self.y, c='b', label="Training set")
plt.plot(self.X, np.add(np.multiply(self.X, self.m_history[i]), self.b_history[i]), c='r',
label="Regression line")
plt.title("Linear Regression - Training process")
plt.xlabel("Feature value")
plt.ylabel("Target value")
plt.legend(framealpha=1, frameon=True)
display.display(plt.gcf())
display.display()
time.sleep(1)
display.clear_output(wait=True)
def __gradient_descent_step__(self, b, m):
"""
Helper function for Gradient descent. Performs a single step of the gradient optimization.
"""
candidated_b = b - np.multiply(self.learning_rate,
np.sum(-np.multiply(2 / float(len(self.X)),
np.subtract(self.y,
np.add(np.multiply(self.X, m), b))), axis=0))
candidated_m = m - np.multiply(self.learning_rate,
np.sum(np.multiply(2 / float(len(self.X)),
np.multiply(-self.X,
np.subtract(self.y,
np.add(np.multiply(self.X, m), b)))),
axis=0))
return candidated_m, candidated_b
def predict(self, X):
"""
This method performs predictions on the unseen data from your dataset.
:param X: Data samples used to perform prediction on. (Generally a test set)
:return: Predicted labels for each data sample
"""
        if len(X.shape) < 2:
            X = np.expand_dims(X, axis=1)
        if X.shape[1] > 1 and self.visual_training:
            # mirror fit(): keep only the randomly selected feature column
            X = X[:, self.randn_id]
            if len(X.shape) < 2:
                X = np.expand_dims(X, axis=1)
y_pred = np.add(np.multiply(X, self.new_m), self.new_b)
return y_pred
def score(self, real, predicted):
"""
        Return the MSE computed on real vs. predicted values.
        :param real: Expected target values (generally found in the dataset)
        :param predicted: Values predicted by the algorithm
        :return: Mean squared error computed on real vs. predicted values
"""
assert len(real) == len(predicted)
return mean_squared_error(real, predicted)
def sklearn_version(self):
"""
Auto-generates sklearn code for a selected algorithm.
NOTE: This function will automatically add one more code cell to your Jupyter Notebook/Google Colab (with the sklearn code inside).
"""
if not super().__is_visual_on__():
print("Supported only in Jupyter Notebook and Google Colab.")
return NotImplementedError
if super().__is_google_colab__():
return "This method is not supported in Google Colab for now :/"
from IPython.core.getipython import get_ipython
contents = """
# If you don't have Sklearn installed execute line below
# pip install sklearn
# This is how you can import LinearRegression using sklearn library
from sklearn.linear_model import LinearRegression
# Define regressor with selected parameters
model = LinearRegression()
# Train the model using dataset you desire
model.fit(X_train, y_train)
# Finally, use trained model to make predictions
predictions = model.predict(X_test)
        # Use the score method to evaluate the model
print(model.score(X_test, y_test))
"""
shell = get_ipython()
payload = dict(
source='set_next_input',
text=contents,
replace=False,
)
shell.payload_manager.write_payload(payload, single=False)
def how_it_works(self, video=False):
"""
Generates theory on how the algorithm works right in the Jupyter Notebook/Google colab.
:param video: Some people prefer video tutorials over reading version. Set this parameter to True if you want video tutorial instead. :)
"""
if not super().__is_visual_on__():
print("Supported only in Jupyter Notebook and Google Colab.")
return NotImplementedError
from IPython.core.getipython import get_ipython
if not video:
content = u"""
<div>
<h1>Linear Regression — Understanding the Theory</h1>
<br>
<img src="https://miro.medium.com/max/770/0*c39Seo5WzCpU4GAn">
<br>
<br>
<p>
Linear regression is probably the simplest approach for statistical learning. It is a good starting point for more advanced approaches, and in fact, many fancy statistical learning techniques can be seen as an extension of linear regression. Therefore, understanding this simple model will build a good base before moving on to more complex approaches.
<br><br>
Linear regression is very good to answer the following questions:<br><br>
- Is there a relationship between 2 variables?<br>
- How strong is the relationship?<br>
- Which variable contributes the most?<br>
- How accurately can we estimate the effect of each variable?<br>
- How accurately can we predict the target?<br>
- Is the relationship linear? (duh)<br>
- Is there an interaction effect?<br>
</p>
<p>
<h2>Estimating the coefficients</h2><br><br>
Let’s assume we only have one variable and one target. Then, linear regression is expressed as:
<br><br>
<img src="https://miro.medium.com/max/770/1*B-U6j1vxqqaYjgTZgunxIg@2x.png">
<br><br>
In the equation above, the betas are the coefficients. These coefficients are what we need in order to make predictions with our model.<br><br>
So how do we find these parameters?<br><br>
To find the parameters, we need to minimize the least squares or the sum of squared errors. Of course, the linear model is not perfect and it will not predict all the data accurately, meaning that there is a difference between the actual value and the prediction. The error is easily calculated with:
<br><br>
<img src="https://miro.medium.com/max/727/1*ly-QBw2oLDVx9M7MzxkKnw@2x.png">
<br><br>
But why are the errors squared?<br><br>
        We square the error because the prediction can be either above or below the true value, resulting in a negative or positive difference respectively. If we did not square the errors, the sum of errors could decrease because of negative differences and not because the model is a good fit. Also, squaring the errors penalizes large differences, and so minimizing the squared errors “guarantees” a better model. Let’s take a look at a graph to better understand.
<br><br>
<img src="https://miro.medium.com/max/770/1*3CgiH8QI0ZN5LfdmK2t6XQ@2x.png">
<br><br>
        In the graph above, the red dots are the true data and the blue line is the linear model. The grey lines illustrate the errors between the predicted and the true values. The blue line is thus the one that minimizes the sum of the squared lengths of the grey lines.
<br><br>After some math that is too heavy for a blog post, you can finally estimate the coefficients with the following equations:<br><br>
<br><br>
<img src="https://miro.medium.com/max/614/1*YOiQ9UpR-A2jHvGR6JZwtQ@2x.png"><br><br>
<img src="https://miro.medium.com/max/339/1*t9rzyx0zh7o5Zx1Y-IQOvg@2x.png">
<br><br>
Where x bar and y bar represent the mean.
<br><br>
<h2>Estimate the relevancy of the coefficients</h2>
<br><br>
Now that you have coefficients, how can you tell if they are relevant to predict your target?<br><br>
        The best way is to find the p-value. The p-value is used to quantify statistical significance; it allows us to tell whether the null hypothesis is to be rejected or not.<br><br>
The null hypothesis?<br><br>
For any modelling task, the hypothesis is that there is some correlation between the features and the target. The null hypothesis is therefore the opposite: there is no correlation between the features and the target.<br><br>
So, finding the p-value for each coefficient will tell if the variable is statistically significant to predict the target. As a general rule of thumb, if the p-value is less than 0.05: there is a strong relationship between the variable and the target.
<br><br>
<h2>Assess the accuracy of the model</h2>
<br><br>
You found out that your variable was statistically significant by finding its p-value. Great!<br><br>
Now, how do you know if your linear model is any good?<br><br>
To assess that, we usually use the RSE (residual standard error) and the R² statistic.<br><br>
The first error metric is simple to understand: the lower the residual errors, the better the model fits the data (in this case, the closer the data is to a linear relationship).<br><br>
As for the R² metric, it measures the proportion of variability in the target that can be explained using a feature X. Therefore, assuming a linear relationship, if feature X can explain (predict) the target, then the proportion is high and the R² value will be close to 1. If the opposite is true, the R² value is then closer to 0.
<br><br>
</p>
<h1>Author and source:</h1>
<h2>Author: <a target="_blank" href="https://towardsdatascience.com/@marcopeixeiro"><NAME></a></h2>
<h2>To find more resources go to the source of the post: <a target="_blank" href="https://towardsdatascience.com/linear-regression-understanding-the-theory-7e53ac2831b5">Towards data science post</a></h2>
</div>
"""
get_ipython().run_cell_magic(u'html', u'', content)
else:
content = u"""
<div>
        <h1> Linear Regression - How does it work? </h1>
<iframe width="560" height="315" src="https://www.youtube.com/embed/kHwlB_j7Hkc" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
</div>
"""
get_ipython().run_cell_magic(u'markdown', u'', content)
def interview_questions(self):
"""
Generates commonly asked interview questions about the algorithm in the Jupyter Notebook/Google colab.
"""
if not super().__is_visual_on__():
print("Supported only in Jupyter Notebook and Google Colab.")
return NotImplementedError
from IPython.core.getipython import get_ipython
content = u"""
<h1> Linear Regression Interview Questions </h1>
<h2> 1. Can you list out the critical assumptions of linear regression?</h2>
<p>
There are three crucial assumptions one has to make in linear regression. They are,
<ol>
        <li>It is imperative to have a linear relationship between the dependent and independent variables. A scatter plot can prove handy to check out this fact.</li>
<li>The independent variables in the dataset should not exhibit any multi-collinearity. In case they do, it should be at the barest minimum. There should be a restriction on their value depending on the domain requirement.</li>
        <li>Homoscedasticity is one of the most critical assumptions. It states that there should be an equal distribution of errors.</li>
</ol>
</p>
<h2> 2. What is the primary difference between R square and adjusted R square?</h2>
<p>
In linear regression, you use both these values for model validation. However, there is a clear distinction between the two. R square accounts for the variation of all independent variables on the dependent variable. In other words, it considers each independent variable for explaining the variation. In the case of Adjusted R square, it accounts for the significant variables alone for indicating the percentage of variation in the model. By significant, we refer to the P values less than 0.05.
</p>
<h2>3. What is the importance of the F-test in a linear model?</h2>
<p>
The F-test is a crucial one in the sense that it tests the goodness of the model. When you reiterate the model to improve the accuracy with the changes, the F-test proves its utility in understanding the effect of the overall regression.
</p>
<h2>4. What are the disadvantages of the linear regression model?</h2>
<p>
One of the most significant demerits of the linear model is that it is sensitive and dependent on the outliers. It can affect the overall result. Another notable demerit of the linear model is overfitting. Similarly, underfitting is also a significant disadvantage of the linear model.
</p>
<h3> The questions and answers taken from: [<a href="https://www.digitalvidya.com/blog/most-commonly-asked-interview-questions-on-linear-regression">link</a>]</h3>
<h3> Quiz like questions: <a href="https://www.analyticsvidhya.com/blog/2017/07/30-questions-to-test-a-data-scientist-on-linear-regression/" target="_blank">link</a></h3>
"""
get_ipython().run_cell_magic(u'html', u'', content)
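# Illustrative usage sketch: a minimal way this tutor LinearRegression might be driven
# on synthetic data. The names X_demo / y_demo and the hyperparameters are arbitrary
# illustration values, and visual_training is disabled so the sketch also runs outside
# Jupyter/Colab.
if __name__ == '__main__':
    np.random.seed(0)
    X_demo = np.random.rand(100, 1)                               # single feature in [0, 1]
    y_demo = 3 * X_demo[:, 0] + 2 + 0.1 * np.random.randn(100)    # noisy linear target
    model = LinearRegression(learning_rate=0.01, num_iter=10000, visual_training=False)
    model.fit(X_demo, y_demo)
    preds = model.predict(X_demo)
    print("Demo MSE:", model.score(y_demo, preds.reshape(-1)))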
|
1604793
|
import numpy as np
import matplotlib.pyplot as plt
from multilayer_perceptron import MLP
from gradient_boosting_decision_tree import GBDT
from xgboost import XGBoost
from random_forest import RandomForest
from adaboost import AdaBoost
from factorization_machines import FactorizationMachines
from support_vector_machine import SVM
from k_nearest_neighbor import kNearestNeighbor
def gen_linear(train_num):
x = 2 * np.random.random((train_num, 2)) - 1
return x, (x.sum(axis=1) > 0) * 1
def gen_circle(train_num):
x = 2 * np.random.random((train_num, 2)) - 1
return x, (np.square(x).sum(axis=1) > 0.6) * 1
def gen_xor(train_num):
x = 2 * np.random.random((train_num, 2)) - 1
return x, np.array([(xi[0] * xi[1] > 0) for xi in x]) * 1
def gen_spiral(train_num):
r = 0.8 * np.arange(train_num) / train_num
y = np.arange(train_num) % 2
t = 1.75 * r * 2 * np.pi + y * np.pi
x = np.c_[r * np.sin(t) + np.random.random(train_num) /
10, r * np.cos(t) + np.random.random(train_num) / 10]
return x, y * 1
def gen_moon(train_num):
y = np.arange(train_num) % 2
x0 = (y - 0.5) * (.5 - np.cos(np.linspace(0, np.pi, train_num))) + \
np.random.random(train_num) / 10
x1 = (y - 0.5) * (.5 - 2 * np.sin(np.linspace(0, np.pi, train_num))
) + np.random.random(train_num) / 10
return np.c_[x0, x1], y
# visualize decision boundary change
def boundary_vis_plots(model, x, y, subplot=[1, 1, 1]):
plt.subplot(subplot[0], subplot[1], subplot[2])
xx, yy = np.meshgrid(np.linspace(-1, 1, 50), np.linspace(-1, 1, 50))
pred = model.predict(np.c_[xx.ravel(), yy.ravel()])
zz = pred.reshape(xx.shape) if len(pred.shape) == 1 or pred.shape[
1] == 1 else pred[:, 1].reshape(xx.shape)
if subplot[2] <= subplot[1]:
plt.title(type(model).__name__)
plt.contourf(xx, yy, zz, levels=np.linspace(
zz.min(), zz.max(), 40), cmap=plt.cm.RdBu)
plt.contour(xx, yy, zz, levels=[0.5], colors='darkred')
plt.scatter(x[:, 0], x[:, 1], c=np.array(
['red', 'blue'])[y], s=10, edgecolors='k')
if subplot[2] == subplot[0] * subplot[1]:
plt.show()
def main():
data_loaders = [gen_linear, gen_circle, gen_xor, gen_spiral, gen_moon]
models = [
(kNearestNeighbor, {'k': 5}),
(FactorizationMachines, {'learning_rate': 1, 'embedding_dim': 1}),
(SVM, {}),
(AdaBoost, {'esti_num': 10}),
(RandomForest, {'tree_num': 20, 'max_depth': 3}),
(XGBoost, {'tree_num': 20, 'max_depth': 3}),
(MLP, {'act_type': 'Tanh', 'opt_type': 'Adam', 'layers': [
2, 8, 7, 2], 'epochs': 200, 'learning_rate': 0.5, 'lmbda': 1e-4})
]
for i, data_loader in enumerate(data_loaders):
x, y = data_loader(256)
for j, model in enumerate(models):
clf = model[0](**model[1])
            clf.fit(x, y if j not in [2, 3] else 2 * y - 1)  # SVM and AdaBoost expect labels in {-1, 1}
boundary_vis_plots(clf, x, y, subplot=[len(
data_loaders), len(models), len(models) * i + 1 + j])
if __name__ == "__main__":
main()
|
1604823
|
from subprocess import CompletedProcess
from unittest import TestCase
from unittest.mock import patch, call
from data_acquisition_framework.services.youtube.youtube_dl_api import YoutubeDL
class TestYoutubeDL(TestCase):
def setUp(self):
self.youtube_dl_service = YoutubeDL()
def test_init(self):
self.assertEqual('youtube-dl', self.youtube_dl_service.youtube_call)
def test_get_videos(self): # Not working
test_channel_url = 'https://youtube.com/channel/abcd'
expected_video_list = ['']
actual_video_list = self.youtube_dl_service.get_videos(test_channel_url)
self.assertEqual(expected_video_list, actual_video_list)
@patch('data_acquisition_framework.services.youtube.youtube_dl_api.subprocess')
def test_youtube_download_with_no_retries(self, mock_subprocess):
test_archive_path = '/archive.txt'
test_download_path = '/downloads'
test_video_id = 'testid'
test_output = ""
mock_subprocess.run.return_value = test_output
with patch.object(self.youtube_dl_service, 'check_and_log_download_output') as mock_check_and_log:
mock_check_and_log.return_value = False
remove_video_flag, video_id = self.youtube_dl_service.youtube_download(test_video_id, test_archive_path,
test_download_path)
self.assertFalse(remove_video_flag)
self.assertEqual(test_video_id, video_id)
mock_subprocess.run.assert_called_once()
mock_check_and_log.assert_called_once_with(test_output)
@patch('data_acquisition_framework.services.youtube.youtube_dl_api.subprocess')
def test_youtube_download_with_retries(self, mock_subprocess):
test_archive_path = '/archive.txt'
test_download_path = '/downloads'
test_video_id = 'testid'
test_output = ""
mock_subprocess.run.return_value = test_output
with patch.object(self.youtube_dl_service, 'check_and_log_download_output') as mock_check_and_log:
mock_check_and_log.return_value = True
remove_video_flag, video_id = self.youtube_dl_service.youtube_download(test_video_id, test_archive_path,
test_download_path)
self.assertTrue(remove_video_flag)
self.assertEqual(test_video_id, video_id)
self.assertEqual(4, mock_subprocess.run.call_count)
mock_check_and_log.assert_has_calls([call(test_output), call(test_output), call(test_output),
call(test_output)])
def test_check_and_log_without_error(self):
test_output = CompletedProcess(args='', returncode=1, stdout=b'Download success')
flag = self.youtube_dl_service.check_and_log_download_output(test_output)
self.assertFalse(flag)
@patch('data_acquisition_framework.services.youtube.youtube_dl_api.logging')
def test_check_and_log_raises_exit(self, mock_logging):
test_output = CompletedProcess(args='', returncode=1, stdout=b'',
stderr=b'ERROR:": HTTP Error 429"\n')
with self.assertRaises(SystemExit):
self.youtube_dl_service.check_and_log_download_output(test_output)
@patch('data_acquisition_framework.services.youtube.youtube_dl_api.logging')
def test_check_and_log_with_yt_errors(self, mock_logging):
test_output = CompletedProcess(args='', returncode=1, stdout=b'',
stderr=b'HTTP Error 404: Not Found"\n')
flag = self.youtube_dl_service.check_and_log_download_output(test_output)
self.assertTrue(flag)
@patch('data_acquisition_framework.services.youtube.youtube_dl_api.logging')
def test_check_and_log_with_other_errors(self, mock_logging):
test_output = CompletedProcess(args='', returncode=1, stdout=b'',
stderr=b'ERROR:Incomplete YouTube ID testid.\n')
flag = self.youtube_dl_service.check_and_log_download_output(test_output)
self.assertTrue(flag)
|
1604842
|
import logging
import re
from django.conf import settings
from twilio.base.exceptions import TwilioRestException
from twilio.rest import Client
from helium.common.utils.commonutils import HeliumError
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, Helium Edu"
__version__ = "1.4.46"
logger = logging.getLogger(__name__)
class HeliumPhoneError(HeliumError):
pass
client = Client(settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN)
def send_sms(phone, message):
client.api.account.messages.create(
to=phone,
from_=settings.TWILIO_SMS_FROM,
body=message)
def verify_number(phone):
try:
cleaned_phone = re.sub("[()\-+\s]", "", phone)
logger.info(f"Asking Twilio to validate {cleaned_phone}")
number = client.lookups.phone_numbers(cleaned_phone).fetch()
return number.phone_number
except TwilioRestException:
raise HeliumPhoneError("Oops, that looks like an invalid phone number.")
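# Illustrative usage (requires valid TWILIO_* settings and a reachable number;
# the number below is a made-up example):
#   verified = verify_number("+1 555 010 0000")
#   send_sms(verified, "Your Helium reminder is due.")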
|
1604892
|
import os
import json
import time
import threading
import rospy
from std_msgs.msg import String
from lg_msg_defs.msg import WindowGeometry
from appctl_support import ProcController
from lg_msg_defs.msg import ApplicationState
from lg_msg_defs.srv import MediaAppsInfoResponse
from lg_common.helpers import get_app_instances_ids
from lg_common import ManagedApplication, ManagedWindow
from lg_common.helpers import get_app_instances_to_manage
ROS_NODE_NAME = "lg_media"
DEFAULT_APP = "gst_video_sync"
DEFAULT_ARGS = " -a 10.42.42.255"
SRV_QUERY = '/'.join(('', ROS_NODE_NAME, "query"))
class ManagedGstreamer(ManagedApplication):
"""
Instance corresponds to a gstreamer application managed entity.
"""
def __init__(self, url, slug, window, respawn=True, extra_args=''):
self.window = window
self.url = url
self.slug = slug
self.respawn = respawn
self.extra_args = extra_args
super(ManagedGstreamer, self).__init__(window=window,
respawn=self.respawn,
cmd=self._build_cmd())
def __str__(self):
"""
String representation
"""
r = "state='%s' URL='%s'" % (self.state, self.url)
return r
def __repr__(self):
"""
Direct call representation
"""
r = "state='%s' URL='%s'" % (self.state, self.url)
return r
def _build_cmd(self):
cmd = []
cmd.extend([rospy.get_param("~application_path", DEFAULT_APP)])
cmd.extend(rospy.get_param("~application_flags", DEFAULT_ARGS).split())
if self.extra_args != '':
cmd.extend(self.extra_args.split())
cmd.append("-d") # Disable hardware decoding -- for compatibility
cmd.extend(["-n", str(self.slug)])
cmd.extend(["-u", self.url])
if self.window:
cmd.extend(["-x", str(self.window.geometry.x)])
cmd.extend(["-y", str(self.window.geometry.y)])
cmd.extend(["-w", str(self.window.geometry.width)])
cmd.extend(["-h", str(self.window.geometry.height)])
#if self.respawn:
# cmd.extend(["-loop", "0"])
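        # With the defaults above, the resulting command looks roughly like
        # (slug, url and geometry are illustrative values):
        #   gst_video_sync -a 10.42.42.255 -d -n adhoc_1 -u http://host/video.webm -x 0 -y 0 -w 1920 -h 1080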
rospy.logdebug("GStreamer POOL: gst_video_sync cmd: %s" % cmd)
return cmd
def execute_command(self, command):
raise NotImplementedError()
def change_url(self, url):
raise NotImplementedError()
def update_geometry(self, geometry):
raise NotImplementedError()
class GstreamerPool(object):
"""
Manages pool of GstreamerInstances in self.gstreamers
dict(id => GstreamerInstance)
"""
def __init__(self, viewport_name):
self.gstreamers = {} # key: app id, value: GstreamerInstance
self.viewport_name = viewport_name
self.lock = threading.Lock()
rospy.on_shutdown(self.clear)
def clear(self):
with self.lock:
for k in list(self.gstreamers.keys()):
self.gstreamers[k].close()
del self.gstreamers[k]
def _unpack_incoming_gstreamers(self, gstreamers):
"""
Converts incoming AdhocMedias to a dictionary where keys are ids
It will filter out all 'non-gstreamer' adhoc medias
"""
return {m.id: m for m in gstreamers if m.media_type == 'video'}
def _partition_existing_medias(self, incoming_medias):
"""
Determine which media id's belong to existing assets.
"""
existing_media_urls = [m.url for m in list(self.gstreamers.values())]
def media_exists(media):
return media.url in existing_media_urls
existing_media_ids = [m.id for m in incoming_medias if media_exists(m)]
fresh_media_ids = [m.id for m in incoming_medias if not media_exists(m)]
return existing_media_ids, fresh_media_ids
def handle_ros_message(self, data):
"""
Handles AdhocMedias messages and manages GstreamerInstances in GstreamerPool
"""
with self.lock:
incoming_gstreamers = self._unpack_incoming_gstreamers(data.medias)
incoming_gstreamers_ids = set(incoming_gstreamers.keys())
current_gstreamers_ids = get_app_instances_ids(self.gstreamers)
existing_media_ids, fresh_media_ids = self._partition_existing_medias(list(incoming_gstreamers.values()))
# gstreamers to remove
for gstreamer_pool_id in current_gstreamers_ids:
if gstreamer_pool_id in existing_media_ids:
rospy.loginfo("Media already playing: %s" % gstreamer_pool_id)
continue
rospy.loginfo("Removing gstreamer id %s" % gstreamer_pool_id)
self._remove_gstreamer(gstreamer_pool_id)
# gstreamers to create
for gstreamer_pool_id in fresh_media_ids:
rospy.loginfo("Creating gstreamer with id %s" % gstreamer_pool_id)
self._create_gstreamer(gstreamer_pool_id, incoming_gstreamers[gstreamer_pool_id])
return True
def get_media_apps_info(self, request):
"""
Connected to a service call, returns content of the internal
container tracking currently running managed applications.
"""
with self.lock:
d = {app_id: str(app_info) for app_id, app_info in list(self.gstreamers.items())}
return MediaAppsInfoResponse(json=json.dumps(d))
def _create_gstreamer(self, gstreamer_id, incoming_gstreamer):
"""
Start a ManagedApplication instance according to the details in the
media argument and return process instance, FIFO file (full path) to
drive the gstreamer application and resource URL.
"""
geometry = WindowGeometry(x=incoming_gstreamer.geometry.x,
y=incoming_gstreamer.geometry.y,
width=incoming_gstreamer.geometry.width,
height=incoming_gstreamer.geometry.height)
gstreamer_window = ManagedWindow(geometry=geometry,
w_name=str(gstreamer_id),
layer=ManagedWindow.LAYER_ABOVE)
if incoming_gstreamer.on_finish == "nothing" or incoming_gstreamer.on_finish == "close":
respawn = False
else:
respawn = True
gstreamer = ManagedGstreamer(url=incoming_gstreamer.url,
slug=gstreamer_id,
window=gstreamer_window,
respawn=respawn,
extra_args=incoming_gstreamer.extra_args)
gstreamer.set_state(ApplicationState.VISIBLE)
rospy.logdebug("MPlayer Pool: started new gstreamer instance %s on viewport %s with id %s" % (self.viewport_name, incoming_gstreamer, gstreamer_id))
self.gstreamers[gstreamer_id] = gstreamer
return True
def _remove_gstreamer(self, gstreamer_pool_id):
"""
Wipe out gstreamer instance - both from the screen and memory
"""
gstreamer_instance = self.gstreamers[gstreamer_pool_id]
rospy.logdebug("Stopping app id '%s', Gstreamer instance %s:" % (gstreamer_pool_id, gstreamer_instance))
gstreamer_instance.close()
del self.gstreamers[gstreamer_pool_id]
def handle_soft_relaunch(self, *args, **kwargs):
gstreamers = list(self.gstreamers.keys())
for gstreamer in gstreamers:
self._remove_gstreamer(gstreamer)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
1604894
|
from airflow import DAG
from airflow_kubernetes_job_operator.kubernetes_legacy_job_operator import (
KubernetesLegacyJobOperator,
)
# from airflow.operators.bash_operator import BashOperator
from airflow.utils.dates import days_ago
# These args will get passed on to each operator
# You can override them on a per-task basis during operator initialization
default_args = {"owner": "tester", "start_date": days_ago(2), "retries": 0}
dag = DAG(
"legacy-job-operator-example",
default_args=default_args,
description="Test base job operator",
schedule_interval=None,
)
bash_script = """
#!/usr/bin/env bash
echo "Starting"
TIC_COUNT=0
cur_count=0
while true; do
cur_count=$((cur_count + 1))
if [ "$cur_count" -ge "$TIC_COUNT" ]; then
break
fi
date
sleep 1
done
echo "Complete"
"""
# BashOperator(bash_command="date", task_id="test-bash", dag=dag)
KubernetesLegacyJobOperator(
task_id="legacy-test-job-success",
image="ubuntu",
cmds=["bash", "-c", bash_script],
dag=dag,
is_delete_operator_pod=True,
)
KubernetesLegacyJobOperator(
task_id="legacy-test-job-fail",
image="ubuntu",
cmds=["bash", "-c", bash_script + "\nexit 99"],
dag=dag,
is_delete_operator_pod=True,
)
|
1604932
|
import FWCore.ParameterSet.Config as cms
#
# module to combine the persistent genParticles
# from the top decay and top mothers
#
genEvtSingleTop = cms.EDProducer("StGenEventReco",
src = cms.InputTag("decaySubset"),
init = cms.InputTag("initSubset")
)
|
1604935
|
from office365.entity_collection import EntityCollection
from office365.teams.channels.channel import Channel
class ChannelCollection(EntityCollection):
"""Team's collection"""
def __init__(self, context, resource_path=None):
super(ChannelCollection, self).__init__(context, Channel, resource_path)
def add(self, display_name, description=None):
"""Create a new channel in a Microsoft Team, as specified in the request body.
:param str description: Optional textual description for the channel.
:param str display_name: Channel name as it will appear to the user in Microsoft Teams.
:rtype: Channel
"""
return super(ChannelCollection, self).add(
displayName=display_name,
description=description
)
|
1604962
|
from deepaffects.realtime.deepaffects_realtime_pb2 import SegmentChunk
def segment_chunk(content, encoding="wav", languageCode="en-US", sampleRate=8000, segmentOffset=0, duration=0):
"""segment_chunk.
Args:
encoding : Audio Encoding,
languageCode: language code ,
sampleRate: sample rate of audio ,
content: base64 encoded audio,
duration: in seconds,
segmentOffset: offset of the segment in complete audio stream
"""
    if duration < 3:
        raise ValueError('Chunk duration should be at least 3 sec.')
return SegmentChunk(
content=content,
encoding=encoding,
languageCode=languageCode,
sampleRate=sampleRate,
duration=duration,
segmentOffset=segmentOffset)
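# Illustrative usage sketch: builds one chunk from a local wav file. The path
# 'sample.wav' and the 4-second duration are arbitrary example values, and content
# is passed as a base64 string as the docstring above describes (the generated
# proto may instead expect bytes, depending on its definition).
if __name__ == '__main__':
    import base64

    with open('sample.wav', 'rb') as wav_file:  # hypothetical input file
        encoded_audio = base64.b64encode(wav_file.read()).decode('utf-8')
    chunk = segment_chunk(content=encoded_audio, encoding='wav', languageCode='en-US',
                          sampleRate=8000, segmentOffset=0, duration=4)
    print(chunk)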
|
1604963
|
from __future__ import print_function, unicode_literals, with_statement, division
from django.core.management.base import BaseCommand
from django.utils.crypto import get_random_string
from django.utils.translation import ugettext as _
import string
class Command(BaseCommand):
help = _('This command generates SECRET_KEY')
# Default length is 50
length = 50
# Allowed characters
allowed_chars = string.ascii_lowercase + string.ascii_uppercase + string.digits + string.punctuation
def add_arguments(self, parser):
"""
Define optional arguments with default values
"""
parser.add_argument('--length', default=self.length,
type=int, help=_('SECRET_KEY length default=%d' % self.length))
parser.add_argument('--alphabet', default=self.allowed_chars,
type=str, help=_('alphabet to use default=%s' % self.allowed_chars))
def handle(self, *args, **options):
length = options.get('length')
alphabet = options.get('alphabet')
secret_key = str(get_random_string(length=length, allowed_chars=alphabet))
print('SECRET_KEY: %s' % secret_key)
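# Illustrative invocation (assuming this file is saved as
# <app>/management/commands/generate_secret_key.py; the command name follows the file name):
#   python manage.py generate_secret_key --length 64 --alphabet abcdef0123456789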
|
1604964
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
str_to_int,
)
class ZippCastIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?zippcast\.com/(?:video/|videoview\.php\?.*\bvplay=)(?P<id>[0-9a-zA-Z]+)'
_TESTS = [{
# m3u8, hq direct link
'url': 'http://www.zippcast.com/video/c9cfd5c7e44dbc29c81',
'md5': '5ea0263b5606866c4d6cda0fc5e8c6b6',
'info_dict': {
'id': 'c9cfd5c7e44dbc29c81',
'ext': 'mp4',
'title': '[Vinesauce] Vinny - Digital Space Traveler',
'description': 'Muted on youtube, but now uploaded in it\'s original form.',
'thumbnail': 're:^https?://.*\.jpg$',
'uploader': 'vinesauce',
'view_count': int,
'categories': ['Entertainment'],
'tags': list,
},
}, {
# f4m, lq ipod direct link
'url': 'http://www.zippcast.com/video/b79c0a233e9c6581775',
'only_matching': True,
}, {
'url': 'http://www.zippcast.com/videoview.php?vplay=c9cfd5c7e44dbc29c81&auto=no',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'http://www.zippcast.com/video/%s' % video_id, video_id)
formats = []
video_url = self._search_regex(
r'<source[^>]+src=(["\'])(?P<url>.+?)\1', webpage,
'video url', default=None, group='url')
if video_url:
formats.append({
'url': video_url,
'format_id': 'http',
'preference': 0, # direct link is almost always of worse quality
})
src_url = self._search_regex(
r'src\s*:\s*(?:escape\()?(["\'])(?P<url>http://.+?)\1',
webpage, 'src', default=None, group='url')
ext = determine_ext(src_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
src_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(
src_url, video_id, f4m_id='hds', fatal=False))
self._sort_formats(formats)
title = self._og_search_title(webpage)
description = self._og_search_description(webpage) or self._html_search_meta(
'description', webpage)
uploader = self._search_regex(
r'<a[^>]+href="https?://[^/]+/profile/[^>]+>([^<]+)</a>',
webpage, 'uploader', fatal=False)
thumbnail = self._og_search_thumbnail(webpage)
view_count = str_to_int(self._search_regex(
r'>([\d,.]+) views!', webpage, 'view count', fatal=False))
categories = re.findall(
r'<a[^>]+href="https?://[^/]+/categories/[^"]+">([^<]+),?<',
webpage)
tags = re.findall(
r'<a[^>]+href="https?://[^/]+/search/tags/[^"]+">([^<]+),?<',
webpage)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'view_count': view_count,
'categories': categories,
'tags': tags,
'formats': formats,
}
|
1604968
|
from gym.envs.registration import register
from . import env_v1
register(
id='ObstacleAvoidance-v0',
entry_point='environments.carla_enviroments.env_v1_ObstacleAvoidance.env_v1:ObstacleAvoidanceScenario',
trials = 10,
reward_threshold = 100.,
)
register(
id='ObstacleAvoidance-v1',
entry_point='environments.carla_enviroments.env_v1_ObstacleAvoidance.env_v1_two_eyes:ObstacleAvoidanceScenarioTwoEyes',
trials = 10,
reward_threshold = 100.,
)
register(
id='ObstacleAvoidance-v2',
entry_point='environments.carla_enviroments.env_v1_ObstacleAvoidance.env_v1_dynamic:ObstacleAvoidanceScenarioDynamic',
trials = 10,
reward_threshold = 100.,
)
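# Illustrative usage: once this package is imported (so the register() calls above
# have run), the scenarios can be created through the standard gym API. A running
# CARLA server is assumed to be required by the underlying environment classes.
#
#   import gym
#   import environments.carla_enviroments.env_v1_ObstacleAvoidance  # triggers registration
#
#   env = gym.make('ObstacleAvoidance-v0')
#   observation = env.reset()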
|
1605054
|
import torch
import torch.nn as nn
def Conv1x1ReLU(in_channels,out_channels):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1),
nn.ReLU6(inplace=True)
)
def Conv3x3ReLU(in_channels,out_channels,stride,padding):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=padding),
nn.ReLU6(inplace=True)
)
class LossBranch(nn.Module):
def __init__(self,in_channels, mid_channels=64):
super(LossBranch, self).__init__()
self.conv1 = Conv1x1ReLU(in_channels, mid_channels)
self.conv2_score = Conv1x1ReLU(mid_channels, mid_channels)
self.classify = nn.Conv2d(in_channels=mid_channels, out_channels=2, kernel_size=1, stride=1)
self.conv2_bbox = Conv1x1ReLU(mid_channels, mid_channels)
self.regress = nn.Conv2d(in_channels=mid_channels, out_channels=4, kernel_size=1, stride=1)
def forward(self, x):
x = self.conv1(x)
cls = self.classify(self.conv2_score(x))
reg = self.regress(self.conv2_bbox(x))
return cls,reg
class LFFDBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride):
super(LFFDBlock, self).__init__()
mid_channels = out_channels
self.downsampling = True if stride == 2 else False
if self.downsampling:
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=mid_channels, kernel_size=3, stride=stride, padding=0)
self.branch1_relu1 = nn.ReLU6(inplace=True)
self.branch1_conv1 = Conv3x3ReLU(in_channels=mid_channels, out_channels=mid_channels, stride=1, padding=1)
self.branch1_conv2 = nn.Conv2d(in_channels=mid_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
self.relu = nn.ReLU6(inplace=True)
def forward(self, x):
if self.downsampling:
x = self.conv(x)
out = self.branch1_conv2(self.branch1_conv1(self.branch1_relu1(x)))
return self.relu(out+x)
class LFFD(nn.Module):
def __init__(self, classes_num = 2):
super(LFFD, self).__init__()
self.tiny_part1 = nn.Sequential(
Conv3x3ReLU(in_channels=3, out_channels=64, stride=2, padding = 0),
LFFDBlock(in_channels=64, out_channels=64, stride=2),
LFFDBlock(in_channels=64, out_channels=64, stride=1),
LFFDBlock(in_channels=64, out_channels=64, stride=1),
)
self.tiny_part2 = LFFDBlock(in_channels=64, out_channels=64, stride=1)
self.small_part1 = LFFDBlock(in_channels=64, out_channels=64, stride=2)
self.small_part2 = LFFDBlock(in_channels=64, out_channels=64, stride=1)
self.medium_part = nn.Sequential(
LFFDBlock(in_channels=64, out_channels=128, stride=2),
LFFDBlock(in_channels=128, out_channels=128, stride=1),
)
self.large_part1 = LFFDBlock(in_channels=128, out_channels=128, stride=2)
self.large_part2 = LFFDBlock(in_channels=128, out_channels=128, stride=1)
self.large_part3 = LFFDBlock(in_channels=128, out_channels=128, stride=1)
self.loss_branch1 = LossBranch(in_channels=64)
self.loss_branch2 = LossBranch(in_channels=64)
self.loss_branch3 = LossBranch(in_channels=64)
self.loss_branch4 = LossBranch(in_channels=64)
self.loss_branch5 = LossBranch(in_channels=128)
self.loss_branch6 = LossBranch(in_channels=128)
self.loss_branch7 = LossBranch(in_channels=128)
self.loss_branch8 = LossBranch(in_channels=128)
def forward(self, x):
branch1 = self.tiny_part1(x)
branch2 = self.tiny_part2(branch1)
branch3 = self.small_part1(branch2)
branch4 = self.small_part2(branch3)
branch5 = self.medium_part(branch4)
branch6 = self.large_part1(branch5)
branch7 = self.large_part2(branch6)
branch8 = self.large_part3(branch7)
cls1,loc1 = self.loss_branch1(branch1)
cls2,loc2 = self.loss_branch2(branch2)
cls3,loc3 = self.loss_branch3(branch3)
cls4,loc4 = self.loss_branch4(branch4)
cls5,loc5 = self.loss_branch5(branch5)
cls6,loc6 = self.loss_branch6(branch6)
cls7,loc7 = self.loss_branch7(branch7)
cls8,loc8 = self.loss_branch8(branch8)
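        # Each branch's (N, C, H, W) map is permuted to channel-last and flattened to
        # (N, H*W*C); the 8 detection scales are then concatenated along dim 1 to give
        # one score tensor and one box-regression tensor.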
cls = torch.cat([cls1.permute(0, 2, 3, 1).contiguous().view(loc1.size(0), -1),
cls2.permute(0, 2, 3, 1).contiguous().view(loc1.size(0), -1),
cls3.permute(0, 2, 3, 1).contiguous().view(loc1.size(0), -1),
cls4.permute(0, 2, 3, 1).contiguous().view(loc1.size(0), -1),
cls5.permute(0, 2, 3, 1).contiguous().view(loc1.size(0), -1),
cls6.permute(0, 2, 3, 1).contiguous().view(loc1.size(0), -1),
cls7.permute(0, 2, 3, 1).contiguous().view(loc1.size(0), -1),
cls8.permute(0, 2, 3, 1).contiguous().view(loc1.size(0), -1)], dim=1)
loc = torch.cat([loc1.permute(0, 2, 3, 1).contiguous().view(loc1.size(0), -1),
loc2.permute(0, 2, 3, 1).contiguous().view(loc1.size(0), -1),
loc3.permute(0, 2, 3, 1).contiguous().view(loc1.size(0), -1),
loc4.permute(0, 2, 3, 1).contiguous().view(loc1.size(0), -1),
loc5.permute(0, 2, 3, 1).contiguous().view(loc1.size(0), -1),
loc6.permute(0, 2, 3, 1).contiguous().view(loc1.size(0), -1),
loc7.permute(0, 2, 3, 1).contiguous().view(loc1.size(0), -1),
loc8.permute(0, 2, 3, 1).contiguous().view(loc1.size(0), -1)], dim=1)
out = (cls,loc)
return out
if __name__ == '__main__':
net = LFFD()
print(net)
input = torch.randn(1,3,480,640)
output = net(input)
print(output[0].shape)
print(output[1].shape)
|
1605098
|
from __future__ import absolute_import, unicode_literals, print_function
import logging
import pprint
from builtins import str, object
from future.utils import python_2_unicode_compatible, iteritems
from .compat import PY2, PY2_STR
log = logging.getLogger(__name__)
__all__ = ('compile',
'TypeMatch',
'RegexpMatch',
'MissingKey')
def compile(spec):
"""
Args:
spec (dict): A specification dict that attempts to "break" test dicts
Returns:
JsonMatcher.
"""
return JsonMatcher(spec)
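# Illustrative example of a spec: values may be literals, types (matched with
# isinstance), compiled regexps (matched with .match) or callables, e.g.
#   matcher = compile({'id': int, 'name': re.compile(r'^[a-z]+$'), 'active': True})
#   matcher.matches({'id': 1, 'name': 'bob', 'active': True})  # -> True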
class JsonMatcher(object):
"""Matches candidate dictionaries against a spec. Track any mismatches,
'breaks'."""
def __init__(self, spec):
"""
Args:
spec (dict|list): the dictionary or list that we're comparing to.
Any value, nested or otherwise, can be a type in order to match
a range of values.
"""
self.spec = spec
def matches(self, test_d, **kwargs):
"""
Return True if `test_d` matches the specification dict.
Kwargs:
forwarded to JsonMatcher.breaks.
"""
return (not self.breaks(test_d, **kwargs))
def assert_matches(self, test_d, assertion_msg=None, **kwargs):
"""
Assert that a test dict matches the schema. If not, print the breaks
and throw an AssertionError.
Kwargs:
assertion_msg (str): Message to display if the assertion fails.
other kwargs are forwarded to JsonMatcher.breaks.
"""
bs = self.breaks(test_d, **kwargs)
msg = assertion_msg or "Candidate dict doesn't match schema."
if bs:
print(bs.breaks_str)
raise AssertionError(msg)
def breaks(self, test_d, is_ordered=True):
"""
        Return None if `test_d` is an acceptable match to `self.spec`,
        a Breaks object otherwise.
Args:
test_d (dict|list): the dictionary or list that may not be
acceptable
Kwargs:
is_ordered (bool): should we care about the order of a
list?
Returns:
Breaks or None.
"""
breaks = Breaks(self.spec, test_d)
exp_d = self.spec
if isinstance(self.spec, (list, tuple)):
exp_d = self._seq_to_dict(exp_d, is_ordered)
if isinstance(test_d, (list, tuple)):
test_d = self._seq_to_dict(test_d, is_ordered)
bs = self._find_breaks(exp_d, test_d, tuple(), is_ordered, breaks)
return bs if bs else None
def _find_breaks(self, exp_d, test_d, key_trail, is_ordered, breaks):
"""
Internal (mis)matching function. Fill out our Breaks object and return
it.
Args:
exp_d (dict)
test_d (dict): the dictionary that may not be acceptable
key_trail (list): an accumulation of keys as we descend into the
comparison
is_ordered (bool): should we care about the order of a
list?
breaks (Breaks): for tracking mismatches
Returns:
Breaks.
"""
if not isinstance(test_d, dict):
log.info("`test_d` isn't a dict! Can't compare.")
            breaks.add_break(key_trail, TypeMatch(dict), type(test_d))
return breaks
exp_key_set = set(exp_d.keys())
test_key_set = set(test_d.keys())
only_in_exp = (exp_key_set - test_key_set)
only_in_test = (test_key_set - exp_key_set)
for key in only_in_exp:
this_key_trail = key_trail + (key,)
breaks.add_break(this_key_trail,
exp_d.get(key),
MissingKey())
for key in only_in_test:
this_key_trail = key_trail + (key,)
breaks.add_break(this_key_trail,
MissingKey(),
test_d.get(key))
if only_in_test or only_in_exp:
log.info("Someone is missing keys.")
return breaks
# PY3: `for key, val in exp_d.items()`
for key, val in iteritems(exp_d):
test_val = test_d[key]
this_key_trail = key_trail + (key,)
# don't append a mismatch if we recurse
append_diff = True
val_to_record = None
if isinstance(val, dict):
is_val_match = self._find_breaks(val,
test_val,
this_key_trail,
is_ordered,
breaks)
append_diff = False
elif isinstance(val, (list, tuple)):
exp_val_dict = self._seq_to_dict(val, is_ordered)
test_val_dict = self._seq_to_dict(test_val, is_ordered)
is_val_match = self._find_breaks(exp_val_dict,
test_val_dict,
this_key_trail,
is_ordered,
breaks)
append_diff = False
elif isinstance(val, type):
if val == PY2_STR and PY2:
val = (val, str)
else:
val = (val,)
is_val_match = isinstance(test_val, val)
val_to_record = TypeMatch(*val)
elif hasattr(val, 'match'):
# regexp object
try:
is_val_match = bool(val.match(test_val))
except TypeError:
is_val_match = False
val_to_record = RegexpMatch(val.pattern)
elif callable(val):
# use `val` as a callable
try:
is_val_match = val(test_val)
except Exception as e:
log.info("Value match for '%s' failed with %s." %
(this_key_trail, e))
is_val_match = False
else:
# kick to object equality
is_val_match = (val == test_val)
val_to_record = val_to_record or val
if not is_val_match and append_diff:
breaks.add_break(this_key_trail, val_to_record, test_val)
return breaks
def _seq_to_dict(self, seq, is_ordered=False):
"""Convert a sequence to a dict where each value is keyed by the seq
index."""
seq = seq or []
if not is_ordered:
seq = sorted(seq)
return dict(list(zip(list(range(len(seq))), seq)))
@python_2_unicode_compatible
class Breaks(object):
"""Represents the diff between a specification dict and a test dict."""
def __init__(self, spec, against):
"""
Args:
spec (dict): the specification dict
against (dict): the dict we're testing against
"""
self.spec = spec
self.against = against
self.paths_to_breaks = {}
def add_break(self, path_tuple, spec_val, against_val):
self.paths_to_breaks[path_tuple] = (spec_val, against_val)
    def __bool__(self):
        return bool(self.paths_to_breaks)
    # Python 2 evaluates truthiness via __nonzero__; alias it so `if breaks:` works there too
    __nonzero__ = __bool__
@property
def breaks_str(self):
"""Print a comparison of expected vs. got."""
return ("Expected:\n%s\n" % pprint.pformat(self.spec)
+ "\nGot:\n%s\n" % pprint.pformat(self.against)
+ "\nDiffs:\n%s\n" % pprint.pformat(self.paths_to_breaks))
def __str__(self):
return "<%d breaks>" % (len(list(self.paths_to_breaks.keys())))
@python_2_unicode_compatible
class TypeMatch(object):
def __init__(self, *to_match):
"""
Args:
to_match ([type, ...]): a list of types to match on
"""
self.to_match = to_match
def is_match(self, val):
return isinstance(val, self.to_match)
def __eq__(self, other):
return other.to_match == self.to_match
def __str__(self):
return "TypeMatch({})".format(', '.join([
str(t) for t in self.to_match
]))
def __repr__(self):
return self.__str__()
@python_2_unicode_compatible
class RegexpMatch(object):
"""Represents a match that expects a value fitting a regexp pattern."""
def __init__(self, pattern):
self.pattern = pattern
def __eq__(self, other):
return getattr(other, 'pattern', other) == self.pattern
def __str__(self):
return "RegexpMatch(r'{}')".format(self.pattern)
def __repr__(self):
return self.__str__()
class MissingKey(object):
"""Represents a missing key in one of the dicts."""
def __eq__(self, other):
return isinstance(other, MissingKey)
def __str__(self):
return "<MissingKey>"
def __repr__(self):
return self.__str__()
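# Usage sketch (illustrative only, not part of the library): exercises the matcher
# API defined above with hypothetical spec values covering type, regexp, callable,
# nested-sequence and literal matching. The module uses relative imports and is
# meant to be imported, so the block below only runs when executed as a script.
if __name__ == '__main__':
    import re
    spec = {
        'id': int,                          # TypeMatch: any int passes
        'name': re.compile(r'^[a-z]+$'),    # RegexpMatch: value must fit the pattern
        'score': lambda v: 0 <= v <= 100,   # callable predicate
        'tags': ['a', 'b'],                 # sequences compare element-wise
        'status': 'ok',                     # plain equality
    }
    matcher = compile(spec)
    ok = {'id': 1, 'name': 'alice', 'score': 42, 'tags': ['b', 'a'], 'status': 'ok'}
    bad = {'id': 'oops', 'name': 'alice', 'score': 42, 'tags': ['a', 'b'], 'status': 'ok'}
    print(matcher.matches(ok, is_ordered=False))  # True: order of 'tags' ignored
    print(matcher.breaks(bad))                    # <1 breaks> ('id' is not an int)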
|
1605113
|
import sys
from pyiron_atomistics import Project, __version__
pr = Project("tests/static/backwards/")
for job in pr.iter_jobs(recursive=True, convert_to_object=False):
if job.name == "sphinx":
job = job.to_object()
job.run()
print("job {} loaded from {}".format(job.id, sys.argv[0]))
|
1605135
|
from ... import UP, DOWN, LEFT, RIGHT, UP_2, DOWN_2, LEFT_2, RIGHT_2
class Movable:
move_up = UP
move_up_alt = UP_2
move_down = DOWN
move_down_alt = DOWN_2
move_left = LEFT
move_left_alt = LEFT_2
move_right = RIGHT
move_right_alt = RIGHT_2
lr_step = 1
ud_step = 1
wrap_height = None
wrap_width = None
bounded = False
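    # Subclasses typically override the key constants, the lr/ud step sizes,
    # wrap_height/wrap_width (when set, the position wraps modulo these values)
    # and bounded (when True, movement stops at the parent widget's edges).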
def on_press(self, key):
top, left = self.top, self.left
height, width = self.height, self.width
bounded = self.bounded
if key == self.move_up or key == self.move_up_alt:
if not bounded or top > 0:
self.top -= self.ud_step
elif key == self.move_down or key == self.move_down_alt:
if not bounded or top + height < self.parent.height:
self.top += self.ud_step
elif key == self.move_left or key == self.move_left_alt:
if not bounded or left > 0:
self.left -= self.lr_step
elif key == self.move_right or key == self.move_right_alt:
if not bounded or left + width < self.parent.width:
self.left += self.lr_step
else:
return super().on_press(key)
if self.wrap_height:
self.top %= self.wrap_height
if self.wrap_width:
self.left %= self.wrap_width
return True
|
1605203
|
import FWCore.ParameterSet.Config as cms
from DQM.L1TMonitorClient.L1TOccupancyClient_cfi import *
|
1605276
|
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import Adam
from einops import rearrange, repeat
import sidechainnet as scn
from en_transformer.en_transformer import EnTransformer
torch.set_default_dtype(torch.float64)
BATCH_SIZE = 1
GRADIENT_ACCUMULATE_EVERY = 16
def cycle(loader, len_thres = 200):
while True:
for data in loader:
if data.seqs.shape[1] > len_thres:
continue
yield data
transformer = EnTransformer(
num_tokens = 21,
dim = 32,
dim_head = 64,
heads = 4,
depth = 4,
rel_pos_emb = True, # there is inherent order in the sequence (backbone atoms of amino acid chain)
neighbors = 16
)
data = scn.load(
casp_version = 12,
thinning = 30,
with_pytorch = 'dataloaders',
batch_size = BATCH_SIZE,
dynamic_batching = False
)
dl = cycle(data['train'])
optim = Adam(transformer.parameters(), lr=1e-3)
transformer = transformer.cuda()
for _ in range(10000):
for _ in range(GRADIENT_ACCUMULATE_EVERY):
batch = next(dl)
seqs, coords, masks = batch.seqs, batch.crds, batch.msks
seqs = seqs.cuda().argmax(dim = -1)
coords = coords.cuda().type(torch.float64)
masks = masks.cuda().bool()
l = seqs.shape[1]
coords = rearrange(coords, 'b (l s) c -> b l s c', s = 14)
# keeping only the backbone coordinates
coords = coords[:, :, 0:3, :]
coords = rearrange(coords, 'b l s c -> b (l s) c')
seq = repeat(seqs, 'b n -> b (n c)', c = 3)
masks = repeat(masks, 'b n -> b (n c)', c = 3)
noised_coords = coords + torch.randn_like(coords)
feats, denoised_coords = transformer(seq, noised_coords, mask = masks)
loss = F.mse_loss(denoised_coords[masks], coords[masks])
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
print('loss:', loss.item())
optim.step()
optim.zero_grad()
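# Optional follow-up (a sketch, not part of the original script): persist the
# trained weights once the outer loop above finishes; the filename is an assumption.
torch.save(transformer.state_dict(), 'en_transformer_denoise.pt')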
|
1605291
|
import numpy as np
import matplotlib.pyplot as plt
plt.figure(1)
plt.clf()
plt.axis([-10, 10, -10, 10])
# Define properties of the "bouncing balls"
n = 10
pos = (20 * np.random.sample(n*2) - 10).reshape(n, 2)
vel = (0.3 * np.random.normal(size=n*2)).reshape(n, 2)
sizes = 100 * np.random.sample(n) + 100
# Colors where each row is (Red, Green, Blue, Alpha). Each can go
# from 0 to 1. Alpha is the transparency.
colors = np.random.sample([n, 4])
# Draw all the circles and return an object ``circles`` that allows
# manipulation of the plotted circles.
circles = plt.scatter(pos[:,0], pos[:,1], marker='o', s=sizes, c=colors)
for i in range(100):
pos = pos + vel
bounce = abs(pos) > 10 # Find balls that are outside walls
vel[bounce] = -vel[bounce] # Bounce if outside the walls
circles.set_offsets(pos) # Change the positions
plt.pause(0.05)
|