| id (string, length 3 to 8) | content (string, length 100 to 981k) |
|---|---|
11507780
|
from distutils.core import setup
setup(
name='permission',
version='0.4.1',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/hustlzp/permission',
packages=['permission'],
license='MIT',
description='Simple and flexible permission control for Flask app.',
long_description=open('README.rst').read(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
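A hedged modernization sketch, not part of the original file: distutils is deprecated and removed in Python 3.12, so an equivalent setuptools configuration would look roughly like the following. All metadata is copied from the call above; find_packages() is the only substitution.

from setuptools import setup, find_packages

setup(
    name='permission',
    version='0.4.1',
    packages=find_packages(),
    license='MIT',
    description='Simple and flexible permission control for Flask app.',
)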
|
11507796
|
import os
import sys
import argparse
import binascii
try:
import re
except ImportError:
import ure as re
import socket
try:
import socketserver
except ImportError:
try:
import SocketServer as socketserver
except ImportError:
import upy.socketserver as socketserver
import errno
import rpcBind, rpcRequest
from dcerpc import MSRPCHeader, MSRPC_BIND, MSRPC_REQUEST, MSRPC_ALTERCTX
try:
IOError
except NameError:
class IOError(OSError):
pass
if hasattr(os, 'fork'):
class TCPServer(socketserver.ForkingTCPServer):
pass
else: # os.fork not implemented on Windows
class TCPServer(socketserver.ThreadingTCPServer):
pass
config = {}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("ip", nargs="?", action="store", default="0.0.0.0", help="The IP address to listen on. The default is \"0.0.0.0\" (all interfaces).", type=str)
parser.add_argument("port", nargs="?", action="store", default=1688, help="The network port to listen on. The default is \"1688\".", type=int)
parser.add_argument("-e", "--epid", dest="epid", default=None, help="Use this flag to manually specify an ePID to use. If no ePID is specified, a random ePID will be generated.", type=str)
parser.add_argument("-l", "--lcid", dest="lcid", default=None, help="Use this flag to manually specify an LCID for use with randomly generated ePIDs. Default is user default language.", type=int)
parser.add_argument("-c", "--client-count", dest="CurrentClientCount", default=None, help="Use this flag to specify the current client count. Default is 26. A number >25 is required to enable activation.", type=int)
parser.add_argument("-a", "--activation-interval", dest="VLActivationInterval", default=120, help="Use this flag to specify the activation interval (in minutes). Default is 120 minutes (2 hours).", type=int)
parser.add_argument("-r", "--renewal-interval", dest="VLRenewalInterval", default=1440 * 7, help="Use this flag to specify the renewal interval (in minutes). Default is 10080 minutes (7 days).", type=int)
parser.add_argument("-v", "--verbose", dest="verbose", action="store_const", const=True, default=False, help="Use this flag to enable verbose output.")
parser.add_argument("-d", "--debug", dest="debug", action="store_const", const=True, default=False, help="Use this flag to enable debug output. Implies \"-v\".")
parser.add_argument("-s", "--sqlite", dest="sqlite", action="store_const", const=True, default=False, help="Use this flag to store request information from unique clients in an SQLite database.")
parser.add_argument("-o", "--log", dest="log", action="store_const", const=True, default=False, help="Use this flag to enable logging to a file.")
parser.add_argument("-w", "--hwid", dest="hwid", action="store", default='364F463A8863D35F', help="Use this flag to specify a HWID. The HWID must be an 16-character string of hex characters. The default is \"364F463A8863D35F\".")
parsed = parser.parse_args()
try:
config.update(vars(parsed))
except NameError: # vars not supported on micropython
config.update(dict((o.dest, getattr(parsed, o.dest)) for o in parser.pos))
config.update(dict((o.dest, getattr(parsed, o.dest)) for o in parser.opt))
# Sanitize HWID: strip an optional "0x" prefix explicitly (str.strip('0x')
# would also eat leading/trailing zeros), keep only hex digits, then decode.
hwid = config['hwid']
if hwid.lower().startswith('0x'):
hwid = hwid[2:]
hwid = ''.join(e for e in hwid if re.match(r'[0-9a-fA-F]', e))
if len(hwid) < 16:
print("Error: HWID \"%s\" is invalid. Hex string is too short." % hwid)
return
elif len(hwid) > 16:
print("Error: HWID \"%s\" is invalid. Hex string is too long." % hwid)
return
try:
config['hwid'] = binascii.a2b_hex(hwid)
except (TypeError, ValueError):
# a2b_hex raises TypeError on Python 2 and binascii.Error (a ValueError
# subclass) on Python 3; report the original string, not undecoded bytes.
print("Error: HWID \"%s\" is invalid. Odd-length hex string." % hwid)
return
if not config['lcid']:
# http://stackoverflow.com/questions/3425294/how-to-detect-the-os-default-language-in-python
if hasattr(sys, 'implementation') and sys.implementation.name == 'micropython':
config['lcid'] = 1033
elif os.name == 'nt':
import ctypes
config['lcid'] = ctypes.windll.kernel32.GetUserDefaultUILanguage() # TODO: or GetSystemDefaultUILanguage?
else:
import locale
try:
config['lcid'] = next(k for k, v in locale.windows_locale.items() if v == locale.getdefaultlocale()[0])
except StopIteration:
config['lcid'] = 1033
if config['debug']:
config['verbose'] = True
try:
import sqlite3
except ImportError:
print("Warning: Module \"sqlite3\" is not installed--database support disabled.")
config['dbSupport'] = False
else:
config['dbSupport'] = True
TCPServer.address_family = socket.getaddrinfo(config['ip'], config['port'], 0, socket.SOCK_DGRAM)[0][0]
try:
server = TCPServer((config['ip'], config['port']), kmsServer)
except OSError: # micropython can't recognize 2-tuple server_address
server = TCPServer((config['ip'], config['port'], socket.AF_INET6), kmsServer)
server.timeout = 5
print("TCP server listening at %s on port %d." % (config['ip'],config['port']))
server.serve_forever()
class kmsServer(socketserver.BaseRequestHandler):
def setup(self):
print("Connection accepted: %s:%d" % (self.client_address[0],self.client_address[1]))
def handle(self):
while True:
# self.request is the TCP socket connected to the client
try:
data = self.request.recv(1024)
except socket.error as e:
if e.errno == errno.ECONNRESET:
print("Error: Connection reset by peer.")
break
else:
raise
if not data:
print("No data received!")
break
# data = bytearray(data.strip())
# print binascii.b2a_hex(str(data))
packetType = MSRPCHeader(data)['type']
if packetType in (MSRPC_BIND, MSRPC_ALTERCTX):
if config['verbose']:
print("RPC bind request received.")
handler = rpcBind.handler(data, config)
elif packetType == MSRPC_REQUEST:
if config['verbose']:
print("Received activation request.")
handler = rpcRequest.handler(data, config)
else:
print("Error: Invalid RPC request type", packetType)
break
res = handler.populate().__bytes__()
self.request.send(res)
if packetType == MSRPC_BIND:
if config['verbose']:
print("RPC bind acknowledged.")
elif packetType == MSRPC_REQUEST:
if config['verbose']:
print("Responded to activation request.")
break
def finish(self):
self.request.close()
print("Connection closed: %s:%d" % (self.client_address[0],self.client_address[1]))
if __name__ == "__main__":
main()
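A minimal, hedged smoke test for the listener above. Assumptions: the server is already running locally on the default port; this only verifies that the TCP socket accepts connections, not the MSRPC exchange handled by rpcBind/rpcRequest.

import socket

def smoke_test(host="127.0.0.1", port=1688):
    # Connect and immediately close; a refused connection raises OSError.
    with socket.create_connection((host, port), timeout=5):
        print("Listener reachable at %s:%d" % (host, port))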
|
11507812
|
from datetime import datetime
import os.path
from freezegun import freeze_time
from notesdir.models import FileQuery, FileInfoReq, LinkInfo, FileQuerySort, FileQuerySortField, FileInfo
def test_referent_skips_invalid_urls():
assert LinkInfo('foo', 'file://no[').referent() is None
def test_referent_skips_non_file_schemes():
assert LinkInfo('foo', 'http:///bar').referent() is None
def test_referent_skips_non_local_hosts():
assert LinkInfo('foo', 'file://example.com/bar').referent() is None
def test_referent_matches_absolute_paths():
assert LinkInfo('foo', '/bar').referent() == '/bar'
assert LinkInfo('foo', 'file:///bar').referent() == '/bar'
assert LinkInfo('foo', 'file://localhost/bar').referent() == '/bar'
def test_referent_matches_relative_paths():
assert LinkInfo('/baz/foo', 'bar').referent() == '/baz/bar'
def test_referent_resolves_symlinks(fs):
fs.cwd = '/cwd'
fs.create_symlink('/cwd/bar', '/cwd/target')
assert LinkInfo('foo', 'bar/baz').referent() == '/cwd/target/baz'
def test_referent_ignores_query_and_fragment():
assert LinkInfo('/foo', 'bar#baz').referent() == '/bar'
assert LinkInfo('/foo', 'bar?baz').referent() == '/bar'
def test_referent_resolves_relative_to_referrer(fs):
fs.cwd = '/meh'
assert LinkInfo('/foo/bar', 'baz').referent() == os.path.realpath('../foo/baz')
def test_referent_handles_special_characters():
assert LinkInfo('/foo', 'hi%20there%21').referent() == '/hi there!'
assert LinkInfo('/foo', 'hi+there%21').referent() == '/hi there!'
def test_referent_self():
assert LinkInfo('/foo/bar', 'bar#baz').referent() == '/foo/bar'
assert LinkInfo('/foo/bar', '#baz').referent() == '/foo/bar'
@freeze_time('2012-02-03T04:05:06Z')
def test_guess_created(fs):
info = FileInfo('foo')
assert info.guess_created() is None
fs.create_file('foo')
assert info.guess_created().isoformat() == '2012-02-03T04:05:06+00:00'
info.created = datetime(1, 2, 3, 4, 5, 6)
assert info.guess_created() == datetime(1, 2, 3, 4, 5, 6)
def test_parse_query():
strquery = 'tag:first+tag,second -tag:third,fourth+tag tag:fifth sort:created,-backlinks'
expected = FileQuery(
include_tags={'first tag', 'second', 'fifth'},
exclude_tags={'third', 'fourth tag'},
sort_by=[FileQuerySort(FileQuerySortField.CREATED),
FileQuerySort(FileQuerySortField.BACKLINKS_COUNT, reverse=True)])
assert FileQuery.parse(strquery) == expected
def test_apply_sorting():
data = [
FileInfo('/a/one', tags={'baz'},
backlinks=[LinkInfo(referrer='whatever', href='whatever')]),
FileInfo('/b/two', title='Beta', created=datetime(2010, 1, 15)),
FileInfo('/c/Three', title='Gamma', created=datetime(2012, 1, 9),
backlinks=[LinkInfo(referrer='whatever', href='whatever'),
LinkInfo(referrer='whatever', href='whatever')]),
FileInfo('/d/four', title='delta', created=datetime(2012, 1, 9), tags={'foo', 'bar'})
]
assert FileQuery.parse('sort:path').apply_sorting(data) == data
assert FileQuery.parse('sort:-path').apply_sorting(data) == list(reversed(data))
assert FileQuery.parse('sort:filename').apply_sorting(data) == [data[3], data[0], data[2], data[1]]
assert FileQuery(sort_by=[FileQuerySort(FileQuerySortField.FILENAME, ignore_case=False)]).apply_sorting(data) == [
data[2], data[3], data[0], data[1]]
assert FileQuery.parse('sort:title').apply_sorting(data) == [data[1], data[3], data[2], data[0]]
assert FileQuery(sort_by=[FileQuerySort(FileQuerySortField.TITLE, ignore_case=False)]).apply_sorting(data) == [
data[1], data[2], data[3], data[0]]
assert FileQuery(sort_by=[FileQuerySort(FileQuerySortField.TITLE, missing_first=True)]).apply_sorting(data) == [
data[0], data[1], data[3], data[2]]
assert FileQuery(sort_by=[FileQuerySort(FileQuerySortField.TITLE,
missing_first=True,
reverse=True)]).apply_sorting(data) == [
data[2], data[3], data[1], data[0]]
assert FileQuery.parse('sort:created').apply_sorting(data) == [data[1], data[2], data[3], data[0]]
assert FileQuery.parse('sort:-created').apply_sorting(data) == [data[0], data[2], data[3], data[1]]
assert FileQuery(sort_by=[FileQuerySort(FileQuerySortField.CREATED, missing_first=True)]).apply_sorting(data) == [
data[0], data[1], data[2], data[3]]
assert FileQuery.parse('sort:-tags').apply_sorting(data) == [data[3], data[0], data[1], data[2]]
assert FileQuery.parse('sort:-backlinks').apply_sorting(data) == [data[2], data[0], data[1], data[3]]
assert FileQuery.parse('sort:created,title').apply_sorting(data) == [data[1], data[3], data[2], data[0]]
assert FileQuery.parse('sort:created,-title').apply_sorting(data) == [data[1], data[2], data[3], data[0]]
def test_parse_info_req():
expected = FileInfoReq(path=True, backlinks=True)
assert FileInfoReq.parse('path,backlinks') == expected
assert FileInfoReq.parse(['path', 'backlinks']) == expected
assert FileInfoReq.parse(expected) == expected
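A hedged round-trip sketch tying the pieces above together: FileQuery.parse builds the sort_by list that apply_sorting then consumes. The data values are made up; the behavior mirrors the 'sort:created' assertions earlier in this file.

def test_parse_then_sort_roundtrip():
    data = [
        FileInfo('/b/two', title='Beta', created=datetime(2010, 1, 15)),
        FileInfo('/a/one', title='Alpha', created=datetime(2012, 1, 9)),
    ]
    # 'sort:created' sorts ascending by creation time, so 2010 comes first.
    assert FileQuery.parse('sort:created').apply_sorting(data) == [data[0], data[1]]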
|
11507852
|
import os
import django
import pytest
from django.db import transaction
from django.test import runner
# Setup at the module level because django settings need to be
# initialized before importing other django code.
# This file is only imported by pytest when running tests.
os.environ['DJANGO_SETTINGS_MODULE'] = 'yawn.settings.test'
django.setup()
@pytest.fixture(scope='session')
def setup_django():
"""Provide a test database and django configuration"""
from yawn.worker.models import Queue
manager = runner.DiscoverRunner(verbosity=1, interactive=False)
old_config = manager.setup_databases()
# create the default queue outside the transaction
Queue.get_default_queue()
yield
manager.teardown_databases(old_config)
class TestPassedRollback(Exception):
"""Signal to rollback after a test passes"""
@pytest.fixture(autouse=True)
def test_transaction(setup_django, request):
if 'no_transaction' in request.keywords:
# the database reconnect test cannot work inside a transaction
# so provide a way to disable transactions.
yield
else:
try:
with transaction.atomic():
yield
raise TestPassedRollback()
except TestPassedRollback:
pass
@pytest.fixture()
def client():
from django.contrib.auth.models import User
from rest_framework.test import APIClient
user = User.objects.create_user('test_user', is_staff=True)
api_client = APIClient()
api_client.force_authenticate(user)
return api_client
@pytest.fixture()
def run():
"""Setup a workflow and run to test with"""
from yawn.workflow.models import WorkflowName
from yawn.task.models import Template
name = WorkflowName.objects.create(name='workflow1')
workflow = name.new_version(parameters={'parent': True, 'child': False})
task1 = Template.objects.create(workflow=workflow, name='task1', command='')
task2 = Template.objects.create(workflow=workflow, name='task2', command='')
task2.upstream.add(task1)
return workflow.submit_run(parameters={'child': True})
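A hedged example of a test consuming the fixtures above. The assertions are deliberately weak because the Run model's attributes and the URL routes are not shown in this file.

def test_fixtures_smoke(client, run):
    # `run` is whatever submit_run returned; `client` is pre-authenticated.
    assert run is not None
    assert client.get('/').status_code in (200, 301, 302, 404)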
|
11507901
|
import asyncio
import math
from PIL import Image
import deeppyer
async def main():
print('[tests] Generating gradient image...')
img = Image.new('RGB', (100, 100))
for y in range(100):
for x in range(100):
distanceToCenter = math.sqrt((x - 100 / 2) ** 2 + (y - 100 / 2) ** 2)
distanceToCenter = float(distanceToCenter) / (math.sqrt(2) * 100 / 2)
r = 0 * distanceToCenter + 255 * (1 - distanceToCenter)
g = 0 * distanceToCenter + 255 * (1 - distanceToCenter)
b = 0 * distanceToCenter + 255 * (1 - distanceToCenter)
img.putpixel((x, y), (int(r), int(g), int(b)))
img.save('./tests/gradient.jpg')
print('[tests] Deepfrying gradient...')
img2 = await deeppyer.deepfry(img)
img2.save('./tests/gradient-fried.jpg')
img2 = await deeppyer.deepfry(img, type=deeppyer.DeepfryTypes.BLUE)
img2.save('./tests/gradient-fried-blue.jpg')
print('[tests] Image successfully deepfried. '
'Saved at `./tests/gradient-fried.jpg` and `./tests/gradient-fried-blue.jpg`.')
print('[tests] Deepfrying `./tests/human-test.jpg` with flares.')
img = Image.open('./tests/human-test.jpg')
img2 = await deeppyer.deepfry(img)
img2.save('./tests/human-fried.jpg')
img2 = await deeppyer.deepfry(img, type=deeppyer.DeepfryTypes.BLUE)
img2.save('./tests/human-fried-blue.jpg')
print('[tests] Human image successfully deepfried. '
'Saved at `./tests/human-fried.jpg` and `./tests/human-fried-blue.jpg`.')
print('[tests] All tests successfully completed.')
asyncio.run(main())
|
11507916
|
from functools import partial
from fastmri_recon.evaluate.metrics.tf_metrics import *
from fastmri_recon.models.training.compile import compound_l1_mssim_loss
from fastmri_recon.models.subclassed_models.vnet import Conv
mssim = partial(compound_l1_mssim_loss, alpha=0.9999)
mssim.__name__ = "mssim"
CUSTOM_TF_OBJECTS = {
'keras_psnr': keras_psnr,
'keras_ssim': keras_ssim,
'compound_l1_mssim_loss': compound_l1_mssim_loss,
'mssim': mssim,
'Conv': Conv,
}
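A hedged usage sketch: a registry shaped like CUSTOM_TF_OBJECTS is typically handed to tf.keras deserialization so the custom metrics, the compound loss, and the Conv layer can be resolved by name. The checkpoint path below is a placeholder.

from tensorflow.keras.models import load_model

def load_with_custom_objects(checkpoint_path='model.h5'):
    # custom_objects maps serialized names back to the Python objects above.
    return load_model(checkpoint_path, custom_objects=CUSTOM_TF_OBJECTS)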
|
11507919
|
from abc import ABC, abstractmethod
from logging import getLogger
from typing import Generic, Iterable, Iterator, List, Optional, Tuple, Type, TypeVar, Union
from intervaltree import Interval, IntervalTree
from tqdm import tqdm
from .cfg import DAG
from .tracing import BasicBlockEntry, FunctionInvocation, ProgramTrace, TraceEvent
log = getLogger(__name__)
V = TypeVar("V")
T = TypeVar("T", bound="ParseTree")
class ParseTree(ABC, Generic[V]):
__slots__ = "value", "_descendants"
def __init__(self, value: V):
self.value: V = value
self._descendants: Optional[int] = None
@property
@abstractmethod
def children(self: T) -> List[T]:
raise NotImplementedError()
def to_dag(self) -> DAG["ParseTree[V]"]:
dag: DAG[ParseTree[V]] = DAG()
if self.children:
dag.add_edges_from((node, child) for node in self.preorder_traversal() for child in node.children)
else:
dag.add_node(self)
return dag
@property
def descendants(self) -> int:
if self._descendants is None:
for n in self.postorder_traversal():
n._descendants = sum(c._descendants for c in n.children) + len(n) # type: ignore
return self._descendants # type: ignore
def postorder_traversal(self: T) -> Iterator[T]:
s: List[Tuple[bool, T]] = [(False, self)]
while s:
expanded_children, node = s.pop()
if not expanded_children and node.children:
s.append((True, node))
s.extend((False, child) for child in reversed(node.children)) # type: ignore
else:
# all of node's children have been expanded
yield node
def preorder_traversal(self: T) -> Iterator[T]:
s: List[T] = [self]
while s:
node = s.pop()
yield node
s.extend(reversed(node.children))
@abstractmethod
def clone(self: T) -> T:
raise NotImplementedError()
def is_leaf(self) -> bool:
return not bool(self.children)
def leaves(self: T) -> Iterator[T]:
for t in self.preorder_traversal():
if t.is_leaf():
yield t # type: ignore
def __getitem__(self: T, child_index: int) -> T:
return self.children[child_index]
def __iter__(self: T) -> Iterator[T]:
return iter(self.children)
def __len__(self):
return len(self.children)
def __str__(self):
ret = ""
stack = [self]
while stack:
n = stack.pop()
if isinstance(n, str):
ret = f"{ret}{n}"
continue
value_name = str(n.value)
if not n.children:
ret = f"{ret}{value_name}"
else:
if value_name:
ret = f"{ret}{value_name} ["
stack.append("]")
for i, c in reversed(list(enumerate(n.children))):
if i > 0:
stack.append(" ")
stack.append(c)
return ret
IPT = TypeVar("IPT", bound="ImmutableParseTree")
class ImmutableParseTree(Generic[V], ParseTree[V]):
__slots__ = "_children"
def __init__(self: IPT, value: V, children: Iterable[IPT] = ()):
super().__init__(value)
self._children: List[IPT] = list(children)
@property
def children(self: IPT) -> List[IPT]:
return self._children
def clone(self: IPT) -> IPT:
class IPTNode:
def __init__(self, node: IPT, parent: Optional["IPTNode"] = None): # noqa: F821
self.node: IPT = node
self.children: Optional[List[IPT]] = None
self.parent: Optional[IPTNode] = parent
to_clone: List[IPTNode] = [IPTNode(self)]
while to_clone:
ipt_node = to_clone[-1]
if ipt_node.children is None:
ipt_node.children = []
to_clone.extend(IPTNode(child, ipt_node) for child in reversed(ipt_node.node.children))
else:
to_clone.pop()
cloned = self.__class__(value=ipt_node.node.value, children=ipt_node.children)
if ipt_node.parent is not None:
assert ipt_node.parent.children is not None
ipt_node.parent.children.append(cloned)
else:
assert len(to_clone) == 0
return cloned
raise ValueError("This should never be reachable")
MPT = TypeVar("MPT", bound="MutableParseTree")
class MutableParseTree(Generic[V], ImmutableParseTree[V]):
@ImmutableParseTree.children.setter # type: ignore
def children(self: MPT, new_children: List[MPT]):
self._children = new_children
def add_child(self: MPT, new_child: MPT):
self._children.append(new_child)
def __setitem__(self: MPT, child_index: int, new_child: MPT):
self.children[child_index] = new_child
def escape_byte(byte_value: int) -> str:
if byte_value == ord("\n"):
b = "\\n"
elif byte_value == ord("\t"):
b = "\\t"
elif byte_value == ord("\r"):
b = "\\r"
elif byte_value == ord('"'):
b = '\\"'
elif byte_value == ord("\\"):
b = "\\\\"
elif ord(" ") <= byte_value <= ord("~"):
b = chr(byte_value)
else:
b = f"\\x{byte_value:02x}"
return b
def highlight_offset(text: bytes, offset, highlight_length=20) -> str:
length_div_2 = highlight_length // 2
start_offset = max(offset - length_div_2, 0)
end_offset = min(offset + length_div_2, len(text))
before = 0
offset_len = 1
ret = ""
for i, b in enumerate(text[start_offset:end_offset]):
byte_text = escape_byte(b)
if i < offset - start_offset:
before += len(byte_text)
elif i == offset - start_offset:
offset_len = len(byte_text)
ret = f"{ret}{byte_text}"
ret = f"\"{ret}\"\n {' ' * before}{'^' * offset_len}"
return ret
class Terminal:
def __init__(self, terminal: Union[bytes, str]):
if isinstance(terminal, str):
terminal = terminal.encode("utf-8")
self.terminal: bytes = terminal
def __add__(self, other: Union[bytes, str, "Terminal"]) -> "Terminal":
if isinstance(other, Terminal):
other = other.terminal
elif isinstance(other, str):
other = other.encode("utf-8")
return Terminal(self.terminal + other)
def __eq__(self, other):
return isinstance(other, Terminal) and other.terminal == self.terminal
def __hash__(self):
return hash(self.terminal)
def __repr__(self):
return f"{self.__class__.__name__}(terminal={self.terminal!r})"
def __str__(self):
ret = '"'
for i in self.terminal:
ret = f"{ret}{escape_byte(i)}"
return f'{ret}"'
class Start:
def __str__(self):
return "<START>"
N = TypeVar("N", bound=ParseTree[Union[Start, TraceEvent, Terminal]])
def trace_to_tree(
trace: ProgramTrace,
node_type: Type[N] = ParseTree[Union[Start, TraceEvent, Terminal]], # type: ignore
include_terminals: bool = True,
) -> N:
if trace.entrypoint is None:
raise ValueError(f"Trace {trace} does not have an entrypoint!")
root: N = node_type(Start())
entrypoint_node = node_type(trace.entrypoint)
root.children.append(entrypoint_node)
function_stack: List[Tuple[FunctionInvocation, N]] = [(trace.entrypoint, entrypoint_node)]
with tqdm(
unit=" functions", leave=False, desc="extracting a parse tree", total=trace.num_function_calls_that_touched_taint()
) as t:
while function_stack:
function, node = function_stack.pop()
t.update(1)
for bb in tqdm(
function.basic_blocks(), unit=" basic blocks", leave=False, desc=function.function.demangled_name, delay=1.0
):
child_node = node_type(bb)
node.children.append(child_node)
if include_terminals:
for token in bb.taints().regions():
node.children.append(node_type(Terminal(token.value)))
func = bb.called_function
if func is not None:
if not func.touched_taint:
log.debug(f"skipping call to {func.function.demangled_name} because it did not touch taint")
continue
child_node = node_type(func)
node.children.append(child_node)
function_stack.append((func, child_node))
return root
G = TypeVar("G", bound="NonGeneralizedParseTree")
class NonGeneralizedParseTree(MutableParseTree[Union[Start, TraceEvent, Terminal]]):
def __init__(self: G, value: Union[Start, TraceEvent, Terminal], children: Iterable[G] = ()):
super().__init__(value=value, children=children)
self.consumed: List[Tuple[int, int]]
if isinstance(value, BasicBlockEntry):
self.intervals: IntervalTree = IntervalTree(self._consumed_intervals())
else:
self.intervals = IntervalTree()
self._begin: Optional[int] = None
self._end: Optional[int] = None
@property
def begin_offset(self) -> int:
if self._begin is not None:
return self._begin
return self.intervals.begin()
@property
def end_offset(self) -> int:
if self._end is not None:
return self._end
return self.intervals.end()
def terminals(self) -> Iterator[Terminal]:
for leaf in self.leaves(): # type: ignore
assert isinstance(leaf.value, Terminal)
yield leaf.value
def matches(self) -> bytes:
return b"".join(terminal.terminal for terminal in self.terminals())
def verify_bounds(self, check_overlap=True, check_coverage=True, check_missing_children=True):
covered_input_bytes = IntervalTree()
for child in self.children:
if check_overlap and covered_input_bytes.overlaps(child.begin_offset, child.end_offset):
overlap = ", ".join([interval.data for interval in covered_input_bytes[child.begin_offset: child.end_offset]])
raise ValueError(f"Child node {child.value!s} of {self.value!s} overlaps with these siblings: " f"{overlap!r}")
if child.end_offset > child.begin_offset:
covered_input_bytes.addi(child.begin_offset, child.end_offset, str(child.value))
if (
check_coverage
and not self.is_leaf()
and (covered_input_bytes.begin() != self.begin_offset or covered_input_bytes.end() != self.end_offset)
):
raise ValueError(
f"Node {self.value!s} was expected to have bounds ({self.begin_offset}, "
f"{self.end_offset}), but its children only covered bounds "
f"({covered_input_bytes.begin()}, {covered_input_bytes.end()})"
)
if check_missing_children and not self.is_leaf():
covered_input_bytes.merge_overlaps(strict=False)
if len(covered_input_bytes) > 1:
missing = IntervalTree.from_tuples([(self.begin_offset, self.end_offset)]) - covered_input_bytes
missing_str = ", ".join(f"[{i.begin}:{i.end}]" for i in missing)
raise ValueError(f"Node {self.value!s} is missing children that cover these byte ranges: {missing_str}")
def verify(self, string: bytes):
offset: int = 0
remaining: bytes = string
last_non_terminal: str = "<START>"
for node in self.preorder_traversal():
# first, make sure none of our children overlap and that our entire range is covered
node.verify_bounds()
if not isinstance(node.value, Terminal):
last_non_terminal = str(node.value)
continue
terminal = node.value
if not remaining.startswith(terminal.terminal):
raise ValueError(
f"Expected byte sequence {terminal!s} at byte offset {offset} produced by production "
f"{last_non_terminal}, but instead found:\n"
f"{highlight_offset(text=string, offset=offset)}"
)
terminal_length = len(terminal.terminal)
remaining = remaining[terminal_length:]
offset += terminal_length
def simplify(self):
for node in tqdm(self.postorder_traversal(), leave=False, desc="simplifying parse tree", unit=" nodes"):
if len(node.children) == 1:
child = node.children[0]
if isinstance(child.value, BasicBlockEntry) and len(child.children) == 1:
node.children = [child.children[0]]
def _winners(self, to_compare: "NonGeneralizedParseTree") -> Optional[List[int]]:
if self.end_offset <= to_compare.begin_offset:
# we do not have overlap
return None
# record all of our last-used times in the overlap
our_last_used = []
their_last_used = []
for i in range(to_compare.begin_offset, self.end_offset + 1):
our_intervals = self.intervals[i]
if our_intervals:
assert len(our_intervals) == 1
last_used = next(iter(our_intervals)).data
if last_used is None:
last_used = -1
our_last_used.append(last_used)
else:
our_last_used.append(-1)
their_intervals = to_compare.intervals[i]
if their_intervals:
assert len(their_intervals) == 1
last_used = next(iter(their_intervals)).data
if last_used is None:
last_used = -1
their_last_used.append(last_used)
else:
their_last_used.append(-1)
return [our_last - their_last for our_last, their_last in zip(our_last_used, their_last_used)]
def best_partition(self, right_sibling: "NonGeneralizedParseTree") -> Optional[int]:
winners = self._winners(right_sibling)
if winners is None:
# we do not overlap
return None
# TODO: See if we can improve this algorithm
best_point = self.end_offset
best_badness = None
for point in range(0, len(winners) + 1):
# find the optimal overlap point to partition our intervals
badness = 0
for i, winner in enumerate(winners):
if (i < point and winner < 0) or (i >= point and winner > 0):
badness += 1
if best_badness is None or badness < best_badness:
best_point = point
best_badness = badness
return best_point
def best_subset(self, parent: "NonGeneralizedParseTree") -> Tuple[int, int]:
winners = self._winners(parent)
if winners is None:
raise ValueError("The child does not overlap with its parent! This should never happen.")
# TODO: See if we can improve this algorithm
left_offset = 0
right_offset = len(winners)
while left_offset < right_offset and winners[left_offset] < 0:  # bounds check first to avoid IndexError when every position is negative
left_offset += 1
while left_offset < right_offset and winners[right_offset - 1] < 0:
right_offset -= 1
return left_offset, right_offset
def deconflict_sibling(self, right_sibling: "NonGeneralizedParseTree"):
best_point = self.best_partition(right_sibling)
if best_point is not None:
self.intervals.chop(right_sibling.begin_offset + best_point, self.end_offset)
right_sibling.intervals.chop(0, right_sibling.begin_offset + best_point)
def deconflict_parent(self, parent: "NonGeneralizedParseTree"):
left_offset, right_offset = self.best_subset(parent)
self.intervals.chop(self.begin_offset + left_offset, self.end_offset - right_offset)
def bottom_up_pass(self):
# first, remove any children that do not produce a terminal
self.children = [child for child in self.children if child.begin_offset < child.end_offset]
# ensure that none of our children's intervals overlap
for child, right_sibling in zip(self.children, self.children[1:]):
child.deconflict_sibling(right_sibling)
for child in self.children:
# update our intervals based off of the child
self.intervals |= child.intervals
self.intervals.split_overlaps()
self.intervals.merge_overlaps(data_reducer=max)
if __debug__:
self.verify_bounds(check_overlap=True, check_coverage=False, check_missing_children=False)
def top_down_pass(self):
self._begin = self.begin_offset
self._end = self.end_offset
if self._end <= self._begin:
self.children = []
else:
self.intervals = IntervalTree()
self.intervals.addi(self._begin, self._end)
for child in self.children:
# make sure all of our children are within our own interval
child.intervals.chop(0, self._begin)
if child.end_offset > self._end:
child.intervals.chop(self._end, child.end_offset)
if child.begin_offset < child.end_offset:
# did we touch a byte more recently than one of our children? if so, rob them of that terminal
child.deconflict_parent(self)
def _consumed_intervals(self) -> Iterator[Interval]:
if not isinstance(self.value, BasicBlockEntry):
return
for region in self.value.taints().regions():
yield Interval(region.offset, region.offset + region.length, self.value.uid)
def trace_to_non_generalized_tree(trace: ProgramTrace) -> NonGeneralizedParseTree:
tree = trace_to_tree(trace, NonGeneralizedParseTree, False)
inputs = list(trace.inputs)
if len(inputs) != 1:
raise ValueError(f"Trace {trace!r} must have exactly one input; found {len(inputs)}")
inputstr = inputs[0].content
for node in tqdm(
tree.postorder_traversal(),
total=tree.descendants + 1,
leave=False,
desc=" deconflicting parse tree ranges",
unit=" nodes",
):
if isinstance(node.value, Start):
node.intervals[0: len(inputstr)] = 0
node.bottom_up_pass()
for node in tqdm(
tree.preorder_traversal(), total=tree.descendants + 1, leave=False, desc=" finalizing parse tree ranges", unit=" nodes"
):
if isinstance(node.value, Terminal):
continue
node_begin = node.begin_offset
node_end = node.end_offset
node.top_down_pass()
# add terminals
last_end = node_begin
new_children: List[NonGeneralizedParseTree] = []
for child in node.children:
if child.begin_offset >= child.end_offset:
continue
if last_end < child.begin_offset:
terminal = NonGeneralizedParseTree(Terminal(inputstr[last_end: child.begin_offset]))
terminal.intervals.addi(last_end, child.begin_offset)
new_children.append(terminal)
new_children.append(child)
last_end = child.end_offset
if last_end < node_end:
terminal = NonGeneralizedParseTree(Terminal(inputstr[last_end:node_end]))
terminal.intervals.addi(last_end, node_end)
new_children.append(terminal)
assert new_children
node.children = new_children
if __debug__:
tree.verify(inputstr)
return tree
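A small, hedged illustration of the byte-level helpers above (inputs are made up): escape_byte renders one byte printably, and highlight_offset draws a caret under the byte at `offset` in an escaped excerpt of `text`.

if __name__ == "__main__":
    print(escape_byte(ord("\n")))   # -> \n
    print(escape_byte(0x07))        # -> \x07
    # The caret line points at the escaped "\n" at offset 5:
    print(highlight_offset(b"hello\nworld", 5))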
|
11507963
|
from collections import deque
from typing import List

class Solution:
def canVisitAllRooms(self, rooms: List[List[int]]) -> bool:
# BFS from room 0, collecting keys (neighbour room indices) as we go.
visitedRooms, queue, N = set(), deque([0]), len(rooms)
while queue:
currentRoom = queue.popleft()
if currentRoom not in visitedRooms:
for neighbour in rooms[currentRoom]:
queue.append(neighbour)
visitedRooms.add(currentRoom)
return len(visitedRooms) == N
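Hedged quick checks (made-up inputs, not from the original): room 0's key chain reaches every room in the first case but leaves room 2 locked in the second.

if __name__ == "__main__":
    print(Solution().canVisitAllRooms([[1], [2, 3], [], []]))  # True
    print(Solution().canVisitAllRooms([[1], [], [0]]))         # False: room 2 unreachable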
|
11507977
|
ClearReadout = 0
Reset = 1
Configure = 2
Unconfigure = 3
BeginRun = 4
EndRun = 5
BeginStep = 6
EndStep = 7
Enable = 8
Disable = 9
SlowUpdate = 10
Unused_11 = 11
L1Accept = 12
NumberOf = 13
|
11508072
|
import pytest
from botx import File
pytestmark = pytest.mark.asyncio
async def test_sending_command_result(bot, client, message):
await bot.send_message(
"some text",
message.credentials,
)
assert client.notifications[0]
async def test_sending_notification_using_send_message(bot, client, message):
await bot.send_message(
"some text",
message.credentials.copy(update={"sync_id": None}),
)
assert client.notifications[0]
async def test_adding_file(bot, client, message):
sending_file = File.from_string("some content", "file.txt")
await bot.send_message(
"some text",
message.credentials,
file=sending_file.file,
)
command_result = client.notifications[0]
assert command_result.file == sending_file
|
11508075
|
import pytest
from services.data.db_utils import DBResponse, DBPagination
import json
from aiohttp.test_utils import make_mocked_request
from services.ui_backend_service.api.utils import (
format_response, format_response_list,
pagination_query,
builtin_conditions_query,
custom_conditions_query,
resource_conditions,
filter_from_conditions_query
)
pytestmark = [pytest.mark.unit_tests]
def test_format_response():
request = make_mocked_request(
'GET', '/runs?_limit=10', headers={'Host': 'test'})
db_response = DBResponse(
response_code=200, body={"foo": "bar"})
expected_response = {
"data": {"foo": "bar"},
"status": 200,
"links": {
"self": "http://test/runs?_limit=10"
},
"query": {"_limit": "10"},
}
status, response = format_response(request, db_response)
assert json.dumps(response) == json.dumps(expected_response)
assert status == 200
def test_format_response_list():
request = make_mocked_request(
'GET', '/runs?_limit=1&_page=1', headers={'Host': 'test'})
db_response = DBResponse(response_code=200, body=[{"foo": "bar"}])
pagination = DBPagination(limit=1, offset=0, count=1, page=1)
expected_response = {
"data": [{"foo": "bar"}],
"status": 200,
"links": {
"self": "http://test/runs?_limit=1&_page=1",
"first": "http://test/runs?_limit=1&_page=1",
"prev": "http://test/runs?_limit=1&_page=1",
"next": "http://test/runs?_limit=1&_page=2",
"last": None
},
"pages": {
"self": 1,
"first": 1,
"prev": 1,
"next": 2,
"last": None
},
"query": {
"_limit": "1",
"_page": "1"
},
}
status, response = format_response_list(request, db_response, pagination, 1)
assert json.dumps(response) == json.dumps(expected_response)
assert status == 200
def test_format_response_list_next_page_null():
request = make_mocked_request(
'GET', '/runs?_limit=10&_page=2', headers={'Host': 'test'})
db_response = DBResponse(response_code=200, body=[{"foo": "bar"}])
pagination = DBPagination(limit=10, offset=0, count=1, page=2)
expected_response = {
"data": [{"foo": "bar"}],
"status": 200,
"links": {
"self": "http://test/runs?_limit=10&_page=2",
"first": "http://test/runs?_limit=10&_page=1",
"prev": "http://test/runs?_limit=10&_page=1",
"next": None,
"last": None
},
"pages": {
"self": 2,
"first": 1,
"prev": 1,
"next": None,
"last": None
},
"query": {
"_limit": "10",
"_page": "2"
},
}
status, response = format_response_list(request, db_response, pagination, 2)
assert json.dumps(response) == json.dumps(expected_response)
assert status == 200
def test_pagination_query_defaults():
request = make_mocked_request('GET', '/runs')
page, limit, offset, order, groups, group_limit = pagination_query(request=request)
assert page == 1
assert limit == 10
assert offset == 0
assert order is None
assert groups is None
assert group_limit == 10
def test_pagination_query_custom():
request = make_mocked_request(
'GET', '/runs?_limit=5&_page=3&_order=foo&_group=bar')
page, limit, offset, order, groups, group_limit = pagination_query(
request=request, allowed_order=["foo"],
allowed_group=["bar"])
assert page == 3
assert limit == 5
assert offset == 10
assert order == ["\"foo\" DESC"]
assert groups == ["\"bar\""]
assert group_limit == 10
def test_pagination_query_custom_order_asc():
request = make_mocked_request('GET', '/runs?_order=%2Bfoo')
_, _, _, order, _, _ = pagination_query(
request=request, allowed_order=["foo"])
assert order == ["\"foo\" ASC"]
def test_pagination_query_not_allowed():
request = make_mocked_request(
'GET', '/runs?_limit=5&_page=3&_order=none&_group=none')
_, _, _, order, groups, _ = pagination_query(request=request)
assert order is None
assert groups is None
def test_builtin_conditions_query_tags_all():
request = make_mocked_request(
'GET', '/runs?_tags=foo,bar')
conditions, values = builtin_conditions_query(request)
assert len(conditions) == 1
assert conditions[0] == "tags||system_tags ?& array[%s,%s]"
assert len(values) == 2
assert values[0] == "foo"
assert values[1] == "bar"
def test_builtin_conditions_query_tags_all_explicit():
request = make_mocked_request(
'GET', '/runs?_tags:all=foo,bar')
conditions, values = builtin_conditions_query(request)
assert len(conditions) == 1
assert conditions[0] == "tags||system_tags ?& array[%s,%s]"
assert len(values) == 2
assert values[0] == "foo"
assert values[1] == "bar"
def test_builtin_conditions_query_tags_any():
request = make_mocked_request(
'GET', '/runs?_tags:any=foo,bar')
conditions, values = builtin_conditions_query(request)
assert len(conditions) == 1
assert conditions[0] == "tags||system_tags ?| array[%s,%s]"
assert len(values) == 2
assert values[0] == "foo"
assert values[1] == "bar"
def test_builtin_conditions_query_tags_likeall():
request = make_mocked_request(
'GET', '/runs?_tags:likeall=foo,bar')
conditions, values = builtin_conditions_query(request)
assert len(conditions) == 1
assert conditions[0] == "tags||system_tags::text LIKE ALL(array[%s,%s])"
assert len(values) == 2
assert values[0] == "%foo%"
assert values[1] == "%bar%"
def test_builtin_conditions_query_tags_likeany():
request = make_mocked_request(
'GET', '/runs?_tags:likeany=foo,bar')
conditions, values = builtin_conditions_query(request)
assert len(conditions) == 1
assert conditions[0] == "tags||system_tags::text LIKE ANY(array[%s,%s])"
assert len(values) == 2
assert values[0] == "%foo%"
assert values[1] == "%bar%"
def test_custom_conditions_query():
operators = {
"flow_id": ["\"flow_id\" = %s", "{}"],
"flow_id:eq": ["\"flow_id\" = %s", "{}"],
"flow_id:ne": ["\"flow_id\" != %s", "{}"],
"flow_id:lt": ["\"flow_id\" < %s", "{}"],
"flow_id:le": ["\"flow_id\" <= %s", "{}"],
"flow_id:gt": ["\"flow_id\" > %s", "{}"],
"flow_id:ge": ["\"flow_id\" >= %s", "{}"],
"flow_id:co": ["\"flow_id\" ILIKE %s", "%{}%"],
"flow_id:sw": ["\"flow_id\" ILIKE %s", "{}%"],
"flow_id:ew": ["\"flow_id\" ILIKE %s", "%{}"]
}
for op, query in operators.items():
where = query[0]
val = query[1]
request = make_mocked_request(
"GET", "/runs?{0}=HelloFlow,AnotherFlow&_tags=foo&user_name=dipper&{0}=ThirdFlow".format(op))
conditions, values = custom_conditions_query(
request, allowed_keys=["flow_id"])
assert len(conditions) == 2
assert conditions[0] == "({0} OR {0})".format(where)
assert conditions[1] == "({0})".format(where)
assert len(values) == 3
assert values[0] == val.format("HelloFlow")
assert values[1] == val.format("AnotherFlow")
assert values[2] == val.format("ThirdFlow")
def test_custom_conditions_query_allow_any_key():
request = make_mocked_request(
"GET", "/runs?flow_id=HelloFlow&status=completed")
conditions, values = custom_conditions_query(
request, allowed_keys=None)
assert len(conditions) == 2
assert conditions[0] == "(\"flow_id\" = %s)"
assert conditions[1] == "(\"status\" = %s)"
assert len(values) == 2
assert values[0] == "HelloFlow"
assert values[1] == "completed"
def test_resource_conditions():
path, query, _ = resource_conditions(
"/runs?flow_id=HelloFlow&status=running")
assert path == "/runs"
assert query.get("flow_id") == "HelloFlow"
assert query.get("status") == "running"
# TODO: test out the returned filter_fn as well?
def test_filter_from_conditions_query():
# setup a test list for filtering
_run_1 = {"run": "test_1", "ts_epoch": 6, "tags": ["a", "b"], "system_tags": ["1", "2"]}
_run_2 = {"run": "test_2", "ts_epoch": 1, "tags": ["b", "c", "-a-"], "system_tags": ["2", "3"]}
_run_3 = {"run": "test", "ts_epoch": 6, "tags": ["a", "c", "-b-"], "system_tags": ["1", "3"]}
_test_data = [_run_1, _run_2, _run_3]
# mock request for AND
request = make_mocked_request(
'GET', '/?run=test&ts_epoch:gt=1',
headers={'Host': 'test'}
)
_filter = filter_from_conditions_query(request, allowed_keys=['run', 'ts_epoch'])
_list = list(filter(_filter, _test_data))
assert _list == [_run_3]
# mock request for combined AND, OR
request = make_mocked_request(
'GET', '/?run=test,test_1&ts_epoch:gt=1',
headers={'Host': 'test'}
)
_filter = filter_from_conditions_query(request, allowed_keys=['run', 'ts_epoch'])
_list = list(filter(_filter, _test_data))
assert _list == [_run_1, _run_3]
# mock request for _tags:any filter
request = make_mocked_request(
'GET', '/?_tags:any=a,2',
headers={'Host': 'test'}
)
_filter = filter_from_conditions_query(request, allowed_keys=['_tags'])
_list = list(filter(_filter, _test_data))
assert _list == [_run_1, _run_2, _run_3]
# mock request for _tags:all filter
request = make_mocked_request(
'GET', '/?_tags:all=a,2',
headers={'Host': 'test'}
)
_filter = filter_from_conditions_query(request, allowed_keys=['_tags'])
_list = list(filter(_filter, _test_data))
assert _list == [_run_1]
# mock request for _tags:likeany filter
request = make_mocked_request(
'GET', '/?_tags:likeany=a',
headers={'Host': 'test'}
)
_filter = filter_from_conditions_query(request, allowed_keys=['_tags'])
_list = list(filter(_filter, _test_data))
assert _list == [_run_1, _run_2, _run_3]
# mock request for _tags:likeall filter
request = make_mocked_request(
'GET', '/?_tags:likeall=b,3',
headers={'Host': 'test'}
)
_filter = filter_from_conditions_query(request, allowed_keys=['_tags'])
_list = list(filter(_filter, _test_data))
assert _list == [_run_2, _run_3]
# test non-existent fields in query (should still work, not match any records)
request = make_mocked_request(
'GET', '/?nonexistent:eq=b,3',
headers={'Host': 'test'}
)
_filter = filter_from_conditions_query(request, allowed_keys=None)
_list = list(filter(_filter, _test_data))
assert _list == []
# test comparison operators with non-numeric values
request = make_mocked_request(
'GET', '/?ts_epoch:gt=*#*#',
headers={'Host': 'test'}
)
_filter = filter_from_conditions_query(request, allowed_keys=None)
_list = list(filter(_filter, _test_data))
assert _list == []
# test no-op filter with no params
request = make_mocked_request(
'GET', '/',
headers={'Host': 'test'}
)
_filter = filter_from_conditions_query(request, allowed_keys=None)
_list = list(filter(_filter, _test_data))
assert _list == [_run_1, _run_2, _run_3]
|
11508133
|
from .basic import BasicAPIv2Connector
from .standard import APIv2Connector
from .model import ModelAPIv2Connector
|
11508172
|
import dash_bootstrap_components as dbc
from dash import html
pagination = html.Div(
[
html.Div("Small"),
dbc.Pagination(max_value=5, size="sm"),
html.Div("Default"),
dbc.Pagination(max_value=5),
html.Div("Large"),
dbc.Pagination(max_value=5, size="lg"),
]
)
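A hedged sketch of mounting the layout in a minimal app. Only `pagination` comes from this file; the Bootstrap theme and the run call are assumptions (recent Dash uses app.run, older releases use app.run_server).

from dash import Dash

app = Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
app.layout = pagination

if __name__ == "__main__":
    app.run(debug=True)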
|
11508182
|
from eth_account import (
Account,
)
from eth_utils import (
apply_to_return_value,
is_checksum_address,
is_string,
to_checksum_address,
)
from hexbytes import (
HexBytes,
)
from web3s.contract import (
Contract,
)
from web3s.iban import (
Iban,
)
from web3s.module import (
Module,
)
from web3s.utils.blocks import (
select_method_for_block_identifier,
)
from web3s.utils.decorators import (
async_apply_to_return_value,
deprecated_for,
)
from web3s.utils.empty import (
empty,
)
from web3s.utils.encoding import (
to_hex,
)
from web3s.utils.filters import (
BlockFilter,
LogFilter,
TransactionFilter,
)
from web3s.utils.toolz import (
assoc,
merge,
)
from web3s.utils.transactions import (
assert_valid_transaction_params,
extract_valid_transaction_params,
get_buffered_gas_estimate,
get_required_transaction,
replace_transaction,
wait_for_transaction_receipt,
)
class Eth(Module):
account = Account()
defaultAccount = empty
defaultBlock = "latest"
defaultContractFactory = Contract
iban = Iban
gasPriceStrategy = None
@deprecated_for("doing nothing at all")
def enable_unaudited_features(self):
pass
def namereg(self):
raise NotImplementedError()
def icapNamereg(self):
raise NotImplementedError()
@property
async def protocolVersion(self):
return await self.web3s.manager.request_blocking("eth_protocolVersion", [])
@property
async def syncing(self):
return await self.web3s.manager.request_blocking("eth_syncing", [])
@property
async def coinbase(self):
return await self.web3s.manager.request_blocking("eth_coinbase", [])
@property
async def mining(self):
return await self.web3s.manager.request_blocking("eth_mining", [])
@property
async def hashrate(self):
return await self.web3s.manager.request_blocking("eth_hashrate", [])
@property
async def chainId(self):
result = await self.web3s.manager.request_blocking("eth_chainId", [])
return int(result, base=16)
@property
async def gasPrice(self):
return await self.web3s.manager.request_blocking("eth_gasPrice", [])
@property
async def accounts(self):
return await self.web3s.manager.request_blocking("eth_accounts", [])
@property
async def blockNumber(self):
return await self.web3s.manager.request_blocking("eth_blockNumber", [])
async def getBalance(self, account, block_identifier=None):
if block_identifier is None:
block_identifier = self.defaultBlock
return await self.web3s.manager.request_blocking(
"eth_getBalance",
[account, block_identifier],
)
async def getStorageAt(self, account, position, block_identifier=None):
if block_identifier is None:
block_identifier = self.defaultBlock
return await self.web3s.manager.request_blocking(
"eth_getStorageAt",
[account, position, block_identifier]
)
async def getCode(self, account, block_identifier=None):
if block_identifier is None:
block_identifier = self.defaultBlock
return await self.web3s.manager.request_blocking(
"eth_getCode",
[account, block_identifier],
)
async def getBlock(self, block_identifier, full_transactions=False):
"""
`eth_getBlockByHash`
`eth_getBlockByNumber`
"""
method = select_method_for_block_identifier(
block_identifier,
if_predefined='eth_getBlockByNumber',
if_hash='eth_getBlockByHash',
if_number='eth_getBlockByNumber',
)
return await self.web3s.manager.request_blocking(
method,
[block_identifier, full_transactions],
)
async def getBlockTransactionCount(self, block_identifier):
"""
`eth_getBlockTransactionCountByHash`
`eth_getBlockTransactionCountByNumber`
"""
method = select_method_for_block_identifier(
block_identifier,
if_predefined='eth_getBlockTransactionCountByNumber',
if_hash='eth_getBlockTransactionCountByHash',
if_number='eth_getBlockTransactionCountByNumber',
)
return await self.web3s.manager.request_blocking(
method,
[block_identifier],
)
async def getUncleCount(self, block_identifier):
"""
`eth_getUncleCountByBlockHash`
`eth_getUncleCountByBlockNumber`
"""
method = select_method_for_block_identifier(
block_identifier,
if_predefined='eth_getUncleCountByBlockNumber',
if_hash='eth_getUncleCountByBlockHash',
if_number='eth_getUncleCountByBlockNumber',
)
return await self.web3s.manager.request_blocking(
method,
[block_identifier],
)
async def getUncleByBlock(self, block_identifier, uncle_index):
"""
`eth_getUncleByBlockHashAndIndex`
`eth_getUncleByBlockNumberAndIndex`
"""
method = select_method_for_block_identifier(
block_identifier,
if_predefined='eth_getUncleByBlockNumberAndIndex',
if_hash='eth_getUncleByBlockHashAndIndex',
if_number='eth_getUncleByBlockNumberAndIndex',
)
return await self.web3s.manager.request_blocking(
method,
[block_identifier, uncle_index],
)
async def getTransaction(self, transaction_hash):
return await self.web3s.manager.request_blocking(
"eth_getTransactionByHash",
[transaction_hash],
)
@deprecated_for("w3.eth.getTransactionByBlock")
async def getTransactionFromBlock(self, block_identifier, transaction_index):
"""
Alias for the method getTransactionByBlock.
Deprecated to maintain naming consistency with the json-rpc API.
"""
return await self.getTransactionByBlock(block_identifier, transaction_index)
async def getTransactionByBlock(self, block_identifier, transaction_index):
"""
`eth_getTransactionByBlockHashAndIndex`
`eth_getTransactionByBlockNumberAndIndex`
"""
method = select_method_for_block_identifier(
block_identifier,
if_predefined='eth_getTransactionByBlockNumberAndIndex',
if_hash='eth_getTransactionByBlockHashAndIndex',
if_number='eth_getTransactionByBlockNumberAndIndex',
)
return await self.web3s.manager.request_blocking(
method,
[block_identifier, transaction_index],
)
async def waitForTransactionReceipt(self, transaction_hash, timeout=120):
return await wait_for_transaction_receipt(self.web3s, transaction_hash, timeout)
async def getTransactionReceipt(self, transaction_hash):
return await self.web3s.manager.request_blocking(
"eth_getTransactionReceipt",
[transaction_hash],
)
async def getTransactionCount(self, account, block_identifier=None):
if block_identifier is None:
block_identifier = self.defaultBlock
return await self.web3s.manager.request_blocking(
"eth_getTransactionCount",
[
account,
block_identifier,
],
)
async def replaceTransaction(self, transaction_hash, new_transaction):
current_transaction = await get_required_transaction(self.web3s, transaction_hash)
return await replace_transaction(self.web3s, current_transaction, new_transaction)
async def modifyTransaction(self, transaction_hash, **transaction_params):
assert_valid_transaction_params(transaction_params)
current_transaction = await get_required_transaction(self.web3s, transaction_hash)
current_transaction_params = extract_valid_transaction_params(current_transaction)
new_transaction = merge(current_transaction_params, transaction_params)
return await replace_transaction(self.web3s, current_transaction, new_transaction)
async def sendTransaction(self, transaction):
# TODO: move to middleware
if 'from' not in transaction and is_checksum_address(self.defaultAccount):
transaction = assoc(transaction, 'from', self.defaultAccount)
# TODO: move gas estimation in middleware
if 'gas' not in transaction:
transaction = assoc(
transaction,
'gas',
get_buffered_gas_estimate(self.web3s, transaction),
)
return await self.web3s.manager.request_blocking(
"eth_sendTransaction",
[transaction],
)
async def sendRawTransaction(self, raw_transaction):
return await self.web3s.manager.request_blocking(
"eth_sendRawTransaction",
[raw_transaction],
)
async def debugTraceTransaction(self, txHash):
return await self.web3s.manager.request_blocking(
"debug_traceTransaction",
[txHash],
)
async def sign(self, account, data=None, hexstr=None, text=None):
message_hex = to_hex(data, hexstr=hexstr, text=text)
return await self.web3s.manager.request_blocking(
"eth_sign", [account, message_hex],
)
@async_apply_to_return_value(HexBytes)
async def call(self, transaction, block_identifier=None):
# TODO: move to middleware
if 'from' not in transaction and is_checksum_address(self.defaultAccount):
transaction = assoc(transaction, 'from', self.defaultAccount)
# TODO: move to middleware
if block_identifier is None:
block_identifier = self.defaultBlock
result = await self.web3s.manager.request_blocking(
"eth_call",
[transaction, block_identifier],
)
return result
async def estimateGas(self, transaction):
# TODO: move to middleware
if 'from' not in transaction and is_checksum_address(self.defaultAccount):
transaction = assoc(transaction, 'from', self.defaultAccount)
return await self.web3s.manager.request_blocking(
"eth_estimateGas",
[transaction],
)
async def filter(self, filter_params=None, filter_id=None):
if filter_id and filter_params:
raise TypeError(
"Ambiguous invocation: provide either a `filter_params` or a `filter_id` argument. "
"Both were supplied."
)
if is_string(filter_params):
if filter_params == "latest":
filter_id = await self.web3s.manager.request_blocking(
"eth_newBlockFilter", [],
)
return BlockFilter(self.web3s, filter_id)
elif filter_params == "pending":
filter_id = await self.web3s.manager.request_blocking(
"eth_newPendingTransactionFilter", [],
)
return TransactionFilter(self.web3s, filter_id)
else:
raise ValueError(
"The filter API only accepts the values of `pending` or "
"`latest` for string based filters"
)
elif isinstance(filter_params, dict):
_filter_id = await self.web3s.manager.request_blocking(
"eth_newFilter",
[filter_params],
)
return LogFilter(self.web3s, _filter_id)
elif filter_id and not filter_params:
return LogFilter(self.web3s, filter_id)
else:
raise TypeError("Must provide either filter_params as a string or "
"a valid filter object, or a filter_id as a string "
"or hex.")
async def getFilterChanges(self, filter_id):
return await self.web3s.manager.request_blocking(
"eth_getFilterChanges", [filter_id],
)
async def getFilterLogs(self, filter_id):
return await self.web3s.manager.request_blocking(
"eth_getFilterLogs", [filter_id],
)
async def getLogs(self, filter_params):
return await self.web3s.manager.request_blocking(
"eth_getLogs", [filter_params],
)
async def uninstallFilter(self, filter_id):
return await self.web3s.manager.request_blocking(
"eth_uninstallFilter", [filter_id],
)
def contract(self,
address=None,
**kwargs):
if address in self.web3s.contracts:
return self.web3s.contracts[address]
ContractFactoryClass = kwargs.pop('ContractFactoryClass', self.defaultContractFactory)
ContractFactory = ContractFactoryClass.factory(self.web3s, **kwargs)
if address:
# Cache and return the same instance (the original created two).
contract = ContractFactory(address)
self.web3s.contracts[address] = contract
return contract
else:
return ContractFactory
def setContractFactory(self, contractFactory):
self.defaultContractFactory = contractFactory
async def getCompilers(self):
return await self.web3s.manager.request_blocking("eth_getCompilers", [])
async def getWork(self):
return await self.web3s.manager.request_blocking("eth_getWork", [])
def generateGasPrice(self, transaction_params=None):
if self.gasPriceStrategy:
return self.gasPriceStrategy(self.web3s, transaction_params)
def setGasPriceStrategy(self, gas_price_strategy):
self.gasPriceStrategy = gas_price_strategy
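A hedged usage sketch. Assumption: a configured web3s client named `w3s` exposes this module as `w3s.eth`; its construction is not shown in this file. The async properties and methods above are awaited like any coroutine.

import asyncio

async def show_latest_block(w3s):
    number = await w3s.eth.blockNumber
    block = await w3s.eth.getBlock(number)
    print(number, block)

# e.g. asyncio.run(show_latest_block(w3s))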
|
11508190
|
from django.contrib.auth.models import Group
from django.core.exceptions import PermissionDenied
from rest_framework import viewsets
from vocgui.models import TrainingSet, GroupAPIKey
from vocgui.serializers import GroupSerializer
from vocgui.permissions import VerifyGroupKey
from vocgui.utils import get_key
class GroupViewSet(viewsets.ModelViewSet):
"""
Defines a view set for django's auth Group model.
Inherits from `viewsets.ModelViewSet` and defines queryset
and serializers.
"""
permission_classes = [VerifyGroupKey]
serializer_class = GroupSerializer
http_method_names = ["get"]
def get_queryset(self):
"""
Define the custom queryset: only the group tied to the request's API key.
:return: (filtered) queryset
:rtype: QuerySet
"""
if getattr(self, "swagger_fake_view", False):
return TrainingSet.objects.none()
key = get_key(self.request)
if not key:
raise PermissionDenied()
api_key_object = GroupAPIKey.objects.get_from_key(key)
if not api_key_object:
raise PermissionDenied()
queryset = Group.objects.filter(id=api_key_object.organization_id)
return queryset
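A hedged wiring sketch: registering the view set with a standard DRF router. The URL prefix and basename are assumptions, not taken from this file; an explicit basename is needed because the class sets no `queryset` attribute.

from rest_framework import routers

router = routers.DefaultRouter()
router.register(r"groups", GroupViewSet, basename="group")
urlpatterns = router.urls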
|
11508220
|
import sys
import json
from ..verifier import Proof
from .utils import g2_to_sol, g1_to_sol
def main(vk_filename, name='_getStaticProof'):
"""Outputs the solidity code necessary to instansiate a ProofWithInput variable"""
with open(vk_filename, 'r') as handle:
proof = Proof.from_dict(json.load(handle))
out = [
"\tfunction %s (Verifier.ProofWithInput memory output)" % (name),
"\t\tinternal pure",
"\t{",
"\t\tVerifier.Proof memory proof = output.proof;"
]
for k in proof.G2_POINTS:
x = getattr(proof, k)
out.append("\t\tproof.%s = %s;" % (k, g2_to_sol(x)))
for k in proof.G1_POINTS:
x = getattr(proof, k)
out.append("\t\tproof.%s = %s;" % (k, g1_to_sol(x)))
out.append("\t\toutput.input = new uint256[](%d);" % (len(proof.input),))
for i, v in enumerate(proof.input):
out.append("\t\toutput.input[%d] = %s;" % (i, hex(v)))
out.append("\t}")
print('\n'.join(out))
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: ethsnarks.cli.proof2sol <proof.json> [func-name]")
print("Outputs Solidity code, depending on Verifier.sol, which can be included in your code")
sys.exit(1)
sys.exit(main(*sys.argv[1:]))
|
11508236
|
import asyncio
import psutil
import logging as log
import time
from typing import Dict, Iterable
from dataclasses import dataclass
from collections import defaultdict
@dataclass
class WatchedApp:
id: str
dir: str
is_game: bool = True
def __eq__(self, other):
if isinstance(other, WatchedApp):
return self.id == other.id
elif isinstance(other, str):
return self.id == other
else:
raise TypeError(f"Trying to compare {type(self)} with {type(other)}")
def __hash__(self):
return hash(self.id)
class _ProcessWatcher:
"""Low level methods"""
def __init__(self):
self._watched_apps = defaultdict(set) # {WatchedApp: set([proc1, proc2, ...])}
self._cache = {}
@property
def watched_games(self):
return {k: v for k, v in self._watched_apps.items() if k.is_game}
@watched_games.setter
def watched_games(self, to_watch: Dict[str, str]):
# remove games not present in to_watch
for app in list(self._watched_apps.keys()):
if app.is_game and app.id not in to_watch:
del self._watched_apps[app]
# add games from to_watch keeping its processes if already present
for game_id, path in to_watch.items():
self._watched_apps.setdefault(WatchedApp(game_id, path), set())
def _get_running_games(self):
self.__remove_processes_if_dead()
return set([game.id for game, procs in self.watched_games.items() if procs])
def _is_app_tracked_and_running(self, app):
if app in self._watched_apps:
for proc in self._watched_apps[app]:
                if proc.is_running():
return True
return False
def _search_in_all(self):
"""Fat check"""
log.debug(f'Performing check for all processes')
for proc in psutil.process_iter(ad_value=''):
self.__match_process(proc)
async def _search_in_all_slowly(self, interval=0.02):
"""Fat check with async intervals; 0.02 lasts a few seconds"""
log.debug(f'Performing async check in all processes; interval: {interval}')
for proc in psutil.process_iter(ad_value=''):
self.__match_process(proc)
await asyncio.sleep(interval)
    def _search_in_children(self, procs: Set[psutil.Process], recursive=True):
"""Cache only child processes because process_iter has its own module level cache"""
found = False
for proc in procs.copy():
try:
for child in proc.children(recursive=recursive):
if child in self._cache:
found |= self.__match_process(self._cache[child])
else:
found |= self.__match_process(child)
self._cache[child] = child
except (psutil.AccessDenied, psutil.NoSuchProcess) as e:
                log.warning(f'Getting children of {proc} has failed: {e}')
return found
def __match_process(self, proc):
for game in self._watched_apps:
try:
path = proc.exe()
except (psutil.AccessDenied, psutil.NoSuchProcess):
pass
else:
if not path:
return False
elif game.dir in path:
self._watched_apps[game].add(proc)
return True
return False
def __remove_processes_if_dead(self):
for game, processes in self._watched_apps.items():
# work on copy to avoid adding processes during iteration
for proc in processes.copy():
if not proc.is_running() or proc.status() == psutil.STATUS_ZOMBIE:
log.debug(f'Process {proc} is dead')
self._watched_apps[game].remove(proc)
class ProcessWatcher(_ProcessWatcher):
_LAUNCHER_ID = '__launcher__'
    def __init__(self, launcher_identifier):
        super().__init__()
        # merely accessing the defaultdict registers the launcher entry with an empty process set
        self._watched_apps[WatchedApp(self._LAUNCHER_ID, launcher_identifier, False)]
self._launcher_children_cache = set()
# self._search_in_all()
@property
def _launcher(self):
return self._watched_apps[self._LAUNCHER_ID]
def _is_launcher_tracked_and_running(self):
return self._is_app_tracked_and_running(self._LAUNCHER_ID)
def is_launcher_running(self):
if self._is_launcher_tracked_and_running():
return True
self._search_in_all()
return self._is_launcher_tracked_and_running()
    async def _poll_until_launcher_start(self, timeout, long_interval):
start = time.time()
while time.time() - start < timeout:
if self._is_launcher_tracked_and_running():
return True
self._search_in_all()
await asyncio.sleep(long_interval)
return False
    async def poll_until_game_start(self, game_id, timeout, sint, lint):
        """
        :param sint: interval between checking launcher children
        :param lint: (longer) interval between checking if launcher exists
        """
log.debug(f'Starting wait for game {game_id} process')
start = time.time()
while time.time() - start < timeout:
            found = await self._poll_until_launcher_start(timeout, lint)
if found:
self._search_in_children(self._launcher)
if self._watched_apps[game_id]:
log.debug(f'Game process found in {time.time() - start}s')
return True
await asyncio.sleep(sint)
        self._search_in_all()
        if self._watched_apps[game_id]:
            log.debug(f'Game process found in the final fallback parsing all processes')
            return True
        return False
def get_running_games(self, check_under_launcher):
"""Return set of ids of currently running games.
Note: does not actively look for launcher
"""
if check_under_launcher and self._is_launcher_tracked_and_running():
self._search_in_children(self._launcher, recursive=True)
return self._get_running_games()
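# Minimal usage sketch (paths, ids and intervals are illustrative assumptions):
# watcher = ProcessWatcher("/path/to/launcher")
# watcher.watched_games = {"game_1": "/path/to/game_1"}
# started = asyncio.run(watcher.poll_until_game_start("game_1", timeout=60, sint=1, lint=5))
# running = watcher.get_running_games(check_under_launcher=True)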
|
11508245
|
from .loggers import logger
import random
from sc2 import run_game, Race, maps, Difficulty
from sc2.player import Bot, Computer
class GameLauncher:
def __init__(self, bot, use_model, model_path, map_name, realtime):
        logger.debug(f'GameLauncher initialized with map {map_name}')
self.map = map_name
self.bot = bot
self.use_model = use_model
self.model_path = model_path
self.realtime = realtime
self.difficulty_dict = {
'easy': Difficulty.Easy,
'medium': Difficulty.Medium,
'hard': Difficulty.Hard
}
self.race_dict = {
'zerg': Race.Zerg,
'terran': Race.Terran,
'protoss': Race.Protoss
}
def create_bot(self, bot_title, train_data_tensor):
logger.debug(f'Create bot with model {self.model_path}')
return Bot(
Race.Protoss,
self.bot(train_data_tensor, self.use_model, bot_title, self.model_path)
)
def start_game(self, difficulty, race, train_data_tensor):
result = run_game(maps.get(self.map), [
self.create_bot('bot 1', train_data_tensor),
self.create_computer(difficulty, race)
], realtime=self.realtime)
return result
def get_train_data(self):
return self.bot.get_data()
def create_computer(self, d, r):
logger.debug(f'To compete: {self.race_dict[r]} {self.difficulty_dict[d]}')
return Computer(self.race_dict[r], self.difficulty_dict[d])
|
11508247
|
import albumentations as albu
def get_tr_augmentation(img_size):
augmentations = [
albu.HorizontalFlip(p=0.5),
albu.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0),
albu.PadIfNeeded(min_height=img_size[0], min_width=img_size[1], always_apply=True, border_mode=0),
albu.RandomCrop(height=img_size[0], width=img_size[1], always_apply=True),
albu.IAAAdditiveGaussianNoise(p=0.2),
albu.OneOf(
[
albu.CLAHE(p=1),
albu.RandomBrightness(p=1),
albu.RandomGamma(p=1),
],
p=0.9,
),
albu.OneOf(
[
albu.IAASharpen(p=1),
albu.Blur(blur_limit=3, p=1),
albu.MotionBlur(blur_limit=3, p=1),
],
p=0.9,
),
albu.OneOf(
[
albu.RandomContrast(p=1),
albu.HueSaturationValue(p=1),
],
p=0.9,
)
]
return augmentations
# When loading surface normals,
# augmentations which apply affine transformations are not used
def get_tr_augmentation_normals(img_size):
augmentations = [
albu.PadIfNeeded(min_height=img_size[0], min_width=img_size[1], always_apply=True, border_mode=0),
albu.RandomCrop(height=img_size[0], width=img_size[1], always_apply=True),
albu.IAAAdditiveGaussianNoise(p=0.2),
albu.OneOf(
[
albu.CLAHE(p=1),
albu.RandomBrightness(p=1),
albu.RandomGamma(p=1),
],
p=0.9,
),
albu.OneOf(
[
albu.IAASharpen(p=1),
albu.Blur(blur_limit=3, p=1),
albu.MotionBlur(blur_limit=3, p=1),
],
p=0.9,
),
albu.OneOf(
[
albu.RandomContrast(p=1),
albu.HueSaturationValue(p=1),
],
p=0.9,
)
]
return augmentations
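# Composition sketch: the lists above are meant to be wrapped in a pipeline
# (standard albumentations usage; the `image` variable is illustrative):
# pipeline = albu.Compose(get_tr_augmentation((320, 320)))
# augmented = pipeline(image=image)["image"]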
|
11508258
|
import logging
from xia2.Driver.DriverFactory import DriverFactory
logger = logging.getLogger("xia2.Wrappers.Dials.ExportXDS")
def ExportXDS(DriverType=None):
"""A factory for ExportXDSWrapper classes."""
DriverInstance = DriverFactory.Driver(DriverType)
class ExportXDSWrapper(DriverInstance.__class__):
def __init__(self):
DriverInstance.__class__.__init__(self)
self.set_executable("dials.export")
            self._experiments_filename = None
def set_experiments_filename(self, experiments_filename):
self._experiments_filename = experiments_filename
def run(self):
logger.debug("Running dials.export")
self.clear_command_line()
self.add_command_line(self._experiments_filename)
self.add_command_line("format=xds")
self.start()
self.close_wait()
self.check_for_errors()
return ExportXDSWrapper()
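# Usage sketch (the filename is illustrative):
# exporter = ExportXDS()
# exporter.set_experiments_filename("integrated.expt")
# exporter.run()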
|
11508304
|
from . import datasets
from . import domain_term
from . import domain_thesaurus
from . import phrase_detection
from . import semantic_related_word
from . import utils
from . import word_discrimination
|
11508329
|
import framework
from framework import TestOptions
import os
import shutil
import cl_bindgen.mangler as mangler
from cl_bindgen.processfile import ProcessOptions, process_file
import cl_bindgen.util as util
def make_default_options():
u_mangler = mangler.UnderscoreMangler()
k_mangler = mangler.KeywordMangler()
const_mangler = mangler.ConstantMangler()
# manglers are applied in the order that they are given in these lists:
# enum manglers transform enum fields, e.g. FOO, BAR in enum { FOO, BAR }
enum_manglers = [k_mangler, u_mangler]
# type mangers are applied to struct names, function names, and type names
type_manglers = [u_mangler]
# name manglers are applied to parameters and variables
name_manglers = [u_mangler]
# typedef manglers are applied to typedefs
typedef_manglers = [u_mangler]
constant_manglers = [u_mangler, const_mangler]
options = ProcessOptions(typedef_manglers=typedef_manglers,
enum_manglers=enum_manglers,
type_manglers=type_manglers,
name_manglers=name_manglers,
constant_manglers=constant_manglers)
return options
def make_gen_fn():
options = make_default_options()
def gen_fn(inputfile, outputfile):
options.output = outputfile
process_file(inputfile, options)
return gen_fn
tests = [
('inputs/simple_struct.h', 'outputs/simple-struct.lisp', {}),
('inputs/nested_struct.h', 'outputs/nested-struct.lisp', {}),
('inputs/function_pointer.h', 'outputs/function-pointer.lisp', {}),
]
def test_file_generation():
cur_dir = os.getcwd()
file_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(file_dir)
output_dir = ".output"
success = framework.run_tests(tests, make_gen_fn(), output_dir, os.sys.stdout)
if success:
shutil.rmtree(".output")
os.chdir(cur_dir)
return success
if __name__ == "__main__":
success = test_file_generation()
if success:
exit(0)
else:
exit(1)
|
11508351
|
import arrow
from test import get_user_session, cassette
def test_should_get_groups():
session = get_user_session()
with cassette('fixtures/resources/groups/list_groups/list_groups.yaml'):
page = session.groups.list()
assert len(page.items) == 2
assert page.count == 2
assert page.items[0].id == '164d48fb-2343-332d-b566-1a4884a992e4'
assert page.items[0].name == 'Basket weaving'
assert page.items[0].description == 'All of the best papers about weaving baskets'
assert page.items[0].disciplines == ['Arts and Literature']
assert page.items[0].tags == ['baskets', 'weaving']
assert page.items[0].webpage == 'http://example.com/baskets'
assert page.items[0].created == arrow.get(2014, 5, 20, 11, 40, 22)
assert page.items[0].link == 'http://www.mendeley.com/groups/4499471/basket-weaving/'
assert page.items[0].access_level == 'public'
assert page.items[0].role == 'member'
assert page.items[0].photo.original == \
'http://s3.amazonaws.com/mendeley-photos/73/e3/73e38a6a2cad3d760c04c1f7ef8a35ecf3f2d110.png'
assert page.items[0].photo.standard == \
'http://s3.amazonaws.com/mendeley-photos/73/e3/73e38a6a2cad3d760c04c1f7ef8a35ecf3f2d110-standard.jpg'
assert page.items[0].photo.square == \
'http://s3.amazonaws.com/mendeley-photos/73/e3/73e38a6a2cad3d760c04c1f7ef8a35ecf3f2d110-square.jpg'
assert page.items[0].owner.id == '3e71f6e3-e2b4-3f20-a873-da62554c5c38'
assert page.items[0].owner.display_name == '<NAME>, PhD'
assert page.items[1].id == 'bcb12b97-db8a-3c1d-b696-d99ed4371175'
assert page.items[1].name == 'Python SDK Test Group'
assert page.items[1].description == 'Test group for the Mendeley Python SDK'
assert page.items[1].disciplines == ['Computer and Information Science', 'Humanities']
assert page.items[1].tags == ['python', 'sdk']
assert page.items[1].webpage == 'http://dev.mendeley.com'
assert page.items[1].created == arrow.get(2014, 8, 27, 9, 40, 41)
assert page.items[1].link == 'http://www.mendeley.com/groups/4779311/python-sdk-test-group/'
assert page.items[1].access_level == 'public'
assert page.items[1].role == 'owner'
assert page.items[1].photo.original == \
'http://s3.amazonaws.com/mendeley-photos/a0/20/a020f9fd30af0029c059c45535ad231d3d0d055a.png'
assert page.items[1].photo.standard == \
'http://s3.amazonaws.com/mendeley-photos/a0/20/a020f9fd30af0029c059c45535ad231d3d0d055a-standard.jpg'
assert page.items[1].photo.square == \
'http://s3.amazonaws.com/mendeley-photos/a0/20/a020f9fd30af0029c059c45535ad231d3d0d055a-square.jpg'
assert page.items[1].owner.id == '9930207c-c19f-3de0-b531-86bd4388fa94'
assert page.items[1].owner.display_name == '<NAME>'
def test_should_page_through_groups():
session = get_user_session()
with cassette('fixtures/resources/groups/list_groups/page_through_groups.yaml'):
first_page = session.groups.list(page_size=1)
assert len(first_page.items) == 1
assert first_page.count == 2
assert first_page.items[0].id == '164d48fb-2343-332d-b566-1a4884a992e4'
assert first_page.items[0].name == 'Basket weaving'
assert first_page.items[0].owner.id == '3e71f6e3-e2b4-3f20-a873-da62554c5c38'
assert first_page.items[0].owner.display_name == '<NAME>, PhD'
second_page = first_page.next_page
assert len(second_page.items) == 1
assert second_page.count == 2
assert second_page.items[0].id == 'bcb12b97-db8a-3c1d-b696-d99ed4371175'
assert second_page.items[0].name == 'Python SDK Test Group'
assert second_page.items[0].owner.id == '9930207c-c19f-3de0-b531-86bd4388fa94'
assert second_page.items[0].owner.display_name == '<NAME>'
|
11508369
|
from typing import List, Optional, Tuple, Union
import unrealsdk
from .. import bl2tools
def set_materials(ai_pawn: unrealsdk.UObject, materials: List[unrealsdk.UObject]) -> None:
if materials is None:
return
ai_pawn.Mesh.Materials = materials
def set_scale(ai_pawn: unrealsdk.UObject, scale: float) -> None:
ai_pawn.Mesh.Scale = scale
def set_scale3d(ai_pawn: unrealsdk.UObject, scale3d: List[float]) -> None:
ai_pawn.Mesh.Scale3D = tuple(scale3d)
ai_pawn.Mesh.ForceUpdate(True)
def set_rotation(ai_pawn: unrealsdk.UObject, rotator: Union[List[int], Tuple[int, int, int]]) -> None:
ai_pawn.Mesh.Rotation = tuple(rotator)
ai_pawn.Mesh.ForceUpdate(True)
def set_location(ai_pawn: unrealsdk.UObject, position: Union[List[float], Tuple[float, float, float]]) -> None:
ai_pawn.Location = tuple(position)
def instantiate(ai_pawn_balance: unrealsdk.UObject) -> Optional[unrealsdk.UObject]:
if not ai_pawn_balance:
return None
pc = bl2tools.get_player_controller()
_loc = (pc.Location.X, pc.Location.Y, pc.Location.Z)
pop_master = unrealsdk.FindAll("WillowPopulationMaster")[-1]
pawn = pop_master.SpawnPopulationControlledActor(ai_pawn_balance.AIPawnArchetype.Class,
None, "", _loc, (0, 0, 0),
ai_pawn_balance.AIPawnArchetype,
False, False)
if pc.GetCurrentPlaythrough() != 2:
will_pop = unrealsdk.FindAll("WillowPopulationOpportunityPoint")[1:]
pop = unrealsdk.FindAll("PopulationOpportunityPoint")[1:]
regions = pop if len(pop) > len(will_pop) else will_pop
region_game_stage = max(pc.GetGameStageFromRegion(x.GameStageRegion)
for x in regions if x.GameStageRegion)
else:
region_game_stage = max(x.GetGameStage() for x in unrealsdk.FindAll("WillowPlayerPawn") if x.Arms)
# PopulationFactoryBalancedAIPawn 105-120:
pawn.SetGameStage(region_game_stage)
pawn.SetExpLevel(region_game_stage)
pawn.SetGameStageForSpawnedInventory(region_game_stage)
pawn.SetAwesomeLevel(0)
pawn.Controller.InitializeCharacterClass()
pawn.Controller.RecalculateAttributeInitializedState()
pawn.InitializeBalanceDefinitionState(ai_pawn_balance, -1)
ai_pawn_balance.SetupPawnItemPoolList(pawn)
pawn.AddDefaultInventory()
ai = pawn.MyWillowMind.GetAIDefinition()
ai.TargetSearchRadius = 12000
return pawn
|
11508396
|
import numpy as np
import torch.nn as nn
import torch
from torch.autograd import Variable
import torch.nn.functional as F
from modulate_conv import ModulateConv
from fused_bias_activation import FusedBiasActivation
from base_layer import BaseLayer
from conv2d_layer import Conv2dLayer
class FromRGB(BaseLayer):
def __init__(self, res):
super(FromRGB, self).__init__()
self.conv2d_layer = Conv2dLayer(in_feature_map=3, out_feature_map=self.cliped_features(res - 1), kernel=1)
self.fused_bias_act = FusedBiasActivation(channel=self.cliped_features(res - 1), act='LeakyRelu')
def forward(self, x, image):
t = self.conv2d_layer(image)
t = self.fused_bias_act(t)
return t if x is None else x + t
|
11508490
|
from yamtbx.dataproc.XIO import XIO
from collections import OrderedDict
sp_params_strs = OrderedDict(((("BL32XU", "EIGER9M", None, None), """\
distl {
detector_tiles = 1
peripheral_margin = 0
minimum_spot_area = 2
minimum_signal_height = 4.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
cheetah {
ADCthresh = 5
MinSNR = 8
MinPixCount = 3
MaxPixCount = 40
LocalBGRadius = 2
MinPeakSeparation = 0
algorithm = 8
binning = 1
}
software_binning = False
"""),
(("BL32XU", "MX225HS", "2x2", None), """\
distl {
detector_tiles = 3
peripheral_margin = 10
minimum_spot_area = 5
minimum_signal_height = 2.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
"""),
(("BL32XU", "MX225HS", "4x4", None), """\
distl {
detector_tiles = 3
peripheral_margin = 10
minimum_spot_area = 2
minimum_signal_height = 4.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
"""),
(("BL32XU", "MX225HS", "8x8", None), """\
distl {
detector_tiles = 3
peripheral_margin = 5
minimum_spot_area = 1
minimum_signal_height = 4.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
"""),
(("BL32XU", "MX225HE", "2x2", None), """\
distl {
detector_tiles = 3
peripheral_margin = 10
minimum_spot_area = 5
minimum_signal_height = 2.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
"""),
(("BL32XU", "MX225HE", "3x3", None), """\
distl {
detector_tiles = 3
peripheral_margin = 10
minimum_spot_area = 4
minimum_signal_height = 3.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
"""),
(("BL32XU", "MX225HE", "4x4", None), """\
distl {
detector_tiles = 3
peripheral_margin = 10
minimum_spot_area = 2
minimum_signal_height = 4.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
"""),
(("BL32XU", "MX225HE", "8x8", None), """\
distl {
detector_tiles = 3
peripheral_margin = 5
minimum_spot_area = 1
minimum_signal_height = 4.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
"""),
(("BL32XU", "MX225HE", "16x16", None), """\
distl {
detector_tiles = 3
peripheral_margin = 5
minimum_spot_area = 1
minimum_signal_height = 4.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
"""),
(("BL32XU", "Q315r", "2x2", None), """\
distl {
detector_tiles = 3
peripheral_margin = 10
minimum_spot_area = 5
minimum_signal_height = 2.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
"""),
(("BL32XU", "CMOS", "1x1", None), """\
distl {
detector_tiles = 1
peripheral_margin = 0
minimum_spot_area = 3
minimum_signal_height = 2.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
"""),
(("BL41XU", "PILATUS3 6M", None, None), """\
distl {
detector_tiles = None
peripheral_margin = 10
minimum_spot_area = 2
minimum_signal_height = 4
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
""")
))
def get_common_params_str(use_cuda=False, env="oys"):
if use_cuda:
dic = dict(use_cuda="True")
else:
#if nproc is None: nproc = get_number_of_processors(default=4)
#if env == "ppu": nproc //= 2
dic = dict(use_cuda="False")
return """\
engine = *distl xds
distl {
res {
outer = 5.
inner = 30.
}
scanbox_windows = 101 51 51
}
xds {
do_defpix = True
value_range_for_trusted_detector_pixels = 9000. 30000
}
cuda_median_background {
active = %(use_cuda)s
filter_radius = 10
filter_repeat = 1
}
#bkg_image = /home/yam/work/smoothing_131114/xds_process_scan/BKGINIT.cbf
#gain_image = /home/yam/work/smoothing_131114/xds_process_scan/GAIN.cbf
#bkg_image = /home/yam/work/smoothing_131114/my_scan172/honki/bkginit_20_20_1.cbf
#gain_image = /home/yam/work/smoothing_131114/my_scan172/honki/test_rev_median5.cbf
#gain_image_nbxy = 3,3
""" % dic
def get_key_by_img(imgfile):
im = XIO.Image(imgfile)
if im.header["ImageType"] == "marccd":
if im.header["SerialNumber"] in ("106", None): # None for 2013B
if im.header["Height"] == im.header["Width"] == 1440:
return ("BL32XU", "MX225HS", "4x4", None)
if im.header["Height"] == im.header["Width"] == 2880:
return ("BL32XU", "MX225HS", "2x2", None)
if im.header["Height"] == im.header["Width"] == 720:
return ("BL32XU", "MX225HS", "8x8", None)
if im.header["SerialNumber"] == "31":
if im.header["Height"] == im.header["Width"] == 384:
return ("BL32XU", "MX225HE", "16x16", None)
if im.header["Height"] == im.header["Width"] == 768:
return ("BL32XU", "MX225HE", "8x8", None)
if im.header["Height"] == im.header["Width"] == 1536:
return ("BL32XU", "MX225HE", "4x4", None)
if im.header["Height"] == im.header["Width"] == 2046:
return ("BL32XU", "MX225HE", "3x3", None)
if im.header["Height"] == im.header["Width"] == 3072:
return ("BL32XU", "MX225HE", "2x2", None)
elif im.header["ImageType"] == "adsc":
if im.header["Height"]==im.header["Width"]==2352 and int(im.header["PixelX"]*1000)==50:
return ("BL32XU", "CMOS", "1x1", None) # This may be used at BL26B2.
if im.header["SerialNumber"] == "915":
if im.header["Height"] == im.header["Width"] == 3072:
return ("BL32XU", "Q315r", "2x2", None)
elif im.header["SerialNumber"] == "PILATUS3 6M, S/N 60-0125":
return ("BL41XU", "PILATUS3 6M", None, None)
raise Exception("We do not know such a detector")
# get_key_by_img()
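# Usage sketch: resolve the detector key for an image and build the full
# spot-finding parameter string (the path is illustrative):
# key = get_key_by_img("/data/sample_000001.img")
# params_str = get_common_params_str(use_cuda=False) + sp_params_strs[key]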
|
11508495
|
import torch
from torch.autograd import Variable
import torch.nn as nn
import math
import torch.nn.functional as F
from .core import *
import copy
def acoustic_builder(aco_type, opts):
if aco_type == 'rnn':
model = acoustic_rnn(num_inputs=opts.num_inputs,
emb_size=opts.emb_size,
rnn_size=opts.rnn_size,
rnn_layers=opts.rnn_layers,
dropout=opts.dout,
speakers=opts.spks,
mulout=opts.mulout,
cuda=opts.cuda,
emb_layers=opts.emb_layers,
emb_activation=opts.emb_activation)
train_fn = 'train_aco_epoch'
eval_fn = 'eval_aco_epoch'
elif aco_type == 'satt':
model = acoustic_satt(num_inputs=opts.num_inputs,
emb_size=opts.emb_size,
dropout=opts.dout,
emb_activation=opts.emb_activation,
speakers=opts.spks,
emb_layers=opts.emb_layers,
d_ff=opts.d_ff,
cuda=opts.cuda,
N=opts.N,
h=opts.h,
lnorm=(not opts.no_lnorm),
conv_out=opts.conv_out)
train_fn = 'train_attaco_epoch'
eval_fn = 'eval_attaco_epoch'
elif aco_type == 'decsatt':
model = acoustic_decoder_satt(num_inputs=opts.num_inputs,
emb_size=opts.emb_size,
dropout=opts.dout,
cuda=opts.cuda,
emb_activation=opts.emb_activation,
speakers=opts.spks,
emb_layers=opts.emb_layers,
d_ff=opts.d_ff,
N=opts.N,
h=opts.h)
train_fn = 'train_attaco_epoch'
eval_fn = 'eval_attaco_epoch'
else:
        raise TypeError('Unrecognized model type: {}'.format(aco_type))
return model, train_fn, eval_fn
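# Sketch of the fields acoustic_builder reads from `opts` (values are
# illustrative; the real project presumably builds this from argparse):
# from argparse import Namespace
# opts = Namespace(num_inputs=380, emb_size=512, rnn_size=512, rnn_layers=2,
#                  dout=0.5, spks=None, mulout=False, cuda=False,
#                  emb_layers=2, emb_activation='Tanh')
# model, train_fn, eval_fn = acoustic_builder('rnn', opts)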
class acoustic_rnn(speaker_model):
""" acoustic RNN model """
def __init__(self, num_inputs, emb_size,
rnn_size, rnn_layers,
dropout, emb_activation='Tanh',
speakers=None,
mulspk_type='sinout',
mulout=False, cuda=False,
bnorm=False,
emb_layers=2):
super().__init__(num_inputs, mulspk_type,
speakers=speakers,
cuda=cuda)
self.emb_size = emb_size
self.emb_layers = emb_layers
self.emb_activation = emb_activation
self.rnn_size = rnn_size
self.rnn_layers = rnn_layers
self.bnorm = bnorm
self.num_outputs = 43
self.dropout = dropout
self.num_inputs = num_inputs
print('aco_rnn num_inputs=', num_inputs)
# build embedding of spks (if required)
self.build_spk_embedding()
# -- Build tanh embedding trunk
self.build_input_embedding()
# -- Build recurrent component
self.build_core_rnn()
# -- Build output mapping RNN(s)
self.build_output(rnn_output=True)
def forward(self, dling_features, hid_state=None, out_state=None,
speaker_idx=None):
""" Forward the duration + linguistic features, and the speaker ID
# Arguments
dling_features: Tensor with encoded linguistic features and
duration (absolute + relative)
speaker_id: Tensor with speaker idx to be generated
"""
if self.mulout and out_state is not None:
assert isinstance(out_state, dict), type(out_state)
if self.mulout and out_state is None:
out_state = dict((spk, None) for spk in self.speakers)
# forward through embedding
x = self.forward_input_embedding(dling_features, speaker_idx)
# forward through RNN core
x, hid_state = self.forward_core(x, hid_state)
# forward through output RNN
if self.mulout:
y = {}
nout_state = {}
for spk in self.speakers:
y[spk], \
nout_state[spk] = self.out_layers[spk](x,
out_state[spk])
# Bound classification output within [0, 1] properly
#y[spk] = self.correct_classification_output(y[spk])
y[spk] = tanh2sigmoid(y[spk])
#y[spk] = y[spk].view(dling_features.size(0), -1,
# self.num_outputs)
else:
y, nout_state = self.out_layer(x,
out_state)
# Bound classification output within [0, 1] properly
#y = correct_classification_output(y)
y = tanh2sigmoid(y)
#y = y.view(dling_features.size(0), -1, self.num_outputs)
return y, hid_state, nout_state
def init_hidden_state(self, curr_bsz):
return (torch.zeros(self.rnn_layers, curr_bsz, self.rnn_size),
torch.zeros(self.rnn_layers, curr_bsz, self.rnn_size))
def init_output_state(self, curr_bsz):
if self.mulout:
# return dict of output states, one per spk
out_states = {}
for spk in self.speakers:
out_states[spk] = (torch.zeros(1, curr_bsz,
self.num_outputs),
torch.zeros(1, curr_bsz,
self.num_outputs))
else:
out_states = (torch.zeros(1, curr_bsz,
self.num_outputs),
torch.zeros(1, curr_bsz,
self.num_outputs))
return out_states
class acoustic_satt(speaker_model):
def __init__(self, num_inputs, emb_size=512,
dropout=0.1,
emb_activation='Tanh',
speakers=None,
mulspk_type='sinout',
mulout=False, cuda=False,
bnorm=False,
emb_layers=2,
h=8, d_model=512,
d_ff=2048, N=6,
out_activation='Sigmoid',
lnorm=True,
conv_out=False):
# no other mulspk implemented yet
assert mulspk_type == 'sinout', mulspk_type
super().__init__(num_inputs, mulspk_type,
speakers=speakers,
cuda=cuda)
self.emb_size = emb_size
self.emb_layers = emb_layers
self.emb_activation = emb_activation
self.bnorm = bnorm
self.num_outputs = 43
self.dropout = dropout
self.num_inputs = num_inputs
# build embedding of spks (if required)
self.build_spk_embedding()
# wrongly named rnn_size, there are no RNN here
# but core needs this name for output layer
self.rnn_size = self.emb_size
# -- Build tanh embedding trunk
self.build_input_embedding()
c = copy.deepcopy
attn = MultiHeadedAttention(h, self.emb_size, dropout=dropout)
ff = PositionwiseFeedForward(self.emb_size, d_ff, dropout)
self.position = PositionalEncoding(self.emb_size, dropout)
enc_layer = AttEncoderLayer(self.emb_size, c(attn), c(ff),
dropout, lnorm)
self.model = clones(enc_layer, N)
if lnorm:
self.norm = LayerNorm(enc_layer.size)
# -- Build output mapping FC(s)
if conv_out:
self.out_layer = nn.Conv1d(emb_size, self.num_outputs, 21,
padding=10)
else:
self.build_output(rnn_output=False)
self.conv_out = conv_out
self.out_activation = out_activation
        self.sigmoid = getattr(nn, out_activation)()
#print('Built enc_layer: ', enc_layer)
#print('Built norm layer: ', self.norm)
#print('Built model: ', self.model)
print(self)
print('Initializing xavier weights...')
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, dling_features,
speaker_idx=None,
pe_start_idx=0):
""" Forward the duration + linguistic features, and the speaker ID
# Arguments
dling_features: Tensor with encoded linguistic features and
duration (absolute + relative)
speaker_id: Tensor with speaker idx to be generated
"""
# states are ignored, nothing useful is carried there
# forward through embedding
x = self.forward_input_embedding(dling_features, speaker_idx)
x = x.transpose(0, 1)
if hasattr(self, 'position'):
x = self.position(x, pe_start_idx)
#print('x size: ', x.size())
h = x
# Now we will forward through the transformer encoder structure
for layer in self.model:
h = layer(h, None)
#print('h size: ', h.size())
if hasattr(self, 'norm'):
h = self.norm(h)
# forward through output FC
if self.conv_out:
h = h.transpose(1, 2)
y = self.out_layer(h)
y = y.transpose(1, 2)
else:
y = self.out_layer(h)
y = self.sigmoid(y)
y = y.transpose(0, 1)
return y
class acoustic_decoder_satt(speaker_model):
# TODO: Check validity of this model in terms of seq2seq behavior
def __init__(self, num_inputs, emb_size=512,
dropout=0.1,
emb_activation='Tanh',
speakers=None,
mulspk_type='sinout',
mulout=False, cuda=False,
bnorm=False,
emb_layers=2,
h=8, d_model=512,
d_ff=2048, N=6,
out_activation='Sigmoid'):
# no other mulspk implemented yet
assert mulspk_type == 'sinout', mulspk_type
super().__init__(num_inputs, mulspk_type,
speakers=speakers,
cuda=cuda)
self.do_cuda = cuda
self.emb_size = emb_size
self.emb_layers = emb_layers
self.emb_activation = emb_activation
# wrongly named rnn_size, there are no RNN here
# but core needs this name for output layer
self.rnn_size = emb_size
self.bnorm = bnorm
self.num_outputs = 43
self.dropout = dropout
self.num_inputs = num_inputs
# build embedding of spks (if required)
self.build_spk_embedding()
# -- Build tanh embedding trunk
self.build_input_embedding()
c = copy.deepcopy
attn = MultiHeadedAttention(h, emb_size)
ff = PositionwiseFeedForward(emb_size, d_ff, dropout)
self.position = PositionalEncoding(emb_size, dropout)
self.aco_W = nn.Linear(43, emb_size)
enc_layer = AttDecoderLayer(emb_size, c(attn), c(attn),
c(ff),
dropout)
self.model = clones(enc_layer, N)
self.norm = LayerNorm(enc_layer.size)
# -- Build output mapping FC(s)
self.build_output(rnn_output=False)
        self.sigmoid = getattr(nn, out_activation)()
#print('Built enc_layer: ', enc_layer)
#print('Built norm layer: ', self.norm)
#print('Built model: ', self.model)
print(self)
print('Initializing xavier weights...')
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, dling_features,
prev_aco_features=None,
speaker_idx=None,
pe_start_idx=0):
""" Forward the duration + linguistic features, and the speaker ID
# Arguments
dling_features: Tensor with encoded linguistic features and
duration (absolute + relative)
speaker_id: Tensor with speaker idx to be generated
"""
# states are ignored, nothing useful is carried there
# forward through embedding
x = self.forward_input_embedding(dling_features, speaker_idx)
x = x.transpose(0, 1)
if prev_aco_features is None:
# build zero tensor and begin loop
bos_token = torch.zeros(dling_features.size(1),
1,
43)
if self.do_cuda:
bos_token = bos_token.cuda()
aco_tokens = []
aco_token = bos_token
for aco_i in range(dling_features.size(0)):
#print('Forwarding {}/{} aco frame'.format(aco_i,
# dling_features.size(0)))
aco = self.aco_W(aco_token)
if hasattr(self, 'position'):
aco = self.position(aco, pe_start_idx)
m = x
h = aco
# Now we will forward through the transformer encoder structure
for layer in self.model:
h = layer(h, m, None, None)
h = self.norm(h)
# forward through output FC
y = self.out_layer(h)
y = self.sigmoid(y)
aco_token = y
aco_tokens.append(aco_token)
aco_tokens = torch.cat(aco_tokens, dim=1)
return aco_tokens.transpose(0, 1)
else:
aco = self.aco_W(prev_aco_features.transpose(0,1))
if hasattr(self, 'position'):
aco = self.position(aco)
#print('x size: ', x.size())
m = x
h = aco
mask = subsequent_mask(h.size(1))
if self.do_cuda:
mask = mask.cuda()
# Now we will forward through the transformer encoder structure
for layer in self.model:
h = layer(h, m, None, mask)
#print('h size: ', h.size())
h = self.norm(h)
# forward through output FC
y = self.out_layer(h)
y = self.sigmoid(y)
y = y.transpose(0, 1)
return y
|
11508518
|
from pytest import approx
import math
from my_source import euclid
def test_euclid():
a = [0, 0, 0]
b = [4, 4, 4]
dist = euclid(a, b)
    assert math.sqrt(48.0) == approx(dist)
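# For reference, a euclidean distance consistent with this test (sketch only;
# the real implementation lives in my_source):
# def euclid(a, b):
#     return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))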
|
11508536
|
import prodigy
from multiprocessing import Process
from time import sleep
from prodigy.recipes.ner import batch_train
import atexit
from pathlib import Path
import datetime as dt
from prodigy.components import printers
from prodigy.components.loaders import get_stream
from prodigy.core import recipe, recipe_args
from prodigy.util import TASK_HASH_ATTR, log
from datetime import datetime
from collections import Counter
# It's all going to be run by coder name.
# Config:
# - add list of coders
# - ?? add port per coder?
# - base file name for files
# - recipe, db, model, output
@prodigy.recipe('mark_custom',
dataset=recipe_args['dataset'],
source=recipe_args['source'],
api=recipe_args['api'],
loader=recipe_args['loader'],
label=recipe_args['label'],
view_id=recipe_args['view'],
memorize=recipe_args['memorize'],
exclude=recipe_args['exclude'])
def mark_custom(dataset, source=None, view_id=None, label='', api=None,
loader=None, memorize=False, exclude=None):
"""
Click through pre-prepared examples, with no model in the loop.
"""
log('RECIPE: Starting recipe mark', locals())
stream = list(get_stream(source, api, loader))
counts = Counter()
memory = {}
def fill_memory(ctrl):
if memorize:
examples = ctrl.db.get_dataset(dataset)
log("RECIPE: Add {} examples from dataset '{}' to memory"
.format(len(examples), dataset))
for eg in examples:
memory[eg[TASK_HASH_ATTR]] = eg['answer']
def ask_questions(stream):
for eg in stream:
eg['time_loaded'] = datetime.now().isoformat()
if TASK_HASH_ATTR in eg and eg[TASK_HASH_ATTR] in memory:
answer = memory[eg[TASK_HASH_ATTR]]
counts[answer] += 1
else:
if label:
eg['label'] = label
yield eg
def recv_answers(answers):
for eg in answers:
counts[eg['answer']] += 1
memory[eg[TASK_HASH_ATTR]] = eg['answer']
eg['time_returned'] = datetime.now().isoformat()
def print_results(ctrl):
print(printers.answers(counts))
def get_progress(session=0, total=0, loss=0):
progress = len(counts) / len(stream)
return progress
return {
'view_id': view_id,
'dataset': dataset,
'stream': ask_questions(stream),
'exclude': exclude,
'update': recv_answers,
'on_load': fill_memory,
'on_exit': print_results,
'config': {'label': label}
}
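# Usage sketch: a custom recipe like this would normally be started from the
# command line (dataset and file names are illustrative):
#   prodigy mark_custom my_dataset data.jsonl --view-id classification -F multi_prodigy.py
# The MultiProdigy class below instead serves it programmatically, once per coder.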
class MultiProdigy:
def __init__(self,
coder_list = [{"name" : "Daniel", "port" : 9010},
{"name" : "Youseff", "port" : 9011},
{"name" : "Emad", "port" : 9012},
{"name" : "Rafeef", "port" : 9013},
{"name" : "Mahmoud", "port" : 9014},
{"name" : "Zach", "port" : 9015},
{"name" : "Collin", "port" : 9016},
]):
self.coder_list = coder_list
self.processes = []
def serve(self, coder, port):
print(coder)
base = "data/protest_for_classification_"
filename = "{0}{1}.jsonl".format(base, coder)
prodigy.serve('mark_custom', # recipe
"gsr_is_protest", # db
filename, # input file
"classification", # view ID
"PROTEST",
None, # api
None, # loader
True, # memorize
"gsr_is_protest", # exclude
port=port) # port
def make_prodigies(self):
        for coder_info in self.coder_list:
thread = Process(target=self.serve, args = (coder_info['name'], coder_info['port']))
self.processes.append(thread)
def start_prodigies(self):
print("Starting Prodigy processes...")
for p in self.processes:
p.start()
sleep(1)
def kill_prodigies(self):
print("Killing Prodigy threads")
for i in self.processes:
try:
i.terminate()
except AttributeError:
print("Process {0} doesn't exist?".format(i))
self.processes = []
if __name__ == "__main__":
mp = MultiProdigy()
#mp.make_retrain_time()
atexit.register(mp.kill_prodigies)
mp.make_prodigies()
mp.start_prodigies()
while True:
sleep(5)
# if dt.datetime.now() > mp.retrain_time:
# print("Retraining model and scheduling next retraining for tomorrow")
# mp.make_retrain_time() # bump to tomorrow
# mp.train_and_restart()
|
11508546
|
import json
import re
import textwrap
from functools import partial
import pytest
import pytestqt.exceptions
from _pytest.compat import nullcontext
from PyQt5.QtCore import QByteArray
from PyQt5.QtCore import QDataStream
from PyQt5.QtCore import QIODevice
from PyQt5.QtCore import QMimeData
from PyQt5.QtCore import QPoint
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QDragEnterEvent
from PyQt5.QtGui import QDragMoveEvent
from PyQt5.QtGui import QDropEvent
from pytest_mock import MockFixture
import qmxgraph.constants
import qmxgraph.js
import qmxgraph.mime
from qmxgraph._web_view import ViewState
from qmxgraph.cell_bounds import CellBounds
from qmxgraph.exceptions import InvalidJavaScriptError
from qmxgraph.exceptions import ViewStateError
from qmxgraph.waiting import wait_callback_called
from qmxgraph.waiting import wait_signals_called
from qmxgraph.widget import QmxGraph
def test_error_redirection(loaded_graph) -> None:
"""
It is possible to redirect errors in JS code to Python/Qt side.
    :type loaded_graph: qmxgraph.widget.QmxGraph
"""
error_redirection = loaded_graph.error_bridge
with wait_signals_called(error_redirection.on_error) as cb:
eval_js(loaded_graph, """throw Error("test")""")
assert cb.args is not None
msg, url, line, column = cb.args
expected = textwrap.dedent(
'''\
Uncaught Error: test
stack:
Error: test
at <anonymous>:1:7'''
)
assert (url, line, column) == ('qrc:/', 1, 1)
def test_events_bridge_delayed_signals(graph, qtbot, mocker) -> None:
from qmxgraph.widget import EventsBridge
events = EventsBridge()
stub = mocker.stub()
call = mocker.call
events.on_cells_added.connect(stub)
events.cells_added_slot(["1"])
assert stub.call_args_list == []
def check_call(expected):
assert stub.call_args_list == expected
qtbot.waitUntil(partial(check_call, [call(["1"])]))
with events.delaying_signals():
events.cells_added_slot(["2"])
with pytest.raises(pytestqt.exceptions.TimeoutError):
expected = [call(["1"]), call(["2"])]
qtbot.waitUntil(partial(check_call, expected), timeout=1000)
qtbot.waitUntil(partial(check_call, expected))
def test_events_bridge_plain(graph, mocker) -> None:
"""
    Verify that the Python code can listen to JavaScript events by using
    qmxgraph's events bridge.
    :type graph: qmxgraph.widget.QmxGraph
:type mocker: pytest_mock.MockFixture
"""
from qmxgraph.api import QmxGraphApi
events = graph.events_bridge
added_handler = mocker.Mock()
removed_handler = mocker.Mock()
labels_handler = mocker.Mock()
selections_handler = mocker.Mock()
terminal_handler = mocker.Mock()
terminal_with_port_handler = mocker.Mock()
events.on_cells_added.connect(added_handler)
events.on_cells_removed.connect(removed_handler)
events.on_label_changed.connect(labels_handler)
events.on_selection_changed.connect(selections_handler)
events.on_terminal_changed.connect(terminal_handler)
events.on_terminal_with_port_changed.connect(terminal_with_port_handler)
graph.load_and_wait()
# on_cells_added
with wait_signals_called(events.on_cells_added):
vertex_id = graph.api.insert_vertex(40, 40, 20, 20, 'test')
assert added_handler.call_args_list == [mocker.call([vertex_id])]
# on_selection_changed
assert selections_handler.call_args_list == []
with wait_signals_called(events.on_selection_changed):
eval_js(graph, "graphEditor.execute('selectVertices')")
assert selections_handler.call_args_list == [mocker.call([vertex_id])]
# on_label_changed
with wait_signals_called(events.on_label_changed):
graph.api.set_label(vertex_id, 'TOTALLY NEW LABEL')
assert labels_handler.call_args_list == [mocker.call(vertex_id, 'TOTALLY NEW LABEL', 'test')]
# on_terminal_changed, on_terminal_with_port_changed
foo_id = graph.api.insert_vertex(440, 40, 20, 20, 'foo')
bar_id = graph.api.insert_vertex(40, 140, 20, 20, 'bar')
edge_id = graph.api.insert_edge(vertex_id, foo_id, 'edge')
bar_port_name = 'a-port'
assert not graph.api.has_port(bar_id, bar_port_name)
graph.api.insert_port(bar_id, bar_port_name, 0, 0, 5, 5)
assert graph.api.has_port(bar_id, bar_port_name)
with wait_signals_called(
events.on_terminal_changed,
events.on_terminal_with_port_changed,
):
graph.api.set_edge_terminal(
edge_id, QmxGraphApi.TARGET_TERMINAL_CELL, bar_id, bar_port_name
)
with wait_signals_called(
events.on_terminal_changed,
events.on_terminal_with_port_changed,
):
graph.api.set_edge_terminal(edge_id, QmxGraphApi.SOURCE_TERMINAL_CELL, foo_id)
assert terminal_handler.call_args_list == [
mocker.call(edge_id, QmxGraphApi.TARGET_TERMINAL_CELL, bar_id, foo_id),
mocker.call(edge_id, QmxGraphApi.SOURCE_TERMINAL_CELL, foo_id, vertex_id),
]
assert terminal_with_port_handler.call_args_list == [
mocker.call(
edge_id,
QmxGraphApi.TARGET_TERMINAL_CELL,
bar_id,
bar_port_name,
foo_id,
'',
),
mocker.call(
edge_id,
QmxGraphApi.SOURCE_TERMINAL_CELL,
foo_id,
'',
vertex_id,
'',
),
]
# on_cells_removed
with wait_signals_called(events.on_cells_removed):
graph.api.remove_cells([vertex_id])
assert removed_handler.call_args_list == [mocker.call([vertex_id])]
def test_bridges_signal_handlers_can_call_api(loaded_graph) -> None:
"""
    Verify that the Python code handling bridge signals can call the
    qmxgraph api.
    Testing only one method of `EventsBridge` since all bridge signals
    are handled the same way.
    :type loaded_graph: qmxgraph.widget.QmxGraph
"""
zoom_scale_obtained = []
def handler_that_call_api(*args):
result = loaded_graph.api.get_zoom_scale()
zoom_scale_obtained.append(result)
events = loaded_graph.events_bridge
events.on_cells_added.connect(handler_that_call_api)
with wait_signals_called(events.on_cells_added):
loaded_graph.api.insert_vertex(40, 40, 20, 20, 'test')
assert zoom_scale_obtained == [1]
def test_set_double_click_handler(graph, handler, qtbot) -> None:
"""
    :type graph: qmxgraph.widget.QmxGraph
:type handler: _HandlerFixture
"""
js_script = "graphEditor.execute('doubleClick', {vertex})"
# Handler can be set even while not yet loaded
graph.double_click_bridge.on_double_click.connect(handler.handler_func)
handler.assert_handled(
js_script=js_script,
called=True,
expected_calls=[()],
)
# It should be restored if loaded again after being blanked
wait_until_blanked(qtbot, graph)
handler.assert_handled(
js_script=js_script,
called=True,
expected_calls=[()],
)
    # Disconnecting the handler detaches it from the event
graph.double_click_bridge.on_double_click.disconnect(handler.handler_func)
handler.assert_handled(
js_script=js_script,
called=False,
expected_calls=[],
)
def test_set_popup_menu_handler(graph, handler, qtbot) -> None:
"""
    :type graph: qmxgraph.widget.QmxGraph
:type handler: _HandlerFixture
"""
js_script = "graphEditor.execute('popupMenu', {vertex}, 15, 15)"
# Handler can be set even while not yet loaded
graph.popup_menu_bridge.on_popup_menu.connect(handler.handler_func)
handler.assert_handled(
js_script=js_script,
called=True,
expected_calls=[(15, 15)],
)
# It should be restored if loaded again after being blanked
wait_until_blanked(qtbot, graph)
handler.assert_handled(
js_script=js_script,
called=True,
expected_calls=[(15, 15)],
)
    # Disconnecting the handler detaches it from the event
graph.popup_menu_bridge.on_popup_menu.disconnect(handler.handler_func)
handler.assert_handled(
js_script=js_script,
called=False,
expected_calls=[],
)
def test_container_resize(loaded_graph) -> None:
"""
    The div containing the graph in the web view must be resized to match the
    dimensions of the Qt widget at initialization and whenever the web view
    is resized.
    :type loaded_graph: qmxgraph.widget.QmxGraph
"""
expected_width = loaded_graph.inner_web_view().width()
expected_height = loaded_graph.inner_web_view().height()
def get_container_dimensions():
width = eval_js(loaded_graph, """document.getElementById('graphContainer').style.width""")
height = eval_js(loaded_graph, """document.getElementById('graphContainer').style.height""")
return int(width.replace('px', '')), int(height.replace('px', ''))
width, height = get_container_dimensions()
assert width == expected_width
assert height == expected_height
expected_width += 20
loaded_graph.resize(expected_width, expected_height)
width, height = get_container_dimensions()
assert width == expected_width
assert height == expected_height
def test_web_inspector(loaded_graph, mocker) -> None:
"""
    :type loaded_graph: qmxgraph.widget.QmxGraph
:type mocker: pytest_mock.MockFixture
"""
from PyQt5.QtWidgets import QDialog
mocker.patch.object(QDialog, 'show')
mocker.patch.object(QDialog, 'hide')
loaded_graph.show_inspector()
QDialog.show.assert_called_once_with()
loaded_graph.hide_inspector()
QDialog.hide.assert_called_once_with()
QDialog.show.reset_mock()
loaded_graph.toggle_inspector()
QDialog.show.assert_called_once_with()
def test_blank(loaded_graph, qtbot) -> None:
"""
:type loaded_graph: qmxgraph.widget.QmxGraph
"""
assert loaded_graph.is_loaded()
loaded_graph.blank()
def check():
assert loaded_graph.inner_web_view().view_state == ViewState.Blank
qtbot.waitUntil(check)
assert not loaded_graph.is_loaded()
def test_blank_and_load(graph, qtbot) -> None:
graph.load_and_wait()
graph.blank()
wait_until_blanked(qtbot, graph)
graph.load_and_wait(timeout_ms=5000)
def test_web_channel_blocking(graph, qtbot) -> None:
def is_web_channel_blocked() -> bool:
        # Both updates and signals should be blocked/unblocked at the same time.
result = graph.inner_web_view().page().webChannel().blockUpdates()
assert graph.inner_web_view().page().webChannel().signalsBlocked() is result
return result
assert is_web_channel_blocked() is True
graph.load_and_wait()
assert is_web_channel_blocked() is False
graph.blank_and_wait()
assert is_web_channel_blocked() is True
graph.load_and_wait()
assert is_web_channel_blocked() is False
def test_call_once_when_loaded(graph: QmxGraph, mocker: MockFixture) -> None:
stubA = mocker.stub()
graph.call_once_when_loaded(stubA)
graph.load_and_wait()
assert stubA.call_count == 1
stubB = mocker.stub()
graph.call_once_when_loaded(stubB)
assert stubA.call_count == 1
assert stubB.call_count == 1
graph.blank_and_wait()
stubC = mocker.stub()
graph.call_once_when_loaded(stubC)
graph.load_and_wait()
assert stubA.call_count == 1
assert stubB.call_count == 1
assert stubC.call_count == 1
def test_state_errors_after_closing(graph: QmxGraph) -> None:
graph.close()
with pytest.raises(ViewStateError):
graph.load_and_wait()
with pytest.raises(ViewStateError):
graph.blank_and_wait()
def test_drag_drop(loaded_graph, drag_drop_events) -> None:
"""
    Dragging and dropping data with a valid qmxgraph MIME type onto qmxgraph
    should change the graph according to the contents of the dropped data.
    :type loaded_graph: qmxgraph.widget.QmxGraph
:type drag_drop_events: DragDropEventsFactory
"""
mime_data = qmxgraph.mime.create_qt_mime_data(
{
'vertices': [
{
'dx': 0,
'dy': 0,
'width': 64,
'height': 64,
'label': 'test 1',
},
{
'dx': 50,
'dy': 50,
'width': 32,
'height': 32,
'label': 'test 2',
'tags': {'foo': '1', 'bar': 'a'},
},
],
}
)
drag_enter_event = drag_drop_events.drag_enter(mime_data, position=(100, 100))
loaded_graph.inner_web_view().dragEnterEvent(drag_enter_event)
assert drag_enter_event.acceptProposedAction.call_count == 1
drag_move_event = drag_drop_events.drag_move(mime_data, position=(100, 100))
    loaded_graph.inner_web_view().dragMoveEvent(drag_move_event)
assert drag_move_event.acceptProposedAction.call_count == 1
drop_event = drag_drop_events.drop(mime_data, position=(100, 100))
loaded_graph.inner_web_view().dropEvent(drop_event)
assert drop_event.acceptProposedAction.call_count == 1
cell_id = loaded_graph.api.get_cell_id_at(100, 100)
assert loaded_graph.api.get_cell_type(cell_id) == qmxgraph.constants.CELL_TYPE_VERTEX
assert loaded_graph.api.get_geometry(cell_id) == [100.0 - 64 / 2, 100.0 - 64 / 2, 64.0, 64.0]
assert loaded_graph.api.get_label(cell_id) == 'test 1'
cell_id = loaded_graph.api.get_cell_id_at(150, 150)
assert loaded_graph.api.get_cell_type(cell_id) == qmxgraph.constants.CELL_TYPE_VERTEX
assert loaded_graph.api.get_geometry(cell_id) == [150.0 - 32 / 2, 150.0 - 32 / 2, 32.0, 32.0]
assert loaded_graph.api.get_label(cell_id) == 'test 2'
assert loaded_graph.api.get_tag(cell_id, 'foo') == '1'
assert loaded_graph.api.get_tag(cell_id, 'bar') == 'a'
def test_drag_drop_invalid_mime_type(loaded_graph, drag_drop_events) -> None:
"""
    Data can't be dropped in qmxgraph unless it uses qmxgraph's own MIME type;
    all other events should be ignored.
    :type loaded_graph: qmxgraph.widget.QmxGraph
:type drag_drop_events: DragDropEventsFactory
"""
item_data = QByteArray()
data_stream = QDataStream(item_data, QIODevice.WriteOnly)
data_stream.writeString(
json.dumps('<?xml version="1.0"?><message>Hello World!</message>').encode('utf8')
)
mime_data = QMimeData()
mime_data.setData('application/xml', item_data)
drag_enter_event = drag_drop_events.drag_enter(mime_data)
loaded_graph.inner_web_view().dragEnterEvent(drag_enter_event)
assert drag_enter_event.ignore.call_count == 1
drag_move_event = drag_drop_events.drag_move(mime_data)
loaded_graph.inner_web_view().dragMoveEvent(drag_move_event)
assert drag_move_event.ignore.call_count == 1
drop_event = drag_drop_events.drop(mime_data)
loaded_graph.inner_web_view().dropEvent(drop_event)
assert drop_event.ignore.call_count == 1
@pytest.mark.qt_no_exception_capture
def test_drag_drop_invalid_version(loaded_graph, drag_drop_events) -> None:
"""
    :type loaded_graph: qmxgraph.widget.QmxGraph
:type drag_drop_events: DragDropEventsFactory
"""
mime_data = qmxgraph.mime.create_qt_mime_data(
{
'version': -1,
}
)
drag_enter_event = drag_drop_events.drag_enter(mime_data)
loaded_graph.inner_web_view().dragEnterEvent(drag_enter_event)
assert drag_enter_event.acceptProposedAction.call_count == 1
drag_move_event = drag_drop_events.drag_move(mime_data)
loaded_graph.inner_web_view().dragMoveEvent(drag_move_event)
assert drag_move_event.acceptProposedAction.call_count == 1
drop_event = drag_drop_events.drop(mime_data)
import sys
print(
(
'This test will cause an exception in a Qt event loop:\n'
' ValueError: Unsupported version of QmxGraph MIME data: -1'
),
file=sys.stderr,
)
from pytestqt.exceptions import capture_exceptions
with capture_exceptions() as exceptions:
loaded_graph.inner_web_view().dropEvent(drop_event)
assert drop_event.acceptProposedAction.call_count == 0
assert len(exceptions) == 1
assert str(exceptions[0][1]) == "Unsupported version of QmxGraph MIME data: -1"
@pytest.mark.parametrize('debug', (True, False))
def test_invalid_api_call(loaded_graph, debug) -> None:
"""
    :type loaded_graph: qmxgraph.widget.QmxGraph
:type debug: bool
"""
import qmxgraph.debug
old_debug = qmxgraph.debug.is_qmxgraph_debug_enabled()
qmxgraph.debug.set_qmxgraph_debug(debug)
try:
if debug:
# When debug feature is enabled, it fails as soon as call is made.
expected_message = re.escape(
'Uncaught Error: [QmxGraph] unable to find function "BOOM" in javascript api'
)
with pytest.raises(InvalidJavaScriptError, match=expected_message):
loaded_graph.api.call_api('BOOM')
else:
# When debug feature is disabled, code will raise on JavaScript
# side, but unless an error bridge is configured that could go
# unnoticed, as call would return None and could easily be
# mistaken by an OK call.
assert loaded_graph.api.call_api('BOOM') is None
finally:
qmxgraph.debug.set_qmxgraph_debug(old_debug)
@pytest.mark.parametrize('enabled', (True, False))
def test_graph_api_calls(loaded_graph, enabled) -> None:
"""
Tests the available calls to the graph api.
"""
graph_api_functions = [
('is_cells_deletable', 'set_cells_deletable'),
('is_cells_disconnectable', 'set_cells_disconnectable'),
('is_cells_editable', 'set_cells_editable'),
(
'is_cells_movable',
'set_cells_movable',
),
(
'is_cells_connectable',
'set_cells_connectable',
),
]
for getter_name, setter_name in graph_api_functions:
getter_func = getattr(loaded_graph.api, getter_name)
setter_func = getattr(loaded_graph.api, setter_name)
setter_func(enabled)
assert getter_func() is enabled
setter_func(not enabled)
assert getter_func() is not enabled
def test_tags(loaded_graph) -> None:
"""
    :type loaded_graph: qmxgraph.widget.QmxGraph
"""
no_tags_id = loaded_graph.api.insert_vertex(10, 10, 20, 20, 'test')
assert not loaded_graph.api.has_tag(no_tags_id, 'foo')
loaded_graph.api.set_tag(no_tags_id, 'foo', '1')
assert loaded_graph.api.has_tag(no_tags_id, 'foo')
assert loaded_graph.api.get_tag(no_tags_id, 'foo') == '1'
with_tags_id = loaded_graph.api.insert_vertex(50, 50, 20, 20, 'test', tags={'bar': '2'})
assert loaded_graph.api.has_tag(with_tags_id, 'bar')
assert loaded_graph.api.get_tag(with_tags_id, 'bar') == '2'
def test_get_cell_count(loaded_graph) -> None:
"""
    :type loaded_graph: qmxgraph.widget.QmxGraph
"""
from qmxgraph.common_testing import get_cell_count
node_a = loaded_graph.api.insert_vertex(10, 10, 50, 50, 'A')
node_b = loaded_graph.api.insert_vertex(400, 300, 50, 50, 'B')
loaded_graph.api.insert_edge(node_a, node_b, 'AB')
assert get_cell_count(loaded_graph, 'function(cell){ return false }') == 0
assert get_cell_count(loaded_graph, 'function(cell){ return cell.isEdge() }') == 1
assert get_cell_count(loaded_graph, 'function(cell){ return cell.isVertex() }') == 2
def test_get_cell_ids(loaded_graph) -> None:
"""
    :type loaded_graph: qmxgraph.widget.QmxGraph
"""
from qmxgraph.common_testing import get_cell_ids
node_a = loaded_graph.api.insert_vertex(10, 10, 50, 50, 'A')
node_b = loaded_graph.api.insert_vertex(400, 300, 50, 50, 'B')
loaded_graph.api.insert_edge(node_a, node_b, 'AB')
assert get_cell_ids(loaded_graph, 'function(cell){ return false }') == []
assert get_cell_ids(loaded_graph, 'function(cell){ return cell.isEdge() }') == ['4']
assert get_cell_ids(loaded_graph, 'function(cell){ return cell.isVertex() }') == ['2', '3']
def test_cell_bounds(loaded_graph) -> None:
node_a = loaded_graph.api.insert_vertex(10, 10, 50, 50, 'A')
bounds = loaded_graph.api.get_cell_bounds(node_a)
assert bounds == CellBounds(x=10, y=10, width=50, height=50)
new_bounds = CellBounds(x=100, y=100, width=75, height=75)
loaded_graph.api.set_cell_bounds(node_a, new_bounds)
assert loaded_graph.api.get_cell_bounds(node_a) == new_bounds
def test_last_index_of(loaded_graph) -> None:
"""
    :type loaded_graph: qmxgraph.widget.QmxGraph
"""
assert eval_js(loaded_graph, "'canal'.lastIndexOf('a')") == 3
assert eval_js(loaded_graph, "'canal'.lastIndexOf('a', 2)") == 1
assert eval_js(loaded_graph, "'canal'.lastIndexOf('a', 0)") == -1
assert eval_js(loaded_graph, "'canal'.lastIndexOf('x')") == -1
assert eval_js(loaded_graph, "'canal'.lastIndexOf('c', -5)") == 0
assert eval_js(loaded_graph, "'canal'.lastIndexOf('c', 0)") == 0
assert eval_js(loaded_graph, "'canal'.lastIndexOf('')") == 5
assert eval_js(loaded_graph, "'canal'.lastIndexOf('', 2)") == 2
def eval_js(graph_widget, statement):
return graph_widget.inner_web_view().eval_js(statement)
@pytest.fixture(name='graph')
def graph_(qtbot) -> QmxGraph:
"""
:type qtbot: pytestqt.plugin.QtBot
    :rtype: qmxgraph.widget.QmxGraph
"""
graph_ = QmxGraph(auto_load=False)
graph_.show()
qtbot.addWidget(graph_)
return graph_
@pytest.fixture(name='loaded_graph')
def loaded_graph_(graph):
"""
    :type graph: qmxgraph.widget.QmxGraph
    :rtype: qmxgraph.widget.QmxGraph
"""
graph.load_and_wait()
return graph
@pytest.fixture(name='drag_drop_events')
def drag_drop_events_(mocker):
"""
    :type mocker: pytest_mock.MockFixture
:rtype: DragDropEventsFactory
"""
return DragDropEventsFactory(mocker)
class DragDropEventsFactory(object):
"""
Creates Qt drag & drop events spying some essential methods so tests can
track how many calls are made to them.
"""
def __init__(self, mocker):
self.mocker = mocker
def drag_enter(self, mime_data, position=None):
return self._create_dd_event(QDragEnterEvent, mime_data=mime_data, position=position)
def drag_move(self, mime_data, position=None):
return self._create_dd_event(QDragMoveEvent, mime_data=mime_data, position=position)
def drop(self, mime_data, position=None):
return self._create_dd_event(QDropEvent, mime_data=mime_data, position=position)
def _create_dd_event(self, event_type, position, mime_data):
        # just a sensible default position for tests where the exact position is irrelevant
position = position or (100, 100)
dd_args = QPoint(*position), Qt.MoveAction, mime_data, Qt.LeftButton, Qt.NoModifier
dd_event = event_type(*dd_args)
self.mocker.spy(dd_event, 'acceptProposedAction')
self.mocker.spy(dd_event, 'ignore')
return dd_event
def wait_until_blanked(qtbot, graph):
"""
    :type graph: qmxgraph.widget.QmxGraph
"""
graph.blank()
def is_blank():
assert graph.inner_web_view().view_state == ViewState.Blank
qtbot.waitUntil(is_blank)
class _HandlerFixture:
def __init__(self, graph, qtbot):
self.calls = []
self.cb = None
self.graph = graph
self.qtbot = qtbot
def handler_func(self, *args):
assert self.cb
self.calls.append(args)
self.cb(*args)
def assert_handled(self, *, js_script, called, expected_calls=()):
assert "{vertex}" in js_script
self.graph.load_and_wait()
vertex_id = self.graph.api.insert_vertex(
10,
10,
20,
20,
'handler fixture test',
)
js_script = js_script.format(
vertex=f"graphEditor.graph.model.getCell({vertex_id})",
)
if called:
error_context = nullcontext()
else:
error_context = pytest.raises(TimeoutError)
with error_context:
with wait_callback_called() as cb:
self.cb = cb
eval_js(self.graph, js_script)
        assert self.calls == [(vertex_id,) + tuple(args) for args in expected_calls]
self.calls.clear()
@pytest.fixture(name='handler')
def handler_(graph, qtbot):
"""
    :type graph: qmxgraph.widget.QmxGraph
:rtype: _HandlerFixture
"""
return _HandlerFixture(graph, qtbot)
|
11508552
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import scipy.io
from IPython.core.display import display, HTML
from ipywidgets import interact, widgets, fixed
import sys
sys.path.append('helper_functions/')
def plotf2(r, img, ttl, sz):
#fig = plt.figure(figsize=(2, 2));
#plt.figure(figsize=(20, 20));
plt.title(ttl+' {}'.format(r))
plt.imshow(img[:,:,r], cmap="gray", vmin = 0, vmax = np.max(img));
plt.axis('off');
fig = plt.gcf()
fig.set_size_inches(sz)
plt.show();
#display(fig)
#clear_output(wait=True)
return
def plt3D(img, title = '', size = (5,5)):
#fig = plt.figure(figsize=sz);
interact(plotf2,
r=widgets.IntSlider(min=0,max=np.shape(img)[-1]-1,step=1,value=1),
img = fixed(img),
continuous_update= False,
ttl = fixed(title),
sz = fixed(size));
def crop(x):
DIMS0 = x.shape[0]//2 # Image Dimensions
DIMS1 = x.shape[1]//2 # Image Dimensions
PAD_SIZE0 = int((DIMS0)//2) # Pad size
PAD_SIZE1 = int((DIMS1)//2) # Pad size
C01 = PAD_SIZE0; C02 = PAD_SIZE0 + DIMS0 # Crop indices
C11 = PAD_SIZE1; C12 = PAD_SIZE1 + DIMS1 # Crop indices
return x[C01:C02, C11:C12,:]
def pre_plot(x):
x = np.fliplr(np.flipud(x))
x = x/np.max(x)
x = np.clip(x, 0,1)
return x
def stack_rgb_opt(reflArray, opt = 'helper_functions/false_color_calib.mat', scaling = [1,1,2.5]):
color_dict = scipy.io.loadmat(opt)
red = color_dict['red']; green = color_dict['green']; blue = color_dict['blue']
reflArray = reflArray/np.max(reflArray)
red_channel = np.zeros((reflArray.shape[0], reflArray.shape[1]))
green_channel = np.zeros((reflArray.shape[0], reflArray.shape[1]))
blue_channel = np.zeros((reflArray.shape[0], reflArray.shape[1]))
for i in range(0,64):
red_channel = red_channel + reflArray[:,:,i]*red[0,i]*scaling[0]
green_channel = green_channel + reflArray[:,:,i]*green[0,i]*scaling[1]
blue_channel = blue_channel + reflArray[:,:,i]*blue[0,i]*scaling[2]
red_channel = red_channel/64.
green_channel = green_channel/64.
blue_channel = blue_channel/64.
stackedRGB = np.stack((red_channel,green_channel,blue_channel),axis=2)
return stackedRGB
def preprocess(mask, psf, im):
# Crop indices
c1 = 100; c2 = 420; c3 = 80; c4 = 540
# Crop and normalize mask
mask = mask[c1:c2, c3:c4, :]
mask = mask/np.max(mask)
# Crop and normalize PSF
psf = psf[c1:c2, c3:c4]
psf = psf/np.linalg.norm(psf)
# Remove defective pixels in mask calibration
mask_sum = np.sum(mask, 2)
ind = np.unravel_index((np.argmax(mask_sum, axis = None)), mask_sum.shape)
mask[ind[0]-2:ind[0]+2, ind[1]-2:ind[1]+2, :] = 0
# Remove defective pixels in measurement
im = im[c1:c2, c3:c4]
im = im/np.max(im)
im[ind[0]-2:ind[0]+2, ind[1]-2:ind[1]+2] = 0
return mask, psf, im
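# --- Hedged usage sketch (not part of the original helper file): ties the
# helpers above together. The .mat path and the 'mask'/'psf'/'im' keys are
# assumptions for illustration only.
if __name__ == '__main__':
    calib = scipy.io.loadmat('helper_functions/sample_capture.mat')  # hypothetical file
    mask, psf, im = preprocess(calib['mask'], calib['psf'], calib['im'])
    # False-color RGB projection of the hyperspectral mask, then display
    rgb = pre_plot(stack_rgb_opt(mask))
    plt.imshow(rgb); plt.axis('off'); plt.show()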
|
11508554
|
import os
import json
def get_event():
with open(os.getenv("GITHUB_EVENT_PATH"), "r") as event_data:
event = json.loads(event_data.read())
return event
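# --- Hedged usage sketch (not in the original file): GitHub Actions points
# GITHUB_EVENT_PATH at a JSON payload describing the triggering event; this
# simulates that locally. The payload content is made up.
if __name__ == "__main__":
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
        json.dump({"action": "opened", "number": 1}, f)
    os.environ["GITHUB_EVENT_PATH"] = f.name
    print(get_event())  # -> {'action': 'opened', 'number': 1}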
|
11508600
|
from core.helpers import catch_errors
from plugin.core.constants import PLUGIN_PREFIX
from plugin.managers.account import AccountManager
from plugin.models import Account
import logging
import requests
log = logging.getLogger(__name__)
@route(PLUGIN_PREFIX + '/resources/cover')
@catch_errors
def Cover(account_id, refresh=None, *args, **kwargs):
account = AccountManager.get(Account.id == account_id)
if not account.trakt:
return Redirect(R('art-default.png'))
try:
# Refresh trakt account details
account.trakt.refresh()
except Exception:
log.warn('Unable to refresh trakt account details', exc_info=True)
return Redirect(R('art-default.png'))
if account.trakt.cover is None:
return Redirect(R('art-default.png'))
try:
# Request cover image
response = requests.get(account.trakt.cover)
except Exception:
log.warn('Unable to retrieve account cover', exc_info=True)
return Redirect(R('art-default.png'))
if response.status_code != 200:
return Redirect(R('art-default.png'))
return response.content
@route(PLUGIN_PREFIX + '/resources/thumb')
@catch_errors
def Thumb(account_id, refresh=None, *args, **kwargs):
# Retrieve account
account = AccountManager.get(Account.id == account_id)
if not account.trakt:
# TODO better account placeholder image
return Redirect(R('icon-default.png'))
# Retrieve thumb url
thumb_url = account.thumb_url()
if not thumb_url:
# TODO better account placeholder image
return Redirect(R('icon-default.png'))
# Request thumb
try:
response = requests.get(thumb_url)
except Exception:
log.warn('Unable to retrieve account thumbnail', exc_info=True)
return Redirect(R('icon-default.png'))
if response.status_code != 200:
return Redirect(R('icon-default.png'))
return response.content
|
11508607
|
from __future__ import print_function, division
from builtins import range
import numpy as np
"""
This file defines layer types that are commonly used for recurrent neural
networks.
"""
def rnn_step_forward(x, prev_h, Wx, Wh, b):
"""
Run the forward pass for a single timestep of a vanilla RNN that uses a tanh
activation function.
The input data has dimension D, the hidden state has dimension H, and we use
a minibatch size of N.
Inputs:
- x: Input data for this timestep, of shape (N, D).
- prev_h: Hidden state from previous timestep, of shape (N, H)
- Wx: Weight matrix for input-to-hidden connections, of shape (D, H)
- Wh: Weight matrix for hidden-to-hidden connections, of shape (H, H)
- b: Biases of shape (H,)
Returns a tuple of:
- next_h: Next hidden state, of shape (N, H)
- cache: Tuple of values needed for the backward pass.
"""
next_h, cache = None, None
##############################################################################
# TODO: Implement a single forward step for the vanilla RNN. Store the next #
# hidden state and any values you need for the backward pass in the next_h #
# and cache variables respectively. #
##############################################################################
next_h = np.tanh(np.dot(prev_h, Wh) + np.dot(x, Wx) + b)
cache = (x, prev_h, next_h, Wx, Wh)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return next_h, cache
def rnn_step_backward(dnext_h, cache):
"""
Backward pass for a single timestep of a vanilla RNN.
Inputs:
- dnext_h: Gradient of loss with respect to next hidden state
- cache: Cache object from the forward pass
Returns a tuple of:
- dx: Gradients of input data, of shape (N, D)
- dprev_h: Gradients of previous hidden state, of shape (N, H)
- dWx: Gradients of input-to-hidden weights, of shape (D, H)
- dWh: Gradients of hidden-to-hidden weights, of shape (H, H)
- db: Gradients of bias vector, of shape (H,)
"""
dx, dprev_h, dWx, dWh, db = None, None, None, None, None
##############################################################################
# TODO: Implement the backward pass for a single step of a vanilla RNN. #
# #
# HINT: For the tanh function, you can compute the local derivative in terms #
# of the output value from tanh. #
    ##############################################################################
x, prev_h, next_h, Wx, Wh = cache
dnext_h = dnext_h * (1 - next_h**2)
dprev_h = np.dot(dnext_h, Wh.T)
dx = np.dot(dnext_h, Wx.T)
dWh = np.dot(prev_h.T, dnext_h)
dWx = np.dot(x.T, dnext_h)
db = np.sum(dnext_h, axis=0)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return dx, dprev_h, dWx, dWh, db
def bidirectional_rnn_concatenate_forward(h, hr, mask):
"""
(Optional) Forward pass for concatenating hidden vectors obtained from a RNN
trained on normal sentences and a RNN trained on reversed sentences at each
time step. We assume each hidden vector is of dimension H, timestep is T.
***Important Note***
The sentence length might be smaller than T, so there will be padding 0s at the
end of the sentences. For reversed RNN, the paddings are also at the end of the
sentences, rather than at the beginning of the sentences. For instance, given 5
timesteps and a word vector representation of a sentence [s1, s2, s3, s4, 0].
The vectors fed to two RNNs will be [s1, s2, s3, s4, 0] and [s4, s3, s2, s1, 0]
, respectively, so will the order of the hidden states.
Inputs:
- h, hr: Input hidden states, of shape (N, T, H).
- mask: Mask array which indices each sentence length, of shape (N, T), with
0 paddings at the end.
Returns a tuple of:
- ho: Output vector for this timestep, shape (N, T, 2*H).
- cache: Tuple of values needed for the backward pass.
"""
ho, cache = None, None
##############################################################################
# TODO: Implement the forward pass for a single step of a bidirectional RNN. #
##############################################################################
for i in range(h.shape[0]):
for j in range(h.shape[2]):
h[i, :, j] *= mask[i].T
hr[i, :, j] *= mask[i].T
ho = np.concatenate((h,hr),axis=2)
cache = (h, mask)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return ho, cache
def bidirectional_rnn_concatenate_backward(dho, cache):
"""
(Optional) Backward pass for a single timestep of the output layer of a
bidirectional RNN.
***Important Note***
The sentence length might be smaller than T, so there will be padding 0s at the
end of the sentences. For reversed RNN, the paddings are also at the end of the
sentences, rather than at the begining of the sentences. For instance, given 5
timesteps and a word vector representation of a sentence [s1, s2, s3, s4, 0].
The vectors fed to two RNNs will be [s1, s2, s3, s4, 0] and [s4, s3, s2, s1, 0]
, respectively, so will the order of the hidden states. During backward pass,
timesteps of the 0s will get 0 gradients.
Inputs:
- dout: Gradient of loss with respect to next hidden state.
- cache: Cache object from the forward pass.
Returns a tuple of:
- dh, dhr: Gradients of input data, of shape (N, T, H).
"""
dh, dhr = None, None
##############################################################################
# TODO: Implement the backward pass for a single step of the output layer of #
# of a bidirectional RNN. #
##############################################################################
h, mask = cache
H = int(dho.shape[-1] / 2)
dh = dho[:, :, 0:H]
dhr = dho[:, :, H:]
for i in range(h.shape[0]):
for j in range(h.shape[2]):
dh[i, :, j] *= mask[i].T
dhr[i, :, j] *= mask[i].T
##############################################################################
# END OF YOUR CODE #
##############################################################################
return dh, dhr
def rnn_forward(x, h0, Wx, Wh, b):
"""
Run a vanilla RNN forward on an entire sequence of data. We assume an input
sequence composed of T vectors, each of dimension D. The RNN uses a hidden
size of H, and we work over a minibatch containing N sequences. After running
the RNN forward, we return the hidden states for all timesteps.
Inputs:
- x: Input data for the entire timeseries, of shape (N, T, D).
- h0: Initial hidden state, of shape (N, H)
- Wx: Weight matrix for input-to-hidden connections, of shape (D, H)
- Wh: Weight matrix for hidden-to-hidden connections, of shape (H, H)
- b: Biases of shape (H,)
Returns a tuple of:
- h: Hidden states for the entire timeseries, of shape (N, T, H).
- cache: Values needed in the backward pass
"""
h, cache = None, None
##############################################################################
# TODO: Implement forward pass for a vanilla RNN running on a sequence of #
# input data. You should use the rnn_step_forward function that you defined #
# above. You can use a for loop to help compute the forward pass. #
##############################################################################
N, H = h0.shape
_, T, D = x.shape
h, cache = np.zeros((N, T, H)), {}
for t in range(T):
if t == 0:
h[:, t, :], cache[t] = rnn_step_forward(x[:, t, :], h0, Wx, Wh, b)
else:
h[:, t, :], cache[t] = rnn_step_forward(x[:, t, :], h[:, t - 1, :], Wx, Wh, b)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return h, cache
def rnn_backward(dh, cache):
"""
Compute the backward pass for a vanilla RNN over an entire sequence of data.
Inputs:
- dh: Upstream gradients of all hidden states, of shape (N, T, H)
Returns a tuple of:
- dx: Gradient of inputs, of shape (N, T, D)
- dh0: Gradient of initial hidden state, of shape (N, H)
- dWx: Gradient of input-to-hidden weights, of shape (D, H)
- dWh: Gradient of hidden-to-hidden weights, of shape (H, H)
- db: Gradient of biases, of shape (H,)
"""
dx, dh0, dWx, dWh, db = None, None, None, None, None
##############################################################################
# TODO: Implement the backward pass for a vanilla RNN running an entire #
# sequence of data. You should use the rnn_step_backward function that you #
# defined above. You can use a for loop to help compute the backward pass. #
##############################################################################
N, T, H = dh.shape
_, D = cache[0][0].shape
dx, dh0, dWx, dWh, db = np.zeros((N, T, D)), np.zeros((N, H)), np.zeros((D, H)), np.zeros((H, H)), np.zeros((H,))
dh_t = 0
for t in reversed(range(T)):
dx[:, t, :], dh_t, dWx_t, dWh_t, db_t = rnn_step_backward(dh[:, t, :] + dh_t, cache[t])
dWx += dWx_t
dWh += dWh_t
db += db_t
dh0 = dh_t
##############################################################################
# END OF YOUR CODE #
##############################################################################
return dx, dh0, dWx, dWh, db
def average_forward(hi, mask):
"""
Forward pass to average the outputs at all timesteps. Assume we have T maximum
timesteps.
Inputs:
- hi: Input data at all timesteps, of shape (N, T, H).
- mask: Indicate the number of timesteps for each sample, i.e. sentence, of
shape (N, T).
Returns a tuple of:
- ho: Averaged output vector, of shape (N, H).
- cache: Tuple of values needed for the backward pass.
"""
ho, cache = None, None
N, T, H = hi.shape
count = np.sum(mask, axis=1)
ho = []
for i in range(N):
ho.append(np.dot(mask[i], hi[i]) / count[i])
ho = np.array(ho)
cache = {}
cache['mask'] = mask
cache['count'] = count
return ho, cache
def average_backward(dho, cache):
"""
Backward pass for the average layer.
Inputs:
- dho: Gradient of loss, of shape (N, H)
- cache: Cache object from the forward pass.
Returns a tuple of:
- dhi: Gradients of input data, of shape (M, N, H).
"""
N, H = dho.shape
T = cache['mask'].shape[1]
dhi = []
for i in range(N):
tmph = []
for j in range(T):
tmph.append(dho[i] * cache['mask'][i,j] / cache['count'][i])
dhi.append(tmph)
dhi = np.array(dhi)
return dhi
def temporal_affine_forward(x, w, b):
"""
Forward pass for a temporal affine layer. The input is a set of D-dimensional
vectors arranged into a minibatch of N timeseries, each of length T. We use
an affine function to transform each of those vectors into a new vector of
dimension M.
Inputs:
- x: Input data of shape (N, T, D)
- w: Weights of shape (D, M)
- b: Biases of shape (M,)
Returns a tuple of:
- out: Output data of shape (N, T, M)
- cache: Values needed for the backward pass
"""
N, T, D = x.shape
M = b.shape[0]
out = x.reshape(N * T, D).dot(w).reshape(N, T, M) + b
cache = x, w, b, out
return out, cache
def temporal_affine_backward(dout, cache):
"""
Backward pass for temporal affine layer.
Input:
- dout: Upstream gradients of shape (N, T, M)
- cache: Values from forward pass
Returns a tuple of:
- dx: Gradient of input, of shape (N, T, D)
- dw: Gradient of weights, of shape (D, M)
- db: Gradient of biases, of shape (M,)
"""
x, w, b, out = cache
N, T, D = x.shape
M = b.shape[0]
dx = dout.reshape(N * T, M).dot(w.T).reshape(N, T, D)
dw = dout.reshape(N * T, M).T.dot(x.reshape(N * T, D)).T
db = dout.sum(axis=(0, 1))
return dx, dw, db
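# --- Hedged self-check (not part of the original assignment file): a numeric
# gradient check for rnn_step_forward/rnn_step_backward. Shapes, seed and the
# tolerance mentioned at the end are arbitrary illustrative choices.
if __name__ == '__main__':
    np.random.seed(0)
    N, D, H = 4, 5, 6
    x, prev_h = np.random.randn(N, D), np.random.randn(N, H)
    Wx, Wh, b = np.random.randn(D, H), np.random.randn(H, H), np.random.randn(H)
    next_h, cache = rnn_step_forward(x, prev_h, Wx, Wh, b)
    dnext_h = np.random.randn(*next_h.shape)
    dx, dprev_h, dWx, dWh, db = rnn_step_backward(dnext_h, cache)
    # Numeric gradient of L = sum(next_h * dnext_h) with respect to x
    eps, num_dx = 1e-6, np.zeros_like(x)
    for idx in np.ndindex(*x.shape):
        x[idx] += eps
        hp, _ = rnn_step_forward(x, prev_h, Wx, Wh, b)
        x[idx] -= 2 * eps
        hm, _ = rnn_step_forward(x, prev_h, Wx, Wh, b)
        x[idx] += eps
        num_dx[idx] = np.sum((hp - hm) * dnext_h) / (2 * eps)
    print('max |dx - num_dx| =', np.abs(dx - num_dx).max())  # expect ~1e-9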
|
11508624
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from leaderboard.models import Comment
class AddCommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ('racer', 'comment_type', 'text')
labels = {
'racer': ('Identify who your comment is adressed to'),
'comment_type': ('Choose the type of your comment'),
'text': ('Enter your comment'),
}
class Registration(UserCreationForm):
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email', 'password1', '<PASSWORD>')
username = forms.CharField(required=True, label='Enter username')
first_name = forms.CharField(required=True, label='Enter name')
last_name = forms.CharField(required=True, label='Enter surname')
email = forms.CharField(required=True, label='Enter e-mail')
password1 = forms.CharField(required=True, label='Enter password', widget=forms.PasswordInput)
password2 = forms.CharField(required=True, label='<PASSWORD>', widget=forms.PasswordInput)
|
11508647
|
def blah(range):
print "in blah()"
def foo(range):
return "foo100 foo99 foo101 foo102 foo103".split() + range
def functions_provided():
return "blah foo".split()
|
11508653
|
from pgctl import configsearch
class DescribeGlob:
def it_globs_files(self, tmpdir):
tmpdir.ensure('a/file.1')
tmpdir.ensure('d/file.4')
with tmpdir.as_cwd():
assert list(configsearch.glob('*/file.*')) == ['a/file.1', 'd/file.4']
|
11508701
|
import networkx
import sys
import random
from collections import defaultdict
import stream
import vg_pb2
inp = sys.argv[1]
out = sys.argv[2]
outf = open(out, 'w')
#This class represents a directed graph
# using adjacency list representation
class Graph:
def __init__(self,vertices):
#No. of vertices
self.V= vertices
# default dictionary to store graph
self.graph = defaultdict(list)
# function to add an edge to graph
def addEdge(self,u,v):
self.graph[u].append(v)
    '''A recursive function to collect all paths from 'u' to 'd'.
    visited[] keeps track of vertices in the current path.
    path[] stores the vertices on the current path and paths[]
    accumulates every complete path found.'''
    def printAllPathsUtil(self, u, d, visited, path, paths):
        # Mark the current node as visited and store it in path
        visited[u] = True
        path.append(u)
        # If current vertex is same as destination, record a copy of the
        # current path[]
        if u == d:
            paths.append(list(path))
        else:
            # If current vertex is not destination,
            # recur for all the vertices adjacent to this vertex
            for i in self.graph[u]:
                if visited[i] == False:
                    self.printAllPathsUtil(i, d, visited, path, paths)
        # Remove current vertex from path[] and mark it as unvisited
        path.pop()
        visited[u] = False
    # Returns all paths from 's' to 'd'
    def printAllPaths(self, s, d):
        # Mark all the vertices as not visited
        visited = [False] * (self.V)
        # Accumulate complete paths here
        paths = []
        # Call the recursive helper function to collect all paths
        self.printAllPathsUtil(s, d, visited, [], paths)
        return paths
def vg_graph_reader(inp):
with stream.open(str(inp), "rb") as istream:
for data in istream:
l = vg_pb2.Graph()
l.ParseFromString(data)
g = Graph(len(l.node))
for j in range(len(l.edge)):
from_edge = getattr(l.edge[j], "from")
g.addEdge(from_edge, l.edge[j].to)
return g
g = vg_graph_reader(inp)
num_of_vertices = g.V
for i in range(0, 20):
    ran1 = random.randint(0, num_of_vertices - 1)
    ran2 = random.randint(0, num_of_vertices - 1)
    paths = g.printAllPaths(ran1, ran2)
    for k in paths:
        outf.write(str(k) + '\n')
|
11508731
|
import time
import caproto as ca
def test_timestamp_now():
# There's more built into this than it seems:
# 1. time.time() -> EPICS timestamp
# 2. EPICS timestamp -> POSIX timestamp
# 3. And a relaxed check that we're within 1 second of `time.time()`
# on the way out
now = ca.TimeStamp.now()
assert abs(time.time() - now.timestamp) < 1.
def test_timestamp_basic():
intval = ca.ChannelInteger(value=5)
# Try the datetime interface:
from_dt = intval.epics_timestamp.as_datetime().timestamp()
assert abs(from_dt - intval.timestamp) < 1.
def test_timestamp_raw_access():
intval = ca.ChannelInteger(value=5)
# Try the datetime interface:
intval.epics_timestamp.secondsSinceEpoch
intval.epics_timestamp.nanoSeconds
def test_timestamp_flexible():
t0 = time.time()
ts = ca.TimeStamp.from_flexible_value(t0)
assert abs(ts.timestamp - t0) < 1e-3
def test_timestamp_flexible_epics_tuple():
ts = ca.TimeStamp.from_flexible_value((1, 2))
assert ts.secondsSinceEpoch == 1
assert ts.nanoSeconds == 2
def test_timestamp_flexible_epics_copy():
ts = ca.TimeStamp.from_flexible_value(ca.TimeStamp(2, 3))
assert ts.secondsSinceEpoch == 2
assert ts.nanoSeconds == 3
|
11508747
|
import os
import pypact as pp
from tests.testerbase import Tester, REFERENCE_DIR
class PrintLib4UnitTest(Tester):
def setUp(self):
        self.filename = os.path.join(REFERENCE_DIR, "printlib4.out")
def tearDown(self):
pass
def test_default(self):
pl = pp.PrintLib4()
self.assertEqual(0, len(pl), "Assert no data")
def test_fispact_deserialize(self):
pl = pp.PrintLib4()
self.assertEqual(0, len(pl), "Assert no data")
fr = pp.PrintLib4FileRecord(self.filename)
self.assertEqual(63, fr.start_index, "Assert start index")
self.assertEqual(53181, fr.end_index, "Assert end index")
pl = pp.PrintLib4()
pl.fispact_deserialize(fr)
self.assertEqual(106236, len(pl), "Assert data")
def test_reader(self):
with pp.PrintLib4Reader(self.filename) as pl:
self.assertEqual(106236, len(pl), "Assert data")
# first entry: H 1 (n,Dtot ) 6.20087E+02+- 0.00000E+00
self.assertEqual("H1", pl[0].nuclide, "Assert first entry nuclide")
self.assertEqual("(n,Dtot )", pl[0].reaction, "Assert first entry reaction")
self.assertEqual("", pl[0].daughter, "Assert first entry daughter")
self.assertEqual(6.20087E+02, pl[0].xs, "Assert first entry xs")
self.assertEqual(0.0, pl[0].delta_xs, "Assert first entry delta xs")
# second entry: H 1 (n,Del ) 6.20024E+02+- 0.00000E+00
self.assertEqual("H1", pl[1].nuclide, "Assert 2nd entry nuclide")
self.assertEqual("(n,Del )", pl[1].reaction, "Assert 2nd entry reaction")
self.assertEqual("", pl[1].daughter, "Assert 2nd entry daughter")
self.assertEqual(6.20024E+02, pl[1].xs, "Assert 2nd entry xs")
self.assertEqual(0.0, pl[1].delta_xs, "Assert 2nd entry delta xs")
# 10th entry: H 1 (n,E ) H 1 1.04108E+00+- 1.34070E+00
self.assertEqual("H1", pl[10].nuclide, "Assert 10th entry nuclide")
self.assertEqual("(n,E )", pl[10].reaction, "Assert 10th entry reaction")
self.assertEqual("H1", pl[10].daughter, "Assert 10th entry daughter")
self.assertEqual(1.04108E+00, pl[10].xs, "Assert 10th entry xs")
self.assertEqual(1.34070E+00, pl[10].delta_xs, "Assert 10th entry delta xs")
# 11th entry: H 1 (n,g ) H 2 6.38203E-05+- 8.60406E+00
self.assertEqual("H1", pl[11].nuclide, "Assert 11th entry nuclide")
self.assertEqual("(n,g )", pl[11].reaction, "Assert 11th entry reaction")
self.assertEqual("H2", pl[11].daughter, "Assert 11th entry daughter")
self.assertEqual(6.38203E-05, pl[11].xs, "Assert 11th entry xs")
self.assertEqual(8.60406E+00, pl[11].delta_xs, "Assert 11th entry delta xs")
# last entries
#
# Ds281 (n,np ) Mt280 6.13655E-08+- 9.81500E+01 Ds281 (n,d ) Mt280 2.54049E-05+- 9.54769E+01
# Ds281 (n,p ) Mt281 6.61453E-05+- 8.11175E+01 Ds281 (n,3n ) Ds279 5.79544E-11+- 1.08533E+02
# Ds281 (n,2n ) Ds280 3.30990E-04+- 9.52456E+01 Ds281 (n,E ) Ds281 3.38367E+00+- 1.04664E+00
# Ds281 (n,n ) Ds281 5.72114E-07+- 9.38494E+01 Ds281 (n,g ) Ds282 9.58985E-02+- 1.34170E-02
            # 6th last entry: Ds281 (n,p ) Mt281 6.61453E-05+- 8.11175E+01
            self.assertEqual("Ds281", pl[-6].nuclide, "Assert 6th last entry nuclide")
            self.assertEqual("(n,p )", pl[-6].reaction, "Assert 6th last entry reaction")
            self.assertEqual("Mt281", pl[-6].daughter, "Assert 6th last entry daughter")
            self.assertEqual(6.61453E-05, pl[-6].xs, "Assert 6th last entry xs")
            self.assertEqual(8.11175E+01, pl[-6].delta_xs, "Assert 6th last entry delta xs")
            # 3rd last entry: Ds281 (n,E ) Ds281 3.38367E+00+- 1.04664E+00
            self.assertEqual("Ds281", pl[-3].nuclide, "Assert 3rd last entry nuclide")
            self.assertEqual("(n,E )", pl[-3].reaction, "Assert 3rd last entry reaction")
            self.assertEqual("Ds281", pl[-3].daughter, "Assert 3rd last entry daughter")
            self.assertEqual(3.38367E+00, pl[-3].xs, "Assert 3rd last entry xs")
            self.assertEqual(1.04664E+00, pl[-3].delta_xs, "Assert 3rd last entry delta xs")
            # last entry: Ds281 (n,g ) Ds282 9.58985E-02+- 1.34170E-02
            self.assertEqual("Ds281", pl[-1].nuclide, "Assert last entry nuclide")
            self.assertEqual("(n,g )", pl[-1].reaction, "Assert last entry reaction")
            self.assertEqual("Ds282", pl[-1].daughter, "Assert last entry daughter")
            self.assertEqual(9.58985E-02, pl[-1].xs, "Assert last entry xs")
            self.assertEqual(1.34170E-02, pl[-1].delta_xs, "Assert last entry delta xs")
|
11508774
|
import json
import logging
import os
import spacy
import torch
from PIL import Image
from torch.utils.data import Dataset
log = logging.getLogger(__name__)
nlp = spacy.load('en')
def get_tags(sentence):
return [w.text for w in nlp(sentence.strip())]
def get_index_from_annotation(image_id_2_index, annotation):
return image_id_2_index[annotation['image_id']]
def build_tags_json(annotation_path, tags_json_filename):
image_tags = dict()
image_paths = list()
annotations = json.load(open(annotation_path))
image_id_2_index = dict()
for i, image in enumerate(annotations['images']):
image_id_2_index[image['id']] = i
image_paths.append(image['file_name'])
for i, annotation in enumerate(annotations['annotations']):
if i % 1000 == 999:
print('Processed {:d}/{:d} annotations.'.format(i+1, len(annotations['annotations'])))
image_index_new = get_index_from_annotation(image_id_2_index, annotation)
if image_index_new not in image_tags:
image_tags[image_index_new] = list()
image_tags[image_index_new].extend(get_tags(annotation['caption']))
with open(tags_json_filename, 'w', encoding='utf8') as tags_json_file:
json.dump((image_paths, image_tags), tags_json_file)
return image_paths, image_tags
def load_tags_json(tags_json_filename):
with open(tags_json_filename, 'r') as tags_json_file:
image_paths, image_tags = json.load(tags_json_file)
return image_paths, image_tags
def get_tag_scores(image_tags):
tag_scores = dict()
for tags in image_tags.values():
for tag in tags:
tag = tag.lower()
tag_scores[tag] = tag_scores.get(tag, 0) + 1
return tag_scores
def get_filtered_tags(image_tags):
tag_scores = get_tag_scores(image_tags)
# Will keep only the tags that have at least 200 occurrences
filtered_tags = dict()
for image_idx, tags in image_tags.items():
filtered_image_tags = []
for tag in tags:
if tag_scores.get(tag, 0) >= 200:
filtered_image_tags.append(tag)
if filtered_image_tags:
filtered_tags[image_idx] = filtered_image_tags
return filtered_tags
def get_image_score(image_tags, image_index):
return len(image_tags.get(image_index, []))
def get_filtered_images(filtered_tags, image_indexes, min_score=3):
return [img for img in image_indexes if get_image_score(filtered_tags, img) >= min_score]
def get_images_by_tag(image_tags):
    images_by_tag = dict()
    for image_id, tags in image_tags.items():
for tag in tags:
images_by_tag.setdefault(tag, set()).add(image_id)
return images_by_tag
class MSCOCODataset(Dataset):
def __init__(self, images_directory, annotation_path, transform=None):
self.images_directory = images_directory
self.transform = transform
tags_json_filename = os.path.join(images_directory, "tags.json")
try:
self.image_paths, image_tags = load_tags_json(tags_json_filename)
except FileNotFoundError:
self.image_paths, image_tags = build_tags_json(annotation_path, tags_json_filename)
image_indexes = list(range(len(self.image_paths)))
filtered_tags = get_filtered_tags(image_tags)
self.filtered_images = get_filtered_images(filtered_tags, image_indexes)
# Because we've filtered out some of our images, we'll now map the previous image indices
# to the new ones to avoid dealing with gaps.
self.filtered_tags = self._build_filtered_tags_with_converted_image_indexes(filtered_tags)
self.images_by_tag = get_images_by_tag(self.filtered_tags)
def __len__(self):
return len(self.filtered_images)
def get_image_path(self, image_index):
return os.path.join(self.images_directory, self.image_paths[image_index])
def get_pil_image(self, index):
return Image.open(self.get_image_path(self.filtered_images[index])).convert('RGB')
def __getitem__(self, index):
image = self.get_pil_image(index)
if self.transform:
try:
image = self.transform(image)
except Exception as e:
log.warning("Could not transform image %s due to %s. Skipping.", index, e)
return torch.zeros(3, 224, 224)
return image
def _build_filtered_image_to_index_map(self):
filtered_image_to_index = dict()
filtered_image_paths = list()
for i, image_index in enumerate(self.filtered_images):
filtered_image_to_index[image_index] = i
filtered_image_paths.append(self.image_paths[image_index])
self.image_paths = filtered_image_paths
return filtered_image_to_index
def _build_filtered_tags_with_converted_image_indexes(self, filtered_tags):
filtered_image_to_index = self._build_filtered_image_to_index_map()
filtered_tags_with_converted_image_indexes = dict()
for image, tags in filtered_tags.items():
if image not in filtered_image_to_index:
continue
new_image_index = filtered_image_to_index[image]
filtered_tags_with_converted_image_indexes[new_image_index] = tags
return filtered_tags_with_converted_image_indexes
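# --- Hedged usage sketch (not in the original module): the paths below are
# assumptions; any standard MS-COCO images/captions layout should work.
if __name__ == '__main__':
    from torchvision import transforms
    dataset = MSCOCODataset(
        images_directory='data/coco/train2017',  # hypothetical path
        annotation_path='data/coco/annotations/captions_train2017.json',  # hypothetical path
        transform=transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
        ]),
    )
    print(len(dataset), 'images kept after tag filtering')
    print(dataset[0].shape)  # torch.Size([3, 224, 224])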
|
11508809
|
from django.contrib.gis import admin
from ... import models
from wazimap_ng.general.services import permissions
from wazimap_ng.general.admin.admin_base import BaseAdminModel, HistoryAdmin
from wazimap_ng.general.admin import filters
from wazimap_ng.general.admin.forms import HistoryAdminForm
@admin.register(models.Logo)
class LogoAdmin(BaseAdminModel, HistoryAdmin):
list_display = ("profile",)
list_filter = (filters.ProfileFilter,)
fieldsets = (
("", {
'fields': ('profile', 'logo', "url", )
}),
)
form = HistoryAdminForm
|
11508904
|
import sys
from flask import render_template, send_from_directory
import orangeshare
from orangeshare import Config
from orangeshare.updater import Updater
def favicon():
return send_from_directory("logo", "white.ico")
def index():
updater = Updater.get_updater()
return render_template("index.html", version=orangeshare.__version__, newer_version_available=updater.newer_version_available, newer_version=updater.newer_version)
def devices():
return render_template("devices.html")
def shortcuts():
return render_template("shortcuts.html")
def settings():
return render_template("settings.html", conf_file=Config.get_config().file)
def update():
updater = Updater.get_updater()
return render_template(
"update.html",
newer_version_available=updater.newer_version_available,
newer_version=updater.newer_version,
windows_installation="--windows-installation" in sys.argv,
gnome_extension="--gnome-extension" in sys.argv,
python_executable=sys.executable
)
|
11508949
|
import pytest
from django.contrib import auth
from vmprofile.models import RuntimeData, CPUProfile
@pytest.mark.django_db
def test_log_get_user(client):
username = 'username'
password = '<PASSWORD>'
user = auth.models.User.objects.create_user(
username,
'<EMAIL>',
password
)
rd1 = RuntimeData.objects.create(user=user)
rd2 = RuntimeData.objects.create(user=None)
response = client.get('/api/log/')
assert len(response.data['results']) == 2
client.login(username=username, password=password)
response = client.get('/api/log/')
assert len(response.data['results']) == 1
response = client.get('/api/log/?all=True')
assert len(response.data['results']) == 2
|
11508976
|
import sys
from flask.globals import request
from flask.views import View
from toga import platform
class TogaView(View):
def __init__(self, app_module):
super().__init__()
self.app_module = app_module
def dispatch_request(self, state):
# Make the Python __main__ context identify as the app being executed.
sys.modules['__main__'] = self.app_module
# Set the current platform to be `web`
platform.current_platform = 'web'
# Instantiate the app
app = self.app_module.main()
# Render the app
return app._impl.render(
state=state,
headers=dict(request.headers)
)
class App:
def __init__(self, app_module, name='toga'):
self.app_module = app_module
self.name = name
def route(self, app, path):
view = TogaView.as_view(self.name, app_module=self.app_module)
app.add_url_rule(
'{path}'.format(path=path),
defaults={'state': ''},
view_func=view
)
app.add_url_rule(
'{path}<path:state>'.format(path=path),
view_func=view
)
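# --- Hedged wiring sketch (not part of the original module): how this App is
# presumably mounted on a Flask application. `myapp` is a hypothetical module
# exposing the `main()` factory that `dispatch_request` above calls.
#
#   import flask
#   import myapp
#
#   flask_app = flask.Flask(__name__)
#   App(myapp, name='toga').route(flask_app, '/')
#   flask_app.run()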
|
11509029
|
from __future__ import unicode_literals
import contextlib
from .green import GreenPool
from .utils import get_logger
__all__ = ['ServerPool']
class ServerPool(object):
def __init__(self):
self.pool = GreenPool()
self.servers = {}
self.logger = get_logger().getChild('pool')
@contextlib.contextmanager
def new_server(self, name, server_class, *args, **kwargs):
server = server_class(*args, **kwargs)
server.logger = server.logger.getChild(name)
yield server
self.servers[name] = server
def loop(self):
for name, server in self.servers.items():
self.logger.info('Prepared "%s"' % name)
self.pool.spawn_n(server.loop)
try:
self.pool.waitall()
except (SystemExit, KeyboardInterrupt):
self.logger.info('Exit')
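# --- Hedged usage sketch (not in the original module): `EchoServer` is a
# hypothetical class with a `logger` attribute and a blocking `loop()` method,
# which is all ServerPool requires of a server.
#
#   pool = ServerPool()
#   with pool.new_server('echo', EchoServer, host='0.0.0.0', port=7) as server:
#       server.logger.info('configured')  # tweak the server before it is registered
#   pool.loop()  # spawns every registered server, waits until interrupted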
|
11509061
|
import fileinput
import sys
import re
import os
sf=os.environ.get('SF')
query=os.environ.get('QUERY')
tuples = None
for line in fileinput.input():
re_embeddings = re.search(r'^#Embeddings: (\d+)$', line)
if re_embeddings:
tuples = re_embeddings.group(1)
re_runtime = re.search(r'Query time \(seconds\): (.+)$', line)
if re_runtime:
time = re_runtime.group(1)
print(f"RapidMatch\t\t{sf}\t{query}\t{time}\t{tuples}")
|
11509085
|
import logging
import sys
import numpy as np
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from .feature_extractor import FeatureExtractor
logging.basicConfig(
stream=sys.stdout,
format='%(asctime)s %(name)s-%(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger("RF")
class RandomForestFeatureExtractor(FeatureExtractor):
def __init__(self,
name="RF",
classifier_kwargs={
'n_estimators': 30,
},
randomize=True,
one_vs_rest=True,
**kwargs):
FeatureExtractor.__init__(self,
name=name,
supervised=True,
**kwargs)
self.randomize = randomize
self.classifier_kwargs = classifier_kwargs.copy()
if not self.randomize:
self.classifier_kwargs['random_state'] = 89274
if self.use_regression:
self.one_vs_rest = False
else:
self.one_vs_rest = one_vs_rest
logger.debug("Initializing RF with the following parameters: "
" randomize %s, one_vs_rest %s, classifier_kwargs %s",
self.randomize, self.one_vs_rest, self.classifier_kwargs)
def _train_one_vs_rest(self, data, labels):
n_clusters = labels.shape[1]
n_points = data.shape[0]
classifiers = []
for i_cluster in range(n_clusters):
classifiers.append(self._create_classifier())
tmp_labels = np.zeros(n_points)
tmp_labels[labels[:, i_cluster] == 1] = 1
classifiers[i_cluster].fit(data, tmp_labels)
return classifiers
def train(self, train_set, train_labels):
# Construct and train classifier
logger.debug("Training RF with %s samples and %s features ...", train_set.shape[0], train_set.shape[1])
if self.one_vs_rest:
return self._train_one_vs_rest(train_set, train_labels)
else:
classifier = self._create_classifier()
if self.use_regression:
train_labels = train_labels.squeeze()
classifier.fit(train_set, train_labels)
return classifier
def get_feature_importance(self, classifier, data, labels):
logger.debug("Extracting feature importance using RF ...")
n_features = data.shape[1]
feature_importances = np.zeros((n_features, self.n_clusters))
for i_cluster in range(self.n_clusters):
if self.one_vs_rest:
feature_importances[:, i_cluster] = classifier[i_cluster].feature_importances_
else:
feature_importances[:, i_cluster] = classifier.feature_importances_
return feature_importances
def _create_classifier(self):
return RandomForestRegressor(**self.classifier_kwargs) if self.use_regression \
else RandomForestClassifier(**self.classifier_kwargs)
|
11509087
|
from utils.tail_call_optimized import tail_call_optimized
@tail_call_optimized
def fib_tail_recur(n, res, temp):
if n == 0:
return res
return fib_tail_recur(n - 1, temp, res + temp)
if __name__ == "__main__":
print(fib_tail_recur(1000, 0, 1))
|
11509128
|
import os
import sys
import copy
import numpy as np
import scipy.io as sio
def sparse_nmf_matlab(V, params, verbose=True, useGPU=True, gpuIndex=1, save_H=True):
"""
    Uses sparse_nmf.m to learn the parameters of a sparse
    NMF model for the nonnegative input data V.
Automatically chunks V into appropriately-sized chunks so that
Matlab can train SNMF on many input frames with a large number
of SNMF basis vectors.
Inputs:
V - shape (n_feats, n_frames) nonnegative data matrix
    params - dictionary of sparse_nmf parameters
Outputs:
W - shape (n_feats, r) nonnegative sparse NMF dictionary with unit-L2 norm columns
H - shape (r, n_frames) nonnegative activation matrix
obj - dictionary containing 'cost' (divergence+sparsity) and 'div' (divergence)
"""
# make a copy of the params dictionary, since we might modify it
params_copy = copy.deepcopy(params)
# get the shape of the data and determine the number of chunks
(n_feats, n_frames) = V.shape
r = int(params['r'])
r_for_max_frame_batch_size = 200
    max_frame_batch_size = 700000 # max number of frames that fit on a 12GB GPU when r == r_for_max_frame_batch_size
frame_batch_size = int( float(max_frame_batch_size) * (float(r_for_max_frame_batch_size)/float(r)) )
n_chunks = int(np.ceil( float(n_frames) / float(frame_batch_size) ))
if save_H:
# initialize the full H
H = np.zeros((r,n_frames))
else:
H = None
# iterate through the chunks
obj_snmf = {'obj_snmf_per_chunk': []}
initial_cost = 0.
final_cost = 0.
initial_div = 0.
final_div = 0.
    for i in range(n_chunks):
        print("")
        print("sparse NMF: processing chunk %d of %d..." % (i+1, n_chunks))
start_idx = i * frame_batch_size
end_idx = ( i + 1 ) * frame_batch_size
        W, H_tmp, obj_snmf_tmp = sparse_nmf_matlab_on_chunk(V[:,start_idx:end_idx], params_copy, verbose=verbose, useGPU=useGPU, gpuIndex=gpuIndex)
# update the current dictionary:
if 'w_update_ind' in params_copy.keys():
idx_update = np.where(params_copy['w_update_ind'])[0]
params_copy['init_w'][:, idx_update] = W[:, idx_update]
else:
params_copy['init_w'] = W
# accumulate the cost function
obj_snmf['obj_snmf_per_chunk'].append(obj_snmf_tmp) # we append instead of accum because we might run different number of iterations per chunk
initial_cost = initial_cost + obj_snmf_tmp['cost'][0]
initial_div = initial_div + obj_snmf_tmp['div'][0]
final_cost = final_cost + obj_snmf_tmp['cost'][-1]
final_div = final_div + obj_snmf_tmp['div'][-1]
if save_H:
# write the portion of H we just computed from the chunk
H[:,start_idx:end_idx] = H_tmp
print("sparse NMF: initial overall cost %e, final overall cost %e" % (initial_cost, final_cost))
print("sparse NMF: initial overall div %e, final overall div %e" % (initial_div, final_div))
obj_snmf['cost'] = [initial_cost, final_cost]
obj_snmf['div'] = [initial_div, final_div]
if n_chunks==1:
obj_snmf = obj_snmf['obj_snmf_per_chunk'][0]
return W, H, obj_snmf
def sparse_nmf_matlab_on_chunk(V, params, verbose=True, useGPU=True, gpuIndex=1):
(m,n)=V.shape
# write the V matrix to a .mat file
sio.savemat(open("V.mat","wb"),{"V":V})
# write the params dictionary to a .mat file
params_save = copy.deepcopy(params)
params_save.update({'display': float(verbose)})
sio.savemat(open("sparse_nmf_params.mat","wb"),params_save)
# run the Matlab script that uses hard-coded .mat files as input, and returns
# results in sparse_nmf_output.mat
cmd_matlab = "matlab -nosplash -nodesktop -nodisplay -r \"addpath('sparseNMF'); useGPU=%d; gpuIndex=%d; sparse_nmf_exec(); quit();\"" % (useGPU, gpuIndex)
if not verbose:
cmd_matlab = cmd_matlab + " > /dev/null"
print("Running matlab command: %s" % cmd_matlab)
err=os.system(cmd_matlab)
    if not (err==0):
        raise OSError("Error running Matlab command '%s' using os.system: error %s. If you are running Linux, you might be able to fix this problem by setting vm.overcommit=1 on your system, which will launch the Matlab process even if this python process has a large memory footprint, which can happen when there are a large number of frames and/or basis vectors. But this is a pretty hacky fix." % (cmd_matlab, err))
L=sio.loadmat(open("sparse_nmf_output.mat","rb"))
W=np.asarray(L['W'],dtype=V.dtype)
H=np.asarray(L['H'],dtype=V.dtype)
obj={'cost':np.squeeze(np.asarray(L['cost'])),'div':np.squeeze(np.asarray(L['div']))}
return W,H,obj
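# --- Hedged usage sketch (not part of the original module). Only 'r',
# 'init_w' and 'w_update_ind' are referenced by the Python code above;
# 'max_iter' and 'sparsity' are assumed sparse_nmf.m option names and may
# differ in your copy of sparse_nmf.m. Requires Matlab plus sparseNMF/ on path.
if __name__ == '__main__':
    V = np.abs(np.random.randn(257, 1000)).astype(np.float32)
    params = {
        'r': 100,                                   # number of basis vectors
        'max_iter': 100,                            # assumed option name
        'sparsity': 5,                              # assumed option name
        'init_w': np.abs(np.random.randn(257, 100)),
        'w_update_ind': np.ones(100, dtype=bool),   # update every column of W
    }
    W, H, obj = sparse_nmf_matlab(V, params, verbose=False, useGPU=False)
    print(W.shape, H.shape, obj['cost'])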
|
11509174
|
from nitorch.core import cli
from nitorch.core.cli import ParseError
class View(cli.ParsedStructure):
"""Structure that holds parameters of the `denoise_mri` command"""
files: list = []
help = r"""[nitorch] Interactive viewer for volumetric images.
usage:
nitorch view *FILES
"""
def parse(args):
"""Parse the command-line arguments of the `view` command.
Parameters
----------
args : list of str
List of arguments, without the command name.
Returns
-------
View
Filled structure
"""
struct = View()
struct.files = []
while cli.next_isvalue(args):
val, *args = args
struct.files.append(val)
while args:
if cli.next_isvalue(args):
raise ParseError(f'Value {args[0]} does not seem to belong '
f'to a tag.')
tag, *args = args
if tag in ('-h', '--help'):
            print(View.help)
return None
else:
raise ParseError(f'Unknown tag {tag}')
return struct
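# --- Hedged usage sketch (not in the original module): parse() consumes every
# leading value as a file name; '-h'/'--help' is the only recognised tag. The
# file names are hypothetical.
if __name__ == '__main__':
    struct = parse(['t1.nii.gz', 't2.nii.gz'])
    print(struct.files)  # ['t1.nii.gz', 't2.nii.gz']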
|
11509188
|
import matplotlib.pyplot as plt
from hyperion.model import ModelOutput
from hyperion.util.constants import pc
m = ModelOutput('class2_sed.rtout')
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# Total SED
sed = m.get_sed(inclination=0, aperture=-1, distance=300 * pc)
ax.loglog(sed.wav, sed.val, color='black', lw=3, alpha=0.5)
# Direct stellar photons
sed = m.get_sed(inclination=0, aperture=-1, distance=300 * pc,
component='source_emit')
ax.loglog(sed.wav, sed.val, color='blue')
# Scattered stellar photons
sed = m.get_sed(inclination=0, aperture=-1, distance=300 * pc,
component='source_scat')
ax.loglog(sed.wav, sed.val, color='teal')
# Direct dust photons
sed = m.get_sed(inclination=0, aperture=-1, distance=300 * pc,
component='dust_emit')
ax.loglog(sed.wav, sed.val, color='red')
# Scattered dust photons
sed = m.get_sed(inclination=0, aperture=-1, distance=300 * pc,
component='dust_scat')
ax.loglog(sed.wav, sed.val, color='orange')
ax.set_xlabel(r'$\lambda$ [$\mu$m]')
ax.set_ylabel(r'$\lambda F_\lambda$ [ergs/s/cm$^2$]')
ax.set_xlim(0.1, 2000.)
ax.set_ylim(2.e-16, 2.e-9)
fig.savefig('class2_sed_plot_components.png')
|
11509192
|
from . import BaseTestClass
try:
from urllib import urlencode
from urlparse import parse_qsl, urlparse
except ImportError:
from urllib.parse import parse_qsl, urlencode, urlparse
import responses
from instamojo_wrapper import Instamojo
from tests.payloads import payment_requests_payload
class TestPaymentRequests(BaseTestClass):
def setUp(self):
self.api_endpoint = 'https://www.instamojo.com/api/1.1/'
self.api = Instamojo('API-KEY', 'AUTH-TOKEN', self.api_endpoint)
@responses.activate
def test_payment_request_create(self):
data = payment_requests_payload['payment_request_create']
endpoint = self.api_endpoint + 'payment-requests/'
responses.add(
responses.POST,
endpoint,
body='{}',
content_type='application/json'
)
resp = self.api.payment_request_create(**data['request'])
self.assertEqual(resp, {})
self.assertEqual(len(responses.calls), 1)
self.assertEqual(
responses.calls[0].request.url, endpoint)
@responses.activate
def test_payment_request_status(self):
data = payment_requests_payload['payment_request_status']
endpoint = self.api_endpoint + 'payment-requests/{id}/'.format(**data['request'])
responses.add(
responses.GET,
endpoint,
body='{}',
content_type='application/json'
)
resp = self.api.payment_request_status(**data['request'])
self.assertEqual(resp, {})
self.assertEqual(len(responses.calls), 1)
self.assertEqual(
responses.calls[0].request.url, endpoint)
@responses.activate
def test_payment_requests_list(self):
data = payment_requests_payload['payment_requests_list']
endpoint = self.api_endpoint + 'payment-requests/'
responses.add(
responses.GET,
endpoint,
body='{}',
content_type='application/json'
)
resp = self.api.payment_requests_list(**data['request'])
self.assertEqual(resp, {})
self.assertEqual(len(responses.calls), 1)
self.assertEqual(
responses.calls[0].request.url, endpoint)
@responses.activate
def test_payment_requests_list_optional_params(self):
data = payment_requests_payload['payment_requests_list_optional_params']
endpoint = self.api_endpoint + 'payment-requests/'
responses.add(
responses.GET,
endpoint,
body='{}',
content_type='application/json'
)
resp = self.api.payment_requests_list(**data['request'])
self.assertEqual(resp, {})
self.assertEqual(len(responses.calls), 1)
parsed_url = urlparse(responses.calls[0].request.url)
self.assertTrue(endpoint.endswith(parsed_url.path))
self.assertDictEqual(dict(parse_qsl(parsed_url.query.strip('/'))), data['request'])
@responses.activate
def test_payment_request_payment_status(self):
data = payment_requests_payload['payment_request_payment_status']
endpoint = self.api_endpoint + 'payment-requests/{id}/{payment_id}/'.format(**data['request'])
responses.add(
responses.GET,
endpoint,
body='{}',
content_type='application/json'
)
resp = self.api.payment_request_payment_status(**data['request'])
self.assertEqual(resp, {})
self.assertEqual(len(responses.calls), 1)
self.assertEqual(
responses.calls[0].request.url, endpoint)
|
11509220
|
from django.views.generic import ListView, DetailView
from .models import Post
class PostListView(ListView):
model = Post
paginate_by = 10
class PostDetailView(DetailView):
model = Post
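# --- Hedged wiring sketch (not part of the original module): a urls.py these
# views are presumably mounted from; route shapes and names are assumptions.
#
#   from django.urls import path
#   from .views import PostListView, PostDetailView
#
#   urlpatterns = [
#       path('', PostListView.as_view(), name='post-list'),
#       path('<int:pk>/', PostDetailView.as_view(), name='post-detail'),
#   ]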
|
11509241
|
import abc
import contextlib
import os
import random
import shelve
import river.base
import river.metrics
import river.stats
import river.utils
import dill
import flask
try:
import redis
except ImportError:
pass
from . import exceptions
from . import flavors
class StorageBackend(abc.ABC):
"""Abstract storage backend.
This interface defines a set of methods to implement in order for a database to be used as a
storage backend. This allows using different databases in a homogeneous manner by proving a
single interface.
"""
@abc.abstractmethod
def __setitem__(self, key, obj):
"""Store an object."""
@abc.abstractmethod
def __getitem__(self, key):
"""Retrieve an object."""
@abc.abstractmethod
def __delitem__(self, key):
"""Remove an object from storage."""
@abc.abstractmethod
def __iter__(self):
"""Iterate over the keys."""
@abc.abstractmethod
def close(self):
"""Do something when the app shuts down."""
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
class ShelveBackend(shelve.DbfilenameShelf, StorageBackend): # type: ignore
"""Storage backend based on the shelve module from the standard library.
This should mainly be used for development and testing, but not production.
"""
class RedisBackend(StorageBackend):
def __init__(self, host, port, db):
self.r = redis.Redis(host=host, port=port, db=db)
def __setitem__(self, key, obj):
self.r[key] = dill.dumps(obj)
def __getitem__(self, key):
return dill.loads(self.r[key])
def __delitem__(self, key):
self.r.delete(key)
def __iter__(self):
for key in self.r.scan_iter():
yield key.decode()
def close(self):
return
# The following will make it so that shelve.open returns ShelveBackend instead of DbfilenameShelf
shelve.DbfilenameShelf = ShelveBackend # type: ignore
def get_db() -> StorageBackend:
if 'db' not in flask.g:
backend = flask.current_app.config['STORAGE_BACKEND']
if backend == 'shelve':
flask.g.db = shelve.open(flask.current_app.config['SHELVE_PATH'])
elif backend == 'redis':
flask.g.db = RedisBackend(
host=flask.current_app.config['REDIS_HOST'],
port=int(flask.current_app.config['REDIS_PORT']),
db=int(flask.current_app.config['REDIS_DB'])
)
else:
raise ValueError(f'Unknown storage backend: {backend}')
return flask.g.db
def close_db(e=None):
db = flask.g.pop('db', None)
if db is not None:
db.close()
def drop_db():
"""This function's responsability is to wipe out a database.
This could be implement within each StorageBackend, it's just a bit more akward because at this
point the database connection is not stored in the app anymore.
"""
backend = flask.current_app.config['STORAGE_BACKEND']
if backend == 'shelve':
path = flask.current_app.config['SHELVE_PATH']
with contextlib.suppress(FileNotFoundError):
os.remove(f'{path}.db')
elif backend == 'redis':
r = redis.Redis(
host=flask.current_app.config['REDIS_HOST'],
port=flask.current_app.config.get('REDIS_PORT', 6379),
db=flask.current_app.config.get('REDIS_DB', 0)
)
r.flushdb()
def set_flavor(flavor: str):
drop_db()
try:
flavor = flavors.allowed_flavors()[flavor]
except KeyError:
raise exceptions.UnknownFlavor
db = get_db()
db['flavor'] = flavor
init_metrics()
init_stats()
def init_stats():
db = get_db()
db['stats'] = {
'learn_mean': river.stats.Mean(),
'learn_ewm': river.stats.EWMean(.3),
'predict_mean': river.stats.Mean(),
'predict_ewm': river.stats.EWMean(.3),
}
def init_metrics():
db = get_db()
try:
flavor = db['flavor']
except KeyError:
raise exceptions.FlavorNotSet
db['metrics'] = flavor.default_metrics()
def add_model(model: river.base.Estimator, name: str = None) -> str:
db = get_db()
# Pick a name if none is given
if name is None:
while True:
name = _random_slug()
if f'models/{name}' not in db:
break
db[f'models/{name}'] = model
return name
def delete_model(name: str):
db = get_db()
    del db[f'models/{name}']
def _random_slug(rng=random) -> str:
"""
>>> rng = random.Random(42)
>>> _random_slug(rng)
'earsplitting-apricot'
"""
here = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(here, 'adjectives.txt')) as f, open(os.path.join(here, 'food_names.txt')) as g:
return f'{rng.choice(f.read().splitlines())}-{rng.choice(g.read().splitlines())}'
|
11509296
|
import cv2
import numpy as np
from depthai_sdk import toTensorResult, Previews
def decode(nnManager, packet):
data = np.squeeze(toTensorResult(packet)["Output/Transpose"])
classColors = [[0,0,0], [0,255,0]]
classColors = np.asarray(classColors, dtype=np.uint8)
outputColors = np.take(classColors, data, axis=0)
return outputColors
def draw(nnManager, data, frames):
if len(data) == 0:
return
for name, frame in frames:
if name == "color" and nnManager.source == "color" and not nnManager._fullFov:
scaleFactor = frame.shape[0] / nnManager.inputSize[1]
resizeW = int(nnManager.inputSize[0] * scaleFactor)
resized = cv2.resize(data, (resizeW, frame.shape[0])).astype(data.dtype)
offsetW = int(frame.shape[1] - nnManager.inputSize[0] * scaleFactor) // 2
tailW = frame.shape[1] - offsetW - resizeW
stacked = np.hstack((np.zeros((frame.shape[0], offsetW, 3)).astype(resized.dtype), resized, np.zeros((frame.shape[0], tailW, 3)).astype(resized.dtype)))
cv2.addWeighted(frame, 1, stacked, 0.2, 0, frame)
elif name in (Previews.color.name, Previews.nnInput.name, "host"):
cv2.addWeighted(frame, 1, cv2.resize(data, frame.shape[:2][::-1]), 0.2, 0, frame)
|
11509328
|
import subprocess
import sys
import os
import shutil
def command_exists(command):
    # `hash` is a shell builtin, so it cannot reliably be spawned with Popen;
    # shutil.which is the portable way to test for an executable on PATH.
    return shutil.which(command) is not None
if not command_exists("node"):
if len(sys.argv) < 2:
print("Run as python3 Make.py <package manager>")
sys.exit(1)
PACKAGE = " ".join(sys.argv[1:])
if not command_exists(PACKAGE.split(" ")[0]):
print("Can't find package manager %s. Some common ones to try are brew install (mac), apt-get install (most linux distros)")
sys.exit(1)
print("Node not installed, install it?")
res = input("[Y/n]")
if res == "n":
print("Cancelling installation")
sys.exit(1)
inst = subprocess.Popen((PACKAGE + " nodejs").split())
if inst.wait():
print("nodejs not found, trying node.js instead...")
inst = subprocess.Popen((PACKAGE + " node.js").split())
if not command_exists("elm"):
if len(sys.argv) < 2:
print("Run as python3 Make.py <package manager>")
sys.exit(1)
PACKAGE = " ".join(sys.argv[1:])
if not command_exists(PACKAGE.split(" ")[0]):
print("Can't find package manager %s. Some common ones to try are brew install (mac), apt-get install (most linux distros)")
sys.exit(1)
print("Elm not installed, install it?")
res = input("[Y/n]")
if res == "n":
print("Cancelling installation")
sys.exit(1)
inst = subprocess.Popen((PACKAGE + " elm").split())
for module in "ws", "ip":
a = subprocess.Popen(["npm", "ls", module], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
errorCode = a.wait()
if errorCode != 0:
print("%s isn't installed. Install it?" % module)
res = input("[Y/n]")
if res == "n":
print("Cancelling installation")
sys.exit(1)
subprocess.run(["npm", "install", module])
if not os.path.isdir("UglifyJS"):
res = input("Install uglifyjs? (Not requiered) [y/N] ").lower()
if res == "y":
os.system("git clone git://github.com/mishoo/UglifyJS.git")
|
11509335
|
description = 'Tensile machine'
group = 'optional'
devices = dict(
teload = device('nicos.devices.generic.VirtualMotor',
description = 'load value of the tensile machine',
abslimits = (-50000, 50000),
unit = 'N',
fmtstr = '%.2f',
),
# tepos = device('nicos.devices.generic.VirtualMotor',
# description = 'position value of the tensile machine',
# abslimits = (0, 70),
# # SPODI limits
# # abslimits = (-10, 55),
# unit = 'mm',
# fmtstr = '%.3f',
# ),
# teext = device('nicos.devices.generic.VirtualMotor',
# description = 'extension value of the tensile machine',
# abslimits = (-3000, 3000),
# unit = 'um',
# fmtstr = '%.3f',
# ),
)
display_order = 40
|
11509349
|
import collections
import dateutil.parser
import datetime
import logging
from . import funcs
from .common import CoordType
import json
InitialData = collections.namedtuple("InitialData", ["header", "firstrows", "rowcount", "filename"])
class ParseSettings():
"""A "model-like" object which stores all the settings we need to parse a file."""
def __init__(self):
self.coord_type = CoordType.LonLat
self.meters_conversion = 1.0
self.timestamp_field = -1
self.xcoord_field = -1
self.ycoord_field = -1
self.crime_type_fields = []
self.timestamp_format = ""
@property
def coord_type(self):
"""Lon/Lat or XY coords?
:return: :class:`CoordType` enum
"""
return self._coord_type
@coord_type.setter
def coord_type(self, value):
if not isinstance(value, CoordType):
raise ValueError("Must be instance of :class:`CoordType`")
self._coord_type = value
@property
def meters_conversion(self):
"""If in XY coords, return the factor to multiple the values by to get
to meters.
"""
return self._proj_convert
@meters_conversion.setter
def meters_conversion(self, value):
self._proj_convert = value
@staticmethod
def feet():
"""Conversion from feet to meters, see
https://en.wikipedia.org/wiki/Foot_(unit)"""
return 0.3048
@property
def timestamp_format(self):
"""The format to use to decode the timestamp. Either "" to attempt
auto-detecting, or a valid `strptime` format string.
"""
return self._ts_format
@timestamp_format.setter
def timestamp_format(self, value):
self._ts_format = value
@property
def timestamp_field(self):
"""Field from CSV file to use as the timestamp. -1==None."""
return self._ts_field
@timestamp_field.setter
def timestamp_field(self, value):
self._ts_field = value
@property
def xcoord_field(self):
"""Field from CSV file to use as the X Coord / Longitude. -1==None."""
return self._x_field
@xcoord_field.setter
def xcoord_field(self, value):
self._x_field = value
@property
def ycoord_field(self):
"""Field from CSV file to use as the Y Coord / Latitude. -1==None."""
return self._y_field
@ycoord_field.setter
def ycoord_field(self, value):
self._y_field = value
@property
def crime_type_fields(self):
"""A list (possibly empty) of fields to use a crime time identifiers."""
return self._ct_fields
@crime_type_fields.setter
def crime_type_fields(self, value):
input = list(value)
while len(input) > 0 and input[-1] == -1:
del input[-1]
self._ct_fields = input
@property
def coordinate_scaling(self):
if self.coord_type == CoordType.XY:
return self.meters_conversion
else:
return 1.0
def to_dict(self):
"""Return a dictionary storing the settings."""
return { "coord_type" : self.coord_type.name,
"meters_conversion" : self.meters_conversion,
"timestamp_field" : self.timestamp_field,
"xcoord_field" : self.xcoord_field,
"ycoord_field" : self.ycoord_field,
"crime_type_fields" : self.crime_type_fields,
"timestamp_format" : self.timestamp_format
}
@staticmethod
def from_dict(data):
out = ParseSettings()
out.coord_type = CoordType[data["coord_type"]]
out.meters_conversion = data["meters_conversion"]
out.timestamp_field = data["timestamp_field"]
out.xcoord_field = data["xcoord_field"]
out.ycoord_field = data["ycoord_field"]
out.crime_type_fields = data["crime_type_fields"]
out.timestamp_format = data["timestamp_format"]
return out
class ParseError(Exception):
"""Indicate a problem is parsing the input data. Convert to string for a
human readable reason."""
class Model():
"""The model.
:param initial_data: An instance of :class:`InitialData`
"""
def __init__(self, initial_data):
self._initial_data = initial_data
self._parsed = None
@property
def header(self):
return self._initial_data.header
@property
def firstrows(self):
return self._initial_data.firstrows
@property
def rowcount(self):
return self._initial_data.rowcount
@property
def filename(self):
return self._initial_data.filename
@property
def processed_data(self):
"""Returns `None` if parsing failed, or a triple of lists
`(timestamps, xcoords, ycoords, crime_types)` where `crime_types`
or is a list of tuples, each tuple being the crime types."""
return self._parsed
def try_parse(self, parse_settings):
"""Attempt to parse the initial data.
:param parse_settings: An instance of :class:`ParseSettings` describing
the parse settings to use.
:return: An error message, or None for success (in which case the
:attr:`processed_data` will be updated.)
"""
self._parsed = None
if parse_settings.timestamp_field == -1:
return "Need to select a field for the timestamps"
if parse_settings.xcoord_field == -1:
return "Need to select a field for the X coordinates"
if parse_settings.ycoord_field == -1:
return "Need to select a field for the Y coordinates"
if len(parse_settings.crime_type_fields) > 1:
for i in range(1, len(parse_settings.crime_type_fields)+1):
if parse_settings.crime_type_fields[-i] == -1:
return "Cannot specify a crime sub-type without the main crime type"
logger = logging.getLogger(__name__)
logger.debug("Attempting to parse the initial input data")
tp = _TryParse(parse_settings, logger=logger)
try:
ts, xs, ys, typs = [], [], [], []
for row in self._initial_data.firstrows:
data = tp.try_parse(row)
ts.append(data[0])
xs.append(data[1])
ys.append(data[2])
if len(data) > 3:
typs.append(data[3:])
except ParseErrorData as ex:
if ex.reason == "time":
return ("Cannot understand the data/time string '{}'.\n" +
"Make sure you have selected the correct field for the timestamps. " +
"If necessary, try entering a specific timestamp format.").format(ex.data)
else:
return ("Cannot understand the {} coordinate string '{}'.\n" +
"Make sure you have selected the correct field for the {} coordinates."
).format(ex.reason, ex.data, ex.reason)
self._parsed = ts, xs, ys, typs
@staticmethod
def load_full_dataset(parse_settings):
"""A coroutine. On error, yields the exception for that row."""
if (parse_settings.timestamp_field == -1 or parse_settings.xcoord_field == -1
or parse_settings.ycoord_field == -1):
raise ValueError()
if len(parse_settings.crime_type_fields) > 1:
for i in range(1, len(parse_settings.crime_type_fields)+1):
if parse_settings.crime_type_fields[-i] == -1:
raise ValueError()
logger = logging.getLogger(__name__)
logger.debug("Attempting to parse the whole data-set")
tp = _TryParse(parse_settings, logger=logger)
row = yield
row_number = 0
while True:
row_number += 1
try:
data = tp.try_parse(row)
except Exception as ex:
data = (row_number, ex)
row = yield data
class ParseErrorData(Exception):
def __init__(self, reason, data):
self.reason = reason
self.data = data
class _TryParse():
def __init__(self, parse_settings, logger=funcs.null_logger()):
self.time_parser = self._time_parser(parse_settings.timestamp_format)
self.time_field = parse_settings.timestamp_field
self.x_field = parse_settings.xcoord_field
self.y_field = parse_settings.ycoord_field
self.crime_fields = parse_settings.crime_type_fields
self.scale = parse_settings.coordinate_scaling
self._logger = logger
def try_parse(self, row):
"""Attempt to parse the row. Raises :class:`ParseError` on error."""
try:
timestamp = self.time_parser(row[self.time_field])
except Exception as ex:
self._logger.debug("Timestamp parsing error was %s / %s", type(ex), ex)
raise ParseErrorData("time", row[self.time_field])
x = self._get_coord(row, self.x_field, "X") * self.scale
y = self._get_coord(row, self.y_field, "Y") * self.scale
data = [timestamp, x, y]
for f in self.crime_fields:
data.append( row[f] )
return data
def _time_parser(self, format):
if format == "":
parser = dateutil.parser.parse
else:
parser = lambda s : datetime.datetime.strptime(s, format)
return parser
def _get_coord(self, row, field, name):
try:
return float( row[field] )
        except Exception:
raise ParseErrorData(name, row[field])
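# Usage sketch (the field indices below are illustrative, not from a real
# file); demonstrates the to_dict / from_dict round trip:
# settings = ParseSettings()
# settings.timestamp_field = 0
# settings.xcoord_field = 1
# settings.ycoord_field = 2
# round_trip = ParseSettings.from_dict(settings.to_dict())
# assert round_trip.to_dict() == settings.to_dict()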
|
11509385
|
import unittest
class TestCase(unittest.TestCase):
longMessage = True
def assertPointEqual(self, p1, p2):
self.assertNamedTupleAlmostEqual(p1, p2)
def assertNamedTupleAlmostEqual(self, t1, t2):
for field in t1._fields:
msg = 'Field {} is different'.format(field)
self.assertAlmostEqual(getattr(t1, field), getattr(t2, field), msg=msg)
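# Usage sketch (hypothetical Point namedtuple, not part of this module):
# import collections
# Point = collections.namedtuple('Point', ['x', 'y'])
# class PointTests(TestCase):
#     def test_points_close(self):
#         self.assertPointEqual(Point(1.0, 2.0), Point(1.0, 2.0000000001))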
|
11509396
|
import boto3
client = boto3.client('cloudwatch')
def lambda_handler(event, context):
t = event["reading_time"]
v = float(event["reading_value"])
print "New temperature reading: Time: %s, Temp: %.2f" % (t, v)
client.put_metric_data(
Namespace = 'Temperature Monitoring Database App',
MetricData = [{
'MetricName':'Temperature Reading',
'Timestamp': t,
'Value': v,
}]
)
return {"Status":"OK"}
|
11509408
|
def _ocamlrun(ctx):
executable = ctx.actions.declare_file(ctx.attr.name)
bytecode = ctx.file.src
ocamlrun = ctx.file._ocamlrun
template = ctx.file._runscript
ctx.actions.expand_template(
template=template,
output=executable,
substitutions={
"{ocamlrun}": ocamlrun.short_path,
"{bytecode}": bytecode.short_path,
},
is_executable=True,
)
runfiles = [ocamlrun, bytecode, executable]
return [
DefaultInfo(
default_runfiles=ctx.runfiles(files=runfiles),
executable=executable,
),
]
ocamlrun = rule(
attrs={
"src":
attr.label(
allow_single_file=True,
mandatory=True,
),
"_ocamlrun":
attr.label(
default="//reason/private/opam:ocamlrun",
allow_single_file=True,
),
"_runscript":
attr.label(
default="//reason/private/opam:ocamlrun.tpl",
allow_single_file=True,
),
},
implementation=_ocamlrun,
executable=True)
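# Hypothetical BUILD usage of the rule above (target names are assumptions):
# ocamlrun(
#     name = "hello",
#     src = ":hello.bc",
# )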
|
11509440
|
import re
from typing import List
from securify.analyses.patterns.abstract_pattern import Severity, PatternMatch, MatchComment
from securify.analyses.patterns.ast.abstract_ast_pattern import AbstractAstPattern
from securify.analyses.patterns.ast.declaration_utils import DeclarationUtils
class SolidityNamingConventionPattern(DeclarationUtils, AbstractAstPattern):
name = "Solidity Naming Convention"
description = "Reports declarations that do not adhere to Solidity's naming convention."
severity = Severity.INFO
tags = {}
def find_matches(self) -> List[PatternMatch]:
ast_root = self.get_ast_root()
for decl_name, decl_type, decl_node in self.find_named_nodes(ast_root):
# Empty names that pass compilation are ok (e.g. constructors, fallback function, etc.)
if decl_name == "":
continue
if decl_name in self.discouraged_names and "variable" in decl_type:
yield self.match_violation().with_info(
MatchComment(f"Local and state variables should not be "
f"named 'l', 'I' or 'O' as those are often "
f"indistinguishable from the numerals one "
f"and zero"),
*self.ast_node_info(decl_node)
)
decl_type = self.refine_decl_type(decl_type, decl_node)
convention = self.naming_conventions.get(decl_type, None)
if convention is None:
continue
convention_pattern, convention_name = convention
if not convention_pattern.fullmatch(decl_name):
yield self.match_violation().with_info(
MatchComment(f"The {decl_type} '{decl_name}' does not adhere "
f"to Solidity's naming convention. It should be "
f"in {convention_name}."),
*self.ast_node_info(decl_node)
)
@staticmethod
def refine_decl_type(decl_type, decl_node):
if decl_type == 'state variable' and getattr(decl_node, 'constant', False):
return 'constant'
if decl_type in {'function', 'state variable'}:
if getattr(decl_node, 'visibility', None) in {'private', 'internal'}:
return f"{decl_type} (private)"
return decl_type
upper_case = re.compile('[A-Z0-9_]+'), "UPPER_CASE"
mixed_case = re.compile('[a-z]([A-Za-z0-9]+)?_?'), "mixedCase (i.e. camelCase)"
mixed_case_private = re.compile('[_]?[a-z]([A-Za-z0-9]+)?_?'), "_mixedCase (i.e. camelCase with underscore prefix)"
cap_words = re.compile('[A-Z]([A-Za-z0-9]+)?_?'), "CapitalizedWords (i.e. PascalCase)"
discouraged_names = {"l", "I", "O"}
naming_conventions = {
'contract': cap_words,
'struct': cap_words,
'enum': cap_words,
'event': cap_words,
'constant': upper_case,
'modifier': mixed_case,
'function': mixed_case,
'function (private)': mixed_case_private,
'local variable': mixed_case,
'state variable': mixed_case,
'state variable (private)': mixed_case_private,
}
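# Convention check sketch (illustrative; each convention is a
# (compiled pattern, display name) pair):
# >>> SolidityNamingConventionPattern.mixed_case[0].fullmatch("fooBar") is not None
# True
# >>> SolidityNamingConventionPattern.cap_words[0].fullmatch("fooBar") is not None
# False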
|
11509532
|
from django.urls import path
from . import views
urlpatterns = [
path('products/', views.ProductList.as_view()),
path('reviews/', views.ReviewRatingList.as_view()),
path('orders-products/', views.OrderProductList.as_view()),
]
|
11509559
|
from .utils import get_linter_errors_list
from sitefab.utils import objdict
def test_no_meta(sitefab, empty_post):
empty_post.meta = None
results = sitefab.linter.lint(empty_post, "", sitefab)
error_list = get_linter_errors_list(results)
assert isinstance(error_list, list)
def test_meta_no_toc(sitefab, empty_post):
empty_post.meta = objdict()
results = sitefab.linter.lint(empty_post, "", sitefab)
error_list = get_linter_errors_list(results)
assert isinstance(error_list, list)
def test_e300_triggered(sitefab, empty_post):
empty_post.meta.toc = [["headline", 1, 0]]
results = sitefab.linter.lint(empty_post, "", sitefab)
error_list = get_linter_errors_list(results)
assert "E300" in error_list
def test_e300_not_triggered(sitefab, empty_post):
empty_post.meta.toc = [["headline", 2, 0]]
results = sitefab.linter.lint(empty_post, "", sitefab)
error_list = get_linter_errors_list(results)
assert "E300" not in error_list
def test_e301_triggered(sitefab, empty_post):
empty_post.meta.toc = [["headline", 2, 0]]
results = sitefab.linter.lint(empty_post, "", sitefab)
error_list = get_linter_errors_list(results)
assert "E301" in error_list
def test_e301_not_triggered(sitefab, empty_post):
empty_post.meta.toc = [["headline", 2, 0], ["headline 2", 2, 1]]
results = sitefab.linter.lint(empty_post, "", sitefab)
error_list = get_linter_errors_list(results)
assert "E301" not in error_list
|
11509567
|
import numpy as np
def graycomatrixext(im_input, im_roi_mask=None,
offsets=None, num_levels=None, gray_limits=None,
symmetric=False, normed=False, exclude_boundary=False):
"""Computes gray-level co-occurence matrix (GLCM) within a region of
interest (ROI) of an image. GLCM is a 2D histogram/matrix containing the
counts/probabilities of co-occuring intensity values at a given offset
within an ROI of an image.
Read the documentation to know the default values used for each of the
optional parameter in different scenarios.
Parameters
----------
im_input : array_like
Input single channel intensity image
im_roi_mask : array_like, optional
A binary mask specifying the region of interest within which
        to compute the GLCM. If not specified, GLCM is computed for
        the entire image.
Default: None
offsets : array_like, optional
A (num_offsets, num_image_dims) array of offset vectors
specifying the distance between the pixel-of-interest and
its neighbor. Note that the first dimension corresponds to
the rows.
Because this offset is often expressed as an angle, the
following table lists the offset values that specify common
angles for a 2D image, given the pixel distance D.
=========== =============
Angle (deg) offset [y, x]
=========== =============
0 [0 D]
45 [-D D]
90 [-D 0]
135 [-D -D]
=========== =============
        Default
        - 1D: np.array([1])
        - 2D: np.array([[0, 1], [1, 0], [1, 1], [1, -1]])
        - 3D and higher: np.identity(num_image_dims)
num_levels : unsigned int, optional
        An integer specifying the number of gray levels. For example, if
        `num_levels` is 8, the intensity values of the input image are
        scaled so they are integers between 0 and 7. The number of gray
levels determines the size of the gray-level co-occurrence matrix.
Default: 2 for binary/logical image, 32 for numeric image
gray_limits : array_like, optional
A two-element array specifying the desired input intensity range.
Intensity values in the input image will be clipped into this range.
Default: [0, 1] for boolean-valued image, [0, 255] for integer-valued
        image, and [0.0, 1.0] for real-valued image
symmetric : bool, optional
A boolean value that specifies whether or not the ordering of values
in pixel pairs is considered while creating the GLCM matrix.
        For example, if `symmetric` is True, then while calculating the
number of times the value 1 is adjacent to the value 2, both
1,2 and 2,1 pairings are counted. GLCM created in this way is
symmetric across its diagonal.
Default: False
normed : bool, optional
A boolean value specifying whether or not to normalize glcm.
Default: False
exclude_boundary : bool, optional
Specifies whether or not to exclude a pixel-pair if the
neighboring pixel in the pair is outside `im_roi_mask`.
Has an effect only when `im_roi_mask` is specified.
Default: False
Returns
-------
glcm : array_like
num_levels x num_levels x num_offsets array containing the GLCM
for each offset.
References
----------
.. [#] <NAME>., <NAME>, and <NAME>, "Textural Features
for Image Classification", IEEE Transactions on Systems, Man, and
Cybernetics, Vol. SMC-3, 1973, pp. 610-621.
.. [#] <NAME>., and <NAME>. Computer and Robot Vision:
Vol. 1, Addison-Wesley, 1992, p. 459.
"""
num_dims = len(im_input.shape)
# roi mask
if im_roi_mask is None:
# compute glcm for whole input image
im_roi_mask = np.ones_like(im_input, dtype='bool')
if im_input.shape != im_roi_mask.shape:
raise ValueError('size mismatch between input image and roi mask')
# gray_limits
if gray_limits is None:
gray_limits = _default_gray_limits(im_input)
assert(len(gray_limits) == 2 and gray_limits[0] < gray_limits[1])
# num_levels
if num_levels is None:
num_levels = _default_num_levels(im_input)
# offsets
if offsets is None:
# set default offset value
offsets = _default_offsets(im_input)
else:
# check sanity
if offsets.shape[1] != num_dims:
raise ValueError(
'Dimension mismatch between input image and offsets'
)
num_offsets = offsets.shape[0]
# scale input intensity image
im_input = im_input.astype('float')
im_input -= gray_limits[0]
    im_input /= float(gray_limits[1] - gray_limits[0])
im_input *= (num_levels - 1)
im_input = np.round(im_input).astype('int')
# compute glcm for each offset
glcm = np.zeros((num_levels, num_levels, num_offsets))
im_input_flat = np.ravel(im_input)
im_roi_mask_flat = np.ravel(im_roi_mask)
roi_coord_ind = np.nonzero(im_roi_mask)
roi_lin_ind = np.ravel_multi_index(roi_coord_ind, im_roi_mask.shape)
for i in range(num_offsets):
# compute indices of neighboring pixels by applying the offset
neigh_coord_ind = [None] * len(roi_coord_ind)
for j in range(num_dims):
neigh_coord_ind[j] = roi_coord_ind[j] + offsets[i, j]
# throw out pixels with invalid neighbors
neigh_valid = np.ones_like(neigh_coord_ind[0], dtype='bool')
for j in range(num_dims):
neigh_valid[neigh_coord_ind[j] < 0] = False
neigh_valid[neigh_coord_ind[j] >= im_roi_mask.shape[j]] = False
        for j in range(num_dims):
            neigh_coord_ind[j] = np.compress(neigh_valid, neigh_coord_ind[j],
                                             axis=0).astype(np.int64)
        neigh_lin_ind = np.ravel_multi_index(neigh_coord_ind,
                                             im_roi_mask.shape)
        valid_roi_lin_ind = np.compress(neigh_valid, roi_lin_ind, axis=0)
        if exclude_boundary:
            # drop pixel pairs whose neighbor falls outside the roi mask;
            # this mask matches the length of the already-compressed arrays
            in_roi = im_roi_mask_flat[neigh_lin_ind] != 0
            neigh_lin_ind = np.compress(in_roi, neigh_lin_ind, axis=0)
            valid_roi_lin_ind = np.compress(in_roi, valid_roi_lin_ind, axis=0)
# get intensities of pixel pairs which become coord indices in glcm
p1 = np.take(im_input_flat, valid_roi_lin_ind, axis=0)
p2 = np.take(im_input_flat, neigh_lin_ind, axis=0)
# convert pixel-pair values to linear indices in glcm
pind = np.ravel_multi_index((p1, p2), glcm.shape[:2])
# find unique linear indices and their counts
pind, pcount = np.unique(pind, return_counts=True)
# put count of each linear index in glcm
cur_glcm = np.zeros((num_levels, num_levels))
cur_glcm_flat = np.ravel(cur_glcm)
cur_glcm_flat[pind] = pcount
# symmetricize if asked for
if symmetric:
cur_glcm += cur_glcm.T
# normalize if asked
if normed:
cur_glcm /= cur_glcm.sum()
glcm[:, :, i] = cur_glcm
return glcm
def _default_gray_limits(im_input):
assert(isinstance(im_input, np.ndarray))
if np.issubdtype(im_input.dtype, np.bool_):
gray_limits = [0, 1]
elif np.issubdtype(im_input.dtype, np.integer):
gray_limits = [0, 255]
elif np.issubdtype(im_input.dtype, np.floating):
gray_limits = [0.0, 1.0]
else:
raise ValueError('The type of the argument im_input is invalid')
return gray_limits
def _default_num_levels(im_input):
assert(isinstance(im_input, np.ndarray))
if np.issubdtype(im_input.dtype, np.bool_):
num_levels = 2
elif np.issubdtype(im_input.dtype, np.number):
num_levels = 32
else:
raise ValueError('The type of the argument im_input is invalid')
return num_levels
def _default_offsets(im_input):
num_dims = len(im_input.shape)
if num_dims == 2:
offsets = np.array([
[0, 1], [1, 0], [1, 1], [1, -1]
])
else:
# TODO: need to come up with a better strategy for 3D and higher
offsets = np.identity(num_dims)
return offsets
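if __name__ == '__main__':
    # Minimal usage sketch; the image and parameter values below are
    # illustrative, not reference output.
    im = np.array([[0, 0, 1, 1],
                   [0, 0, 1, 1],
                   [0, 2, 2, 2],
                   [2, 2, 3, 3]])
    glcm = graycomatrixext(im, num_levels=4, gray_limits=[0, 3],
                           offsets=np.array([[0, 1]]))
    print(glcm[:, :, 0])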
|
11509616
|
import boto3
ec2 = boto3.client('ec2')
response = ec2.describe_instances()
for item in response['Reservations']:
for eachinstance in item['Instances']:
        print(eachinstance['InstanceId'], eachinstance['PrivateIpAddress'])
|
11509623
|
from django.views import generic
from django.apps import apps
class PageView(generic.DetailView):
model = apps.get_model('cc_cms', 'Page')
def get_template_names(self):
return [f'pages/{self.object.slug}.html', 'cc_cms/page.html']
|
11509629
|
from argparse import ArgumentParser
from sys import argv
from importnb import Notebook
from .tangle import Tangle
from .weave import Weave
parser = ArgumentParser()
parser.add_argument("file")
def main():
from .loader import Literate
ns = parser.parse_args(argv[1:])
module = Literate.load(ns.file)
from rich import print
from rich.markdown import Markdown
print(Markdown(module._repr_markdown_()))
if __name__ == "__main__":
main()
|
11509665
|
from waldur_core.logging.loggers import EventLogger, event_logger
from .models import Comment, Issue
def get_issue_scopes(issue):
project = issue.project.project
result = {project, project.customer}
if issue.resource:
result.add(issue.resource)
return result
class IssueEventLogger(EventLogger):
issue = Issue
class Meta:
event_types = (
'issue_deletion_succeeded',
'issue_update_succeeded',
'issue_creation_succeeded',
)
event_groups = {'jira': event_types}
@staticmethod
def get_scopes(event_context):
issue = event_context['issue']
return get_issue_scopes(issue)
class CommentEventLogger(EventLogger):
comment = Comment
class Meta:
event_types = (
'comment_deletion_succeeded',
'comment_update_succeeded',
'comment_creation_succeeded',
)
event_groups = {'jira': event_types}
@staticmethod
def get_scopes(event_context):
issue = event_context['comment'].issue
return get_issue_scopes(issue)
event_logger.register('jira_issue', IssueEventLogger)
event_logger.register('jira_comment', CommentEventLogger)
|
11509671
|
import sys
import argparse
from pyvideoai import config
import dataset_configs
from pyvideoai.dataloaders.utils import retry_load_images
import os
from PIL import Image
import numpy as np
def get_parser():
parser = argparse.ArgumentParser(description="Visualise success and failures. For now, only support EPIC",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-m", "--mode", type=str, default="oneclip", choices=["oneclip", "multicrop"], help="Evaluate using 1 clip or 30 clips.")
return parser
def rgb_to_greyscale_image(imgs):
#T, H, W, C = imgs.shape
    rgb_weights = [0.2989, 0.5870, 0.1140]
    # cast back to uint8 so PIL can encode the frames as a GIF
    return np.dot(imgs[..., :3], rgb_weights).round().astype(np.uint8)
def video_TC_channel_mix(imgs):
T, H, W, C = imgs.shape
imgs = imgs.transpose((0,3,1,2)).reshape(-1, H, W) # TC, H, W
    imgs = imgs.reshape(C, T, H, W).transpose((1,2,3,0)) # T, H, W, C (channels mixed across frames)
return imgs
def video_frame_blend(imgs, weighted=False):
if weighted:
weights = np.arange(len(imgs)) + 1
else:
weights = None
return np.average(imgs, axis=0, weights=weights).astype(np.uint8)
def rgb_save_jpg(img, save_path):
Image.fromarray(img).save(save_path)
def rgb_save_jpgs(imgs, save_dir):
os.makedirs(save_dir, exist_ok=True)
for i, frame in enumerate(imgs):
path = os.path.join(save_dir, f'{i:05d}.jpg')
rgb_save_jpg(frame, path)
def rgb_save_gif(imgs, save_path, optimize=True, duration=100, loop=0 ):
#T, H, W, C = imgs.shape
gif_frames = []
for gif_frame in imgs:
gif_frames.append(Image.fromarray(gif_frame))
gif_frames[0].save(save_path, save_all=True, append_images=gif_frames[1:], optimize=optimize, duration=duration, loop=loop)
def alter_img_and_save(imgs, output_dir):
greyscale_imgs = rgb_to_greyscale_image(imgs)
TCmixed_imgs = video_TC_channel_mix(imgs)
blended_img = video_frame_blend(imgs[10:15])
weighted_blended_img = video_frame_blend(imgs[10:15], weighted=True)
rgb_save_gif(imgs, os.path.join(output_dir, 'orig.gif'))
rgb_save_gif(greyscale_imgs, os.path.join(output_dir, 'grey.gif'))
rgb_save_gif(TCmixed_imgs, os.path.join(output_dir, 'TCmixed.gif'))
rgb_save_jpgs(TCmixed_imgs, os.path.join(output_dir, 'TCmixed'))
rgb_save_jpg(imgs[15], os.path.join(output_dir, 'frame.jpg'))
rgb_save_jpg(blended_img, os.path.join(output_dir, 'blended.jpg'))
rgb_save_jpg(weighted_blended_img, os.path.join(output_dir, 'weighted_blended.jpg'))
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
base_output_dir = os.path.join(config.DATA_DIR, 'visualisations', 'frame_blend_grey_scale')
# CATER
output_dir = os.path.join(base_output_dir, 'CATER_new_000014')
os.makedirs(output_dir, exist_ok=True)
cater_cfg = dataset_configs.load_cfg('cater_task2')
cater_frames_dir = os.path.join(cater_cfg.dataset_root, 'frames', 'CATER_new_000014.avi')
frames_indices = list(range(35))
frames_paths = [os.path.join(cater_frames_dir, f'{idx:05d}.jpg') for idx in frames_indices]
    imgs = retry_load_images(frames_paths, backend='cv2', bgr=False)
alter_img_and_save(imgs, output_dir)
# diving48
output_dir = os.path.join(base_output_dir, 'diving48-iv0Gu1VXAgc_00225')
os.makedirs(output_dir, exist_ok=True)
cfg = dataset_configs.load_cfg('diving48')
frames_dir = os.path.join(cfg.image_frames_dir, 'iv0Gu1VXAgc_00225.mp4')
frames_indices = list(range(35))
frames_paths = [os.path.join(frames_dir, f'{idx:05d}.jpg') for idx in frames_indices]
    imgs = retry_load_images(frames_paths, backend='cv2', bgr=False)
alter_img_and_save(imgs, output_dir)
# diving48 full
output_dir = os.path.join(base_output_dir, 'diving48-iv0Gu1VXAgc_00225-full')
os.makedirs(output_dir, exist_ok=True)
cfg = dataset_configs.load_cfg('diving48')
frames_dir = os.path.join(cfg.image_frames_dir, 'iv0Gu1VXAgc_00225.mp4')
frames_indices = list(range(80))
frames_paths = [os.path.join(frames_dir, f'{idx:05d}.jpg') for idx in frames_indices]
    imgs = retry_load_images(frames_paths, backend='cv2', bgr=False)
alter_img_and_save(imgs, output_dir)
# epic
output_dir = os.path.join(base_output_dir, 'epic-13612')
os.makedirs(output_dir, exist_ok=True)
cfg = dataset_configs.load_cfg('epic_verb')
frames_dir = os.path.join(cfg.dataset_root, 'segments324_15fps_frames', '13612')
frames_indices = list(range(35))
frames_paths = [os.path.join(frames_dir, f'{idx:05d}.jpg') for idx in frames_indices]
    imgs = retry_load_images(frames_paths, backend='cv2', bgr=False)
alter_img_and_save(imgs, output_dir)
|
11509685
|
import os
from parse import parse
from hatch.create import create_package
from hatch.files.setup import TEMPLATE
from hatch.files.vc.git import get_email, get_user
from hatch.settings import copy_default_settings
from hatch.utils import temp_chdir
from .utils import read_file
def test_name():
with temp_chdir() as d:
settings = copy_default_settings()
settings['name'] = 'Don Quixote'
create_package(d, 'ok', settings)
contents = read_file(os.path.join(d, 'setup.py'))
parsed = parse(TEMPLATE, contents)
assert parsed['name'] == 'Don Quixote'
def test_name_none():
with temp_chdir() as d:
settings = copy_default_settings()
settings['name'] = ''
create_package(d, 'ok', settings)
contents = read_file(os.path.join(d, 'setup.py'))
parsed = parse(TEMPLATE, contents)
expected_author = get_user() or '<NAME>'
assert parsed['name'] == expected_author
def test_email():
with temp_chdir() as d:
settings = copy_default_settings()
settings['email'] = 'no-reply@dev.null'
create_package(d, 'ok', settings)
contents = read_file(os.path.join(d, 'setup.py'))
parsed = parse(TEMPLATE, contents)
assert parsed['email'] == 'no-reply@dev.null'
def test_email_none():
with temp_chdir() as d:
settings = copy_default_settings()
settings['email'] = ''
create_package(d, 'ok', settings)
contents = read_file(os.path.join(d, 'setup.py'))
parsed = parse(TEMPLATE, contents)
expected_email = get_email() or '<EMAIL>'
assert parsed['email'] == expected_email
def test_package_name():
with temp_chdir() as d:
settings = copy_default_settings()
create_package(d, 'ok', settings)
contents = read_file(os.path.join(d, 'setup.py'))
parsed = parse(TEMPLATE, contents)
assert parsed['package_name'] == 'ok'
def test_package_name_normalized():
with temp_chdir() as d:
settings = copy_default_settings()
create_package(d, 'invalid-name', settings)
contents = read_file(os.path.join(d, 'setup.py'))
parsed = parse(TEMPLATE, contents)
assert parsed['package_name_normalized'] == 'invalid_name'
def test_readme():
with temp_chdir() as d:
settings = copy_default_settings()
settings['readme']['format'] = 'rst'
create_package(d, 'ok', settings)
contents = read_file(os.path.join(d, 'setup.py'))
parsed = parse(TEMPLATE, contents)
assert parsed['readme_file'] == 'README.rst'
def test_package_url():
with temp_chdir() as d:
settings = copy_default_settings()
settings['vc_url'] = 'https://github.com/me'
create_package(d, 'ok', settings)
contents = read_file(os.path.join(d, 'setup.py'))
parsed = parse(TEMPLATE, contents)
assert parsed['package_url'] == 'https://github.com/me/ok'
def test_license_single():
with temp_chdir() as d:
settings = copy_default_settings()
settings['licenses'] = ['mit']
create_package(d, 'ok', settings)
contents = read_file(os.path.join(d, 'setup.py'))
parsed = parse(TEMPLATE, contents)
assert parsed['license'] == 'MIT'
def test_license_multiple():
with temp_chdir() as d:
settings = copy_default_settings()
settings['licenses'] = ['mit', 'apache2']
create_package(d, 'ok', settings)
contents = read_file(os.path.join(d, 'setup.py'))
parsed = parse(TEMPLATE, contents)
assert parsed['license'] == 'MIT/Apache-2.0'
def test_license_classifiers_single():
with temp_chdir() as d:
settings = copy_default_settings()
settings['licenses'] = ['mit']
create_package(d, 'ok', settings)
contents = read_file(os.path.join(d, 'setup.py'))
parsed = parse(TEMPLATE, contents)
assert parsed['license_classifiers'] == "\n 'License :: OSI Approved :: MIT License',"
def test_license_classifiers_multiple():
with temp_chdir() as d:
settings = copy_default_settings()
settings['licenses'] = ['mit', 'apache2']
create_package(d, 'ok', settings)
contents = read_file(os.path.join(d, 'setup.py'))
parsed = parse(TEMPLATE, contents)
assert parsed['license_classifiers'] == (
"\n 'License :: OSI Approved :: MIT License',"
"\n 'License :: OSI Approved :: Apache Software License',"
)
def test_pyversions_single():
with temp_chdir() as d:
settings = copy_default_settings()
settings['pyversions'] = ['3.6']
create_package(d, 'ok', settings)
contents = read_file(os.path.join(d, 'setup.py'))
parsed = parse(TEMPLATE, contents)
assert parsed['pyversions'] == "\n 'Programming Language :: Python :: 3.6',"
def test_pyversions_multiple():
with temp_chdir() as d:
settings = copy_default_settings()
settings['pyversions'] = ['3.6', '2.7']
create_package(d, 'ok', settings)
contents = read_file(os.path.join(d, 'setup.py'))
parsed = parse(TEMPLATE, contents)
assert parsed['pyversions'] == (
"\n 'Programming Language :: Python :: 2.7',"
"\n 'Programming Language :: Python :: 3.6',"
)
def test_pypy():
with temp_chdir() as d:
settings = copy_default_settings()
settings['pyversions'] = ['3.6', 'pypy3']
create_package(d, 'ok', settings)
contents = read_file(os.path.join(d, 'setup.py'))
parsed = parse(TEMPLATE, contents)
assert parsed['pypy'] == "\n 'Programming Language :: Python :: Implementation :: PyPy',\n"
def test_pypy_none():
with temp_chdir() as d:
settings = copy_default_settings()
settings['pyversions'] = ['3.6']
create_package(d, 'ok', settings)
contents = read_file(os.path.join(d, 'setup.py'))
parsed = parse(TEMPLATE, contents)
assert parsed['pypy'] == '\n'
def test_cli():
with temp_chdir() as d:
settings = copy_default_settings()
settings['cli'] = True
create_package(d, 'invalid-name', settings)
contents = read_file(os.path.join(d, 'setup.py'))
parsed = parse(TEMPLATE, contents)
assert parsed['entry_point'] == (
'\n'
" 'entry_points': {\n"
" 'console_scripts': [\n"
" 'invalid-name = invalid_name.cli:invalid_name',\n"
' ],\n'
' },\n'
)
def test_cli_none():
with temp_chdir() as d:
settings = copy_default_settings()
settings['cli'] = False
create_package(d, 'invalid-name', settings)
contents = read_file(os.path.join(d, 'setup.py'))
parsed = parse(TEMPLATE, contents)
assert parsed['entry_point'] == '\n'
|
11509691
|
from typing import List
class Solution:
def maxProduct(self, nums: List[int]) -> int:
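        # f tracks the largest product of a subarray ending at the current
        # element and g the smallest; a negative number swaps their roles.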
if not nums: return 0
f = g = result = nums[0]
for n in nums[1:]:
if n < 0: f, g = g, f
f, g = max(f * n, n), min(g * n, n)
result = max(result, f)
return result
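# Example (illustrative): Solution().maxProduct([2, 3, -2, 4]) == 6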
|
11509693
|
import os
import sys
import shutil
import pytest
import cv2
import numpy as np
import tensorflow as tf
from fixtures import test_asset_dir, model_dir
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from confignet import FaceImageNormalizer, ConfigNet, LatentGAN
def get_normalized_test_image(test_asset_dir, output_shape):
filename = "img_0000000_000.png"
image_path = os.path.join(test_asset_dir, filename)
image = cv2.imread(image_path)
return FaceImageNormalizer.normalize_individual_image(image, output_shape)
@pytest.mark.parametrize("resolution", [256, 512])
def test_confignet_basic(test_asset_dir, model_dir, resolution):
model_path = os.path.join(model_dir, "confignet_%d"%resolution, "model.json")
model = ConfigNet.load(model_path)
with tf.device('/cpu:0'):
normalized_image = get_normalized_test_image(test_asset_dir, (resolution, resolution))
embedding, rotation = model.encode_images(normalized_image[np.newaxis])
decoded_image = model.generate_images(embedding, rotation)
n_blendshapes = model.config["facemodel_inputs"]["blendshape_values"][0]
neutral_expression = np.zeros((1, n_blendshapes), np.float32)
modified_embedding = model.set_facemodel_param_in_latents(embedding, "blendshape_values", neutral_expression)
        decoded_image_modified = model.generate_images(modified_embedding, rotation)
reference_value_file = os.path.join(test_asset_dir, "confignet_basic_ref_%d.npz"%resolution)
# set to True to save results as reference
save_reference = False
if save_reference:
np.savez(reference_value_file, embedding=embedding, rotation=rotation,
decoded_image=decoded_image, modified_embedding=modified_embedding,
decoded_image_modified=decoded_image_modified)
reference_vals = np.load(reference_value_file)
assert np.allclose(embedding, reference_vals["embedding"])
assert np.allclose(rotation, reference_vals["rotation"])
assert np.allclose(decoded_image, reference_vals["decoded_image"])
assert np.allclose(modified_embedding, reference_vals["modified_embedding"])
assert np.allclose(decoded_image_modified, reference_vals["decoded_image_modified"])
@pytest.mark.parametrize("resolution", [256, 512])
def test_confignet_finetune(test_asset_dir, model_dir, resolution):
model_path = os.path.join(model_dir, "confignet_%d"%resolution, "model.json")
model = ConfigNet.load(model_path)
normalized_image = get_normalized_test_image(test_asset_dir, (resolution, resolution))
with tf.device('/cpu:0'):
embedding, rotation = model.fine_tune_on_img(normalized_image[np.newaxis], n_iters=1)
decoded_image = model.generate_images(embedding, rotation)
reference_value_file = os.path.join(test_asset_dir, "confignet_finetune_ref_%d.npz"%resolution)
# set to True to save results as reference
save_reference = False
if save_reference:
np.savez(reference_value_file, embedding=embedding, rotation=rotation,
decoded_image=decoded_image)
reference_vals = np.load(reference_value_file)
assert np.allclose(embedding, reference_vals["embedding"])
assert np.allclose(rotation, reference_vals["rotation"])
assert np.allclose(decoded_image, reference_vals["decoded_image"])
@pytest.mark.parametrize("resolution", [256, 512])
def test_latent_gan(model_dir, test_asset_dir, resolution):
latentgan_model_path = os.path.join(model_dir, "latentgan_%d"%resolution, "model.json")
confignet_model_path = os.path.join(model_dir, "confignet_%d"%resolution, "model.json")
latentgan = LatentGAN.load(latentgan_model_path)
confignet = ConfigNet.load(confignet_model_path)
np.random.seed(0)
with tf.device('/cpu:0'):
confignet_latents = latentgan.generate_latents(1)
generated_imgs = confignet.generate_images(confignet_latents, np.zeros((1, 3)))
reference_value_file = os.path.join(test_asset_dir, "latentgan_ref_%d.npz"%resolution)
# set to True to save results as reference
save_reference = False
if save_reference:
np.savez(reference_value_file, generated_imgs=generated_imgs)
reference_vals = np.load(reference_value_file)
assert np.allclose(generated_imgs, reference_vals["generated_imgs"])
|
11509704
|
import os
import numpy as np
def conf_prep(mu,beta,D,w):
file = open("conf.txt","w")
file.writelines("w = %d\nb = %d\nD = %d\nmu = %f" % (w,beta,D,mu))
file.close()
# Update this with the path to msgsteiner
msgpath = "./msgsteiner"
# The interval stop value is not included in the range
mu_range = np.arange(0.002,0.004,0.002)
beta_range = np.arange(150,160,10)
# Increase the stop value to test two values of w
w_range = np.arange(2,3.1,1)
prize_file = "gbm_prize.txt"
edge_file = "../../data/iref_mitab_miscore_2013_08_12_interactome.txt"
conf_file = "conf.txt"
wt_path = "../../results/WT/"
ko_path = "../../results/KO/"
D = 10
# Create output directories if needed
if not os.path.exists(wt_path):
os.makedirs(wt_path)
if not os.path.exists(ko_path):
os.makedirs(ko_path)
for mu in mu_range:
for beta in beta_range:
for w in w_range:
conf_prep(mu,beta,D,w)
out_label = "WT_w%f_beta%d_D%d_mu%f" %(w,beta,D,mu)
os.system("python ../../scripts/forest.py --prize %s --edge %s --conf conf.txt --msgpath %s --outpath %s --outlabel %s" %(prize_file,edge_file,msgpath,wt_path,out_label))
out_label = "KO_w%f_beta%d_D%d_mu%f" %(w,beta,D,mu)
os.system("python ../../scripts/forest.py --prize %s --edge %s --conf conf.txt --msgpath %s --knockout EGFR --outpath %s --outlabel %s" %(prize_file,edge_file,msgpath,ko_path,out_label))
|
11509708
|
import unittest
from biothings_explorer.user_query_dispatcher import SingleEdgeQueryDispatcher
from .utils import get_apis
class TestSingleHopQuery(unittest.TestCase):
def test_variant2gene(self):
# test <gene, enableMF, mf>
seqd = SingleEdgeQueryDispatcher(input_cls='SequenceVariant',
input_id='DBSNP',
output_cls='Gene',
output_id='SYMBOL',
pred='located_in',
values='rs137852559')
seqd.query()
self.assertTrue('SHOX' in seqd.G)
edges = seqd.G['DBSNP:rs137852559']['SHOX']
self.assertTrue('LitVar API' in get_apis(edges))
|
11509712
|
import os
import argparse
import json
import datetime
import sys
from PyQt5 import QtGui, QtWidgets, uic
from google.cloud import pubsub_v1
qtUiFile = "gcp_qt.ui"
class Ui(QtWidgets.QMainWindow):
"""Basic Message Visualizer gui"""
def __init__(self, project_id, subscription_id):
super(Ui, self).__init__() # Call the inherited classes __init__ method
self.load_UI()
self.setWindowIcon(QtGui.QIcon('shield.ico'))
self.subscriber = pubsub_v1.SubscriberClient()
self.subscription_path = self.subscriber.subscription_path(project_id, subscription_id)
self.subscriber.subscribe(self.subscription_path, callback=self.subscription_callback)
def load_UI(self):
uic.loadUi(qtUiFile, self) # Load the .ui file
# Setup treeview
self.treeView.setRootIsDecorated(False)
self.treeView.setAlternatingRowColors(True)
self.model = QtGui.QStandardItemModel()
self.model.setHorizontalHeaderLabels(['Date/Time', 'Serial Number', 'Led Status'])
self.treeView.setModel(self.model)
def add_data(self, date_time, sno, led_status):
self.model.insertRow(0)
self.model.setData(self.model.index(0, 0), date_time)
self.model.setData(self.model.index(0, 1), sno)
self.model.setData(self.model.index(0, 2), led_status)
def subscription_callback(self, message):
"""Receive messages from the subscription"""
data = json.loads(message.data)
self.LE_project.setText(message.attributes['projectId'])
self.LE_registry.setText(message.attributes['deviceRegistryId'])
self.LE_region.setText(message.attributes['deviceRegistryLocation'])
sample_values = [message.attributes['deviceId']] + \
['{}: {}'.format(k, v) for k, v in data.items() if k != 'timestamp']
sample_time = datetime.datetime.fromtimestamp(data['timestamp'])
serialno, led_status = sample_values
self.add_data(sample_time.strftime("%H:%M:%S"), serialno, led_status)
message.ack()
def run_gcp_gui(credential_file, subscription = 'data-view'):
if credential_file is not None:
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credential_file
with open(os.environ["GOOGLE_APPLICATION_CREDENTIALS"]) as f:
credentials = json.load(f)
project = credentials['project_id']
app = QtWidgets.QApplication(sys.argv)
window = Ui(project, subscription)
window.show() # Show the GUI
sys.exit(app.exec_())
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='GCP Example Gui')
    parser.add_argument('--subscription', default='data-view', help='Topic Subscription')
parser.add_argument('--creds', help='Credential Json File')
args = parser.parse_args()
run_gcp_gui(args.creds, args.subscription)
|
11509713
|
import urllib.request
import xarray as xr
import xstac
import pandas as pd
import pathlib
import pystac
import json
url = "http://berkeleyearth.lbl.gov/auto/Global/Gridded/Complete_TMAX_EqualArea.nc"
file = "Complete_TMAX_EqualArea.nc"
if __name__ == "__main__":
if not pathlib.Path(file).exists():
urllib.request.urlretrieve(url, file)
ds = xr.open_dataset(file)
# TODO: Verify that this is correct.
# xarray is reading these as floating point numbers like 1980.125; Need
# to figure out what convention / encoding gets us from floats to datetimes.
time = pd.date_range(
pd.Timestamp(year=int(ds.time.min().item()), month=1, day=1),
freq="MS",
periods=len(ds.time),
)
ds["time"] = time
# attrs need to be JSON serializable; xarray / h5py are loading these
# as NumPy scalars; we convert them to JSON-serializable scalars.
for item in ["temperature", "climatology"]:
for k in ["valid_min", "valid_max"]:
ds[item].attrs[k] = ds[item].attrs[k].item()
properties = dict(
start_datetime=time[0].to_pydatetime(), end_datetime=time[-1].to_pydatetime()
)
template = {
"id": "id",
"type": "Feature",
"links": [],
"geometry": None, # generated by xstac
"stac_version": "1.0.0",
"properties": {
"start_datetime": time[0].isoformat() + "Z",
"end_datetime": time[-1].isoformat() + "Z",
},
"assets": {},
}
result = xstac.xarray_to_stac(
ds,
template,
temporal_dimension="time",
x_dimension="longitude",
y_dimension="latitude",
reference_system="4326", # TODO: this is probably incorrect.
)
result.assets["data"] = pystac.Asset(
href=url, title="Complete TMAX Equal Area", roles=["data"]
)
pathlib.Path("berkeley-earth-item.json").write_text(
json.dumps(result.to_dict(), indent=2)
)
|
11509814
|
import sys
from PyQt5.QtWidgets import QDialog, QFileDialog, QApplication, QMessageBox
from PyQt5.QtGui import QIcon
from PyQt5.uic import loadUi
import os
import cv2
class CtwoD(QDialog):
def __init__(self):
super().__init__()
path = os.getcwd()
os.chdir(path+'/twoD')
loadUi('twoD_module.ui', self)
self.setWindowTitle('2D Processing')
self.image = None
self.setMouseTracking(False)
self.loadButton.clicked.connect(self.load_clicked)
self.saveButton.clicked.connect(self.save_clicked)
self.drawButton.setCheckable(True)
self.drawButton.clicked.connect(self.draw_clicked)
self.seedButton.clicked.connect(self.seed_clicked)
self.thresholdButton.clicked.connect(self.threshold_clicked)
self.imgLabel_1.window = 1
self.imgLabel_2.window = 2
self.morButton.clicked.connect(self.mor_clicked)
self.kersizeEdit.setText(str(self.imgLabel_2.mor_Kersize))
self.iterEdit.setText(str(self.imgLabel_2.mor_Iter))
self.edgeButton.clicked.connect(self.edge_clicked)
self.undoButton.clicked.connect(self.undo_clicked)
self.undoButton.setIcon(QIcon('resources/undo.png'))
self.undoButton.setText('')
self.grayButton.clicked.connect(self.gray_clicked)
def resizeEvent(self, event):
super().resizeEvent(event)
if self.imgLabel_1.processedImage is not None:
self.imgLabel_1.display_image()
if self.imgLabel_2.processedImage is not None:
self.imgLabel_2.display_image()
def gray_clicked(self):
try:
self.imgLabel_1.processedImage = cv2.cvtColor(self.imgLabel_1.processedImage, cv2.COLOR_BGR2GRAY)
self.imgLabel_2.processedImage = cv2.cvtColor(self.imgLabel_2.processedImage, cv2.COLOR_BGR2GRAY)
except Exception:
            QMessageBox.about(None, 'Information', 'Already converted to grayscale!')
self.imgLabel_1.display_image()
self.imgLabel_2.display_image()
def undo_clicked(self):
self.imgLabel_1.processedImage = self.imgLabel_1.image.copy()
self.imgLabel_1.display_image()
self.imgLabel_2.processedImage = self.imgLabel_2.image.copy()
self.imgLabel_2.display_image()
def edge_clicked(self):
self.imgLabel_2.edge_detection(self.edgeBox.itemText(self.edgeBox.currentIndex()))
def mor_clicked(self):
self.imgLabel_2.mor_Kersize = int(self.kersizeEdit.text())
self.imgLabel_2.mor_Iter = int(self.iterEdit.text())
self.imgLabel_2.morthology(self.morBox.itemText(self.morBox.currentIndex()))
def threshold_clicked(self):
threshold = int(self.thresholdValue.text())
self.imgLabel_2.thresholding(threshold)
def seed_clicked(self):
self.imgLabel_2.seed = True
def draw_clicked(self, status):
if status:
self.imgLabel_2.drawornot = True
            self.drawButton.setText('Start drawing')
else:
self.imgLabel_2.drawornot = False
            self.drawButton.setText('Stop drawing')
def save_clicked(self):
fname, _filter = QFileDialog.getSaveFileName(self, 'save file', '~/untitled', "Image Files (*.jpg)")
if fname:
cv2.imwrite(fname, self.imgLabel_2.processedImage)
else:
print('Error')
def load_clicked(self):
fname, _filter = QFileDialog.getOpenFileName(self, 'open file', '~/Desktop',
"Image Files (*.jpg *.png *.bmp *.dcm *DCM)")
name, extension = os.path.splitext(fname)
print(extension)
if extension.lower() == '.dcm':
self.imgLabel_1.load_dicom_image(fname)
self.imgLabel_2.load_dicom_image(fname)
else:
try:
self.imgLabel_1.load_image(fname)
self.imgLabel_2.load_image(fname)
except Exception:
print('Error')
finally:
print('Finish loading')
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = CtwoD()
ex.show()
sys.exit(app.exec_())
|
11509848
|
from target import TargetType
import os
import re
import cv2
import time
import subprocess
import numpy as np
TARGET_DIR = './assets/'
TMP_DIR = './tmp/'
SIMILAR = {
'1': ['i', 'I', 'l', '|', ':', '!', '/', '\\'],
'2': ['z', 'Z'],
'3': [],
'4': [],
'5': ['s', 'S'],
'6': [],
'7': [],
'8': ['&'],
'9': [],
'0': ['o', 'O', 'c', 'C', 'D']
}
class UIMatcher:
@staticmethod
def match(screen, target: TargetType):
"""
在指定快照中确定货物的屏幕位置。
"""
# 获取对应货物的图片。
# 有个要点:通过截屏制作货物图片时,请在快照为实际大小的模式下截屏。
template = cv2.imread(target.value)
        # Get the template image's height and width.
th, tw = template.shape[:2]
        # Run OpenCV template matching.
res = cv2.matchTemplate(screen, template, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
rank = max_val
        # Position of the matched rectangle's top-left corner.
tl = max_loc
        # Threshold check.
if rank < 0.82:
return None
        # An arbitrary offset (15) is added here to compensate for the
        # difference between the matched location and the true position.
return tl[0] + tw / 2 + 15, tl[1] + th / 2 + 15, rank
@staticmethod
def read(filepath: str):
"""
工具函数,用于读取图片。
"""
return cv2.imread(filepath)
@staticmethod
def write(image):
"""
工具函数,用于读取图片。
"""
ts = str(int(time.time()))
return cv2.imwrite(f'{TARGET_DIR}{ts}.jpg', image)
@staticmethod
def image_to_txt(image, cleanup=False, plus=''):
        # If cleanup is True, delete the generated text file after recognition.
        # plus holds extra advanced arguments passed on to tesseract.
image_url = f'{TMP_DIR}tmp.jpg'
txt_name = f'{TMP_DIR}tmp'
txt_url = f'{txt_name}.txt'
if not os.path.exists(TMP_DIR): os.mkdir(TMP_DIR)
cv2.imwrite(image_url, image)
subprocess.check_output('tesseract --dpi 72 ' + image_url + ' ' +
                                txt_name + ' ' + plus, shell=True)  # writes a .txt file with the same base name
text = ''
with open(txt_url, 'r') as f:
text = f.read().strip()
if cleanup:
os.remove(txt_url)
os.remove(image_url)
return text
@staticmethod
def normalize_txt(txt: str):
for key, sim_list in SIMILAR.items():
for sim in sim_list:
txt = txt.replace(sim, key)
txt = re.sub(r'\D', '', txt)
return txt
@staticmethod
def cut(image, left_up, len_width=(190, 50)):
sx = left_up[0]
sy = left_up[1]
dx = left_up[0] + len_width[0]
dy = left_up[1] + len_width[1]
return image[sy:dy, sx:dx]
@staticmethod
def plain(image):
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
erode = cv2.erode(image, kernel)
dilate = cv2.dilate(erode, kernel)
return dilate
@staticmethod
def fill_color(image):
copy_image = image.copy()
h, w = image.shape[:2]
mask = np.zeros([h + 2, w + 2], np.uint8)
cv2.floodFill(copy_image, mask, (0, 0), (255, 255, 255), (100, 100, 100), (50, 50, 50),
cv2.FLOODFILL_FIXED_RANGE)
return copy_image
@staticmethod
def pre(image):
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
lower_blue = np.array([103, 43, 46])
upper_blue = np.array([103, 255, 255])
image = cv2.inRange(image, lower_blue, upper_blue)
return image
@staticmethod
def pre_building_panel(image):
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
mask_orange = cv2.inRange(image, (10, 40, 40), (40, 255, 255))
mask_blue = cv2.inRange(image, (80, 40, 40), (140, 255, 255))
image = cv2.bitwise_or(mask_orange, mask_blue)
return image
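# normalize_txt sketch (illustrative): OCR confusables map to digits and
# non-digits are dropped, e.g. UIMatcher.normalize_txt('I2O,') == '120'.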
|
11509856
|
import random
import time
class Cat:
def __init__(self):
self.cat_lives = 9
def begin_cats_life(self):
while True:
if self.cat_is_fed() and not self.lost_a_life():
                print('The Cat is doing well.')
            else:
                print('The Cat is not doing so great.')
time.sleep(3)
def cat_is_fed(self):
if random.random() > 0.5:
            print('Cat is fed')
return True
print('Cat is not fed')
return False
def lost_a_life(self):
if random.random() > 0.25:
            print('Cat landed on all fours')
return False
else:
self.cat_lives = self.cat_lives - 1
print('Cat lost a life')
print('The Cat now has {0} lives left.'.format(self.cat_lives))
return True
cat = Cat()
cat.begin_cats_life()
|
11509871
|
import zlib
from . import register_test
# Detect blacklisted files based on their extension.
blacklisted_extensions = ("dll", "exe", "dylib", "so", "sh", "class")
blacklisted_magic_numbers = (
(0x4d, 0x5a), # EXE/DLL
(0x5a, 0x4d), # Alternative for EXE/DLL
(0x7f, 0x45, 0x4c, 0x46), # UNIX elf
(0x23, 0x21), # Shebang (shell script)
(0xca, 0xfe, 0xba, 0xbe), # Java + Mach-O (dylib)
(0xca, 0xfe, 0xd0, 0x0d), # Java (packed)
(0xfe, 0xed, 0xfa, 0xce), # Mach-O
(0x46, 0x57, 0x53), # Uncompressed SWF
(0x43, 0x57, 0x53), # ZLIB compressed SWF
)
VC_DIRS = (".git", ".svn", )
@register_test(tier=1)
def test_blacklisted_files(err, package=None):
"Detects blacklisted files and extensions."
if not package:
return
flagged_files = []
flagged_for_vc = False
for name in package:
file_ = package.info(name)
if (file_["name_lower"].startswith(" ") or
file_["name_lower"].endswith(" ")):
err.error(
err_id=("packagelayout", "invalid_name"),
error="Filename starts with or ends with invalid character.",
description=["A filename within the package was found to "
"begin or end with a space. This is not "
"allowed.",
"Detected filename: '%s'" % name],
filename=name)
continue
# Simple test to ensure that the extension isn't blacklisted
extension = file_["extension"]
if extension in blacklisted_extensions:
# Note that there is a binary extension in the metadata
err.metadata["contains_binary_extension"] = True
flagged_files.append(name)
continue
if any(x in VC_DIRS for x in name.lower().split("/")):
if flagged_for_vc:
continue
flagged_for_vc = True
err.error(
err_id=("packagelayout", "version_control"),
error="Version control detected in package",
description=["A version control directory was detected in "
"your package. Version control may not be "
"included as part of a packaged app due to size "
"and potentially sensitive data.",
"Detected file: %s" % name],
filename=name)
continue
# Perform a deep inspection to detect magic numbers for known binary
# and executable file types.
try:
z = package.zf.open(name)
bytes = tuple(map(ord, z.read(4))) # Longest is 4 bytes
z.close()
except zlib.error:
# Tell the zip that there's a broken file.
package.broken_files.add(name)
return err.error(
err_id=("packagelayout", "blacklisted_files", "bad_zip"),
error="ZIP could not be read",
description="Validation failed because the ZIP package does "
"not seem to be valid. One or more files could not "
"be successfully unzipped.",
filename=name)
if any(bytes[0:len(x)] == x for x in blacklisted_magic_numbers):
# Note that there is binary content in the metadata
err.metadata["contains_binary_content"] = True
err.warning(
err_id=("testcases_packagelayout", "test_blacklisted_files",
"disallowed_file_type"),
warning="Flagged file type found",
description=["A file was found to contain flagged content "
"(i.e.: executable data, potentially "
"unauthorized scripts, etc.).",
u"The file \"%s\" contains flagged content" %
name],
filename=name)
if flagged_files:
err.warning(
err_id=("testcases_packagelayout", "test_blacklisted_files",
"disallowed_extension"),
warning="Flagged file extensions found.",
description=["Files whose names end with flagged extensions have "
"been found in the app.",
"The extension of these files are flagged because "
"they usually identify binary components, which can "
"contain malware.", "\n".join(flagged_files)])
@register_test(tier=1)
def test_layout_all(err, package):
"""Tests the well-formedness of apps."""
if not package:
return
package_namelist = list(package.zf.namelist())
package_nameset = set(package_namelist)
if len(package_namelist) != len(package_nameset):
err.error(
err_id=("testcases_packagelayout", "test_layout_all",
"duplicate_entries"),
error="Package contains duplicate entries",
description="The package contains multiple entries with the same "
"name. This practice has been banned. Try unzipping "
"and re-zipping your app and try again.")
if any(name.startswith('META-INF/') for name in package_nameset):
err.error(
err_id=("testcases_packagelayout", "test_layout_all",
"META-INF"),
error="Packages must not contain META-INF",
description="Packages must not contain a META-INF directory. This "
"directory prevents apps from being properly signed.")
|
11509883
|
from floto.api import SwfType
class WorkflowType(SwfType):
"""
Attributes
----------
default_child_policy : Optional[str]
Specify the default policy to use for the child workflow executions when a workflow
execution of this type is terminated.
Valid values: TERMINATE | REQUEST_CANCEL | ABANDON
If not assigned, then TERMINATE will be used
default_execution_start_to_close_timeout : Optional[str]
Default maximum duration in seconds for executions of this workflow type. Default can be overridden
when starting an execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision.
        Duration in seconds; an integer: 0 <= timeout < 60*60*24*365 (one year)
If not assigned, then str(60 * 60 * 24) (one day) will be used
default_lambda_role : Optional[str]
The ARN of the default IAM role to use when a workflow execution of this type invokes
AWS Lambda functions
"""
def __init__(self, *, domain, name, version,
description=None,
default_task_list='default',
default_task_start_to_close_timeout=None,
default_task_priority='0',
default_child_policy='TERMINATE',
default_execution_start_to_close_timeout=None,
default_lambda_role=None
):
if default_task_start_to_close_timeout is None:
default_task_start_to_close_timeout = str(60 * 60 * 6)
super().__init__(domain=domain, name=name, version=version,
description=description,
default_task_list=default_task_list,
default_task_start_to_close_timeout=default_task_start_to_close_timeout,
default_task_priority=default_task_priority)
self.default_child_policy = default_child_policy
self.default_execution_start_to_close_timeout = default_execution_start_to_close_timeout or str(60 * 60 * 24)
self.default_lambda_role = default_lambda_role
@property
def swf_attributes(self):
"""Class attributes as wanted by the AWS SWF API """
a = {'domain': self.domain,
'name': self.name,
'version': self.version}
if self.description is not None:
a['description'] = self.description
if self.default_task_list is not None:
a['defaultTaskList'] = {'name': self.default_task_list}
if self.default_task_start_to_close_timeout is not None:
a['defaultTaskStartToCloseTimeout'] = self.default_task_start_to_close_timeout
if self.default_task_priority is not None:
a['defaultTaskPriority'] = self.default_task_priority
if self.default_child_policy is not None:
a['defaultChildPolicy'] = self.default_child_policy
if self.default_execution_start_to_close_timeout is not None:
a['defaultExecutionStartToCloseTimeout'] = self.default_execution_start_to_close_timeout
if self.default_lambda_role is not None:
a['defaultLambdaRole'] = self.default_lambda_role
return a
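# Usage sketch (domain, name and version below are assumptions):
# wft = WorkflowType(domain='test_domain', name='my_workflow', version='1')
# wft.swf_attributes['defaultChildPolicy']  # -> 'TERMINATE'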
|
11509971
|
from functools import wraps
from typing import Any, Callable, Optional, TypeVar
import grpc
import grpc.aio
class RequestException(Exception):
"""An exception was raised when communicating with the service."""
def __init__(self, delegate: grpc.aio.AioRpcError):
self._delegate = delegate
def code(self) -> grpc.StatusCode:
return self._delegate.code()
def details(self) -> Optional[str]:
return self._delegate.details()
def debug_error_string(self) -> str:
return self._delegate.debug_error_string()
T = TypeVar("T", bound=Callable[..., Any])
def wrap_client_exception_async(f: T) -> T:
"""Decorator which will intercept exceptions coming from the server and
rewrite them into a usable format.
"""
@wraps(f)
async def inner(*args, **kwargs):
try:
return await f(*args, **kwargs)
except grpc.aio.AioRpcError as rpce:
raise RequestException(rpce)
return inner
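# Usage sketch (stub and method names are assumptions, not part of this module):
# @wrap_client_exception_async
# async def get_thing(stub, request):
#     return await stub.GetThing(request)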
|
11510025
|
from toee import *
import char_editor
def CheckPrereq(attachee, classLevelled, abilityScoreRaised):
#Sneak Attack Check
if not char_editor.has_feat(feat_sneak_attack):
return 0
if char_editor.stat_level_get(stat_level_paladin) > 1: #workaround until I figure out how to check for immunity to fear
return 0
return 1
|
11510036
|
from argparse import ArgumentParser, RawTextHelpFormatter, SUPPRESS
from swarmcg.shared import styling
from swarmcg.io.job_args import defaults
def get_analyze_args():
print(styling.header_package("Module: Optimization run analysis\n"))
formatter = lambda prog: RawTextHelpFormatter(prog, width=135, max_help_position=52)
args_parser = ArgumentParser(
description=styling.ANALYSE_DESCR,
formatter_class=formatter,
add_help=False,
usage=SUPPRESS
)
args_header = styling.sep_close + "\n| ARGUMENTS |\n" + styling.sep_close
bullet = " "
required_args = args_parser.add_argument_group(args_header + "\n\n" + bullet + "INPUT/OUTPUT")
required_args.add_argument("-opti_dir", **defaults.opti_dir.args)
required_args.add_argument("-o", **defaults.o_an.args)
optional_args = args_parser.add_argument_group(bullet + "OTHERS")
optional_args.add_argument("-plot_scale", **defaults.plot_scale.args)
optional_args.add_argument("-h", "-help", **defaults.help.args)
return args_parser
|
11510074
|
from dbnd import dbnd_cmd
class TestShowCmds(object):
def test_show_tasks(self):
dbnd_cmd("show-tasks", [])
def test_show_configs(self):
dbnd_cmd("show-configs", [])
|
11510137
|
from os.path import join
def translate(config):
result = {
'deployment_implementation': _deployment_implementation(),
'redis_url': config.get('redis_url', 'redis://localhost:6379'),
'worker_container_overrides': config.get('worker', {}),
# 'job_store_root': config['job_store_root'],
'job_results_root': config['job_results_root'],
'working_dir_root': config['working_dir_root'],
'scheduler_url': config['scheduler_url'],
'container_config_root': config['container_config_root']
}
result.update({
'artifact_archive_implementation': _archive_implementation(result['job_results_root']),
'miscellaneous_archive_implementation': _archive_implementation(result['job_results_root']),
'persisted_data_archive_implementation': _archive_implementation(result['job_results_root']),
})
return result
def _deployment_implementation():
from foundations_local_docker_scheduler_plugin.job_deployment import JobDeployment
return {
'deployment_type': JobDeployment
}
def _archive_implementation(result_end_point):
from foundations_contrib.config.mixin import archive_implementation
from foundations_contrib.local_file_system_bucket import LocalFileSystemBucket
return archive_implementation(result_end_point, LocalFileSystemBucket)
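# A minimal sketch of the input translate() expects; the keys are taken from the
# lookups above, and the values are placeholders:
#
#     translate({
#         'job_results_root': '/tmp/results',
#         'working_dir_root': '/tmp/work',
#         'scheduler_url': 'http://localhost:5000',
#         'container_config_root': '/tmp/config',
#     })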
|
11510166
|
from __future__ import absolute_import
import docker
import pytest
from docker import utils as docker_utils
@pytest.fixture(scope='session')
def docker_client():
client_cfg = docker_utils.kwargs_from_env()
return docker.APIClient(version='1.21', **client_cfg)
@pytest.fixture(scope='session')
def registry(docker_client):
cli = docker_client
cli.pull('registry', '2')
cont = cli.create_container(
'registry:2',
ports=[5000],
host_config=cli.create_host_config(
port_bindings={
5000: 5000,
},
),
)
try:
cli.start(cont)
try:
yield
finally:
cli.stop(cont)
finally:
cli.remove_container(cont, v=True, force=True)
|
11510184
|
import requests
import json
from string import Template
import framework.server.common.codes as codes
class InvalidResponse():
text = {'server_error': ''}
REQUEST_TEMPLATE = Template("http://$HOST:$PORT/request")
CONTAINER_PORT = 8081
def HandleRequest(payload, host, framework):
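    # Forward the JSON payload to the agent's /request endpoint; on any network
    # failure, synthesize an InvalidResponse carrying the error so the caller
    # always receives parseable JSON.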
resp = ""
clientUrl = REQUEST_TEMPLATE.substitute(HOST = host, PORT = CONTAINER_PORT)
print(json.dumps(payload), clientUrl)
try:
resp = requests.get(clientUrl, data=json.dumps(payload), timeout = 360)
except Exception as e:
resp = InvalidResponse()
resp.text = json.dumps({'server_error': str(e) + ' for payload = ' + json.dumps(payload)})
framework.logger.debug("Response received by server from agent "+host+" : "+resp.text)
return json.loads(resp.text)
|
11510213
|
import pytest
from wq.core.task import CompositeTaskId
tags_supply = [[], [''], ['a', 'b'], ['a', 'b', 'c']]
@pytest.mark.parametrize('tags', tags_supply)
def test_uniqueness(tags):
id1 = CompositeTaskId(*tags)
id2 = CompositeTaskId(*tags)
assert len(id1.id_seq()) == len(tags) + 1
assert len(id2.id_seq()) == len(tags) + 1
assert id1.id_seq() != id2.id_seq()
@pytest.mark.parametrize('tags', tags_supply)
def test_to_from_id_repr_is_identity(tags):
task_id = CompositeTaskId(*tags)
r = task_id.id_repr()
parsed_seq = CompositeTaskId.from_id_repr(r)
assert task_id.id_seq() == parsed_seq
@pytest.mark.parametrize('tags', tags_supply)
@pytest.mark.parametrize('n_elements', [1, 2, 3])
def test_parse_initial_segment(tags, n_elements):
task_id = CompositeTaskId(*tags)
init_seg_repr = task_id.id_repr_initial_segment(n_elements)
parsed_seg = CompositeTaskId.from_id_repr(init_seg_repr)
expected_seg = task_id.id_seq()[0:n_elements]
assert parsed_seg == expected_seg
|
11510219
|
import bpy
# ORIGINAL MIXAMO RENAME CODE FETCHED FROM BLENDER ANSWERS (I THINK), IF YOU ARE THE AUTHOR OF THIS
# CODE, PLEASE LET ME KNOW SO THAT I CAN CREDIT YOU HERE.
print('Running Mixamo Armature Renaming Script.')
bpy.ops.object.mode_set(mode = 'OBJECT')
# bpy.ops.object is always truthy, so check the actual selection instead.
if not bpy.context.selected_objects:
    print('Please select the armature')
bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
bpy.context.object.show_in_front = True
for rig in bpy.context.selected_objects:
if rig.type == 'ARMATURE':
for mesh in rig.children:
for vg in mesh.vertex_groups:
#print(vg.name)
new_name = vg.name
new_name = new_name.replace("mixamorig:","")
#print(new_name)
rig.pose.bones[vg.name].name = new_name
vg.name = new_name
for bone in rig.pose.bones:
#print(bone.name.replace("mixamorig:",""))
bone.name = bone.name.replace("mixamorig:","")
for action in bpy.data.actions:
print(action.name)
fc = action.fcurves
for f in fc:
#print(f.data_path)
f.data_path = f.data_path.replace("mixamorig:","")
# THE PURPOSE OF THE NEXT SET OF INSTRUCTIONS BELOW IS
# TO ATTEMPT TO RESIZE THE ARMATURE AND
# SCALE CORRESPONDING KEYFRAMES OF THE HIP BONE ALONG THE Y-AXIS
# AROUND A 2D CURSOR
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action = 'DESELECT')
context = bpy.context
areatype = context.area.type
context.area.type = 'GRAPH_EDITOR'
ob = context.object
bpy.context.space_data.pivot_point = 'CURSOR'
bpy.context.scene.frame_current = 1
bpy.context.space_data.cursor_position_y = 0
for action in bpy.data.actions:
hip_bone=bpy.context.object.pose.bones['Hips']
print('HIP BONE', hip_bone)
dpath = hip_bone.path_from_id("location")
hip_fcurves = [False, False, False]
hip_fcurves[0] = action.fcurves.find(dpath, index=0)
hip_fcurves[1] = action.fcurves.find(dpath, index=1)
hip_fcurves[2] = action.fcurves.find(dpath, index=2)
#action = ob.animation_data.action
#fcurve = action.fcurves[0]
#fcurve.select = True
#bpy.ops.graph.handle_type(type='VECTOR')
#context.area.type = areatype
    for hip_fcurve in hip_fcurves:
        print(hip_fcurve)
        if hip_fcurve is None:
            # fcurves.find() returns None when a channel is absent; skip it.
            continue
        hip_fcurve.select = True
# Just copied and paste console, after scaling hip bone position transform along a 2d Cursor.
bpy.ops.transform.resize(value=(1, 0.01, 1), orient_type='GLOBAL', \
orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', \
constraint_axis=(False, True, False), mirror=True, use_proportional_edit=False, \
proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, \
use_proportional_projected=False)
#for curve in action.fcurves:
#
# if curve.data_path[-len("location"):] == "location":
# print('CURVE', curve)
|
11510235
|
from garbagedog.constants import GCEventType
def test_from_gc_line():
log_line = "2015-05-26T14:45:37.987-0200: 151.126: [GC (Allocation Failure) 151.126: [DefNew: " \
"629119K->69888K(629120K), 0.0584157 secs] 1619346K->1273247K(2027264K), 0.0585007 secs] " \
"[Times: user=0.06 sys=0.00, real=0.06 secs]"
assert GCEventType.from_gc_line(log_line) == GCEventType.DEF_NEW
def test_from_gc_line_unknown():
log_line = "2015-05-26T14:45:37.987-0200 Nothing Here"
assert GCEventType.from_gc_line(log_line) == GCEventType.UNKNOWN
|
11510255
|
import unittest
from datetime import datetime
from pyjo import Model, DatetimeField
from pyjo.exceptions import FieldTypeError
class DatetimeFieldTest(unittest.TestCase):
def test_datetimefield(self):
class A(Model):
date = DatetimeField()
time = 1478390400
dt = datetime.utcfromtimestamp(time)
a = A(date=dt)
self.assertEqual(a.date, dt)
with self.assertRaises(FieldTypeError):
a.date = 'hello'
pj = a.to_dict()
self.assertEqual(pj['date'], time)
aa = A.from_json(a.to_json())
self.assertEqual(aa.date, dt)
if __name__ == '__main__':
unittest.main()
|
11510297
|
from typing import Any, List, Union
import numpy as np
from xarray import DataArray
import weldx.transformations as tf
from weldx.time import Time
from weldx.transformations import WXRotation
def check_coordinate_system_orientation(
orientation: DataArray,
orientation_expected: np.ndarray,
positive_orientation_expected: bool,
):
"""Check if the orientation of a local coordinate system is as expected.
Parameters
----------
orientation :
Orientation
orientation_expected :
Expected orientation
positive_orientation_expected :
True, if the orientation is expected to be
positive. False otherwise.
"""
# test expected positive orientation
det = np.linalg.det(orientation.sel(v=[2, 0, 1]))
assert np.all((det > 0) == positive_orientation_expected)
assert tf.is_orthogonal_matrix(orientation.values)
orientation_expected = tf.normalize(orientation_expected)
assert np.allclose(orientation, orientation_expected)
def check_coordinate_system(
lcs: tf.LocalCoordinateSystem,
orientation_expected: Union[np.ndarray, List[List[Any]], DataArray],
coordinates_expected: Union[np.ndarray, List[Any], DataArray],
positive_orientation_expected: bool = True,
time=None,
time_ref=None,
):
"""Check the values of a coordinate system.
Parameters
----------
lcs :
Coordinate system that should be checked
orientation_expected :
Expected orientation
coordinates_expected :
Expected coordinates
positive_orientation_expected :
        True, if the orientation is expected to be positive. False otherwise.
time :
A pandas.DatetimeIndex object, if the coordinate system is expected to
be time dependent. None otherwise.
time_ref:
The expected reference time
"""
orientation_expected = np.array(orientation_expected)
coordinates_expected = np.array(coordinates_expected)
if time is not None:
assert orientation_expected.ndim == 3 or coordinates_expected.ndim == 2
assert np.all(lcs.time == Time(time, time_ref))
assert lcs.reference_time == time_ref
check_coordinate_system_orientation(
lcs.orientation, orientation_expected, positive_orientation_expected
)
assert np.allclose(lcs.coordinates.values, coordinates_expected, atol=1e-9)
def check_cs_close(lcs_0, lcs_1):
"""Check if 2 coordinate systems are nearly identical.
Parameters
----------
lcs_0:
First coordinate system.
    lcs_1:
Second coordinate system.
"""
check_coordinate_system(
lcs_0,
lcs_1.orientation.data,
lcs_1.coordinates.data,
True,
lcs_1.time,
lcs_1.reference_time,
)
def r_mat_x(factors) -> np.ndarray:
"""Get an array of rotation matrices that represent a rotation around the x-axis.
The rotation angles are the provided factors times pi.
Parameters
----------
factors:
List of factors that are multiplied with pi to get the rotation angles.
Returns
-------
numpy.ndarray:
An array of rotation matrices
"""
return WXRotation.from_euler("x", np.array(factors) * np.pi).as_matrix()
def r_mat_y(factors) -> np.ndarray:
"""Get an array of rotation matrices that represent a rotation around the y-axis.
The rotation angles are the provided factors times pi.
Parameters
----------
factors:
List of factors that are multiplied with pi to get the rotation angles.
Returns
-------
numpy.ndarray:
An array of rotation matrices
"""
return WXRotation.from_euler("y", np.array(factors) * np.pi).as_matrix()
def r_mat_z(factors) -> np.ndarray:
"""Get an array of rotation matrices that represent a rotation around the z-axis.
The rotation angles are the provided factors times pi.
Parameters
----------
factors:
List of factors that are multiplied with pi to get the rotation angles.
Returns
-------
numpy.ndarray:
An array of rotation matrices
"""
return WXRotation.from_euler("z", np.array(factors) * np.pi).as_matrix()
|
11510301
|
from torch.utils.data import Dataset, DataLoader, Subset
from torch.utils.data.dataloader import default_collate
from torch.utils.data.sampler import Sampler
import torch
import torch.nn as nn
import torch.nn.functional as F
from base import BaseDataLoader
import pickle
import numpy as np
import json
import os
from tqdm import tqdm
from torch._six import container_abcs, string_classes, int_classes, FileNotFoundError
import re
import random
import copy
import itertools
import math
from multiprocessing import Pool
from functools import partial
import pdb
from model.transformers import BertTokenizer
def tokenize_table(table, config):
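    # Converts a (caption, headers, cells) triple into model inputs: token ids
    # plus type/position ids for the caption and headers, per-cell token ids,
    # and visibility masks that connect header tokens to the cells in their
    # column and cells to other cells sharing a row or column.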
caption, headers, cells = table
tokenized_meta = config.tokenizer.encode(caption, max_length=config.max_title_length, add_special_tokens=False)
tokenized_headers = [config.tokenizer.encode(z, max_length=config.max_header_length, add_special_tokens=False) for _, z in headers]
tokenized_cells = [[index, config.tokenizer.encode(cell, max_length=config.max_cell_length, add_special_tokens=False)] for index, cell in cells]
input_tok = []
input_tok_pos = []
input_tok_type = []
tokenized_meta_length = len(tokenized_meta)
input_tok += tokenized_meta
input_tok_pos += list(range(tokenized_meta_length))
input_tok_type += [0]*tokenized_meta_length
tokenized_headers_length = [len(z) for z in tokenized_headers]
input_tok += list(itertools.chain(*tokenized_headers))
input_tok_pos += list(itertools.chain(*[list(range(z)) for z in tokenized_headers_length]))
input_tok_type += [1]*sum(tokenized_headers_length)
input_ent_text = []
input_ent_type = []
column_en_map = {}
row_en_map = {}
for index, cell in cells:
if len(cell)!=0:
tokenized_cell = config.tokenizer.encode(cell, max_length=config.max_cell_length, add_special_tokens=False)
if len(tokenized_cell)==0:
continue
input_ent_text.append([index,tokenized_cell])
input_ent_type.append(3 if index[1] == 0 else 4)
e_i = len(input_ent_text)-1
if index[1] not in column_en_map:
column_en_map[index[1]] = [e_i]
else:
column_en_map[index[1]].append(e_i)
if index[0] not in row_en_map:
row_en_map[index[0]] = [e_i]
else:
row_en_map[index[0]].append(e_i)
input_length = len(input_tok) + len(input_ent_text)
meta_and_headers_length = tokenized_meta_length+sum(tokenized_headers_length)
assert len(input_tok) == meta_and_headers_length
#create input mask
meta_ent_mask = np.ones([tokenized_meta_length, len(input_ent_text)], dtype=int)
header_ent_mask = np.zeros([sum(tokenized_headers_length), len(input_ent_text)], dtype=int)
start_i = 0
header_span = {}
for h_i, (h_j, _) in enumerate(headers):
header_span[h_j] = (start_i, start_i+tokenized_headers_length[h_i])
start_i += tokenized_headers_length[h_i]
for e_i, (index, _) in enumerate(input_ent_text):
if index[1] in header_span:
header_ent_mask[header_span[index[1]][0]:header_span[index[1]][1], e_i] = 1
ent_header_mask = np.transpose(header_ent_mask)
input_tok_ent_mask = np.concatenate([meta_ent_mask, header_ent_mask], axis=0)
ent_meta_mask = np.ones([len(input_ent_text), tokenized_meta_length], dtype=int)
ent_ent_mask = np.eye(len(input_ent_text), dtype=int)
for _,e_is in column_en_map.items():
for e_i_1 in e_is:
for e_i_2 in e_is:
ent_ent_mask[e_i_1, e_i_2] = 1
for _,e_is in row_en_map.items():
for e_i_1 in e_is:
for e_i_2 in e_is:
ent_ent_mask[e_i_1, e_i_2] = 1
input_ent_mask = [np.concatenate([ent_meta_mask, ent_header_mask], axis=1), ent_ent_mask]
input_ent_cell_length = [len(x) if len(x)!=0 else 1 for _,x in input_ent_text]
if len(input_ent_cell_length) != 0:
max_cell_length = max(input_ent_cell_length)
else:
max_cell_length = 0
input_ent_text_padded = np.zeros([len(input_ent_text), max_cell_length], dtype=int)
if max_cell_length != 0:
for i,(_,x) in enumerate(input_ent_text):
input_ent_text_padded[i, :len(x)] = x
assert input_ent_mask[0].shape[1] == len(input_tok)
return [input_tok,input_tok_type,input_tok_pos,input_tok_ent_mask,len(input_tok), \
input_ent_text_padded,input_ent_cell_length,np.array(input_ent_type),input_ent_mask,len(input_ent_text)]
def tokenize_table_for_bert(table, config):
caption, headers, cells = table
tokenized_meta = config.tokenizer.encode(caption, max_length=config.max_title_length, add_special_tokens=False)
tokenized_headers = [config.tokenizer.encode(z, max_length=config.max_header_length, add_special_tokens=False) for _, z in headers]
input_tok = []
tokenized_meta_length = len(tokenized_meta)
input_tok += tokenized_meta
tokenized_headers_length = [len(z) for z in tokenized_headers]
input_tok += list(itertools.chain(*tokenized_headers))
return [input_tok,len(input_tok)]
def process_single_TR(input_data, config):
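    # Prepends [CLS] + the tokenized query to every positive and negative
    # candidate table, extending the token/entity visibility masks so the query
    # tokens can attend to all table entities.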
_, (q_id, tokenized_query, pos, neg) = input_data
pos_inputs, neg_inputs = [], []
for table in pos:
input_tok,input_tok_type,input_tok_pos,input_tok_ent_mask,input_tok_length, \
input_ent_text,input_ent_cell_length,input_ent_type,input_ent_mask,input_ent_length = table
prepend_input_tok = [config.tokenizer.cls_token_id]
prepend_input_tok_pos = [0]
prepend_input_tok_type = [5]
tokenized_query_length = len(tokenized_query)
prepend_input_tok += tokenized_query
prepend_input_tok_pos += list(range(tokenized_query_length))
prepend_input_tok_type += [5]*tokenized_query_length
input_tok = prepend_input_tok+input_tok
input_tok_pos = prepend_input_tok_pos + input_tok_pos
input_tok_type = prepend_input_tok_type + input_tok_type
input_tok_length += tokenized_query_length + 1
input_tok_ent_mask = np.concatenate([np.ones([tokenized_query_length + 1, len(input_ent_text)], dtype=int), input_tok_ent_mask], axis=0)
input_ent_mask = (np.concatenate([np.ones([len(input_ent_text), tokenized_query_length + 1], dtype=int), input_ent_mask[0]], axis=1), input_ent_mask[1])
pos_inputs.append([np.array(input_tok),np.array(input_tok_type),np.array(input_tok_pos),input_tok_ent_mask,input_tok_length, \
input_ent_text,input_ent_cell_length,input_ent_type,input_ent_mask,input_ent_length])
for table in neg:
input_tok,input_tok_type,input_tok_pos,input_tok_ent_mask,input_tok_length, \
input_ent_text,input_ent_cell_length,input_ent_type,input_ent_mask,input_ent_length = table
prepend_input_tok = [config.tokenizer.cls_token_id]
prepend_input_tok_pos = [0]
prepend_input_tok_type = [5]
tokenized_query_length = len(tokenized_query)
prepend_input_tok += tokenized_query
prepend_input_tok_pos += list(range(tokenized_query_length))
prepend_input_tok_type += [5]*tokenized_query_length
input_tok = prepend_input_tok+input_tok
input_tok_pos = prepend_input_tok_pos + input_tok_pos
input_tok_type = prepend_input_tok_type + input_tok_type
input_tok_length += tokenized_query_length + 1
input_tok_ent_mask = np.concatenate([np.ones([tokenized_query_length + 1, len(input_ent_text)], dtype=int), input_tok_ent_mask], axis=0)
input_ent_mask = (np.concatenate([np.ones([len(input_ent_text), tokenized_query_length + 1], dtype=int), input_ent_mask[0]], axis=1), input_ent_mask[1])
neg_inputs.append([np.array(input_tok),np.array(input_tok_type),np.array(input_tok_pos),input_tok_ent_mask,input_tok_length, \
input_ent_text,input_ent_cell_length,input_ent_type,input_ent_mask,input_ent_length])
return [q_id, pos_inputs, neg_inputs]
def process_single_TR_forBert(input_data, config):
_, (q_id, tokenized_query, pos, neg) = input_data
pos_inputs, neg_inputs = [], []
for table in pos:
input_tok,input_tok_length = table
prepend_input_tok = [config.tokenizer.cls_token_id]
tokenized_query_length = len(tokenized_query)
prepend_input_tok += tokenized_query + [config.tokenizer.sep_token_id]
prepend_input_tok_type = [0]*(tokenized_query_length+2)
input_tok_type = prepend_input_tok_type + [1]*len(input_tok)
input_tok = prepend_input_tok+input_tok
input_tok_length += tokenized_query_length + 2
pos_inputs.append([np.array(input_tok),np.array(input_tok_type),input_tok_length])
for table in neg:
input_tok,input_tok_length = table
prepend_input_tok = [config.tokenizer.cls_token_id]
tokenized_query_length = len(tokenized_query)
prepend_input_tok += tokenized_query + [config.tokenizer.sep_token_id]
prepend_input_tok_type = [0]*(tokenized_query_length+2)
input_tok_type = prepend_input_tok_type + [1]*len(input_tok)
input_tok = prepend_input_tok+input_tok
input_tok_length += tokenized_query_length + 2
neg_inputs.append([np.array(input_tok),np.array(input_tok_type),input_tok_length])
return [q_id, pos_inputs, neg_inputs]
class WebQueryTableDataset(Dataset):
def _preprocess(self, data_dir):
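        # Return cached preprocessed data when available; otherwise tokenize the
        # queries and tables, group them into (query, positives, negatives)
        # triples per split, and pickle the results for reuse.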
if not self.for_bert:
preprocessed_filename = os.path.join(
data_dir, "procressed_TR", self.src
)
else:
preprocessed_filename = os.path.join(
data_dir, "procressed_TR_Bert", self.src
)
preprocessed_filename += ".pickle"
if not self.force_new and os.path.exists(preprocessed_filename):
print("try loading preprocessed data from %s" % preprocessed_filename)
with open(preprocessed_filename, "rb") as f:
return pickle.load(f)
else:
print("try creating preprocessed data in %s" % preprocessed_filename)
try:
if not self.for_bert:
os.mkdir(os.path.join(data_dir, "procressed_TR"))
else:
os.mkdir(os.path.join(data_dir, "procressed_TR_Bert"))
except FileExistsError:
pass
train_q_t, dev_q_t, test_q_t = {}, {}, {}
with open(os.path.join(data_dir, "WQT.dataset.query.tsv"), "r", encoding='utf-8') as f:
next(f)
for i, q in enumerate(tqdm(f)):
q = q.strip().split('\t')
if q[2] == 'train':
train_q_t[i] = [i, self.tokenizer.encode(q[1], max_length=self.max_query_length, add_special_tokens=False), [], []]
elif q[2] == 'dev':
dev_q_t[i] = [i, self.tokenizer.encode(q[1], max_length=self.max_query_length, add_special_tokens=False), [], []]
elif q[2] == 'test':
test_q_t[i] = [i, self.tokenizer.encode(q[1], max_length=self.max_query_length, add_special_tokens=False), [], []]
else:
pdb.set_trace()
assert i == int(q[0])
with open(os.path.join(data_dir, "WQT.dataset.table.tsv"), "r", encoding='utf-8') as f:
next(f)
tables = []
for q, t in enumerate(tqdm(f)):
t = t.strip().split('\t')
if len(t) != 7:
pdb.set_trace()
cells = []
headers = []
for j,h in enumerate(t[4].split('_|_')):
if j >= 5:
break
h = h.strip()
if len(h)!=0:
headers.append([j,h])
column_num = j+1
rows = t[5].split('_||_')
for i,row in enumerate(rows):
if i >= 20:
break
for j, cell in enumerate(row.split('_|_')):
if j >= 5:
break
cell = cell.strip()
if cell != 'None' and len(cell)!=0:
cells.append([(i,j),cell])
if j+1 != column_num:
# pdb.set_trace()
cells = []
break
tables.append([
t[2] +' '+t[3],
headers,
cells
])
assert q == int(t[0])
pool = Pool(processes=5)
tokenized_tables = list(tqdm(pool.imap(partial(tokenize_table if not self.for_bert else tokenize_table_for_bert,config=self), tables, chunksize=1000),total=len(tables)))
pool.close()
with open(os.path.join(data_dir, "WQT.dataset.query-table.tsv"), "r", encoding='utf-8') as f:
next(f)
pos = 0.0
neg = 0.0
for qt in tqdm(f):
qt = qt.strip().split('\t')
q_id = int(qt[0])
t_id = int(qt[1])
label = int(qt[2])
if label == 1:
pos += 1
if q_id in train_q_t:
train_q_t[q_id][2].append(tokenized_tables[t_id])
elif q_id in dev_q_t:
dev_q_t[q_id][2].append(tokenized_tables[t_id])
elif q_id in test_q_t:
test_q_t[q_id][2].append(tokenized_tables[t_id])
else:
pdb.set_trace()
else:
neg += 1
if q_id in train_q_t:
train_q_t[q_id][3].append(tokenized_tables[t_id])
elif q_id in dev_q_t:
dev_q_t[q_id][3].append(tokenized_tables[t_id])
elif q_id in test_q_t:
test_q_t[q_id][3].append(tokenized_tables[t_id])
else:
pdb.set_trace()
print('{} train pairs, {} dev pairs, {} test pairs'.format(len(train_q_t),len(dev_q_t),len(test_q_t)))
print('pos/neg ratio: %f'%(pos/neg))
pool = Pool(processes=4)
processed_train_q_t = list(tqdm(pool.imap(partial(process_single_TR if not self.for_bert else process_single_TR_forBert,config=self), list(train_q_t.items()), chunksize=1000),total=len(train_q_t)))
processed_dev_q_t = list(tqdm(pool.imap(partial(process_single_TR if not self.for_bert else process_single_TR_forBert,config=self), list(dev_q_t.items()), chunksize=1000),total=len(dev_q_t)))
processed_test_q_t = list(tqdm(pool.imap(partial(process_single_TR if not self.for_bert else process_single_TR_forBert,config=self), list(test_q_t.items()), chunksize=1000),total=len(test_q_t)))
pool.close()
# pdb.set_trace()
with open(os.path.join(data_dir, "procressed_TR" if not self.for_bert else "procressed_TR_Bert", 'train.pickle'), 'wb') as f:
pickle.dump(processed_train_q_t, f)
with open(os.path.join(data_dir, "procressed_TR" if not self.for_bert else "procressed_TR_Bert", 'dev.pickle'), 'wb') as f:
pickle.dump(processed_dev_q_t, f)
with open(os.path.join(data_dir, "procressed_TR" if not self.for_bert else "procressed_TR_Bert", 'test.pickle'), 'wb') as f:
pickle.dump(processed_test_q_t, f)
# pdb.set_trace()
if self.src == "train":
return processed_train_q_t
elif self.src == "dev":
return processed_dev_q_t
else:
return processed_test_q_t
def __init__(self, data_dir, max_input_tok=500, src="train", max_length = [50, 50, 10, 10], force_new=False, tokenizer = None, for_bert=False):
if tokenizer is not None:
self.tokenizer = tokenizer
else:
self.tokenizer = BertTokenizer.from_pretrained('data/pre-trained_models/bert-base-uncased')
self.src = src
self.for_bert = for_bert
self.max_query_length = max_length[0]
self.max_title_length = max_length[1]
self.max_header_length = max_length[2]
self.max_cell_length = max_length[3]
self.force_new = force_new
self.max_input_tok = max_input_tok
self.data = self._preprocess(data_dir)
# pdb.set_trace()
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
class finetune_collate_fn_TR:
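    """Collate function that flattens (query, positive tables, negative tables)
    triples into padded token/entity tensors with binary relevance labels.
    """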
def __init__(self, tokenizer, is_train=True, neg_num = 5):
self.tokenizer = tokenizer
self.is_train = is_train
self.neg_num = neg_num
def __call__(self, raw_batch):
batch_q_id, batch_pos_inputs, batch_neg_inputs = zip(*raw_batch)
if self.is_train:
batch_neg_inputs = [random.sample(x, self.neg_num) if len(x)>self.neg_num else x for x in batch_neg_inputs]
batch_size = len(batch_q_id)
new_batch_q_id = []
batch_input_tok,batch_input_tok_type,batch_input_tok_pos,batch_input_tok_ent_mask,batch_input_tok_length,batch_labels = [], [], [], [], [], []
batch_input_ent_text,batch_input_ent_cell_length,batch_input_ent_type,batch_input_ent_mask,batch_input_ent_length = [], [], [], [], []
for i in range(batch_size):
if not self.is_train and (len(batch_pos_inputs[i]) == 0 or len(batch_neg_inputs[i]) == 0):
continue
if len(batch_pos_inputs[i]) != 0:
input_tok,input_tok_type,input_tok_pos,input_tok_ent_mask,input_tok_length, \
input_ent_text,input_ent_cell_length,input_ent_type,input_ent_mask,input_ent_length = zip(*batch_pos_inputs[i])
batch_input_tok += list(input_tok)
batch_input_tok_type += list(input_tok_type)
batch_input_tok_pos += list(input_tok_pos)
batch_input_tok_length += list(input_tok_length)
batch_input_tok_ent_mask += list(input_tok_ent_mask)
batch_input_ent_text += list(input_ent_text)
batch_input_ent_cell_length += list(input_ent_cell_length)
batch_input_ent_type += list(input_ent_type)
batch_input_ent_mask += list(input_ent_mask)
batch_input_ent_length += list(input_ent_length)
batch_labels += [1]*len(input_tok)
new_batch_q_id += [batch_q_id[i]]*len(input_tok)
if len(batch_neg_inputs[i]) != 0:
input_tok,input_tok_type,input_tok_pos,input_tok_ent_mask,input_tok_length, \
input_ent_text,input_ent_cell_length,input_ent_type,input_ent_mask,input_ent_length = zip(*batch_neg_inputs[i])
batch_input_tok += list(input_tok)
batch_input_tok_type += list(input_tok_type)
batch_input_tok_pos += list(input_tok_pos)
batch_input_tok_length += list(input_tok_length)
batch_input_tok_ent_mask += list(input_tok_ent_mask)
batch_input_ent_text += list(input_ent_text)
batch_input_ent_cell_length += list(input_ent_cell_length)
batch_input_ent_type += list(input_ent_type)
batch_input_ent_mask += list(input_ent_mask)
batch_input_ent_length += list(input_ent_length)
batch_labels += [0]*len(input_tok)
new_batch_q_id += [batch_q_id[i]]*len(input_tok)
batch_q_id = new_batch_q_id
batch_size = len(batch_q_id)
max_input_tok_length = max(batch_input_tok_length)
max_input_ent_length = max(batch_input_ent_length)
max_input_ent_cell_length = max([max(z) if len(z)!=0 else 0 for z in batch_input_ent_cell_length])
batch_input_tok_padded = np.zeros([batch_size, max_input_tok_length], dtype=int)
batch_input_tok_type_padded = np.zeros([batch_size, max_input_tok_length], dtype=int)
batch_input_tok_pos_padded = np.zeros([batch_size, max_input_tok_length], dtype=int)
batch_input_ent_text_padded = np.zeros([batch_size, max_input_ent_length, max_input_ent_cell_length], dtype=int)
batch_input_ent_text_length = np.ones([batch_size, max_input_ent_length], dtype=int)
batch_input_ent_type_padded = np.zeros([batch_size, max_input_ent_length], dtype=int)
batch_input_tok_mask_padded = np.zeros([batch_size, max_input_tok_length, max_input_tok_length+max_input_ent_length], dtype=int)
batch_input_ent_mask_padded = np.zeros([batch_size, max_input_ent_length, max_input_tok_length+max_input_ent_length], dtype=int)
for i, (tok_l, ent_l) in enumerate(zip(batch_input_tok_length, batch_input_ent_length)):
batch_input_tok_padded[i, :tok_l] = batch_input_tok[i]
batch_input_tok_type_padded[i, :tok_l] = batch_input_tok_type[i]
batch_input_tok_pos_padded[i, :tok_l] = batch_input_tok_pos[i]
batch_input_tok_mask_padded[i, :tok_l, :tok_l] = 1
batch_input_tok_mask_padded[i, :tok_l, max_input_tok_length:max_input_tok_length+ent_l] = batch_input_tok_ent_mask[i]
batch_input_ent_text_padded[i, :ent_l, :batch_input_ent_text[i].shape[-1]] = batch_input_ent_text[i]
batch_input_ent_text_length[i, :ent_l] = batch_input_ent_cell_length[i]
batch_input_ent_type_padded[i, :ent_l] = batch_input_ent_type[i]
batch_input_ent_mask_padded[i, :ent_l, :tok_l] = batch_input_ent_mask[i][0]
batch_input_ent_mask_padded[i, :ent_l, max_input_tok_length:max_input_tok_length+ent_l] = batch_input_ent_mask[i][1]
batch_input_tok_padded = torch.LongTensor(batch_input_tok_padded)
batch_input_tok_type_padded = torch.LongTensor(batch_input_tok_type_padded)
batch_input_tok_pos_padded = torch.LongTensor(batch_input_tok_pos_padded)
batch_input_tok_mask_padded = torch.LongTensor(batch_input_tok_mask_padded)
batch_input_ent_text_padded = torch.LongTensor(batch_input_ent_text_padded)
batch_input_ent_text_length = torch.LongTensor(batch_input_ent_text_length)
batch_input_ent_type_padded = torch.LongTensor(batch_input_ent_type_padded)
batch_input_ent_mask_padded = torch.LongTensor(batch_input_ent_mask_padded)
batch_labels = torch.FloatTensor(batch_labels)
return batch_q_id, batch_input_tok_padded, batch_input_tok_type_padded, batch_input_tok_pos_padded, batch_input_tok_mask_padded,\
batch_input_ent_text_padded, batch_input_ent_text_length, batch_input_ent_type_padded, batch_input_ent_mask_padded, batch_labels
class finetune_collate_fn_TR_forBert:
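    """BERT-only variant of the collate function: pads token ids and type ids
    with a plain attention mask and returns empty tensors for the entity fields.
    """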
def __init__(self, tokenizer, is_train=True, neg_num = 5):
self.tokenizer = tokenizer
self.is_train = is_train
self.neg_num = neg_num
def __call__(self, raw_batch):
batch_q_id, batch_pos_inputs, batch_neg_inputs = zip(*raw_batch)
if self.is_train:
batch_neg_inputs = [random.sample(x, self.neg_num) if len(x)>self.neg_num else x for x in batch_neg_inputs]
batch_size = len(batch_q_id)
new_batch_q_id = []
batch_input_tok,batch_input_tok_type,batch_input_tok_pos,batch_input_tok_ent_mask,batch_input_tok_length,batch_labels = [], [], [], [], [], []
batch_input_ent_text,batch_input_ent_cell_length,batch_input_ent_type,batch_input_ent_mask,batch_input_ent_length = [], [], [], [], []
for i in range(batch_size):
if not self.is_train and (len(batch_pos_inputs[i]) == 0 or len(batch_neg_inputs[i]) == 0):
continue
if len(batch_pos_inputs[i]) != 0:
input_tok,input_tok_type,input_tok_length = zip(*batch_pos_inputs[i])
batch_input_tok += list(input_tok)
batch_input_tok_type += list(input_tok_type)
batch_input_tok_length += list(input_tok_length)
batch_labels += [1]*len(input_tok)
new_batch_q_id += [batch_q_id[i]]*len(input_tok)
if len(batch_neg_inputs[i]) != 0:
input_tok,input_tok_type,input_tok_length = zip(*batch_neg_inputs[i])
batch_input_tok += list(input_tok)
batch_input_tok_type += list(input_tok_type)
batch_input_tok_length += list(input_tok_length)
batch_labels += [0]*len(input_tok)
new_batch_q_id += [batch_q_id[i]]*len(input_tok)
batch_q_id = new_batch_q_id
batch_size = len(batch_q_id)
max_input_tok_length = max(batch_input_tok_length)
batch_input_tok_padded = np.zeros([batch_size, max_input_tok_length], dtype=int)
batch_input_tok_type_padded = np.zeros([batch_size, max_input_tok_length], dtype=int)
batch_input_tok_mask_padded = np.zeros([batch_size, max_input_tok_length], dtype=int)
for i, tok_l in enumerate(batch_input_tok_length):
batch_input_tok_padded[i, :tok_l] = batch_input_tok[i]
batch_input_tok_type_padded[i, :tok_l] = batch_input_tok_type[i]
batch_input_tok_mask_padded[i, :tok_l] = 1
batch_input_tok_padded = torch.LongTensor(batch_input_tok_padded)
batch_input_tok_type_padded = torch.LongTensor(batch_input_tok_type_padded)
batch_input_tok_pos_padded = torch.LongTensor([])
batch_input_tok_mask_padded = torch.LongTensor(batch_input_tok_mask_padded)
batch_input_ent_text_padded = torch.LongTensor([])
batch_input_ent_text_length = torch.LongTensor([])
batch_input_ent_type_padded = torch.LongTensor([])
batch_input_ent_mask_padded = torch.LongTensor([])
batch_labels = torch.FloatTensor(batch_labels)
return batch_q_id, batch_input_tok_padded, batch_input_tok_type_padded, batch_input_tok_pos_padded, batch_input_tok_mask_padded,\
batch_input_ent_text_padded, batch_input_ent_text_length, batch_input_ent_type_padded, batch_input_ent_mask_padded, batch_labels
class TRLoader(DataLoader):
"""
    DataLoader for the table retrieval task; selects the full-table or BERT-only collate function.
"""
def __init__(
self,
dataset,
batch_size,
shuffle=True,
is_train = True,
num_workers=0,
sampler=None,
neg_num=5,
for_bert=False
):
self.shuffle = shuffle
if sampler is not None:
self.shuffle = False
self.batch_idx = 0
self.n_samples = len(dataset)
self.is_train = is_train
if not for_bert:
self.collate_fn = finetune_collate_fn_TR(dataset.tokenizer, is_train=self.is_train, neg_num=neg_num)
else:
self.collate_fn = finetune_collate_fn_TR_forBert(dataset.tokenizer, is_train=self.is_train, neg_num=neg_num)
self.init_kwargs = {
"dataset": dataset,
"batch_size": batch_size,
"shuffle": self.shuffle,
"collate_fn": self.collate_fn,
"num_workers": num_workers,
"sampler": sampler
}
super().__init__(**self.init_kwargs)
|
11510308
|
class Solution:
    def multiply(self, A, B):
        """Return the matrix product of A (m x n) and B (n x p)."""
        # zip(*B) iterates over the columns of B.
        return [[sum(a * b for a, b in zip(row, col)) for col in zip(*B)] for row in A]
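# Example (a small worked multiplication):
#
#     Solution().multiply([[1, 0], [-1, 3]], [[7, 0, 0], [0, 0, 3]])
#     # -> [[7, 0, 0], [-7, 0, 9]]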
|
11510316
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .gltf import GLTF
from .gltf_content import GLTFContent
from .gltf_exporter import GLTFExporter
from .gltf_mesh import GLTFMesh
from .gltf_parser import GLTFParser
from .gltf_reader import GLTFReader
__all__ = [
'GLTF',
'GLTFContent',
'GLTFMesh',
'GLTFReader',
'GLTFParser',
'GLTFExporter',
]
|
11510318
|
import re
import os
import ruamel.yaml as yaml
from markdown import markdown, Markdown
from .base import Command
from io import StringIO
def unmark_element(element, stream=None):
"""Markdown renderer that ignores markup and outputs only plain text.
"""
if stream is None:
stream = StringIO()
if element.text:
stream.write(element.text)
for sub in element:
unmark_element(sub, stream)
if element.tail:
stream.write(element.tail)
return stream.getvalue()
# This adds an output_format to the Markdown parser to produce plain text.
# Useful for stripping out markdown formatting. https://stackoverflow.com/a/54923798
Markdown.output_formats["plain"] = unmark_element
__md = Markdown(output_format="plain")
__md.stripTopLevelTags = False
def make_anchor(type):
"""Takes a type name in CamelCase and returns the label (anchor) equivalent which is
all lower case, as the labels Sphinx generates for headers.
"""
return type.lower()
def make_keyword(keyword):
"""Adds HTML <code> tags around input
"""
return '<code class="docutils literal">{}</code>'.format(keyword)
def make_breakable(keyword):
"""Inserts zero-width-space HTML entities after each underscore IF the keyword is long.
This is useful to allow long keywords to be split into different lines.
"""
    return '_&#8203;'.join(keyword.split('_')) if len(keyword) > 30 else keyword
def make_size_label(size, var):
"""Takes a `size` and a flag `var` indicating if the size is variable and
returns an HTML string describing it. E.g. "16+ bytes = 0x10+ (variable)"
"""
template = '{} byte{} = {}' if var == 0 else '{}+ byte{} = {}+ <i>(variable)</i>'
return template.format(size, '' if size == 1 else 's', hex(size))
def make_title(comment):
"""Replace quotes with " and use only the first line, so comments can be used inside HTML attributes.
"""
return __md.convert(comment.partition('\n')[0].replace('"', '"'))
def find_schema_locations(path):
"""Find all schema files in the given path and quickly scan them to find struct and enum definition locations.
"""
locations = {}
fullpath = os.path.abspath(path)
for root, dirs, filenames in os.walk(fullpath):
# Remove .git from the search. This functionality prevents me from using glob.glob() or pathlib.rglob().
dirs[:] = [d for d in dirs if d not in ['.git']]
for filename in filenames:
# Only interested in schema files
if filename.endswith(".cats"):
absfilename = os.path.join(root, filename)
f = open(absfilename, "r")
for linenum, line in enumerate(f):
# If line contains "struct" or "enum" followed by a keyword...
m = re.search(r'^(struct|enum) ([0-9a-zA-Z]+)\b', line)
if m:
# Store the file path and line number, indexed by the keyword
locations[m.group(2)] = (os.path.relpath(absfilename, fullpath), linenum + 1)
return locations
def find_catapult_model_locations(path):
"""Find all catapult-client model files in the given path and quickly scan them to find struct and enum definition locations.
"""
locations = {}
fullpath = os.path.abspath(path)
for root, dirs, filenames in os.walk(fullpath):
# Remove .git and _build from the search. This functionality prevents me from using glob.glob() or pathlib.rglob().
dirs[:] = [d for d in dirs if d not in ['.git', '_build']]
for filename in filenames:
# Only interested in header files
if filename.endswith(".h"):
absfilename = os.path.join(root, filename)
f = open(absfilename, "r")
for linenum, line in enumerate(f):
# Detect all struct and enum definitions. There are several checks because macros make detection complex.
m = re.search(r'\b(struct|enum class) ([a-zA-Z]+)\b', line)
if m:
locations[m.group(2)] = (os.path.relpath(absfilename, fullpath), linenum + 1)
continue
m = re.search(r'DEFINE_EMBEDDABLE_TRANSACTION\(([a-zA-Z]+)\)', line)
if m:
locations[m.group(1) + 'Transaction'] = (os.path.relpath(absfilename, fullpath), linenum + 1)
locations['Embedded' + m.group(1) + 'Transaction'] = (os.path.relpath(absfilename, fullpath), linenum + 1)
continue
m = re.search(r'DEFINE_EMBEDDABLE_TRANSACTION\(([a-zA-Z]+)##[A-Z_]+##([a-zA-Z]+)\)', line)
if m:
locations[m.group(1) + 'Address' + m.group(2) + 'Transaction'] = (os.path.relpath(absfilename, fullpath), linenum + 1)
locations[m.group(1) + 'Mosaic' + m.group(2) + 'Transaction'] = (os.path.relpath(absfilename, fullpath), linenum + 1)
locations['Embedded' + m.group(1) + 'Address' + m.group(2) + 'Transaction'] = (os.path.relpath(absfilename, fullpath), linenum + 1)
locations['Embedded' + m.group(1) + 'Mosaic' + m.group(2) + 'Transaction'] = (os.path.relpath(absfilename, fullpath), linenum + 1)
continue
m = re.search(r'DEFINE_ACCOUNT_RESTRICTION_TRANSACTION\(([a-zA-Z]+)', line)
if m:
locations['Account' + m.group(1) + 'RestrictionTransaction'] = (os.path.relpath(absfilename, fullpath), linenum + 1)
locations['EmbeddedAccount' + m.group(1) + 'RestrictionTransaction'] = (os.path.relpath(absfilename, fullpath), linenum + 1)
continue
# Add special case for AggregateCompleteTransaction and AggregateBondedTransaction, which are actually just AggregateTransactions
if 'AggregateTransaction' in locations:
locations['AggregateCompleteTransaction'] = locations['AggregateTransaction']
locations['AggregateBondedTransaction'] = locations['AggregateTransaction']
return locations
class SerializationCommand(Command):
"""Command to parse the Symbol or NEM schema YAML file into a ReStructured Text documentation page.
Reads configuration from `self.config`, parses a YAML file produced by `catbuffer-parser`
and prints to stdout a RST file.
"""
def calc_total_type_size(self, element):
"""Receives a YAML object describing a type and returns a tuple containing:
first: The total size in bytes of the type (calculated recursively)
second: A flag indicating if the size is variable. When 1, the calculated
size is actually the _minimum_ type size.
A type is deemed variable if its size is not an integer (e.g: message[message_size]).
"""
size = 0
var = 0
if element['type'] == 'byte':
if isinstance(element['size'], int):
size = int(element['size'])
else:
var = 1
elif element['type'] == 'enum':
size = int(element['size'])
else:
# Structs
for f in element['layout']:
if f['type'] == 'byte':
field_size = 1
field_var = 0
else:
(field_size, field_var) = self.calc_total_type_size(self.types[f['type']])
if isinstance(f.get('size', 1), int):
# If the field has an int `size`, OR it has no size (and then we assume it's 1)
size += field_size * int(f.get('size', 1))
var += field_var
else:
# If the size is not an int it means it is variable
var = 1
return (size, var)
def field_description(self, field):
"""Receives a YAML object describing a struct field and returns an HTML string.
For example: "byte[4]", or an HTML hyperlink to another type in the page.
"""
if field['type'] == 'byte':
return 'byte[{}]'.format(field['size'])
else:
# Add the array indicator only if the field has a size
return '<a href="{}#{}" title="{}">{}</a>{}'.format(self.page_prefix, make_anchor(field['type']), make_title(self.types[field['type']]['comments']), field['type'],
'' if field.get('size', 0) == 0 else '​[{}]'.format(field['size']))
def type_description(self, element):
"""Receives a YAML object describing a type and returns an HTML string containing
a hyperlink to its section in the page.
"""
return '<a href="{}#{}" title="{}">{}</a>'.format(self.page_prefix, make_anchor(element['name']), make_title(element['comments']), element['name'])
def parse_comment(self, comment):
"""Build proper HTML comments:
- Turn markdown present in the comments into HTML code.
- Detect type names and turn them into links.
- Turn line breaks into <br/> (RST is very picky wrt indentation)
"""
output = ''
first_line = True
for line in comment.split('\n'):
if first_line:
first_line = False
else:
output += '<br/>'
ignore_keywords = False
for word in line.split():
if word[0] == '[':
# Do not look for keywords inside [markdown links](like this one).
ignore_keywords = True
# Separate any non-keyword chars (like parenthesis or punctuation) before looking words up
# (And special-case an optional ending 's' for things like "MosaicId's")
m = re.search(r'^([^a-zA-Z]*)([a-zA-Z]+)([^a-rt-zA-Z]*)$', word)
if not ignore_keywords and m and m.group(2) in self.types:
output += m.group(1) + self.type_description(self.types[m.group(2)]) + m.group(3)
elif word == '\\note':
output += '<br/><b>Note:</b>'
else:
output += word
output += ' '
if word[-1] == ')':
ignore_keywords = False
return markdown(output).replace('<code>', '<code class="docutils literal">')
def print_rst_header(self, element, index_file):
"""Prints RST header common to Enums and Structs, including:
- A RST label so this type can be referenced from other places
- A RST header so Sphinx adds <h3> tags
"""
name = element['name']
print('.. _{}:'.format(make_anchor(name)), file=index_file)
print(file=index_file)
print(name, file=index_file)
print('=' * len(name), file=index_file)
print(file=index_file)
def print_md_header(self, element, index_file):
"""Prints MD header common to Enums and Structs
"""
name = element['name']
print('### {}'.format(name), file=index_file)
print('{:.no_toc}', file=index_file)
print(file=index_file)
def print_html_header(self, element, size, var, html_file):
"""Prints HTML header common to Enums and Structs, including:
        - A two-cell HTML table containing the type's description on the left and
an info box on the right with type size and code links.
"""
name = element['name']
print('<table style="width: 100%;"><tr><td>', file=html_file)
print(' <div class="side-info"><table>', file=html_file)
print(' <tr><td class="side-info-icon">↕</td><td>Size: {}</td></tr>'.format(make_size_label(size, var)), file=html_file)
if name in self.type_schema_locations:
print(' <tr><td class="side-info-icon"><i class="fab fa-github"></i></td>'
'<td><a href="https://github.com/symbol/catbuffer-schemas/blob/main/{}/{}#L{}">schema</a></td></tr>'.format( \
self.source_api, self.type_schema_locations[name][0], self.type_schema_locations[name][1]), file=html_file)
if name in self.type_catapult_locations:
print(' <tr><td class="side-info-icon"><i class="fab fa-github"></i></td>'
'<td><a href="https://github.com/symbol/catapult-client/blob/main/{}#L{}">catapult model</a></td></tr>'.format(
self.type_catapult_locations[name][0], self.type_catapult_locations[name][1]), file=html_file)
print(' </table></div>', file=html_file)
print(self.parse_comment(element['comments']), file=html_file)
print('</td></tr></table>', file=html_file)
print(file=html_file)
def print_type(self, element, index_file):
"""Adds a row to the basic types HTML table.
"""
print(' <div id="{}"><b>{}</b></div>'.format(make_anchor(element['name']), element['name']), file=index_file)
print(' <div>{} {}byte{}</div>'.
format(element['size'], 'u' if element['signedness'] == 'unsigned' else '', 's' if element['size'] > 1 else ''), file=index_file)
print(' <div class="description">{}</div>'.format(self.parse_comment(element['comments'])), file=index_file)
def print_enum(self, element):
"""Describes an Enum type using the common header and an HTML table with all the values.
"""
filename = os.path.join(self.config['dst_path'], '{}.html'.format(element['name']))
with open(filename, 'w') as html_file:
self.print_html_header(element, element['size'], 0, html_file)
print('<div class="big-table3">', file=html_file)
for v in element['values']:
print('<div><b>{}</b></div>'.format(hex(v['value'])), file=html_file)
print('<div>{}</div>'.format(make_keyword(v['name'])), file=html_file)
print('<div class="description">{}</div>'.format(self.parse_comment(v['comments'])), file=html_file)
print('</div>', file=html_file)
def print_struct_content(self, element, indent, html_file):
"""Internal method to describe a Struct type. It calls itself to describe inlined structs, increasing the `indent` parameter.
It prints HTML table rows so it assumes an HTML table with the correct number of columns has been already opened.
        Each entry in the `layout` array is printed as a row with name, type and description, except for inline structs, which are expanded
in place.
"""
for v in element['layout']:
comment = ''
disposition = v.get('disposition') or ''
if disposition == 'inline':
(size, var) = self.calc_total_type_size(self.types[v['type']])
size_label = make_size_label(size, var)
# Manual handling of up to 3 indentation levels.
            # If we ever have more than this many levels, this will need to be made more generic.
if indent < 1:
print(' <div style="grid-column: 1 / span 6;" class="big-table-section">{}<span style="float:right">{}</span></div>'.
format(self.type_description(self.types[v['type']]), size_label), file=html_file)
elif indent < 2:
print(' <div class="indentation-cell-h"></div>'
'<div style="grid-column: 2 / span 5;" class="big-table-section">{}<span style="float:right">{}</span></div>'.
format(self.type_description(self.types[v['type']]), size_label), file=html_file)
else:
print(' <div class="indentation-cell-h"></div><div class="indentation-cell-h"></div>'
'<div style="grid-column: 3 / span 4;" class="big-table-section">{}<span style="float:right">{}</span></div>'.
format(self.type_description(self.types[v['type']]), size_label), file=html_file)
self.print_struct_content(self.types[v['type']], indent + 1, html_file)
continue
elif disposition == 'const':
type = self.types.get(v['type'])
if type and type['type'] == 'enum':
comment = '<b>const</b> {} ({})<br/>'.format(make_keyword(v['value']), make_keyword(hex(type['values_dict'][v['value']]['value'])))
else:
comment = '<b>const</b> {}<br/>'.format(make_keyword(v['value']))
elif disposition == 'reserved':
comment = '<b>reserved</b> {}<br/>'.format(make_keyword(v['value']))
comment += self.parse_comment(v['comments'])
print(' <div{}> </div>'.format('' if indent < 1 else ' class="indentation-cell"'), file=html_file)
print(' <div{}> </div>'.format('' if indent < 2 else ' class="indentation-cell"'), file=html_file)
print(' <div{}> </div>'.format('' if indent < 3 else ' class="indentation-cell"'), file=html_file)
print(' <div>{}</div>'.format(make_keyword(make_breakable(v['name']))), file=html_file)
print(' <div>{}</div>'.format(self.field_description(v)), file=html_file)
print(' <div class="description">{}</div>'.format(comment), file=html_file)
def print_struct(self, element):
"""Describes a Struct type using the common header and an HTML table with all the fields.
It opens an HTML table with 6 columns: 3 for indentation and 3 for field data.
Actual content is provided by print_struct_content().
It adds a last row in the table with the list of all types which include this struct (only present
in inline structs).
"""
filename = os.path.join(self.config['dst_path'], '{}.html'.format(element['name']))
with open(filename, 'w') as html_file:
(size, var) = self.calc_total_type_size(element)
self.print_html_header(element, size, var, html_file)
print('<div class="big-table6">', file=html_file)
self.print_struct_content(element, 0, html_file)
print('</div>', file=html_file)
if len(element['inlined-from']) > 0:
print('<details><summary>Included in:</summary><div class="tabulated-list"><div>', file=html_file)
print('</div><div>'.join(self.type_description(self.types[x]) for x in element['inlined-from']), file=html_file)
print('</div></div></details>', file=html_file)
print(file=html_file)
def output_rst(self):
"""Creates an index file in ReStructuredText format and an HTML file for each structure.
The index file includes all the other files.
"""
filename = os.path.join(self.config['dst_path'], 'index.rst')
index_file = open(filename, 'w')
print('#############', file=index_file)
print('Serialization', file=index_file)
print('#############', file=index_file)
print(file=index_file)
        print('The `catbuffer schemas <https://github.com/symbol/catbuffer-schemas>`_ repository defines how the different Symbol entity types should be serialized (for example, Transactions). In combination with the `catbuffer-generators <https://github.com/symbol/catbuffer-generators>`_ project, developers can generate builder classes for a given set of programming languages.', file=index_file)
print(file=index_file)
# Hide level 4 headers from local TOC using CSS: there's too many of them and I could not find
# a Sphinx-friendly way of doing it.
print('.. raw:: html', file=index_file)
print(file=index_file)
print(' <style>.bs-sidenav ul ul ul > li {display: none;}</style>', file=index_file)
print(' <div id="serialization">', file=index_file)
print(file=index_file)
# Process all basic types
print('Basic Types', file=index_file)
print('***********', file=index_file)
print(file=index_file)
print('.. raw:: html', file=index_file)
print(file=index_file)
print(' <div class="big-table3">', file=index_file)
for e in self.schema:
if e['type'] == 'byte':
self.print_type(e, index_file)
print(' </div>', file=index_file)
print(file=index_file)
# Process all enums
print('Enumerations', file=index_file)
print('************', file=index_file)
print(file=index_file)
for e in self.schema:
if e['type'] == 'enum':
self.print_rst_header(e, index_file)
print('.. raw:: html', file=index_file)
print(' :file: {}.html'.format(e['name']), file=index_file)
print(file=index_file)
self.print_enum(e)
# Process all "user" structs
print('Structures', file=index_file)
print('**********', file=index_file)
print(file=index_file)
for e in self.schema:
if e['type'] == 'struct' and len(e['inlined-from']) == 0:
self.print_rst_header(e, index_file)
print('.. raw:: html', file=index_file)
print(' :file: {}.html'.format(e['name']), file=index_file)
print(file=index_file)
self.print_struct(e)
# Process all "inner" structs
print('Inner Structures', file=index_file)
print('****************', file=index_file)
print(file=index_file)
print('These are structures only meant to be included inside other structures.', file=index_file)
print('Their description is already present in the containing structures above and is only repeated here for completeness.', file=index_file)
print(file=index_file)
for e in self.schema:
if e['type'] == 'struct' and len(e['inlined-from']) > 0:
self.print_rst_header(e, index_file)
print('.. raw:: html', file=index_file)
print(' :file: {}.html'.format(e['name']), file=index_file)
print(file=index_file)
self.print_struct(e)
print('.. raw:: html', file=index_file)
print(file=index_file)
print(' </div>', file=index_file)
index_file.close()
def output_md(self):
"""Creates an index file in MarkDown format and an HTML file for each structure.
The index file includes all the other files.
"""
filename = os.path.join(self.config['dst_path'], 'index.md')
index_file = open(filename, 'w')
# Frontmatter (admittedly customized for nem-docs)
print('---', file=index_file)
print('title: Serialization', file=index_file)
print('parent: Developer resources', file=index_file)
print('taxonomy:', file=index_file)
print(' category:', file=index_file)
print(' - docs', file=index_file)
print('---', file=index_file)
print(file=index_file)
print('* TOC', file=index_file)
print('{:toc}', file=index_file)
print(file=index_file)
# Process all basic types
print('## Basic Types', file=index_file)
print(file=index_file)
print('<div class="big-table3">', file=index_file)
for e in self.schema:
if e['type'] == 'byte':
self.print_type(e, index_file)
print('</div>', file=index_file)
print(file=index_file)
# Process all enums
print('## Enumerations', file=index_file)
print(file=index_file)
for e in self.schema:
if e['type'] == 'enum':
self.print_md_header(e, index_file)
print('{{% include serialization/{}.html %}}'.format(e['name']), file=index_file)
print(file=index_file)
self.print_enum(e)
# Process all "user" structs
print('## Structures', file=index_file)
print(file=index_file)
for e in self.schema:
if e['type'] == 'struct' and len(e['inlined-from']) == 0:
self.print_md_header(e, index_file)
print('{{% include serialization/{}.html %}}'.format(e['name']), file=index_file)
print(file=index_file)
self.print_struct(e)
# Process all "inner" structs
print('## Inner Structures', file=index_file)
print(file=index_file)
print('These are structures only meant to be included inside other structures.', file=index_file)
print('Their description is already present in the containing structures above and is only repeated here for completeness.', file=index_file)
print(file=index_file)
for e in self.schema:
if e['type'] == 'struct' and len(e['inlined-from']) > 0:
self.print_md_header(e, index_file)
print('{{% include serialization/{}.html %}}'.format(e['name']), file=index_file)
print(file=index_file)
self.print_struct(e)
index_file.close()
def execute(self):
"""Contains all the logic to execute the serialization command.
It prints ReStructuredText (RST) to the console which can be redirected to a file to be directly included
in Sphinx.
The content is RST so all headers and labels can be referenced from other Sphinx pages, but contains
embedded HTML (using `.. raw:: html` directives) for added flexibility.
"""
# Read a single YAML file containing all schemas
with open(self.config['schema']) as f:
try:
self.schema = yaml.safe_load(f)
except yaml.YAMLError as exc:
print(exc)
return
# Build types dictionary indexed by type name, for simpler access
self.types = {}
for e in self.schema:
self.types[e['name']] = e
self.types[e['name']]['inlined-from'] = []
if e['type'] == 'enum':
# For each value type, build a values dictionary indexed by value name, for simpler access too
e['values_dict'] = {}
for value in e['values']:
e['values_dict'][value['name']] = value
# Keep track of all structs which include each struct.
# This also allows marking "internal" structs and list them in a separate section.
for e in self.schema:
if e['type'] == 'struct':
for field in e['layout']:
if field.get('disposition', '') == 'inline':
self.types[field['type']]['inlined-from'].append(e['name'])
# Parse source schemas to extract exact locations of type definitions.
# This step could be avoided if the parsed YAML file already contained this information.
self.type_schema_locations = find_schema_locations(self.config['source_schema_path'])
# Parse source of catapult-client to extract exact locations of model definitions.
self.type_catapult_locations = find_catapult_model_locations(self.config['source_catapult_path'])
self.source_api = self.config['source_schema_path'].split('/')[-1]
# Print document title and introduction
if self.config['format'] == 'rst':
self.page_prefix = '/serialization'
self.output_rst()
else:
self.page_prefix = ''
self.output_md()
|
11510346
|
from pyhf.parameters.paramsets import (
paramset,
unconstrained,
constrained_by_normal,
constrained_by_poisson,
)
from pyhf.parameters.utils import reduce_paramsets_requirements
from pyhf.parameters.paramview import ParamViewer
__all__ = [
'paramset',
'unconstrained',
'constrained_by_normal',
'constrained_by_poisson',
'reduce_paramsets_requirements',
'ParamViewer',
]
def __dir__():
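    # Restrict the module's dir() listing to the public API declared in __all__.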
return __all__
|