| id | content |
|---|---|
11479073
|
from celery import shared_task
from .utils import get_device_model
@shared_task()
def send_push_notification(device_id, message, url, badge_count, sound, extra, category, **kwargs):
"""
Sends a push notification message to the specified tokens
"""
device_model = get_device_model()
device = device_model.objects.get(id=device_id)
device.send_push_notification(message, url, badge_count, sound, extra, category, **kwargs)
return "Message: %s" % message
@shared_task()
def send_silent_push_notification(device_id, extra, badge_count, content_available, **kwargs):
"""
Sends a push notification message to the specified tokens
"""
device_model = get_device_model()
device = device_model.objects.get(id=device_id)
device.send_silent_push_notification(extra, badge_count, content_available, **kwargs)
return "Silent push"
|
11479102
|
from urllib.request import urlopen
from bs4 import BeautifulSoup
from constants import *
url = "https://www.cia.gov/library/publications/the-world-factbook/geos/"
country_table = [i.rstrip().split(";") for i in open("ciaCountryCode.txt").readlines()]
def escape(text, characters):
for character in characters:
text = text.replace(character, '\\' + character)
return text
def quot(st):
return '"' + st.replace("\n", "\\n").replace("\r", "\\r") + '"'
def main():
for i in COUNTRY_CODES:
country_code = "error"
for j in country_table:
if j[2] == i.upper():
country_code = j[1].lower()
break
if country_code == "error":
continue
        data = urlopen(url + country_code + ".html").read()
soup = BeautifulSoup(data, 'html.parser')
desc = soup.find("div", {"class": "category_data"})
try:
print("%s = %s;" % (quot(i), quot(escape(desc.text, "\""))))
except Exception:
print(i)
main()
|
11479111
|
import threading
from rich.align import Align
from textual import events
from textual.app import App
from textual.reactive import Reactive
from rich.panel import Panel
from textual.widgets import ScrollView, Footer, Header, TreeClick, TreeControl, TreeNode
from textual_app.get_log_task import GetLogTask
from textual_widgets.status_bar import StatusBar
from textual_widgets.project_tree import ProjectTree, LambdaEntry
import yaml
async def get_lambdas_1(filename: str, tree: TreeControl) -> None:
with open(filename, "r") as fh:
dictionary_yaml = yaml.safe_load(fh)
default_region = dictionary_yaml.get("default_region", None)
if default_region is not None:
del dictionary_yaml["region"]
for p in dictionary_yaml:
for lambda_ in dictionary_yaml[p]:
path = lambda_["path"]
custom_name = lambda_.get("custom_name", path.split("/")[-1])
region = lambda_.get("region", default_region)
await tree.add(
tree.root.id,
custom_name,
{"path": path, "region": region},
)
await tree.root.expand()
async def get_lambdas(filename: str, node: TreeNode[LambdaEntry]) -> None:
with open(filename, "r") as fh:
dictionary_yaml = yaml.safe_load(fh)
default_region = dictionary_yaml.get("region", None)
if default_region is not None:
del dictionary_yaml["region"]
for project in dictionary_yaml:
await node.add(project, LambdaEntry(True, "", None, None))
new_node = node.children[-1]
for lambda_ in dictionary_yaml[project]:
log_group_name = lambda_["path"]
custom_name = lambda_.get("custom-name", log_group_name.split("/")[-1])
region = lambda_.get("region", default_region)
await new_node.add(
custom_name,
LambdaEntry(
False,
log_group_name,
custom_name,
region,
),
)
new_node.loaded = True
await new_node.expand()
node.loaded = True
await node.expand()
class RichWatchApp(App):
def __init__(
self, log_groups_file="log_groups.yaml", thread_class=GetLogTask, **kwargs
) -> None:
self.log_groups_file = log_groups_file
self.thread_class = thread_class
super(RichWatchApp, self).__init__(**kwargs)
main_body = Reactive(Panel(Align.center("Logs Content"), style="bold"))
async def watch_main_body(self, _) -> None:
await self.main_view.update(Panel(self.main_body))
async def action_scroll_down(self) -> None:
self.main_view.scroll_down()
async def action_scroll_up(self) -> None:
self.main_view.scroll_up()
async def action_redownload(self) -> None:
self.status_view.reset_timer()
self.thread_trigger.set()
async def action_auto_refresh(self) -> None:
self.status_view.toggle_auto_refresh()
async def action_custom_quit(self) -> None:
self.log_thread.end()
await self.shutdown()
async def action_hide_bars(self) -> None:
await self.view.action_toggle("tree_bar")
await self.view.action_toggle("status_bar")
async def on_load(self, event: events.Load) -> None:
await self.bind("b", "hide_bars()", "Toggle sidebar")
await self.bind("j", "scroll_up()", "Go down")
await self.bind("k", "scroll_down()", "Go up")
await self.bind("r", "redownload()", "Redownload logs")
await self.bind("a", "auto_refresh()", "Auto Refresh")
await self.bind("q", "custom_quit()", "Quit")
async def on_mount(self, event: events.Mount) -> None:
self.thread_trigger = threading.Event()
self.log_thread = self.thread_class(self, self.thread_trigger)
# ----------- LAYOUT -----------
await self.view.dock(Header(), edge="top")
await self.view.dock(Footer(), edge="bottom")
self.tree = ProjectTree("List of Logs Groups", name="my_name")
self.main_view = ScrollView(self.main_body)
self.status_view = StatusBar(self.thread_trigger)
self.tree_view = ScrollView(self.tree)
await self.view.dock(self.tree_view, edge="left", size=30, name="tree_bar")
await self.view.dock(self.status_view, edge="right", size=30, name="status_bar")
await self.view.dock(self.main_view, name="main_bar")
# --------- OTHER -----------
await get_lambdas(self.log_groups_file, self.tree.root)
self.log_thread.start()
async def handle_tree_click(self, message: TreeClick[LambdaEntry]) -> None:
region = message.node.data.region
action = message.node.data.log_name
if action is not None and not self.thread_trigger.is_set():
self.log_thread.set_log_group_region(region)
self.log_thread.set_log_group_name(action)
self.status_view.reset_timer()
self.thread_trigger.set()
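# Entry-point sketch (assumption: not part of the original file). The early
# textual API used above (view.dock, on_load) starts an app via the
# class-level run(); the title here is a placeholder:
if __name__ == "__main__":
    RichWatchApp.run(title="RichWatch", log="textual.log")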
|
11479119
|
import pytextnow as pytn
client = pytn.Client("username") # You can also include the cookie in ther Client constructor
# Here you should input your connect.sid cookie
client.send_sms("number", "text")
|
11479125
|
import visualization.panda.world as wd
import modeling.geometric_model as gm
import basis.robot_math as rm
import math
import numpy as np
base = wd.World(cam_pos=[1, 1, 1], lookat_pos=[0, 0, 0], toggle_debug=True)
frame_o = gm.gen_frame(length=.2)
frame_o.attach_to(base)
# rotmat = rm.rotmat_from_axangle([1,1,1],math.pi/4)
rotmat_a = rm.rotmat_from_euler(math.pi / 3, -math.pi / 6, math.pi / 3)
# frame_a = gm.gen_mycframe(length=.2, rotmat=rotmat)
frame_a = gm.gen_dashframe(length=.2, rotmat=rotmat_a)
frame_a.attach_to(base)
# point in a
pos_a = np.array([.15, .07, .05])
# pos_start = rotmat_a.dot(pos_a)
# pos_end = rotmat_a.dot(np.array([pos_a[0], pos_a[1], 0]))
# # gm.gen_dashstick(pos_start, pos_end, thickness=.001, rgba=[0, 0, 0, .3], lsolid=.005, lspace=.005).attach_to(base)
# gm.gen_stick(pos_start, pos_end, thickness=.001, rgba=[0, 0, 0, .3]).attach_to(base)
# pos_start = rotmat_a.dot(np.array([pos_a[0], pos_a[1], 0]))
# pos_end = rotmat_a.dot(np.array([pos_a[0], 0, 0]))
# gm.gen_dashstick(pos_start, pos_end, thickness=.001, rgba=[0, 0, 0, .3], lsolid=.005, lspace=.005).attach_to(base)
#
# pos_start = rotmat_a.dot(pos_a)
# pos_end = rotmat_a.dot(np.array([pos_a[0], 0, pos_a[2]]))
# # gm.gen_dashstick(pos_start, pos_end, thickness=.001, rgba=[0, 0, 0, .3], lsolid=.005, lspace=.005).attach_to(base)
# gm.gen_stick(pos_start, pos_end, thickness=.001, rgba=[0, 0, 0, .3]).attach_to(base)
# pos_start = rotmat_a.dot(np.array([pos_a[0], 0, pos_a[2]]))
# pos_end = rotmat_a.dot(np.array([pos_a[0], 0, 0]))
# gm.gen_dashstick(pos_start, pos_end, thickness=.001, rgba=[0, 0, 0, .3], lsolid=.005, lspace=.005).attach_to(base)
#
# # pos_start = rotmat_a.dot(pos_a)
# # pos_end = rotmat_a.dot(np.array([pos_a[0], pos_a[1], 0]))
# # gm.gen_dashstick(pos_start, pos_end, thickness=.001, rgba=[0, 0, 0, .3], lsolid=.005, lspace=.005).attach_to(base)
# pos_start = rotmat_a.dot(np.array([pos_a[0], pos_a[1], 0]))
# pos_end = rotmat_a.dot(np.array([0, pos_a[1], 0]))
# gm.gen_dashstick(pos_start, pos_end, thickness=.001, rgba=[0, 0, 0, .3], lsolid=.005, lspace=.005).attach_to(base)
#
# pos_start = rotmat_a.dot(pos_a)
# pos_end = rotmat_a.dot(np.array([0, pos_a[1], pos_a[2]]))
# # gm.gen_dashstick(pos_start, pos_end, thickness=.001, rgba=[0, 0, 0, .3], lsolid=.005, lspace=.005).attach_to(base)
# gm.gen_stick(pos_start, pos_end, thickness=.001, rgba=[0, 0, 0, .3]).attach_to(base)
# pos_start = rotmat_a.dot(np.array([0, pos_a[1], pos_a[2]]))
# pos_end = rotmat_a.dot(np.array([0, pos_a[1], 0]))
# gm.gen_dashstick(pos_start, pos_end, thickness=.001, rgba=[0, 0, 0, .3], lsolid=.005, lspace=.005).attach_to(base)
#
# # pos_start = rotmat_a.dot(pos_a)
# # pos_end = rotmat_a.dot(np.array([pos_a[0], 0, pos_a[2]]))
# # gm.gen_dashstick(pos_start, pos_end, thickness=.001, rgba=[0, 0, 0, .3], lsolid=.005, lspace=.005).attach_to(base)
# pos_start = rotmat_a.dot(np.array([pos_a[0], 0, pos_a[2]]))
# pos_end = rotmat_a.dot(np.array([0, 0, pos_a[2]]))
# gm.gen_dashstick(pos_start, pos_end, thickness=.001, rgba=[0, 0, 0, .3], lsolid=.005, lspace=.005).attach_to(base)
# # pos_start = rotmat_a.dot(pos_a)
# # pos_end = rotmat_a.dot(np.array([0, pos_a[1], pos_a[2]]))
# # gm.gen_dashstick(pos_start, pos_end, thickness=.001, rgba=[0, 0, 0, .3], lsolid=.005, lspace=.005).attach_to(base)
# pos_start = rotmat_a.dot(np.array([0, pos_a[1], pos_a[2]]))
# pos_end = rotmat_a.dot(np.array([0, 0, pos_a[2]]))
# gm.gen_dashstick(pos_start, pos_end, thickness=.001, rgba=[0,0,0,.3], lsolid=.005, lspace=.005).attach_to(base)
# cvt to sigma o
pos_o = rotmat_a.dot(pos_a)
# gm.gen_dashstick(pos_o, np.array([pos_o[0], pos_o[1], 0]), thickness=.001, rgba=[0,0,0,.3], lsolid=.005, lspace=.005).attach_to(base)
gm.gen_stick(pos_o, np.array([pos_o[0], pos_o[1], 0]), thickness=.001, rgba=[0,0,0,.3]).attach_to(base)
gm.gen_dashstick(np.array([pos_o[0], pos_o[1], 0]), np.array([pos_o[0], 0, 0]), thickness=.001, rgba=[0,0,0,.3], lsolid=.005, lspace=.005).attach_to(base)
# gm.gen_dashstick(pos_o, np.array([pos_o[0], 0, pos_o[2]]), thickness=.001, rgba=[0,0,0,.3], lsolid=.005, lspace=.005).attach_to(base)
gm.gen_stick(pos_o, np.array([pos_o[0], 0, pos_o[2]]), thickness=.001, rgba=[0,0,0,.3]).attach_to(base)
gm.gen_dashstick(np.array([pos_o[0], 0, pos_o[2]]), np.array([pos_o[0], 0, 0]), thickness=.001, rgba=[0,0,0,.3], lsolid=.005, lspace=.005).attach_to(base)
# gm.gen_dashstick(pos_o, np.array([pos_o[0], pos_o[1], 0]), thickness=.001, rgba=[0,0,0,.3], lsolid=.005, lspace=.005).attach_to(base)
gm.gen_dashstick(np.array([pos_o[0], pos_o[1], 0]), np.array([0, pos_o[1], 0]), thickness=.001, rgba=[0,0,0,.3], lsolid=.005, lspace=.005).attach_to(base)
# gm.gen_dashstick(pos_o, np.array([0, pos_o[1], pos_o[2]]), thickness=.001, rgba=[0,0,0,.3], lsolid=.005, lspace=.005).attach_to(base)
gm.gen_stick(pos_o, np.array([0, pos_o[1], pos_o[2]]), thickness=.001, rgba=[0,0,0,.3]).attach_to(base)
gm.gen_dashstick(np.array([0, pos_o[1], pos_o[2]]), np.array([0, pos_o[1], 0]), thickness=.001, rgba=[0,0,0,.3], lsolid=.005, lspace=.005).attach_to(base)
# gm.gen_dashstick(pos_o, np.array([pos_o[0], 0, pos_o[2]]), thickness=.001, rgba=[0,0,0,.3], lsolid=.005, lspace=.005).attach_to(base)
gm.gen_dashstick(np.array([pos_o[0], 0, pos_o[2]]), np.array([0, 0, pos_o[2]]), thickness=.001, rgba=[0,0,0,.3], lsolid=.005, lspace=.005).attach_to(base)
# gm.gen_dashstick(pos_o, np.array([0, pos_o[1], pos_o[2]]), thickness=.001, rgba=[0,0,0,.3], lsolid=.005, lspace=.005).attach_to(base)
gm.gen_dashstick(np.array([0, pos_o[1], pos_o[2]]), np.array([0, 0, pos_o[2]]), thickness=.001, rgba=[0,0,0,.3], lsolid=.005, lspace=.005).attach_to(base)
# #
gm.gen_sphere(pos=pos_o, radius=.005, rgba=[0,0,0,1]).attach_to(base)
# gm.gen_dashstick(np.zeros(3), pos_o, thickness=.003, rgba=[.3, .3, .3, 1], lsolid=.01, lspace=.01).attach_to(base)
gm.gen_stick(np.zeros(3), pos_o, thickness=.003, rgba=[.3, .3, .3, 1]).attach_to(base)
base.run()
|
11479135
|
from decimal import Decimal
from future.moves.urllib.parse import ParseResult
from collections import OrderedDict
from enum import Enum
from uuid import UUID
from datetime import date, datetime, time
from attr._compat import iteritems
from .functions import to_dict
from .types import (
TypedSequence, TypedMapping, TypedSet, DEFAULT_DATE_FORMAT,
DEFAULT_DATETIME_FORMAT, DEFAULT_TIME_FORMAT
)
@to_dict.register(list) # noqa F811
@to_dict.register(set)
@to_dict.register(tuple)
def _(obj, **kwargs):
suppress_empty_values = kwargs.get("suppress_empty_values", False)
retain_collection_types = kwargs.get("retain_collection_types", False)
if not suppress_empty_values or len(obj):
cf = obj.__class__ if retain_collection_types else list
return cf([to_dict(i, **kwargs) for i in obj])
@to_dict.register(dict) # noqa F811
def _(obj, **kwargs):
suppress_empty_values = kwargs.get("suppress_empty_values", False)
dict_factory = kwargs.get("dict_factory", OrderedDict)
items = []
for kk, vv in iteritems(obj):
vv = to_dict(vv, **kwargs)
if (not suppress_empty_values) or (vv is not None):
items.append((to_dict(kk, **kwargs), vv))
if not suppress_empty_values or len(items):
return dict_factory(items)
@to_dict.register(TypedSequence) # noqa F811
def _(obj, **kwargs):
return to_dict(obj.list, **kwargs)
@to_dict.register(TypedSet) # noqa F811
def _(obj, **kwargs):
return to_dict(obj.set, **kwargs)
@to_dict.register(TypedMapping) # noqa F811
def _(obj, **kwargs):
suppress_map_key_values = kwargs.get("suppress_map_key_values", False)
suppress_empty_values = kwargs.get("suppress_empty_values", False)
rv = kwargs.get("dict_factory", OrderedDict)()
items = obj.items()
for key_value, item in items:
sub_dict = to_dict(item, **kwargs)
if suppress_map_key_values:
sub_dict.pop(obj.key)
rv[key_value] = sub_dict
if not suppress_empty_values or len(items):
return rv
@to_dict.register(Enum) # noqa F811
def _(obj, **kwargs):
return obj.value
@to_dict.register(UUID) # noqa F811
def _(obj, **kwargs):
return str(obj)
@to_dict.register(ParseResult) # noqa F811
def _(obj, **kwargs):
return obj.geturl()
@to_dict.register(date) # noqa F811
def _(obj, **kwargs):
formatter = kwargs.get('formatter') or DEFAULT_DATE_FORMAT
return obj.strftime(formatter)
@to_dict.register(datetime) # noqa F811
def _(obj, **kwargs):
formatter = kwargs.get('formatter') or DEFAULT_DATETIME_FORMAT
return (obj.isoformat() if formatter == "ISO_FORMAT"
else obj.strftime(formatter))
@to_dict.register(time) # noqa F811
def _(obj, **kwargs):
formatter = kwargs.get('formatter') or DEFAULT_TIME_FORMAT
return obj.strftime(formatter)
@to_dict.register(Decimal) # noqa F811
def _(obj, **kwargs):
return str(obj)
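# Usage sketch (assumption: not part of the original module). With the
# handlers registered above, to_dict serializes nested values recursively:
#
#   from enum import Enum
#   from uuid import uuid4
#
#   class Color(Enum):
#       RED = "red"
#
#   to_dict({"id": uuid4(), "color": Color.RED, "tags": ("a", "b")})
#   # -> OrderedDict([('id', '<uuid str>'), ('color', 'red'), ('tags', ['a', 'b'])])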
|
11479153
|
from codecs import Codec, CodecInfo, register as lookup_function
from typing import Union, Tuple
from warnings import warn
from iota.exceptions import with_context
__all__ = [
'AsciiTrytesCodec',
'TrytesDecodeError',
]
class TrytesDecodeError(ValueError):
"""
Indicates that a tryte string could not be decoded to bytes.
"""
pass
class AsciiTrytesCodec(Codec):
"""
Legacy codec for converting byte strings into trytes, and vice
versa.
This method encodes each pair of trytes as an ASCII code point (and
vice versa when decoding).
The end result requires more space than if the trytes were converted
mathematically, but because the result is ASCII, it's easier to work
with.
Think of this kind of like Base 64 for balanced ternary (:
"""
name = 'trytes_ascii'
compat_name = 'trytes'
"""
Old name for this codec.
Note: Will be removed in PyOTA v2.1!
"""
# :bc: Without the bytearray cast, Python 2 will populate the dict
# with characters instead of integers.
alphabet = dict(enumerate(bytearray(b'9ABCDEFGHIJKLMNOPQRSTUVWXYZ')))
"""
Used to encode bytes into trytes.
"""
index = dict(zip(alphabet.values(), alphabet.keys()))
"""
Used to decode trytes into bytes.
"""
@classmethod
def get_codec_info(cls) -> CodecInfo:
"""
Returns information used by the codecs library to configure the
codec for use.
"""
codec = cls()
codec_info = {
'encode': codec.encode,
'decode': codec.decode,
# In Python 2, all codecs are made equal.
# In Python 3, some codecs are more equal than others.
'_is_text_encoding': False
}
return CodecInfo(**codec_info)
def encode(self,
input: Union[memoryview, bytes, bytearray],
errors: str = 'strict') -> Tuple[bytes, int]:
"""
Encodes a byte string into trytes.
"""
if isinstance(input, memoryview):
input = input.tobytes()
if not isinstance(input, (bytes, bytearray)):
raise with_context(
exc=TypeError(
"Can't encode {type}; byte string expected.".format(
type=type(input).__name__,
)),
context={
'input': input,
},
)
# :bc: In Python 2, iterating over a byte string yields
# characters instead of integers.
if not isinstance(input, bytearray):
input = bytearray(input)
trytes = bytearray()
for c in input:
second, first = divmod(c, len(self.alphabet))
trytes.append(self.alphabet[first])
trytes.append(self.alphabet[second])
return bytes(trytes), len(input)
def decode(self,
input: Union[memoryview, bytes, bytearray],
errors: str = 'strict') -> Tuple[bytes, int]:
"""
Decodes a tryte string into bytes.
"""
if isinstance(input, memoryview):
input = input.tobytes()
if not isinstance(input, (bytes, bytearray)):
raise with_context(
exc=TypeError(
"Can't decode {type}; byte string expected.".format(
type=type(input).__name__,
)),
context={
'input': input,
},
)
# :bc: In Python 2, iterating over a byte string yields
# characters instead of integers.
if not isinstance(input, bytearray):
input = bytearray(input)
bytes_ = bytearray()
for i in range(0, len(input), 2):
try:
first, second = input[i:i + 2]
except ValueError:
if errors == 'strict':
raise with_context(
exc=TrytesDecodeError(
"'{name}' codec can't decode value; "
"tryte sequence has odd length.".format(
name=self.name,
),
),
context={
'input': input,
},
)
elif errors == 'replace':
bytes_ += b'?'
continue
try:
bytes_.append(
self.index[first]
+ (self.index[second] * len(self.index))
)
except ValueError:
# This combination of trytes yields a value > 255 when
# decoded.
# Naturally, we can't represent this using ASCII.
if errors == 'strict':
raise with_context(
exc=TrytesDecodeError(
"'{name}' codec can't decode trytes {pair} "
"at position {i}-{j}: "
"ordinal not in range(255)".format(
name=self.name,
pair=chr(first) + chr(second),
i=i,
j=i + 1,
),
),
context={
'input': input,
}
)
elif errors == 'replace':
bytes_ += b'?'
return bytes(bytes_), len(input)
@lookup_function
def check_trytes_codec(encoding):
"""
Determines which codec to use for the specified encoding.
References:
- https://docs.python.org/3/library/codecs.html#codecs.register
"""
if encoding == AsciiTrytesCodec.name:
return AsciiTrytesCodec.get_codec_info()
elif encoding == AsciiTrytesCodec.compat_name:
warn(
'"{old_codec}" codec will be removed in PyOTA v2.1. '
'Use "{new_codec}" instead.'.format(
new_codec=AsciiTrytesCodec.name,
old_codec=AsciiTrytesCodec.compat_name,
),
DeprecationWarning,
)
return AsciiTrytesCodec.get_codec_info()
return None
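# Usage sketch (assumption: not part of the original module). Importing this
# module registers the codec, after which the standard codecs API works:
#
#   import codecs
#   trytes, _ = AsciiTrytesCodec().encode(b'Hello')   # b'RBTC9D9DCD'
#   codecs.decode(trytes, AsciiTrytesCodec.name)      # b'Hello'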
|
11479164
|
from unittest import TestCase
from asserts import assert_equal
from htmlgen import Image
from test_htmlgen.util import parse_short_tag
class ImageTest(TestCase):
def test_attributes(self):
image = Image("my-image.png", "Alternate text")
assert_equal("my-image.png", image.url)
assert_equal("Alternate text", image.alternate_text)
def test_attributes_default_alt(self):
image = Image("my-image.png")
assert_equal("", image.alternate_text)
def test_with_alt(self):
image = Image("my-image.png", "Alternate text")
tag = parse_short_tag(str(image))
assert_equal("img", tag.name)
assert_equal("my-image.png", image.get_attribute("src"))
assert_equal("Alternate text", image.get_attribute("alt"))
def test_without_alt(self):
image = Image("my-image.png")
tag = parse_short_tag(str(image))
assert_equal("img", tag.name)
assert_equal("my-image.png", image.get_attribute("src"))
assert_equal("", image.get_attribute("alt"))
|
11479214
|
class CycleError(Exception):
pass
def _sort_graph_topologically(graph):
    """
    Sort a directed graph topologically.
    Args:
        graph: dict of lists (key is a node name, value is the list of its neighbours)
    Returns:
        generator of nodes in topological order
    """
# calculate input degree (number of nodes pointing to particular node)
indeg = {k: 0 for k in graph}
for node, edges in graph.items():
for edge in edges:
indeg[edge] += 1
# sort graph topologically
# return nodes which input degree is 0
no_requirements = set([a for a in indeg if indeg.get(a, 0) == 0])
while no_requirements:
next_node = no_requirements.pop()
# for each node to which this one is pointing - decrease input degree
for dependency in graph[next_node]:
indeg[dependency] -= 1
# add to set of nodes ready to be returned (without nodes pointing
# to it)
if indeg[dependency] == 0:
no_requirements.add(dependency)
yield next_node
if any(indeg.values()):
raise CycleError("Cycle detected during topological sort")
def _compare_instances_types(instances):
    """Check that all instances are of the same type.
    Conditions:
        - a transition can only run on objects of the same type.
    """
    if not all(
        map(lambda x: isinstance(instances[0], x.__class__), instances)
    ):
        raise TypeError("All instances must be of the same type")
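# Usage sketch (assumption: not part of the original module). Edges point
# from a node to the nodes that depend on it:
#
#   graph = {"a": ["b", "c"], "b": ["c"], "c": []}
#   list(_sort_graph_topologically(graph))   # ["a", "b", "c"]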
|
11479229
|
from shutil import copyfile
copyfile("message2", "snakemake-tibanna-test/1/final_message")
copyfile("message2", "snakemake-tibanna-test2/1/final_message")
|
11479234
|
import tqdm
import argparse
def read_file(file):
    with open(file, 'r', encoding='utf-8') as f:
        text = f.readlines()
    return text
def convert(input_file, output_file):
    texts = read_file(file=input_file)
    with open(output_file, 'w+', encoding='utf-8') as f:
        for i in tqdm.tqdm(range(len(texts))):
            text, tagged = texts[i].split('\t')
            words = text.strip().split(' ')
            tags = tagged.strip().split(' ')
            for word, tag in zip(words, tags):
                f.write(word + ' ' + tag + '\n')
            f.write('\n')
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Convert word-segmentation tagging data into a format BERT can read")
    parser.add_argument('input_file', type=str, help="input file")
    parser.add_argument('output_file', type=str, help="output file")
    args = parser.parse_args()
    convert(args.input_file, args.output_file)
|
11479250
|
response.title = settings.title
response.subtitle = settings.subtitle
response.meta.author = '%(author)s <%(author_email)s>' % settings
response.meta.keywords = settings.keywords
response.meta.description = settings.description
response.menu = [
(T('Business Service Catalogue'),URL('default','index')==URL(),URL('default','index'),[
(T('Business Service Catalogue'),URL('default','index')==URL(),URL('default','index'),[],),
(T('Notification'),URL('default','manage_notification')==URL(),URL('default','manage_notification'),[],),
]),
(T('Inventory'),URL('default','device_manage')==URL(),URL('default','device_manage'),[
(T('Device'),URL('default','device_manage')==URL(),URL('default','device_manage'),[]),
(T('Device&IP Association'),URL('default','device2ip_manage')==URL(),URL('default','device2ip_manage'),[]),
(T('Ip'),URL('default','ip_manage')==URL(),URL('default','ip_manage'),[]),
(T('IP&Port association'),URL('default','ip2port_manage')==URL(),URL('default','ip2port_manage'),[]),
(T('Port'),URL('default','port_manage')==URL(),URL('default','port_manage'),[]),
(T('Orphan IPs'),URL('default','orphan_ip')==URL(),URL('default','orphan_ip'),[]),
]),
(T('Network Discovery'),URL('default','network_discovery')==URL(),URL('default','network_discovery'),[
# (T('Network Discovery'),URL('default','network_discovery')==URL(),URL('default','network_discovery'),[],),
# (T('IP Range'),URL('default','ip_range')==URL(),URL('default','ip_range'),[]),
],),
(T('Parameters'),URL('default','manage_parameters')==URL(),URL('default','manage_parameters'),[],),
(T('Help'),URL('default','help')==URL(),URL('default','help'),[],)
]
|
11479278
|
class Solution:
def checkPalindromeFormation(self, a: str, b: str) -> bool:
if len(a) == 1:
return True
if a[:2] == b[-2:][::-1]:
return True
if a[-2:][::-1] == b[:2]:
return True
return False
|
11479291
|
from setuptools import setup, find_packages
setup(
setup_requires=["pbr>=1.9", "setuptools>=17.1"],
pbr=True,
packages=find_packages(where="src"),
package_dir={"": "src"},
)
|
11479305
|
import os
import requests
from selenium import webdriver
from sys import platform
from xml.etree import ElementTree
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DRIVER_NAME = "chromedriver.exe" if platform == "win32" else "chromedriver"
DRIVER_DIR = os.path.join(BASE_DIR, "plugins", DRIVER_NAME)
JS_SCRIPT = 'if(yt.config_.TTS_URL.length) window.location.href=yt.config_.TTS_URL+"&kind=asr&fmt=srv1&lang=en"'
def get_transcribe_url(youtube_url):
""" Get transcribe URL.
Args:
youtube_url (str): YouTube URL.
"""
driver = webdriver.Chrome(DRIVER_DIR)
driver.get(youtube_url)
driver.execute_script(JS_SCRIPT)
transcribe_url = driver.current_url
driver.quit()
return transcribe_url
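# Usage sketch (assumption: not part of the original file; the video id is a
# placeholder). The srv1 transcript URL can then be fetched and parsed with
# the requests/ElementTree imports above:
#
#   srv1_url = get_transcribe_url("https://www.youtube.com/watch?v=VIDEO_ID")
#   root = ElementTree.fromstring(requests.get(srv1_url).text)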
|
11479311
|
import pytest
from common.core import (
cleanup_process,
)
from common.constants import (
INSTANCE_MANAGER_REPLICA, INSTANCE_MANAGER_ENGINE,
)
from rpc.instance_manager.process_manager_client import ProcessManagerClient
@pytest.fixture()
def em_client(request, address=INSTANCE_MANAGER_ENGINE):
c = ProcessManagerClient(address)
request.addfinalizer(lambda: cleanup_process(c))
return c
@pytest.fixture()
def pm_client(request, address=INSTANCE_MANAGER_REPLICA):
c = ProcessManagerClient(address)
request.addfinalizer(lambda: cleanup_process(c))
return c
|
11479319
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SciNet(nn.Module):
def __init__(self, input_dim, output_dim, latent_dim, layer_dim):
"""Initialize SciNet Model.
Params
======
input_dim (int): number of inputs
output_dim (int): number of outputs
latent_dim (int): number of latent neurons
            layer_dim (int): number of neurons in hidden layers
"""
super(SciNet, self).__init__()
self.latent_dim = latent_dim
self.enc1 = nn.Linear(input_dim, layer_dim)
self.enc2 = nn.Linear(layer_dim, layer_dim)
self.latent = nn.Linear(layer_dim, latent_dim*2)
self.dec1 = nn.Linear(latent_dim+1, layer_dim)
self.dec2 = nn.Linear(layer_dim,layer_dim)
self.out = nn.Linear(layer_dim, output_dim)
def encoder(self, x):
z = F.elu(self.enc1(x))
z = F.elu(self.enc2(z))
z = self.latent(z)
self.mu = z[:, 0:self.latent_dim]
self.log_sigma = z[:, self.latent_dim:]
self.sigma = torch.exp(self.log_sigma)
# Use reparametrization trick to sample from gaussian
eps = torch.randn(x.size(0), self.latent_dim)
z_sample = self.mu + self.sigma * eps
# Compute KL loss
self.kl_loss = kl_divergence(self.mu, self.log_sigma, dim=self.latent_dim)
return z_sample
def decoder(self, z):
x = F.elu(self.dec1(z))
x = F.elu(self.dec2(x))
return self.out(x)
def forward(self, obs):
q = obs[:,-1].reshape(obs.size(0),1)
obs = obs[:,0:-1]
self.latent_r = self.encoder(obs)
dec_input = torch.cat( (q, self.latent_r), 1)
return self.decoder(dec_input)
def kl_divergence(means, log_sigma, dim, target_sigma=0.1):
"""
Computes Kullback–Leibler divergence for arrays of mean and log(sigma)
"""
target_sigma = torch.Tensor([target_sigma])
return 1 / 2. * torch.mean(torch.mean(1 / target_sigma**2 * means**2 +
torch.exp(2 * log_sigma) / target_sigma**2 - 2 * log_sigma + 2 * torch.log(target_sigma), dim=1) - dim)
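# Smoke-test sketch (assumption: not part of the original file). The last
# input column is the "question" q; the remaining columns are the observation:
if __name__ == "__main__":
    model = SciNet(input_dim=50, output_dim=1, latent_dim=3, layer_dim=64)
    batch = torch.randn(8, 51)   # 50 observation values + 1 question value
    out = model(batch)           # shape: (8, 1)
    print(out.shape, model.kl_loss)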
|
11479336
|
from collections import defaultdict
from functools import cmp_to_key, wraps
from typing import Optional, Dict, List, Tuple
import time
from quart import *
from _jwt import *
import asyncio
from models import *
import json
import hashlib
import random
import string
import math
def md5(v: str):
return hashlib.md5(v.encode(encoding='UTF-8')).hexdigest()
cs_need_update = True
cs_cache = {}
md_cache = music_data()
md_map = {}
for music in md_cache:
md_map[music['id']] = music
def get_ds(r: Dict):
for m in md_cache:
if m['title'] == r["title"] and m['type'] == r['type']:
return m["ds"][r["level_index"]]
return 0
def is_new(r: Dict):
for m in md_cache:
if m['title'] == r["title"] and m['type'] == r['type']:
return m["basic_info"]["is_new"]
return False
def is_new_2(r: Record):
for m in md_cache:
if m['title'] == r.title and m['type'] == r.type:
return m["basic_info"]["is_new"]
return False
app = Quart(__name__)
with open('config.json', encoding='utf-8') as fr:
config = json.load(fr)
db_url = config["database_url"]
jwt_secret = config["jwt_secret"]
@app.after_request
def cors(response):
    response.headers['Access-Control-Allow-Origin'] = '*'
    response.headers['Access-Control-Allow-Methods'] = '*'
    response.headers['Access-Control-Allow-Headers'] = 'x-requested-with,content-type'
    return response
@app.route("/feedback", methods=['POST'])
async def feedback():
j = await request.get_json()
FeedBack.insert(j).execute()
return {"message": "提交成功"}
def login_required(f):
@wraps(f)
async def func(*args, **kwargs):
try:
token = decode(request.cookies['jwt_token'])
except KeyError:
return {"status": "error", "msg": "尚未登录"}, 403
if token == {}:
return {"status": "error", "msg": "尚未登录"}, 403
if token['exp'] < ts():
return {"status": "error", "msg": "会话过期"}, 403
g.username = token['username']
g.user = Player.get(Player.username == g.username)
return await f(*args, **kwargs)
return func
@app.route("/login", methods=['POST'])
async def login():
j = await request.get_json()
username = j["username"]
password = j["password"]
try:
user: Player = Player.get(Player.username == username)
if md5(password + user.salt) == user.password:
resp = await make_response({"message": "登录成功"})
resp.set_cookie('jwt_token', username_encode(
username), max_age=30 * 86400)
return resp
except Exception:
pass
return {
"errcode": -3,
"message": "用户名或密码错误",
}, 401
@app.route("/register", methods=['POST'])
async def register():
j = await request.get_json()
player = Player.select().where(Player.username == j["username"])
if player.exists():
return {
"errcode": -1,
"message": "此用户名已存在",
}, 400
salt = ''.join(random.sample(string.ascii_letters + string.digits, 16))
Player.create(username=j["username"], salt=salt,
password=md5(j["password"] + salt))
resp = await make_response({"message": "注册成功"})
resp.set_cookie('jwt_token', username_encode(j["username"]))
return resp
@app.route("/player/profile", methods=['GET', 'POST'])
@login_required
async def profile():
if request.method == 'GET':
u: Player = g.user
return {
"username": u.username,
"nickname": u.nickname,
"additional_rating": u.additional_rating,
"bind_qq": u.bind_qq,
"privacy": u.privacy,
"plate": u.plate
}
else:
try:
obj = await request.json
# handle plate there.
if "plate" in obj:
d = obj["plate"]
version = d["version"]
plate_type = d["plate_type"]
verified, plate_label = verify_plate(g.user, version, plate_type)
if verified:
g.user.__setattr__("plate", plate_label)
del obj["plate"]
if "bind_qq" in obj:
# check duplicate
bind_qq = obj["bind_qq"]
try:
player = Player.get((Player.bind_qq == bind_qq) & (Player.id != g.user.id)) & (bind_qq != '')
# Not found -> except
return {
"message": f"此 QQ 号已经被用户名为{player.username}的用户绑定,请先解绑再进行操作~"
}, 400
except Exception:
pass
for key in obj:
g.user.__setattr__(key, obj[key])
g.user.save()
u: Player = g.user
return {
"username": u.username,
"nickname": u.nickname,
"additional_rating": u.additional_rating,
"bind_qq": u.bind_qq,
"privacy": u.privacy,
"plate": u.plate
}
except Exception as e:
print(e)
return {
"message": "error"
}, 400
def verify_plate(player, version, plate_type) -> Tuple[bool, str]:
try:
if version == "无":
return True, ""
plate_name = get_plate_name(version, plate_type)
if plate_name == "真将":
return False, ""
return True, plate_name
except Exception:
return False, ""
@app.route("/player/change_password", methods=['POST'])
@login_required
async def change_password():
password = (await request.json)["password"]
if len(password) >= 30:
return {"message": "密码不能大于30位"}, 400
    g.user.password = md5(password + g.user.salt)
    g.user.save()
    return {"message": "success"}
@app.route("/music_data", methods=['GET'])
async def get_music_data():
resp = await make_response(json.dumps(md_cache))
resp.headers['content-type'] = "application/json; charset=utf-8"
return resp
@app.route("/player/records", methods=['GET'])
@login_required
async def get_records():
r = NewRecord.raw('select newrecord.achievements, newrecord.fc, newrecord.fs, newrecord.dxScore, chart.ds as ds, chart.level as level, chart.difficulty as diff, music.type as `type`, music.id as `id`, music.is_new as is_new, music.title as title from newrecord, chart, music where player_id = %s and chart_id = chart.id and chart.music_id = music.id', g.user.id)
await compute_ra(g.user)
records = []
for record in r:
elem = record_json(record)
records.append(elem)
return {"records": records, "username": g.username, "additional_rating": g.user.additional_rating}
@app.route("/player/test_data", methods=['GET'])
async def get_test_data():
r = NewRecord.raw('select newrecord.achievements, newrecord.fc, newrecord.fs, newrecord.dxScore, chart.ds as ds, chart.level as level, chart.difficulty as diff, music.type as `type`, music.id as `id`, music.is_new as is_new, music.title as title from newrecord, chart, music where player_id = %s and chart_id = chart.id and chart.music_id = music.id', 293)
records = []
for record in r:
elem = record_json(record)
records.append(elem)
return {"records": records, "username": "TESTUSER", "additional_rating": "2100"}
def get_dx_and_sd(player):
l = NewRecord.raw('select newrecord.achievements, newrecord.fc, newrecord.fs, newrecord.dxScore, chart.ds as ds, chart.level as level, chart.difficulty as diff, music.type as `type`, music.id as `id`, music.is_new as is_new, music.title as title from newrecord, chart, music where player_id = %s and chart_id = chart.id and chart.music_id = music.id', player.id)
l1 = []
l2 = []
for r in l:
setattr(r, 'ra', r.ds * get_l(r.achievements)
* min(100.5, r.achievements) / 100)
if r.is_new:
l2.append(r)
else:
l1.append(r)
l1.sort(key=lambda x: x.ra, reverse=True)
l2.sort(key=lambda x: x.ra, reverse=True)
return l1[:25], l2[:15]
def get_dx_and_sd_for50(player):
l = NewRecord.raw('select newrecord.achievements, newrecord.fc, newrecord.fs, newrecord.dxScore, chart.ds as ds, chart.level as level, chart.difficulty as diff, music.type as `type`, music.id as `id`, music.is_new as is_new, music.title as title from newrecord, chart, music where player_id = %s and chart_id = chart.id and chart.music_id = music.id', player.id)
l1 = []
l2 = []
for r in l:
setattr(r, 'ra', r.ds * get_l(r.achievements)
* min(100.5, r.achievements) / 100)
if r.is_new:
l2.append(r)
else:
l1.append(r)
l1.sort(key=lambda x: x.ra, reverse=True)
l2.sort(key=lambda x: x.ra, reverse=True)
return l1[:35], l2[:15]
def getplatelist(player, version: List[Dict]):
l = NewRecord.raw('select newrecord.achievements, newrecord.fc, newrecord.fs,chart.level as level, chart.difficulty as diff, music.type as `type`, music.id as `id`, music.is_new as is_new, music.version as `version`, music.title as title from newrecord, chart, music where player_id = %s and chart_id = chart.id and chart.music_id = music.id', player.id)
fl = recordList()
vl = []
for r in l:
fl.append(r)
for i in range(0, len(version)):
vl += fl.filter(version=version[i])
return vl
@app.route("/query/player", methods=['POST'])
async def query_player():
obj = await request.json
try:
if "qq" in obj:
p: Player = Player.get(Player.bind_qq == obj["qq"])
else:
username = obj["username"]
p: Player = Player.get(Player.username == username)
except Exception:
return {
"message": "user not exists"
}, 400
if p.privacy and "username" in obj:
try:
token = decode(request.cookies['jwt_token'])
except KeyError:
return {"status": "error", "msg": "已设置隐私"}, 403
if token == {}:
return {"status": "error", "msg": "已设置隐私"}, 403
if token['exp'] < ts():
return {"status": "error", "msg": "会话过期"}, 403
if token['username'] != obj["username"]:
return {"status": "error", "msg": "已设置隐私"}, 403
if "b50" in obj:
sd, dx = get_dx_and_sd_for50(p)
else:
sd, dx = get_dx_and_sd(p)
asyncio.create_task(compute_ra(p))
nickname = p.nickname
if nickname == "":
nickname = p.username if len(p.username) <= 8 else p.username[:8] + '…'
try:
user_data = json.loads(p.user_data)
except Exception:
user_data = None
return {
"username": p.username,
"rating": p.rating,
"additional_rating": p.additional_rating,
"nickname": nickname,
"plate": p.plate,
"charts": {
"sd": [record_json(c) for c in sd],
"dx": [record_json(c) for c in dx]
},
"user_id": p.user_id,
"user_data": user_data
}
@app.route("/query/plate", methods=['POST'])
async def query_plate():
obj = await request.json
try:
if "qq" in obj:
p: Player = Player.get(Player.bind_qq == obj["qq"])
else:
username = obj["username"]
p: Player = Player.get(Player.username == username)
except Exception:
return {"message": "user not exists"}, 400
if p.privacy and "username" in obj:
try:
token = decode(request.cookies['jwt_token'])
except KeyError:
return {"status": "error", "msg": "已设置隐私"}, 403
if token == {}:
return {"status": "error", "msg": "已设置隐私"}, 403
if token['exp'] < ts():
return {"status": "error", "msg": "会话过期"}, 403
if token['username'] != obj["username"]:
return {"status": "error", "msg": "已设置隐私"}, 403
v: List[Dict] = obj["version"]
vl = getplatelist(p, v)
return {
"verlist": [platerecord_json(c) for c in vl]
}
async def compute_ra(player: Player):
rating = 0
sd, dx = get_dx_and_sd(player)
for t in sd:
rating += int(t.ra)
for t in dx:
rating += int(t.ra)
player.rating = rating
player.save()
return rating
@app.route("/player/update_records", methods=['POST'])
@login_required
async def update_records():
global cs_need_update
cs_need_update = True
j = await request.get_json()
dicts = {}
if "userId" in j:
try:
for ml in j["userMusicList"]:
for m in ml["userMusicDetailList"]:
if str(m["musicId"]) not in md_map:
continue
music = md_map[str(m["musicId"])]
level = m["level"]
achievement = min(1010000, m["achievement"])
fc = ["", "fc", "fcp", "ap", "app"][m["comboStatus"]]
fs = ["", "fs", "fsp", "fsd", "fsdp"][m["syncStatus"]]
dxScore = m["deluxscoreMax"]
cid = music["cids"][level]
dicts[cid] = (achievement / 10000.0, fc, fs, dxScore)
g.user.user_id = j["userId"]
g.user.user_data = json.dumps(j["userData"]) if "userData" in j else ""
g.user.save()
except Exception as e:
return {
"message": str(e)
}, 400
else:
for record in j:
# print(time.time())
title = record['title']
_type = record['type']
level = record['level_index']
m = get_music_by_title(md_cache, title, _type)
if m is None or level >= len(m["cids"]):
continue
cid = m["cids"][level]
dicts[cid] = (record["achievements"], record["fc"],
record["fs"], record["dxScore"])
rs = NewRecord.raw(
'select * from newrecord where player_id = %s', g.user.id)
updates = []
creates = []
for r in rs:
# print(r.chart_id)
if r.chart_id in dicts:
v = dicts[r.chart_id]
r.achievements = min(v[0], 101)
r.fc = v[1]
r.fs = v[2]
r.dxScore = v[3]
updates.append(r)
del dicts[r.chart_id]
# print(len(dicts))
for k in dicts:
v = dicts[k]
creates.append({"chart": k, "player": g.user.id,
"fc": v[1], "fs": v[2], "dxScore": v[3], "achievements": min(v[0], 101)})
NewRecord.insert_many(creates).execute()
# print(updates)
NewRecord.bulk_update(updates, fields=[
NewRecord.achievements, NewRecord.fc, NewRecord.fs, NewRecord.dxScore])
await compute_ra(g.user)
return {
"message": "更新成功",
}
@app.route("/player/update_record", methods=['POST'])
@login_required
async def update_record():
# must be update.
global cs_need_update
cs_need_update = True
record = await request.get_json()
title = record['title']
_type = record['type']
level = record['level_index']
m = get_music_by_title(md_cache, title, _type)
if m is None:
return
cid = m["cids"][level]
r: NewRecord = NewRecord.get(
(NewRecord.player == g.user.id) & (NewRecord.chart == cid))
assert r
r.achievements = min(record['achievements'], 101)
r.fc = record['fc']
r.fs = record['fs']
r.save()
await compute_ra(g.user)
return {
"message": "更新成功",
}
@app.route("/player/delete_records", methods=['DELETE'])
@login_required
async def delete_records():
global cs_need_update
cs_need_update = True
nums = NewRecord.delete().where(NewRecord.player == g.user.id).execute()
await compute_ra(g.user)
return {
"message": nums
}
@app.route("/rating_ranking", methods=['GET'])
async def rating_ranking():
players = Player.select()
data = []
for player in players:
data.append({"username": player.username, "ra": player.rating})
resp = await make_response(json.dumps(data, ensure_ascii=False))
resp.headers['content-type'] = "application/json; charset=utf-8"
return resp
@app.route("/count_view", methods=['GET'])
async def count_view():
v: Views = Views.get()
v.prober += 1
v.save()
return {"views": v.prober}
async def message_resp():
today_ts = int((time.time() + 8 * 3600) / 86400) * 86400 - 8 * 3600
results = Message.select(Message, Player).join(
Player).where(Message.ts >= today_ts)
l = []
for r in results:
l.append({"text": r.text, "username": r.player.username,
"ts": r.ts, "nickname": r.nickname})
resp = await make_response(json.dumps(l, ensure_ascii=False))
resp.headers['content-type'] = "application/json; charset=utf-8"
return resp
@app.route("/message", methods=['GET'])
async def message_g():
return await message_resp()
@app.route("/message", methods=['POST'])
@login_required
async def message():
if request.method == 'POST':
a = Message()
a.player = g.user
j = await request.get_json()
a.text = j["text"]
a.nickname = j["nickname"]
a.ts = int(time.time())
a.save(force_insert=True)
return await message_resp()
@app.route("/chart_stats", methods=['GET'])
async def chart_stats():
global cs_need_update
global cs_cache
if len(cs_cache) > 0:
resp = await make_response(json.dumps(cs_cache, ensure_ascii=False))
resp.headers['content-type'] = "application/json; charset=utf-8"
return resp
cursor = NewRecord.raw(
'select newrecord.chart_id, count(*) as cnt, avg(achievements) as `avg`,'
' sum(case when achievements >= 100 then 1 else 0 end) as sssp_count from newrecord group by chart_id'
)
data = defaultdict(lambda: [{}, {}, {}, {}, {}])
for elem in cursor:
data[elem.chart.music.id][elem.chart.level] = {"count": elem.cnt,
"avg": elem.avg,
"sssp_count": int(elem.sssp_count)
}
level_dict = defaultdict(lambda: [])
md = md_cache
for elem in md:
key = elem['id']
for i in range(len(elem['ds'])):
elem2 = {
"key": key,
"level_index": i,
"count": 1,
"avg": 0,
"sssp_count": 0
}
for _k in data[key][i]:
elem2[_k] = data[key][i][_k]
if elem2['count'] >= 30:
level_dict[elem['level'][i]].append(elem2)
for level in level_dict:
level_dict[level].sort(
key=lambda x: x['sssp_count'] / x['count'], reverse=True)
ln = len(level_dict[level])
for i in range(ln):
elem = level_dict[level][i]
rate = ((i + 0.5) / ln)
if elem['count'] < 30:
continue
if rate <= 0.1:
elem['tag'] = 'Very Easy'
elif rate <= 0.3:
elem['tag'] = 'Easy'
elif rate < 0.7:
elem['tag'] = 'Medium'
elif rate < 0.9:
elem['tag'] = 'Hard'
else:
elem['tag'] = 'Very Hard'
elem['v'] = i
elem['t'] = ln
level_index = elem['level_index']
key = elem['key']
del elem['key']
del elem['level_index']
data[key][level_index] = elem
cs_cache = data
cs_need_update = False
resp = await make_response(json.dumps(data, ensure_ascii=False))
resp.headers['content-type'] = "application/json; charset=utf-8"
return resp
app.run(host='0.0.0.0', port=8333, loop=asyncio.get_event_loop())
|
11479351
|
import unittest
from hbconfig import Config
from kino.slack.template import MsgTemplate
class MsgTemplateTest(unittest.TestCase):
def setUp(self):
Config("config_example")
print(Config)
def test_schedule(self):
attachments = MsgTemplate.make_schedule_template("pretext", {})
self.assertEqual(isinstance(attachments, list), True)
def test_skill(self):
attachments = MsgTemplate.make_skill_template("pretext", {})
self.assertEqual(isinstance(attachments, list), True)
def test_help(self):
attachments = MsgTemplate.make_help_template("guide", {})
self.assertEqual(isinstance(attachments, list), True)
def test_giphy(self):
attachments = MsgTemplate.make_giphy_template("query", "url")
self.assertEqual(isinstance(attachments, list), True)
def test_weather(self):
attachments = MsgTemplate.make_weather_template(
"address", "icon", "summary", "temperature"
)
self.assertEqual(isinstance(attachments, list), True)
def test_air_quality(self):
data = {"cai": {"grade": "1", "value": "good", "description": "좋음"}, "pm25": {}}
attachments = MsgTemplate.make_air_quality_template("station_name", data)
self.assertEqual(isinstance(attachments, list), True)
def test_todoist(self):
attachments = MsgTemplate.make_todoist_task_template([])
self.assertEqual(isinstance(attachments, list), True)
def test_feed(self):
attachments = MsgTemplate.make_feed_template(("title", "link", "description"))
self.assertEqual(isinstance(attachments, list), True)
def test_bus(self):
attachments = MsgTemplate.make_bus_stop_template({})
self.assertEqual(isinstance(attachments, list), True)
def test_summary(self):
data = {"Color": "RED", "total": "90"}
attachments = MsgTemplate.make_summary_template(data)
self.assertEqual(isinstance(attachments, list), True)
|
11479397
|
import sys; sys.dont_write_bytecode=True
sys.path.insert(0, '../')
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import util
import pickle
import hdf5_to_dict as io
FIGX = 14
FIGY = 10
SIZE = 40
if len(sys.argv) != 3:
util.warn('Format: python eht_plot.py [averages] [output]')
sys.exit()
favg = sys.argv[1]
fout = sys.argv[2]
avg = pickle.load(open(favg, 'rb'))
fig = plt.figure(figsize=(FIGX, FIGY))
ax = plt.subplot(2,3,1)
ax.plot(avg['r'], avg['rho_r'], color='k', linewidth=2)
#ax.set_xscale('log')
ax.set_yscale('log')
ax.set_ylabel('<rho>')
ax.set_ylim([1.e-2, 1.e0])
ax = plt.subplot(2,3,2)
ax.plot(avg['r'], avg['Pg_r'], color='k', linewidth=2)
#ax.set_xscale('log')
ax.set_yscale('log')
ax.set_ylabel('<Pg>')
ax.set_ylim([1.e-6, 1.e-2])
ax = plt.subplot(2,3,3)
ax.plot(avg['r'], avg['B_r'], color='k', linewidth=2)
#ax.set_xscale('log')
ax.set_yscale('log')
ax.set_ylabel('<|B|>')
ax.set_ylim([1.e-4, 1.e-1])
ax = plt.subplot(2,3,4)
ax.plot(avg['r'], avg['uphi_r'], color='k', linewidth=2)
#ax.set_xscale('log')
ax.set_yscale('log')
ax.set_ylabel('<u^phi>')
ax.set_ylim([1.e-3, 1.e1])
ax = plt.subplot(2,3,5)
ax.plot(avg['r'], avg['Ptot_r'], color='k', linewidth=2)
#ax.set_xscale('log')
ax.set_yscale('log')
ax.set_ylabel('<Ptot>')
ax.set_ylim([1.e-6, 1.e-2])
ax = plt.subplot(2,3,6)
ax.plot(avg['r'], avg['betainv_r'], color='k', linewidth=2)
#ax.set_xscale('log')
ax.set_yscale('log')
ax.set_ylabel('<beta^-1>')
ax.set_ylim([1.e-2, 1.e1])
plt.savefig(fout + '_ravgs.png')
plt.close(fig)
# SADW
fig = plt.figure(figsize=(FIGX, FIGY))
ax = plt.subplot(2,3,1)
ax.plot(avg['r'], avg['rho_SADW'], color='k', linewidth=2)
ax.set_yscale('log')
plt.savefig(fout + '_sadw.png')
plt.close(fig)
fig = plt.figure(figsize=(FIGX, FIGY))
sx = '_d'
ax = plt.subplot(5,1,1)
ax.plot(avg['t'+sx], np.fabs(avg['Mdot'+sx]), color='k')
ax.set_xlim([0,1e4])
ax.set_xticklabels([])
ax.set_ylabel('|Mdot|')
ax = plt.subplot(5,1,2)
ax.plot(avg['t'+sx], avg['Phi'+sx], color='k')
ax.set_xlim([0,1e4])
ax.set_xticklabels([])
ax.set_ylabel('Phi')
ax = plt.subplot(5,1,3)
ax.plot(avg['t'+sx], np.fabs(avg['Ldot'+sx]), color='k')
ax.set_xlim([0,1e4])
ax.set_xticklabels([])
ax.set_ylabel('|Ldot|')
ax = plt.subplot(5,1,4)
ax.plot(avg['t'+sx], np.fabs(avg['Edot'+sx] - avg['Mdot'+sx]), color='k')
ax.set_xlim([0,1e4])
ax.set_xticklabels([])
ax.set_ylabel('|Edot - Mdot|')
ax = plt.subplot(5,1,5)
ax.plot(avg['t'+sx], avg['Lum'+sx], color='k')
ax.set_xlim([0,1e4])
ax.set_xlabel('t/M')
ax.set_ylabel('Lum')
plt.savefig(fout + '_fluxes.png')
plt.close(fig)
fig = plt.figure(figsize=(FIGX, FIGY))
ax = plt.subplot(5,1,1)
ax.plot(avg['t'+sx], np.fabs(avg['Mdot'+sx]), color='k')
ax.set_xlim([0,1e4])
ax.set_xticklabels([])
ax.set_ylabel('|Mdot|')
print(np.fabs(avg['Mdot'+sx]).max())
ax = plt.subplot(5,1,2)
ax.plot(avg['t'+sx], avg['Phi'+sx]/np.sqrt(np.fabs(avg['Mdot'+sx])), color='k')
ax.set_xlim([0,1e4])
ax.set_xticklabels([])
ax.set_ylabel('Phi/sqrt(|Mdot|)')
ax = plt.subplot(5,1,3)
ax.plot(avg['t'+sx], np.fabs(avg['Ldot'+sx])/(np.fabs(avg['Mdot'+sx])), color='k')
ax.set_xlim([0,1e4])
ax.set_xticklabels([])
ax.set_ylabel('|Ldot|/|Mdot|')
ax = plt.subplot(5,1,4)
ax.plot(avg['t'+sx], np.fabs(avg['Edot'+sx] - avg['Mdot'+sx])/(np.fabs(avg['Mdot'+sx])), color='k')
ax.set_xlim([0,1e4])
ax.set_xticklabels([])
ax.set_ylabel('|Edot - Mdot|/|Mdot|')
ax = plt.subplot(5,1,5)
ax.plot(avg['t'+sx], avg['Lum'+sx]/(np.fabs(avg['Mdot'+sx])), color='k')
ax.set_xlim([0,1e4])
ax.set_xlabel('t/M')
ax.set_ylabel('Lum/|Mdot|')
plt.savefig(fout + '_normfluxes.png')
plt.close(fig)
|
11479427
|
class ParameterSetting():
def __init__(self, csv_path='./', data_dir='furbo_only', save_root='snapshots', model_file='snapshots/final_model.pkl',
model_name = 'CNN14', val_split=0,
epochs=20, batch_size=128, lr=0.0001, num_class=2,
time_drop_width=64, time_stripes_num=2, freq_drop_width=8, freq_stripes_num=2,
sr=8000, nfft=200, hop=80, mel=64, resume=None, normalize=None, preload=False,
spec_aug=False, optimizer='adam', scheduler='cosine'):
self.csv_path = csv_path
self.data_dir = data_dir
self.save_root = save_root
self.model_file = model_file
self.model_name = model_name
self.val_split = val_split
self.epochs = epochs
self.batch_size = batch_size
self.lr = lr
self.num_class = num_class
self.optimizer = optimizer
self.scheduler = scheduler
self.time_drop_width = time_drop_width
self.time_stripes_num = time_stripes_num
self.freq_drop_width = freq_drop_width
self.freq_stripes_num = freq_stripes_num
self.sr = sr
self.nfft = nfft
self.hop = hop
self.mel = mel
self.resume = resume
self.normalize = normalize
self.preload = preload
self.spec_aug = spec_aug
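# Usage sketch (assumption: not part of the original file). Every setting is
# a keyword argument with a default, so experiments override only what changes:
#
#   params = ParameterSetting(data_dir='furbo_only', epochs=50, lr=1e-3,
#                             spec_aug=True)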
|
11479430
|
import sublime
from .debug import debug
from .get_source_folders import get_source_folders
from .get_exclude_patterns import get_exclude_patterns
from .utils import error_message, status_message
from .exec_command import run_command_async
def update_source_modules(source_modules):
source_folders = get_source_folders()
exclude_patterns = get_exclude_patterns()
debug("update_source_modules:source_folders", source_folders)
def callback(err, result):
source_modules_callback(err, result, source_modules)
next()
def next():
if len(source_folders) > 0:
source_folder = source_folders.pop(0)
folder_patterns = exclude_patterns.get(source_folder) or {}
debug("folder_patterns", folder_patterns)
run_command_async(
"exportsFromDirectory",
{
"directory": source_folder,
"folderExcludePatterns": folder_patterns.get(
"folderExcludePatterns"
),
"fileExcludePatterns": folder_patterns.get("fileExcludePatterns"),
},
callback,
)
source_modules.clear()
next()
def source_modules_callback(err, result, source_modules):
if err:
return error_message(err)
    if type(result) is not list:
        return error_message("Unexpected type of result: " + str(type(result)))
for item in result:
filepath = item.get("filepath")
if filepath is None:
continue
source_modules.append(item)
count = len(source_modules)
status_message("{0} source modules found".format(count))
debug("Update source modules", count)
|
11479434
|
from battle_city.connection import PlayerConnection
from asynctest.mock import CoroutineMock, call
import pytest
@pytest.mark.asyncio
async def test_client_write_small_message():
writer = CoroutineMock()
writer.drain = CoroutineMock()
connection = PlayerConnection(reader=None, writer=writer)
await connection.write({'test': 'test'})
assert writer.method_calls == [
call.write(b'{"test": "test"}'),
call.write(b'\n'),
call.drain(),
]
|
11479448
|
import os
import sys
import shutil
from subprocess import check_output
import pytest
import loky
from loky import cpu_count
def test_version():
assert hasattr(loky, '__version__'), (
"There are no __version__ argument on the loky module")
def test_cpu_count():
cpus = cpu_count()
assert type(cpus) is int
assert cpus >= 1
cpu_count_cmd = ("from loky.backend.context import cpu_count;"
"print(cpu_count())")
def test_cpu_count_affinity():
if not hasattr(os, 'sched_getaffinity') or not hasattr(shutil, 'which'):
pytest.skip()
taskset_bin = shutil.which('taskset')
python_bin = shutil.which('python')
if taskset_bin is None or python_bin is None:
raise pytest.skip()
try:
os.sched_getaffinity(0)
except NotImplementedError:
pytest.skip()
res = check_output([taskset_bin, '-c', '0',
python_bin, '-c', cpu_count_cmd])
assert res.strip().decode('utf-8') == '1'
def test_cpu_count_cfs_limit():
if sys.platform == "win32":
pytest.skip()
if not hasattr(shutil, 'which'):
pytest.skip()
docker_bin = shutil.which('docker')
if docker_bin is None:
raise pytest.skip()
loky_path = os.path.abspath(os.path.dirname(loky.__file__))
# The following will always run using the Python 3.6 docker image.
# We mount the loky source as /loky inside the container,
# so it can be imported when running commands under /
res = check_output([docker_bin, 'run', '--rm', '--cpus', '0.5',
'-v', '%s:/loky' % loky_path,
'python:3.6',
'python', '-c', cpu_count_cmd])
assert res.strip().decode('utf-8') == '1'
|
11479496
|
from collections import defaultdict
import multiprocessing
import numpy as np
from lfd.rapprentice import math_utils, LOG
def intersect_segs(ps_n2, q_22):
"""Takes a list of 2d nodes (ps_n2) of a piecewise linear curve and two points representing a single segment (q_22)
and returns indices into ps_n2 of intersections with the segment."""
assert ps_n2.shape[1] == 2 and q_22.shape == (2, 2)
def cross(a_n2, b_n2):
return a_n2[:,0]*b_n2[:,1] - a_n2[:,1]*b_n2[:,0]
rs = ps_n2[1:,:] - ps_n2[:-1,:]
s = q_22[1,:] - q_22[0,:]
denom = cross(rs, s[None,:])
qmp = q_22[0,:][None,:] - ps_n2[:-1,:]
ts = cross(qmp, s[None,:]) / denom # zero denom will make the corresponding element of 'intersections' false
us = cross(qmp, rs) / denom # same here
intersections = np.flatnonzero((ts > 0) & (ts < 1) & (us > 0) & (us < 1))
return intersections, ts, us
def rope_has_intersections(ctl_pts):
for i in range(len(ctl_pts) - 1):
curr_seg = ctl_pts[i:i+2,:]
intersections, ts, us = intersect_segs(ctl_pts[:,:2], curr_seg[:,:2])
if len(intersections) != 0:
return True
return False
def compute_dt_code(ctl_pts, plotting=False):
"""Takes rope control points (Nx3 array), closes the loop, and computes the Dowker-Thistlethwaite code for the knot.
The z-value for the points are used for determining over/undercrossings.
Follows procedure outlined here: http://katlas.math.toronto.edu/wiki/DT_(Dowker-Thistlethwaite)_Codes
"""
# First, close the loop by introducing extra points under the table and toward the robot (by subtracting z and x values)
# first_pt, last_pt = ctl_pts[0], ctl_pts[-1]
# flipped = False
# if first_pt[1] > last_pt[1]:
# first_pt, last_pt = last_pt, first_pt
# flipped = True
# min_z = ctl_pts[:,2].min()
# extra_first_pt, extra_last_pt = first_pt + [-.1, -.1, min_z-1], last_pt + [-.1, .1, min_z-1]
# if flipped:
# extra_pts = [extra_first_pt, extra_first_pt + [-1, 0, 0], extra_last_pt + [-1, 0, 0], extra_last_pt, last_pt]
# else:
# extra_pts = [extra_last_pt, extra_last_pt + [-1, 0, 0], extra_first_pt + [-1, 0, 0], extra_first_pt, first_pt]
# ctl_pts = np.append(ctl_pts, extra_pts, axis=0)
if plotting:
import trajoptpy, openravepy
env = openravepy.Environment()
viewer = trajoptpy.GetViewer(env)
handles = []
handles.append(env.plot3(ctl_pts, 5, [0, 0, 1]))
viewer.Idle()
# Upsampling loop: upsample until every segment has at most one crossing
need_upsample_ind = None
upsample_iters = 0
max_upsample_iters = 10
while True:
counter = 1
crossings = defaultdict(list)
# Walk along rope: for each segment, compute intersections with all other segments
for i in range(len(ctl_pts) - 1):
curr_seg = ctl_pts[i:i+2,:]
intersections, ts, us = intersect_segs(ctl_pts[:,:2], curr_seg[:,:2])
if len(intersections) == 0:
continue
if len(intersections) != 1:
LOG.debug('warning: more than one intersection for segment %d, now upsampling', i)
need_upsample_ind = i
break
# for each intersection, determine and record over/undercrossing
i_int = intersections[0]
if plotting:
handles.append(env.drawlinestrip(ctl_pts[i_int:i_int+2], 5, [1, 0, 0]))
int_point_rope = ctl_pts[i_int] + ts[i_int]*(ctl_pts[i_int+1] - ctl_pts[i_int])
int_point_curr_seg = curr_seg[0] + us[i_int]*(curr_seg[1] - curr_seg[0])
#assert np.allclose(int_point_rope[:2], int_point_curr_seg[:2])
above = int_point_curr_seg[2] > int_point_rope[2]
crossings[tuple(sorted((i, i_int)))].append(-counter if counter % 2 == 0 and above else counter)
counter += 1
if plotting: viewer.Idle()
# upsample if necessary
if need_upsample_ind is not None and upsample_iters < max_upsample_iters:
spacing = np.linspace(0, 1, len(ctl_pts))
new_spacing = np.insert(spacing, need_upsample_ind+1, (spacing[need_upsample_ind]+spacing[need_upsample_ind+1])/2.)
ctl_pts = math_utils.interp2d(new_spacing, spacing, ctl_pts)
upsample_iters += 1
            need_upsample_ind = None
continue
break
# Extract relevant part of crossing data to produce DT code
out = []
for pair in crossings.itervalues():
assert len(pair) == 2
odd = [p for p in pair if p % 2 == 1][0]
even = [p for p in pair if p % 2 == 0][0]
out.append((odd, even))
out.sort()
dt_code = [-o[1] for o in out]
return dt_code
def _dt_code_to_knot(dt_code):
import snappy
try:
m = snappy.Manifold("DT:[%s]" % ",".join(map(str, dt_code)))
knot = snappy.HTLinkExteriors.identify(m)
return knot.name()
except:
import traceback
traceback.print_exc()
return None
def dt_code_to_knot(dt_code):
def dt_code_to_knot_wrapper(q, x):
result = _dt_code_to_knot(x)
q.put(result)
q.close()
q = multiprocessing.Queue(1)
proc = multiprocessing.Process(target=dt_code_to_knot_wrapper, args=(q, dt_code))
proc.start()
TIMEOUT = 1
try:
result = q.get(True, TIMEOUT)
except:
LOG.warn("Timeout for knot identification exceeded, assuming no knot")
result = None
finally:
proc.terminate()
return result
def identify_knot(ctl_pts):
"""Given control points from a rope, gives a knot name if identified by snappy, or None otherwise"""
try:
dt_code = compute_dt_code(ctl_pts)
print 'dt code', dt_code
return dt_code_to_knot(dt_code)
except:
import traceback
traceback.print_exc()
return None
def main():
#dt_code = [8, 6, -4, -10, 2]
#dt_code = [4, 6, 2, -10, 8]
dt_code = [4, 6, 2, -8]
# m = snappy.Manifold("DT:[%s]" % ",".join(map(str, dt_code)))
# knot = snappy.HTLinkExteriors.identify(m)
# print knot.name()
#print dt_code_to_knot(dt_code)
#return
import cPickle
with open("results/single_example_no_failures_100_03cm_s0.pkl", "r") as f: experiments = cPickle.load(f)
log = experiments[2][1]
rope_nodes = []
for entry in log:
if 'sim_rope_nodes_after_full_traj' in entry.name:
rope_nodes.append(entry.data)
for i, n in enumerate(rope_nodes):
knot = identify_knot(n)
print "[%d/%d] %s" % (i+1, len(rope_nodes), knot)
if __name__ == '__main__':
main()
|
11479505
|
from functools import total_ordering
import json
import os
from random import SystemRandom
import shutil
from uuid import UUID
from ethereum.slogging import get_logger
from ethereum.tools import keys
from ethereum.utils import privtopub
from ethereum.utils import sha3, is_string, encode_hex, checksum_encode, to_string, decode_hex
import bitcoin
from eth_account import Account as EAccount
import binascii
#common.log = get_logger('accounts')
DEFAULT_COINBASE = decode_hex('de0b295669a9fd93d5f28d9ec85e40f4cb697bae')
random = SystemRandom()
def mk_privkey(seed):
return sha3(seed)
def mk_random_privkey():
    # hex(...)[2:-1] was a py2 idiom that stripped the trailing 'L'; on py3
    # it would drop the last hex digit, so format to 64 hex chars directly
    k = '{:064x}'.format(random.getrandbits(256))
assert len(k) == 64
return decode_hex(k)
class Account(EAccount):
"""
"""
def __init__(self, keystore, password=None, path=None):
self.keystore = keystore
self._privatekey = None
try:
self._address = decode_hex(self.keystore['address'])
except KeyError:
self._address = None
self.locked = True
if password is not None:
self.unlock(password)
if path is not None:
self.path = os.path.abspath(path)
else:
self.path = None
    @classmethod
    def new(cls, password, key=None, uuid=None, path=None):
        """
        :param password: password used to encrypt the keystore
        :param key: private key bytes; a random key is generated if None
        :param uuid: optional id stored in the keystore
        :param path: optional path the keystore can be saved to
        :return: a new unlocked Account
        """
        if key is None:
            key = mk_random_privkey()
        # [NOTE]: key and password should be bytes
        if not is_string(key):
            key = to_string(key)
        if not is_string(password):
            password = to_string(password)
        # create() would only use `key` as extra entropy, so derive the
        # account from the given key instead
        account = cls.privateKeyToAccount(key)
        keystore = Account.encrypt(account.privateKey, password)
        new_account = Account(keystore, password, path)
        if uuid is not None:
            new_account.uuid = uuid
        return new_account
@classmethod
def load(cls, path, password=None):
"""
:param path:
:param password:
:return:
"""
with open(path) as f:
keystore = json.load(f)
if not keys.check_keystore_json(keystore):
raise ValueError('Invalid keystore file')
return Account(keystore, password, path=path)
    def toJson(self):
        """
        :return: the keystore serialized as a JSON string
        """
        return json.dumps(self.keystore)
    def dump(self):
        """
        :return: the keystore serialized as a JSON string
        """
        return self.toJson()
    def save(self, include_address=True, include_id=True):
        """
        Write the keystore to ``self.path``.
        """
        if self.path:
            with open(self.path, 'w') as f:
                f.write(self.toJson())
        else:
            raise Exception("No path given")
def unlock(self, password):
"""Unlock the account with a password.
If the account is already unlocked, nothing happens, even if the password is wrong.
:raises: :exc:`ValueError` (originating in ethereum.keys) if the password is wrong (and the
account is locked)
"""
if self.locked:
self._privatekey = self.decrypt(json.dumps(self.keystore), password)
self.locked = False
def lock(self):
"""
:return:
"""
self._privatekey = None
self.locked = True
@property
def privkey(self):
"""The account's private key or `None` if the account is locked"""
if not self.locked:
return self._privatekey
else:
return None
@property
def private_key_string(self):
if self.privkey:
return binascii.hexlify(self.privkey).decode()
@property
def pubkey(self):
"""The account's public key or `None` if the account is locked"""
if not self.locked:
return privtopub(self.privkey)
else:
return None
@property
def pubkey_safe(self):
if not self.locked:
return encode_hex(bitcoin.privtopub(self.privkey))
else:
return None
@property
def address(self):
"""
:return:
"""
if self._address:
pass
elif 'address' in self.keystore:
self._address = decode_hex(self.keystore['address'])
elif not self.locked:
self._address = keys.privtoaddr(self.privkey)
else:
return None
return checksum_encode(encode_hex(self._address))
@property
def uuid(self):
"""
"""
try:
return self.keystore['id']
except KeyError:
return None
@uuid.setter
def uuid(self, value):
"""
:param value: if value is None, remove it
:return:
"""
if value is not None:
self.keystore['id'] = value
elif 'id' in self.keystore:
self.keystore.pop('id')
def sign_hash(self, message_hash):
"""
:param message_hash:
:param private_key:
:return:
"""
return self.signHash(message_hash,self.privkey)
    def sign_transaction(self, transaction_dict):
        """
        :param transaction_dict: the transaction as a dict
        :return: the signed transaction
        """
return self.signTransaction(transaction_dict, self.privkey)
def __repr__(self):
        if self.address is not None:
            address = self.address  # already checksum-encoded hex
        else:
            address = '?'
return '<Account(address={address}, id={id})>'.format(address=address, id=self.uuid)
@total_ordering
class MinType(object):
""" Return Min value for sorting comparison
This class is used for comparing unorderded types. e.g., NoneType
"""
def __le__(self, other):
return True
def __eq__(self, other):
return (self is other)
def privtoaddr(x):
if len(x) > 32:
x = decode_hex(x)
return sha3(bitcoin.privtopub(x)[1:])[12:]
if __name__ == "__main__":
account = Account.new("mytest",path="./test")
print(account.pubkey)
print(encode_hex(bitcoin.privtopub(account.privkey)))
print(encode_hex(account.privkey))
print(account.address)
print(account.dump())
|
11479522
|
import numpy as np
import pandas as pd
from ..data.dataset import TableDataset
from ..mltypes import Identifier
import typing as t
def findna(dataset: TableDataset) -> TableDataset:
    data = dataset.table_data
    # replace common NA spellings with real NaN values; passing None as the
    # replacement value would trigger pandas' pad-fill behaviour instead
    data = data.replace(('nan', 'NaN', 'NA', 'na'), np.nan, inplace=False)
    return TableDataset(data, dataset.target_columns)
def fillna(dataset: TableDataset) -> TableDataset:
data = dataset.table_data
if isinstance(data, pd.DataFrame):
data = data.fillna(method='ffill', inplace=False)
data.fillna(method='bfill', inplace=True)
else:
mask = np.isnan(dataset.table_data)
idx = np.where(~mask, np.arange(mask.shape[1]), 0)
np.maximum.accumulate(idx, axis=1, out=idx)
data = data[np.arange(idx.shape[0])[:, None], idx]
return TableDataset(data, dataset.target_columns)
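# Hedged sketch (not part of the original module): the numpy branch of
# fillna() above forward-fills along axis 1 via the index-accumulation trick:
#   a = np.array([[1., np.nan, np.nan, 4.]])
#   mask = np.isnan(a)
#   idx = np.where(~mask, np.arange(mask.shape[1]), 0)
#   np.maximum.accumulate(idx, axis=1, out=idx)
#   a[np.arange(idx.shape[0])[:, None], idx]  # -> [[1., 1., 1., 4.]]
# Leading NaNs stay NaN, since index 0 would itself point at a NaN.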
def drop_index(dataset: TableDataset, index_id: Identifier = 0) -> TableDataset:
data = dataset.table_data
if isinstance(data, pd.DataFrame):
column_ids = data.columns
if index_id not in column_ids:
index_id = data.columns[index_id]
data = data.drop(columns=index_id, inplace=False)
else:
assert isinstance(index_id, int)
pos_id = index_id if index_id >= 0 else data.shape[1] + index_id
data = np.delete(data, pos_id, axis=1)
return TableDataset(data, dataset.target_columns)
def drop_columns(dataset: TableDataset, columns: t.Sequence[Identifier]) -> TableDataset:
data = dataset.table_data.drop(columns, axis=1)
return TableDataset(data, dataset.target_columns)
|
11479529
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchnlp.common.hparams import HParams
from torchnlp.common.model import Model, gen_model_dir
from torchnlp.modules import outputs
import os
VOCABS_FILE = 'vocabs.pt'
class Tagger(Model):
"""
Abstract base class that adds the following boilerplate for
sequence tagging tasks:
- Word Embeddings
- Character Embeddings
- Tag projection
- CRF
Derived classes implement the compute() method and not forward().
This is so that projection and other layers can be added
"""
def __init__(self, hparams=None, vocabs=None):
"""
Parameters:
hparams: Instance of HParams class
num_tags: Number of output tags
vocabs: tuple of (word vocab, char vocab, tags vocab). Each is an
instance of torchtext.vocab.Vocab.
NOTE: If word_vocab.vectors is available it will initialize the embeddings
and with word_vocab.vectors make it non-trainable
"""
super(Tagger, self).__init__(hparams)
if vocabs is None or not isinstance(vocabs, tuple) or len(vocabs) != 3:
raise ValueError('Must provide vocabs 3-tuple')
vocab_word, vocab_char, vocab_tags = vocabs
if vocab_word is None:
raise ValueError('Must provide vocab_word')
if vocab_tags is None:
            raise ValueError('Must provide vocab_tags')
self.vocabs = vocabs
self.vocab_tags = vocab_tags # Needed during eval and prediction
self.embedding_word = nn.Embedding(len(vocab_word), hparams.embedding_size_word)
self.embedding_char = None
if vocab_char is not None and hparams.embedding_size_char > 0:
self.embedding_char = nn.Embedding(len(vocab_char), hparams.embedding_size_char)
if vocab_word.vectors is not None:
if hparams.embedding_size_word != vocab_word.vectors.shape[1]:
raise ValueError('embedding_size should be {} but got {}'
.format(vocab_word.vectors.shape[1],
hparams.embedding_size_word))
self.embedding_word.weight.data.copy_(vocab_word.vectors)
self.embedding_word.weight.requires_grad = False
if hparams.use_crf:
self.output_layer = outputs.CRFOutputLayer(hparams.hidden_size, len(vocab_tags))
else:
self.output_layer = outputs.SoftmaxOutputLayer(hparams.hidden_size, len(vocab_tags))
def _embed_compute(self, batch):
inputs_word_emb = self.embedding_word(batch.inputs_word)
inputs_char_emb = None
if self.embedding_char is not None:
inputs_char_emb = self.embedding_char(batch.inputs_char.view(-1,
batch.inputs_char.shape[-1]))
return self.compute(inputs_word_emb, inputs_char_emb)
def forward(self, batch):
"""
NOTE: batch must have the following attributes:
inputs_word, inputs_char, labels
"""
with torch.no_grad():
hidden = self._embed_compute(batch)
output = self.output_layer(hidden)
return output
# TODO: Add beam search somewhere :)
def loss(self, batch, compute_predictions=False):
"""
NOTE: batch must have the following attributes:
inputs_word, inputs_char, labels
"""
hidden = self._embed_compute(batch)
predictions = None
if compute_predictions:
predictions = self.output_layer(hidden)
loss_val = self.output_layer.loss(hidden, batch.labels)
return loss_val, predictions
def compute(self, inputs_word_emb, inputs_char_emb):
"""
Abstract method that is called to compute the final model
hidden state. Derived classes implement the method to take
input embeddings and provide the final hidden state
Parameters:
inputs_word_emb: Input word embeddings of shape
[batch, sequence-length, word-embedding-size]
inputs_char_emb[optional]: Input character embeddings of shape
[batch x sequence-length, word-length, char-embedding-size]
Returns:
Final hidden state in the shape [batch, sequence-length, hidden-size]
"""
raise NotImplementedError("Must implement compute()")
@classmethod
def create(cls, task_name, hparams, vocabs, **kwargs):
"""
Saves the vocab files
"""
model = super(Tagger, cls).create(task_name, hparams, vocabs=vocabs, **kwargs)
model_dir = gen_model_dir(task_name, cls)
torch.save(vocabs, os.path.join(model_dir, VOCABS_FILE))
return model
@classmethod
def load(cls, task_name, checkpoint, **kwargs):
model_dir = gen_model_dir(task_name, cls)
vocabs_path = os.path.join(model_dir, VOCABS_FILE)
if not os.path.exists(vocabs_path):
raise OSError('Vocabs file not found')
vocabs = torch.load(vocabs_path)
return super(Tagger, cls).load(task_name, checkpoint, vocabs=vocabs, **kwargs)
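# Hedged sketch (not part of the original file): a minimal concrete Tagger
# whose compute() runs a bidirectional LSTM over the word embeddings. The
# class name and layer choice are illustrative, and hparams.hidden_size is
# assumed to be even.
class BiLSTMTagger(Tagger):
    def __init__(self, hparams=None, vocabs=None):
        super(BiLSTMTagger, self).__init__(hparams, vocabs)
        # the two directions of hidden_size // 2 concatenate to hidden_size,
        # which is what the output layer expects
        self.rnn = nn.LSTM(hparams.embedding_size_word, hparams.hidden_size // 2,
                           num_layers=hparams.num_hidden_layers,
                           batch_first=True, bidirectional=True)
    def compute(self, inputs_word_emb, inputs_char_emb):
        # returns [batch, sequence-length, hidden-size] per the contract above
        hidden, _ = self.rnn(inputs_word_emb)
        return hidden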
def hparams_tagging_base():
return HParams(
batch_size=100,
embedding_size_word=300,
embedding_size_char=0, # No char embeddings
embedding_size_char_per_word=100,
embedding_size_tags=100,
hidden_size=128,
learning_rate=0.2,
learning_rate_decay=None,
max_length=256,
num_hidden_layers=1,
dropout=0.2,
optimizer_adam_beta1=0.9,
optimizer_adam_beta2=0.98,
use_crf=False
)
|
11479533
|
import time
import unittest
from malcolm.core.timestamp import TimeStamp
class TestTimeStamp(unittest.TestCase):
def test_no_args(self):
now = time.time()
o = TimeStamp()
self.assertAlmostEqual(now, o.to_time(), delta=0.2)
def test_args(self):
o = TimeStamp(1231112, 211255265, 43)
assert o.secondsPastEpoch == 1231112
assert o.nanoseconds == 211255265
assert o.userTag == 43
assert o.to_time() == 1231112.211255265
|
11479549
|
import pytest
from uintset import UintSet
WORD_SIZE = 64
def test_new():
s = UintSet()
assert len(s) == 0
def test_new_from_iterable():
s = UintSet([1, 100, 3]) # beyond word 0
assert len(s) == 3
def test_add():
s = UintSet()
s.add(0)
assert len(s) == 1
def test_add_multiple():
s = UintSet()
s.add(1)
s.add(3)
s.add(1)
assert len(s) == 2
def test_add_negative():
s = UintSet()
with pytest.raises(ValueError):
s.add(-1)
def test_contains():
s = UintSet()
s.add(1)
assert 1 in s
def test_iter():
s = UintSet([1, 5, 0, 3, 2, 4])
assert list(s) == [0, 1, 2, 3, 4, 5]
def test_repr_empty():
s = UintSet()
assert repr(s) == 'UintSet()'
def test_repr():
s = UintSet([1, 5, 0, 3, 2, 4])
assert repr(s) == 'UintSet({0, 1, 2, 3, 4, 5})'
def test_eq():
test_cases = [
(UintSet(), UintSet(), True),
(UintSet([1]), UintSet(), False),
(UintSet(), UintSet([1]), False),
(UintSet([1, 2, 100]), UintSet([100, 2, 1]), True), # beyond word 0
(UintSet([1, 100]), UintSet([1, 101]), False),
(UintSet([1, 100]), UintSet([1, 100, 1000]), False),
]
for s1, s2, want in test_cases:
assert (s1 == s2) is want
def test_copy():
test_cases = [
UintSet(),
UintSet([1]),
UintSet([1, 2]),
UintSet([1, 100]), # beyond word 0
]
for s1 in test_cases:
s2 = s1.copy()
assert s1 == s2
union_cases = [
(UintSet(), UintSet(), UintSet()),
(UintSet([1]), UintSet(), UintSet([1])),
(UintSet(), UintSet([1]), UintSet([1])),
(UintSet([1, 100]), UintSet([100, 1]), UintSet([100, 1])), # beyond word 0
(UintSet([1, 100]), UintSet([2]), UintSet([1, 2, 100])),
]
@pytest.mark.parametrize("s1, s2, want", union_cases)
def test_or_op(s1, s2, want):
got = s1 | s2
assert len(got) == len(want)
assert got == want
@pytest.mark.parametrize("s1, s2, want", union_cases)
def test_union(s1, s2, want):
got = s1.union(s2)
assert len(got) == len(want)
assert got == want
@pytest.mark.parametrize("s1, s2, want", union_cases)
def test_union_iterable(s1, s2, want):
it = list(s2)
got = s1.union(it)
assert len(got) == len(want)
assert got == want
def test_union_iterable_multiple():
s = UintSet([1, 3, 5])
it1 = [2, 4, 6]
it2 = {10, 11, 12}
want = UintSet({1, 2, 3, 4, 5, 6, 10, 11, 12})
got = s.union(it1, it2)
assert got == want
@pytest.fixture
def intersection_cases():
return [
(UintSet(), UintSet(), UintSet()),
(UintSet([1]), UintSet(), UintSet()),
(UintSet([1]), UintSet([1]), UintSet([1])),
(UintSet([1, 100]), UintSet([100, 1]), UintSet([100, 1])), # beyond word 0
(UintSet([1, 100]), UintSet([2]), UintSet()),
(UintSet([1, 2, 3, 4]), UintSet([2, 3, 5]), UintSet([2, 3])),
]
def test_and_op(intersection_cases):
for s1, s2, want in intersection_cases:
got = s1 & s2
assert len(got) == len(want)
assert got == want
def test_intersection(intersection_cases):
for s1, s2, want in intersection_cases:
got = s1.intersection(s2)
assert len(got) == len(want)
assert got == want
@pytest.fixture
def symmetric_diff_cases():
return [
(UintSet(), UintSet(), UintSet()),
(UintSet([1]), UintSet(), UintSet([1])),
(UintSet([1]), UintSet([1]), UintSet()),
(UintSet([1, 100]), UintSet([100, 1]), UintSet()), # beyond word 0
(UintSet([1, 100]), UintSet([2]), UintSet([1, 100, 2])),
(UintSet([1, 2, 3, 4]), UintSet([2, 3, 5]), UintSet([1, 4, 5])),
]
def test_xor_op(symmetric_diff_cases):
for s1, s2, want in symmetric_diff_cases:
got = s1 ^ s2
assert len(got) == len(want)
assert got == want
def test_symmetric_difference(symmetric_diff_cases):
for s1, s2, want in symmetric_diff_cases:
got = s1.symmetric_difference(s2)
assert len(got) == len(want)
assert got == want
difference_cases = [
(UintSet(), UintSet(), UintSet()),
(UintSet([1]), UintSet(), UintSet([1])),
(UintSet([1]), UintSet([1]), UintSet()),
(UintSet([1, 100]), UintSet([100, 1]), UintSet()), # beyond word 0
(UintSet([1, 100]), UintSet([2]), UintSet([1, 100])),
(UintSet([1, 2, 3, 4]), UintSet([2, 3, 5]), UintSet([1, 4])),
]
@pytest.mark.parametrize("s1, s2, want", difference_cases)
def test_sub_op(s1, s2, want):
got = s1 - s2
assert len(got) == len(want)
assert got == want
@pytest.mark.parametrize("s1, s2, want", difference_cases)
def test_difference(s1, s2, want):
got = s1.difference(s2)
assert len(got) == len(want)
assert got == want
def test_remove():
test_cases = [
(UintSet([0]), 0, UintSet()),
(UintSet([1, 2, 3]), 2, UintSet([1, 3])),
]
for s, elem, want in test_cases:
s.remove(elem)
assert s == want
def test_remove_all():
    elems = [1, 2, 3]
    s = UintSet(elems)
    for e in elems:
        s.remove(e)
    assert len(s) == 0
def test_remove_not_found():
s = UintSet()
elem = 1
with pytest.raises(KeyError) as excinfo:
s.remove(elem)
assert str(excinfo.value) == str(elem)
def test_remove_not_found_2():
s = UintSet([1, 3])
elem = 2
with pytest.raises(KeyError) as excinfo:
s.remove(elem)
assert str(excinfo.value) == str(elem)
def test_pop_not_found():
s = UintSet()
with pytest.raises(KeyError) as excinfo:
s.pop()
assert 'pop from an empty set' in str(excinfo.value)
def test_pop():
test_cases = [0, 1, WORD_SIZE-1, WORD_SIZE, WORD_SIZE+1, 100]
for want in test_cases:
s = UintSet([want])
got = s.pop()
assert got == want
assert len(s) == 0
def test_pop_all():
want = [0, 1, 100]
s = UintSet(want)
got = []
while s:
got.append(s.pop())
assert len(s) == (len(want) - len(got))
assert got == want
|
11479571
|
import numpy as np
import matplotlib.pyplot as plt
# NOTE: input_array, binary_decision_array, x and plot_myarray are assumed
# to be defined earlier in the notebook this snippet came from.
def my_marginalization(input_array, binary_decision_array):
    marginalization_array = input_array * binary_decision_array
    marginal = np.sum(marginalization_array, axis=0)  # note axis
    marginal /= marginal.sum()  # normalize
    return marginalization_array, marginal
marginalization_array, marginal = my_marginalization(input_array, binary_decision_array)
with plt.xkcd():
    plot_myarray(marginalization_array, r'estimated $\hat{x}$', r'$\tilde{x}$',
                 r'Marginalization array: $p(\hat{x} | \tilde{x})$')
    plt.figure()
    plt.plot(x, marginal)
    plt.xlabel(r'$\hat{x}$')
    plt.ylabel('probability')
    plt.show()
|
11479616
|
class TextEditorMemento:
def __init__(self, content: str):
self.content = content
def get_state(self):
return self.content
class TextEditor:
def __init__(self, content: str):
self.content = content
def get_content(self):
return self.content
def add_new_content(self, extra_content: str):
self.content += f"\n{extra_content}"
def save(self):
return TextEditorMemento(self.content)
def restore(self, m: TextEditorMemento):
self.content = m.get_state()
class Caretaker:
def __init__(self):
self.history = []
def push(self, m: TextEditorMemento):
self.history.append(m)
def pop(self):
return self.history.pop()
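# Hedged usage sketch (not part of the original file): a memento round-trip
# with the classes above.
if __name__ == "__main__":
    editor = TextEditor("line 1")
    caretaker = Caretaker()
    caretaker.push(editor.save())    # snapshot the current content
    editor.add_new_content("line 2")
    editor.restore(caretaker.pop())  # roll back to the snapshot
    assert editor.get_content() == "line 1"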
|
11479630
|
from abc import ABCMeta, abstractmethod
from typing import Type
from instagram_api.utils.http import ClientCookieJar
__all__ = ['StorageInterface']
class StorageInterface(metaclass=ABCMeta):
    cookie_jar_class: Type[ClientCookieJar]
@abstractmethod
def open(self, config: dict):
raise NotImplementedError
@abstractmethod
    def has_user(self, username: str) -> bool:
raise NotImplementedError
@abstractmethod
def open_user(self, username: str) -> dict:
raise NotImplementedError
@abstractmethod
def move_user(self, old_username: str, new_username: str):
raise NotImplementedError
@abstractmethod
def delete_user(self, username: str):
raise NotImplementedError
@abstractmethod
def load_user_settings(self) -> dict:
raise NotImplementedError
@abstractmethod
def save_user_settings(self, settings: dict, trigger_key: str = None):
raise NotImplementedError
@abstractmethod
def has_user_cookies(self) -> bool:
raise NotImplementedError
@abstractmethod
def load_user_cookies(self) -> ClientCookieJar:
raise NotImplementedError
@abstractmethod
def save_user_cookies(self, jar: ClientCookieJar):
raise NotImplementedError
@abstractmethod
def close_user(self):
raise NotImplementedError
@abstractmethod
def close(self):
raise NotImplementedError
def __del__(self):
self.close()
|
11479645
|
from __future__ import division
from __future__ import print_function
import argparse
import collections
import datetime
import itertools
import os.path
import time
from scipy.stats import entropy
BK_ENTROPY_CUTOFF = 2.5
LFM_ENTROPY_CUTOFF = 3.0
MIN_OCCURRENCES = 10
MIN_VALID_SEQ_LEN = 3
MAX_VALID_SEQ_LEN = 500
def parse_brightkite(path):
"""Parse the BrightKite dataset.
This takes as input the file `loc-brightkite_totalCheckins.txt` available
at the following URL: <https://snap.stanford.edu/data/loc-brightkite.html>.
"""
# Format: [user] [check-in time] [latitude] [longitude] [location id].
with open(path) as f:
for i, line in enumerate(f):
try:
usr, ts, lat, lon, loc = line.strip().split('\t')
except ValueError:
print("could not parse line {} ('{}'), ignoring".format(
i, line.strip()))
continue
dt = datetime.datetime.strptime(ts, "%Y-%m-%dT%H:%M:%SZ")
ts = time.mktime(dt.timetuple())
yield (usr, loc, ts)
def parse_lastfm(path):
"""Parse the last.fm dataset.
This takes as input the file
`userid-timestamp-artid-artname-traid-traname.tsv` available at the
following URL:
<http://www.dtic.upf.edu/~ocelma/MusicRecommendationDataset/lastfm-1K.html>.
"""
# Format: [user] [timestamp] [artist ID] [artist] [track ID] [track].
with open(path) as f:
for i, line in enumerate(f):
try:
usr, ts, aid, artist, tid, track = line.strip().split('\t')
except ValueError:
print("could not parse line {} ('{}'), ignoring".format(
i, line.strip()))
continue
dt = datetime.datetime.strptime(ts, "%Y-%m-%dT%H:%M:%SZ")
ts = time.mktime(dt.timetuple())
yield (usr, aid, ts)
def preprocess(stream, output_dir, prefix="processed", min_entropy=0.0):
"""Preprocess a stream of (user, item, timestamp) triplets.
The preprocessing roughly includes the following steps:
- remove items that occur infrequently,
- remove users that consume very few items,
- remove users who do not consume "diverse enough" items,
- separate data into training and validation sets,
- make sure that items in the validation sets appear at least once in the
training set,
- relabel items and users with consecutive integers.
"""
# Step 1: read stream and count number of item occurrences.
data = list()
occurrences = collections.defaultdict(lambda: 0)
for user, item, ts in stream:
data.append((user, item, ts))
occurrences[item] += 1
# Step 2: remove items that occurred infrequently, create user seqs.
tmp_dict = collections.defaultdict(list)
for user, item, ts in data:
if occurrences[item] < MIN_OCCURRENCES:
continue
tmp_dict[user].append((ts, item))
# Step 3: order user sequences by timestamp.
seq_dict = dict()
for user, seq in tmp_dict.items():
seq = [item for ts, item in sorted(seq)]
seq_dict[user] = seq
# Step 4: split into training and validation sets. Ignore users who
# consumed few items or who do not meet entropy requirements.
train = dict()
valid = dict()
for user, seq in seq_dict.items():
if len(seq) <= MIN_OCCURRENCES:
continue
hist = collections.defaultdict(lambda: 0)
for item in seq:
hist[item] += 1
if entropy(list(hist.values())) <= min_entropy:
continue
# Implementation note: round(0.025 * 100) gives 3.0 in Python, but 2.0
# in Julia. Beware! Results might differ!
cutoff = min(MAX_VALID_SEQ_LEN, max(MIN_VALID_SEQ_LEN,
int(round(0.025 * len(seq)))))
train[user] = seq[:-cutoff]
valid[user] = seq[-cutoff:]
# Step 5: relabel users and items, and remove items that do not appear in
# the training sequences.
items = set(itertools.chain(*train.values()))
users = set(train.keys())
user2id = dict(zip(users, range(1, len(users) + 1)))
item2id = dict(zip(items, range(1, len(items) + 1)))
train2 = dict()
valid2 = dict()
for user in users:
train2[user2id[user]] = tuple(map(lambda x: item2id[x], train[user]))
valid2[user2id[user]] = tuple(map(lambda x: item2id[x],
filter(lambda x: x in items, valid[user])))
# Step 6: write out the sequences.
train_path = os.path.join(output_dir, "{}-train.txt".format(prefix))
valid_path = os.path.join(output_dir, "{}-valid.txt".format(prefix))
with open(train_path, "w") as tf, open(valid_path, "w") as vf:
for uid in user2id.values():
t = 1
for iid in train2[uid]:
tf.write("{} {} {}\n".format(uid, iid, t))
t += 1
for iid in valid2[uid]:
vf.write("{} {} {}\n".format(uid, iid, t))
t += 1
print("Done.")
def main(args):
if args.which == "brightkite":
stream = parse_brightkite(args.path)
cutoff = BK_ENTROPY_CUTOFF
elif args.which == "lastfm":
stream = parse_lastfm(args.path)
cutoff = LFM_ENTROPY_CUTOFF
else:
raise RuntimeError("unknown dataset?!")
preprocess(stream, args.output_dir,
prefix=args.which,
min_entropy=cutoff)
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("which", choices=("brightkite", "lastfm"))
parser.add_argument("path")
parser.add_argument("--output-dir", default="./")
return parser.parse_args()
if __name__ == '__main__':
args = _parse_args()
main(args)
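# Example invocation (illustrative; the script file name is an assumption):
#   python preprocess.py brightkite loc-brightkite_totalCheckins.txt --output-dir data/
# which writes data/brightkite-train.txt and data/brightkite-valid.txt.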
|
11479678
|
import os
import sys
import six
import atexit
import weakref
import logging
import threading
import queue
import multiprocessing
import asyncio
from yggdrasil.tools import YggClass, sleep
MPI = None
_on_mpi = False
_mpi_rank = -1
if os.environ.get('YGG_SUBPROCESS', False):
if 'YGG_MPI_RANK' in os.environ:
_on_mpi = True
_mpi_rank = int(os.environ['YGG_MPI_RANK'])
else:
try:
from mpi4py import MPI
_on_mpi = (MPI.COMM_WORLD.Get_size() > 1)
_mpi_rank = MPI.COMM_WORLD.Get_rank()
except ImportError:
pass
mp_ctx = multiprocessing.get_context()
mp_ctx_spawn = multiprocessing.get_context("spawn")
_main_thread = threading.main_thread()
_thread_registry = weakref.WeakValueDictionary()
_lock_registry = weakref.WeakValueDictionary()
logger = logging.getLogger(__name__)
def test_target_error(): # pragma: debug
raise RuntimeError("Test error.")
def test_target_sleep(): # pragma: debug
sleep(10.0)
def check_processes(): # pragma: debug
r"""Check for processes that are still running."""
import psutil
current_process = psutil.Process()
children = current_process.children(recursive=True)
if len(children) > 0:
logging.info("Process %s has %d children" % (
current_process.pid, len(children)))
for child in children:
logger.info(" %s process running" % child.pid)
def check_threads(): # pragma: debug
r"""Check for threads that are still running."""
# logger.info("Checking %d threads" % len(_thread_registry))
for k, v in _thread_registry.items():
if v.is_alive():
logger.error("Thread is alive: %s" % k)
if threading.active_count() > 1:
logger.info("%d threads running" % threading.active_count())
for t in threading.enumerate():
logger.info(" %s thread running" % t.name)
def check_locks(): # pragma: debug
r"""Check for locks in lock registry that are locked."""
# logger.info("Checking %d locks" % len(_lock_registry))
for k, v in _lock_registry.items():
res = v.acquire(False)
if res:
v.release()
else:
logger.error("Lock could not be acquired: %s" % k)
def check_sockets(): # pragma: debug
r"""Check registered sockets."""
from yggdrasil.communication import cleanup_comms
count = cleanup_comms('ZMQComm')
if count > 0:
logger.info("%d sockets closed." % count)
def ygg_atexit(): # pragma: debug
r"""Things to do at exit."""
check_locks()
check_threads()
# # This causes a segfault in a C dependency
# if not is_subprocess():
# check_sockets()
    # Python 3.4 is no longer supported as of pip 9.0.0, but this
    # allows the code to work if it was somehow installed using an
    # older version of pip
if sys.version_info[0:2] == (3, 4): # pragma: no cover
# Print empty line to ensure close
print('', end='')
sys.stdout.flush()
atexit.register(ygg_atexit)
class SafeThread(threading.Thread):
r"""Thread that sets Event on error."""
def __init__(self, *args, **kwargs):
self._errored = threading.Event()
super(SafeThread, self).__init__(*args, **kwargs)
def run(self, *args, **kwargs):
try:
super(SafeThread, self).run(*args, **kwargs)
except BaseException:
self._errored.set()
raise
@property
def exitcode(self):
r"""int: Exit code. 1 if error, 0 otherwise."""
if (not self._started.is_set()) or self.is_alive():
return None
return int(self._errored.is_set())
@property
def pid(self):
r"""Process ID."""
return os.getpid()
class AliasDisconnectError(RuntimeError):
pass
def add_aliased_attribute(cls, name, with_lock=False):
r"""Factory to alias an attribute so that it refers to the wrapped
object.
Args:
name (str): Name of attribute to alias.
with_lock (bool, optional): If True, the class's lock will be
acquired before getting the attribute. Defaults to False.
"""
def alias_wrapper(self):
self.check_for_base(name)
lock_acquired = False
if ((with_lock and hasattr(self, 'lock')
and (name not in self._unlocked_attr))):
self.lock.acquire()
lock_acquired = True
try:
out = getattr(self._base, name)
finally:
if lock_acquired:
self.lock.release()
return out
alias_wrapper.__name__ = name
setattr(cls, name, property(alias_wrapper))
def add_aliased_method(cls, name, with_lock=False):
r"""Factory to alias a method so that it refers to the wrapped
object.
Args:
name (str): Name of method to alias.
with_lock (bool, optional): If True, the class's lock will be
acquired before executing the method. Defaults to False.
"""
def alias_wrapper(self, *args, **kwargs):
self.check_for_base(name)
lock_acquired = False
if ((with_lock and hasattr(self, 'lock')
and (name not in self._unlocked_attr))):
self.lock.acquire()
lock_acquired = True
try:
out = getattr(self._base, name)(*args, **kwargs)
finally:
if lock_acquired:
self.lock.release()
return out
alias_wrapper.__name__ = name
setattr(cls, name, alias_wrapper)
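# Hedged illustration (not in the original module): for a hypothetical
# wrapper class SomeWrapper, after
#   add_aliased_method(SomeWrapper, 'put', with_lock=True)
# calling SomeWrapper().put(x) acquires the wrapper's lock (when present and
# 'put' is not in _unlocked_attr) and forwards to self._base.put(x).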
class AliasMeta(type):
r"""Meta class for adding aliased methods to the class."""
def __new__(meta, name, bases, class_dict):
cls = type.__new__(meta, name, bases, class_dict)
for k in cls._base_meth:
assert(not hasattr(cls, k))
add_aliased_method(cls, k, with_lock=cls._base_locked)
for k in cls._base_attr:
assert(not hasattr(cls, k))
add_aliased_attribute(cls, k, with_lock=cls._base_locked)
cls._base_meth = []
cls._base_attr = []
if (cls._base_class_name is None) and (name not in ['AliasObject',
'MultiObject',
'ContextObject']):
cls._base_class_name = name
return cls
@six.add_metaclass(AliasMeta)
class AliasObject(object):
r"""Alias object that calls to attribute.
Args:
dont_initialize_base (bool, optional): If True the base object
will not be initialized. Defaults to False.
"""
__slots__ = ['_base', '__weakref__']
_base_class_name = None
_base_class = None
_base_attr = []
_base_meth = []
_base_locked = False
_unlocked_attr = []
def __init__(self, *args, dont_initialize_base=False, **kwargs):
self._base = None
if (not dont_initialize_base) and (self._base_class is not None):
self._base = self._base_class(*args, **kwargs)
@classmethod
def from_base(cls, base, *args, **kwargs):
r"""Create an instance by creating a based from the provided
base class."""
if base is not None:
kwargs['dont_initialize_base'] = True
out = cls(*args, **kwargs)
out._base = base
else:
out = cls(*args, **kwargs)
return out
def __getstate__(self):
out = dict()
def add_base_slots(base):
out.update(
dict((slot, getattr(self, slot))
for slot in base.__slots__
if (hasattr(self, slot)
and (slot not in ['_base_class', '__weakref__'])
and (slot not in out))))
for x in base.__bases__:
if x != object:
add_base_slots(x)
add_base_slots(self.__class__)
return out
def __setstate__(self, state):
for slot, value in state.items():
setattr(self, slot, value)
def check_for_base(self, attr):
r"""Raise an error if the aliased object has been disconnected."""
if self._base is None:
raise AliasDisconnectError(
("Aliased object has been disconnected so "
"'%s' is no longer available.") % attr)
@property
def dummy_copy(self):
r"""Dummy copy of base."""
return None
def disconnect(self):
r"""Disconnect from the aliased object by replacing it with
a dummy object."""
if self._base is not None:
dummy = self.dummy_copy
del self._base
self._base = dummy
def __del__(self):
self.disconnect()
class MultiObject(AliasObject):
r"""Concurrent/parallel processing object using either threads
or processes."""
__slots__ = ['task_method', 'parallel']
def __init__(self, *args, task_method="threading", **kwargs):
self.task_method = task_method
if task_method in ["thread", "threading", "concurrent"]:
self.parallel = False
elif task_method in ["process", "multiprocessing", "parallel"]:
self.parallel = True
else: # pragma: debug
raise ValueError(("Unsupported method for concurrency/"
"parallelism: '%s'") % task_method)
super(MultiObject, self).__init__(*args, **kwargs)
class Context(MultiObject):
r"""Context for managing threads/processes."""
def __init__(self, task_method='thread', dont_initialize_base=False):
super(Context, self).__init__(dont_initialize_base=True,
task_method=task_method)
if not dont_initialize_base:
if self.parallel:
self._base = mp_ctx_spawn
else:
self._base = threading
def __getstate__(self):
state = super(Context, self).__getstate__()
if self.parallel:
state['_base'] = state['_base']._name
else:
state['_base'] = None
return state
def __setstate__(self, state):
if state['_base'] is None:
state['_base'] = threading
else:
# Use the existing context?
# state['_base'] = mp_ctx_spawn
state['_base'] = multiprocessing.get_context(state['_base'])
super(Context, self).__setstate__(state)
def RLock(self, *args, **kwargs):
r"""Get a recursive lock in this context."""
kwargs['task_context'] = self
return RLock(*args, **kwargs)
def Event(self, *args, **kwargs):
r"""Get an event in this context."""
kwargs['task_context'] = self
return Event(*args, **kwargs)
def Task(self, *args, **kwargs):
r"""Get a task in this context."""
kwargs['task_context'] = self
return Task(*args, **kwargs)
def Queue(self, *args, **kwargs):
r"""Get a queue in this context."""
kwargs['task_context'] = self
return Queue(*args, **kwargs)
def Dict(self, *args, **kwargs):
r"""Get a shared dictionary in this context."""
kwargs['task_context'] = self
return Dict(*args, **kwargs)
def current_task(self):
r"""Current task (process/thread)."""
if self.parallel:
return self._base.current_process()
else:
return self._base.current_thread()
def main_task(self):
r"""Main task (process/thread)."""
if self.parallel:
out = None
if hasattr(self._base, 'parent_process'): # pragma: no cover
out = self._base.parent_process()
if out is None:
out = self.current_task()
return out
else:
return _main_thread
class DummyContextObject(object): # pragma: no cover
__slots__ = []
@property
def context(self):
return None
def disconnect(self):
pass
class ContextObject(MultiObject):
r"""Base class for object intialized in a context."""
__slots__ = ["_managed_context", "_context", "_base_class"]
def __init__(self, *args, task_method='threading',
task_context=None, **kwargs):
self._managed_context = None
if task_context is None:
task_context = Context(task_method=task_method)
self._managed_context = task_context
elif isinstance(task_context, weakref.ReferenceType):
task_context = task_context()
task_method = task_context.task_method
self._context = weakref.ref(task_context)
self._base_class = self.get_base_class(task_context)
if ((self._base_class
and isinstance(self._base_class, type)
and issubclass(self._base_class, (LockedObject, ContextObject)))):
kwargs['task_context'] = task_context
super(ContextObject, self).__init__(
*args, task_method=task_method, **kwargs)
def __getstate__(self):
state = super(ContextObject, self).__getstate__()
state['_context'] = None
return state
def __setstate__(self, state):
if state['_managed_context'] is None:
state['_managed_context'] = Context(task_method=state['task_method'])
state['_context'] = weakref.ref(state['_managed_context'])
super(ContextObject, self).__setstate__(state)
@classmethod
def get_base_class(cls, context):
r"""Get instance of base class that will be represented."""
name = cls._base_class_name
context.check_for_base(name)
return getattr(context._base, name)
def disconnect(self):
r"""Disconnect from the aliased object by replacing it with
a dummy object."""
if ContextObject is not None:
super(ContextObject, self).disconnect()
if self._managed_context is not None:
self._managed_context.disconnect()
self._managed_context = None
@property
def context(self):
r"""Context: Context used to create this object."""
return self._context()
class DummyRLock(DummyContextObject): # pragma: no cover
def acquire(self, *args, **kwargs):
pass
def release(self, *args, **kwargs):
pass
def __enter__(self, *args, **kwargs):
return self
def __exit__(self, *args, **kwargs):
pass
class RLock(ContextObject):
r"""Recursive lock. Acquiring the lock after disconnect is called
through use as a context will not raise an error, but will not
do anything."""
_base_meth = ['acquire', 'release', '__enter__', '__exit__']
def __getstate__(self):
state = super(RLock, self).__getstate__()
if (not self.parallel) and (not isinstance(state['_base'], DummyRLock)):
state['_base'] = None
return state
def __setstate__(self, state):
if state['_base'] is None:
state['_base'] = threading.RLock()
super(RLock, self).__setstate__(state)
@property
def dummy_copy(self):
r"""Dummy copy of base."""
return DummyRLock()
class DummyEvent(DummyContextObject): # pragma: no cover
__slots__ = ["_value"]
def __init__(self, value=False):
self._value = value
def is_set(self):
return self._value
def set(self):
self._value = True
def clear(self):
self._value = False
def wait(self, *args, **kwargs):
if self._value:
return
raise AliasDisconnectError("DummyEvent will never change to True.")
class ProcessEvent(object):
r"""Multiprocessing/threading event associated with a process that has
a discreet start and end."""
__slots__ = ["started", "stopped"]
def __init__(self, *args, **kwargs):
self.started = Event(*args, **kwargs)
self.stopped = Event(task_context=self.started.context)
def start(self):
r"""Set the started event."""
self.started.set()
def stop(self):
r"""Set the stopped event."""
self.stopped.set()
def has_started(self):
r"""bool: True if the process has started."""
return self.started.is_set()
def has_stopped(self):
r"""bool: True if the process has stopped."""
return self.stopped.is_set()
def is_running(self):
r"""bool: True if the processes has started, but hasn't stopped."""
return (self.has_started() and (not self.has_stopped()))
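# Hedged usage sketch (illustrative):
#   ev = ProcessEvent()
#   ev.start(); assert ev.has_started() and ev.is_running()
#   ev.stop();  assert ev.has_stopped() and not ev.is_running()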
class Event(ContextObject):
r"""Multiprocessing/threading event."""
__slots__ = ["_set", "_clear", "_set_callbacks", "_clear_callbacks"]
_base_attr = ContextObject._base_attr + ['is_set', 'wait']
def __init__(self, *args, **kwargs):
self._set = None
self._clear = None
self._set_callbacks = []
self._clear_callbacks = []
super(Event, self).__init__(*args, **kwargs)
self._set = self._base.set
self._clear = self._base.clear
def set(self):
r"""Set the event."""
self._set()
for (x, a, k) in self._set_callbacks:
x(*a, **k)
def clear(self):
r"""Clear the event."""
self._clear()
for (x, a, k) in self._clear_callbacks:
x(*a, **k)
@property
def dummy_copy(self):
r"""Dummy copy of base."""
return DummyEvent(self._base.is_set())
def __getstate__(self):
state = super(Event, self).__getstate__()
if not self.parallel:
state.pop('_set')
state.pop('_clear')
state['_base'] = state['_base'].is_set()
return state
def __setstate__(self, state):
if isinstance(state['_base'], bool):
val = state['_base']
state['_base'] = threading.Event()
state['_set'] = state['_base'].set
state['_clear'] = state['_base'].clear
if val:
state['_base'].set()
super(Event, self).__setstate__(state)
# @classmethod
# def from_event_set(cls, *events):
# r"""Create an event that is triggered when any one of the provided
# events is set.
# Args:
# *events: One or more events that will trigger this event.
# """
# # Modified version of https://stackoverflow.com/questions/12317940/
# # python-threading-can-i-sleep-on-two-threading-events-simultaneously/
# # 36661113
# or_event = cls()
# def changed():
# bools = [e.is_set() for e in events]
# if any(bools):
# or_event.set()
# else:
# or_event.clear()
# for e in events:
# e.add_callback(changed, trigger='set')
# e.add_callback(changed, trigger='clear')
# return or_event
def add_callback(self, callback, args=(), kwargs={}, trigger='set'):
r"""Add a callback that will be called when set or clear is invoked.
Args:
callback (callable): Callable executed when set is called.
args (tuple, optional): Arguments to pass to the callback.
kwargs (dict, optional): Keyword arguments to pass to the
callback.
trigger (str, optional): Action triggering the set call.
Options are 'set' or 'clear'. Defaults to 'set'.
"""
getattr(self, f'_{trigger}_callbacks').append(
(callback, args, kwargs))
def disconnect(self):
r"""Disconnect from the aliased object by replacing it with
a dummy object."""
if Event is not None:
super(Event, self).disconnect()
self._set = self._base.set
self._clear = self._base.clear
class ValueEvent(Event):
r"""Class for handling storing a value that also triggers an event."""
__slots__ = ["_event_value"]
def __init__(self, *args, **kwargs):
self._event_value = None
super(ValueEvent, self).__init__(*args, **kwargs)
def set(self, value=None):
self._event_value = value
super(ValueEvent, self).set()
def clear(self):
self._event_value = None
super(ValueEvent, self).clear()
def get(self):
return self._event_value
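# Hedged usage sketch (illustrative):
#   ev = ValueEvent()
#   ev.set('result'); assert ev.is_set() and ev.get() == 'result'
#   ev.clear();       assert ev.get() is None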
class DummyTask(DummyContextObject): # pragma: no cover
__slots__ = ["name", "exitcode", "daemon"]
def __init__(self, name='', exitcode=0, daemon=False):
self.name = name
self.exitcode = exitcode
self.daemon = daemon
super(DummyTask, self).__init__()
def join(self, *args, **kwargs):
return
def is_alive(self):
return False
def terminate(self):
pass
def kill(self):
pass
class Task(ContextObject):
r"""Multiprocessing/threading process."""
__slots__ = ["_target", "_args", "_kwargs"]
_base_attr = ['name', 'daemon', 'authkey', 'sentinel', 'exitcode', 'pid']
_base_meth = ['start', 'run', 'join',
# Thread only
'getName', 'setName', 'isDaemon', 'setDaemon',
# Process only
'terminate']
def __init__(self, target=None, args=(), kwargs={}, **kws):
self._target = target
self._args = args
self._kwargs = kwargs
if self._target is not None:
kws['target'] = self.target
super(Task, self).__init__(**kws)
@classmethod
def get_base_class(cls, context):
r"""Get instance of base class that will be represented."""
if context.parallel:
return context._base.Process
else:
return SafeThread
@property
def dummy_copy(self):
r"""Dummy copy of base."""
name = b'dummy'
exitcode = 0
daemon = False
try:
name = self._base.name
exitcode = self._base.exitcode
daemon = self._base.daemon
except AttributeError: # pragma: debug
pass
return DummyTask(name=name, exitcode=exitcode, daemon=daemon)
def __getstate__(self):
state = super(Task, self).__getstate__()
if not self.parallel:
state['_base'] = {
'name': state['_base'].name, 'group': None,
'daemon': state['_base'].daemon,
'target': state['_base']._target,
'args': state['_base']._args,
'kwargs': state['_base']._kwargs}
return state
def __setstate__(self, state):
if isinstance(state['_base'], dict):
state['_base'] = SafeThread(**state['_base'])
super(Task, self).__setstate__(state)
def is_alive(self):
r"""Determine if the process/thread is alive."""
out = self._base.is_alive()
if out is None: # pragma: debug
out = False
return out
@property
def ident(self):
r"""Process ID."""
if self.parallel:
return self._base.pid
else:
return self._base.ident
def target(self, *args, **kwargs):
r"""Run the target."""
try:
self._initialize()
self._target(*self._args, **self._kwargs)
except BaseException as e:
self._on_error(e)
finally:
self._finalize()
def _initialize(self):
r"""Initialize a run."""
pass
def _finalize(self):
r"""Finalize a run."""
pass
def _on_error(self, e):
r"""Handle an error during a run."""
raise
def kill(self, *args, **kwargs):
r"""Kill the task."""
if self.parallel and hasattr(self._base, 'kill'):
return self._base.kill(*args, **kwargs)
elif hasattr(self._base, 'terminate'):
return self._base.terminate(*args, **kwargs)
def disconnect(self):
r"""Disconnect from the aliased object by replacing it with
a dummy object."""
self._target = None
if Task is not None:
super(Task, self).disconnect()
class TaskLoop(Task):
r"""Class for looping over a task."""
__slots__ = ["break_flag", "polling_interval", "break_stack",
"_loop_target", "_loop_count"]
def __init__(self, target=None, polling_interval=0.0, **kws):
self.polling_interval = polling_interval
self.break_stack = None
self._loop_target = target
self._loop_count = 0
if self._loop_target is not None:
kws['target'] = self.loop_target
super(TaskLoop, self).__init__(**kws)
self.break_flag = Event(task_context=self._context)
def break_loop(self, break_stack=None):
r"""Break the task loop."""
if self.break_stack is None:
if break_stack is None:
import traceback
break_stack = ''.join(traceback.format_stack())
self.break_stack = break_stack
self.break_flag.set()
def kill(self, *args, **kwargs):
r"""Kill the task."""
self.break_loop()
return super(TaskLoop, self).kill(*args, **kwargs)
def loop_target(self, *args, **kwargs):
r"""Continue calling the target until the loop is broken."""
while not self.break_flag.is_set():
try:
self._loop_target(*args, **kwargs)
except BreakLoopException as e:
self.break_loop(e.break_stack)
break
if self.polling_interval:
self.break_flag.wait(self.polling_interval)
self._loop_count += 1
def _finalize(self):
r"""Finalize a run."""
self.break_loop()
def disconnect(self):
r"""Disconnect from the aliased object by replacing it with
a dummy object."""
self.break_flag.disconnect()
self._loop_target = None
if TaskLoop is not None:
super(TaskLoop, self).disconnect()
class DummyQueue(DummyContextObject): # pragma: no cover
def empty(self):
return True
def full(self):
return False
def get(self, *args, **kwargs):
raise AliasDisconnectError("There are no messages in a DummyQueue.")
def get_nowait(self, *args, **kwargs):
raise AliasDisconnectError("There are no messages in a DummyQueue.")
def put(self, *args, **kwargs):
raise AliasDisconnectError("Cannot put messages in a DummyQueue.")
def put_nowait(self, *args, **kwargs):
raise AliasDisconnectError("Cannot put messages in a DummyQueue.")
def qsize(self):
return 0
def join(self, *args, **kwargs):
return
def join_thread(self, *args, **kwargs):
return
def close(self):
pass
class Queue(ContextObject):
r"""Multiprocessing/threading queue."""
_base_meth = ['full', 'get', 'get_nowait', 'join_thread', 'qsize']
@classmethod
def get_base_class(cls, context):
r"""Get instance of base class that will be represented."""
if context.parallel:
return context._base.Queue
else:
return queue.Queue
def __getstate__(self):
state = super(Queue, self).__getstate__()
if (not self.parallel) and (not isinstance(state['_base'], DummyQueue)):
state['_base'] = None
return state
def __setstate__(self, state):
if state['_base'] is None:
state['_base'] = queue.Queue()
super(Queue, self).__setstate__(state)
@property
def dummy_copy(self):
r"""Dummy copy of base."""
return DummyQueue()
def join(self, *args, **kwargs):
self.check_for_base('join')
if self.parallel:
try:
self._base.close()
except OSError: # pragma: debug
pass
return self._base.join_thread(*args, **kwargs)
else:
return self._base.join(*args, **kwargs)
def disconnect(self):
r"""Disconnect from the aliased object by replacing it with
a dummy object."""
if self.parallel:
self.join()
if Queue is not None:
super(Queue, self).disconnect()
def empty(self):
try:
return self._base.empty()
except OSError: # pragma: debug
self.disconnect()
return True
def put(self, *args, **kwargs):
try:
self._base.put(*args, **kwargs)
except AttributeError: # pragma: debug
# Multiprocessing queue asserts it is not closed
self.disconnect()
raise AliasDisconnectError("Queue was closed.")
def put_nowait(self, *args, **kwargs):
try:
self._base.put_nowait(*args, **kwargs)
except AttributeError: # pragma: debug
# Multiprocessing queue asserts it is not closed
self.disconnect()
raise AliasDisconnectError("Queue was closed.")
class Dict(ContextObject):
r"""Multiprocessing/threading shared dictionary."""
_base_meth = ['clear', 'copy', 'get', 'items', 'keys',
'pop', 'popitem', 'setdefault', 'update', 'values',
'__contains__', '__delitem__', '__getitem__',
'__iter__', '__len__', '__setitem__']
@classmethod
def get_base_class(cls, context):
r"""Get instance of base class that will be represented."""
if context.parallel:
manager = context._base.Manager()
return manager.dict
else:
return LockedDict
    # Don't define this so that it is not called after the manager is
    # shut down.
# @property
# def dummy_copy(self):
# r"""Dummy copy of base."""
# return self._base.copy()
def disconnect(self):
r"""Disconnect from the aliased object by replacing it with
a dummy object."""
try:
final_value = {k: v for k, v in self._base.items()}
except BaseException: # pragma: debug
final_value = {}
if LockedDict and isinstance(self._base, LockedDict):
self._base.disconnect()
if getattr(self._base, '_manager', None) is not None:
self._base._manager.shutdown()
self._base._manager.join()
del self._base._manager
self._base._manager = None
if hasattr(self._base, '_close'):
self._base._close()
if Dict is not None:
super(Dict, self).disconnect()
self._base = final_value
class LockedObject(AliasObject):
r"""Container that provides a lock that is acquired before accessing
the object."""
_base_locked = True
def __init__(self, *args, task_method='process',
task_context=None, **kwargs):
self.lock = RLock(task_method=task_method,
task_context=task_context)
super(LockedObject, self).__init__(*args, **kwargs)
def disconnect(self):
r"""Disconnect from the aliased object by replacing it with
a dummy object."""
if LockedObject is not None:
super(LockedObject, self).disconnect()
self.lock.disconnect()
# class LockedList(LockedObject):
# r"""List intended to be shared between threads."""
# def __init__(self, *args, **kwargs):
# base = list(*args, **kwargs)
# super(LockedList, self).__init__(base)
class LockedDict(LockedObject):
r"""Dictionary that can be shared between threads."""
_base_class = dict
_base_meth = ['clear', 'copy', 'get', 'items', 'keys',
'pop', 'popitem', 'setdefault', 'update', 'values',
'__contains__', '__delitem__', '__getitem__',
'__iter__', '__len__', '__setitem__']
def add_subdict(self, key):
r"""Add a subdictionary."""
self[key] = {}
@property
def dummy_copy(self):
r"""Dummy copy of base."""
try:
out = self._base.copy()
except BaseException: # pragma: debug
out = {}
return out
class TimeoutError(asyncio.TimeoutError):
r"""Error to raise when a wait times out."""
def __init__(self, msg, function_value):
self.function_value = function_value
super(TimeoutError, self).__init__(msg)
class WaitableFunction(object):
r"""Create an object that can be waited on until a function returns True.
Args:
function (callable): Callable function that takes no arguments and
returns a boolean.
        polling_interval (float, optional): Time (in seconds) that should be
            waited in between function calls. Defaults to 0.01 seconds.
"""
__slots__ = ["function", "polling_interval"]
def __init__(self, function, polling_interval=0.01):
self.function = function
self.polling_interval = polling_interval
def wait(self, timeout=None, on_timeout=False):
r"""Wait for the function to return True.
Args:
timeout (float, optional): Time (in seconds) that should be
waited for the process to finish. A value of None will wait
indefinitely. Defaults to None.
on_timeout (callable, bool, str, optional): Object indicating
what action should be taken in the event that the timeout is
reached. If a callable is provided, it will be called. A
value of False will cause a TimeoutError to be raised. A
value of True will cause the function value to be returned.
A string will be used as the error message for a raised
timeout error. Defaults to False.
Returns:
object: The result of the function call.
"""
def task_target():
if self.function():
raise BreakLoopException
loop = TaskLoop(target=task_target,
polling_interval=self.polling_interval)
loop.start()
loop.join(timeout)
if loop.is_alive():
loop.kill()
if on_timeout is True:
return self.function()
elif (on_timeout is False):
msg = f'Timeout at {timeout} s'
elif isinstance(on_timeout, str):
msg = on_timeout
else:
return on_timeout()
raise TimeoutError(msg, self.function())
return self.function()
def wait_on_function(function, timeout=None, on_timeout=False,
polling_interval=0.1):
r"""Wait for the function to return True.
Args:
function (callable): Callable function that takes no arguments and
returns a boolean.
timeout (float, optional): Time (in seconds) that should be
waited for the process to finish. A value of None will wait
indefinitely. Defaults to None.
on_timeout (callable, bool, str, optional): Object indicating
what action should be taken in the event that the timeout is
reached. If a callable is provided, it will be called. A
value of False will cause a TimeoutError to be raised. A
value of True will cause the function value to be returned.
A string will be used as the error message for a raised
timeout error. Defaults to False.
polling_interval (float, optional): Time (in seconds) that should be
waited in between function calls. Defaults to 0.1 seconds.
Returns:
object: The result of the function call.
"""
x = WaitableFunction(function, polling_interval=polling_interval)
return x.wait(timeout=timeout, on_timeout=on_timeout)
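# Hedged usage sketch (names below are illustrative):
#   flag = Event()
#   ...  # something eventually calls flag.set()
#   wait_on_function(flag.is_set, timeout=2.0)  # True once set; raises
#   TimeoutError if still False after 2 s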
class MPIRequestWrapper(WaitableFunction):
r"""Wrapper for an MPI request."""
__slots__ = ["request", "completed", "canceled", "_result"]
def __init__(self, request, completed=False, **kwargs):
self.request = request
self.completed = completed
self.canceled = False
self._result = None
super(MPIRequestWrapper, self).__init__(
lambda: self.test()[0] or self.canceled, **kwargs)
def cancel(self):
r"""Cancel the request."""
if not self.test()[0]:
self.canceled = True
return self.request.Cancel()
@property
def result(self):
r"""object: The result of the MPI request."""
if not self.completed: # pragma: intermittent
self.test()
return self._result
def test(self):
r"""Test to see if the request has completed."""
if not self.completed:
self.completed, self._result = self.request.test()
return (self.completed, self._result)
def wait(self, timeout=None, on_timeout=False):
r"""Wait for the request to be completed.
Args:
timeout (float, optional): Time (in seconds) that should be
waited for the process to finish. A value of None will wait
indefinitely. Defaults to None.
on_timeout (callable, bool, str, optional): Object indicating
what action should be taken in the event that the timeout is
reached. If a callable is provided, it will be called. A
value of False will cause a TimeoutError to be raised. A
value of True will cause the function value to be returned.
A string will be used as the error message for a raised
timeout error. Defaults to False.
Returns:
object: The result of the request.
"""
if not self.test()[0]:
super(MPIRequestWrapper, self).wait(timeout=timeout,
on_timeout=on_timeout)
return self._result
class MPIPartnerError(Exception):
r"""Error raised when there is an error on another process."""
pass
class MPIErrorExchange(object):
r"""Set of MPI messages to check for errors."""
tags = {'ERROR_ON_RANK0': 1,
'ERROR_ON_RANKX': 2}
closing_messages = ['ERROR', 'COMPLETE']
def __init__(self, global_tag=0):
self.comm = MPI.COMM_WORLD
self.rank = self.comm.Get_rank()
self.size = self.comm.Get_size()
if self.rank == 0:
self.partner_ranks = list(range(1, self.size))
else:
self.partner_ranks = [0]
self.reset(global_tag=global_tag)
self._first_use = True
def reset(self, global_tag=0):
r"""Rest comms for the next test."""
global_tag = max(self.comm.alltoall([global_tag] * self.size))
self.global_tag = global_tag + max(self.tags.values()) + 1
if self.rank == 0:
self.incoming_tag = self.tags['ERROR_ON_RANKX'] + global_tag
self.outgoing_tag = self.tags['ERROR_ON_RANK0'] + global_tag
else:
self.incoming_tag = self.tags['ERROR_ON_RANK0'] + global_tag
self.outgoing_tag = self.tags['ERROR_ON_RANKX'] + global_tag
self.outgoing = None
self.incoming = [
MPIRequestWrapper(
self.comm.irecv(source=i, tag=self.incoming_tag),
polling_interval=0)
for i in self.partner_ranks]
self._first_use = False
def recv(self, wait=False):
r"""Check for response to receive request."""
results = []
for i, x in enumerate(self.incoming):
if wait:
x.wait()
completed, result = x.test()
if ((completed
and ((self.rank != 0)
or (result[1] not in self.closing_messages)))):
self.incoming[i] = MPIRequestWrapper(
self.comm.irecv(
source=self.partner_ranks[i],
tag=self.incoming_tag),
polling_interval=0)
results.append((completed, result))
return results
def send(self, msg):
r"""Send a message."""
if (self.rank == 0) or (self.outgoing is None):
for i in self.partner_ranks:
self.comm.send(msg, dest=i, tag=self.outgoing_tag)
if (self.rank != 0) and (msg[1] in self.closing_messages):
self.outgoing = msg
def finalize(self, failure):
r"""Finalize an instance by waiting for completions.
Args:
failure (bool): True if there was an error.
"""
complete = True
try:
complete = self.sync(msg='COMPLETE',
local_error=failure,
check_complete=True,
sync_tag=True)
finally:
while not complete: # pragma: debug
complete = self.sync(msg='COMPLETE',
local_error=failure,
check_complete=True,
dont_raise=True,
sync_tag=True)
def sync(self, local_tag=None, msg=None, get_tags=False,
check_equal=False,
dont_raise=False, local_error=False, sync_tag=False,
check_complete=False):
r"""Synchronize processes.
Args:
            local_tag (int, optional): Next tag that will be used by the
                local MPI comm. Defaults to the current global tag.
            msg (str, optional): Message to exchange with the other
                processes. Defaults to 'TAG' ('ERROR' if local_error is
                True).
            get_tags (bool, optional): If True, tags will be exchanged
                between all processes. Defaults to False.
            check_equal (bool, optional): If True, tags will be checked to
                be equal. Defaults to False.
            dont_raise (bool, optional): If True, an MPIPartnerError will
                not be raised when another process reports an error.
                Defaults to False.
            local_error (bool, optional): If True, an error occurred on
                this process and will be reported to the others. Defaults
                to False.
            sync_tag (bool, optional): If True, the global tag is set to
                the maximum tag across all processes; otherwise it is set
                to local_tag. Defaults to False.
            check_complete (bool, optional): If True, return whether all
                processes sent a closing message. Defaults to False.
Raises:
MPIPartnerError: If there was an error on one of the other MPI
processes.
AssertionError: If check_equal is True and the tags are not
equivalent.
"""
if local_tag is None:
local_tag = self.global_tag
remote_error = False
if msg is None:
msg = 'TAG'
if local_error: # pragma: debug
msg = 'ERROR'
if self.outgoing is not None: # pragma: debug
msg = self.outgoing
if self.rank != 0:
self.send((local_tag, msg))
out = self.recv(wait=True)
complete, results = out[0] # self.recv(wait=True)[0]
assert(complete)
else:
if (self.outgoing is None) and (msg in self.closing_messages):
self.outgoing = msg
results = [(True, (local_tag, msg))] + self.recv(wait=True)
# TODO: Check for completion (instead of error)
self.send(results)
remote_error = any((x[0] and (x[1][1] == 'ERROR'))
for x in results)
all_tag = [x[1][0] for x in results]
if sync_tag:
self.global_tag = max(all_tag)
else:
self.global_tag = local_tag
if remote_error and (not local_error) and (not dont_raise): # pragma: debug
raise MPIPartnerError("Error on another process.")
if check_equal and not (remote_error or local_error):
assert(all((x == local_tag) for x in all_tag))
if check_complete:
return all(x[1][1] in self.closing_messages
for x in results)
if get_tags:
return all_tag
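# A minimal collective sketch (every rank must execute the same code;
# do_local_work is a hypothetical stand-in): each rank reports its local
# error state via finalize() and, on the healthy ranks, an MPIPartnerError
# is raised if any partner failed.
#
#     exchange = MPIErrorExchange()
#     try:
#         do_local_work()
#         failed = False
#     except Exception:
#         failed = True
#     exchange.finalize(failed)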
# class LockedWeakValueDict(LockedDict):
# r"""Dictionary of weakrefs that can be shared between threads."""
# _base_class = weakref.WeakValueDictionary
# _base_attr = ['data']
# _base_meth = ['itervaluerefs', 'valuerefs']
# def __init__(self, *args, **kwargs):
# self._dict_refs = {}
# super(LockedWeakValueDict, self).__init__(*args, **kwargs)
# def add_subdict(self, key):
# r"""Add a subdictionary."""
# self._dict_refs[key] = weakref.WeakValueDictionary()
# self[key] = self._dict_refs[key]
class YggTask(YggClass):
r"""Class for managing Ygg thread/process."""
_disconnect_attr = (YggClass._disconnect_attr
+ ['context', 'lock', 'process_instance',
'error_flag', 'start_flag', 'terminate_flag',
'pipe'])
def __init__(self, name=None, target=None, args=(), kwargs=None,
daemon=False, group=None, task_method='thread',
context=None, with_pipe=False, **ygg_kwargs):
if kwargs is None:
kwargs = {}
if (target is not None) and ('target' in self._schema_properties):
ygg_kwargs['target'] = target
target = None
self.context = Context.from_base(task_method=task_method,
base=context)
self.as_process = self.context.parallel
if self.as_process:
self.in_process = False
self.pipe = None
self.send_pipe = None
if with_pipe:
self.pipe = self.context._base.Pipe()
kwargs['send_pipe'] = self.pipe[1]
else:
self.in_process = True
process_kwargs = dict(
name=name, group=group, daemon=daemon,
target=self.run)
self.process_instance = self.context.Task(**process_kwargs)
self._ygg_target = target
self._ygg_args = args
self._ygg_kwargs = kwargs
self.lock = self.context.RLock()
self.create_flag_attr('error_flag')
self.create_flag_attr('start_flag')
self.create_flag_attr('terminate_flag')
self._calling_thread = None
self.state = ''
super(YggTask, self).__init__(name, **ygg_kwargs)
if not self.as_process:
global _thread_registry
global _lock_registry
_thread_registry[self.name] = self.process_instance._base
_lock_registry[self.name] = self.lock._base
atexit.register(self.atexit)
def __getstate__(self):
out = super(YggTask, self).__getstate__()
out.pop('_input_args', None)
out.pop('_input_kwargs', None)
return out
def atexit(self): # pragma: debug
r"""Actions performed when python exits."""
if self.is_alive():
self.info('Thread alive at exit')
self.cleanup()
def printStatus(self, return_str=False):
r"""Print the class status."""
fmt = '%s(%s): state: %s'
args = (self.__module__, self.print_name, self.state)
if return_str:
msg, _ = self.logger.process(fmt, {})
return msg % args
self.logger.info(fmt, *args)
def cleanup(self):
r"""Actions to perform to clean up the thread after it has stopped."""
self.disconnect()
def create_flag_attr(self, attr):
r"""Create a flag."""
setattr(self, attr, self.context.Event())
def get_flag_attr(self, attr):
r"""Return the flag attribute."""
return getattr(self, attr)
def set_flag_attr(self, attr, value=True):
r"""Set a flag."""
if value:
self.get_flag_attr(attr).set()
else:
self.get_flag_attr(attr).clear()
def clear_flag_attr(self, attr):
r"""Clear a flag."""
self.set_flag_attr(attr, value=False)
def check_flag_attr(self, attr):
r"""Determine if a flag is set."""
return self.get_flag_attr(attr).is_set()
def wait_flag_attr(self, attr, timeout=None):
r"""Wait until a flag is True."""
return self.get_flag_attr(attr).wait(timeout=timeout)
def start(self, *args, **kwargs):
r"""Start thread/process and print info."""
self.state = 'starting'
if not self.was_terminated:
self.set_started_flag()
self.before_start()
self.process_instance.start(*args, **kwargs)
# self._calling_thread = self.get_current_task()
def before_start(self):
r"""Actions to perform on the main thread/process before
starting the thread/process."""
self.debug('')
def run(self, *args, **kwargs):
r"""Continue running until terminate event set."""
self.debug("Starting method")
self.state = 'running'
try:
self.run_init()
self.call_target()
except BaseException: # pragma: debug
self.state = 'error'
self.run_error()
finally:
self.run_finally()
if self.state != 'error':
self.state = 'finished'
def run_init(self):
r"""Actions to perform at beginning of run."""
# atexit.register(self.atexit)
self.debug('pid = %s, ident = %s', self.pid, self.ident)
self.in_process = True
if self.as_process and ('send_pipe' in self._ygg_kwargs):
self.send_pipe = self._ygg_kwargs.pop('send_pipe')
def call_target(self):
r"""Call target."""
if self._ygg_target:
self._ygg_target(*self._ygg_args, **self._ygg_kwargs)
def run_error(self):
r"""Actions to perform on error in try/except wrapping run."""
self.exception("%s ERROR", self.context.task_method.upper())
self.set_flag_attr('error_flag')
def run_finally(self):
r"""Actions to perform in finally clause of try/except wrapping
run."""
if self.as_process:
if self.send_pipe is not None:
self.send_pipe.close()
for k in ['_ygg_target', '_ygg_args', '_ygg_kwargs']:
if hasattr(self, k):
delattr(self, k)
def join(self, *args, **kwargs):
r"""Join the process/thread."""
return self.process_instance.join(*args, **kwargs)
def is_alive(self, *args, **kwargs):
r"""Determine if the process/thread is alive."""
return self.process_instance.is_alive(*args, **kwargs)
@property
def pid(self):
r"""Process ID."""
return self.process_instance.pid
@property
def ident(self):
r"""Process ID."""
return self.process_instance.ident
@property
def daemon(self):
r"""bool: Indicates whether the thread/process is daemonic or not."""
return self.process_instance.daemon
@property
def exitcode(self):
r"""Exit code."""
if self.as_process:
out = int(self.check_flag_attr('error_flag'))
if self.process_instance.exitcode:
out = self.process_instance.exitcode
return out
else:
return int(self.check_flag_attr('error_flag'))
@property
def returncode(self):
r"""Return code."""
return self.exitcode
def kill(self, *args, **kwargs):
r"""Kill the process."""
self.process_instance.kill(*args, **kwargs)
return self.terminate(*args, **kwargs)
def terminate(self, no_wait=False):
r"""Set the terminate event and wait for the thread/process to stop.
Args:
no_wait (bool, optional): If True, terminate will not block until
the thread/process stops. Defaults to False and blocks.
Raises:
AssertionError: If no_wait is False and the thread/process has not
stopped after the timeout.
"""
self.debug('')
with self.lock:
self.state = 'terminated'
if self.was_terminated: # pragma: debug
self.debug('Driver already terminated.')
return
self.set_terminated_flag()
if not no_wait:
# if self.is_alive():
# self.join(self.timeout)
self.wait(timeout=self.timeout)
assert(not self.is_alive())
# if self.as_process:
# self.process_instance.terminate()
def poll(self):
r"""Check if the process is finished and return the return
code if it is."""
out = None
if not self.is_alive():
out = self.returncode
return out
def get_current_task(self):
r"""Get the current process/thread."""
return self.context.current_task()
def get_main_proc(self):
r"""Get the main process/thread."""
return self.context.main_task()
def set_started_flag(self, value=True):
r"""Set the started flag for the thread/process to True."""
self.set_flag_attr('start_flag', value=value)
def set_terminated_flag(self, value=True):
r"""Set the terminated flag for the thread/process to True."""
self.set_flag_attr('terminate_flag', value=value)
@property
def was_started(self):
r"""bool: True if the thread/process was started. False otherwise."""
return self.check_flag_attr('start_flag')
@property
def was_terminated(self):
r"""bool: True if the thread/process was terminated. False otherwise."""
return self.check_flag_attr('terminate_flag')
@property
def main_terminated(self):
r"""bool: True if the main thread/process has terminated."""
return (not self.get_main_proc().is_alive())
def wait(self, timeout=None, key=None):
r"""Wait until thread/process finish to return using sleeps rather than
blocking.
Args:
timeout (float, optional): Maximum time that should be waited for
the driver to finish. Defaults to None and is infinite.
key (str, optional): Key that should be used to register the timeout.
Defaults to None and is set based on the stack trace.
"""
self.wait_on_function(lambda: not self.is_alive(),
timeout=timeout, key_level=1, key=key)
class BreakLoopException(BaseException):
r"""Special exception that can be raised by the target function
for a loop in order to break the loop."""
__slots__ = ["break_stack"]
def __init__(self, *args, **kwargs):
import traceback
self.break_stack = ''.join(traceback.format_stack())
super(BreakLoopException, self).__init__(*args, **kwargs)
class BreakLoopError(BreakLoopException):
r"""Version of BreakLoopException that sets an error message."""
pass
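# A minimal sketch of a loop target that ends its own loop: raising
# BreakLoopException from inside run_loop() sets the break flag (see
# YggTaskLoop.call_target below) rather than being treated as an error.
#
#     state = {'count': 0}
#     def target():
#         state['count'] += 1
#         if state['count'] >= 10:
#             raise BreakLoopException('done after 10 iterations')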
class YggTaskLoop(YggTask):
r"""Class to run a loop inside a thread/process."""
_disconnect_attr = (YggTask._disconnect_attr
+ ['break_flag', 'loop_flag', 'unpause_flag'])
def __init__(self, *args, **kwargs):
super(YggTaskLoop, self).__init__(*args, **kwargs)
self._1st_main_terminated = False
self._loop_count = 0
self.create_flag_attr('break_flag')
self.create_flag_attr('loop_flag')
self.create_flag_attr('unpause_flag')
self.set_flag_attr('unpause_flag', value=True)
self.break_stack = None
@property
def loop_count(self):
r"""int: Number of loops performed."""
with self.lock:
return self._loop_count
def on_main_terminated(self, dont_break=False): # pragma: debug
r"""Actions performed when 1st main terminated.
Args:
dont_break (bool, optional): If True, the break flag won't be set.
Defaults to False.
"""
self._1st_main_terminated = True
if not dont_break:
self.debug("on_main_terminated")
self.set_break_flag()
def set_break_flag(self, value=True, break_stack=None):
r"""Set the break flag for the thread/process to True."""
if self.break_stack is None:
if break_stack is None:
import traceback
break_stack = ''.join(traceback.format_stack())
self.break_stack = break_stack
self.set_flag_attr('break_flag', value=value)
if value:
self.set_flag_attr('unpause_flag', value=True)
def pause(self):
r"""Pause the loop execution."""
self.set_flag_attr('unpause_flag', value=False)
def resume(self):
r"""Resume the loop execution."""
self.set_flag_attr('unpause_flag', value=True)
@property
def was_break(self):
r"""bool: True if the break flag was set."""
return self.check_flag_attr('break_flag')
def set_loop_flag(self, value=True):
r"""Set the loop flag for the thread/process to True."""
self.set_flag_attr('loop_flag', value=value)
@property
def was_loop(self):
r"""bool: True if the thread/process was loop. False otherwise."""
return self.check_flag_attr('loop_flag')
def wait_for_loop(self, timeout=None, key=None, nloop=0):
r"""Wait until thread/process enters loop to return using sleeps rather than
blocking.
Args:
timeout (float, optional): Maximum time that should be waited for
the thread/process to enter loop. Defaults to None and is infinite.
key (str, optional): Key that should be used to register the timeout.
Defaults to None and is set based on the stack trace.
            nloop (int, optional): Number of loop iterations that should
                have completed before returning. Defaults to 0.
"""
self.wait_on_function(
lambda: (self.was_loop and (self.loop_count >= nloop)
or (not self.is_alive())),
timeout=timeout, key=key, key_level=1)
def before_loop(self):
r"""Actions performed before the loop."""
self.debug('')
def after_loop(self):
r"""Actions performed after the loop."""
self.debug('')
def call_target(self):
r"""Call target."""
self.debug("Starting loop")
self.before_loop()
if (not self.was_break):
self.set_loop_flag()
while (not self.was_break):
if ((self.main_terminated
and (not self._1st_main_terminated))): # pragma: debug
self.on_main_terminated()
else:
self.wait_flag_attr('unpause_flag')
try:
self.run_loop()
except BreakLoopError as e:
self.error("BreakLoopError: %s", e)
self.set_break_flag(break_stack=e.break_stack)
except BreakLoopException as e:
self.debug("BreakLoopException: %s", e)
self.set_break_flag(break_stack=e.break_stack)
if not self.break_stack:
self.set_break_flag()
def run_loop(self, *args, **kwargs):
r"""Actions performed on each loop iteration."""
if self._ygg_target:
self._ygg_target(*self._ygg_args, **self._ygg_kwargs)
else:
self.set_break_flag()
with self.lock:
self._loop_count += 1
def run_error(self):
r"""Actions to perform on error in try/except wrapping run."""
super(YggTaskLoop, self).run_error()
self.debug("run_error")
self.set_break_flag()
def run(self, *args, **kwargs):
r"""Continue running until terminate event set."""
super(YggTaskLoop, self).run(*args, **kwargs)
try:
self.after_loop()
except BaseException: # pragma: debug
self.exception("AFTER LOOP ERROR")
self.set_flag_attr('error_flag')
def terminate(self, *args, **kwargs):
r"""Also set break flag."""
self.debug("terminate")
self.set_break_flag()
super(YggTaskLoop, self).terminate(*args, **kwargs)
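# A minimal usage sketch (thread backend; requires `import time` for the
# dummy target): run a target repeatedly in the background, wait for a few
# iterations, then stop the loop.
#
#     task = YggTaskLoop(name='heartbeat', task_method='thread',
#                        target=lambda: time.sleep(0.1))
#     task.start()
#     task.wait_for_loop(timeout=5.0, nloop=3)
#     task.terminate()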
|
11479748
|
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework import mixins
from .serializers import CtfSerializer
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import SessionAuthentication
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from .models import CtfLibrary
from rest_framework_extensions.cache.mixins import CacheResponseMixin
# Create your views here.
# class CtfViewSet(CacheResponseMixin,mixins.ListModelMixin,mixins.RetrieveModelMixin,viewsets.GenericViewSet):
# '''
# Create: no front-end API provided
# Delete: no front-end API provided
# Update: no front-end API provided
# Retrieve: no API provided
#
# Note: cannot be viewed before the competition starts TODO this
# '''
# queryset = CtfLibrary.objects.all()
# serializer_class = CtfSerializer
# permission_classes = (IsAuthenticated,)
# authentication_classes = (SessionAuthentication,JSONWebTokenAuthentication)
|
11479761
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# class LeNet(nn.Module):
# def __init__(self, n_classes):
# super(LeNet, self).__init__()
# self.conv1 = nn.Conv2d(1, 5, 5, 1)
# self.conv2 = nn.Conv2d(5, 10, 5, 1)
# self.fc1 = nn.Linear(4*4*10, 300)
# self.fc2 = nn.Linear(300, n_classes)
# def forward(self, x):
# x = self.conv1(x)
# x = F.max_pool2d(x, 2, 2)
# x = F.relu(x)
# x = self.conv2(x)
# x = F.max_pool2d(x, 2, 2)
# x = F.relu(x)
# x = x.view(-1, 4*4*10)
# x = self.fc1(x)
# x = self.fc2(x)
# return x
class LeNet(nn.Module):
def __init__(self, n_classes):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, n_classes)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
        return F.log_softmax(x, dim=1)
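# A minimal smoke test, assuming MNIST-sized 1x28x28 inputs (for which the
# flattened feature size 320 = 20 * 4 * 4 holds); guarded so importing this
# module stays side-effect free.
if __name__ == '__main__':
    model = LeNet(n_classes=10)
    dummy = torch.randn(2, 1, 28, 28)  # batch of two grayscale images
    log_probs = model(dummy)
    print(log_probs.shape)  # expected: torch.Size([2, 10])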
|
11479770
|
import numpy as np
import xml.etree.cElementTree as ET
from xml.dom import minidom
from ase.data import atomic_masses
from ase.units import eV, Hartree, Bohr, Ry, J
import os
from collections import namedtuple
from collections.abc import Iterable
from itertools import groupby
from TB2J.utils import symbol_number
import pickle
def write_uppasd(cls, path='TB2J_results/UppASD'):
if not os.path.exists(path):
os.makedirs(path)
cls.write_uppasd_posfile(os.path.join(path, 'posfile'))
cls.write_uppasd_momfile(os.path.join(path, 'momfile'))
cls.write_uppasd_exchange(os.path.join(path, 'jASD1'))
cls.write_uppasd_infile(os.path.join(path, 'input'))
def write_uppasd_posfile(cls, fname):
with open(fname, 'w') as myfile:
natom = len(cls.atoms)
for i in range(natom):
text = ""
id_spin = cls.index_spin[i]
if id_spin > -1:
pos = cls.atoms.get_scaled_positions()[i]
text = "{id_atom} {id_spin} {pos_x} {pos_y} {pos_z}\n".format(
id_atom=id_spin + 1,
id_spin=id_spin + 1,
pos_x=pos[0],
pos_y=pos[1],
pos_z=pos[2],
)
myfile.write(text)
def write_uppasd_momfile(cls, fname):
with open(fname, 'w') as myfile:
natom = len(cls.atoms)
for i in range(natom):
text = ""
id_spin = cls.index_spin[i]
if id_spin > -1:
                # Only the magnitude ms comes from spinat; the moment
                # direction is written as a fixed +z unit vector below.
                ms = np.sqrt(np.sum(np.array(cls.spinat[i])**2))
text = "{id_atom} {id_spin} {ms} 0.0 0.0 1.0\n".format(
id_atom=id_spin + 1, id_spin=id_spin + 1, ms=ms)
myfile.write(text)
def write_uppasd_exchange(cls, fname):
with open(fname, 'w') as myfile:
nexch = len(cls.exchange_Jdict.items())
myfile.write("{num_interactions} {type_exchange}\n".format(
num_interactions=nexch, type_exchange=0))
counter = -1
for key, val in cls.exchange_Jdict.items():
counter += 1 # starts at 0
R, i, j = key
pos = cls.atoms.get_positions()
d = np.dot(np.array(R),
cls.atoms.get_cell()) + pos[j] - pos[i]
myfile.write("{i} {j} {Rx} {Ry} {Rz} {Jij}\n".format(
IID=counter,
i=i + 1,
j=j + 1,
Rx=d[0],
Ry=d[1],
Rz=d[2],
Jij=val * 1e3 / Ry)) # mRy
def write_uppasd_infile(cls, fname):
tmpl = """ simid Unamed
ncell 12 12 12
BC P P P
cell
Sym 0
posfile ./posfile
momfile ./momfile
exchange ./jASD1
#anisotropy ./kfile
do_ralloy 0
Mensemble 1
tseed 4499
maptype 1
SDEalgh 1
Initmag 1
ip_mode M
ip_mcanneal 1
10000 300 1.00e-16 0.3
mode M
temp 300
mcNstep 50000
Nstep 50000
damping 0.1
timestep 1.0e-16
do_avrg Y
do_cumu Y
cumu_step 50
cumu_buff 10
do_tottraj N
tottraj_step 1000
plotenergy 1
do_sc C
do_ams Y
do_magdos Y
magdos_freq 200
magdos_sigma 30
qpoints C
do_stiffness Y
eta_max 12
eta_min 6
alat 2.83e-10
"""
with open(fname, 'w') as myfile:
myfile.write(tmpl)
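# A minimal usage sketch (hypothetical `exc` object): these writers take the
# exchange container as their first argument, so they can be called directly
# on any object exposing .atoms, .index_spin, .spinat and .exchange_Jdict,
# or attached to such a class as methods.
#
#     exc = ...  # e.g. an exchange object restored from TB2J's pickle output
#     write_uppasd(exc, path='TB2J_results/UppASD')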
|
11479776
|
from capreolus import Dependency, constants
from . import Benchmark
PACKAGE_PATH = constants["PACKAGE_PATH"]
@Benchmark.register
class Robust04(Benchmark):
"""Robust04 benchmark using the title folds from Huston and Croft. [1] Each of these is used as the test set.
Given the remaining four folds, we split them into the same train and dev sets used in recent work. [2]
[1] <NAME> and <NAME>. 2014. Parameters learned in the comparison of retrieval models using term dependencies. Technical Report.
[2] <NAME>, <NAME>, <NAME>, <NAME>. 2019. CEDR: Contextualized Embeddings for Document Ranking. SIGIR 2019.
"""
module_name = "robust04"
dependencies = [Dependency(key="collection", module="collection", name="robust04")]
qrel_file = PACKAGE_PATH / "data" / "qrels.robust2004.txt"
topic_file = PACKAGE_PATH / "data" / "topics.robust04.301-450.601-700.txt"
fold_file = PACKAGE_PATH / "data" / "rob04_cedr_folds.json"
query_type = "title"
@Benchmark.register
class Robust04Yang19(Benchmark):
"""Robust04 benchmark using the folds from Yang et al. [1]
[1] <NAME>, <NAME>, <NAME>, and <NAME>. 2019. Critically Examining the "Neural Hype": Weak Baselines and the Additivity of Effectiveness Gains from Neural Ranking Models. SIGIR 2019.
"""
module_name = "robust04.yang19"
dependencies = [Dependency(key="collection", module="collection", name="robust04")]
qrel_file = PACKAGE_PATH / "data" / "qrels.robust2004.txt"
topic_file = PACKAGE_PATH / "data" / "topics.robust04.301-450.601-700.txt"
fold_file = PACKAGE_PATH / "data" / "rob04_yang19_folds.json"
query_type = "title"
@Benchmark.register
class Robust04Yang19Desc(Robust04Yang19, Benchmark):
module_name = "robust04.yang19.desc"
query_type = "desc"
@Benchmark.register
class Robust04Huston14(Benchmark):
module_name = "robust04.huston14.title"
dependencies = [Dependency(key="collection", module="collection", name="robust04")]
qrel_file = PACKAGE_PATH / "data" / "qrels.robust2004.txt"
topic_file = PACKAGE_PATH / "data" / "topics.robust04.301-450.601-700.txt"
fold_file = PACKAGE_PATH / "data" / "rob04_huston14_title_folds.json"
query_type = "title"
@Benchmark.register
class Robust04Huston14Desc(Robust04Huston14, Benchmark):
module_name = "robust04.huston14.desc"
fold_file = PACKAGE_PATH / "data" / "rob04_huston14_desc_folds.json"
query_type = "desc"
|
11479855
|
import unittest
import config
from uri.base_uri import URI
class TestURI(URI):
fqdn = 'domain.com'
path = '/test-uri-path'
class TestEmbeddedParamsURI(URI):
fqdn = 'domain.com'
path = '/test/<embed>/uri'
class TestBaseURI(unittest.TestCase):
def test_uri(self):
self.assertEqual('/test-uri-path', TestURI.uri())
self.assertEqual('/test-uri-path?key1=value1', TestURI.uri(key1='value1'))
self.assertEqual('/test-uri-path?key2=value2&key1=value1', TestURI.uri(key1='value1', key2='value2'))
self.assertEqual('/test/key/uri', TestEmbeddedParamsURI.uri(embed='key'))
self.assertEqual('/test/key/uri?extra=param', TestEmbeddedParamsURI.uri(embed='key', extra='param'))
def test_full_uri(self):
config.DEFAULT_HTTPS = False
self.assertEqual('http://domain.com/test-uri-path', TestURI.full_uri())
self.assertEqual('https://domain.com/test-uri-path', TestURI.full_uri(https=True))
self.assertEqual('http://domain.com/test-uri-path?key1=value1', TestURI.full_uri(key1='value1'))
self.assertEqual('https://domain.com/test-uri-path?key1=value1', TestURI.full_uri(key1='value1', https=True))
self.assertEqual('http://domain.com/test-uri-path?key2=value2&key1=value1', TestURI.full_uri(key1='value1', key2='value2'))
self.assertEqual('http://domain.com/test/key/uri', TestEmbeddedParamsURI.full_uri(embed='key'))
self.assertEqual('http://domain.com/test/key/uri?extra=param', TestEmbeddedParamsURI.full_uri(embed='key', extra='param'))
config.DEFAULT_HTTPS = True
self.assertEqual('https://domain.com/test-uri-path', TestURI.full_uri())
self.assertEqual('https://domain.com/test-uri-path?key1=value1', TestURI.full_uri(key1='value1'))
self.assertEqual('https://domain.com/test-uri-path?key2=value2&key1=value1', TestURI.full_uri(key1='value1', key2='value2'))
def test_get_path(self):
self.assertEqual('/test-uri-path', TestURI.get_path())
def test_protocol_prefix(self):
self.assertEqual('http://', TestURI.protocol_prefix(https=False))
self.assertEqual('https://', TestURI.protocol_prefix(https=True))
|
11479878
|
import torch
import torch.nn as nn
from GNN.GCN_layer import GraphConvolution
from GNN.GCN_res_layer import GraphResConvolution
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class GCN(nn.Module):
def __init__(self,
state_dim=256,
feature_dim=256):
super(GCN, self).__init__()
self.state_dim = state_dim
self.gcn_0 = GraphConvolution(feature_dim, 'gcn_0', out_state_dim=self.state_dim)
self.gcn_res_1 = GraphResConvolution(self.state_dim, 'gcn_res_1')
self.gcn_res_2 = GraphResConvolution(self.state_dim, 'gcn_res_2')
self.gcn_res_3 = GraphResConvolution(self.state_dim, 'gcn_res_3')
self.gcn_res_4 = GraphResConvolution(self.state_dim, 'gcn_res_4')
self.gcn_res_5 = GraphResConvolution(self.state_dim, 'gcn_res_5')
# self.gcn_res_6 = GraphResConvolution(self.state_dim, 'gcn_res_6')
        self.gcn_7 = GraphConvolution(self.state_dim, 'gcn_7', out_state_dim=32)
self.fc = nn.Linear(
in_features=32,
out_features=2,
)
def forward(self, input, adj):
input = self.gcn_0(input, adj)
input = self.gcn_res_1(input, adj)
input = self.gcn_res_2(input, adj)
input = self.gcn_res_3(input, adj)
input = self.gcn_res_4(input, adj)
input = self.gcn_res_5(input, adj)
# input = self.gcn_res_6(input, adj)
output = self.gcn_7(input, adj)
return self.fc(output)
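# A minimal shape-check sketch (assumptions: node features shaped
# (batch, n_nodes, feature_dim) and a dense (batch, n_nodes, n_nodes)
# adjacency, the usual contract for GCN layers like these):
#
#     gcn = GCN(state_dim=256, feature_dim=256).to(device)
#     feats = torch.randn(2, 40, 256, device=device)
#     adj = torch.eye(40, device=device).unsqueeze(0).expand(2, 40, 40)
#     out = gcn(feats, adj)  # expected shape: (2, 40, 2)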
|
11479883
|
import time
import board
import busio
from adafruit_mcp230xx.mcp23017 import MCP23017
from digitalio import Direction
import adafruit_ble
from adafruit_ble.advertising.standard import ProvideServicesAdvertisement
import adafruit_ble_midi
# These import auto-register the message type with the MIDI machinery.
# pylint: disable=unused-import
import adafruit_midi
from adafruit_midi.control_change import ControlChange
from adafruit_midi.midi_message import MIDIUnknownEvent
from adafruit_midi.note_off import NoteOff
from adafruit_midi.note_on import NoteOn
from adafruit_midi.pitch_bend import PitchBend
# i2c setup
i2c = busio.I2C(board.SCL, board.SDA)
# i2c addresses for muxes
mcp1 = MCP23017(i2c, address=0x20)
mcp2 = MCP23017(i2c, address=0x21)
# 1st solenoid array, corresponds with 1st mux
noids0 = []
for pin in range(16):
noids0.append(mcp1.get_pin(pin))
for n in noids0:
n.direction = Direction.OUTPUT
# 2nd solenoid array, corresponds with 2nd mux
noids1 = []
for pin in range(16):
noids1.append(mcp2.get_pin(pin))
for n in noids1:
n.direction = Direction.OUTPUT
# MIDI note arrays. notes0 = noids0; notes1 = noids1
notes0 = [55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70]
notes1 = [71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86]
# setup MIDI BLE service
midi_service = adafruit_ble_midi.MIDIService()
advertisement = ProvideServicesAdvertisement(midi_service)
# BLE connection setup
ble = adafruit_ble.BLERadio()
if ble.connected:
for c in ble.connections:
c.disconnect()
# MIDI in setup
midi = adafruit_midi.MIDI(midi_in=midi_service, in_channel=0)
# start BLE advertising
print("advertising")
ble.start_advertising(advertisement)
# delay for solenoids
speed = 0.01
while True:
# waiting for BLE connection
print("Waiting for connection")
while not ble.connected:
pass
print("Connected")
# delay after connection established
time.sleep(1.0)
while ble.connected:
# msg holds MIDI messages
msg = midi.receive()
for i in range(16):
# states for solenoid on/off
# noid0 = mux1
# noid1 = mux2
noid0_output = noids0[i]
noid1_output = noids1[i]
            # states for MIDI note received
# notes0 = mux1
# notes1 = mux2
notes0_played = notes0[i]
notes1_played = notes1[i]
# if NoteOn msg comes in and the MIDI note # matches with predefined notes:
            if isinstance(msg, NoteOn) and msg.note == notes0_played:
print(time.monotonic(), msg.note)
# solenoid is triggered
noid0_output.value = True
# quick delay
time.sleep(speed)
# solenoid retracts
noid0_output.value = False
# identical to above if statement but for mux2
            if isinstance(msg, NoteOn) and msg.note == notes1_played:
print(time.monotonic(), msg.note)
noid1_output.value = True
time.sleep(speed)
noid1_output.value = False
# if BLE disconnects try reconnecting
print("Disconnected")
print()
ble.start_advertising(advertisement)
|
11479926
|
import sys
from importlib import reload
from django.urls import clear_url_caches
import pytest
pytestmark = pytest.mark.django_db
DOCS_URL = "/docs/"
@pytest.fixture
def all_urlconfs():
return [
"apps.core.urls",
"apps.users.urls",
"conf.urls", # The ROOT_URLCONF must be last!
]
@pytest.fixture
def reloaded_urlconfs(all_urlconfs):
def _reloaded_urlconfs():
"""
Use this to ensure all urlconfs are reloaded as needed before the test.
"""
clear_url_caches()
for urlconf in all_urlconfs:
if urlconf in sys.modules:
reload(sys.modules[urlconf])
return _reloaded_urlconfs
def test_docs_view_public_api_doc_true(client, settings, reloaded_urlconfs):
"""Test docs view when PUBLIC_API_DOCUMENTATION is True."""
settings.STATICFILES_STORAGE = (
"django.contrib.staticfiles.storage.StaticFilesStorage"
)
    # added because swagger needs staticfiles to show the web page
settings.PUBLIC_API_DOCUMENTATION = True
settings.DEBUG = False
reloaded_urlconfs()
response = client.get(DOCS_URL)
assert response.status_code == 200
def test_docs_view_debug_true(client, settings, reloaded_urlconfs):
"""Test docs view when DEBUG is True."""
settings.STATICFILES_STORAGE = (
"django.contrib.staticfiles.storage.StaticFilesStorage"
)
    # added because swagger needs staticfiles to show the web page
settings.DEBUG = True
settings.PUBLIC_API_DOCUMENTATION = False
reloaded_urlconfs()
response = client.get(DOCS_URL)
assert response.status_code == 200
def test_docs_view_env_false(client, settings, reloaded_urlconfs):
"""Test docs view when PUBLIC_API_DOCUMENTATION is False."""
settings.PUBLIC_API_DOCUMENTATION = False
settings.DEBUG = False
reloaded_urlconfs()
response = client.get(DOCS_URL)
assert response.status_code == 404
|
11479927
|
import os.path
import pathlib
import re
import site
import sysconfig
import sys
import tarfile
import tempfile
from urllib.parse import urlparse
import zipfile
from requests_download import download
from .install import Installer
address_formats = {
'github': (r'([\w\d_-]+)/([\w\d_-]+)(/(.+))?$', 'user/project[/commit-tag-or-branch]'),
}
class BadInput(Exception):
"""An error resulting from invalid input"""
pass
class InvalidAddress(BadInput):
def __init__(self, address):
self.address = address
def __str__(self): # pragma: no cover
return "Invalid address: {!r}".format(self.address)
class UnknownAddressType(BadInput):
def __init__(self, address_type):
self.address_type = address_type
def __str__(self): # pragma: no cover
return "Unknown address type: {}".format(self.address_type)
class InvalidAddressLocation(BadInput):
def __init__(self, address_type, location, expected_pattern):
self.address_type = address_type
self.location = location
self.expected_pattern = expected_pattern
def __str__(self): # pragma: no cover
return "Invalid location: {!r}\n{}: addresses should look like {}".format(
self.location, self.address_type, self.expected_pattern
)
def parse_address(address):
if os.path.isfile(address):
return 'local_file', address
elif address.startswith(('http://', 'https://')):
return 'url', address
if ':' not in address:
raise InvalidAddress(address)
address_type, location = address.split(':', 1)
try:
location_regex, location_pattern = address_formats[address_type]
except KeyError:
raise UnknownAddressType(address_type)
if not re.match(location_regex, location):
raise InvalidAddressLocation(address_type, location, location_pattern)
return address_type, location
def unpack(archive):
if zipfile.is_zipfile(archive):
z = zipfile.ZipFile(archive)
unpacked = tempfile.mkdtemp()
z.extractall(path=unpacked)
elif tarfile.is_tarfile(archive):
        t = tarfile.open(archive)  # auto-detects compression, unlike TarFile()
unpacked = tempfile.mkdtemp()
t.extractall(path=unpacked)
else:
raise RuntimeError('Unknown archive (not zip or tar): %s' % archive)
files = os.listdir(unpacked)
if len(files) == 1 and os.path.isdir(os.path.join(unpacked, files[0])):
return os.path.join(unpacked, files[0])
return unpacked
def download_unpack(url):
with tempfile.TemporaryDirectory() as td:
path = os.path.join(td, urlparse(url).path.split('/')[-1])
download(url, path)
unpacked = unpack(path)
return unpacked
def fetch(address_type, location):
if address_type == 'local_file':
return unpack(location)
if address_type == 'url':
return download_unpack(location)
if address_type == 'github':
m = re.match(address_formats['github'][0], location)
user, project, committish = m.group(1, 2, 4)
if committish is None:
committish = 'master'
url = 'https://github.com/{}/{}/archive/{}.zip'.format(user, project, committish)
return download_unpack(url)
def install_local(path, user=False, python=sys.executable):
p = pathlib.Path(path)
ininames = ['pyproject.toml', 'flit.ini']
for ininame in ininames:
inipath = p / ininame
if inipath.is_file():
return Installer(inipath, user=user, python=python, deps='production').install()
raise FileNotFoundError('Neither {} found in {}'.format(' nor '.join(ininames), p))
def installfrom(address, user=None, python=sys.executable):
if user is None:
user = site.ENABLE_USER_SITE \
and not os.access(sysconfig.get_path('purelib'), os.W_OK)
try:
return install_local(fetch(*parse_address(address)), user=user, python=python)
except BadInput as e:
print(e, file=sys.stderr)
return 2
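# A few worked examples for parse_address(), derivable from the regex table
# above:
#
#     parse_address('github:user/project')
#     # -> ('github', 'user/project')
#     parse_address('https://example.com/pkg.tar.gz')
#     # -> ('url', 'https://example.com/pkg.tar.gz')
#     parse_address('no-colon-here')        # raises InvalidAddress
#     parse_address('hg:user/project')      # raises UnknownAddressType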
|
11479936
|
import os
import sys
import torch
import pickle
from torch import nn
from torch import optim
from torchvision.datasets import ImageFolder
from torchvision import transforms
from torch.optim.lr_scheduler import LambdaLR
from tqdm import tqdm
sys.path.append("../ocrd_typegroups_classifier")
from ocrd_typegroups_classifier.network.vraec import vraec101
from ocrd_typegroups_classifier.network.vraec import vraec50
from ocrd_typegroups_classifier.typegroups_classifier import TypegroupsClassifier
from ocrd_typegroups_classifier.data.qloss import QLoss
from ocrd_typegroups_classifier.data.binarization import Otsu
from ocrd_typegroups_classifier.data.binarization import Sauvola
from ocrd_typegroups_classifier.network.densenet import densenet121
from ocrd_typegroups_classifier.network.resnet import resnet18
# Loading and preparing the network
net = densenet121(num_classes=2)
#net = resnet18(num_classes=12)
# Some settings for the training
learning_rate = 0.1
#weight_decay = 0.0001
weight_decay = 0
lr_decay = lambda epoch: 0.97 ** epoch
reconstruction_loss = nn.MSELoss()
classification_loss = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=learning_rate, weight_decay=weight_decay)
scheduler = LambdaLR(optimizer, lr_lambda=[lr_decay])
# Creation of the typegroup classifier
tgc = TypegroupsClassifier(
{
'handwritten':0,
'printed':1
},
net
)
if os.path.exists(os.path.join('ocrd_typegroups_classifier', 'models', 'classifier.tgc')):
tgc = TypegroupsClassifier.load(os.path.join('ocrd_typegroups_classifier', 'models', 'classifier.tgc'))
# Data transformation & loading
# Note that, due to the rotation, applying several small shearing
# transforms in sequence is not the same as applying a single one
# with a larger range.
trans = transforms.Compose([
transforms.RandomAffine(2, shear=2),
transforms.RandomAffine(2, shear=2),
transforms.RandomAffine(2, shear=2),
#transforms.RandomCrop(224),
transforms.RandomResizedCrop(150, scale=(0.25, 1.0), ratio=(0.9, 1.11), interpolation=2),
transforms.ColorJitter(brightness=0.7, contrast=0.7, saturation=0.3, hue=0.02),
transforms.RandomGrayscale(p=0.75),
QLoss(min_q=2, max_q=60),
transforms.RandomChoice([
transforms.RandomApply((Otsu(),), p=0.1),
transforms.RandomApply((Sauvola(2, 8),), p=0.05)
]),
transforms.ToTensor()
])
training = ImageFolder('lines/training', transform=trans)
training.target_transform = tgc.classMap.get_target_transform(training.class_to_idx)
validation = ImageFolder('lines/validation', transform=None)
validation.target_transform = tgc.classMap.get_target_transform(validation.class_to_idx)
best_validation = 0
data_loader = torch.utils.data.DataLoader(training,
batch_size=64,
shuffle=True,
num_workers=4)
# Iterating over the data
print('Starting the training - grab a coffee and a good book!')
for epoch in range(30):
# Modify learning rate
scheduler.step()
# Iterate over the data
lossSum = 0
good = 0
known = 0
tgc.network.train()
for sample, label in tqdm(data_loader, desc='Training'):
# Move data to device
sample = sample.to(tgc.dev)
label = label.to(tgc.dev)
# Training the classifier on samples with known labels
sample, label = tgc.filter(sample, label)
if len(label)==0: # no known labels
continue
out = tgc.network(sample)
closs = classification_loss(out, label)
optimizer.zero_grad()
closs.backward()
optimizer.step()
lossSum += closs.item()
# Computing accuracy
_, p = torch.max(out, 1)
good += torch.sum(p==label).item()
known += len(label)
print('Epoch %d, loss %.1f, %d/%d=%.1f%%' % (epoch, lossSum, good, known, (100.0*good)/known))
targets = list()
results = list()
good = 0
bad = 0
with torch.no_grad():
tgc.network.eval()
for idx in tqdm(range(validation.__len__()), desc='Evaluation'):
sample, target = validation.__getitem__(idx)
path, _ = validation.samples[idx]
if target==-1:
continue
result = tgc.classify(sample, 224, 64, True)
highscore = max(result)
label = tgc.classMap.cl2id[result[highscore]]
targets.append(target)
results.append(label)
if target==label:
good += 1
else:
bad += 1
with open('results.dat', 'wb') as f:
pickle.dump(targets, f)
pickle.dump(results, f)
accuracy = 100*good/float(good+bad)
print(' Good:', good)
print(' Bad:', bad)
print('Accuracy:', accuracy)
if accuracy>best_validation:
tgc.save(os.path.join('ocrd_typegroups_classifier', 'models', 'classifier.tgc'))
best_validation = accuracy
print('Network saved')
else:
tgc.save(os.path.join('ocrd_typegroups_classifier', 'models', 'classifier-last.tgc'))
|
11479940
|
import os
from json import load
from mp_api import MAPISettings
from mp_api.routes.tasks.utils import calcs_reversed_to_trajectory
def test_calcs_reversed_to_trajectory():
with open(
os.path.join(MAPISettings().TEST_FILES, "calcs_reversed_mp_1031016.json")
) as file:
calcs_reversed = load(file)
trajectories = calcs_reversed_to_trajectory(calcs_reversed)
assert len(trajectories) == 1
assert trajectories[0]["lattice"] == [
[9.054455, 0.0, 0.0],
[0.0, 4.500098, 0.0],
[0.0, 0.0, 4.500098],
]
|
11479959
|
from __future__ import print_function
import numpy as np
from sklearn.datasets import load_diabetes
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet, RidgeCV, LassoCV, ElasticNetCV
from sklearn.model_selection import cross_val_score
# For reproducibility
np.random.seed(1000)
if __name__ == '__main__':
diabetes = load_diabetes()
# Create a linear regressor and compute CV score
lr = LinearRegression(normalize=True)
lr_scores = cross_val_score(lr, diabetes.data, diabetes.target, cv=10)
print('Linear regression CV score: %.6f' % lr_scores.mean())
# Create a Ridge regressor and compute CV score
rg = Ridge(0.005, normalize=True)
rg_scores = cross_val_score(rg, diabetes.data, diabetes.target, cv=10)
print('Ridge regression CV score: %.6f' % rg_scores.mean())
# Create a Lasso regressor and compute CV score
ls = Lasso(0.01, normalize=True)
ls_scores = cross_val_score(ls, diabetes.data, diabetes.target, cv=10)
print('Lasso regression CV score: %.6f' % ls_scores.mean())
# Create ElasticNet regressor and compute CV score
en = ElasticNet(alpha=0.001, l1_ratio=0.8, normalize=True)
en_scores = cross_val_score(en, diabetes.data, diabetes.target, cv=10)
print('ElasticNet regression CV score: %.6f' % en_scores.mean())
# Find the optimal alpha value for Ridge regression
    rgcv = RidgeCV(alphas=(1.0, 0.1, 0.01, 0.005, 0.0025, 0.001, 0.00025), normalize=True)
rgcv.fit(diabetes.data, diabetes.target)
print('Ridge optimal alpha: %.3f' % rgcv.alpha_)
# Find the optimal alpha value for Lasso regression
    lscv = LassoCV(alphas=(1.0, 0.1, 0.01, 0.005, 0.0025, 0.001, 0.00025), normalize=True)
lscv.fit(diabetes.data, diabetes.target)
print('Lasso optimal alpha: %.3f' % lscv.alpha_)
# Find the optimal alpha and l1_ratio for Elastic Net
encv = ElasticNetCV(alphas=(0.1, 0.01, 0.005, 0.0025, 0.001), l1_ratio=(0.1, 0.25, 0.5, 0.75, 0.8), normalize=True)
encv.fit(diabetes.data, diabetes.target)
print('ElasticNet optimal alpha: %.3f and L1 ratio: %.4f' % (encv.alpha_, encv.l1_ratio_))
|
11479970
|
if __name__ == '__main__':
import Recommender_System.utility.gpu_memory_growth
from Recommender_System.data import data_loader, data_process
from Recommender_System.algorithm.DeepFM.model import DeepFM_model
from Recommender_System.algorithm.train import train
n_user, n_item, train_data, test_data, topk_data = data_process.pack(data_loader.ml100k)
model = DeepFM_model(n_user, n_item, dim=8, layers=[16, 16, 16], l2=1e-5)
train(model, train_data, test_data, topk_data, epochs=10)
|
11479975
|
from __future__ import annotations
import typing as t
class ValidationFailedError(Exception):
def __init__(self, errors: t.Union[t.List, t.Any]):
if not isinstance(errors, list):
errors = [errors]
self.errors = errors
class AuthorizationError(Exception):
def __init__(self, errors: t.Union[t.List, t.Any]):
if not isinstance(errors, list):
errors = [errors]
self.errors = errors
|
11480038
|
class NotFoundException(Exception):
pass
class RoleNotAllowedException(Exception):
pass
class PermissionDeniedException(Exception):
pass
|
11480053
|
from abc import ABC
from abc import abstractmethod
import numpy as np
class Recommender(ABC):
def __init__(self, training_set, items):
self.training_set = training_set
self.items = items
# Dictionary to store the recommended sequences
self.cache = {}
def recommend(self, seed_rating, k):
"""
Generate a recommended sequence of length k from a seed rating.
:param seed_rating: The seed rating.
:param k: The length of the sequence.
:return: A recommended sequence.
"""
try:
# If this sequence has already been generated
return list(self.cache[(seed_rating, k)])
except KeyError:
pass
current_rating = seed_rating
sequence = []
        for _ in range(k):
# The probabilities for the next item of the sequence
prediction = self.predict(current_rating)
# noinspection PyUnresolvedReferences
item_index = np.random.multinomial(1, prediction).argmax()
next_rating = (self.items[item_index], current_rating[1], current_rating[2] + 1)
sequence.append(next_rating)
current_rating = next_rating
# After each sequence the model needs to be reset
self.reset()
# Save this sequence in the cache
self.cache[(seed_rating, k)] = sequence
return sequence
def predict_item(self, rating, item):
"""
Given the current rating of the recommended sequence, predict
the probability that the following rating will contain this item.
:param rating: The current rating of the recommended sequence.
:param item: The next item of the sequence.
:return: A probability.
"""
prediction = self.predict(rating)
return prediction[self.items.index(item)]
@abstractmethod
def predict(self, rating):
"""
Given the current rating of the recommended sequence, predict
the probabilities for all the possible items of being in the next rating.
:param rating: The current rating of the recommended sequence.
:return: An array of probabilities.
:rtype: Iterable.
"""
pass
@abstractmethod
def reset(self):
"""
Reset the internal model that represents the current sequence.
"""
pass
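# A minimal concrete subclass sketch (illustrative only): a uniform
# recommender that gives every item equal probability and keeps no internal
# state, so reset() is a no-op.
#
#     class UniformRecommender(Recommender):
#         def predict(self, rating):
#             return np.full(len(self.items), 1.0 / len(self.items))
#         def reset(self):
#             pass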
|
11480086
|
import sys
import os
import pinproc
import procgame.game
import procgame.dmd
import procgame.dmd.font
import time
import logging
logging.basicConfig(level=logging.WARNING, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
class PlayerGame(procgame.game.BasicGame):
anim_layer = None
def __init__(self, machine_type, width=128, height=32):
super(PlayerGame, self).__init__(machine_type)
f = procgame.dmd.font_named('Font07x5.dmd')
self.dmd = procgame.dmd.DisplayController(self, width=width, height=height, message_font=f)
self.anim_layer = procgame.dmd.AnimatedLayer()
mode = procgame.game.Mode(game=self, priority=1)
mode.layer = self.anim_layer
self.modes.add(mode)
def play(self, filename, repeat, hold=False, frametime=10):
anim = procgame.dmd.Animation().load(filename)
print("file has resolution (%d x %d)" % (anim.width, anim.height))
self.anim_layer.frames = anim.frames
self.anim_layer.repeat = repeat
self.anim_layer.hold = hold
self.anim_layer.frame_time = frametime
if not repeat and not hold:
self.anim_layer.add_frame_listener(-1, self.end_of_animation)
def end_of_animation(self):
self.end_run_loop()
def tool_populate_options(parser):
    parser.add_option('-m', '--machine-type', action='store', help='wpc, wpc95, sternSAM, sternWhitestar or custom (default)')
parser.add_option('-r', '--repeat', action='store_true', help='Repeat the animation indefinitely')
parser.add_option('-l', '--hold', action='store_true', help='Hold the last frame')
parser.add_option('-f', '--frametime', type="int", dest='frametime', default=10, help='set the frame time')
parser.add_option('-s', '--size', type="int", dest='size', nargs=2, metavar="WIDTH HEIGHT", default=(128,32), help='set the WIDTH and HEIGHT of the player (in dots)')
def tool_get_usage():
return """<file.dmd>"""
def tool_run(options, args):
if len(args) != 1:
return False
if options.machine_type:
machine_type = pinproc.normalize_machine_type(options.machine_type)
else:
machine_type = pinproc.MachineTypeCustom
game = PlayerGame(machine_type=machine_type,width=options.size[0], height=options.size[1])
game.play(filename=args[0], repeat=options.repeat, hold=options.hold, frametime = options.frametime)
game.run_loop()
del game
return True
|
11480089
|
from dataclasses import dataclass
import hashlib  # for md5 ids of prompts
import json
from typing import List, Optional
import requests
@dataclass
class PromptResult:
setting = "zero-shot"
value: float = 0.0
plm: str = None
metric: str = None
"""Example
{
"language": "en",
"template": "{Text}, Overall it is a {Answer} movie.",
"answer": {
"positive": ["fantastic", "interesting"],
"negative": ["boring"]
},
"supported_plm_types": ["masked_lm", "left_to_right", "encoder_decoder"],
"results": [
{
"plm": "BERT",
"metric": "accuracy",
"setting": "zero-shot",
"value": "87"
},
{
"plm": "BART",
"metric": "accuracy",
"setting": "zero-shot",
"value": "80"
}
]
}
"""
@dataclass
class Prompt:
id: str = "null" # this will be automatically assigned
language: str = "en"
description: str = "prompt description"
template: str = None
    # In the Prompt class, the `answers` field is a mapping from each
    # category name to a list of answer words, for example:
    # answers={'World': ['World News', 'World Report'],
    #          'Sports': ['Sports'], 'Business': ['Business'],
    #          'Science and Technology': ['Science and Technology']}
answers: dict = None
supported_plm_types: List[str] = None
signal_type: List[str] = None
# results: List[PromptResult] = None
results: List[PromptResult] = None
# features:Optional[Features] = None # {"length":Value("int64"),
# "shape":Value("string"), "skeleton": Value("string")}
features: Optional[
dict
] = None # {"length":5, "shape":"prefix", "skeleton": "what_about"}
reference: str = None
contributor: str = "Datalab"
    def __post_init__(self):
        # Convert back to the correct classes when we reload from dict.
        # Note the elif: with a plain `if`, a dict-typed answers would fall
        # through to the else branch and have its id overwritten.
        if self.template is not None and self.answers is not None:
            if isinstance(self.answers, dict):
                self.id = hashlib.md5(
                    (self.template + json.dumps(self.answers)).encode()
                ).hexdigest()
            elif isinstance(self.answers, str):
                self.id = hashlib.md5(
                    (self.template + self.answers).encode()
                ).hexdigest()
            else:
                self.id = hashlib.md5(self.template.encode()).hexdigest()
        elif self.template is not None:
            self.id = hashlib.md5(self.template.encode()).hexdigest()
class Prompts:
@classmethod
def from_url(cls, URL):
res = requests.get(URL)
prompts = json.loads(res.text)
# new_prompts = {x["id"]: Prompt(**x) for x in prompts}
# prompts = []
# for dic in dics:
# prompts.append(Prompt(**dic))
return prompts
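# A minimal usage sketch: the id is derived deterministically in
# __post_init__ from the template (plus answers, when given), so identical
# prompts always get identical ids.
#
#     p = Prompt(
#         template="{Text}, Overall it is a {Answer} movie.",
#         answers={"positive": ["fantastic"], "negative": ["boring"]},
#     )
#     print(p.id)  # md5 hex digest of template + serialized answers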
|
11480094
|
import multiprocessing as mp
from multiprocessing import pool
class NoDaemonProcess(mp.Process):
# make 'daemon' attribute always return False
def _get_daemon(self):
return False
def _set_daemon(self, value):
pass
daemon = property(_get_daemon, _set_daemon)
class MyPool(pool.Pool):
"""
A specially designed Pool class, whose processes are all not daemon.
A daemon process is not allowed to spawn more sub-processes, so it is not
favorable.
"""
Process = NoDaemonProcess
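# A minimal nested-pool sketch (worker functions must live at module level so
# they can be pickled; note that on newer Pythons pool.Pool obtains its
# Process class through a context object, so this recipe may need adapting):
#
#     def inner(x):
#         return x * x
#     def outer(n):
#         with MyPool(2) as p:
#             return sum(p.map(inner, range(n)))
#     if __name__ == '__main__':
#         with MyPool(2) as p:
#             print(p.map(outer, [3, 4]))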
|
11480099
|
from unittest.mock import MagicMock
import pytest
from jange.base import DataStream, OperationCollection, Operation
def test_can_iterate_all_underlying_data():
data = [1, 2, 3, 4]
ds = DataStream(items=data)
assert list(ds) == data
def test_all_operations_are_applied():
data = [1, 2, 3, 4]
ds = DataStream(applied_ops=None, items=data)
op1 = MagicMock()
op2 = MagicMock()
ds.apply(op1, op2)
op1.run.assert_called_once()
op2.run.assert_called_once()
@pytest.mark.parametrize("items,dtype", [([1, 2, 3], int), (["a", "b", "c"], str)])
def test_item_type_of_stream(items, dtype):
ds = DataStream(items)
assert ds.item_type == dtype
assert len(list(ds.items)) == len(items)
@pytest.mark.parametrize(
"ops",
[
([Operation(name="op1"), Operation(name="op2")]),
(OperationCollection([Operation(name="op1"), Operation(name="op2")])),
],
)
def test_converts_applied_ops_to_operation_collection(ops):
"""Test to make sure that the applied_ops property of
a stream is maintained as OperationCollection even if
a list of operations is passed
"""
ds = DataStream(items=[1], applied_ops=ops)
assert isinstance(ds.applied_ops, OperationCollection)
@pytest.mark.parametrize("items", [[], None, tuple(), set()])
@pytest.mark.parametrize("context", [None, ["a", "b"], [1, 2]])
def test_raises_exception_when_items_is_finite_but_empty_or_none(items, context):
with pytest.raises(ValueError):
DataStream(items=items, context=context)
def test_checking_item_type_for_generators_does_not_consume():
num_elements = 10
gen = (i * 2 for i in range(num_elements))
ds = DataStream(items=gen)
assert ds.is_countable is False
assert ds.item_type == int
assert len(list(ds)) == num_elements
@pytest.mark.parametrize(
"items", [[1, 2, 3], ("a", "b")],
)
@pytest.mark.parametrize("is_items_generator", [True, False])
def test_context_is_always_available(is_items_generator, items):
"""whether or not items is generator or fixed, context should
be available
"""
expected_length = len(items)
if is_items_generator:
items = (x for x in items)
ds = DataStream(items=items, context=None)
assert len(list(ds.items)) == expected_length
assert len(list(ds.context)) == expected_length
@pytest.mark.parametrize("is_context_generator", [True, False])
@pytest.mark.parametrize("items_len,context_len", [(2, 3), (1, 2)])
def test_raises_error_when_context_is_not_countable_or_not_same_length_as_items_but_items_is_countable(
items_len, context_len, is_context_generator
):
items = list(range(items_len))
context = range(context_len)
if is_context_generator:
context = (x for x in context)
with pytest.raises(ValueError):
DataStream(items=items, context=context)
@pytest.mark.parametrize("is_items_generator", [True, False])
def test_returns_total_items_if_countable_else_exception(is_items_generator):
items = [1, 2, 3]
items_count = len(items)
if is_items_generator:
items = (x for x in items)
ds = DataStream(items)
if is_items_generator:
with pytest.raises(AttributeError):
ds.total_items
else:
assert ds.total_items == items_count
|
11480137
|
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import argparse
import logging
import numpy as np
import tensorflow as tf
from collections import defaultdict
from data import *
from utils.analysis import *
from utils.experiments import *
from models.visual import *
from models.auditive import MusicVae
from models.synesthetic import SynestheticVae
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser(description='SynVAE - Analysis')
arg_parser.add_argument('task', choices=['mnist', 'cifar', 'bam'], help='name of the task (mnist, cifar, bam)')
    arg_parser.add_argument('musicvae_config', choices=['cat-mel_2bar_big', 'hierdec-mel_4bar', 'hierdec-mel_8bar', 'hierdec-mel_16bar'], help='name of the MusicVAE model configuration (e.g. hierdec-mel_16bar)')
arg_parser.add_argument('model_path', help='path to SynVAE model')
arg_parser.add_argument('data_path', help='path to data (not required for original MNIST)')
arg_parser.add_argument('data_split', choices=['train', 'test'], default='test', help='data split (train, test (default))')
arg_parser.add_argument('out_path', help='path to output')
arg_parser.add_argument('--beta', type=float, default=1., help='beta parameter for weighting KL-divergence (default: 1.0)')
arg_parser.add_argument('--batch_size', type=int, default=200, help='batch size (default: 200)')
arg_parser.add_argument('--ranks', default='1,5,10', help='precision ranks to use during evaluation (default: "1,5,10")')
arg_parser.add_argument('--kl', action='store_true', help='compute approximate audio-visual KL-divergence')
arg_parser.add_argument('--perplexity', type=int, default=30, help='perplexity of distributions used to approximate the data space (default: 30)')
arg_parser.add_argument('--export_data', action='store_true', help='export original samples and reconstructions')
arg_parser.add_argument('--export_latents', action='store_true', help='export latent vectors')
args = arg_parser.parse_args()
# check if directory already exists
if os.path.exists(args.out_path):
print("[Error] '%s' already exists." % (args.out_path,))
sys.exit()
# make necessary directories
os.mkdir(args.out_path)
setup_logging(os.path.join(args.out_path, 'results.log'))
# set up auditive model
music_vae = MusicVae(config_name=args.musicvae_config, batch_size=args.batch_size)
# set up visual model
if args.task == 'mnist':
visual_vae = MnistVae(latent_dim=music_vae.latent_dim, beta=args.beta, batch_size=args.batch_size)
dataset = Mnist(split='test', data_path=args.data_path)
elif args.task == 'cifar':
visual_vae = CifarVae(latent_dim=music_vae.latent_dim, beta=args.beta, batch_size=args.batch_size)
dataset = Cifar(args.data_path)
elif args.task == 'bam':
visual_vae = BamVae(latent_dim=music_vae.latent_dim, beta=args.beta, batch_size=args.batch_size)
dataset = Bam(args.data_path)
dataset.filter_labels(['emotion_gloomy', 'emotion_happy', 'emotion_peaceful', 'emotion_scary'])
dataset.filter_uncertain()
dataset.make_multiclass()
# set up synesthetic model
model = SynestheticVae(visual_model=visual_vae, auditive_model=music_vae, learning_rate=1e-4)
model.build()
# load data
iterator = dataset.get_image_iterator(batch_size=args.batch_size)
next_op = iterator.get_next()
# inference
with tf.Session() as sess:
# initialize variables and dataset iterator
sess.run(tf.global_variables_initializer())
sess.run(iterator.initializer)
# restore MusicVAE
model.restore(tf_session=sess, path=args.model_path)
# encode in batches
vis_latents, aud_latents = None, None
avg_loss = 0.
avg_mse = 0.
avg_kl = 0.
batch_idx = 0
# iterate over batches
while True:
try:
# get next batch
sys.stdout.write("\rEncoding batch %d..." % (batch_idx))
sys.stdout.flush()
batch = sess.run(next_op)
batch_idx += 1
# inference step
temperature = 0.5
cur_loss, cur_mse, cur_kl, cur_audios, cur_recons, cur_vis_latents, cur_aud_latents = sess.run([
model.loss, model.vis_model.recon_loss, model.vis_model.latent_loss, model.audios, model.reconstructions, model.vis_latents, model.aud_latents
], feed_dict={
model.images: batch, model.temperature: temperature
})
# append to result
avg_loss = ((avg_loss * (batch_idx - 1)) + cur_loss) / batch_idx
avg_mse = ((avg_mse * (batch_idx - 1)) + cur_mse) / batch_idx
avg_kl = ((avg_kl * (batch_idx - 1)) + cur_kl) / batch_idx
if batch_idx == 1:
vis_latents, aud_latents = cur_vis_latents, cur_aud_latents
else:
vis_latents = np.concatenate((vis_latents, cur_vis_latents), axis=0)
aud_latents = np.concatenate((aud_latents, cur_aud_latents), axis=0)
if args.export_data:
for idx in range(batch.shape[0]):
data_idx = ((batch_idx - 1) * args.batch_size) + idx
model.vis_model.save_image(batch[idx].squeeze(), os.path.join(args.out_path, str(data_idx) + '_orig.png'))
model.vis_model.save_image(cur_recons[idx].squeeze(), os.path.join(args.out_path, str(data_idx) + '_recon.png'))
model.aud_model.save_midi(cur_audios[idx], os.path.join(args.out_path, str(data_idx) + '_audio.mid'))
# end of dataset
except tf.errors.OutOfRangeError:
# exit batch loop and proceed to next epoch
break
dataset.labels = dataset.labels[:vis_latents.shape[0]]
logging.info("\rEncoded %d batches with average losses (All: %.2f | %s: %.2f | KL: %.2f), %d visual latent vectors and %d auditive latent vectors."
% (batch_idx, avg_loss, 'MSE', avg_mse, avg_kl, vis_latents.shape[0], aud_latents.shape[0]))
if args.export_latents:
np.save(os.path.join(args.out_path, 'aud_latents.npy'), aud_latents)
np.save(os.path.join(args.out_path, 'vis_latents.npy'), vis_latents)
logging.info("\rSaved %d auditive and visual latent vectors." % aud_latents.shape[0])
if args.kl:
logging.info("Calculating KL divergence between latents (perplexity: %d)..." % args.perplexity)
kl_va, kl_av = calc_latent_kl(vis_latents, aud_latents, perplexity=args.perplexity)
# parse precision ranks
prec_ranks = [int(r) for r in args.ranks.split(',')]
if vis_latents.shape[0] <= 20000:
logging.info("Calculating visual similarities...")
vis_sims = calc_dists(vis_latents)
else:
vis_sims = None
logging.info("Calculating metrics for visual latents...")
vis_mean_latents, label_precision, label_counts = calc_metrics(vis_latents, dataset.labels, vis_sims, len(dataset.label_descs), prec_ranks, sim_metric='euclidean')
vis_latents, vis_sims = None, None # deallocate memory
for rank in prec_ranks:
log_metrics(dataset.label_descs, rank, label_precision[rank], label_counts)
if aud_latents.shape[0] <= 20000:
logging.info("Calculating auditive similarities...")
aud_sims = calc_dists(aud_latents)
else:
aud_sims = None
logging.info("Calculating metrics for auditive latents...")
aud_mean_latents, label_precision, label_counts = calc_metrics(aud_latents, dataset.labels, aud_sims, len(dataset.label_descs), prec_ranks, sim_metric='euclidean')
aud_latents, aud_sims = None, None # deallocate memory
for rank in prec_ranks:
log_metrics(dataset.label_descs, rank, label_precision[rank], label_counts)
|
11480155
|
import theano.tensor as tt
import dmgr
import lasagne as lnn
import spaghetti as spg
from .. import augmenters
class CrfLoss:
def __init__(self, crf):
self.crf = crf
def __call__(self, prediction, target, mask):
loss = spg.objectives.neg_log_likelihood(self.crf, target, mask)
loss /= mask.sum(axis=1) # normalise to sequence length
return lnn.objectives.aggregate(loss, mode='mean')
def build_net(in_shape, out_size, model):
# input variables
input_var = (tt.tensor4('input', dtype='float32')
if len(in_shape) > 1 else
tt.tensor3('input', dtype='float32'))
target_var = tt.tensor3('target_output', dtype='float32')
mask_var = tt.matrix('mask_input', dtype='float32')
# build the network: input layer, mask input, and a CRF output layer
network = lnn.layers.InputLayer(
name='input', shape=(None, None) + in_shape,
input_var=input_var
)
mask_in = lnn.layers.InputLayer(name='mask',
input_var=mask_var,
shape=(None, None))
network = spg.layers.CrfLayer(
network, mask_input=mask_in, num_states=out_size, name='CRF')
return network, input_var, target_var, mask_var
def build_model(in_shape, out_size, model):
network, input_var, target_var, mask_var = build_net(in_shape, out_size,
model)
loss_fn = CrfLoss(network)
return dict(network=network, input_var=input_var, target_var=target_var,
mask_var=mask_var, loss_fn=loss_fn)
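# Hedged usage sketch (the input shape and state count below are assumptions, not project values):
#   mdl = build_model(in_shape=(105,), out_size=25, model=None)
#   net, loss_fn = mdl['network'], mdl['loss_fn']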
def create_iterators(train_set, val_set, training, augmentation):
train_batches = dmgr.iterators.SequenceIterator(
train_set, training['batch_size'], randomise=True,
expand=True, max_seq_len=training['max_seq_len']
)
val_batches = dmgr.iterators.SequenceIterator(
val_set, training['batch_size'], randomise=False,
expand=False
)
if augmentation is not None:
train_batches = dmgr.iterators.AugmentedIterator(
train_batches, *augmenters.create_augmenters(augmentation)
)
return train_batches, val_batches
def add_sacred_config(ex):
ex.add_named_config(
name='crf',
datasource=dict(
context_size=0,
),
model=dict(
type='crf'
),
optimiser=dict(
name='adam',
params=dict(
learning_rate=0.01
),
schedule=None
),
training=dict(
batch_size=32,
max_seq_len=1024,
num_epochs=500,
early_stop=20,
early_stop_acc=True,
),
regularisation=dict(
l1=1e-4,
l2=0.0,
),
testing=dict(
test_on_val=False,
batch_size=None,
)
)
|
11480195
|
from __future__ import print_function
import argparse
from math import log10
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import models
import torch.backends.cudnn as cudnn
from dbpn_v1 import Net as DBPNLL
from dbpn import Net as DBPN
#from dbpn_iterative import Net as DBPNITER
from discriminator import Discriminator, FeatureExtractor, FeatureExtractorResnet
from data import get_training_set
from random import randrange
import pdb
import socket
import time
import utils
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Super Res Example')
parser.add_argument('--upscale_factor', type=int, default=4, help="super resolution upscale factor")
parser.add_argument('--batchSize', type=int, default=1, help='training batch size')
parser.add_argument('--pretrained_iter', type=int, default=100, help='number of pretraining epochs')
parser.add_argument('--pretrained', type=bool, default=False)
parser.add_argument('--nEpochs', type=int, default=2000, help='number of epochs to train for')
parser.add_argument('--snapshots', type=int, default=25, help='Snapshots')
parser.add_argument('--start_iter', type=int, default=1, help='Starting Epoch')
parser.add_argument('--lr', type=float, default=1e-4, help='Learning Rate. Default=1e-4')
parser.add_argument('--gpu_mode', type=bool, default=True)
parser.add_argument('--threads', type=int, default=1, help='number of threads for data loader to use')
parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')
parser.add_argument('--gpus', default=1, type=int, help='number of gpu')
parser.add_argument('--data_dir', type=str, default='./Dataset')
parser.add_argument('--data_augmentation', type=bool, default=True)
parser.add_argument('--hr_train_dataset', type=str, default='DIV2K_train_HR')
parser.add_argument('--model_type', type=str, default='DBPNLL')
parser.add_argument('--patch_size', type=int, default=60, help='Size of cropped HR image')
parser.add_argument('--pretrained_sr', default='dl00DBPNLLPIRM_pretrained_50.pth', help='sr pretrained base model')
parser.add_argument('--load_pretrained', type=bool, default=False)
parser.add_argument('--pretrained_D', default='dnnDBPNLLPIRM_RESNET_epoch_Discriminator_499.pth', help='sr pretrained base model')
parser.add_argument('--load_pretrained_D', type=bool, default=False)
parser.add_argument('--feature_extractor', default='VGG', help='Feature extractor for the perceptual loss: VGG (default) or ResNet')
parser.add_argument('--w1', type=float, default=1e-2, help='MSE weight')
parser.add_argument('--w2', type=float, default=1e-1, help='Perceptual weight')
parser.add_argument('--w3', type=float, default=1e-3, help='Adversarial weight')
parser.add_argument('--w4', type=float, default=10, help='Style weight')
parser.add_argument('--save_folder', default='weights/', help='Location to save checkpoint models')
parser.add_argument('--prefix', default='PIRM_VGG', help='Prefix for checkpoint model filenames')
opt = parser.parse_args()
gpus_list = range(opt.gpus)
hostname = str(socket.gethostname())
cuda = opt.gpu_mode
cudnn.benchmark = True
print(opt)
def train_pretrained(epoch):
epoch_loss = 0
model.train()
for iteration, batch in enumerate(training_data_loader, 1):
input, target = batch[0], batch[1]
minibatch = input.size()[0]
for j in range(minibatch):
input[j] = utils.norm(input[j],vgg=True)
target[j] = utils.norm(target[j],vgg=True)
if cuda:
input = Variable(input).cuda(gpus_list[0])
target = Variable(target).cuda(gpus_list[0])
optimizer.zero_grad()
sr = model(input)
loss = MSE_loss(sr, target)
epoch_loss += loss.data
loss.backward()
optimizer.step()
print("Epoch: [%2d] [%4d/%4d] G_loss_pretrain: %.8f"
% ((epoch), (iteration), len(training_data_loader), loss.data))
print("===> Epoch {} Complete: Avg. Loss: {:.4f}".format(epoch, epoch_loss / len(training_data_loader)))
def train(epoch):
G_epoch_loss = 0
D_epoch_loss = 0
feat_epoch_loss = 0
style_epoch_loss = 0
adv_epoch_loss = 0
mse_epoch_loss = 0
model.train()
D.train()
for iteration, batch in enumerate(training_data_loader, 1):
input, target = batch[0], batch[1]
minibatch = input.size()[0]
real_label = torch.ones(minibatch) #torch.rand(minibatch,1)*0.5 + 0.7
fake_label = torch.zeros(minibatch) #torch.rand(minibatch,1)*0.3
for j in range(minibatch):
input[j] = utils.norm(input[j],vgg=True)
target[j] = utils.norm(target[j],vgg=True)
if cuda:
input = Variable(input).cuda(gpus_list[0])
target = Variable(target).cuda(gpus_list[0])
real_label = Variable(real_label).cuda(gpus_list[0])
fake_label = Variable(fake_label).cuda(gpus_list[0])
# Reset gradient
D_optimizer.zero_grad()
# Train discriminator with real data
D_real_decision = D(target)
D_real_loss = BCE_loss(D_real_decision, real_label)
# Train discriminator with fake data
recon_image = model(input)
D_fake_decision = D(recon_image)
D_fake_loss = BCE_loss(D_fake_decision, fake_label)
D_loss = D_real_loss + D_fake_loss
# Back propagation
D_loss.backward()
D_optimizer.step()
# Reset gradient
optimizer.zero_grad()
# Train generator
recon_image = model(input)
D_fake_decision = D(recon_image)
# Adversarial loss
GAN_loss = opt.w3 * BCE_loss(D_fake_decision, real_label)
# Content losses
mse_loss = opt.w1 * MSE_loss(recon_image, target)
#Perceptual loss
x_VGG = Variable(batch[1].cuda())
recon_VGG = Variable(recon_image.data.cuda())
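# note: wrapping recon_image.data in a new Variable detaches the perceptual and style losses from the generator graph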
real_feature = feature_extractor(x_VGG)
fake_feature = feature_extractor(recon_VGG)
vgg_loss = opt.w2 * sum([ MSE_loss(fake_feature[i], real_feature[i].detach()) for i in range(len(real_feature))])
style_loss = opt.w4 * sum([ MSE_loss(utils.gram_matrix(fake_feature[i]), utils.gram_matrix(real_feature[i]).detach()) for i in range(len(real_feature))])
# Back propagation
G_loss = mse_loss + vgg_loss + GAN_loss + style_loss
G_loss.backward()
optimizer.step()
# log
G_epoch_loss += G_loss.data
D_epoch_loss += D_loss.data
feat_epoch_loss += (vgg_loss.data)
style_epoch_loss += (style_loss.data)
adv_epoch_loss += (GAN_loss.data)
mse_epoch_loss += (mse_loss.data)
print("Epoch: [%2d] [%4d/%4d] G_loss: %.8f, D_loss: %.8f, mse:%.8f, perceptual: %.8f, style: %.8f, adv: %.8f"
% ((epoch), (iteration), len(training_data_loader), G_loss.data, D_loss.data,mse_loss.data, vgg_loss.data, style_loss.data, GAN_loss.data))
print("===> Epoch {} Complete: Avg. Loss G: {:.4f} D: {:.4f} MSE: {:.4f} Perceptual: {:.4f} Style: {:.4f} Adv: {:.4f}".format(epoch, G_epoch_loss / len(training_data_loader), D_epoch_loss / len(training_data_loader), mse_epoch_loss/ len(training_data_loader), feat_epoch_loss/ len(training_data_loader),style_epoch_loss/ len(training_data_loader), adv_epoch_loss/ len(training_data_loader) ))
def test():
avg_psnr = 0
for batch in testing_data_loader:
input, target = Variable(batch[0]), Variable(batch[1])
if cuda:
input = input.cuda(gpus_list[0])
target = target.cuda(gpus_list[0])
prediction = model(input)
mse = MSE_loss(prediction, target)
psnr = 10 * log10(1 / mse.data[0])
avg_psnr += psnr
print("===> Avg. PSNR: {:.4f} dB".format(avg_psnr / len(testing_data_loader)))
def checkpoint(epoch, pretrained_flag=False):
if pretrained_flag:
model_out_path = opt.save_folder+hostname+opt.model_type+opt.prefix+"_pretrained_{}.pth".format(epoch)
else:
model_out_path = opt.save_folder+hostname+opt.model_type+opt.prefix+opt.feature_extractor+"_epoch_{}.pth".format(epoch)
model_out_path_D = opt.save_folder+hostname+opt.model_type+opt.prefix+opt.feature_extractor+"_epoch_Discriminator_{}.pth".format(epoch)
torch.save(model.state_dict(), model_out_path)
torch.save(D.state_dict(), model_out_path_D)
print("Checkpoint saved to {}".format(model_out_path))
if cuda and not torch.cuda.is_available():
raise Exception("No GPU found, please run with --gpu_mode False")
torch.manual_seed(opt.seed)
if cuda:
torch.cuda.manual_seed(opt.seed)
print('===> Loading datasets')
train_set = get_training_set(opt.data_dir, opt.hr_train_dataset, opt.upscale_factor, opt.patch_size, opt.data_augmentation)
training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
print('===> Building model ', opt.model_type)
if opt.model_type == 'DBPNLL':
model = DBPNLL(num_channels=3, base_filter=64, feat = 256, num_stages=10, scale_factor=opt.upscale_factor)
#elif opt.model_type == 'DBPN-RES-MR64-3':
# model = DBPNITER(num_channels=3, base_filter=64, feat = 256, num_stages=3, scale_factor=opt.upscale_factor)
else:
model = DBPN(num_channels=3, base_filter=64, feat = 256, num_stages=7, scale_factor=opt.upscale_factor)
model = torch.nn.DataParallel(model, device_ids=gpus_list)
###Discriminator
D = Discriminator(num_channels=3, base_filter=64, image_size=opt.patch_size*opt.upscale_factor)
D = torch.nn.DataParallel(D, device_ids=gpus_list)
###Feature Extractor
if opt.feature_extractor=='VGG':
feature_extractor = FeatureExtractor(models.vgg19(pretrained=True))
else:
feature_extractor = FeatureExtractorResnet(models.resnet152(pretrained=True))
###LOSS
MSE_loss = nn.MSELoss()
BCE_loss = nn.BCELoss()
print('---------- Generator architecture -------------')
utils.print_network(model)
print('---------- Discriminator architecture ---------')
utils.print_network(D)
print('-----------------------------------------------')
if opt.load_pretrained:
model_name = os.path.join(opt.save_folder + opt.pretrained_sr)
if os.path.exists(model_name):
#model= torch.load(model_name, map_location=lambda storage, loc: storage)
model.load_state_dict(torch.load(model_name, map_location=lambda storage, loc: storage))
print('Pre-trained SR model is loaded.')
if opt.load_pretrained_D:
D_name = os.path.join(opt.save_folder + opt.pretrained_D)
if os.path.exists(D_name):
#model= torch.load(model_name, map_location=lambda storage, loc: storage)
D.load_state_dict(torch.load(D_name, map_location=lambda storage, loc: storage))
print('Pre-trained Discriminator model is loaded.')
if cuda:
model = model.cuda(gpus_list[0])
D = D.cuda(gpus_list[0])
feature_extractor = feature_extractor.cuda(gpus_list[0])
MSE_loss = MSE_loss.cuda(gpus_list[0])
BCE_loss = BCE_loss.cuda(gpus_list[0])
optimizer = optim.Adam(model.parameters(), lr=opt.lr, betas=(0.9, 0.999), eps=1e-8)
D_optimizer = optim.Adam(D.parameters(), lr=opt.lr, betas=(0.9, 0.999), eps=1e-8)
##PRETRAINED
if opt.pretrained:
print('Pre-training starts.')
for epoch in range(1, opt.pretrained_iter + 1):
train_pretrained(epoch)
print('Pre-training finished.')
checkpoint(epoch, pretrained_flag=True)
###GAN Training
for epoch in range(opt.start_iter, opt.nEpochs + 1):
train(epoch)
#test()
# learning rate is decayed by a factor of 10 every half of total epochs
if (epoch+1) % (opt.nEpochs/2) == 0:
for param_group in optimizer.param_groups:
param_group['lr'] /= 10.0
print('G: Learning rate decay: lr={}'.format(optimizer.param_groups[0]['lr']))
for param_group in D_optimizer.param_groups:
param_group['lr'] /= 10.0
print('D: Learning rate decay: lr={}'.format(D_optimizer.param_groups[0]['lr']))
if (epoch+1) % (opt.snapshots) == 0:
checkpoint(epoch)
|
11480205
|
from __future__ import absolute_import, division, print_function
import io
import os.path
import tarfile
import tempfile
from requests.utils import urlparse
import appr.pack as packager
from appr.client import ApprClient
class FormatBase(object):
media_type = NotImplementedError
target = NotImplementedError
kub_class = NotImplementedError
manifest_file = []
appr_client = ApprClient
def __init__(self, name, version=None, endpoint=None, ssl_verify=True, **kwargs):
self._deploy_name = name
self._deploy_version = version or {"key": "version", "value": 'default'}
self.endpoint = endpoint
self._registry = self.appr_client(endpoint=self.endpoint, requests_verify=ssl_verify)
self._package = None
self._manifest = None
@property
def package(self):
if self._package is None:
result = self._fetch_package()
self._package = packager.ApprPackage(result, b64_encoded=True)
return self._package
def _create_manifest(self):
raise NotImplementedError
@property
def manifest(self):
if self._manifest is None:
self._manifest = self._create_manifest()
return self._manifest
def __unicode__(self):
return ("(<{class_name}({name}=={version})>".format(class_name=self.__class__.__name__,
name=self.name, version=self.version))
def __str__(self):
return self.__unicode__().encode('utf-8')
def __repr__(self):
return self.__str__()
@property
def author(self):
pass
@property
def version(self):
return self.manifest.version
@property
def description(self):
pass
@property
def name(self):
return self.manifest.name
@property
def variables(self):
pass
def _fetch_package(self):
parse = urlparse(self._deploy_name)
if parse.scheme in ["http", "https"]:
# @TODO
pass
elif parse.scheme == "file":
parts = parse.path.split("/")
_, ext = os.path.splitext(parts[-1])
if ext == ".gz":
filepath = parse.path
else:
filepath = tempfile.NamedTemporaryFile().name
packager.pack_kub(filepath)
with open(filepath, "rb") as tarf:
return tarf.read()
else:
return self._registry.pull_json(self._deploy_name, self._deploy_version,
self.media_type)['blob']
def make_tarfile(self, source_dir):
output = io.BytesIO()
with tarfile.open(fileobj=output, mode="w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
return output
|
11480252
|
OCCLUSION_MARKERS = [
"robot0:ffocclusion",
"robot0:mfocclusion",
"robot0:rfocclusion",
"robot0:lfocclusion",
"robot0:thocclusion",
]
OCCLUSION_DIST_CUTOFF = -0.0001 # negative cutoff: the contact must have penetrated to count as occlusion
def occlusion_markers_exist(sim):
for marker in OCCLUSION_MARKERS:
if marker not in sim.model.geom_names:
return False
return True
def check_occlusion(sim, dist_cutoff=OCCLUSION_DIST_CUTOFF):
"""
Check whether there is any collision or contact with the finger occlusion detection
geoms (class = "D_Occlusion").
Given a finger occlusion geom, if there is a contact and the contact distance is smaller
than `dist_cutoff`, we consider it as "being occluded".
Returns: a list of 5 binary, indicating whether a finger (ff, mf, rf, lf, th) is occluded.
"""
target_geom_ids = [sim.model.geom_name2id(m) for m in OCCLUSION_MARKERS]
geom_ids_with_contact = set()
for i in range(sim.data.ncon):
contact = sim.data.contact[i]
if contact.dist < dist_cutoff:
geom1 = contact.geom1
geom2 = contact.geom2
geom_ids_with_contact.add(geom1)
geom_ids_with_contact.add(geom2)
return [int(g_id in geom_ids_with_contact) for g_id in target_geom_ids]
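# Minimal usage sketch (assumes a MuJoCo simulation object `sim`, e.g. a mujoco_py MjSim,
# whose model contains the occlusion marker geoms):
#   if occlusion_markers_exist(sim):
#       ff, mf, rf, lf, th = check_occlusion(sim)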
def recolor_occlusion_geoms(sim, robot_occlusion_data):
"""
Color the occlusion geoms differently according to whether the simulator and the
phasespace tracker matches.
"""
colormap = [
[0, 0, 0, 0.1], # transparent grey for both off
[1, 0, 0, 0.7], # red for robot not but sim occluded
[0, 0, 1, 0.7], # blue for robot occluded but sim not
[1, 1, 0, 1.0], # solid yellow for both occluded
]
sim_occlusion_data = check_occlusion(sim)
geom_ids = [sim.model.geom_name2id(m) for m in OCCLUSION_MARKERS]
for g_id, robot_occluded, sim_occluded in zip(
geom_ids, robot_occlusion_data, sim_occlusion_data
):
category = 2 * int(robot_occluded) + int(sim_occluded)
sim.model.geom_rgba[g_id] = colormap[category]
|
11480277
|
from ipware import get_client_ip
from rest_framework import permissions
from . import settings as api_settings
class WhiteListPermission(permissions.BasePermission):
def has_permission(self, request, view):
ip_addr = get_client_ip(request)[0]
return ip_addr in api_settings.REST_FRAMEWORK_WHITELIST
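# Usage sketch (the whitelist value is a hypothetical example; the setting comes from this app's settings module):
#   REST_FRAMEWORK_WHITELIST = ["127.0.0.1", "10.0.0.5"]
# then on a DRF view: permission_classes = [WhiteListPermission]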
|
11480281
|
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
import nltk
import urllib.request
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize,sent_tokenize
from string import punctuation
from heapq import nlargest
from collections import defaultdict
import requests
url = "https://en.wikipedia.org/wiki/Machine_learning"
#request = urllib.request.urlopen(url).read().decode('utf8','ignore')
#soup = BeautifulSoup(request,'html.parser')
response = requests.get(url)
soup = BeautifulSoup(response.content,'html.parser')
text_p = soup.find_all('p')
#print(text_p)
text = ''
for i in range(len(text_p)):
text += text_p[i].text
text = text.lower()
tokens = [t for t in text.split()]
#print(tokens)
clean_token = tokens[:]
#define irrelevant words that include stop words , punctuations and numbers
stopword = set(stopwords.words('english')+list(punctuation)+list("0123456789"))
for token in tokens:
if token in stopword:
clean_token.remove(token)
#print(clean_token)
'''Frequency distribution of 100 most common words called BAG OF WORDS'''
freq = nltk.FreqDist(clean_token)
top_words = []
top_words = freq.most_common(100)
#print(top_words)
'''Tokenize the web page text into Sentences'''
sentences = sent_tokenize(text)
#print(sentences)
'''Create ranking ,Higher the presence of the frequent words in the sentence,higher will be the ranking'''
ranking = defaultdict(int)
for i,sent in enumerate(sentences):
for word in word_tokenize(sent.lower()):
if word in freq:
ranking[i] += freq[word]
top_sentences = nlargest(10,ranking,ranking.get)
#print(top_sentences)
sorted_sentences = [sentences[j] for j in sorted(top_sentences)]
#print(sorted_sentences)
|
11480291
|
from optimum.onnxruntime.modeling_ort import (
ORTModelForCausalLM,
ORTModelForFeatureExtraction,
ORTModelForQuestionAnswering,
ORTModelForSequenceClassification,
ORTModelForTokenClassification,
)
task_ortmodel_map = {
"feature-extraction": ORTModelForFeatureExtraction,
"question-answering": ORTModelForQuestionAnswering,
"text-classification": ORTModelForSequenceClassification,
"token-classification": ORTModelForTokenClassification,
"causal-lm": ORTModelForCausalLM,
}
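# Example lookup (keys are the task names defined above):
#   ort_cls = task_ortmodel_map["text-classification"]  # -> ORTModelForSequenceClassification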
|
11480295
|
from samplics.sampling.selection import SampleSelection
from samplics.sampling.size import (
SampleSize,
SampleSizeOneMean,
SampleSizeOneProportion,
SampleSizeOneTotal,
allocate,
calculate_power,
power_for_proportion,
sample_size_for_mean_wald,
sample_size_for_proportion_fleiss,
sample_size_for_proportion_wald,
)
from samplics.sampling.size_and_power import power_for_one_mean, power_for_one_proportion
__all__ = [
"allocate",
"calculate_power",
"power_for_proportion",
"power_for_one_proportion",
"power_for_one_mean",
"SampleSelection",
"SampleSize",
"SampleSizeOneMean",
"SampleSizeOneProportion",
"SampleSizeOneTotal",
"sample_size_for_mean_wald",
"sample_size_for_proportion_fleiss",
"sample_size_for_proportion_wald",
]
|
11480301
|
from django.shortcuts import render
def plain_text_view(request, template_name):
return render(request, template_name, content_type='text/plain')
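# Example wiring (a hypothetical urls.py entry; the template name is an assumption):
#   path("robots.txt", plain_text_view, {"template_name": "robots.txt"})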
|
11480303
|
from ast import literal_eval
def decode_string(s):
"""Convert a string literal to a number or a bool.
Args:
s (str): String
Returns:
str,float,int or bool: Value decoded
Examples:
>>> decode_string('a')
'a'
>>> val = decode_string('1.0')
>>> type(val)
<class 'int'>
>>> val
1
>>> val = decode_string('1')
>>> type(val)
<class 'int'>
>>> val
1
>>> val = decode_string('1.5')
>>> type(val)
<class 'float'>
>>> val
1.5
>>> val = decode_string('True')
>>> type(val)
<class 'bool'>
>>> val
True
"""
if isinstance(s, str):
# Does it represent a literal?
try:
val = literal_eval(s)
except (ValueError, SyntaxError):
# if it doesn't represent a literal, no conversion is done
val = s
else:
# It's already something other than a string
val = s
# Is the float actually an int? (i.e. is the float 1.0 ?)
if isinstance(val, float) and val.is_integer():
return int(val)
return val
|
11480309
|
from pysimm import system, lmps, forcefield
def run(test=False):
# use a smiles string to query the pubchem search database and read the mol file returned from the http request
try:
s = system.read_pubchem_smiles('CO')
except:
import os
s = system.read_mol(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'CO.mol'))
# the resulting system has sufficient information to type with a forcefield, here we will use the Dreiding force field
s.apply_forcefield(forcefield.Dreiding())
# we'll perform energy minimization using the fire algorithm in LAMMPS
lmps.quick_min(s, min_style='fire')
# write a few different file formats
s.write_xyz('methanol.xyz')
s.write_yaml('methanol.yaml')
s.write_lammps('methanol.lmps')
s.write_chemdoodle_json('methanol.json')
if __name__ == '__main__':
run()
|
11480425
|
from django.db import models
from profiles.models import Profile, ProfileHub
from hubs.models import Hub, HubGeolocation
class Offer(models.Model):
title = models.CharField(max_length=100, blank=False)
description = models.TextField(blank=False)
number = models.CharField(max_length=10, blank=True)
street = models.CharField(max_length=200, blank=False)
postal_code = models.CharField(max_length=10, blank=False)
city = models.CharField(max_length=50, blank=False)
seller = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name='profile_offers')
is_CAPS = models.BooleanField(default=True)
is_BARTER = models.BooleanField(default=True)
is_GIVE = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
price_CAPS = models.PositiveIntegerField(null=True, blank=True)
price_barter = models.CharField(max_length=200, null=True, blank=True)
class OfferHub(models.Model):
offer = models.OneToOneField(Offer, on_delete=models.CASCADE)
hub = models.ForeignKey(Hub, on_delete=models.CASCADE, null=True, related_name='offers')
distance_km = models.DecimalField(max_digits=10, decimal_places=3, blank=False, null=True)
lat = models.DecimalField(max_digits=9, decimal_places=6, blank=True, null=True)
lng = models.DecimalField(max_digits=9, decimal_places=6, blank=True, null=True)
|
11480460
|
import unittest
import board_outline_stitcher
P = board_outline_stitcher.Path
class PathStitcherTest(unittest.TestCase):
def test_start_point(self):
p = P('M 1 2 L 3 4')
self.assertEqual(p.start_point, [1.0, 2.0])
self.assertEqual(p.end_point, [3.0, 4.0])
def test_stitch_case_2(self):
p1 = P('M 1 2 L 3 4')
p2 = P('M 3 4 L 5 6')
self.assertEqual(p1.check(p2, 0.1), 2)
p1.adopt(p2, 0.1)
self.assertEqual(p1.start_point, [1.0, 2.0])
self.assertEqual(p1.end_point, [5.0, 6.0])
self.assertEqual(p1.cmds, ["M 1 2", "L 3 4", "L 5 6"])
def test_stitch_case_3(self):
p1 = P('M 1 2 L 3 4')
p2 = P('M 3 4 L 5 6')
self.assertEqual(p2.check(p1, 0.1), 3)
p2.adopt(p1, 0.1)
self.assertEqual(p2.start_point, [1.0, 2.0])
self.assertEqual(p2.end_point, [5.0, 6.0])
self.assertEqual(p2.cmds, ["M 1 2", "L 3 4", "L 5 6"])
def test_reverse(self):
p = P('M 1 2 L 3 4 M 0 0 L 5 6')
p.reverse()
self.assertEqual([5.0, 6.0], p.start_point)
self.assertEqual([1.0, 2.0], p.end_point)
self.assertEqual(["M 5 6", "L 0 0", "M 3 4", "L 1 2"], p.cmds)
def test_reverse_arc(self):
p = P('M 0 0 A 1 2 3 4 1 6 7 L -1 -1')
p.reverse()
self.assertEqual(["M -1 -1", "L 6 7", "A 1 2 3 4 0 0 0"], p.cmds)
def test_adopt_specific(self):
# found by manual testing, this caused problems
p1 = P('M 16266 2875 L 17323 3486')
p2 = P('M 16929.6 4168.06 A 393.701 393.701 0.0 0 0 17323.3 3486.15')
p1.adopt(p2, 50)
self.assertEqual(["M 16266 2875", "L 17323 3486", "A 393.701 393.701 0.0 0 1 16929.6 4168.06"], p1.cmds)
def test_extract_style_attr(self):
self.assertEqual("foo", board_outline_stitcher.extract_style_attr("a:foo;b:bar", "a"))
self.assertEqual("bar", board_outline_stitcher.extract_style_attr("a:foo;b:bar", "b"))
self.assertEqual(None, board_outline_stitcher.extract_style_attr("a:foo;b:bar", "c"))
self.assertEqual("foo", board_outline_stitcher.extract_style_attr(" a : foo ;; b : bar", "a"))
self.assertEqual("bar", board_outline_stitcher.extract_style_attr(" a : foo ;; b : bar", "b"))
self.assertEqual(None, board_outline_stitcher.extract_style_attr(" a : foo ;; b : bar", "c"))
self.assertEqual("foo bar", board_outline_stitcher.extract_style_attr("a:foo bar;b:bar", "a"))
if __name__ == '__main__':
unittest.main()
|
11480464
|
import math
import pypact as pp
from tests.testerbase import Tester
DECIMAL_PLACE_ACC = 6
class GroupStructuresUnitTest(Tester):
def test_group66(self):
g = pp.ALL_GROUPS[66]
self.assertEqual(67, len(g), "Assert the length of group 66 is 67")
self.assertEqual(2.50e7, g[0], "Assert the first entry for group 66")
self.assertEqual(1.49e7, g[3], "Assert the fourth entry for group 66")
self.assertEqual(1.00e-5, g[-1], "Assert the last entry for group 66")
# for regression check the sum of all entries
self.assertAlmostEqual(113960160.65750997, sum(g),
places=DECIMAL_PLACE_ACC,
msg="Assert the sum of all entries of 66")
self._check_list_is_decreasing(g)
def test_group69(self):
g = pp.ALL_GROUPS[69]
self.assertEqual(70, len(g), "Assert the length of group 69 is 70")
self.assertEqual(1.00000E7, g[0], "Assert the first entry for group 69")
self.assertEqual(2.23100E6, g[3], "Assert the fourth entry for group 69")
self.assertEqual(1.00e-5, g[-1], "Assert the last entry for group 69")
# for regression check the sum of all entries
self.assertAlmostEqual(25417961.86301, sum(g),
places=DECIMAL_PLACE_ACC,
msg="Assert the sum of all entries of 69")
self._check_list_is_decreasing(g)
def test_group100(self):
g = pp.ALL_GROUPS[100]
self.assertEqual(101, len(g), "Assert the length of group 100 is 101")
self.assertEqual(1.49180E7, g[0], "Assert the first entry for group 100")
self.assertEqual(1.10515E7, g[3], "Assert the fourth entry for group 100")
self.assertEqual(1.0E-5, g[-1], "Assert the last entry for group 100")
# for regression check the sum of all entries
self.assertAlmostEqual(156097943.581636, sum(g),
places=DECIMAL_PLACE_ACC,
msg="Assert the sum of all entries of 100")
self._check_list_is_decreasing(g)
def test_group162(self):
g = pp.ALL_GROUPS[162]
self.assertEqual(163, len(g), "Assert the length of group 162 is 163")
self.assertEqual(1.000000E+09, g[0], "Assert the first entry for group 162")
self.assertEqual(8.800000E+08, g[3], "Assert the fourth entry for group 162")
self.assertEqual(5.00000E3, g[-1], "Assert the last entry for group 162")
# for regression check the sum of all entries
self.assertAlmostEqual(14460200000.0, sum(g),
places=DECIMAL_PLACE_ACC,
msg="Assert the sum of all entries of 162")
self._check_list_is_decreasing(g)
def test_group172(self):
g = pp.ALL_GROUPS[172]
self.assertEqual(173, len(g), "Assert the length of group 172 is 173")
self.assertEqual(1.96403E7, g[0], "Assert the first entry for group 172")
self.assertEqual(1.38403E7, g[3], "Assert the fourth entry for group 172")
self.assertEqual(1.0E-5, g[-1], "Assert the last entry for group 172")
# for regression check the sum of all entries
self.assertAlmostEqual(143972448.3481201, sum(g),
places=DECIMAL_PLACE_ACC,
msg="Assert the sum of all entries of 172")
self._check_list_is_decreasing(g)
def test_group175(self):
g = pp.ALL_GROUPS[175]
self.assertEqual(176, len(g), "Assert the length of group 175 is 176")
self.assertEqual(1.96403E7, g[0], "Assert the first entry for group 175")
self.assertEqual(1.45499E7, g[6], "Assert the seventh entry for group 175")
self.assertEqual(1.0E-5, g[-1], "Assert the last entry for group 175")
# for regression check the sum of all entries
self.assertAlmostEqual(431739677.108859, sum(g),
places=DECIMAL_PLACE_ACC,
msg="Assert the sum of all entries of 175")
self._check_list_is_decreasing(g)
def test_group211(self):
g = pp.ALL_GROUPS[211]
self.assertEqual(212, len(g), "Assert the length of group 211 is 212")
self.assertEqual(5.5000E7, g[0], "Assert the first entry for group 211")
self.assertEqual(5.2000E7, g[3], "Assert the fourth entry for group 211")
self.assertEqual(1.0E-5, g[-1], "Assert the last entry for group 211")
# for regression check the sum of all entries
self.assertAlmostEqual(1781739677.1088598, sum(g),
places=DECIMAL_PLACE_ACC,
msg="Assert the sum of all entries of 211")
self._check_list_is_decreasing(g)
def test_group351(self):
g = pp.ALL_GROUPS[351]
self.assertEqual(352, len(g), "Assert the length of group 351 is 352")
self.assertEqual(5.5000E7, g[0], "Assert the first entry for group 351")
self.assertEqual(5.2000E7, g[3], "Assert the fourth entry for group 351")
self.assertEqual(1.0E-5, g[-1], "Assert the last entry for group 351")
# for regression check the sum of all entries
self.assertAlmostEqual(1769655563.5445998, sum(g),
places=DECIMAL_PLACE_ACC,
msg="Assert the sum of all entries of 351")
self._check_list_is_decreasing(g)
def test_group586(self):
g = pp.ALL_GROUPS[586]
self.assertEqual(587, len(g), "Assert the length of group 586 is 587")
self.assertEqual(2.00000E7, g[0], "Assert the first entry for group 586")
self.assertEqual(1.6487E+07, g[3], "Assert the fourth entry for group 586")
self.assertEqual(1.0E-5, g[-1], "Assert the last entry for group 586")
# for regression check the sum of all entries
self.assertAlmostEqual(432119907.0250101, sum(g),
places=DECIMAL_PLACE_ACC,
msg="Assert the sum of all entries of 586")
self._check_list_is_decreasing(g)
def test_group616(self):
g = pp.ALL_GROUPS[616]
self.assertEqual(617, len(g), "Assert the length of group 616 is 617")
self.assertEqual(2.00000E7, g[0], "Assert the first entry for group 616")
self.assertEqual(1.81970E7, g[3], "Assert the fourth entry for group 616")
self.assertEqual(1.0E-5, g[-1], "Assert the last entry for group 616")
# for regression check the sum of all entries
self.assertAlmostEqual(463318411.1208999, sum(g),
places=DECIMAL_PLACE_ACC,
msg="Assert the sum of all entries of 616")
self._check_list_is_decreasing(g)
def test_group709(self):
g = pp.ALL_GROUPS[709]
self.assertEqual(710, len(g), "Assert the length of group 709 is 710")
self.assertEqual(1.0e+09, g[0], "Assert the first entry for group 709")
self.assertEqual(8.8e+08, g[3], "Assert the fourth entry for group 709")
self.assertEqual(1.0e-5, g[-1], "Assert the last entry for group 709")
# for regression check the sum of all entries
self.assertAlmostEqual(15992185618.888683, sum(g),
places=DECIMAL_PLACE_ACC,
msg="Assert the sum of all entries of 709")
self._check_list_is_decreasing(g)
def test_group1102(self):
g = pp.ALL_GROUPS[1102]
self.assertEqual(1103, len(g), "Assert the length of group 1102 is 1103")
self.assertEqual(1.0000e+09, g[0], "Assert the first entry for group 1102")
self.assertEqual(8.7096e+08, g[3], "Assert the fourth entry for group 1102")
self.assertEqual(1.0000e-5, g[-1], "Assert the last entry for group 1102")
# for regression check the sum of all entries
self.assertAlmostEqual(23889164999.810318, sum(g),
places=DECIMAL_PLACE_ACC,
msg="Assert the sum of all entries of 1102")
self._check_list_is_decreasing(g)
def _check_list_is_decreasing(self, l):
self.assertTrue(all(earlier >= later for earlier, later in zip(l, l[1:])),
"Assert list is in descending order")
|
11480470
|
import numpy as np
def getHprot(f):
"""Return, for each contig in a GFF3 annotation file, the fraction of its genes annotated as 'hypothetical protein'."""
sizes=[]
genes=[]
hprots=[]
p=''
G=0
H=0
with open(f) as inFile:
for line in inFile:
if line.strip()=='##FASTA': # Reached end of annotation
break
else:
if line.strip()=='##gff-version 3' or '##sequence-region' in line:
continue
else:
if line.split()[0]!=p:
genes.append(G)
hprots.append(H)
p=line.split()[0]
G=0
H=0
G+=1
# Detect hypothetical proteins
for t in line.split('\t')[8].split(';'):
if t.split('=')[0]=='product':
prod=t.split('=')[1]
if prod=='hypothetical protein':
H+=1
genes.append(G)
hprots.append(H)
genes=genes[1:]
hprots=hprots[1:]
return np.array(hprots)/np.array(genes)
|
11480487
|
import os
import json
import boto3
from boto3.dynamodb.conditions import Key, Attr
dynamodb = boto3.client('dynamodb')
comprehend = boto3.client('comprehend')
def lambda_handler(event, context):
print('Received event: ' + json.dumps(event, indent=2))
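# expected event shape (hypothetical example): {"ID": "id-1,id-2"}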
ids = event['ID'].split(',')
table_name=os.environ['table_name']
responses = []
for id in ids:
try:
response = dynamodb.scan(
ExpressionAttributeNames={'#ID': 'ID'},
ExpressionAttributeValues={':id' : {'S': id}},
FilterExpression='#ID = :id',
TableName=table_name
)
items = response['Items']
postedtime = items[0]['PostedTime']['S']
if 'Feedback' in items[0]:
feedback = items[0]['Feedback']['S']
response = comprehend.detect_sentiment(Text=feedback, LanguageCode='en')
sentiment = response['Sentiment']
print(sentiment)
response = dynamodb.update_item(
ExpressionAttributeNames={'#ST': 'Sentiment'},
ExpressionAttributeValues={':st' : {'S': sentiment}},
Key={'ID': {'S': id}, 'PostedTime': {'S': postedtime}},
ReturnValues='ALL_NEW',
TableName=table_name,
UpdateExpression='SET #ST = :st'
)
responses.append('{} - {}'.format(response['Attributes']['ID']['S'], response['Attributes']['Sentiment']['S']))
except Exception as e:
print('Actual error is: {0}'.format(e))
return responses
|
11480524
|
from escpos.printer import Usb
from escpos.exceptions import USBNotFoundError
import usb.core
import usb.util
def connectToPrinter():
try:
p = Usb(0x0416, 0x5011, in_ep=81, out_ep=3)
except USBNotFoundError:
p = None
print("Printer not connected")
return p
def printReciept(cart, date, invoiceId, bill, discount):
p = connectToPrinter()
if p is None:
return
'''
p.set(align='center', bold=True, double_height=True, double_width=True)
#p.textln('<NAME>')
p.set(align='center', bold=True, double_height=False, double_width=False)
#p.textln('<NAME>, Karachi')
#p.textln('Phone: 0336 2510 211')
#p.textln('-------------------------------------------')
'''
lines = [line.rstrip('\n') for line in open('data/recieptInfo.txt')]
p.set(align='center', bold=True, double_height=True, double_width=True)
p.textln(lines[0])
p.set(align='center', bold=True, double_height=False, double_width=False)
for l in lines[1:]:
p.textln(l)
p.textln('===============================================')
p.ln(1)
p.textln('Invoice ID: '+str(invoiceId)+' Date: '+ str(date))
p.ln(2)
p.set(align='center', bold=True, double_height=False, double_width=False)
# width of paper -> 48 chars
# Product
p.textln("No |Product |Qty |Price |Discount |Total Price ")
p.textln("------------------------------------------------")
for prd in cart:
# if the name of the product exceeds 7 characters, print only the first 7
if len(prd.name) > 7:
nm = prd.name[:7]
else:
nm = prd.name
p.textln(prepareLine(prd.pid, nm, prd.qty, prd.origPrice, (prd.origPrice - prd.price) * prd.qty, prd.price * prd.qty))
#p.textln(prepareLine(4, 'Brush', 10, 200, 10000))
'''
p.textln("2 | Toothpick | 50 | 20 | 1000 ")
p.textln("578 | Battery | 10 | 100 | 1000 ")
p.textln("89 | Brush | 5 | 40 | 200 ")
'''
p.textln("------------------------------------------------")
p.textln(" Total: "+ str(bill + discount))
p.textln(" Discount: -"+ str(discount))
p.textln(" After Discount: "+ str(bill))
p.ln(9)
p.textln("------------------------------------------------")
p.textln(" Notes ")
p.ln(1)
#p.set(align='center', bold=False, double_height=False, double_width=False)
#p.textln("Ganyani, Kirmani and Allahwala IT Consulting")
#p.image("logo.gif")
#p.barcode('3422323', 'EAN13', 64, 2, '', '')
p.cut(mode='PART')
def prepareLine (pid, name, qty, price, discount, tPrice):
ln = ''
pid = str(pid)
ln = ln + preparePhrase(pid, 3)
ln = ln + preparePhrase(name, 8)
ln = ln + preparePhrase(qty, 5)
ln = ln + preparePhrase(price, 6)
ln = ln + preparePhrase(discount, 9)
ln = ln + preparePhrase(tPrice, 10)
return ln
def preparePhrase (itm, l):
itm = str(itm)
return itm + ' '*(l-len(itm)) + '|'
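# e.g. preparePhrase('Brush', 8) -> 'Brush   |' (pad the field to the given width, then append the column separator)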
#p = connectToPrinter()
#printReciept(p)
|
11480540
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import torch.utils.data as data
from torchvision import datasets, transforms
import os
import pickle
import numpy as np
from PIL import Image
import time
import math
LOAD_DIR = "."
BATCH_SIZE=1024
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") # let torch decide whether to use the GPU
if torch.cuda.is_available():
print("cuda is available")
else:
print("cuda unavailable")
class ConvNet2(nn.Module):
def __init__(self):
super().__init__()
# (3,32,32)
self.conv1 = nn.Sequential(
nn.Conv2d(3,16,5,padding=2),
nn.BatchNorm2d(16,affine=True),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
) # (16,16,16)
self.conv2 = nn.Sequential(
nn.Conv2d(32,64,3,padding=1),
nn.BatchNorm2d(64,affine=True),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
) # (64,8,8)
self.conv3 = nn.Sequential(
nn.Conv2d(64,128,3,padding=1),
nn.BatchNorm2d(128,affine=True),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
) # (128,4,4)
self.fc1 = nn.Sequential(nn.Linear(128*4*4,256),nn.ReLU())
self.fc2 = nn.Sequential(nn.Linear(256,64),nn.ReLU())
self.fc3 = nn.Linear(64,16)
self.conv64 = nn.Sequential(
nn.Conv2d(3,16,5,padding=2),
nn.BatchNorm2d(16,affine=True),
nn.ReLU(),
nn.MaxPool2d(kernel_size=4)
) # (3,64,64) -> (16,16,16); concatenated with the conv1 output to give (32,16,16)
# self.dropout = nn.Dropout(0.25)
def forward(self,x32,x64):
in_size = x32.size(0)
out = torch.cat([self.conv1(x32),self.conv64(x64)],dim=1)
out = self.conv2(out)
out = self.conv3(out)
out = out.view(in_size,-1) # flatten, then feed the fully connected layers
out = self.fc1(out)
# out = self.dropout(out)
out = self.fc2(out)
out = self.fc3(out)
return out
transform = transforms.Compose([transforms.ToTensor()])
def from_ctufile(load_type,video_number,frame_number,ctu_number,layer2):
# https://pytorch-cn.readthedocs.io/zh/latest/package_references/Tensor/
ctu_file = "{}/dataset/pkl/{}/v_{}.pkl".format(LOAD_DIR,load_type,video_number)
f_pkl = open(ctu_file,'rb')
video_dict = pickle.load(f_pkl)
f_pkl.close()
ctu_info = video_dict[frame_number][ctu_number]
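# layer2 selects one of the four 32x32 quadrants of a 64x64 CTU; each quadrant
# contributes four split-depth labels taken from ctu_info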
if layer2 == 0:
label_list = [ctu_info[0],ctu_info[1],ctu_info[4],ctu_info[5]]
elif layer2 == 1:
label_list = [ctu_info[2],ctu_info[3],ctu_info[6],ctu_info[7]]
elif layer2 == 2:
label_list = [ctu_info[8],ctu_info[9],ctu_info[12],ctu_info[13]]
elif layer2 == 3:
label_list = [ctu_info[10],ctu_info[11],ctu_info[14],ctu_info[15]]
else:
raise ValueError("layer2 loading error: layer2 must be in 0-3, got {}".format(layer2))
label = torch.tensor(label_list)
# label = one_hot_label(label_list)
return label
class ImageSet(data.Dataset):
def __init__(self,root):
# absolute paths of all images
self.img_files = []
self.root = root
for img in os.listdir(root):
ctu_numbers_per_frame = img.split('_')[3]
for ctu_number in range(int(ctu_numbers_per_frame)):
for layer2 in range(4):
self.img_files.append((img,ctu_number,layer2))
self.transforms=transform
def __getitem__(self, index):
img = Image.open(os.path.join(self.root,self.img_files[index][0]))
video_number = self.img_files[index][0].split('_')[1]
frame_number = self.img_files[index][0].split('_')[2]
ctu_number = self.img_files[index][1]
layer2 = self.img_files[index][2]
img_width, _ = img.size
img_row = ctu_number // math.ceil(img_width / 64)
img_column = ctu_number % math.ceil(img_width / 64)
start_pixel_x = img_column * 64 + (layer2 % 2)*32
start_pixel_y = img_row * 64 + (layer2 // 2)*32
cropped_img32 = img.crop((start_pixel_x, start_pixel_y, start_pixel_x + 32, start_pixel_y + 32)) # crop the 32x32 sub-block from the extracted frame
cropped_img64 = img.crop((img_column * 64, img_row * 64, img_column * 64 + 64, img_row * 64 + 64))
img.close()
if "train" in self.root:
load_type = "train"
elif "validation" in self.root:
load_type = "validation"
elif "test" in self.root:
load_type = "test"
else:
raise ValueError("load type error: unrecognised dataset root {}".format(self.root))
img_data32 = self.transforms(cropped_img32)
img_data64 = self.transforms(cropped_img64)
cropped_img32.close()
cropped_img64.close()
label = from_ctufile(load_type,video_number,frame_number,str(ctu_number),layer2)
return img_data32,img_data64,label,layer2
def __len__(self):
return len(self.img_files)
test_loader = data.DataLoader(ImageSet("./dataset/img/test/"),batch_size=BATCH_SIZE,shuffle=False)
model = ConvNet2().to(DEVICE)
model.load_state_dict(torch.load('{}/hevc_encoder_model.pt'.format(LOAD_DIR)))
print("loaded model from drive")
print(model)
criterion = nn.CrossEntropyLoss()
def test(model, device, test_loader):
model.load_state_dict(torch.load('hevc_encoder_model.pt'))
model.eval()
test_loss = 0
correct = 0
label = []
for i in range(16):
label.append(str(i))
with torch.no_grad():
for img_data32,img_data64, target,layer2 in test_loader:
img_data32,img_data64, target = img_data32.to(device),img_data64.to(device), target.to(device)
output = model(img_data32,img_data64)
test_loss += criterion(output[:,0:4], target[:,0]).item()+criterion(output[:,4:8], target[:,1]).item()+criterion(output[:,8:12], target[:,2]).item()+criterion(output[:,12:16], target[:,3]).item() # 将一批的损失相加
for i,single_pred in enumerate(output):
pred_0 = torch.argmax(single_pred[0:4])
pred_1 = torch.argmax(single_pred[4:8])
pred_2 = torch.argmax(single_pred[8:12])
pred_3 = torch.argmax(single_pred[12:16])
pred = str(int(pred_0)) + str(int(pred_1)) + str(int(pred_2)) + str(int(pred_3))
if "0" in pred and pred != "0000":
pred = pred.replace("0","1")
if "1" in pred and pred != "1111":
pred = pred.replace("1","2")
if int(layer2[i]) == 0:
label[0],label[1],label[4],label[5] = pred[0],pred[1],pred[2],pred[3]
elif int(layer2[i]) == 1:
if pred == "0000" and label[0] != "0":
pred = "1111"
label[2],label[3],label[6],label[7] = pred[0],pred[1],pred[2],pred[3]
elif int(layer2[i]) == 2:
if pred == "0000" and label[2] != "0":
pred = "1111"
label[8],label[9],label[12],label[13] = pred[0],pred[1],pred[2],pred[3]
else:
if pred == "0000" and label[8] != "0":
pred = "1111"
label[10],label[11],label[14],label[15] = pred[0],pred[1],pred[2],pred[3]
target_0 = int(target[i,0])
target_1 = int(target[i,1])
target_2 = int(target[i,2])
target_3 = int(target[i,3])
if str(pred[0]) == str(target_0):
correct += 1
if str(pred[1]) == str(target_1):
correct += 1
if str(pred[2]) == str(target_2):
correct += 1
if str(pred[3]) == str(target_3):
correct += 1
test_loss /= len(test_loader.dataset)
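# four sub-block predictions per sample, so the percentage factor is 100/4 = 25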
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(test_loss, correct, len(test_loader.dataset)*4,
25. * correct / len(test_loader.dataset)))
test(model, DEVICE, test_loader)
|
11480546
|
import torch
import torch.nn as nn
from pytorch3d.structures import Pointclouds
from pytorch3d.renderer import compositing
from pytorch3d.renderer.points import rasterize_points
class RasterizePointsXYsBlending(nn.Module):
"""
Code inspired from SynSin: End-to-end View Synthesis from a Single Image (CVPR 2020)
Rasterizes a set of points using a differentiable renderer. Points are
accumulated in a z-buffer using an accumulation function
defined in opts.accumulation and are normalised with a value M=opts.M.
Inputs:
- pts3D: the 3D points to be projected (BxNx3)
- src: the corresponding features (BxNxK) where K is # of features
- n_filters: size of feature
- radius: radius of where pixels project to (in pixels)
- img_size: size of the image being created
- points_per_pixel: number of values stored in z-buffer per pixel
Outputs:
- transformed_src_alphas: features projected and accumulated
in the new view
"""
def __init__(self, n_filters=64, radius=1.5, img_size=256,
points_per_pixel=8, gamma=1.0):
super().__init__()
self.radius = radius
self.img_size = img_size
self.points_per_pixel = points_per_pixel
self.gamma = gamma
def forward(self, pts, features):
# Make sure points are 3D and the point and feature counts match
assert pts.size(2) == 3
assert pts.size(1) == features.size(1)
pts[:, :, 0] = - pts[:, :, 0]
pts[:, :, 1] = - pts[:, :, 1]
radius = float(self.radius) / float(self.img_size) * 2.0
params = compositing.CompositeParams(radius=radius)
pointcloud = Pointclouds(points=pts, features=features)
points_idx, _, dist = rasterize_points(pointcloud, self.img_size, radius, self.points_per_pixel)
dist = dist / pow(radius, 2)
alphas = (1 - dist.clamp(max=1, min=1e-3).pow(0.5).pow(self.gamma).permute(0, 3, 1, 2))
transformed_feature_alphas = compositing.alpha_composite(points_idx.permute(0, 3, 1, 2).long(),
alphas,
pointcloud.features_packed().permute(1, 0),
params
)
return transformed_feature_alphas
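# Minimal usage sketch (batch and point counts are assumptions):
#   rast = RasterizePointsXYsBlending(img_size=256, points_per_pixel=8)
#   out = rast(torch.rand(2, 1024, 3), torch.rand(2, 1024, 64))  # -> (2, 64, 256, 256)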
class SynSinRenderer(nn.Module):
"""
Code inspired from SynSin: End-to-end View Synthesis from a Single Image (CVPR 2020)
Differentiable rendering of 3D points and features
Inputs:
- pts3D: the 3D points to be projected (BxNx3)
- src: the corresponding features (BxNxK) where K is # of features
- obj_T_cam_pose: Extrinsic camera matrix to project points to (Bx4x4)
- n_filters: size of feature
- radius: radius of where pixels project to (in pixels)
- img_size: size of the image being created
- points_per_pixel: number of values stored in z-buffer per pixel
- gamma: factor for alpha compositing
Outputs:
- transformed_src_alphas: features projected and accumulated
in the new view
"""
def __init__(self, n_filters=64, radius=4, img_size=256, points_per_pixel=128, gamma=1.0):
super(SynSinRenderer, self).__init__()
self.n_filters = n_filters
self.radius = radius
self.img_size = img_size
self.points_per_pixel = points_per_pixel
self.rasterizer = RasterizePointsXYsBlending(n_filters=n_filters,
radius=radius,
img_size=img_size,
points_per_pixel=points_per_pixel,
gamma=gamma)
K = torch.Tensor([[2, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]]).float()
self.register_buffer('K', K)
def forward(self, points, features, obj_T_cam_pose):
# Homogenous coordinates
if points.shape[-1] == 3:
points = torch.cat((points, torch.ones_like(points[:, :, 0:1])),-1)
point_set_homog = points.permute(0, 2, 1)
# Transform into a new view
cam_points = (torch.inverse(obj_T_cam_pose).float() @ point_set_homog)
im_coords = self.K @ cam_points
# Normalize
z = im_coords[:, 2:3, :]
im_coords_homog = torch.cat((im_coords[:, 0:2, :] / z, im_coords[:, 2:3, :]), 1)
points = im_coords_homog.permute(0, 2, 1)
# Rasterize points
feature_images = self.rasterizer(points, features)
return feature_images
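# Minimal usage sketch (identity extrinsic pose; all sizes are assumptions):
#   renderer = SynSinRenderer(n_filters=64, img_size=256)
#   pose = torch.eye(4).unsqueeze(0)
#   imgs = renderer(torch.rand(1, 1024, 3), torch.rand(1, 1024, 64), pose)  # -> (1, 64, 256, 256)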
|
11480548
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import transformer
from .adj_decoding import (
bron_kerbosch_decode,
bron_kerbosch_pivoting_decode,
brute_force_adj_decode,
directed_trigger_graph_decode,
directed_trigger_graph_incremental_decode,
linked_decode,
)
from .biaffine import (
Biaffine,
SymmetricBiaffine,
SymmetricWeightBiaffine,
SymmetricWeightComponentBiaffine,
Triaffine,
)
from .doc_info import (
DocArgRelInfo,
DocSpanInfo,
get_doc_arg_rel_info_list,
get_doc_span_info_list,
get_span_mention_info,
)
from .dropout import SharedDropout
from .event_table import (
EventTable,
EventTableForArgRel,
EventTableForSigmoidMultiArgRel,
EventTableWithRNNCell,
)
from .gnn import GAT, GCN, normalize_adj
from .mlp import MLP, SharedDropoutMLP
from .ner_model import (
BertForBasicNER,
LSTMBiaffineNERModel,
LSTMCRFAttNERModel,
LSTMCRFNERModel,
LSTMMaskedCRFNERModel,
NERModel,
judge_ner_prediction,
)
def get_batch_span_label(num_spans, cur_span_idx_set, device):
# prepare span labels for this field and this path
span_field_labels = [
1 if span_idx in cur_span_idx_set else 0 for span_idx in range(num_spans)
]
batch_field_label = torch.tensor(
span_field_labels, dtype=torch.long, device=device, requires_grad=False
) # [num_spans], val \in {0, 1}
return batch_field_label
def append_top_span_only(
last_token_path_list, field_idx, field_idx2span_token_tup2dranges
):
new_token_path_list = []
span_token_tup2dranges = field_idx2span_token_tup2dranges[field_idx]
token_min_drange_list = [
(token_tup, dranges[0]) for token_tup, dranges in span_token_tup2dranges.items()
]
token_min_drange_list.sort(key=lambda x: x[1])
for last_token_path in last_token_path_list:
new_token_path = list(last_token_path)
if len(token_min_drange_list) == 0:
new_token_path.append(None)
else:
token_tup = token_min_drange_list[0][0]
new_token_path.append(token_tup)
new_token_path_list.append(new_token_path)
return new_token_path_list
def append_all_spans(last_token_path_list, field_idx, field_idx2span_token_tup2dranges):
new_token_path_list = []
span_token_tup2dranges = field_idx2span_token_tup2dranges[field_idx]
for last_token_path in last_token_path_list:
for token_tup in span_token_tup2dranges.keys():
new_token_path = list(last_token_path)
new_token_path.append(token_tup)
new_token_path_list.append(new_token_path)
if len(span_token_tup2dranges) == 0: # ensure every last path will be extended
new_token_path = list(last_token_path)
new_token_path.append(None)
new_token_path_list.append(new_token_path)
return new_token_path_list
class AttentiveReducer(nn.Module):
def __init__(self, hidden_size, dropout=0.1):
super(AttentiveReducer, self).__init__()
self.hidden_size = hidden_size
self.att_norm = math.sqrt(self.hidden_size)
self.fc = nn.Linear(hidden_size, 1, bias=False)
self.att = None
self.layer_norm = transformer.LayerNorm(hidden_size)
self.dropout = nn.Dropout(dropout)
def forward(self, batch_token_emb, masks=None, keepdim=False):
# batch_token_emb: Size([*, seq_len, hidden_size])
# masks: Size([*, seq_len]), 1: normal, 0: pad
query = self.fc.weight
if masks is None:
att_mask = None
else:
att_mask = masks.unsqueeze(-2) # [*, 1, seq_len]
# batch_att_emb: Size([*, 1, hidden_size])
# self.att: Size([*, 1, seq_len])
batch_att_emb, self.att = transformer.attention(
query, batch_token_emb, batch_token_emb, mask=att_mask
)
batch_att_emb = self.dropout(self.layer_norm(batch_att_emb))
if keepdim:
return batch_att_emb
else:
return batch_att_emb.squeeze(-2)
def extra_repr(self):
return "hidden_size={}, att_norm={}".format(self.hidden_size, self.att_norm)
class SentencePosEncoder(nn.Module):
def __init__(self, hidden_size, max_sent_num=100, dropout=0.1):
super(SentencePosEncoder, self).__init__()
self.embedding = nn.Embedding(max_sent_num, hidden_size)
self.layer_norm = transformer.LayerNorm(hidden_size)
self.dropout = nn.Dropout(dropout)
def forward(self, batch_elem_emb, sent_pos_ids=None):
if sent_pos_ids is None:
num_elem = batch_elem_emb.size(-2)
sent_pos_ids = torch.arange(
num_elem,
dtype=torch.long,
device=batch_elem_emb.device,
requires_grad=False,
)
elif not isinstance(sent_pos_ids, torch.Tensor):
sent_pos_ids = torch.tensor(
sent_pos_ids,
dtype=torch.long,
device=batch_elem_emb.device,
requires_grad=False,
)
batch_pos_emb = self.embedding(sent_pos_ids)
out = batch_elem_emb + batch_pos_emb
out = self.dropout(self.layer_norm(out))
return out
class MentionTypeEncoder(nn.Module):
def __init__(self, hidden_size, num_ment_types, dropout=0.1):
super(MentionTypeEncoder, self).__init__()
self.embedding = nn.Embedding(num_ment_types, hidden_size)
self.layer_norm = transformer.LayerNorm(hidden_size)
self.dropout = nn.Dropout(dropout)
def forward(self, batch_mention_emb, mention_type_ids):
if not isinstance(mention_type_ids, torch.Tensor):
mention_type_ids = torch.tensor(
mention_type_ids,
dtype=torch.long,
device=batch_mention_emb.device,
requires_grad=False,
)
batch_mention_type_emb = self.embedding(mention_type_ids)
out = batch_mention_emb + batch_mention_type_emb
out = self.dropout(self.layer_norm(out))
return out
class MentionTypePluser(nn.Module):
def __init__(self, hidden_size, num_ment_types):
super().__init__()
self.embedding = nn.Embedding(num_ment_types, hidden_size)
def forward(self, batch_mention_emb, mention_type_ids):
if not isinstance(mention_type_ids, torch.Tensor):
mention_type_ids = torch.tensor(
mention_type_ids,
dtype=torch.long,
device=batch_mention_emb.device,
requires_grad=False,
)
batch_mention_type_emb = self.embedding(mention_type_ids)
out = batch_mention_emb + batch_mention_type_emb
return out
class MentionTypeConcatEncoder(nn.Module):
def __init__(self, hidden_size, num_ment_types, dropout=0.1):
super().__init__()
self.embedding = nn.Embedding(num_ment_types, hidden_size)
self.dropout = nn.Dropout(dropout)
def forward(self, batch_mention_emb, mention_type_ids):
if not isinstance(mention_type_ids, torch.Tensor):
mention_type_ids = torch.tensor(
mention_type_ids,
dtype=torch.long,
device=batch_mention_emb.device,
requires_grad=False,
)
batch_mention_type_emb = self.embedding(mention_type_ids)
out = torch.cat([batch_mention_emb, batch_mention_type_emb], dim=-1)
out = self.dropout(out)
return out
class MentionTypeEncoderWithMentionEmbReturning(nn.Module):
def __init__(self, hidden_size, num_ment_types, dropout=0.1):
super().__init__()
self.embedding = nn.Embedding(num_ment_types, hidden_size)
self.layer_norm = transformer.LayerNorm(hidden_size)
self.dropout = nn.Dropout(dropout)
def forward(self, batch_mention_emb, mention_type_ids):
if not isinstance(mention_type_ids, torch.Tensor):
mention_type_ids = torch.tensor(
mention_type_ids,
dtype=torch.long,
device=batch_mention_emb.device,
requires_grad=False,
)
batch_mention_type_emb = self.embedding(mention_type_ids)
out = batch_mention_emb + batch_mention_type_emb
out = self.dropout(self.layer_norm(out))
return out, batch_mention_type_emb
class EmbPlusEncoder(nn.Module):
def __init__(self, hidden_size, dropout=0.1):
super(EmbPlusEncoder, self).__init__()
self.layer_norm = transformer.LayerNorm(hidden_size)
self.dropout = nn.Dropout(dropout)
def forward(self, rep_emb1, rep_emb2):
out = rep_emb1 + rep_emb2
out = self.dropout(self.layer_norm(out))
return out
class GatedFusion(nn.Module):
r"""
Reference:
- ACL2020, Document-Level Event Role Filler Extraction using Multi-Granularity Contextualized Encoding
"""
def __init__(self, n_in):
super().__init__()
self.n_in = n_in
self.hidden2scalar1 = nn.Linear(self.n_in, 1)
self.hidden2scalar2 = nn.Linear(self.n_in, 1)
def forward(self, hidden1, hidden2):
gate_alpha = torch.sigmoid(
self.hidden2scalar1(hidden1) + self.hidden2scalar2(hidden2)
)
out = gate_alpha * hidden1 + (1 - gate_alpha) * hidden2
return out
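# Hedged usage sketch (not from the original file; shapes are illustrative):
# GatedFusion blends two same-shaped representations with a learned scalar
# gate computed per position.
#
#   fusion = GatedFusion(n_in=768)
#   h_sent = torch.randn(4, 16, 768)  # e.g. sentence-level encoding
#   h_doc = torch.randn(4, 16, 768)   # e.g. document-level encoding
#   fused = fusion(h_sent, h_doc)     # same shape as the inputs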
|
11480568
|
import sys
import os.path
def locate_spy_module(module_name):
"""Tries to find shellpy module on filesystem. Given a module name it tries to locate it in pythonpath. It looks
for a module with the same name and __init__.spy inside of it
:param module_name: Filename without extension
:return: Path to shellpy file or None if not found
"""
for python_path in sys.path:
possible_module_path = os.path.join(python_path, module_name)
if os.path.exists(possible_module_path):
if os.path.exists(os.path.join(possible_module_path, '__init__.spy')):
return possible_module_path
return None
def locate_spy_file(file_name):
"""Tries to find shellpy file on filesystem. Given a filename without extension it tries to locate it with .spy
extension in pythonpath
:param file_name: Filename without extension
:return: Path to shellpy file or None if not found
"""
for python_path in sys.path:
possible_file_path = os.path.join(python_path, file_name + '.spy')
if os.path.exists(possible_file_path):
return possible_file_path
return None
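# Hedged usage sketch (illustrative names): both helpers scan sys.path, so a
# package directory "mypkg" holding an __init__.spy, or a single "script.spy"
# file, would be resolved like this:
#
#   module_path = locate_spy_module('mypkg')  # dir containing __init__.spy, or None
#   file_path = locate_spy_file('script')     # path to 'script.spy', or None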
|
11480570
|
from django.core.management.base import BaseCommand
from ... import models
from ...mixins import VerbosityAwareOutputMixin
from ...settings import djstripe_settings
class Command(VerbosityAwareOutputMixin, BaseCommand):
"""Command to process all Events.
Optional arguments are provided to limit the number of Events processed.
    Note: this is only guaranteed to go back at most 30 days based on the
    current limitation of stripe's events API. See: https://stripe.com/docs/api/events
"""
help = (
"Process all Events. Use optional arguments to limit the Events to process. "
"Note: this is only guaranteed go back at most 30 days based on the current "
"limitation of stripe's events API. See: https://stripe.com/docs/api/events"
)
def add_arguments(self, parser):
"""Add optional arugments to filter Events by."""
# Use a mutually exclusive group to prevent multiple arguments being
# specified together.
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--ids",
nargs="*",
help="An optional space separated list of specific Event IDs to sync.",
)
group.add_argument(
"--failed",
action="store_true",
help="Syncs and processes only the events that have failed webhooks.",
)
group.add_argument(
"--type",
help=(
"A string containing a specific event name,"
" or group of events using * as a wildcard."
" The list will be filtered to include only"
" events with a matching event property."
),
)
def handle(self, *args, **options):
"""Try to process Events listed from the API."""
# Set the verbosity to determine how much we output, if at all.
self.set_verbosity(options)
event_ids = options["ids"]
failed = options["failed"]
type_filter = options["type"]
# Args are mutually exclusive,
# so output what we are doing based on that assumption.
if failed:
self.output("Processing all failed events")
elif type_filter:
self.output(
"Processing all events that match {filter}".format(filter=type_filter)
)
elif event_ids:
self.output("Processing specific events {events}".format(events=event_ids))
else:
self.output("Processing all available events")
# Either use the specific event IDs to retrieve data, or use the api_list
# if no specific event IDs are specified.
if event_ids:
listed_events = (
models.Event.stripe_class.retrieve(
id=event_id, api_key=djstripe_settings.STRIPE_SECRET_KEY
)
for event_id in event_ids
)
else:
list_kwargs = {}
if failed:
list_kwargs["delivery_success"] = False
if type_filter:
list_kwargs["type"] = type_filter
listed_events = models.Event.api_list(**list_kwargs)
self.process_events(listed_events)
def process_events(self, listed_events):
# Process each listed event. Capture failures and continue,
# outputting debug information as verbosity dictates.
count = 0
total = 0
for event_data in listed_events:
try:
total += 1
event = models.Event.process(data=event_data)
count += 1
self.verbose_output(f"\tSynced Event {event.id}")
except Exception as exception:
self.verbose_output(f"\tFailed processing Event {event_data['id']}")
self.output(f"\t{exception}")
self.verbose_traceback()
if total == 0:
self.output("\t(no results)")
else:
self.output(f"\tProcessed {count} out of {total} Events")
|
11480578
|
import numpy as np

def temperature_statistics(T):
    # Minimal completion of the original stub ("Your code goes here!"):
    # compute the mean and standard deviation of the temperature samples T.
    T = np.asarray(T, dtype=float)
    mean = T.mean()
    std = T.std()
    return mean, std
|
11480583
|
import os
from setuptools import setup, find_packages
path = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(path, "README.md"), "r") as f:
readme = f.read()
setup(
name="countrynames",
version="1.10.6",
description="A library to map country names to ISO codes.",
long_description=readme,
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords="names countries iso country",
author="<NAME>",
author_email="<EMAIL>",
url="http://github.com/occrp/countrynames",
license="MIT",
packages=find_packages(exclude=["ez_setup", "examples", "test"]),
namespace_packages=[],
package_data={"": ["countrynames/data.yaml", "countrynames/py.typed"]},
include_package_data=True,
zip_safe=False,
test_suite="nose.collector",
install_requires=["normality", "python-Levenshtein", "pyyaml"],
extras_require={
"dev": ["mypy", "wheel", "twine", "nose", "types-PyYAML"],
},
tests_require=[],
entry_points={},
)
|
11480609
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import *
import geojson as gj
import logging
import bson.objectid as boi
import emission.core.wrapper.common_place as ecwcp
import emission.core.get_database as edb
import pykov as pk
import emission.storage.decorations.common_trip_queries as esdctp
#################################################################################
############################ database functions #################################
#################################################################################
def save_common_place(common_place):
edb.save(edb.get_common_place_db(), common_place)
def get_common_place_from_db(common_place_id):
db = edb.get_common_place_db()
json_obj = db.find_one({"_id" : common_place_id})
return make_common_place(json_obj)
def get_all_common_places_for_user(user_id):
db = edb.get_common_place_db()
return db.find({"user_id" : user_id})
def get_common_place_at_location(loc):
db = edb.get_common_place_db()
return make_common_place(db.find_one({"location": loc}))
def make_new_common_place(user_id, loc):
place = ecwcp.CommonPlace()
place.user_id = user_id
place.location = loc
return place
def make_common_place(props):
return ecwcp.CommonPlace(props)
def clear_existing_places(user_id):
db = edb.get_common_place_db()
db.remove({'user_id': user_id})
################################################################################
def create_places(list_of_cluster_data, user_id):
places_to_successors = {}
places_dct = {}
logging.debug("About to create places for %d clusters" % len(list_of_cluster_data))
for dct in list_of_cluster_data:
logging.debug("Current coords = %s" % dct)
start_name = dct['start']
end_name = dct['end']
start_loc = gj.Point(dct['start_coords'])
end_loc = gj.Point(dct['end_coords'])
start_loc_str = gj.dumps(start_loc, sort_keys=True)
end_loc_str = gj.dumps(end_loc, sort_keys=True)
if start_loc_str not in places_to_successors:
places_to_successors[start_loc_str] = []
else:
places_to_successors[start_loc_str].append(end_loc)
if end_loc_str not in places_to_successors:
places_to_successors[end_loc_str] = []
if start_loc_str not in places_dct:
places_dct[start_loc_str] = dct["start_places"]
if end_loc_str not in places_dct:
places_dct[end_loc_str] = dct["end_places"]
clear_existing_places(user_id)
logging.debug("After creating map, number of places is %d" % len(places_to_successors))
for loc_str in places_to_successors.keys():
start = make_new_common_place(user_id, gj.loads(loc_str))
logging.debug("Adding %d places for this place" % len(places_dct[loc_str]))
start.places = places_dct[loc_str]
save_common_place(start)
for loc_str, successors in places_to_successors.items():
start = get_common_place_at_location(gj.loads(loc_str))
successor_places = [get_common_place_at_location(loc) for loc in successors]
start.successors = successor_places
save_common_place(start)
### Graph queries
def get_succesor(user_id, place_id, time):
temp = pk.Vector()
day = time.weekday()
place = get_common_place_from_db(place_id)
for suc in place["successors"]:
trip = esdctp.get_common_trip_from_db(user_id, place_id, suc)
for temp_hour in range(time.hour, esdctp.HOURS_IN_DAY):
counter_key = ("%s" % suc, temp_hour)
temp[counter_key] = trip.probabilites[day, temp_hour]
return boi.ObjectId(temp.choose())
def has_succesor(user_id, place_id, time):
day = time.weekday()
place = get_common_place_from_db(place_id)
for suc in place["successors"]:
trip = esdctp.get_common_trip_from_db(user_id, place_id, suc)
for temp_hour in range(time.hour, esdctp.HOURS_IN_DAY):
if trip.probabilites[day, temp_hour] > 0:
return True
return False
|
11480611
|
import re
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import confusion_matrix, roc_curve, precision_recall_fscore_support
import matplotlib.pyplot as plt
import numpy as np
import torch
class TensorIndexDataset(TensorDataset):
def __getitem__(self, index):
"""
        Returns the index in addition to the actual data item (useful when assigning a prediction to an item)
"""
return index, super().__getitem__(index)
def text_to_train_tensors(texts, tokenizer, max_seq_length):
train_tokens = list(map(lambda t: ['[CLS]'] + tokenizer.tokenize(t)[:max_seq_length - 1], texts))
train_tokens_ids = list(map(tokenizer.convert_tokens_to_ids, train_tokens))
train_tokens_ids = pad_sequences(train_tokens_ids, maxlen=max_seq_length, truncating="post", padding="post",
dtype="int")
train_masks = [[float(i > 0) for i in ii] for ii in train_tokens_ids]
# to tensors
# train_tokens_tensor, train_masks_tensor
return torch.tensor(train_tokens_ids), torch.tensor(train_masks)
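# Hedged usage sketch: given a BERT-style tokenizer (assumed to expose
# `tokenize` and `convert_tokens_to_ids`), the helper above returns padded
# token-id and attention-mask tensors of shape (len(texts), max_seq_length):
#
#   tokens_tensor, masks_tensor = text_to_train_tensors(
#       ["first document", "second document"], tokenizer, max_seq_length=128)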
def to_dataloader(texts, extras, ys,
tokenizer,
max_seq_length,
batch_size,
dataset_cls=TensorDataset,
sampler_cls=RandomSampler):
"""
Convert raw input into PyTorch dataloader
"""
#train_y = train_df[labels].values
# Labels
train_y_tensor = torch.tensor(ys).float()
if texts is not None and extras is not None:
# All features
train_tokens_tensor, train_masks_tensor = text_to_train_tensors(texts, tokenizer, max_seq_length)
train_extras_tensor = torch.tensor(extras, dtype=torch.float)
train_dataset = dataset_cls(train_tokens_tensor, train_masks_tensor, train_extras_tensor, train_y_tensor)
elif texts is not None and extras is None:
# Text only
train_tokens_tensor, train_masks_tensor = text_to_train_tensors(texts, tokenizer, max_seq_length)
train_dataset = dataset_cls(train_tokens_tensor, train_masks_tensor, train_y_tensor)
elif texts is None and extras is not None:
train_extras_tensor = torch.tensor(extras, dtype=torch.float)
train_dataset = dataset_cls(train_extras_tensor, train_y_tensor)
else:
        raise ValueError('Either texts or extras must be set.')
train_sampler = sampler_cls(train_dataset)
return DataLoader(train_dataset, sampler=train_sampler, batch_size=batch_size)
def get_extras_gender(df, extra_cols, author2vec, author2gender, with_vec=True, with_gender=True, on_off_switch=False):
"""
Build matrix for extra data (i.e. author embeddings + gender)
"""
if with_vec:
AUTHOR_DIM = len(next(iter(author2vec.values())))
if on_off_switch:
AUTHOR_DIM += 1 # One additional dimension of binary (1/0) if embedding is available
else:
AUTHOR_DIM = 0
if with_gender:
GENDER_DIM = len(next(iter(author2gender.values())))
else:
GENDER_DIM = 0
extras = np.zeros((len(df), len(extra_cols) + AUTHOR_DIM + GENDER_DIM))
vec_found_selector = [False] * len(df)
gender_found_selector = [False] * len(df)
vec_found_count = 0
gender_found_count = 0
for i, authors in enumerate(df['authors']):
# simple extras
extras[i][:len(extra_cols)] = df[extra_cols].values[i]
# author vec
if with_vec:
for author in authors.split(';'):
if author in author2vec:
if on_off_switch:
extras[i][len(extra_cols):len(extra_cols) + AUTHOR_DIM - 1] = author2vec[author]
extras[i][len(extra_cols) + AUTHOR_DIM] = 1
else:
extras[i][len(extra_cols):len(extra_cols)+AUTHOR_DIM] = author2vec[author]
vec_found_count += 1
vec_found_selector[i] = True
break
# author gender
if with_gender:
for author in authors.split(';'):
first_name = author.split(' ')[0]
if first_name in author2gender:
extras[i][len(extra_cols)+AUTHOR_DIM:] = author2gender[first_name]
gender_found_count += 1
gender_found_selector[i] = True
break
return extras, vec_found_count, gender_found_count, vec_found_selector, gender_found_selector
def get_best_thresholds(labels, test_y, outputs, plot=False):
"""
Hyper parameter search for best classification threshold
"""
t_max = [0] * len(labels)
f_max = [0] * len(labels)
for i, label in enumerate(labels):
ts = []
fs = []
for t in np.linspace(0.1, 0.99, num=50):
p, r, f, _ = precision_recall_fscore_support(test_y[:,i], np.where(outputs[:,i]>t, 1, 0), average='micro')
ts.append(t)
fs.append(f)
if f > f_max[i]:
f_max[i] = f
t_max[i] = t
if plot:
print(f'LABEL: {label}')
print(f'f_max: {f_max[i]}')
print(f't_max: {t_max[i]}')
plt.scatter(ts, fs)
plt.show()
return t_max, f_max
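# Hedged usage sketch: `outputs` holds per-label scores in [0, 1] and `test_y`
# the binary gold matrix, both shaped (n_samples, n_labels); the search returns
# one decision threshold per label plus the micro-F1 reached at it:
#
#   t_max, f_max = get_best_thresholds(labels, test_y, outputs, plot=False)
#   predictions = np.where(outputs > np.array(t_max), 1, 0)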
def nn_output_to_submission(first_line, df, outputs, output_ids, t_max, labels, most_popular_label):
"""
Convert BERT-output into submission format (only a single task)
"""
no_label = 0
lines = [first_line]
for idx in output_ids:
pred_labels = []
for i, label in enumerate(labels):
if outputs[idx][i] > t_max[i]:
label = re.sub(r'^([-]+)', '', label) # remove leading -
pred_labels.append(label)
if len(pred_labels) == 0:
no_label += 1
# If no label was predicted -> just use most popular
pred_labels = most_popular_label
else:
pred_labels = '\t'.join(pred_labels)
isbn = df['isbn'].values[idx]
lines.append(f'{isbn}\t{pred_labels}')
return lines, no_label
|
11480680
|
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.db import transaction, IntegrityError
from core.models import Person
class Command(BaseCommand):
help = 'Creates the data management user and the admin user'
def add_arguments(self, parser):
parser.add_argument('datamanager_user_name', type=str)
parser.add_argument('datamanager_full_name', type=str)
parser.add_argument('datamanager_password', type=str)
parser.add_argument('admin_password', type=str)
parser.add_argument('--only-if-no-people', action='store_true')
def handle(self, *args, **options):
if options['only_if_no_people'] and Person.objects.count() > 0:
print('There are people in the database - not creating anyone else')
return
if not check_options(options):
return
datamanager_username = options['datamanager_user_name']
datamanager_full_name = options['datamanager_full_name']
        datamanager_password = options['datamanager_password']
try:
create_datamanager(datamanager_username, datamanager_full_name, datamanager_password)
print('Created: data manager user')
except IntegrityError:
print('Could not create the data manager')
User.objects.create_superuser('admin', password=options['admin_password'])
print('Created: admin user')
def check_options(options):
required_options = ['datamanager_user_name', 'datamanager_full_name', 'datamanager_password', 'admin_password']
valid = True
for required_option in required_options:
if options[required_option] == '':
valid = False
print(f'{required_option} cannot be an empty string')
return valid
@transaction.atomic
def create_datamanager(username, full_name, password):
try:
user = User.objects.create_user(username, password=password)
except IntegrityError as e:
print(f'Error: integrity error. Please check that the user "{username}" does not already exist')
raise e
try:
Person.objects.create(full_name=full_name, user=user)
except IntegrityError as e:
        print(f'Error: integrity error. Please make sure that a person with full_name="{full_name}" does not already exist')
raise e
|
11480685
|
import random
from tqdm.autonotebook import tqdm
SEED = 11690
def _load_assorted_mistakes(file_path):
"""load assorted mistakes data; a dict of vocab along with the number of possible replacement candidates
"""
opfile = open(file_path, "r")
mistakes_vocab = {}
for i, line in enumerate(opfile):
if (i != 0):
try:
word, count = line.strip().split("\t")
mistakes_vocab[word] = count
except:
pass
opfile.close()
# print(mistakes_vocab)
return mistakes_vocab
def _load_assorted_mistakes_mappings(file_path):
"""load mistakes mappings; a dict of vocab along with their possible replacement candidates
"""
mistakes_mappings = {}
opfile = open(file_path, "r")
for line in opfile:
if line:
error, correction = line.strip().split("\t")
try:
mistakes_mappings[correction].append(error)
except:
mistakes_mappings[correction] = [error]
opfile.close()
return mistakes_mappings
def _calculate_mistaketoken_overlap(original_sentences, mistakes_vocab, return_mode=False):
"""
find overlap
    to check how many tokens (space separated) in original_sentences
match the word-tokens in the misspellings vocab loaded above
"""
overlap_words, overlap_count, total_count = {}, 0, 0
for line in tqdm(original_sentences):
words = line.strip().split()
for word in words:
total_count += 1
if word in mistakes_vocab:
try:
overlap_words[word] += 1
except:
overlap_words[word] = 1
overlap_count = sum([*overlap_words.values()])
overlap_percent = 100 * overlap_count / total_count
print(f"unique tokens overlapped with replacement lookup: {len(overlap_words)}")
print(f"total tokens overlapped with replacement lookup: {overlap_count}")
print("overlap percent wrt original_sentences: {:.4f}".format(overlap_percent))
print("overlap percent wrt mistakes_vocab: {:.4f}".format(100 * len(overlap_words) / len(mistakes_vocab)))
if return_mode:
return overlap_words, overlap_count, total_count, overlap_percent
return
def noisyfy_word_tokens(original_sentences,
mistakes_vocab,
mistakes_mappings,
expected_prob,
print_stats=True,
min_len=1):
"""
inject replacements from mistakes_vocab
    expected_prob is the probability of mistakeful tokens you want in your dataset
    after running the noise injection step; first the token overlap percentage
    is computed, as only overlapped tokens can be replaced. Then the chance of
    replacing an overlapped token is calculated using expected_prob & overlap_percentage
"""
assert 0.0 < expected_prob < 1.0
print("total lines in inp to noisyfy_word_tokens: {}".format(len(original_sentences)))
print("total tokens in inp to noisyfy_word_tokens: {}".format(
sum([len(line.strip().split()) for line in original_sentences])))
# print("------------------------------------")
overlap_words, overlap_count, total_count, overlap_percent = \
_calculate_mistaketoken_overlap(original_sentences,
mistakes_vocab,
return_mode=True)
# print(f"#overlap_count:{overlap_count}, #total_count:{total_count}, #overlap_percent:{overlap_percent}")
# print("------------------------------------")
    # prob is chosen so that prob * overlap_percent matches expected_prob (e.g. 15%)
prob = expected_prob / (overlap_percent / 100)
print("{:.4f}% of overlapped tokens will get replaced to "
"match the total % of misspellings to {:.4f}%".format(100 * prob, 100 * expected_prob))
get_noisy_token = lambda token: random.choice(mistakes_mappings[token]) \
if (token in mistakes_vocab and random.uniform(0, 1) <= prob and len(token) > min_len) \
else token
error_original_pair = [
(" ".join([get_noisy_token(token) for token in line.split()]),
" ".join([token for token in line.split()])) \
for line in original_sentences
]
if print_stats:
# print some examples error sentences
"""
for i, pair in enumerate(error_original_pair):
if(i>=15): break
print(pair[0]+"\n"+pair[1])
"""
# how many tokens did we replace??
difflen = [sum([1 if a != b else 0 for a, b in zip(error.split(), original.split())]) \
for (error, original) in error_original_pair]
originallen = [len(original.split()) for (_, original) in error_original_pair]
print(f"Percentage of tokens that actually got replaced "
f"{sum(difflen)}/{sum(originallen)}={100 * sum(difflen) / sum(originallen):.4f}%")
# how many unique tokens did we replace?
diffvocabdict = {}
for (error, original) in error_original_pair:
for a, b in zip(error.split(), original.split()):
if a != b:
try:
diffvocabdict[b] += 1
except:
diffvocabdict[b] = 1
print("No of tokens in mistakes_mappings queried: {}". \
format(len(set([*mistakes_mappings.keys()]).intersection(set([*diffvocabdict.keys()])))))
return [pair[0] for pair in error_original_pair]
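# Worked example of the rescaling above (illustrative numbers): with
# expected_prob = 0.15 and an overlap_percent of 60, prob = 0.15 / 0.60 = 0.25,
# i.e. each overlapped token is replaced with 25% chance so that roughly 15%
# of all tokens end up noisy.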
|
11480697
|
from copy import copy
from django.urls import reverse
from resource_tracker.models import ResourceGroupAttributeDefinition, ResourcePool, ResourceGroup
from tests.test_resource_tracker.base_test_resource_tracker import BaseTestResourceTracker
class TestResourceGroupAttributeViews(BaseTestResourceTracker):
def setUp(self):
super(TestResourceGroupAttributeViews, self).setUp()
def test_resource_group_attribute_create(self):
args = {
"resource_group_id": self.rg_physical_servers.id,
}
url = reverse('resource_tracker:resource_group_attribute_create', kwargs=args)
# test GET
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.assertTrue("resource_group" in response.context)
# test POST without producer or consumer
new_name = "new_attribute_name"
data = {
"name": new_name
}
number_attribute_before = ResourceGroupAttributeDefinition.objects.all().count()
response = self.client.post(url, data=data)
self.assertEqual(302, response.status_code)
self.assertEqual(number_attribute_before + 1, ResourceGroupAttributeDefinition.objects.all().count())
self.assertTrue(ResourceGroupAttributeDefinition.objects.filter(name="new_attribute_name",
resource_group=self.rg_physical_servers).exists())
# test POST with producer
new_name = "new_attribute_name_2"
data = {
"name": new_name,
"produce_for": self.rp_vcenter_vcpu_attribute.id
}
response = self.client.post(url, data=data)
self.assertEqual(302, response.status_code)
self.assertTrue(ResourceGroupAttributeDefinition.objects.filter(name="new_attribute_name_2",
resource_group=self.rg_physical_servers).exists())
target_rga = ResourceGroupAttributeDefinition.objects.get(name="new_attribute_name_2",
resource_group=self.rg_physical_servers)
self.assertEqual(target_rga.produce_for, self.rp_vcenter_vcpu_attribute)
# test POST with already exist attribute
response = self.client.post(url, data=data)
self.assertEqual(200, response.status_code)
self.assertEqual(f"Attribute {new_name} already exist in {self.rg_physical_servers.name}",
response.context['form'].errors['name'][0])
def test_cannot_create_resource_group_attribute_when_logout(self):
self.client.logout()
args = {
"resource_group_id": self.rg_physical_servers.id,
}
url = reverse('resource_tracker:resource_group_attribute_create', kwargs=args)
# test GET
response = self.client.get(url)
self.assertEqual(302, response.status_code)
def test_resource_group_attribute_edit(self):
args = {
"resource_group_id": self.rg_physical_servers.id,
"attribute_id": self.rg_physical_servers_cpu_attribute.id
}
url = reverse('resource_tracker:resource_group_attribute_edit', kwargs=args)
# test GET
response = self.client.get(url)
self.assertEqual(200, response.status_code)
# test POST without producer or consumer
new_name = "new_attribute_name"
data = {
"name": new_name
}
response = self.client.post(url, data=data)
self.assertEqual(302, response.status_code)
self.rg_physical_servers_cpu_attribute.refresh_from_db()
self.assertEqual(self.rg_physical_servers_cpu_attribute.name, "new_attribute_name")
def test_cannot_edit_resource_group_attribute_when_logout(self):
self.client.logout()
args = {
"resource_group_id": self.rg_physical_servers.id,
"attribute_id": self.rg_physical_servers_cpu_attribute.id
}
url = reverse('resource_tracker:resource_group_attribute_edit', kwargs=args)
# test GET
response = self.client.get(url)
self.assertEqual(302, response.status_code)
def test_resource_group_attribute_edit_existing_name(self):
args = {
"resource_group_id": self.rg_physical_servers.id,
"attribute_id": self.rg_physical_servers_cpu_attribute.id
}
url = reverse('resource_tracker:resource_group_attribute_edit', kwargs=args)
# test GET
response = self.client.get(url)
self.assertEqual(200, response.status_code)
# test POST without producer or consumer
new_name = self.rg_physical_servers_memory_attribute.name
data = {
"name": new_name
}
old_name = self.rg_physical_servers_cpu_attribute.name
response = self.client.post(url, data=data)
self.assertEqual(200, response.status_code)
self.rg_physical_servers_cpu_attribute.refresh_from_db()
self.assertEqual(self.rg_physical_servers_cpu_attribute.name, old_name)
def test_resource_group_attribute_edit_same_name(self):
args = {
"resource_group_id": self.rg_physical_servers.id,
"attribute_id": self.rg_physical_servers_cpu_attribute.id
}
url = reverse('resource_tracker:resource_group_attribute_edit', kwargs=args)
# test GET
response = self.client.get(url)
self.assertEqual(200, response.status_code)
# test POST without producer or consumer
data = {
"name": self.rg_physical_servers_cpu_attribute.name,
"produce_for": "",
"consume_from": ""
}
response = self.client.post(url, data=data)
self.assertEqual(302, response.status_code)
self.rg_physical_servers_cpu_attribute.refresh_from_db()
self.assertEqual(self.rg_physical_servers_cpu_attribute.produce_for, None)
self.assertEqual(self.rg_physical_servers_cpu_attribute.consume_from, None)
def test_resource_group_attribute_delete(self):
args = {
"resource_group_id": self.rg_physical_servers.id,
"attribute_id": self.rg_physical_servers_cpu_attribute.id
}
url = reverse('resource_tracker:resource_group_attribute_delete', kwargs=args)
# test GET
response = self.client.get(url)
self.assertEqual(200, response.status_code)
# test POST
attribute_id = copy(self.rg_physical_servers_cpu_attribute.id)
self.assertTrue(ResourceGroupAttributeDefinition.objects.filter(id=attribute_id).exists())
response = self.client.post(url)
self.assertEqual(302, response.status_code)
self.assertFalse(ResourceGroupAttributeDefinition.objects.filter(id=attribute_id).exists())
def test_cannot_delete_resource_group_attribute_logout(self):
self.client.logout()
args = {
"resource_group_id": self.rg_physical_servers.id,
"attribute_id": self.rg_physical_servers_cpu_attribute.id
}
url = reverse('resource_tracker:resource_group_attribute_delete', kwargs=args)
# test GET
response = self.client.get(url)
self.assertEqual(302, response.status_code)
def test_resource_group_attribute_add_producer_pool_is_updated(self):
vcenter_pool = ResourcePool.objects.create(name="vcenter-pool")
vcenter_pool_vcpu_att = vcenter_pool.add_attribute_definition(name='vCPU')
server_group = ResourceGroup.objects.create(name="server-group")
server_cpu_attribute_def = server_group.add_attribute_definition(name='CPU')
server = server_group.create_resource(name=f"server-group1")
server.set_attribute(server_cpu_attribute_def, 100)
# nothing produced yet
self.assertEqual(0, vcenter_pool_vcpu_att.total_produced)
args = {
"resource_group_id": server_group.id,
"attribute_id": server_cpu_attribute_def.id
}
url = reverse('resource_tracker:resource_group_attribute_edit', kwargs=args)
data = {
"name": server_group.name,
"produce_for": vcenter_pool_vcpu_att.id,
"consume_from": ""
}
response = self.client.post(url, data=data)
self.assertEqual(302, response.status_code)
vcenter_pool_vcpu_att.refresh_from_db()
self.assertEqual(100, vcenter_pool_vcpu_att.total_produced)
def test_resource_group_attribute_delete_producer_pool_is_updated(self):
vcenter_pool = ResourcePool.objects.create(name="vcenter-pool")
vcenter_pool_vcpu_att = vcenter_pool.add_attribute_definition(name='vCPU')
server_group = ResourceGroup.objects.create(name="server-group")
server_cpu_attribute_def = server_group.add_attribute_definition(name='CPU')
server = server_group.create_resource(name=f"server-group1")
server.set_attribute(server_cpu_attribute_def, 100)
vcenter_pool.attribute_definitions.get(name='vCPU') \
.add_producers(server_group.attribute_definitions.get(name='CPU'))
vcenter_pool_vcpu_att.refresh_from_db()
self.assertEqual(100, vcenter_pool_vcpu_att.total_produced)
args = {
"resource_group_id": server_group.id,
"attribute_id": server_cpu_attribute_def.id
}
url = reverse('resource_tracker:resource_group_attribute_edit', kwargs=args)
data = {
"name": server_group.name,
"produce_for": "",
"consume_from": ""
}
response = self.client.post(url, data=data)
self.assertEqual(302, response.status_code)
vcenter_pool_vcpu_att.refresh_from_db()
self.assertEqual(0, vcenter_pool_vcpu_att.total_produced)
|
11480710
|
import functools
import time
import pika.exceptions
import pika
import ssl
from mlapp.handlers.message_queues.message_queue_interface import MessageQueueInterface
class RabbitMQHandler(MessageQueueInterface):
def __init__(self, settings):
"""
        Initializes the RabbitMQHandler with its connection parameters.
:param settings: settings from `mlapp > config.py` depending on handler type name.
"""
super(RabbitMQHandler, self).__init__()
connection_params = {
'host': settings.get('hostname'),
'port': settings.get('port')
}
if settings.get('use_ssl', False):
            context = None
            if settings.get('tls', False):
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
            elif settings.get('cert_path', False):
                context = ssl.create_default_context(cafile=settings['cert_path'])
            else:  # neither TLS nor a certificate path was provided
                raise Exception('Missing SSL context for connection to RabbitMQ, please provide a certificate or use TLS')
ssl_options = pika.SSLOptions(context, settings.get('hostname'))
credentials = pika.PlainCredentials(settings.get('username'), settings.get('password'))
self.params = pika.ConnectionParameters(
**connection_params, credentials=credentials, ssl_options=ssl_options)
else:
self.params = pika.ConnectionParameters(**connection_params)
self.connection_timeout = settings.get('connection_timeout', 15)
def send_message(self, queue_name, body):
"""
Sends message to the queue
:param queue_name: name of the topic/queue to send the message to
:param body: message as string or bytes
"""
while True:
connection = None
channel = None
try:
# opening connection
connection = pika.BlockingConnection(parameters=self.params)
channel = connection.channel()
# connect to queue
channel.queue_declare(queue=queue_name, durable=True)
channel.confirm_delivery()
# sending message
channel.basic_publish(exchange='', routing_key=queue_name, body=body)
break
except pika.exceptions.AMQPChannelError as err:
print("Caught a channel error: {}, stopping...".format(err))
break
# recover on all other connection errors
except pika.exceptions.AMQPConnectionError:
print("Connection was closed, retrying...")
time.sleep(1)
continue
except Exception as e:
print(e)
time.sleep(1)
continue
finally:
# closing connection
if connection and connection.is_open and channel:
channel.close()
connection.close()
def listen_to_queues(self, queue_names, callback):
"""
Listen to queues/topics
:param queue_names: list of queue/topic names to listen to
:param callback: function to call upon receiving a message
"""
while True:
try:
# connection to rabbitMQ
conn = pika.BlockingConnection(parameters=self.params)
chan = conn.channel()
# preparing listen to queues
on_message_callback = functools.partial(self._on_message, args=(conn, callback))
for queue in queue_names:
chan.queue_declare(queue=str(queue), durable=True)
chan.basic_qos(prefetch_count=1)
chan.basic_consume(str(queue), on_message_callback)
print('[*] Waiting for messages in ' + str(queue) + '. To exit press CTRL+C')
# listening to queues
try:
chan.start_consuming()
except KeyboardInterrupt:
chan.stop_consuming()
conn.close()
break
except pika.exceptions.AMQPChannelError as err:
print("Caught a channel error: {}, stopping...".format(err))
break
# recover on all other connection errors
except pika.exceptions.AMQPConnectionError:
print("Connection was closed, retrying...")
time.sleep(1)
continue
except Exception as e:
print(e)
time.sleep(1)
continue
@staticmethod
def _ack_message(body, channel, delivery_tag):
"""Note that `channel` must be the same pika channel instance via which
the message being ACKed was retrieved (AMQP protocol constraint).
"""
if channel.is_open:
channel.basic_ack(delivery_tag)
else:
# Channel is already closed, so we can't ACK this message;
# log and/or do something that makes sense for your app in this case.
print("Channel was closed during the process of this task. Message can't be acknowledged.")
@staticmethod
def _on_message(channel, method_frame, header_frame, body, args):
(connection, callback) = args
        # acknowledge the message manually before handing the body to the callback
if channel.is_open:
channel.basic_ack(method_frame.delivery_tag)
else:
# Channel is already closed, so we can't ACK this message;
# log and/or do something that makes sense for your app in this case.
print("Channel was closed during the process of this task. Message can't be acknowledged.")
connection.close()
callback(body)
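# Hedged usage sketch (settings keys follow the ones read in __init__ above;
# queue name and callback are illustrative):
#
#   handler = RabbitMQHandler({'hostname': 'localhost', 'port': 5672})
#   handler.send_message('jobs', b'{"task": "train"}')
#   handler.listen_to_queues(['jobs'], lambda body: print(body))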
|
11480767
|
import random, math
def mk_initial_balances(accts, coins):
o = []
for i in range(accts):
o.extend([i] * random.randrange((coins - len(o)) * 2 // (accts - i)))
o.extend([accts-1] * (coins - len(o)))
return o
def fragments(coins):
o = 0
for i in range(1, len(coins)):
if coins[i] != coins[i-1]:
o += 1
return o
def xfer(coins, frm, to, value):
coins = coins[::]
pos = 0
while pos < len(coins) and value > 0:
if coins[pos] == frm:
coins[pos] = to
value -= 1
pos += 1
return coins
def unscramble(coins, c1, c2):
coins = coins[::]
k1 = coins.count(c1)
pos = 0
while pos < len(coins):
if coins[pos] in (c1, c2):
coins[pos] = c1 if k1 > 0 else c2
if coins[pos] == c1:
k1 -= 1
pos += 1
return coins
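# Worked example: unscramble([1, 2, 1, 2], c1=1, c2=2) counts k1 = 2 coins of
# c1, then rewrites every 1/2 slot left to right as 1 until k1 is exhausted and
# as 2 afterwards, giving [1, 1, 2, 2]: balances are preserved while the two
# accounts' coins become contiguous (fewer fragments).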
def multi_unscramble(coins, addrs):
coins = coins[::]
ks = [coins.count(c) for c in addrs]
pos = 0
at = 0
while pos < len(coins):
if coins[pos] in addrs:
coins[pos] = addrs[at]
ks[at] -= 1
if ks[at] == 0:
at += 1
pos += 1
return coins
def unscramble_swap_strategy(coins, rounds):
for i in range(rounds):
c1, c2 = sorted([random.randrange(max(coins)+1) for _ in range(2)])
coins = unscramble(coins, c1, c2)
return coins
def run_with_unscrambling(coins, rounds):
M = max(coins) + 1
for i in range(rounds):
c1, c2 = [random.randrange(M) for _ in range(2)]
value = int(coins.count(c1) ** random.random())
coins = xfer(coins, c1, c2, value)
coins = unscramble(coins, min(c1, c2), max(c1, c2))
return coins
def run_with_unscramble_online(coins, rounds):
M = max(coins) + 1
for i in range(rounds):
c1, c2 = [random.randrange(M) for _ in range(2)]
value = int(coins.count(c1) ** random.random())
coins = xfer(coins, c1, c2, value)
        if random.random() < 1:  # threshold of 1 means the online unscramble step always runs
cx = sorted([random.randrange(M) for _ in range(5)])
coins = multi_unscramble(coins, cx)
return coins
c = mk_initial_balances(200, 10000)
# random.shuffle(c)
# c = unscramble_swap_strategy(c, 20000)
c = run_with_unscramble_online(c, 10000)
print(fragments(c))
|
11480774
|
import importlib
from unittest import mock
from ..util import BaseCase
warn_message = 'This might go badly'
error_message = 'Something terrible happened'
log_message = 'Data received'
class ModuleTestInstance(BaseCase):
def test_basereps_module_exists(self):
importlib.import_module("pygsti.evotypes.basereps_cython")
def test_densitymx_modules_exist(self):
importlib.import_module("pygsti.evotypes.densitymx.statereps")
importlib.import_module("pygsti.evotypes.densitymx.opreps")
importlib.import_module("pygsti.evotypes.densitymx.effectreps")
def test_statevec_modules_exist(self):
importlib.import_module("pygsti.evotypes.statevec.statereps")
importlib.import_module("pygsti.evotypes.statevec.opreps")
importlib.import_module("pygsti.evotypes.statevec.effectreps")
importlib.import_module("pygsti.evotypes.statevec.termreps")
def test_stabilizer_modules_exist(self):
importlib.import_module("pygsti.evotypes.stabilizer.statereps")
importlib.import_module("pygsti.evotypes.stabilizer.opreps")
importlib.import_module("pygsti.evotypes.stabilizer.effectreps")
importlib.import_module("pygsti.evotypes.stabilizer.termreps")
def test_fastcalc_module_exists(self):
importlib.import_module("pygsti.tools.fastcalc")
def test_fastopcalc_module_exists(self):
importlib.import_module("pygsti.baseobjs.opcalc.fastopcalc")
def test_mapforwardsim_calc_modules_exist(self):
importlib.import_module("pygsti.forwardsims.mapforwardsim_calc_densitymx")
def test_termforwardsim_calc_modules_exist(self):
importlib.import_module("pygsti.forwardsims.termforwardsim_calc_statevec")
importlib.import_module("pygsti.forwardsims.termforwardsim_calc_stabilizer")
def test_fastcircuitparser_module_exists(self):
importlib.import_module("pygsti.circuits.circuitparser.fastcircuitparser")
|
11480779
|
from strips.hsp import compute_costs
from strips.operators import Action
from misc.functions import argmin, flatten, INF
def get_layers(costs):
num_layers = max(pair.level for pair in costs.values()) + 1
layers = [[] for _ in range(num_layers)]
for value, (_, level) in costs.items():
layers[level].append(value)
return layers
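# Worked example: with costs = {a: Pair(cost=1, level=0), b: Pair(cost=3, level=1)}
# (any 2-tuple-like value exposing `.level`, as produced by compute_costs),
# get_layers returns [[a], [b]]: one bucket per level, holding the values first
# reachable at that level.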
# def print_rpg(literal_layers, operator_layers):
# for level in range(len(literal_layers)):
# print('Level', level)
# if level != 0:
# print('Operators', str_iterable(operator_layers[level-1]))
# print('Literals', str_iterable(literal_layers[level]), '\n')
def extract_relaxed_plan(goal, literal_costs, operator_costs):
#literal_layers = get_layers(literal_costs)
operator_layers = get_layers(operator_costs)
#print_rpg(literal_layers, operator_layers)
num_goal_layers = operator_costs[goal].level + 1
goals = [set() for _ in range(num_goal_layers)]
plan = [set() for _ in range(num_goal_layers - 1)]
marked = [set() for _ in range(num_goal_layers)]
for literal in goal.conditions:
goals[literal_costs[literal].level].add(literal)
for level in reversed(range(1, num_goal_layers)):
for literal in goals[level]:
if literal in marked[level]:
continue
easiest_operator = argmin(lambda o: operator_costs[o].cost,
(o for o in operator_layers[level-1] if literal in o.effects))
#easiest_operator = argmin(lambda o: operator_costs[o].cost,
# (o for o, p in operator_costs.items() if p.level < level and literal in o.effects))
plan[level-1].add(easiest_operator)
for condition in easiest_operator.conditions:
goals[literal_costs[condition].level].add(condition)
for effect in easiest_operator.effects:
marked[level].add(effect)
marked[level-1].add(effect)
return plan, goals
###########################################################################
def plan_cost(relaxed_plan, unit=False):
if relaxed_plan is None:
return INF
return sum(operator.cost if not unit else 1
for operator in flatten(relaxed_plan))
def plan_length(relaxed_plan):
if relaxed_plan is None:
return INF
return len(flatten(relaxed_plan))
def multi_cost(goal, operator_costs, relaxed_plan, relaxed_goals):
return plan_cost(relaxed_plan), operator_costs[goal].cost, operator_costs[goal].level
###########################################################################
# TODO: use FF or FD software for heuristics
def none(operator_costs, relaxed_plan, relaxed_goals):
return []
def applicable(operator_costs, relaxed_plan, relaxed_goals):
return [o for o, (_, level) in operator_costs.items()
if isinstance(o, Action) and (level == 0)]
def backpointers(operator_costs, relaxed_plan, relaxed_goals):
# Retrace actions without using extract_relaxed_plan
# Could also do h_max and h_add versions
raise NotImplementedError()
def first_goals(operator_costs, relaxed_plan, relaxed_goals):
if len(relaxed_goals) <= 1:
return []
return [o for o, (_, level) in operator_costs.items()
if isinstance(o, Action) and (level == 0) and any(effect in relaxed_goals[1] for effect in o.effects)]
def first_operators(operator_costs, relaxed_plan, relaxed_goals):
if not relaxed_plan:
return []
return [o for o in relaxed_plan[0] if isinstance(o, Action)]
# TODO: prioritize helpful actions
###########################################################################
def ff(state, goal, operators, heuristic, helpful_actions, op=max, unit=False):
literal_costs, operator_costs = compute_costs(state, goal, operators, op=op, unit=unit)
if goal not in operator_costs:
return INF, []
relaxed_plan, relaxed_goals = extract_relaxed_plan(goal, literal_costs, operator_costs)
return heuristic(relaxed_plan), helpful_actions(operator_costs, relaxed_plan, relaxed_goals)
def ff_fn(heuristic, helpful_actions, op=max, unit=False):
return lambda s, g, o: ff(s, g, o, heuristic, helpful_actions, op=op, unit=unit)
###########################################################################
def h_ff_max(*args):
return ff_fn(plan_cost, none, op=max)(*args)[0]
def h_ff_add(*args):
return ff_fn(plan_cost, none, op=sum)(*args)[0]
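# Note: the two shortcuts above differ only in the cost-propagation operator
# passed to compute_costs: `max` yields the h_max-style relaxation, while `sum`
# yields the additive h_add-style variant, which is usually more informative
# but not admissible.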
|
11480781
|
import h5py
import pandas as pd
from concise.preprocessing import encodeDNA
df = pd.read_pickle("human_utrs_result.pkl")
top_n = 2000
inputs = encodeDNA(df.utr)[:top_n]
preds = df.retrained_pred.values.reshape((-1, 1))[:top_n]
fw = h5py.File("expect.human_utrs.h5", 'w')
fw.create_dataset('/inputs', data=inputs)
fw.create_dataset('/preds', data=preds)
fw.flush()
fw.close()
|
11480784
|
from .base import License
class MITLicense(License):
'''
The MIT license
'''
id = 'MIT'
rpm = 'MIT'
python = 'License :: OSI Approved :: MIT License'
url = 'http://opensource.org/licenses/MIT'
class BSD2ClauseLicense(License):
'''
BSD 2-clause "Simplified" License
'''
id = 'BSD-2-Clause'
rpm = 'BSD'
python = 'License :: OSI Approved :: BSD License'
url = 'http://opensource.org/licenses/BSD-2-Clause'
class BSD3ClauseLicense(BSD2ClauseLicense):
'''
BSD 3-clause "New" or "Revised" License
'''
id = 'BSD-3-Clause'
url = 'http://opensource.org/licenses/BSD-3-Clause'
class GPLv2LaterLicense(License):
'''
GNU General Public License v2.0 or later
'''
id = 'GPL-2.0+'
rpm = 'GPLv2+'
    python = 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)'
url = 'http://www.gnu.org/licenses/old-licenses/gpl-2.0.html'
class GPLv2OnlyLicense(GPLv2LaterLicense):
'''
GNU General Public License v2.0 only
'''
id = 'GPL-2.0'
rpm = 'GPLv2'
python = 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)'
class GPLv3LaterLicense(License):
'''
GNU General Public License v3.0 or later
'''
id = 'GPL-3.0+'
rpm = 'GPLv3+'
python = 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)'
url = 'http://www.gnu.org/licenses/gpl-3.0.html'
class GPLv3OnlyLicense(GPLv3LaterLicense):
'''
GNU General Public License v3.0 only
'''
id = 'GPL-3.0'
rpm = 'GPLv3'
python = 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)'
class LGPLv21LaterLicense(License):
'''
GNU Lesser General Public License v2.1 or later
'''
id = 'LGPL-2.1+'
rpm = 'LGPLv2+'
python = 'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)'
url = 'http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html'
class LGPLv21OnlyLicense(LGPLv21LaterLicense):
'''
GNU Lesser General Public License v2.1 only
'''
id = 'LGPL-2.1'
rpm = 'LGPLv2'
python = 'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)'
class LGPLv3LaterLicense(License):
'''
GNU Lesser General Public License v3.0 or later
'''
id = 'LGPL-3.0+'
rpm = 'LGPLv3+'
python = 'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)'
url = 'http://www.gnu.org/licenses/lgpl-3.0.html'
class LGPLv3OnlyLicense(LGPLv3LaterLicense):
'''
GNU Lesser General Public License v3.0 only
'''
id = 'LGPL-3.0'
rpm = 'LGPLv3'
python = 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)'
class AGPLv3LaterLicense(License):
'''
GNU Affero General Public License v3.0 or later
'''
id = 'AGPL-3.0+'
rpm = 'AGPLv3+'
python = 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)'
url = 'http://www.gnu.org/licenses/agpl-3.0.html'
class AGPLv3OnlyLicense(AGPLv3LaterLicense):
'''
    GNU Affero General Public License v3.0 only
'''
id = 'AGPL-3.0'
rpm = 'AGPLv3'
python = 'License :: OSI Approved :: GNU Affero General Public License v3'
class Apachev1License(License):
'''
Apache License Version 1.0
'''
id = 'Apache-1.0'
rpm = 'ASL 1.0'
python = 'License :: OSI Approved :: Apache Software License'
url = 'https://www.apache.org/licenses/LICENSE-1.0'
class Apachev11License(Apachev1License):
'''
Apache License Version 1.1
'''
id = 'Apache-1.1'
rpm = 'ASL 1.1'
url = 'https://www.apache.org/licenses/LICENSE-1.1'
class Apachev2License(Apachev1License):
'''
Apache License Version 2.0
'''
id = 'Apache-2.0'
rpm = 'ASL 2.0'
url = 'https://www.apache.org/licenses/LICENSE-2.0'
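# Hedged usage sketch: each class is a plain declarative record, so license
# metadata can be read straight off the class without instantiation:
#
#   assert MITLicense.rpm == 'MIT'
#   assert Apachev2License.url == 'https://www.apache.org/licenses/LICENSE-2.0'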
|
11480855
|
import unittest
from typing import AnyStr
import test.resources
from spark3.types import ABI, ABIFunction, ABIFunctionElement
from spark3.utils.abi import normalize_abi, filter_by_name, filter_by_type
RESOURCE_GROUP = 'contract_test'
def _get_resource_path(file_name: str) -> AnyStr:
return test.get_resource_path([RESOURCE_GROUP], file_name)
def _read_resource(file_name: str) -> AnyStr:
return test.read_resource([RESOURCE_GROUP], file_name)
class UtilsTestCase(unittest.TestCase):
def test_normalize_abi_for_function(self):
abi: ABI = normalize_abi(_read_resource('abi1.json'))
        func: ABIFunction = abi[0]
self.assertEqual('function', func['type'])
self.assertEqual('AllTypeFunction', func['name'])
self.assertEqual(True, func['constant'])
self.assertEqual([], func['outputs'])
self.assertEqual(19, len(func['inputs']))
input1: ABIFunctionElement = func['inputs'][0]
self.assertEqual('addr', input1['name'])
self.assertEqual('address', input1['type'])
input18: ABIFunctionElement = func['inputs'][18]
self.assertEqual(2, len(input18['components']))
c1: ABIFunctionElement = input18['components'][0]
self.assertEqual('value', c1['name'])
def test_filter_by_name(self):
abi: ABI = normalize_abi(_read_resource('abi1.json'))
filtered = filter_by_name('AllTypeFunction', abi)
self.assertEqual(1, len(filtered))
self.assertEqual('AllTypeFunction', filtered[0].get('name'))
filtered = filter_by_name('not_exists', abi)
self.assertEqual(0, len(filtered))
def test_filter_by_type(self):
abi: ABI = normalize_abi(_read_resource('abi1.json'))
filtered = filter_by_type('function', abi)
self.assertEqual(1, len(filtered))
self.assertEqual('AllTypeFunction', filtered[0].get('name'))
        filtered = filter_by_type('event', abi)
self.assertEqual(0, len(filtered))
|
11480934
|
import webbrowser
import json
import requests
import time
import logging
def keysearch(key):
logging.basicConfig(level=logging.INFO, filename='Supreme_Log.log', filemode='a',
format = " %(asctime)s %(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p ")
starttime = time.time()
url = 'https://www.supremenewyork.com/mobile_stock.json'
response = requests.get(url=url)
data = json.loads(response.content.decode('utf-8'))
mylist = []
global mylists
mylists = mylist
for items in data['products_and_categories']:
if items != 'new':
categories = items
for x in categories.split():
for result in data['products_and_categories']['{}'.format(x)]:
if keyword in result['name'].lower():
print('Product Found!')
name = result['name']
id = result['id']
if str(id)[0] == '3':
region = 'Supreme EU'
else:
region = 'Supreme US'
cat = result['category_name']
price = '${}'.format(result['price']*.01)
link = 'https://www.supremenewyork.com/shop/{}/{}'.format(x, id)
mylist.append(id)
print(len(mylist), end=""),
print('.)', end = ""),
print(name,'-',cat, '-', price)
webbrowser.open(link)
print('Product Found at {} and Opened in {:.2f} Seconds'.format(time.strftime("%I:%M:%S"),time.time()-starttime))
logging.info('{}: {} Found Using "{}" at {} and Opened in {:.2f} Seconds'.format(region, name, keyword, time.strftime("%I:%M:%S"),time.time()-starttime))
print()
if __name__ == "__main__":
keyword = input('Enter Keyword(s), Hit Enter When Ready:').lower()
keylist = keyword.split(",")
print()
for keyword in keylist:
keysearch(keyword)
for _ in range(600):
try:
if not mylists:
print('{}: Product Not Found for {}, Will Look Again...'.format(time.strftime("%I:%M:%S"),keyword).title())
time.sleep(0.25)
keysearch(keyword)
except Exception as e:
print('{}: or Webstore Closed'.format(e))
print('Program Ended')
print('------------------------------------------------------------------------------------------------------------')
|
11480936
|
from time import time
import os
import psutil
import math
from typing import Iterator, List, Tuple
import pandas as pd
from google.cloud import bigquery
from google.oauth2 import service_account
from bqfetch.utils import *
CREDS_SCOPES = [
"https://www.googleapis.com/auth/drive",
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/devstorage.full_control"
]
DEFAULT_CHUNK_SIZE_PER_CORE_IN_GB = 2
class BigQueryTable:
'''
A simple object containing the path to the requested table.
`project_id` is the name of the BigQuery project, `dataset`
the BigQuery dataset entry and `table` the name of the
requested table.
'''
def __init__(
self,
project_id: str,
dataset: str,
table: str,
) -> None:
self._variables = {
"PROJECT_ID": project_id,
"DATASET": dataset,
"TABLE": table,
}
@property
def variables(self):
return self._variables
class FetchingChunk:
'''
Wrapper object used to store the elements to select in the
given column.
'''
def __init__(self, elements: List[str], column: str,) -> None:
self.elements = elements
self.column = column
class BigQueryClient:
'''
Wrapper of BigQuery Client object containing credentials.
Parameters:
----------
service_account_path: str
The path and file name of credentials file bq_service_account.json.
The path should be absolute.
'''
def __init__(
self,
service_account_path: str,
creds_scope: str=None,
) -> None:
if isinstance(service_account_path, str):
creds_scope = creds_scope if creds_scope is not None \
else CREDS_SCOPES
credentials = service_account.Credentials.from_service_account_file(
service_account_path, scopes=creds_scope
)
else:
raise ValueError('`service_account_path` should be of type str or Credentials')
bq_client = bigquery.Client(
credentials=credentials,
project=credentials.project_id
)
self._client = bq_client
def run(
self,
request: str
) -> bigquery.table.RowIterator:
"""
Run a SQL BigQuery request.
"""
job = self._client.query(request)
return job.result()
def delete_table(
self,
table_name: str,
not_found_ok: bool=True,
):
'''
Delete a BigQuery table.
'''
self._client.delete_table(table_name, not_found_ok=not_found_ok)
def get_nb_occurences_for_column(
self,
table: BigQueryTable,
column: str,
) -> List[int]:
'''
For each distinct element in `column`, counts the number of occurences
and returns a list containing all the countings.
        Ex: For a column name containing: John, <NAME>
>>> [2, 1]
'''
var = table.variables
nb_occurences_query = f'''
SELECT COUNT(*)
FROM `{var["PROJECT_ID"]}.{var["DATASET"]}.{var["TABLE"]}`
GROUP BY {column}
'''
nb_occurences = [nb_occurences[0] for nb_occurences in self.run(nb_occurences_query)]
return nb_occurences
def get_table_size_in_GB(
self,
table: BigQueryTable,
) -> int:
'''
Returns the size in GB of `table`.
'''
var = table.variables
size_of_table_query = f'''
SELECT SUM(size_bytes)/{1024**3} AS size_GB
FROM {var["PROJECT_ID"]}.{var["DATASET"]}.__TABLES__
WHERE table_id = '{var["TABLE"]}'
'''
size_of_table_in_GB = next(self.run(size_of_table_query).__iter__())[0]
return size_of_table_in_GB
def get_column_values(
self,
table: BigQueryTable,
column: str,
) -> pd.DataFrame:
'''
Returns a Dataframe (with 1 col) of distinct elements on the
`column` of the `table`.
'''
var = table.variables
query = f'''
SELECT DISTINCT `{column}`
FROM `{var["PROJECT_ID"]}.{var["DATASET"]}.{var["TABLE"]}`
'''
query_results = self.run(query)
return query_results.to_dataframe()
def create_partitioned_table(
self,
table: BigQueryTable,
chunk: FetchingChunk,
partitioned_table_name: str,
):
'''
Create a temporary table used to store one `chunk` of data,
extracted from the main table to fetch. This step is necessary
in order to improve performances and avoid network bottleneck.
The table is created with the name `partitioned_table_name` in the
same dataset as `bq_table`.
'''
sqlify_chunk_elements = ','.join(list(map(lambda x: f'"{x}"', chunk.elements)))
var = table.variables
query = f'''
CREATE OR REPLACE TABLE
`{var["PROJECT_ID"]}.{var["DATASET"]}.{partitioned_table_name}` AS
SELECT
*
FROM `{var["PROJECT_ID"]}.{var["DATASET"]}.{var["TABLE"]}`
WHERE {chunk.column} IN ({sqlify_chunk_elements})
'''
self.run(query)
def delete_partitioned_table(
self,
table: BigQueryTable,
partitioned_table_name: str,
):
'''
Delete the temporary table used to chunk the table.
'''
var = table.variables
table = f'{var["PROJECT_ID"]}.{var["DATASET"]}.{partitioned_table_name}'
self.delete_table(table)
class InvalidChunkRangeException(Exception):
pass
class BigQueryFetcher:
'''
An object used to fetch BigQuery tables easily and progressively
in order to handle huge tables that does not fit into memory.
The fetcher divides the table in chunks of size `chunk_size_in_GB`
based on the `column` parameter. Then each chunk is fetched
    using BigQuery Storage API, sequentially or in parallel using
child processes running on multiple cores.
Ex: Fetch a huge table of users: first, all the 'user_id' are
fetched and divided in chunks of size 50000 (should fit into memory).
Then, we fetch each small chunk separately using multiprocessing
with the number of cores available of the machine.
>>> table = BigQueryTable("my_project", "dataset1", "users_table")
>>> fetcher = BigQueryFetcher('path/to/service_account.json', table)
>>> chunks = fetcher.chunks('user_id', 50000)
>>> for chunk in chunks:
df = fetcher.fetch(chunk, nb_cores=-1)
# compute df...
'''
def __init__(
self,
service_account_filename: str,
bq_table: BigQueryTable,
existing_client: BigQueryClient=None,
creds_scope: str=None,
):
self._client = existing_client if existing_client is not None \
else BigQueryClient(service_account_filename, creds_scope=creds_scope)
self._bq_table = bq_table
self._service_account_filename = service_account_filename
self._creds_scopes = CREDS_SCOPES
self._cache = {}
self._first_fetch = True
def chunks(
self,
column: str,
by_nb_chunks: int=None,
by_chunk_size_in_GB: int=None,
verbose: bool=False,
) -> Iterator:
'''
Returns a list on which iterate to get `nb_chunks` chunks of `column` items.
It allows to fetch the whole table with multiple chunks that can handle in memory.
The chosen column can be of any type, not only String or Int.
'''
assert isinstance(column, str)
if (by_nb_chunks is None and by_chunk_size_in_GB is None) \
or (by_nb_chunks is not None and by_chunk_size_in_GB is not None):
raise ValueError('Only one parameter `by_nb_chunks` or `by_chunk_size_in_GB` has to be set')
if not ((by_nb_chunks is not None and by_nb_chunks > 0) \
or (by_chunk_size_in_GB is not None and by_chunk_size_in_GB > 0)):
raise ValueError('Value has to be greater than 0')
by_nb_chunks = by_nb_chunks if by_nb_chunks is not None else \
self.get_nb_chunks_approximation(column, verbose=verbose, chunk_size_in_GB=by_chunk_size_in_GB)
indexes = self._client.get_column_values(self._bq_table, column)
chunks = divide_in_chunks(indexes, by_nb_chunks)
chunks = [FetchingChunk(x[column].tolist(), column) for x in chunks]
if verbose:
log(
'Chunking',
f'Nb values in "{column}":\t {len(indexes)}',
f'Nb chunks:\t\t\t {len(chunks)}')
return chunks
def fetch(
self,
chunk: FetchingChunk=None,
nb_cores: int=1,
memory_to_save: float = 1.0,
parallel_backend: str='billiard',
partitioned_table_name: str='TMP_TABLE',
verbose: bool=False,
) -> pd.DataFrame:
'''
Fetch a `chunk` using BigQuery Storage API as a pandas Dataframe.
The `chunk` can be given using the `chunks()` method.
Parameters:
----------
chunk: FetchingChunk
A selection of rows that we want to fetch
nb_cores: int
The number of processes to create. By default, each process
            will run on a separate core. It is not recommended to set `nb_cores`
to a value larger than the number of vCPUs on the machine.
Setting this parameter to `-1` will use the number of vCPUs on
the machine.
memory_to_save: float
The amount of memory in GB to not use on the machine to avoid overflows.
parallel_backend: str
The framework used to parallelize the fetching.
>>> Choose 'billiard' to use an old fork of the Python multiprocessing lib
which allows to use multiprocessing from a process launched as daemon
(ex: Airflow).
>>> Choose 'joblib' to use the joblib backend.
>>> Choose 'multiprocessing' to use the current version of Python
multiprocessing lib.
partitioned_table_name: str
            The name of the temporary table that will be created in the same dataset as the
            fetched `bq_table` at each call to fetch(), in order to divide the whole table
            into small chunked tables that can be fetched extremely fast.
            This table is deleted after each execution, so there is no need to delete it
            manually afterwards.
Returns:
-------
pd.DataFrame
A Dataframe containing all the data fetched from the chunk.
'''
assert nb_cores == -1 or nb_cores > 0
assert isinstance(chunk, FetchingChunk)
assert parallel_backend in ['billiard', 'joblib', 'multiprocessing']
assert memory_to_save > 0
vcpu_count = os.cpu_count()
if nb_cores > vcpu_count:
print(f'Warning: `nb_cores` ({nb_cores}) greater than cpus on machine ({vcpu_count})')
if nb_cores == -1:
nb_cores = vcpu_count
if verbose and self._first_fetch:
log(
'Fetching',
f'Use multiprocessing : \t{nb_cores > 1}',
f'Nb cores: \t\t\t{nb_cores}',
f'Parallel backend: \t\t{parallel_backend}')
self._first_fetch = False
start = time()
df = None
column = chunk.column
if nb_cores == 1:
partitioned_table_name = f'{partitioned_table_name}0'
self._client.create_partitioned_table(self._bq_table, chunk, partitioned_table_name)
df = _fetch_in_parallel(
(self._service_account_filename, self._creds_scopes, \
partitioned_table_name, self._bq_table, column, chunk.elements)
)
self._client.delete_partitioned_table(self._bq_table, partitioned_table_name)
else:
chunks_per_core = divide_in_chunks(chunk.elements, nb_cores)
for i, small_chunk in enumerate(chunks_per_core):
small_chunk = FetchingChunk(small_chunk, chunk.column)
self._client.create_partitioned_table(self._bq_table, small_chunk, f'{partitioned_table_name}{i}')
partition_list = [(self._service_account_filename, self._creds_scopes, \
f'{partitioned_table_name}{i}', self._bq_table, column, item) for i, item in enumerate(chunks_per_core)]
parallel_backends = {
'billiard': do_parallel_billiard,
'joblib': do_parallel_joblib,
'multiprocessing': do_parallel_multiprocessing,
}
parallel_function = parallel_backends[parallel_backend]
df = pd.concat(parallel_function(
_fetch_in_parallel,
len(chunks_per_core),
partition_list
))
for i in range(len(chunks_per_core)):
self._client.delete_partitioned_table(self._bq_table, f'{partitioned_table_name}{i}')
end = time() - start
if verbose:
log(
f'Time to fetch:\t\t {round(end, 2)}s',
f'Nb lines in dataframe:\t {len(df)}',
f'Size of dataframe:\t\t {ft(df.memory_usage(deep=True).sum() / 1024**3)}')
return df
def get_nb_chunks_approximation(
self,
column: str,
nb_cores: int=1,
nb_GB_to_save: int = 1,
chunk_size_in_GB: int = DEFAULT_CHUNK_SIZE_PER_CORE_IN_GB,
verbose: bool=False,
) -> int:
'''
        Tries to estimate the number of chunks to use in order to divide the table.
        The approximation uses the free memory available on the machine.
        It only works when the number of rows per distinct value in `column` is
        approximately the same.
        To perform well, we have to predict an average chunk size, so if the sizes differ
        too much, the prediction will not be accurate.
        Ex: if `column` has thousands of rows for one value but only ten for another,
        the approximation will not work because there is too much variance between the value counts.
        Ex: if `column` refers to IDs, each row is unique, so it is a perfect use case for this function.
        The function will throw if more than 25% of the values are more than 25% away from the mean.
Parameters:
----------
column: str
The column name of the table on which do the approximation.
nb_cores: int
The number of cores that will be used.
        nb_GB_to_save: int
            The amount of memory in GB to leave unused on the machine.
chunk_size_in_GB: int
The amount of memory of one chunk, this amount should fit in memory and thus be less
than the free memory available on the machine.
Returns:
-------
nb_chunks: int
The approximated number of chunks based on free space and size of table.
'''
nb_occurences = self._client.get_nb_occurences_for_column(self._bq_table, column)
mean = sum(nb_occurences) / len(nb_occurences)
coeff = 0.25
nb_dispersed_values = sum(not (mean * (1 - coeff) < count < mean * (1 + coeff)) \
for count in nb_occurences)
dispersion_quotient = nb_dispersed_values / len(nb_occurences)
if dispersion_quotient > coeff:
raise InvalidChunkRangeException(f'''Difference of range between elements of column {column} \
is too high: more than {coeff * 100}% of elements are too far from the mean.''')
        # psutil reports available memory in bytes; convert to GB before subtracting the margin
        available_memory_in_GB = psutil.virtual_memory()[1] / 1024**3 - nb_GB_to_save
if chunk_size_in_GB >= available_memory_in_GB:
print(f'WARNING: you are using a chunk size bigger than the available memory ({ft(chunk_size_in_GB)}>{ft(available_memory_in_GB)})')
nb_chunks, size_of_table_in_GB = self._nb_chunks_approximation_formula(nb_cores, chunk_size_in_GB, \
available_memory_in_GB)
size_per_chunk_in_GB = math.ceil(size_of_table_in_GB / nb_chunks)
if verbose:
log(
'Chunk size approximation',
f'Available memory on device:\t {ft(available_memory_in_GB)}',
f'Size of table:\t\t {ft(size_of_table_in_GB)}',
                f'Preferred size of chunk:\t {ft(chunk_size_in_GB)}',
f'Size per chunk:\t\t {ft(size_per_chunk_in_GB)}',
f'Nb chunks approximation:\t {nb_chunks}')
return nb_chunks
def _nb_chunks_approximation_formula(
self,
nb_cores: int,
prefered_chunk_size_in_GB: int,
available_memory_in_GB: int,
):
'''
Returns an estimated number of chunks to divide the whole table.
This estimation is based on the free memory and the number of cores.
Returns also the size of the table for cache and performance reasons.
'''
        if 'size_of_table_in_GB' not in self._cache:
size_of_table_in_GB = self._client.get_table_size_in_GB(self._bq_table)
self._cache['size_of_table_in_GB'] = size_of_table_in_GB
sum_of_GB_for_cores = prefered_chunk_size_in_GB * nb_cores
nb_chunks = math.ceil(self._cache['size_of_table_in_GB'] / min(sum_of_GB_for_cores, available_memory_in_GB))
return nb_chunks, self._cache['size_of_table_in_GB']
def _fetch_in_parallel(
pickled_parameters: Tuple,
) -> pd.DataFrame:
'''
Fetch a BigQuery table using Storage API.
If `chunk` is given, the fetching will return only
the chunk matching the given list, based on the `column`.
    Warning: the imports inside this function should not be removed,
    because the dependencies may not be found when running
    in child processes.
    This function must be global, not defined inside a class.
'''
from google.cloud.bigquery_storage import BigQueryReadClient, ReadSession, DataFormat
service_account_filename, creds_scopes, partitioned_table_name, bq_table, column, chunk = pickled_parameters
credentials = service_account.Credentials.from_service_account_file(
service_account_filename, scopes=creds_scopes
)
var = bq_table.variables
bqstorageclient = BigQueryReadClient(credentials=credentials)
stringify_table = f"projects/{var['PROJECT_ID']}/datasets/{var['DATASET']}/tables/{partitioned_table_name}"
parent = "projects/{}".format(var['PROJECT_ID'])
requested_session = None
if chunk is not None:
sqlify_indexes = ','.join(list(map(lambda x: f'"{x}"', chunk)))
row_filter = ReadSession.TableReadOptions(row_restriction=f'{column} IN ({sqlify_indexes})')
requested_session = ReadSession(
table=stringify_table,
data_format=DataFormat.ARROW,
read_options=row_filter,
)
else:
requested_session = ReadSession(
table=stringify_table,
data_format=DataFormat.ARROW,
)
session = bqstorageclient.create_read_session(
parent=parent,
read_session=requested_session,
max_stream_count=1,
)
reader = bqstorageclient.read_rows(session.streams[0].name, timeout=10000)
return reader.to_dataframe(session)
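# Hypothetical usage sketch (placeholder project, dataset, table and
# service-account path; the calls mirror the class docstring above).
if __name__ == '__main__':
    table = BigQueryTable('my_project', 'dataset1', 'users_table')
    fetcher = BigQueryFetcher('path/to/service_account.json', table)
    # chunk by a target chunk size in GB instead of a fixed number of chunks
    for chunk in fetcher.chunks('user_id', by_chunk_size_in_GB=2, verbose=True):
        df = fetcher.fetch(chunk, nb_cores=-1, parallel_backend='billiard')
        # compute on df...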
|
11480954
|
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from torch import jit
import io
import time
import argparse
import cv2
from vgg import VGGNet
from utils import try_load
# Check device
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
# CIFAR-10 classes
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
def predict(model, image, test=False):
    # convert BGR -> RGB (the normalization transform is applied below)
x = image[:, :, (2, 1, 0)]
#print('Image shape: {}'.format(x.shape))
# H x W x C -> C x H x W for conv input
x = torch.from_numpy(x).permute(2, 0, 1).to(device)
torch.set_printoptions(threshold=5000)
to_norm_tensor = transforms.Compose([
#transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
img_tensor = to_norm_tensor(x.float().div_(255))
#print('Image tensor: {}'.format(img_tensor))
#print('Image tensor shape: {}'.format(img_tensor.shape))
img_tensor.unsqueeze_(0) # add a dimension for the batch
#print('New shape: {}'.format(img_tensor.shape))
if test:
ttime = 0
        for i in range(15):
t0 = time.time()
with torch.no_grad():
# forward pass
outputs = model(img_tensor)
if use_cuda:
torch.cuda.synchronize() # wait for operations to be complete
tf = time.time() - t0
ttime += tf if i > 0 else 0
score, predicted = outputs.max(1)
#print(outputs)
print(f'Predicted: {classes[predicted.item()]} | {score.item()}')
print(f'Forward pass time: {tf} seconds')
print(f'Avg forward pass time (excluding first): {ttime/14} seconds')
else:
t0 = time.time()
with torch.no_grad():
# forward pass
outputs = model(img_tensor)
if use_cuda:
torch.cuda.synchronize()
tf = time.time() - t0
score, predicted = outputs.max(1)
#print(outputs)
print(f'Predicted: {classes[predicted.item()]} | {score.item()}')
print(f'Forward pass time: {tf} seconds')
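# Hypothetical export sketch (not part of this tool): how a 'torch-script'
# model file like the one loaded below could be produced with torch.jit.trace.
# The (1, 3, 32, 32) example input matches this script's CIFAR-10 defaults.
def export_torch_script(model, path='../data/VGG16model_ts.pth'):
    model.eval()
    example = torch.randn(1, 3, 32, 32).to(device)
    traced = torch.jit.trace(model, example)
    traced.save(path)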
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='VGGNet Predict Tool')
parser.add_argument('mtype', type=str, choices=['pytorch', 'torch-script'], help='Model type')
parser.add_argument('--model', type=str, default='../data/VGG16model.pth', help='Pre-trained model')
parser.add_argument('--classes', type=int, default=10, help='Number of classes')
parser.add_argument('--input', type=int, default=32, help='Network input size')
parser.add_argument('--image', type=str, default='../data/dog.png', help='Input image')
parser.add_argument('--test_timing', type=int, default=0, help='Test timing with multiple forward pass iterations')
args = parser.parse_args()
# Model
print('==> Building model...')
if args.mtype == 'pytorch':
model = VGGNet('D-DSM', num_classes=args.classes, input_size=args.input) # depthwise separable
# Load model
print('==> Loading PyTorch model...')
model.load_state_dict(try_load(args.model))
model.eval()
model.to(device)
else:
print('==> Loading Torch Script model...')
# Load ScriptModule from io.BytesIO object
with open(args.model, 'rb') as f:
buffer = io.BytesIO(f.read())
model = torch.jit.load(buffer, map_location=device)
#print('[WARNING] ScriptModules cannot be moved to a GPU device yet. Running strictly on CPU for now.')
#device = torch.device('cpu') # 'to' is not supported on TracedModules (yet)
# if device.type == 'cuda':
# cudnn.benchmark = True
# model = torch.nn.DataParallel(model)
t0 = time.perf_counter()
predict(model, cv2.imread(args.image), test=args.test_timing)
print(f'Total time: {time.perf_counter()-t0} seconds')
|
11480960
|
import requests
from cartodb_services import StreetPointBulkGeocoder
from cartodb_services.geocodio import GeocodioGeocoder
from iso3166 import countries
from cartodb_services.tools.country import country_to_iso3
class GeocodioBulkGeocoder(GeocodioGeocoder, StreetPointBulkGeocoder):
MAX_BATCH_SIZE = 100 # Setting an upper limit (not stated in the documentation)
MIN_BATCHED_SEARCH = 0
READ_TIMEOUT = 60
CONNECT_TIMEOUT = 10
MAX_RETRIES = 1
def __init__(self, token, logger, service_params=None):
GeocodioGeocoder.__init__(self, token, logger, service_params)
self.connect_timeout = self.CONNECT_TIMEOUT
self.read_timeout = self.READ_TIMEOUT
self.max_retries = self.MAX_RETRIES
if service_params is not None:
self.connect_timeout = service_params.get('connect_timeout', self.CONNECT_TIMEOUT)
self.read_timeout = service_params.get('read_timeout', self.READ_TIMEOUT)
self.max_retries = service_params.get('max_retries', self.MAX_RETRIES)
self.session = requests.Session()
def _should_use_batch(self, searches):
return len(searches) >= self.MIN_BATCHED_SEARCH
def _serial_geocode(self, searches):
results = []
for search in searches:
elements = self._encoded_elements(search)
result = self.geocode_meta(*elements)
if result:
results.append((search[0], result[0], result[1]))
else:
results.append((search[0], None, None))
return results
def _encoded_elements(self, search):
(search_id, address, city, state, country) = search
address = address.encode('utf-8') if address else None
city = city.encode('utf-8') if city else None
state = state.encode('utf-8') if state else None
country = self._country_code(country) if country else None
return address, city, state, country
def _batch_geocode(self, searches):
if len(searches) == 1:
return self._serial_geocode(searches)
else:
frees = []
for search in searches:
elements = self._encoded_elements(search)
free = ', '.join([elem for elem in elements if elem])
frees.append(free)
full_results = self.geocode_free_text_meta(frees)
results = []
for s, r in zip(searches, full_results):
results.append((s[0], r[0], r[1]))
return results
def _country_code(self, country):
country_iso3166 = country
country_iso3 = country_to_iso3(country)
if country_iso3:
country_iso3166 = countries.get(country_iso3).alpha2.lower()
return country_iso3166
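# A hedged standalone sketch of the normalization performed by _country_code
# above, runnable without a Geocodio token (the example value is an assumption).
def normalize_country_example(country):
    iso3 = country_to_iso3(country)  # e.g. 'France' -> 'FRA'
    if iso3:
        return countries.get(iso3).alpha2.lower()  # 'FRA' -> 'fr'
    return country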
|
11480982
|
import pytest
from magnus import catalog # pylint: disable=import-error
from magnus import defaults # pylint: disable=import-error
from magnus import pipeline # pylint: disable=import-error
def test_get_run_log_store_returns_global_executor_run_log_store(mocker, monkeypatch):
mock_global_executor = mocker.MagicMock()
mock_global_executor.run_log_store = 'RunLogStore'
monkeypatch.setattr(pipeline, 'global_executor', mock_global_executor)
run_log_store = catalog.get_run_log_store()
assert run_log_store == 'RunLogStore'
def test_is_catalog_out_of_sync_returns_true_for_empty_synced_catalogs():
assert catalog.is_catalog_out_of_sync(1, []) is True
def test_is_catalog_out_of_sync_returns_false_for_same_objects():
class MockCatalog:
catalog_relative_path = None
data_hash = None
catalog_item = MockCatalog()
catalog_item.catalog_relative_path = 'path'
catalog_item.data_hash = 'hash'
synced_catalog = [catalog_item]
assert catalog.is_catalog_out_of_sync(catalog_item, synced_catalog) is False
def test_is_catalog_out_of_sync_returns_true_for_different_hash():
class MockCatalog:
catalog_relative_path = None
data_hash = None
catalog_item1 = MockCatalog()
catalog_item1.catalog_relative_path = 'path'
catalog_item1.data_hash = 'hash'
catalog_item2 = MockCatalog()
catalog_item2.catalog_relative_path = 'path'
catalog_item2.data_hash = 'not-hash'
synced_catalog = [catalog_item1]
assert catalog.is_catalog_out_of_sync(catalog_item2, synced_catalog) is True
def test_is_catalog_out_of_sync_returns_true_for_different_paths():
class MockCatalog:
catalog_relative_path = None
data_hash = None
catalog_item1 = MockCatalog()
catalog_item1.catalog_relative_path = 'path'
catalog_item1.data_hash = 'hash'
catalog_item2 = MockCatalog()
catalog_item2.catalog_relative_path = 'path1'
catalog_item2.data_hash = 'hash'
synced_catalog = [catalog_item1]
assert catalog.is_catalog_out_of_sync(catalog_item2, synced_catalog) is True
def test_base_catalog_inits_empty_config_if_none_config():
base_catalog = catalog.BaseCatalog(config=None)
assert base_catalog.config == {}
def test_base_catalog_get_raises_exception():
base_catalog = catalog.BaseCatalog(config=None)
with pytest.raises(NotImplementedError):
base_catalog.get(name='test', run_id='test')
def test_base_catalog_put_raises_exception():
base_catalog = catalog.BaseCatalog(config=None)
with pytest.raises(NotImplementedError):
base_catalog.put(name='test', run_id='test')
def test_base_catalog_sync_between_runs_raises_exception():
base_catalog = catalog.BaseCatalog(config=None)
with pytest.raises(NotImplementedError):
base_catalog.sync_between_runs(previous_run_id=1, run_id=2)
def test_base_catalog_inits_default_compute_folder_if_none_config():
base_catalog = catalog.BaseCatalog(config=None)
assert base_catalog.compute_data_folder == defaults.COMPUTE_DATA_FOLDER
def test_do_nothing_catalog_get_returns_empty_list(monkeypatch, mocker):
mock_base_catalog = mocker.MagicMock()
monkeypatch.setattr(catalog, 'BaseCatalog', mock_base_catalog)
catalog_handler = catalog.DoNothingCatalog(config=None)
assert catalog_handler.get(name='does not matter', run_id='none') == []
def test_do_nothing_catalog_put_returns_empty_list(monkeypatch, mocker):
mock_base_catalog = mocker.MagicMock()
monkeypatch.setattr(catalog, 'BaseCatalog', mock_base_catalog)
catalog_handler = catalog.DoNothingCatalog(config=None)
assert catalog_handler.put(name='does not matter', run_id='none') == []
def test_file_system_catalog_get_catalog_location_defaults_if_location_not_provided(monkeypatch, mocker):
mock_base_catalog = mocker.MagicMock()
monkeypatch.setattr(catalog, 'BaseCatalog', mock_base_catalog)
catalog_handler = catalog.FileSystemCatalog(config=None)
assert catalog_handler.get_catalog_location() == defaults.CATALOG_LOCATION_FOLDER
def test_file_system_catalog_get_catalog_location_returns_config_catalog_location_if_provided(monkeypatch, mocker):
mock_base_catalog = mocker.MagicMock()
monkeypatch.setattr(catalog, 'BaseCatalog', mock_base_catalog)
catalog_handler = catalog.FileSystemCatalog(config={'type': 'file-system',
'catalog_location': 'this'})
assert catalog_handler.get_catalog_location() == 'this'
def test_file_system_catalog_get_creates_catalog_location_using_run_id(monkeypatch, mocker):
mock_does_dir_exist = mocker.MagicMock(side_effect=[True, Exception()])
monkeypatch.setattr(catalog.utils, 'does_dir_exist', mock_does_dir_exist)
monkeypatch.setattr(catalog.FileSystemCatalog, 'get_catalog_location',
mocker.MagicMock(return_value='this_location'))
monkeypatch.setattr(catalog, 'BaseCatalog', mocker.MagicMock())
catalog_handler = catalog.FileSystemCatalog(config={'type': 'file-system'})
with pytest.raises(Exception):
catalog_handler.get('testing', run_id='dummy_run_id')
mock_does_dir_exist.assert_any_call(catalog.Path('this_location') / 'dummy_run_id')
def test_file_system_catalog_get_uses_compute_data_folder_provided(monkeypatch, mocker):
mock_does_dir_exist = mocker.MagicMock(side_effect=Exception())
monkeypatch.setattr(catalog.utils, 'does_dir_exist', mock_does_dir_exist)
monkeypatch.setattr(catalog.FileSystemCatalog, 'get_catalog_location',
mocker.MagicMock(return_value='this_location'))
monkeypatch.setattr(catalog, 'BaseCatalog', mocker.MagicMock())
catalog_handler = catalog.FileSystemCatalog(config={'type': 'file-system'})
with pytest.raises(Exception):
catalog_handler.get('testing', run_id='dummy_run_id', compute_data_folder='this_compute_folder')
mock_does_dir_exist.assert_called_once_with('this_compute_folder')
def test_file_system_catalog_get_raises_exception_if_compute_data_folder_does_not_exist(monkeypatch, mocker):
mock_does_dir_exist = mocker.MagicMock(return_value=False)
monkeypatch.setattr(catalog.utils, 'does_dir_exist', mock_does_dir_exist)
monkeypatch.setattr(catalog, 'BaseCatalog', mocker.MagicMock())
catalog_handler = catalog.FileSystemCatalog(config={'type': 'file-system'})
with pytest.raises(Exception):
catalog_handler.get('testing', run_id='dummy_run_id', compute_data_folder='this_compute_folder')
def test_file_system_catalog_get_raises_exception_if_catalog_does_not_exist(monkeypatch, mocker):
# mock_does_dir_exist = mocker.MagicMock(return_value=[True, False]) # Should be better than this
def mock_does_dir_exist(dir_name):
if dir_name == 'this_compute_folder':
return True
return False
monkeypatch.setattr(catalog.utils, 'does_dir_exist', mock_does_dir_exist)
monkeypatch.setattr(catalog.FileSystemCatalog, 'get_catalog_location',
mocker.MagicMock(return_value='this_location'))
monkeypatch.setattr(catalog, 'BaseCatalog', mocker.MagicMock())
catalog_handler = catalog.FileSystemCatalog(config={'type': 'file-system'})
with pytest.raises(Exception):
catalog_handler.get('testing', run_id='dummy_run_id', compute_data_folder='this_compute_folder')
def test_file_system_catalog_get_copies_files_from_catalog_to_compute_folder(mocker, monkeypatch):
monkeypatch.setattr(catalog.utils, 'safe_make_dir', mocker.MagicMock())
monkeypatch.setattr(catalog.utils, 'does_dir_exist', mocker.MagicMock(return_value=True))
monkeypatch.setattr(catalog.utils, 'remove_prefix', mocker.MagicMock())
monkeypatch.setattr(catalog.utils, 'get_data_hash', mocker.MagicMock())
monkeypatch.setattr(catalog.FileSystemCatalog, 'get_catalog_location',
mocker.MagicMock(return_value='this_location'))
class MockFile:
def __init__(self, name):
self.name = name
file1 = MockFile('file1')
file2 = MockFile('file2')
mock_glob = mocker.MagicMock(return_value=[file1, file2])
monkeypatch.setattr(catalog.Path, 'glob', mock_glob)
monkeypatch.setattr(catalog, 'get_run_log_store', mocker.MagicMock())
mock_copy = mocker.MagicMock()
monkeypatch.setattr(catalog.shutil, 'copy', mock_copy)
monkeypatch.setattr(catalog, 'BaseCatalog', mocker.MagicMock())
catalog_handler = catalog.FileSystemCatalog(config={'type': 'file-system'})
catalog_handler.get('testing', run_id='dummy_run_id')
assert mock_copy.call_count == 2
mock_copy.assert_any_call(file1, 'data')
mock_copy.assert_any_call(file2, 'data')
def test_file_system_catalog_put_copies_files_from_compute_folder_to_catalog_if_synced_changed(mocker, monkeypatch):
monkeypatch.setattr(catalog.utils, 'safe_make_dir', mocker.MagicMock())
monkeypatch.setattr(catalog.utils, 'does_dir_exist', mocker.MagicMock(return_value=True))
monkeypatch.setattr(catalog.utils, 'remove_prefix', mocker.MagicMock())
monkeypatch.setattr(catalog.utils, 'get_data_hash', mocker.MagicMock())
monkeypatch.setattr(catalog.FileSystemCatalog, 'get_catalog_location',
mocker.MagicMock(return_value='this_location'))
class MockFile:
def __init__(self, name):
self.name = name
file1 = MockFile('file1')
file2 = MockFile('file2')
mock_glob = mocker.MagicMock(return_value=[file1, file2])
monkeypatch.setattr(catalog.Path, 'glob', mock_glob)
monkeypatch.setattr(catalog, 'get_run_log_store', mocker.MagicMock())
monkeypatch.setattr(catalog, 'is_catalog_out_of_sync', mocker.MagicMock(return_value=True))
mock_copy = mocker.MagicMock()
monkeypatch.setattr(catalog.shutil, 'copy', mock_copy)
monkeypatch.setattr(catalog, 'BaseCatalog', mocker.MagicMock())
catalog_handler = catalog.FileSystemCatalog(config={'type': 'file-system'})
catalog_handler.put('testing', run_id='dummy_run_id')
assert mock_copy.call_count == 2
mock_copy.assert_any_call(file1, catalog.Path('this_location') / 'dummy_run_id')
mock_copy.assert_any_call(file2, catalog.Path('this_location') / 'dummy_run_id')
def test_file_system_catalog_put_does_not_copy_files_from_compute_folder_to_catalog_if_synced_same(mocker, monkeypatch):
monkeypatch.setattr(catalog.utils, 'safe_make_dir', mocker.MagicMock())
monkeypatch.setattr(catalog.utils, 'does_dir_exist', mocker.MagicMock(return_value=True))
monkeypatch.setattr(catalog.utils, 'remove_prefix', mocker.MagicMock())
monkeypatch.setattr(catalog.utils, 'get_data_hash', mocker.MagicMock())
monkeypatch.setattr(catalog.FileSystemCatalog, 'get_catalog_location',
mocker.MagicMock(return_value='this_location'))
class MockFile:
def __init__(self, name):
self.name = name
file1 = MockFile('file1')
file2 = MockFile('file2')
mock_glob = mocker.MagicMock(return_value=[file1, file2])
monkeypatch.setattr(catalog.Path, 'glob', mock_glob)
monkeypatch.setattr(catalog, 'get_run_log_store', mocker.MagicMock())
monkeypatch.setattr(catalog, 'is_catalog_out_of_sync', mocker.MagicMock(return_value=False))
mock_copy = mocker.MagicMock()
monkeypatch.setattr(catalog.shutil, 'copy', mock_copy)
monkeypatch.setattr(catalog, 'BaseCatalog', mocker.MagicMock())
catalog_handler = catalog.FileSystemCatalog(config={'type': 'file-system'})
catalog_handler.put('testing', run_id='dummy_run_id')
assert mock_copy.call_count == 0
def test_file_system_catalog_put_uses_compute_folder_by_default(monkeypatch, mocker):
mock_safe_make_dir = mocker.MagicMock()
monkeypatch.setattr(catalog.utils, 'safe_make_dir', mock_safe_make_dir)
mock_does_dir_exist = mocker.MagicMock(side_effect=Exception())
monkeypatch.setattr(catalog.utils, 'does_dir_exist', mock_does_dir_exist)
monkeypatch.setattr(catalog.FileSystemCatalog, 'get_catalog_location',
mocker.MagicMock(return_value='this_location'))
monkeypatch.setattr(catalog, 'BaseCatalog', mocker.MagicMock())
catalog_handler = catalog.FileSystemCatalog(config={'type': 'file-system'})
with pytest.raises(Exception):
catalog_handler.put('testing', run_id='dummy_run_id')
mock_does_dir_exist.assert_called_once_with('data')
def test_file_system_catalog_put_uses_compute_folder_provided(monkeypatch, mocker):
mock_safe_make_dir = mocker.MagicMock()
monkeypatch.setattr(catalog.utils, 'safe_make_dir', mock_safe_make_dir)
mock_does_dir_exist = mocker.MagicMock(side_effect=Exception())
monkeypatch.setattr(catalog.utils, 'does_dir_exist', mock_does_dir_exist)
monkeypatch.setattr(catalog.FileSystemCatalog, 'get_catalog_location',
mocker.MagicMock(return_value='this_location'))
monkeypatch.setattr(catalog, 'BaseCatalog', mocker.MagicMock())
catalog_handler = catalog.FileSystemCatalog(config={'type': 'file-system'})
with pytest.raises(Exception):
catalog_handler.put('testing', run_id='dummy_run_id', compute_data_folder='not_data')
mock_does_dir_exist.assert_called_once_with('not_data')
def test_file_system_catalog_put_raises_exception_if_compute_data_folder_does_not_exist(monkeypatch, mocker):
mock_safe_make_dir = mocker.MagicMock()
monkeypatch.setattr(catalog.utils, 'safe_make_dir', mock_safe_make_dir)
mock_does_dir_exist = mocker.MagicMock(return_value=False)
monkeypatch.setattr(catalog.utils, 'does_dir_exist', mock_does_dir_exist)
monkeypatch.setattr(catalog.FileSystemCatalog, 'get_catalog_location',
mocker.MagicMock(return_value='this_location'))
monkeypatch.setattr(catalog, 'BaseCatalog', mocker.MagicMock())
catalog_handler = catalog.FileSystemCatalog(config={'type': 'file-system'})
with pytest.raises(Exception):
catalog_handler.put('testing', run_id='dummy_run_id', compute_data_folder='this_compute_folder')
def test_file_system_catalog_put_creates_catalog_location_using_run_id(monkeypatch, mocker):
mock_safe_make_dir = mocker.MagicMock()
monkeypatch.setattr(catalog.utils, 'safe_make_dir', mock_safe_make_dir)
mock_does_dir_exist = mocker.MagicMock(side_effect=Exception())
monkeypatch.setattr(catalog.utils, 'does_dir_exist', mock_does_dir_exist)
monkeypatch.setattr(catalog.FileSystemCatalog, 'get_catalog_location',
mocker.MagicMock(return_value='this_location'))
monkeypatch.setattr(catalog, 'BaseCatalog', mocker.MagicMock())
catalog_handler = catalog.FileSystemCatalog(config={'type': 'file-system'})
with pytest.raises(Exception):
catalog_handler.put('testing', run_id='dummy_run_id')
mock_safe_make_dir.assert_called_once_with(catalog.Path('this_location') / 'dummy_run_id')
def test_file_system_sync_between_runs_raises_exception_if_previous_catalog_does_not_exist(monkeypatch, mocker):
mock_safe_make_dir = mocker.MagicMock()
monkeypatch.setattr(catalog.utils, 'safe_make_dir', mock_safe_make_dir)
mock_does_dir_exist = mocker.MagicMock(return_value=False)
monkeypatch.setattr(catalog.utils, 'does_dir_exist', mock_does_dir_exist)
monkeypatch.setattr(catalog.FileSystemCatalog, 'get_catalog_location',
mocker.MagicMock(return_value='this_location'))
monkeypatch.setattr(catalog, 'BaseCatalog', mocker.MagicMock())
catalog_handler = catalog.FileSystemCatalog(config={'type': 'file-system'})
with pytest.raises(Exception):
catalog_handler.sync_between_runs('previous', 'current')
# def test_file_system_sync_between_runs_copies_all_files_from_old_to_new(monkeypatch, mocker):
# monkeypatch.setattr(catalog.utils, 'safe_make_dir', mocker.MagicMock())
# mock_does_dir_exist = mocker.MagicMock(return_value=True)
# monkeypatch.setattr(catalog.utils, 'does_dir_exist', mock_does_dir_exist)
# monkeypatch.setattr(catalog.FileSystemCatalog, 'get_catalog_location',
# mocker.MagicMock(return_value='this_location'))
# monkeypatch.setattr(catalog, 'BaseCatalog', mocker.MagicMock())
# mock_path = mocker.MagicMock()
# monkeypatch.setattr(catalog, 'Path', mock_path)
# mock_path.glob.return_value = ['file1', 'file2']
# mock_copy = mocker.MagicMock()
# monkeypatch.setattr(catalog.shutil, 'copy', mock_copy)
# catalog_handler = catalog.FileSystemCatalog(config={'type': 'file-system'})
# print(mock_path.__dict__)
# assert mock_copy.call_count == 2
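# A hedged reference implementation (not part of the suite above) of the sync
# rule these tests exercise: an item is out of sync unless some synced entry
# matches both its relative path and its data hash.
def reference_is_catalog_out_of_sync(catalog_item, synced_catalog):
    return not any(
        item.catalog_relative_path == catalog_item.catalog_relative_path
        and item.data_hash == catalog_item.data_hash
        for item in synced_catalog
    )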
|
11481087
|
import attr
import numpy as np
import collections
import itertools
import pyrsistent
import json
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from tensor2struct.models import abstract_preproc, decoder
from tensor2struct.modules import attention, variational_lstm, lstm, embedders, rat
from tensor2struct.utils import serialization, vocab, registry, bpe
from tensor2struct.models.ast_decoder.utils import lstm_init
import logging
logger = logging.getLogger("tensor2struct")
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1)
nll_loss = -lprobs.gather(dim=-1, index=target)
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze(-1)
smooth_loss = smooth_loss.squeeze(-1)
if reduce:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = epsilon / (lprobs.size(-1) - 1)
loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss
return loss, nll_loss
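def _demo_label_smoothed_nll_loss():
    # A minimal sketch (not part of the module API): toy shapes only,
    # 2 target tokens over a 5-word vocabulary, epsilon=0.1.
    lprobs = F.log_softmax(torch.randn(2, 5), dim=-1)
    target = torch.tensor([1, 3])
    loss, nll_loss = label_smoothed_nll_loss(lprobs, target, epsilon=0.1)
    return loss, nll_loss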
@attr.s
class DecItem:
actions = attr.ib()
@registry.register("decoder", "batched_lstm_dec")
class Decoder(torch.nn.Module):
batched = True
Preproc = decoder.DecoderPreproc
def __init__(
self,
device,
preproc,
action_emb_size,
desc_attn="bahdanau",
enc_recurrent_size=256,
recurrent_size=256,
dropout=0.1,
input_feed=True,
tie_weights=False,
layernorm=False,
label_smooth=0.0,
):
super().__init__()
self._device = device
self.preproc = preproc
self.vocab = preproc.vocab
self.dropout = dropout
self.action_emb_size = action_emb_size
self.enc_recurrent_size = enc_recurrent_size
self.recurrent_size = recurrent_size
self.input_feed = input_feed
self.tie_weights = tie_weights
self.label_smooth = label_smooth
self.layernorm = layernorm
# attention
self.attn_type = desc_attn
self.desc_attn = attention.BahdanauAttention(
query_size=self.recurrent_size,
value_size=self.enc_recurrent_size,
proj_size=50,
)
self.embedder = embedders.LookupEmbeddings(
device=self._device,
vocab=self.vocab,
embedder=None,
emb_size=self.action_emb_size,
learnable_words=None,
)
if self.input_feed:
self.h_input_size = recurrent_size
self.feat2hidden = nn.Linear(
enc_recurrent_size + recurrent_size, self.h_input_size, bias=False
)
self.hidden2action = nn.Linear(
self.h_input_size, len(self.vocab), bias=False
)
self.lstm = lstm.UniLSTM(
input_size=self.action_emb_size + self.h_input_size,
hidden_size=recurrent_size,
dropout=self.dropout,
layernorm=self.layernorm,
)
else:
self.h_input_size = recurrent_size
self.hidden2action = nn.Linear(
self.h_input_size, len(self.vocab), bias=False
)
self.lstm = lstm.UniLSTM(
input_size=self.action_emb_size,
hidden_size=recurrent_size,
dropout=self.dropout,
layernorm=self.layernorm,
)
if tie_weights:
self.hidden2action.weight = self.embedder.embedding.weight
def forward(self, dec_input, enc_output, compute_loss=True, infer=False):
ret_dic = {}
if compute_loss:
ret_dic["loss"] = self.compute_loss(dec_input, enc_output)
if infer:
traversal, initial_choices_list = self.begin_batched_inference(
dec_input, enc_output
)
ret_dic["initial_state"] = traversal
ret_dic["initial_choices_list"] = initial_choices_list
return ret_dic
def compute_loss(self, dec_input, enc_output):
if self.input_feed:
loss = self.compute_loss_with_input_feeding(dec_input, enc_output)
else:
loss = self.compute_loss_without_input_feeding(dec_input, enc_output)
return loss
def compute_loss_without_input_feeding(self, dec_input, enc_output):
"""
TODO: haven't test this function yet
"""
logger.warn("Decoder without input feeding has not been tested")
input_actions = [item["actions"][:-1] for item in dec_input]
embed = self.embedder(input_actions)
init_hidden = enc_output.src_summary
dec_rep_packed, _ = self.lstm(embed.ps, hidden_state=(init_hidden, init_hidden))
dec_rep = dec_rep_packed.data
# attention
context, _ = self.desc_attn(
dec_rep, enc_output.src_memory.expand(len(input_actions), -1, -1),
)
feat = torch.cat([dec_rep, context], dim=1)
logits = self.score_action(feat)
ignore_index = len(self.vocab) + 1
        gold = self.obtain_gold_action(input_actions, ignore_index=ignore_index)
loss = F.cross_entropy(logits, gold, reduction="sum", ignore_index=ignore_index)
return loss
def compute_loss_with_input_feeding(self, dec_input, enc_output):
input_actions = [item["actions"][:-1] for item in dec_input]
embed_packed = self.embedder(input_actions)
embed_batched, tgt_lengths = embed_packed.pad(batch_first=True)
bs = len(dec_input)
init_hidden = enc_output.src_summary
assert len(self.lstm.lstm_cells) == 1
lstm_cell = self.lstm.lstm_cells[0]
lstm_cell.set_dropout_masks(batch_size=bs)
h_input = torch.zeros([bs, self.h_input_size]).to(self._device)
recurrent_state = (init_hidden, init_hidden)
embed_batch_second = embed_batched.transpose(0, 1)
logits_list = []
for i in range(max(tgt_lengths)):
embed = embed_batch_second[i]
_input = torch.cat([embed, h_input], dim=1)
recurrent_state = lstm_cell(_input, recurrent_state)
h = recurrent_state[0]
c, _ = self.desc_attn(h, enc_output.src_memory)
h_input = torch.tanh(self.feat2hidden(torch.cat([c, h], dim=1)))
logits = torch.log_softmax(self.hidden2action(h_input), dim=1)
logits_list.append(logits)
logits = torch.stack(logits_list, dim=1) # bs * seq_len * vocab_len
ignore_index = self.vocab.index(vocab.BOS)
target_actions = [item["actions"][1:] for item in dec_input]
target_idx = self.actions_list_to_idx(target_actions, ignore_index)
# TODO: make token_nll and seq_nll more explicit
if self.label_smooth > 0:
smooth_loss, nll_loss = label_smoothed_nll_loss(
logits.view(-1, len(self.vocab)),
target_idx.view(-1),
self.label_smooth,
ignore_index=ignore_index,
)
num_tgts = sum(tgt_lengths)
if self.training:
loss = smooth_loss / num_tgts
else:
loss = nll_loss / num_tgts
        else:
            # `logits` above are already log-probabilities, so use nll_loss
            # rather than cross_entropy (which would apply log_softmax again)
            sum_loss = F.nll_loss(
                logits.view(-1, len(self.vocab)),
                target_idx.view(-1),
                reduction="sum",
                ignore_index=ignore_index,
            )
loss = sum_loss / bs
return loss
def actions_list_to_idx(self, actions_list, ignore_index):
max_len = max(len(al) for al in actions_list)
res_list = []
for al in actions_list:
true_action_ids = [self.vocab.index(a) for a in al]
assert ignore_index not in true_action_ids
action_ids = true_action_ids + [ignore_index] * (
max_len - len(al)
)
al_t = torch.LongTensor(action_ids)
res_list.append(al_t)
res = torch.stack(res_list, dim=0).to(self._device)
return res
def begin_batched_inference(self, orig_item, enc_state):
inferer = BatchedInference(self, enc_state)
choices = inferer.step()
return inferer, choices
def begin_inference(self, orig_item, enc_state):
inferer = decoder.Inference(self, enc_state)
choices = inferer.step()
return inferer, choices
class BatchedInference:
batched = True
def __init__(self, model, enc_output):
if model is None:
return None
self.model = model
self.vocab = model.vocab
self.embedder = model.embedder.embedding
self.src_memory = enc_output.src_memory
self._device = model._device
self.bs = self.src_memory.size()[0]
self.rnn_cell = model.lstm.lstm_cells[0]
self.rnn_cell.set_dropout_masks(batch_size=self.bs)
# init state
init_hidden = enc_output.src_summary
self.recurrent_state = (init_hidden, init_hidden)
self.h_input = torch.zeros([self.bs, self.model.h_input_size]).to(self._device)
        self.actions_list = [pyrsistent.pvector() for _ in range(self.bs)]
def clone(self):
other = self.__class__(None, None)
other.model = self.model
other.embedder = self.embedder
other.rnn_cell = self.rnn_cell
other.vocab = self.vocab
other._device = self._device
other.bs = self.bs
other.src_memory = self.src_memory
other.recurrent_state = self.recurrent_state
other.h_input = self.h_input
other.actions_list = self.actions_list
return other
def step(self, action=None):
if self.model.input_feed:
return self.step_with_input_feed(action)
else:
return self.step_without_input_feed(action)
def step_without_input_feed(self, action=None):
raise NotImplementedError
def step_with_input_feed(self, actions=None):
if actions is None:
actions = [vocab.BOS] * self.bs
for i, ac in enumerate(actions):
self.actions_list[i] = self.actions_list[i].append(ac)
if all(vocab.EOS in ac for ac in self.actions_list):
return None
action_idx = torch.LongTensor(
[self.vocab.index(action) for action in actions]
).to(self.model._device)
action_emb = self.embedder(action_idx)
lstm_input = torch.cat([action_emb, self.h_input], dim=1)
new_state = self.rnn_cell(lstm_input, self.recurrent_state)
self.recurrent_state = new_state
hidden_state = new_state[0]
context, _ = self.model.desc_attn(hidden_state, self.src_memory)
h_input = torch.tanh(
self.model.feat2hidden(torch.cat([context, hidden_state], dim=1))
)
scores = F.log_softmax(self.model.hidden2action(h_input), dim=1).squeeze(0)
self.h_input = h_input
res = []
num_k = 60 if len(self.model.vocab) > 60 else len(self.model.vocab)
topk_values, topk_indices = scores.topk(k=num_k, dim=1)
for b_idx in range(self.bs):
candidates = [
(self.vocab[i.item()], scores[b_idx, i]) for i in topk_indices[b_idx]
]
res.append(candidates)
return res
def finalize(self,):
res = []
for actions in self.actions_list:
if vocab.EOS in actions:
eos_idx = actions.index(vocab.EOS)
else:
eos_idx = len(actions) - 1
code = " ".join(actions[1:eos_idx])
res.append(code)
return res
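# A hedged sketch of a batched greedy decode loop over BatchedInference above
# ('model' is assumed to be a trained Decoder, 'enc_output' its encoder output).
def batched_greedy_decode(model, enc_output, max_steps=100):
    inferer, choices_list = model.begin_batched_inference(None, enc_output)
    for _ in range(max_steps):
        if choices_list is None:  # every sequence has emitted EOS
            break
        best_actions = [candidates[0][0] for candidates in choices_list]
        choices_list = inferer.step(best_actions)
    return inferer.finalize()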
@registry.register("decoder", "batched_transformer_dec")
class TransformerDecoder(torch.nn.Module):
batched = True
Preproc = decoder.DecoderPreproc
def __init__(
self,
device,
preproc,
action_emb_size,
num_layers=2,
num_heads=4,
enc_recurrent_size=256,
recurrent_size=256,
dropout=0.1,
tie_weights=False,
label_smooth=0.0,
):
super().__init__()
self._device = device
self.preproc = preproc
self.vocab = preproc.vocab
self.dropout = dropout
self.action_emb_size = action_emb_size
self.enc_recurrent_size = enc_recurrent_size
self.recurrent_size = recurrent_size
self.tie_weights = tie_weights
self.label_smooth = label_smooth
self.embedder = embedders.LookupEmbeddings(
device=device,
vocab=self.vocab,
embedder=None,
emb_size=self.action_emb_size,
learnable_words=None,
)
self.decoder = rat.TransformerDecoder(
device=device,
num_layers=num_layers,
num_heads=num_heads,
hidden_size=recurrent_size,
dropout=dropout
)
self.score_fn = torch.nn.Linear(recurrent_size, len(self.vocab))
if tie_weights:
self.score_fn.weight = self.embedder.embedding.weight
def forward(self, dec_input, enc_output, compute_loss=True, infer=False):
ret_dic = {}
if compute_loss:
ret_dic["loss"] = self.compute_loss(dec_input, enc_output)
if infer:
traversal, initial_choices_list = self.begin_batched_inference(
dec_input, enc_output
)
ret_dic["initial_state"] = traversal
ret_dic["initial_choices_list"] = initial_choices_list
return ret_dic
def compute_loss(self, dec_input, enc_output):
bs = len(dec_input)
src_memory = enc_output.src_memory
src_mask = rat.get_src_attn_mask(enc_output.lengths).to(self._device)
input_actions = [item["actions"][:-1] for item in dec_input]
embed_packed = self.embedder(input_actions)
tgt_emb, tgt_lengths = embed_packed.pad(batch_first=True)
ignore_index = len(self.vocab) + 1
tgt_input_idx = self.actions_list_to_idx(input_actions, ignore_index)
tgt_mask = rat.make_std_mask(tgt_input_idx, ignore_index).to(self._device)
output_actions = [item["actions"][1:] for item in dec_input]
tgt_output_idx = self.actions_list_to_idx(output_actions, ignore_index)
target_enc = self.decoder(tgt_emb, src_memory, src_mask, tgt_mask)
logits = torch.log_softmax(self.score_fn(target_enc), dim=-1)
if self.label_smooth > 0:
num_tgts = sum(tgt_lengths)
smooth_loss, nll_loss = label_smoothed_nll_loss(
logits.view(-1, len(self.vocab)),
tgt_output_idx.view(-1),
self.label_smooth,
ignore_index=ignore_index,
)
if self.training:
loss = smooth_loss / num_tgts
else:
loss = nll_loss / num_tgts
        else:
            # `logits` above are already log-probabilities, so use nll_loss
            # rather than cross_entropy (which would apply log_softmax again)
            sum_loss = F.nll_loss(
                logits.view(-1, len(self.vocab)),
                tgt_output_idx.view(-1),
                reduction="sum",
                ignore_index=ignore_index,
            )
loss = sum_loss / bs
return loss
def actions_list_to_idx(self, actions_list, ignore_index):
max_len = max(len(al) for al in actions_list)
res_list = []
for al in actions_list:
action_ids = [self.vocab.index(a) for a in al] + [ignore_index] * (
max_len - len(al)
)
al_t = torch.LongTensor(action_ids)
res_list.append(al_t)
res = torch.stack(res_list, dim=0).to(self._device)
return res
def begin_batched_inference(self, dec_input, enc_state):
inferer = BatchedTransformerInference(self, enc_state)
choices = inferer.step()
return inferer, choices
def begin_inference(self, orig_item, enc_state):
inferer = UnBatchedTransformerInference(self, enc_state)
choices = inferer.step()
return inferer, choices
class BatchedTransformerInference:
batched = True
def __init__(self, model, enc_output):
if model is None:
return None
self.model = model
self.vocab = model.vocab
self._device = model._device
self.src_memory = enc_output.src_memory
self.src_mask = rat.get_src_attn_mask(enc_output.lengths).to(self._device)
self.bs = self.src_memory.size()[0]
self.actions_list = [pyrsistent.pvector() for _ in range(self.bs)]
def clone(self):
other = self.__class__(None, None)
other.model = self.model
other.vocab = self.vocab
other._device = self._device
other.bs = self.bs
other.src_memory = self.src_memory
other.src_mask = self.src_mask
other.actions_list = self.actions_list
return other
def step(self, actions=None):
if actions is None:
actions = [vocab.BOS] * self.bs
for i, ac in enumerate(actions):
self.actions_list[i] = self.actions_list[i].append(ac)
if all(vocab.EOS in ac for ac in self.actions_list):
return None
action_emb_packed = self.model.embedder(self.actions_list)
action_emb, _ = action_emb_packed.pad(batch_first=True)
# TODO: use incremental decoding
tgt_mask = rat.subsequent_mask(len(self.actions_list[0])).to(self._device)
tgt_enc = self.model.decoder(
action_emb, self.src_memory, self.src_mask, tgt_mask
)
tgt_enc = tgt_enc[:, -1]
scores = F.log_softmax(self.model.score_fn(tgt_enc), dim=1)
res = []
# TODO: this num should be set per task, not fixed
num_k = 3 if len(self.model.vocab) > 3 else len(self.model.vocab)
topk_values, topk_indices = scores.topk(k=num_k, dim=1)
for b_idx in range(self.bs):
candidates = [
(self.vocab[i.item()], scores[b_idx, i]) for i in topk_indices[b_idx]
]
res.append(candidates)
return res
def finalize(self,):
res = []
for actions in self.actions_list:
if vocab.EOS in actions:
eos_idx = actions.index(vocab.EOS)
else:
eos_idx = len(actions) - 1
code = " ".join(actions[1:eos_idx]) # exclude bos and eos
res.append(code)
return res
class UnBatchedTransformerInference:
batched = False
def __init__(self, model, enc_output):
if model is None:
return None
self.model = model
self.vocab = model.vocab
self._device = model._device
self.src_memory = enc_output.src_memory
self.src_mask = rat.get_src_attn_mask(enc_output.lengths).to(self._device)
self.actions = pyrsistent.pvector()
def clone(self):
other = self.__class__(None, None)
other.model = self.model
other.vocab = self.vocab
other._device = self._device
other.src_memory = self.src_memory
other.src_mask = self.src_mask
other.actions = self.actions
return other
def step(self, action=None):
if action is None:
action = vocab.BOS
self.actions = self.actions.append(action)
if action == vocab.EOS:
return None
action_emb_packed = self.model.embedder([self.actions])
action_emb, _ = action_emb_packed.pad(batch_first=True)
tgt_mask = rat.subsequent_mask(len(self.actions)).to(self._device)
tgt_enc = self.model.decoder(
action_emb, self.src_memory, self.src_mask, tgt_mask
)
tgt_enc = tgt_enc[:, -1]
scores = F.log_softmax(self.model.score_fn(tgt_enc), dim=1)
num_k = 100 if len(self.model.vocab) > 100 else len(self.model.vocab)
topk_values, topk_indices = scores.topk(k=num_k, dim=1)
candidates = [(self.vocab[i.item()], scores[0, i]) for i in topk_indices[0]]
return candidates
def finalize(self,):
actions = [a for a in self.actions]
code = " ".join(actions[1:-1]) # exclude bos and eos
return actions, code
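# A hedged sketch of a greedy decode loop over the unbatched inference class
# above ('model' is assumed to be a trained TransformerDecoder and 'enc_output'
# the matching encoder output).
def greedy_decode(model, enc_output, max_steps=100):
    inferer, choices = model.begin_inference(None, enc_output)
    for _ in range(max_steps):
        if choices is None:  # EOS was emitted
            break
        best_action, _score = choices[0]  # topk candidates come back sorted
        choices = inferer.step(best_action)
    return inferer.finalize()  # (actions, code), per finalize() above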
|
11481094
|
import torch
from torch import nn
import tqdm
from typing import Callable, Generator
HUGE = 1e15
def beam_search(model_func: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
beam_size: int,
max_len: int,
eos_id: int,
bos_id: int,
dataloader: Generator[torch.Tensor, None, None]):
return_outputs = []
return_logprobs = []
for batch in dataloader:
device = batch.get_device()
batch_size = batch.size()[0]
beam_outputs = torch.full(
(batch_size, 1, 1), bos_id, dtype=torch.long).to(device)
beam_inputs = torch.full(
(batch_size, ), bos_id, dtype=torch.long).to(device)
beam_hiddens = batch
beam_logprobs = torch.zeros(batch_size, 1).to(device)
finish_mask = torch.zeros(batch_size, 1).to(device)
for i in range(max_len):
outputs, beam_hiddens_ = model_func(beam_hiddens, beam_inputs)
vocabulary = outputs.size()[-1]
# (B, b) -> (B, b, V)
beam_logprobs = beam_logprobs.unsqueeze(
2).repeat(1, 1, vocabulary)
# (B x b, V) -> (B, b, V)
outputs = outputs.view(beam_logprobs.size())
finish_mask = finish_mask.unsqueeze(2).repeat(1, 1, vocabulary)
outputs = outputs * (1 - finish_mask) - HUGE * finish_mask
outputs[:, :, eos_id] = outputs[:, :, eos_id] * \
(1 - finish_mask[:, :, 0])
beam_logprobs = (beam_logprobs + outputs).view(batch_size, -1)
beam_logprobs, indices = torch.topk(beam_logprobs, beam_size)
beam_indices = indices // vocabulary
word_indices = indices % vocabulary
beam_inputs = word_indices.view(-1)
finish_mask = (word_indices == eos_id).float()
# (B, b, i+1) -> (B, b, i+1)
beam_outputs = torch.gather(
beam_outputs, 1, beam_indices.unsqueeze(2).repeat(1, 1, i+1))
# cat((B, b, i+1), (B, b, 1)) -> (B, b, i+2)
beam_outputs = torch.cat(
[beam_outputs, word_indices.unsqueeze(2)], dim=2)
# (B, b, H) -> (B, b, H) -> (B x b, H)
hid_size = beam_hiddens_.size()[-1]
beam_hiddens = torch.gather(
beam_hiddens_.view(batch_size, -1, hid_size),
1,
beam_indices.unsqueeze(2).repeat(1, 1, hid_size))\
.view(-1, hid_size)
return_outputs.append(beam_outputs)
return_logprobs.append(beam_logprobs)
return_outputs = torch.cat(return_outputs, dim=0)
return_logprobs = torch.cat(return_logprobs, dim=0)
return (return_outputs, return_logprobs)
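def _demo_topk_index_split(batch_size=2, beam_size=3, vocabulary=5):
    # A minimal sketch (illustrative sizes) of the index arithmetic used in
    # beam_search above: flatten (B, b, V) scores, take the top-k, then split
    # the flat indices back into beam indices and word indices.
    scores = torch.randn(batch_size, beam_size, vocabulary).view(batch_size, -1)
    topk_logprobs, indices = torch.topk(scores, beam_size)
    beam_indices = indices // vocabulary  # which beam each hypothesis extends
    word_indices = indices % vocabulary   # which token was appended
    return beam_indices, word_indices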
class LanguageModel(nn.Module):
def __init__(self, vocabulary, hidden_size):
super(LanguageModel, self).__init__()
self.word_emb = nn.Embedding(vocabulary, hidden_size)
self.gru = nn.GRUCell(hidden_size, hidden_size)
self.output = nn.Sequential(nn.Linear(hidden_size, vocabulary),
nn.LogSoftmax(dim=-1))
def forward(self, hidden, inputs):
hid = self.gru(self.word_emb(inputs), hidden)
return self.output(hid), hid
def generating_random_data(batch_size: int, hidden_size: int, size: int):
for i in tqdm.tqdm(range(size)):
yield torch.randn(batch_size, hidden_size).cuda()
if __name__ == "__main__":
language_model = LanguageModel(200, 128).cuda()
with torch.no_grad():
beam_search(language_model, 10, 15, 99, 0,
generating_random_data(128, 128, 1000))
|
11481101
|
from typing import Generator
from evdev import InputDevice, list_devices, ecodes
class Controller:
# xbox one controller joystick max range (negative + positive range)
JS_MAX_RANGE = 65534
# xbox one controller joystick center range
JS_CENTER = JS_MAX_RANGE / 2
# xbox one controller trigger max range (pressed in all of the way)
TRIG_MAX_RANGE = 1023
# button mappings for xbox one controller
BUTTONS = {
0: "LSX",
1: "LSY",
2: "RSV",
4: "SCAN",
5: "RSH",
9: "RT",
10: "LT",
16: "DPADHOR",
17: "DPADVERT",
158: "OPTION",
172: "HOME",
304: "A",
305: "B",
308: "Y",
307: "X",
310: "LB",
311: "UNK",
315: "START",
317: "LSB",
318: "RSB",
}
def __init__(self):
self.controller = InputDevice(list_devices()[0])
    def get_input(self) -> Generator[tuple, None, None]:
        """
        Get controller input from linux input devices.
        """
        for e in self.controller.read_loop():
            # skip event codes that are not in the button mapping (avoids a KeyError)
            button = self.BUTTONS.get(e.code)
            if button is None:
                continue
            if e.type == ecodes.EV_KEY:
                val = e.value
                yield (button, val)
            # for abs values (joystick, triggers)
            if e.type == ecodes.EV_ABS:
                val = e.value
# getting left joystick x-axis values (used for turning)
if button == "LSX":
                    # Splitting the maximum joystick range into negative and positive
                    # values. Negative = left, Positive = right
val = val - self.JS_CENTER
yield (button, val)
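# Hypothetical usage sketch (requires a controller exposed through /dev/input;
# prints events until interrupted).
if __name__ == "__main__":
    pad = Controller()
    for button, value in pad.get_input():
        print(button, value)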
|