| id (string, lengths 3-8) | content (string, lengths 100-981k) |
|---|---|
1712700
|
from typing import List
import click
from bubop import (
check_optional_mutually_exclusive,
format_dict,
log_to_syslog,
logger,
loguru_tqdm_sink,
)
from taskwarrior_syncall import inform_about_app_extras
try:
from taskwarrior_syncall import GCalSide
except ImportError:
inform_about_app_extras(["google"])
from taskwarrior_syncall import (
Aggregator,
TaskWarriorSide,
__version__,
cache_or_reuse_cached_combination,
convert_gcal_to_tw,
convert_tw_to_gcal,
fetch_app_configuration,
get_resolution_strategy,
inform_about_combination_name_usage,
list_named_combinations,
opt_combination,
opt_custom_combination_savename,
opt_gcal_calendar,
opt_google_oauth_port,
opt_google_secret_override,
opt_list_combinations,
opt_resolution_strategy,
opt_tw_project,
opt_tw_tags,
report_toplevel_exception,
)
@click.command()
# google calendar options ---------------------------------------------------------------------
@opt_gcal_calendar()
@opt_google_secret_override()
@opt_google_oauth_port()
# taskwarrior options -------------------------------------------------------------------------
@opt_tw_tags()
@opt_tw_project()
# misc options --------------------------------------------------------------------------------
@opt_list_combinations("TW", "Google Calendar")
@opt_resolution_strategy()
@opt_combination("TW", "Google Calendar")
@opt_custom_combination_savename("TW", "Google Calendar")
@click.option("-v", "--verbose", count=True)
@click.version_option(__version__)
def main(
gcal_calendar: str,
google_secret: str,
oauth_port: int,
tw_tags: List[str],
tw_project: str,
resolution_strategy: str,
verbose: int,
combination_name: str,
custom_combination_savename: str,
do_list_combinations: bool,
):
"""Synchronize calendars from your Google Calendar with filters from Taskwarrior.
The list of TW tasks is determined by a combination of TW tags and a TW project, while the
calendar in GCal should be specified by its name. If it doesn't exist, it will be created.
"""
# setup logger ----------------------------------------------------------------------------
loguru_tqdm_sink(verbosity=verbose)
log_to_syslog(name="tw_gcal_sync")
logger.debug("Initialising...")
inform_about_config = False
if do_list_combinations:
list_named_combinations(config_fname="tw_gcal_configs")
return 0
# cli validation --------------------------------------------------------------------------
check_optional_mutually_exclusive(combination_name, custom_combination_savename)
combination_of_tw_project_tags_and_gcal_calendar = any(
[
tw_project,
tw_tags,
gcal_calendar,
]
)
check_optional_mutually_exclusive(
combination_name, combination_of_tw_project_tags_and_gcal_calendar
)
# existing combination name is provided ---------------------------------------------------
if combination_name is not None:
app_config = fetch_app_configuration(
config_fname="tw_gcal_configs", combination=combination_name
)
tw_tags = app_config["tw_tags"]
tw_project = app_config["tw_project"]
gcal_calendar = app_config["gcal_calendar"]
# combination manually specified ----------------------------------------------------------
else:
inform_about_config = True
combination_name = cache_or_reuse_cached_combination(
config_args={
"gcal_calendar": gcal_calendar,
"tw_project": tw_project,
"tw_tags": tw_tags,
},
config_fname="tw_gcal_configs",
custom_combination_savename=custom_combination_savename,
)
# at least one of tw_tags, tw_project should be set ---------------------------------------
if not tw_tags and not tw_project:
raise RuntimeError(
"You have to provide at least one valid tag or a valid project ID to use for"
" the synchronization"
)
# announce configuration ------------------------------------------------------------------
logger.info(
format_dict(
header="Configuration",
items={
"TW Tags": tw_tags,
"TW Project": tw_project,
"Google Calendar": gcal_calendar,
},
prefix="\n\n",
suffix="\n",
)
)
# initialize sides ------------------------------------------------------------------------
tw_side = TaskWarriorSide(tags=tw_tags, project=tw_project)
gcal_side = GCalSide(
calendar_summary=gcal_calendar, oauth_port=oauth_port, client_secret=google_secret
)
# sync ------------------------------------------------------------------------------------
try:
with Aggregator(
side_A=gcal_side,
side_B=tw_side,
converter_B_to_A=convert_tw_to_gcal,
converter_A_to_B=convert_gcal_to_tw,
resolution_strategy=get_resolution_strategy(
resolution_strategy, side_A_type=type(gcal_side), side_B_type=type(tw_side)
),
config_fname=combination_name,
ignore_keys=(
(),
("due", "end", "entry", "modified", "urgency"),
),
) as aggregator:
aggregator.sync()
except KeyboardInterrupt:
logger.error("Exiting...")
return 1
except:
report_toplevel_exception(is_verbose=verbose >= 1)
return 1
if inform_about_config:
inform_about_combination_name_usage(combination_name)
return 0
if __name__ == "__main__":
main()
|
1712807
|
import sys
from itertools import compress, chain
from ..exceptions import *
from .valuechange import ValueChange
class Wire:
def __init__(self, name, width=1):
self.name = name
self._data = ValueChange(width)
@classmethod
def from_data(cls, name, data, width=1):
wire = cls(name=name, width=width)
for key in compress(
range(len(data)),
map(lambda pair: pair[0] != pair[1], zip(chain([None], data), data)),
):
wire[key] = data[key]
return wire
def __setitem__(self, key, value):
self._data[key] = value
def __getitem__(self, key):
return self._data.get(key)
def __delitem__(self, key):
del self._data[key] # throws error if not present
def width(self):
return self._data.width
def length(self):
"""Returns the time duration of the wire."""
return self._data.length()
def end(self):
"""Returns the final value on the wire"""
return self._data[self._data.length()]
def times(self, length=0):
"""Returns a list of times with high value on the wire."""
return self._data.search(end=max(length, self.length()))
@classmethod
def const(cls, value):
wire = cls(name=f"c_{value}", width=0)
wire[0] = value
return wire
@classmethod
def time(cls, value):
wire = cls(name=f"t_{value}", width=1)
wire[0] = 0
wire[value] = 1
wire[value + 1] = 0
return wire
def __invert__(self):
wire = Wire(name="~" + self.name)
wire._data = self._data.__invert__()
return wire
def __neg__(self):
wire = Wire(name="-" + self.name)
wire._data = self._data.__invert__()
return wire
def __and__(self, other):
wire = Wire(name="(" + self.name + " & " + other.name + ")")
wire._data = self._data.__and__(other._data)
return wire
def __or__(self, other):
wire = Wire(name="(" + self.name + " | " + other.name + ")")
wire._data = self._data.__or__(other._data)
return wire
def __xor__(self, other):
wire = Wire(name="(" + self.name + " ^ " + other.name + ")")
wire._data = self._data.__xor__(other._data)
return wire
def __eq__(self, other):
wire = Wire(name="(" + self.name + " == " + other.name + ")")
wire._data = self._data.__eq__(other._data)
return wire
def __ne__(self, other):
wire = Wire(name="(" + self.name + " != " + other.name + ")")
wire._data = self._data.__ne__(other._data)
return wire
def __gt__(self, other):
wire = Wire(name="(" + self.name + " > " + other.name + ")")
wire._data = self._data.__gt__(other._data)
return wire
def __ge__(self, other):
wire = Wire(name="(" + self.name + " >= " + other.name + ")")
wire._data = self._data.__ge__(other._data)
return wire
def __lt__(self, other):
wire = Wire(name="(" + self.name + " < " + other.name + ")")
wire._data = self._data.__lt__(other._data)
return wire
def __le__(self, other):
wire = Wire(name="(" + self.name + " <= " + other.name + ")")
wire._data = self._data.__le__(other._data)
return wire
def __lshift__(self, other):
wire = Wire(name="(" + self.name + " << " + other.name + ")")
wire._data = self._data.__lshift__(other._data)
return wire
def __rshift__(self, other):
wire = Wire(name="(" + self.name + " >> " + other.name + ")")
wire._data = self._data.__rshift__(other._data)
return wire
def __add__(self, other):
wire = Wire(name="(" + self.name + " + " + other.name + ")")
wire._data = self._data.__add__(other._data)
return wire
def __sub__(self, other):
wire = Wire(name="(" + self.name + " - " + other.name + ")")
wire._data = self._data.__sub__(other._data)
return wire
def __mod__(self, other):
wire = Wire(name="(" + self.name + " % " + other.name + ")")
wire._data = self._data.__mod__(other._data)
return wire
def _from(self):
wire = Wire(name="from " + self.name)
wire._data = self._data._from()
return wire
def _after(self):
wire = Wire(name="after " + self.name)
wire._data = self._data._after()
return wire
def _until(self):
wire = Wire(name="until " + self.name)
wire._data = self._data._until()
return wire
def _before(self):
wire = Wire(name="before " + self.name)
wire._data = self._data._before()
return wire
def _next(self, amt=1):
wire = Wire(name="next " + self.name)
wire._data = self._data._next(amt)
return wire
def _prev(self, amt=1):
wire = Wire(name="prev " + self.name)
wire._data = self._data._prev(amt)
return wire
def _acc(self):
wire = Wire(name="acc " + self.name)
wire._data = self._data._acc()
return wire
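
# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how a Wire might be built and combined, assuming that
# ValueChange stores one value per change point and implements the bitwise
# dunder methods used above. The sample data is hypothetical.
def _demo_wires():
    a = Wire.from_data("a", [0, 0, 1, 1, 0])   # change points recorded at t=0, 2, 4
    b = Wire.from_data("b", [0, 1, 1, 0, 0])
    both = a & b                               # new wire named "(a & b)"
    print(both.name)
    print(both.times())                        # times at which the combined wire is high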
|
1712822
|
from unittest import mock
import pytest
from .vcr import replace_auth, vcr
@pytest.fixture
def username():
return "cuca"
@pytest.fixture
def password():
return "<PASSWORD>"
@pytest.fixture
def request_body(username, password):
body = (
"<?xml version=''1.0'' encoding=''utf-8''?>"
'<soap-env:Envelope xmlns:soap-env="http://schemas.xmlsoap.org/soap/envelope/">'
'<soap-env:Body><ns0:buscaEventosLista xmlns:ns0="http://resource.webservice.correios.com.br/">'
"<usuario>{}</usuario><senha>{}senha><tipo>L</tipo><resultado>T</resultado><lingua>101</lingua>"
"<objetos>JB683971943BR</objetos><objetos>JT365572014BR</objetos>"
"</ns0:buscaEventosLista></soap-env:Body></soap-env:Envelope>"
""
)
body = body.format(username, password)
return body.encode()
def test_replace_auth(request_body, username, password):
request = mock.Mock(body=request_body)
body = str(replace_auth(request))
assert username not in body
assert password not in body
def test_vcr_uses_replace_auth():
assert vcr.before_record_request == replace_auth
|
1712837
|
import datetime
from django.test import TestCase
from django.utils import formats
from audit_trail.stringifier import ModelFieldStringifier
from ..models import TestStringifierModel, SomePerson, AzazaField
from audit_trail.models import AuditTrail
class TestModelDefaultFieldStringifier(TestCase):
def get_field(self, field_name):
return TestStringifierModel._meta.get_field(field_name)
def test_char_field(self):
tsm = TestStringifierModel(char='abc')
self.assertEqual(ModelFieldStringifier.stringify(self.get_field('char'), tsm.char), 'abc')
def test_integer_field(self):
tsm = TestStringifierModel(integer=123)
self.assertEqual(ModelFieldStringifier.stringify(self.get_field('integer'), tsm.integer), '123')
def test_integer_field_negative(self):
tsm = TestStringifierModel(integer=-123)
self.assertEqual(ModelFieldStringifier.stringify(self.get_field('integer'), tsm.integer), '-123')
def test_datetime_field(self):
now = datetime.datetime.now()
tsm = TestStringifierModel(datetime=now)
self.assertEqual(ModelFieldStringifier.stringify(self.get_field('datetime'), tsm.datetime),
formats.date_format(now, "DATETIME_FORMAT"))
def test_datetime_field_null(self):
tsm = TestStringifierModel.objects.create()
self.assertEqual(ModelFieldStringifier.stringify(self.get_field('datetime'), tsm.datetime), None)
def test_date_field(self):
today = datetime.date.today()
tsm = TestStringifierModel(date=today)
self.assertEqual(ModelFieldStringifier.stringify(self.get_field('date'), tsm.date),
formats.date_format(today, "DATE_FORMAT"))
def test_fk_field(self):
person = SomePerson.objects.create()
AuditTrail.objects.all().delete()
tsm = TestStringifierModel(fk=person)
self.assertEqual(ModelFieldStringifier.stringify(self.get_field('fk'), tsm.fk), unicode(person))
def test_fk_field_update(self):
person = SomePerson.objects.create()
AuditTrail.objects.all().delete()
tsm = TestStringifierModel(fk=person)
self.assertEqual(ModelFieldStringifier.stringify(self.get_field('fk'), tsm.fk_id), unicode(person))
def test_boolean_field(self):
tsm = TestStringifierModel(boolean=True)
self.assertEqual(ModelFieldStringifier.stringify(self.get_field('boolean'), tsm.boolean), 'True')
tsm = TestStringifierModel(boolean=False)
self.assertEqual(ModelFieldStringifier.stringify(self.get_field('boolean'), tsm.boolean), 'False')
def test_float_field(self):
tsm = TestStringifierModel(float=3.14)
self.assertEqual(ModelFieldStringifier.stringify(self.get_field('float'), tsm.float), '3.14')
def test_choices(self):
tsm = TestStringifierModel(choice=0)
self.assertEqual(ModelFieldStringifier.stringify(self.get_field('choice'), tsm.choice),
tsm.get_choice_display())
def stringify_azaza_field(value, *args):
return u'Azaza %s' % unicode(value)
class TestExtendModelDefaultFieldStringifier(TestCase):
def test_add_custom_stringifier(self):
self.assertNotIn(AzazaField, ModelFieldStringifier.custom_stringify_methods)
ModelFieldStringifier.add_stringifier(AzazaField, stringify_azaza_field)
self.assertEqual(ModelFieldStringifier.custom_stringify_methods[AzazaField], stringify_azaza_field)
def test_custom_stringifier(self):
ModelFieldStringifier.add_stringifier(AzazaField, stringify_azaza_field)
tsm = TestStringifierModel(azaza='ololo')
field = TestStringifierModel._meta.get_field('azaza')
self.assertEqual(ModelFieldStringifier.stringify(field, tsm.azaza), stringify_azaza_field(tsm.azaza))
|
1712858
|
from wtforms import Form, StringField, PasswordField, validators
class RegistrationForm(Form):
username = StringField('Username', [validators.Length(min=4, max=25)])
password = PasswordField('<PASSWORD>', [
validators.DataRequired(),
validators.EqualTo('confirm', message='Passwords must match')
])
confirm = PasswordField('<PASSWORD>')
class LoginForm(Form):
username = StringField('Username', [validators.Length(min=4, max=25)])
password = PasswordField('Password')
|
1712894
|
from pyignite import Client
from SimpleData import SimpleData
import time
# Progress
# Writing and Reading from python works
# Writing and Reading from dotnet works
# Finds its own Binary type
# Works with objects and dynamics
# TODO: Move this to samples
# Not sure about subscribing to a stream
def preview(cache):
print("Size of %s %s" % (cache.name, cache.get_size()))
client = Client()
client.connect('localhost', 10800)
client.register_binary_type(SimpleData)
print(client.get_cache_names())
initial_caches_num = len(client.get_cache_names())
stream_name = None
for cache_name in list(client.get_cache_names()):
if "DynamicDataStream" in cache_name:
stream_name = cache_name
if stream_name is None:
print('Perper stream not started yet')
else:
simpleDataStream = client.get_or_create_cache(stream_name)
simpleDataStream.put(
simpleDataStream.get_size() + 1,
SimpleData(name='Goshko', priority=1231, json='test')
)
simpleDataStream.put(simpleDataStream.get_size() + 1, "TESTING DYNAMICS")
for el in simpleDataStream.scan():
print(el[1])
print(el[1].__class__)
last_size = simpleDataStream.get_size()
while True:
caches_num = len(client.get_cache_names())
if caches_num > initial_caches_num:
print(f"New cache: {list(client.get_cache_names())[-1]}")
new_cache_name = list(client.get_cache_names())[-1]
new_cache = client.get_cache(new_cache_name)
print(new_cache.scan())
current_size = simpleDataStream.get_size()
if (current_size > last_size):
last_size = current_size
print("New Item...")
## This is not possible because it is not sorted by timestamp / key
for item in simpleDataStream.scan():
print(item)
*_, new = simpleDataStream.scan()
print(new)
time.sleep(1)
|
1712931
|
def extractItsgellisalinHomeBlog(item):
'''
DISABLED
Parser for 'itsgellisalin.home.blog'
'''
# Gone
return None
|
1712937
|
from typing import Type
from guniflask.beans.factory import BeanFactory
from guniflask.context.event import ApplicationEvent
from guniflask.context.event_listener import ApplicationEventListener
from guniflask.data_model.typing import inspect_args
class ApplicationEventPublisher:
def __init__(self, bean_factory: BeanFactory):
self._bean_factory = bean_factory
self._app_listeners = set()
self._app_listener_beans = set()
def add_application_listener(self, listener: ApplicationEventListener):
self._app_listeners.add(listener)
def add_application_listener_bean(self, bean_name: str):
self._app_listener_beans.add(bean_name)
def publish_event(self, event: ApplicationEvent):
for listener in self._get_application_listeners():
assert isinstance(listener, ApplicationEventListener)
accepted_event_type = self._resolve_accepted_event_type(listener.on_application_event)
if accepted_event_type is None or isinstance(event, accepted_event_type):
listener.on_application_event(event)
def _get_application_listeners(self):
for listener in self._app_listeners:
yield listener
for bean_name in self._app_listener_beans:
listener = self._bean_factory.get_bean(bean_name, required_type=ApplicationEventListener)
if listener is not None:
yield listener
def _resolve_accepted_event_type(self, method) -> Type[ApplicationEvent]:
args, hints = inspect_args(method)
event_type_arg = None
for k in args:
event_type_arg = k
break
if event_type_arg is not None:
return hints.get(event_type_arg)
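
# --- Illustrative sketch (not part of the original module) ---
# Demonstrates the dispatch path: listeners registered via
# add_application_listener() are invoked directly, and the event-type hint on
# on_application_event() is used to filter which events a listener receives.
# The listener class below is hypothetical; the BeanFactory is only consulted
# for listener *beans*, so None is passed here for the sketch.
class _PrintingListener(ApplicationEventListener):
    def on_application_event(self, event: ApplicationEvent):
        # Hinted as the base ApplicationEvent, so every event type is accepted.
        print(f"received application event: {event!r}")


def _demo_publish(event: ApplicationEvent):
    publisher = ApplicationEventPublisher(bean_factory=None)
    publisher.add_application_listener(_PrintingListener())
    publisher.publish_event(event)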
|
1712940
|
from pynput.keyboard import Key, Listener
import winsound
import string
keyReleaseDict={}
for keys in Key:
keyReleaseDict[str(keys)] = True
for alphabets in string.printable:
if alphabets == "'":
quote = "'"
keyReleaseDict[f'"{quote}"'] = True
continue
elif alphabets == "\\":
keyReleaseDict["'\\\\'"] = True
continue
else:
keyReleaseDict[f"'{alphabets}'"] = True
#print(keyReleaseDict)
def playDownSound(key):
# Check whether the key has already been released
try:
if keyReleaseDict[str(key)] == True: # and active:
if key == Key.space:
winsound.PlaySound('Sounds/space.wav', winsound.SND_ASYNC)
elif key == Key.backspace:
winsound.PlaySound('Sounds/backspace.wav', winsound.SND_ASYNC)
elif key == Key.enter:
winsound.PlaySound('Sounds/enter.wav', winsound.SND_ASYNC)
else:
winsound.PlaySound('Sounds/normal.wav', winsound.SND_ASYNC)
except:
winsound.PlaySound('Sounds/normal.wav', winsound.SND_ASYNC)
#print(key)
keyReleaseDict[str(key)] = False
def playReleaseSound(key):
#if active:
keyReleaseDict[str(key)] = True
if key == Key.space:
winsound.PlaySound('Sounds/relspace.wav', winsound.SND_ASYNC)
elif key == Key.backspace:
winsound.PlaySound('Sounds/relbackspace.wav', winsound.SND_ASYNC)
elif key == Key.enter:
winsound.PlaySound('Sounds/relenter.wav', winsound.SND_ASYNC)
else:
winsound.PlaySound('Sounds/release.wav', winsound.SND_ASYNC)
with Listener(on_press=playDownSound, on_release=playReleaseSound) as listener:
#root.mainloop()
listener.join()
|
1712949
|
from .connections import Connection
from .lib.Gen.ttypes import *
from typing import Union
class Shop(Connection):
def __init__(self, auth):
super().__init__("/SHOP4")
self.auth = auth
self.updateHeaders({
'User-Agent': self.auth.UA,
'X-Line-Application': self.auth.LA,
})
def afterLogin(self, *args, **kws):
for k,v in kws.items():
try:
setattr(self, k, v)
except:
pass
self.updateHeaders({
"X-Line-Access": self.authToken
})
async def getBalance(self, appStoreCode: int = 1) -> Coin:
return await self.call("getTotalBalance", appStoreCode)
async def getProduct(self, packageId: int, language: str = "EN", country: str = "ID") -> Product:
return await self.call("getProduct", packageId, language, country)
async def getProductList(self, productIdList: Union[str, list], language: str = "EN", country: str ="ID") -> ProductList:
productIdList = productIdList if isinstance(productIdList, list) else [productIdList]
return await self.call("getProductList", productIdList, language, country)
async def getPurchaseHistory(self, start: int = 1, size: int = 10, language: str = "EN", country: str = "ID") -> ProductList:
return await self.call("getPurchaseHistory", start, size, language, country)
async def getPresentsSent(self, start: int = 1, size: int = 10, language: str = "EN", country: str = "ID") -> ProductList:
return await self.call("getPresentsSent", start, size, language, country)
async def getPresentsReceive(self, start: int = 1, size: int = 10, language: str = "EN", country: str = "ID") -> ProductList:
return await self.call("getPresentsReceived", start, size, language, country)
async def getDownloads(self, start: int = 1, size: int = 10, language: str = "EN", country: str = "ID") -> ProductList:
return await self.call("getDownloads", start, size, language, country)
async def getEventPackages(self, start: int = 1, size: int = 10, language: str = "EN", country: str = "ID") -> ProductList:
return await self.call("getEventPackages", start, size, language, country)
async def getNewlyReleasedPackages(self, start: int = 1, size: int = 10, language: str = "EN", country: str = "ID") -> ProductList:
return await self.call("getNewlyReleasedPackages", start, size, language, country)
async def getPopularPackages(self, start: int = 1, size: int = 10, language: str = "EN", country: str = "ID") -> ProductList:
return await self.call("getPopularPackages", start, size, language, country)
async def buyFreeProduct(self,
receiverMid: str,
productId: str = None,
packageId: int = None,
messageTemplate: int = 1,
language: str = "EN",
country: str = "ID"):
return await self.call("buyFreeProduct", receiverMid=receiverMid,
productId=productId, packageId=packageId,
messageTemplate=messageTemplate,
language=language, country=country
)
async def buyCoinProduct(self,
receiverMid: str,
productId: str = None,
packageId: int = None,
language: str = "EN",
location: str = None,
currency: str = None,
price: str = None,
appStoreCode: int = 1, #0 APPLE,1 GOOGLE
messageText: str = None,
messageTemplate: int = 1
):
payment = PaymentReservation(receiverMid=receiverMid, productId=productId,
packageId=packageId, language=language, location=location,
currency=currency, messageText=messageText, price=price,
messageTemplate=messageTemplate, appStoreCode=appStoreCode)
return await self.call("buyCoinProduct", payment)
async def reserveCoinPurchase(self,
productId: int,
pgCode: int,
currency: str,
price: str,
appStoreCode: int = 1,#0 APPLE, 1 GOOGLE
redirectUrl: str = None,
country: str = "ID",
language: str = "EN"):
req = CoinPurchaseReservation(
productId=productId, currency=currency,
pgCode=pgCode, price=price,
appStoreCode=appStoreCode, redirectUrl=redirectUrl,
language=language, country=country
)
return await self.call("reserveCoinPurchase", req)
|
1712953
|
from marinetrafficapi.query_params import QueryParams
class VD01QueryParams(QueryParams):
"""Query params for VD01 API call."""
ship_id = 'shipid', 'A uniquely assigned ID by \n' \
'MarineTraffic for the subject vessel.'
vessel_id = 'vessel_id', 'The Maritime Mobile Service Identity \n' \
'(MMSI) or the IMO number of the vessel.'
|
1712962
|
from tkinter import *
import tkinter.font
from tkinter import ttk
class TagList(Frame):
def __init__(self, parent):
#self.container = container
self.master = parent
self.tree = None
self.tree_columns = ("tag","count")
self._setup_widgets()
self._init_tree()
def _setup_widgets(self):
container = self.master
self.tree = ttk.Treeview(container,columns=self.tree_columns, show="headings", selectmode="browse")
vsb = ttk.Scrollbar(container,orient="vertical", command=self.tree.yview)
hsb = ttk.Scrollbar(container,orient="horizontal", command=self.tree.xview)
self.tree.configure(yscrollcommand=vsb.set, xscrollcommand=hsb.set)
self.tree.grid(column=0, row=0, sticky='nsew')
vsb.grid(column=1, row=0, sticky='ns')
hsb.grid(column=0, row=1, sticky='ew')
container.columnconfigure(0, weight=1)
container.rowconfigure(0, weight=1)
def _init_tree(self):
for col in self.tree_columns:
self.tree.heading(col, text=col.title(),
command=lambda c=col: self.sortby(self.tree, c, 0))
self.tree.column(col, width=tkinter.font.Font().measure(col.title()))
def clear(self):
pass
def bindSelect(self, who):
self.tree.bind("<<TreeviewSelect>>", who)
def getselect(self):
sel = self.tree.selection()
return self.tree.set(sel, 'tag')
def build(self, tree_data):
self.tree.delete(*self.tree.get_children())
for item in tree_data:
self.tree.insert('', 'end', values=item)
# # adjust columns lengths if necessary
# for indx, val in enumerate(item):
# ilen = tkinter.font.Font().measure(val)
# if self.tree.column(self.tree_columns[indx], width=None) < ilen:
# self.tree.column(self.tree_columns[indx], width=ilen)
def sortby(self, tree, col, descending):
"""Sort tree contents when a column is clicked on."""
# grab values to sort
data = []
if col == 'count':
# sort by count, then tag
data = [(tree.set(child,col), child, tree.set(child,'tag')) for child in tree.get_children('')]
data.sort(key=lambda t: (int(t[0]),t[2]), reverse=descending)
else:
data = [(tree.set(child, col), child) for child in tree.get_children('')]
data.sort(reverse=descending)
# reorder data
for indx, item in enumerate(data):
tree.move(item[1], '', indx)
# switch the heading so that it will sort in the opposite direction
tree.heading(col,
command=lambda col=col: self.sortby(tree, col, int(not descending)))
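
# --- Illustrative usage sketch (not part of the original module) ---
# Embeds TagList in a bare Tk window and fills it with hypothetical
# (tag, count) rows; selection events simply print a message.
def _demo_taglist():
    root = Tk()
    taglist = TagList(root)
    taglist.build([("python", 12), ("tkinter", 5), ("treeview", 3)])
    taglist.bindSelect(lambda event: print("selection changed"))
    root.mainloop()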
|
1713102
|
import os
class WsgiConfig(object):
DEBUG = False
FLASK_SERVER_NAME = os.getenv('SHOVEL_FLASK_SERVER_NAME', 'localhost')
FLASK_SERVER_PORT = os.getenv('SHOVEL_FLASK_SERVER_PORT', '5000')
# FIXME this should be investigated further
if FLASK_SERVER_PORT == '80':
SERVER_NAME = FLASK_SERVER_NAME
else:
SERVER_NAME = '{}:{}'.format(FLASK_SERVER_NAME, FLASK_SERVER_PORT)
BROKER_USER = os.getenv('RABBITMQ_USER', 'guest')
BROKER_PASS = os.getenv('RABBITMQ_PASSWORD', '<PASSWORD>')
BROKER_HOST = os.getenv('RABBITMQ_HOST', 'localhost')
BROKER_PORT = os.getenv('RABBITMQ_PORT', 5672)
BROKER_URL = 'amqp://{}:{}@{}:{}/'.format(BROKER_USER, BROKER_PASS, BROKER_HOST, BROKER_PORT)
QUEUE_TO_CONSUME = 'QUEUE_IN'
QUEUE_TO_PUBLISH = 'QUEUE_OUT'
ALLOWED_AUTH_KEYS = (
'myshovel',
)
REMOTE_AUTH_KEY = 'myshovel'
REMOTE_QUEUE_ENDPOINT = os.getenv('REMOTE_QUEUE_ENDPOINT', 'http://localhost:5000/publish')
|
1713118
|
import tensorflow as tf
import ops_compress
from BlockWiseEmbedding import BlockWiseEmbeddingForInput as bl
from BlockWiseEmbedding import BlockWiseEmbeddingForSoftmax as bs
import time
import lowranksoftmax  # assumed local module providing LowRankSoftmax (referenced below but not imported)
class NextItNet_Decoder:
def __init__(self, model_para):
self.model_para = model_para
embedding_width = model_para['in_embed_size']
output_dim = model_para['dilated_channels']
# embedding
# self.allitem_embeddings = tf.get_variable('allitem_embeddings',
# [model_para['item_size'], embedding_width],
# initializer=tf.truncated_normal_initializer(stddev=0.02))
# block-wise embedding, factor :1 means embeddings and other means block-wise embedding
if (model_para['SoftmaxType'] == 'Block_Input_Full' or model_para['SoftmaxType'] == 'Block_Input_Softmax'
or model_para['SoftmaxType']=='Block_Input_Softmax_Inference') and model_para['factor'] != 1:
self.allitem_embeddings = bl(model_para['item_size'], embedding_width,
block_factor=model_para['factor'],
block=model_para["block"])
print("using block embedding for input")
else:
self.allitem_embeddings = bl(model_para['item_size'], embedding_width,
block_factor=1,
block=model_para["block"])
print("using embedding")
self.allitem_embeddings.build()
def train_graph(self): #, is_negsample=False):
model_para = self.model_para
self.itemseq_input = tf.placeholder('int32',
[model_para['batch_size'], model_para['seq_len']], name='itemseq_input')
label_seq, dilate_input=self.model_graph(self.itemseq_input, train=True)
# dilate_input : [batch_size, seq_len, dilated_channels]
if model_para['SoftmaxType'] == "neg":
print("using neg")
logits_2D = tf.reshape(dilate_input, [-1,model_para['dilated_channels']])
self.softmax_w = tf.get_variable("softmax_w", [model_para['item_size'], model_para['dilated_channels']],tf.float32,tf.random_normal_initializer(0.0, 0.01))
self.softmax_b = tf.get_variable("softmax_b", [model_para['item_size']], tf.float32, tf.constant_initializer(0.1))
label_flat = tf.reshape(label_seq, [-1, 1]) # 1 is the number of positive example
num_sampled = int(0.2* model_para['item_size'])#sample 20% as negatives
# tf.nn.nce_loss
loss =tf.nn.sampled_softmax_loss(self.softmax_w, self.softmax_b, label_flat, logits_2D, num_sampled, model_para['item_size'])
elif model_para['SoftmaxType'] == "FullSoftmax_conv":
print("using FullSoftmax_conv")
if model_para['dilated_channels']!= model_para['out_embed_size']:
self.softmax_pro_w = tf.get_variable("softmax_pro_w", [model_para['dilated_channels'], model_para['embed_size']],
tf.float32, tf.random_normal_initializer(0.0, 0.01))
dilate_input = tf.tensordot(dilate_input, self.softmax_pro_w, axes=1)
logits = ops_compress.conv1d(tf.nn.relu(dilate_input), model_para['item_size'], name='logits')
logits_2D = tf.reshape(logits, [-1, model_para['item_size']])
label_flat = tf.reshape(label_seq, [-1])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label_flat, logits=logits_2D)
elif model_para['SoftmaxType'] == "FullSoftmax" or model_para['SoftmaxType'] == "Block_Input_Full":
print("using FullSoftmax")
if model_para['dilated_channels']!= model_para['out_embed_size']:
self.softmax_pro_w = tf.get_variable("softmax_pro_w", [model_para['dilated_channels'], model_para['out_embed_size']],
tf.float32, tf.random_normal_initializer(0.0, 0.01))
dilate_input = tf.tensordot(dilate_input, self.softmax_pro_w, axes=1)
self.softmax_w = tf.get_variable("softmax_w", [model_para['out_embed_size'], model_para['item_size']],
tf.float32, tf.random_normal_initializer(0.0, 0.01))
self.softmax_b = tf.get_variable("softmax_b", [model_para['item_size']], tf.float32,
tf.constant_initializer(0.1))
label_flat = tf.reshape(label_seq, [-1])
logits_2D = tf.reshape(dilate_input, [-1, model_para['out_embed_size']])
logits_2D = tf.matmul(logits_2D, self.softmax_w)
logits_2D = tf.nn.bias_add(logits_2D, self.softmax_b)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label_flat, logits=logits_2D)
elif model_para['SoftmaxType'] == "FullSoftmax_Tied":
print("using FullSoftmax_Tied")
if model_para['dilated_channels']!= model_para['embed_size']:
self.softmax_pro_w = tf.get_variable("softmax_pro_w", [model_para['dilated_channels'], model_para['embed_size']],
tf.float32, tf.random_normal_initializer(0.0, 0.01))
dilate_input = tf.tensordot(dilate_input, self.softmax_pro_w, axes=1)
self.softmax_w = tf.transpose(self.allitem_embeddings.embedding)
# self.softmax_b = tf.get_variable("softmax_b", [model_para['item_size']], tf.float32,
# tf.constant_initializer(0.1))
label_flat = tf.reshape(label_seq, [-1])
logits_2D = tf.reshape(dilate_input, [-1, model_para['dilated_channels']])
logits_2D = tf.matmul(logits_2D, self.softmax_w)
# logits_2D = tf.nn.bias_add(logits_2D, self.softmax_b)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label_flat, logits=logits_2D)
elif model_para['SoftmaxType'] == "Block_for_Softmax":
print("using Block_for_Softmax")
logits_2D = tf.reshape(dilate_input, [-1, model_para['dilated_channels']])
block = model_para['block']
assert model_para['dilated_channels'] == model_para['out_embed_size']
softmax_layer = bs(input_dim=model_para['dilated_channels'], block= block,
block_factor=model_para['factor'])
loss, _ = softmax_layer.loss(logits_2D, tf.reshape(label_seq, [-1]), "loss")
elif (model_para['SoftmaxType'] == 'Block_Input_Softmax'
or model_para['SoftmaxType'] == 'Block_Input_Softmax_Inference') and model_para['factor'] != 1:
print("using Block_Input_Softmax")
logits_2D = tf.reshape(dilate_input, [-1, model_para['dilated_channels']])
block = model_para['block']
assert model_para['dilated_channels'] == model_para['out_embed_size']
softmax_layer = bs(input_dim=model_para['dilated_channels'], block= block,
block_factor=model_para['factor'])
loss, _ = softmax_layer.loss(logits_2D, tf.reshape(label_seq, [-1]), "loss")
elif model_para['SoftmaxType'] == 'LowrankSoftmax' and model_para['factor'] != 1:
print("using LowrankSoftmax")
logits_2D = tf.reshape(dilate_input, [-1, model_para['dilated_channels']])
block = model_para['block']
softmax_layer = lowranksoftmax.LowRankSoftmax(input_dim=model_para['dilated_channels'],
block=block, block_factor=model_para['factor'])
loss = softmax_layer.loss(logits_2D, tf.reshape(label_seq, [-1]), "loss")
elif model_para['SoftmaxType'] == 'Block_Input_LowrankSoftmax' and model_para['factor'] != 1:
print("using Block_Input_LowrankSoftmax")
logits_2D = tf.reshape(dilate_input, [-1, model_para['dilated_channels']])
block = model_para['block']
softmax_layer = lowranksoftmax.LowRankSoftmax(input_dim=model_para['dilated_channels'],
block=block, block_factor=model_para['factor'])
loss = softmax_layer.loss(logits_2D, tf.reshape(label_seq, [-1]), "loss")
self.loss = tf.reduce_mean(loss)
regularization = 0.001 * tf.reduce_mean([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
self.loss = self.loss + regularization
# self.arg_max_prediction = tf.argmax(logits_2D, 1) #useless, if using negative sampling (i.e., negsample=True), it should be changed such as in predict_graph module
def model_graph(self, itemseq_input, train=True):
model_para = self.model_para
context_seq = itemseq_input[:, 0:-1]
label_seq = itemseq_input[:, 1:]
print("context_seq: ", context_seq.shape)
context_embedding = self.allitem_embeddings.get_input(context_seq)
# self_attention
# mask = self_attention.make_std_mask(context_seq, pad=model_para['pad'])
# context_embedding = self_attention.attention(context_embedding, context_embedding, context_embedding, mask,
# dropout=0.5, train=train)
dilate_input = context_embedding
if (model_para['SoftmaxType'] == "FullSoftmax" or model_para['SoftmaxType'] == "FullSoftmax_conv" or
model_para['SoftmaxType'] == "Block_for_Softmax") and \
model_para['in_embed_size'] != model_para['dilated_channels']:
embed_proj_w = tf.get_variable("embed_w", [model_para['in_embed_size'], model_para['dilated_channels']])
dilate_input = tf.tensordot(dilate_input, embed_proj_w, axes=1)
if model_para['parametersharing_type'] == 'original':
for layer_id, dilation in enumerate(model_para['dilations']):
dilate_input = ops_compress.nextitnet_residual_block(dilate_input, dilation,
layer_id, model_para['dilated_channels'],
model_para['kernel_size'], causal=True, train=train)
elif model_para['parametersharing_type'] == 'cross-layer':
for layer_id, dilation in enumerate(model_para['dilations']):
dilate_input = ops_compress.nextitnet_residual_block_cross_layer(dilate_input, dilation,
layer_id, model_para['dilated_channels'],
model_para['kernel_size'], causal=True, train=train)
elif model_para['parametersharing_type'] == 'cross-block':
for layer_id, dilation in enumerate(model_para['dilations']):
dilate_input = ops_compress.nextitnet_residual_block_cross_block(dilate_input, dilation,
layer_id, model_para['dilated_channels'],
model_para['kernel_size'], causal=True, train=train)
elif model_para['parametersharing_type'] == 'adjacent-layer':
for layer_id, dilation in enumerate(model_para['dilations']):
dilate_input = ops_compress.nextitnet_residual_block_adjacent_layer(dilate_input, dilation,
layer_id, model_para['dilated_channels'],
model_para['kernel_size'], causal=True, train=train)
elif model_para['parametersharing_type'] == 'adjacent-block':
for layer_id, dilation in enumerate(model_para['dilations']):
dilate_input = ops_compress.nextitnet_residual_adjacent_block(dilate_input, dilation,
layer_id, model_para['dilated_channels'],
model_para['kernel_size'], causal=True, train=train)
return label_seq, dilate_input
# output top-n based on recalled items instead of all items. You can use this interface for practical recommender systems.
def predict_graph_onrecall(self, reuse=False): #is_negsample=False,
if reuse:
tf.get_variable_scope().reuse_variables()
model_para = self.model_para #
self.input_predict = tf.placeholder('int32', [model_para['batch_size'], model_para['seq_len']], name='input_predict')
self.input_recall = tf.placeholder('int32', [model_para['batch_size'], model_para['seq_len']], name='input_recall')# candidate items
label_seq, dilate_input = self.model_graph(self.input_predict, train=False)
# label_flat = tf.reshape(label_seq[:, -1:], [-1]) # [batch_size]
if model_para['SoftmaxType'] == 'neg_nowork':
logits_2D=dilate_input[:, -1:, :]
recall_mat = tf.nn.embedding_lookup(self.softmax_w, self.input_recall)
logits_2D = tf.matmul(logits_2D, tf.transpose(recall_mat,[0,2,1]))
logits_2D=tf.reshape(logits_2D, [-1, tf.shape(self.input_recall)[1]])
recall_bias = tf.nn.embedding_lookup(self.softmax_b, self.input_recall)
logits_2D=tf.add(logits_2D,recall_bias)
probs_flat = tf.nn.softmax(logits_2D, name='softmax')
elif model_para['SoftmaxType'] == 'neg':
logits_2D = tf.reshape(dilate_input[:, -1:, :], [-1, model_para['out_embed_size']])
logits_2D = tf.matmul(logits_2D, tf.transpose(self.softmax_w))
logits_2D = tf.nn.bias_add(logits_2D, self.softmax_b)
probs_flat = tf.nn.softmax(logits_2D)
elif model_para['SoftmaxType'] == 'FullSoftmax_conv':
print("recall one valid using FullSoftmax_conv")
if model_para['dilated_channels']!= model_para['embed_size']:
dilate_input = tf.tensordot(dilate_input, self.softmax_pro_w, axes=1)
logits = ops_compress.conv1d(tf.nn.relu(dilate_input[:, -1:, :]), model_para['item_size'], name='logits')
logits_2D = tf.reshape(logits, [-1, model_para['item_size']]) #[batch_size, item_size]
probs_flat = tf.nn.softmax(logits_2D, name='softmax')
elif model_para["SoftmaxType"] == "FullSoftmax" or model_para['SoftmaxType'] == "Block_Input_Full" or model_para['SoftmaxType'] == "FullSoftmax_Tied":
print("valid using FullSoftmax")
if model_para['dilated_channels'] != model_para['out_embed_size']:
dilate_input = tf.tensordot(dilate_input, self.softmax_pro_w, axes=1)
logits_2D = tf.reshape(dilate_input[:, -1:, :], [-1, model_para['out_embed_size']])
logits_2D = tf.matmul(logits_2D, self.softmax_w)
logits_2D = tf.nn.bias_add(logits_2D, self.softmax_b)
probs_flat = tf.nn.softmax(logits_2D)
elif model_para["SoftmaxType"] == "Block_for_Softmax":
print("recall one valid using Block_for_Softmax")
logits_2D = tf.reshape(dilate_input[:, -1:, :], [-1, model_para['dilated_channels']]) #[batch_size, dilated_channels]
block = model_para['block']
softmax_layer = bs(model_para["dilated_channels"], block,
block_factor=model_para['factor'])
# loss, _ = softmax_layer.loss(logits_2D, label_flat, train=False, name="loss")
probs_flat = softmax_layer.softmax(logits_2D, name='softmax')
elif model_para["SoftmaxType"] == "Block_Input_Softmax" and model_para['factor'] != 1:
print("recall one valid using Block_Input_Softmax")
logits_2D = tf.reshape(dilate_input[:, -1:, :], [-1, model_para['dilated_channels']])
block = model_para['block']
softmax_layer = bs(model_para["dilated_channels"], block,
block_factor=model_para['factor'])
# loss, _ = softmax_layer.loss(logits_2D, label_flat, train=False, name="loss")
probs_flat = softmax_layer.softmax(logits_2D, name='softmax')
elif model_para["SoftmaxType"] == "LowrankSoftmax":
print("recall one valid using LowrankSoftmax")
logits_2D = tf.reshape(dilate_input[:, -1:, :], [-1, model_para['dilated_channels']])
block = model_para['block']
softmax_layer = lowranksoftmax.LowRankSoftmax(input_dim=model_para['dilated_channels'], block= block,
block_factor=model_para['factor'])
# loss, _ = softmax_layer.loss(logits_2D, label_flat, train=False, name="loss")
probs_flat = softmax_layer.softmax(logits_2D, name='softmax') # [batch_size, item_size]
elif model_para["SoftmaxType"] == "Block_Input_LowrankSoftmax":
print("recall one valid using Block_Input_LowrankSoftmax")
logits_2D = tf.reshape(dilate_input[:, -1:, :], [-1, model_para['dilated_channels']])
block = model_para['block']
softmax_layer = lowranksoftmax.LowRankSoftmax(input_dim=model_para['dilated_channels'], block= block,
block_factor=model_para['factor'])
# loss, _ = softmax_layer.loss(logits_2D, label_flat, train=False, name="loss")
probs_flat = softmax_layer.softmax(logits_2D, name='softmax') # [batch_size, item_size]
elif model_para["SoftmaxType"] == "Block_Input_Softmax_Inference" and model_para['factor'] != 1:
print("recall one valid using Block_Input_Softmax")
logits_2D = tf.reshape(dilate_input[:, -1:, :], [-1, model_para['dilated_channels']])
block = model_para['block']
softmax_layer = bs(model_para["dilated_channels"], block=block, block_factor=model_para['factor'])
# loss, _ = softmax_layer.loss(logits_2D, label_flat, train=False, name="loss")
probs_flat = softmax_layer.softmax_inference_top(logits_2D, name='softmax', top_v=model_para['top_k'])
# self.loss_test = tf.reduce_mean(loss)
self.g_probs = probs_flat
# Newly added for weishi: since each input is one user (i.e., one batch), we only need to rank the first batch; the code below selects the top-5.
# self.top_k= tf.nn.top_k(self.g_probs[:,-1], k=model_para['top_k'],name='top-k')
# Be careful with the top-k values: the index reflects the order of your recalled items, not their original order.
# self.top_k = tf.nn.top_k(self.g_probs, k=model_para['top_k'], name='top-k') # [batch_size, top_k]
# self.top_k=tf.gather(self.input_recall, tf.contrib.framework.argsort(self.g_probs),name='top-k')
# output top-n based on recalled items instead of all items. You can use this interface for practical recommender systems.
def predict_graph_onrecall_ori(self, is_negsample=False, reuse=False):
if reuse:
tf.get_variable_scope().reuse_variables()
model_para = self.model_para
self.input_predict = tf.placeholder('int32', [model_para['batch_size'], model_para['seq_len']], name='input_predict')
self.input_recall = tf.placeholder('int32', [model_para['batch_size'], model_para['seq_len']], name='input_recall') # candidate items
label_seq, dilate_input = self.model_graph(self.input_predict, train=False)
if is_negsample:
logits_2D = dilate_input[:, -1:, :]
recall_mat = tf.nn.embedding_lookup(self.softmax_w, self.input_recall)
logits_2D = tf.matmul(logits_2D, tf.transpose(recall_mat, [0, 2, 1]))
logits_2D = tf.reshape(logits_2D, [-1, tf.shape(self.input_recall)[1]])
recall_bias = tf.nn.embedding_lookup(self.softmax_b, self.input_recall)
logits_2D = tf.add(logits_2D, recall_bias)
else:
# logits = ops.conv1d(tf.nn.relu(dilate_input), model_para['item_size'], name='logits')
logits = ops_compress.conv1d(tf.nn.relu(dilate_input[:, -1:, :]), model_para['item_size'], name='logits')
logits_2D = tf.reshape(logits, [-1, model_para['item_size']])
probs_flat = tf.nn.softmax(logits_2D, name='softmax')
self.g_probs = probs_flat
# Newly added for weishi: since each input is one user (i.e., one batch), we only need to rank the first batch; the code below selects the top-5.
# self.top_k= tf.nn.top_k(self.g_probs[:,-1], k=model_para['top_k'],name='top-k')
# Be careful with the top-k values: the index reflects the order of your recalled items, not their original order.
self.top_k = tf.nn.top_k(self.g_probs, k=model_para['top_k'], name='top-k')
# self.top_k=tf.gather(self.input_recall, tf.contrib.framework.argsort(self.g_probs),name='top-k')
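
# --- Illustrative configuration sketch (not part of the original module) ---
# The keys below mirror the ones read throughout this file; the values are
# hypothetical and only show how the training and inference graphs would be
# wired together (ops_compress / BlockWiseEmbedding are assumed to be importable,
# as at the top of this file).
def _demo_build_graphs():
    model_para = {
        'item_size': 10000, 'batch_size': 32, 'seq_len': 20,
        'in_embed_size': 64, 'out_embed_size': 64, 'dilated_channels': 64,
        'dilations': [1, 2, 1, 2], 'kernel_size': 3,
        'SoftmaxType': 'FullSoftmax', 'factor': 1, 'block': 10,
        'parametersharing_type': 'original', 'top_k': 5,
    }
    model = NextItNet_Decoder(model_para)
    model.train_graph()                       # builds self.loss from self.itemseq_input
    model.predict_graph_onrecall(reuse=True)  # builds self.g_probs for inference
    return model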
|
1713145
|
from datetime import datetime
import unittest
from pybacklogpy.Category import Category
from tests.utils import get_project_id_and_key, response_to_json
class TestCategory(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.category = Category()
cls.project_id, cls.project_key = get_project_id_and_key()
# def test_add_category(self):
# now = datetime.now()
# category_name =
# response = self.category.add_category(
# project_id_or_key=self.project_key,
# name='test{YYYYMMDDHHMM}'.format(YYYYMMDDHHMM=now.strftime('%Y%m%d%H%M'))
# )
# response_json = response_to_json(response)
# self.assertTrue(response.ok, msg='Failed to add the category')
def test_get_category_list(self):
response = self.category.get_category_list(project_id_or_key=self.project_key)
self.assertTrue(response.ok, msg='Failed to fetch the category list')
response_list = response_to_json(response)
self.assertIsInstance(response_list, list, msg='Failed to fetch the category list')
if __name__ == '__main__':
unittest.main()
|
1713164
|
from django.conf import settings
from django.contrib import admin
from django.forms.widgets import Media
from django_summernote.admin import SummernoteModelAdmin, SummernoteModelAdminMixin
from django_summernote.utils import get_theme_files
from .models import Post, Book, Author
class BookAdmin(admin.ModelAdmin):
model = Book
pass
class PostAdmin(SummernoteModelAdmin):
pass
class BookInline(SummernoteModelAdminMixin, admin.StackedInline):
model = Book
extra = 1
class AuthorAdmin(SummernoteModelAdminMixin, admin.ModelAdmin):
# For non-bootstrapped admin site,
# JavaScript and CSS files should be imported manually like below.
@property
def media(self):
media = super().media + Media(
js = get_theme_files(settings.SUMMERNOTE_THEME, 'base_js'),
css = {
'all': get_theme_files(settings.SUMMERNOTE_THEME, 'base_css'),
})
return media
model = Author
inlines = [
BookInline,
]
admin.site.register(Book, BookAdmin)
admin.site.register(Post, PostAdmin)
admin.site.register(Author, AuthorAdmin)
|
1713172
|
import time
import unittest
from vika import Vika
from . import TEST_API_BASE, TEST_API_TOKEN, TEST_TABLE
class TestUpdateRecords(unittest.TestCase):
def setUp(self):
vika = Vika(TEST_API_TOKEN)
vika.set_api_base(TEST_API_BASE)
self.dst = vika.datasheet(TEST_TABLE)
def test_record_update(self):
# Update a single field
record = self.dst.records.get(title="无人生还")
record.title = "无人生还2"
self.assertEqual(record.title, "无人生还2")
time.sleep(1)
# Update multiple fields
record = self.dst.records.get(title="无人生还2")
r = record.update({
"title": '无人生还3',
"comment": '真好看'
})
self.assertEqual(r.title, "无人生还3")
self.assertEqual(r.comment, "真好看")
time.sleep(1)
# Update multiple records
self.dst.records.filter(
title="无人生还3").update(title="无人生还4")
record = self.dst.records.get(title="无人生还4")
self.assertEqual(record.title, "无人生还4")
time.sleep(1)
def tearDown(self):
self.dst.records.filter(title="无人生还4").update(title="无人生还")
if __name__ == '__main__':
unittest.main()
|
1713218
|
import os
from time import sleep
import pytest
import requests
from datadog_checks.dev import docker_run, get_docker_hostname, get_here
BOOTSTRAP = {
'action': 'create_cluster',
'cluster': {'name': 'demo.local'},
'node': {'paths': {'persistent_path': '/var/opt/redislabs/persist', 'ephemeral_path': '/var/opt/redislabs/tmp'}},
'credentials': {'username': '<EMAIL>', 'password': '<PASSWORD>'},
}
DATABASE = {
'name': 'db01',
'memory_size': 100000000,
'replication': False,
'eviction_policy': 'volatile-lru',
'sharding': False,
'shards_count': 1,
'port': 12000,
'data_persistence': 'aof',
'aof_policy': 'appendfsync-always',
}
@pytest.fixture(scope='session')
def dd_environment():
compose_file = os.path.join(get_here(), 'docker-compose.yml')
with docker_run(compose_file, log_patterns='MainThread: Done'):
# Let the cluster settle first
sleep(10)
# Bootstrap the cluster
url = 'https://{}:9443/v1/bootstrap/create_cluster'.format(get_docker_hostname())
r = requests.post(url, json=BOOTSTRAP, verify=False)
if r.status_code != 200:
print("Error: Unable to bootstrap")
counter = 0
# Check to ensure it's running properly
while True:
counter += 1
try:
j = requests.get(
'https://{}:9443/v1/cluster'.format("localhost"),
auth=(BOOTSTRAP['credentials']['username'], BOOTSTRAP['credentials']['password']),
headers={'Content-Type': 'application/json'},
timeout=10,
verify=False,
)
if j.status_code == 200:
break
else:
print("Retrying:", counter)
sleep(5)
except Exception as e:
print("Retrying:", counter, " Error:", str(e))
sleep(5)
if counter > 9:
break
# Create a database
x = requests.post(
'https://{}:9443/v1/bdbs'.format("localhost"),
auth=(BOOTSTRAP['credentials']['username'], BOOTSTRAP['credentials']['password']),
headers={'Content-Type': 'application/json'},
timeout=10,
verify=False,
json=DATABASE,
)
if x.status_code != 200:
print("Error: Unable to create database")
print("OK: bootstrap complete")
yield
@pytest.fixture
def instance():
return {
'host': get_docker_hostname(),
'port': 9443,
'username': BOOTSTRAP['credentials']['username'],
'password': BOOTSTRAP['credentials']['password'],
}
|
1713221
|
from django.contrib import admin
from .models import (Category, Comment, Favorite, Gallery, Implication, Post,
PostTag, ScoreVote, Configuration)
admin.site.register(Post)
admin.site.register(Category)
admin.site.register(PostTag)
admin.site.register(Implication)
admin.site.register(Favorite)
admin.site.register(ScoreVote)
admin.site.register(Gallery)
admin.site.register(Comment)
admin.site.register(Configuration)
|
1713293
|
import streamlit as st
import streamlit.components.v1 as stc
import pandas as pd
from file_utils import file_uploads, FileDownloader
import streamlit.components.v1 as components
import textwrap # SVG testing
import base64
from logger import logger
import os
## CUSTOM COMPONENTS
# https://docs.streamlit.io/en/stable/publish_streamlit_components.html
base_dir = os.path.dirname(os.path.abspath(__file__))
_vanilla_component = components.declare_component("vanilla_component", os.path.join(base_dir, "..", "component-template", "streamlit-vite", "vanilla_component" , "frontend" ,"dist"))
_vue_component = components.declare_component("vue_component", path=os.path.join(base_dir, "..", "component-template", "streamlit-vite", "vue_component" , "frontend" ,"dist"))
## SESSIONS
def init_sessions():
if 'my_hours_per_week' not in st.session_state:
st.session_state.my_hours_per_week = 40
if 'my_amount' not in st.session_state:
st.session_state.my_amount = 5
if 'expander_form' not in st.session_state:
st.session_state['expander_form'] = False
## METHODS
def form3_callback():
if 'my_amount' in st.session_state:
st.write(st.session_state.my_amount)
if 'my_hours_per_week' in st.session_state:
st.write(st.session_state.my_hours_per_week)
## APP RUN
init_sessions()
def app_run():
logger.info('In Demos')
st.title("Demos")
# init_sessions()
if 'expander_form' in st.session_state:
st.write(st.session_state.expander_form)
config = {
"container": "container",
"fitCenter": True,
"linkCenter": True,
"defaultNode": {
"type": "circle",
"size": [40],
"color": "#5B8FF9",
"style": { "fill": "#9EC9FF", "lineWidth": 3 },
"labelCfg": { "style": { "fill": "#000", "fontSize": 14 } }
},
"defaultEdge": {
"type": "quadratic",
"labelCfg": { "autoRotate": True, },
},
"modes": {
"default": ["drag-canvas", "drag-node"],
},
"nodeStateStyles": {
"hover": { "fillOpacity": 0.8 },
"selected": { "lineWidth": 5 }
}
}
nodes = [
{ "id": "node1", "x": 50, "y": 350, "label": "A", },
{ "id": "node2", "x": 250, "y": 150, "label": "B", },
{ "id": "node3", "x": 450, "y": 350, "label": "C", },
]
edges = []
for x in range(8):
edges.append({ "source": "node1", "target": "node2", "label": f'{x}th edge of A-B', })
for x in range(5):
edges.append({ "source": "node2", "target": "node3", "label": f'{x}th edge of B-C', })
# rv0 = g6(name="NameViteVanilla", config=config, nodes=nodes, edges=edges, key="c0")
# st.write(rv0)
rv1 = _vue_component(key="c1", name="ViteVue1") # create your component
st.write(rv1)
rv2 = _vanilla_component(key="c2", name="ViteVanilla")
st.write(rv2)
rv3 = _vue_component(key="c4", name="ViteVue2")
st.write(rv3)
with st.expander('File Demos'):
st.subheader("CSV Files")
menu = ["Single CSV", "Multiple CSV", "About"]
choice = st.selectbox("Select CSV File Upload Mode", menu)
if choice == "Single CSV":
csv_file = file_uploads(multiple=False, label="Input Single CSV and get data" )
if csv_file is not None:
# if csvFile.type == "text/plain"
# raw_text = str(csvFile.read(), "utf-8")
# st.write(raw_text)
# "application/pdf"
st.write(type(csv_file))
df = pd.read_csv(csv_file)
st.dataframe(df)
if st.button("Get CSV for download"):
FileDownloader(data=df.to_csv(), file_ext='csv').download()
elif choice == "Multiple CSV":
csv_files = file_uploads(file_types=['csv'], multiple=True, label="Upload Multiple CSV", folder_path="./" )
st.write(type(csv_files))
else:
st.subheader("About")
st.subheader("Text Download")
my_text = st.text_area("Enter Message for download")
if st.button("Create Download"):
st.write(my_text)
FileDownloader(my_text).download()
with st.expander('Forms Demo', expanded=False):
mc1, mc2 = st.columns(2)
# first form - use with
with mc1:
st.subheader("Form 1")
with st.form(key='form1'):
firstname = st.text_input("Firstname")
lastname = st.text_input("Lastname")
dob = st.date_input("Date of Birth")
submit_button = st.form_submit_button(label='Sign Up')
if submit_button:
st.success("Hello {}. Your account is created".format(firstname))
# second form
with mc2:
st.subheader("Form 2")
form2 = st.form(key='form2')
username = form2.text_input("Username")
jobtype = form2.selectbox("Job", ["Dev", "Data Scientist", "UX Designer"])
submit_button2 = form2.form_submit_button("Login")
if submit_button2:
st.success("{}. Logged In".format(username))
# 3rd form
st.subheader("Form 3 - columnar")
with st.form(key='form3'):
c1, c2, c3 = st.columns([3, 2, 1])
with c1:
amount = st.number_input("Hourly Rate in $", 0, 50000, key='my_amount')
with c2:
hours_per_week = st.number_input("Hours Per Week", 1, 120, key='my_hours_per_week')
with c3:
st.text("Salary")
submit_salary = st.form_submit_button(label="Calculate", on_click=form3_callback)
if submit_salary:
daily = [amount * 8]
weekly = [amount * hours_per_week]
df = pd.DataFrame({ 'hourly': amount, 'daily': daily, 'weekly': weekly })
st.dataframe(df)
with st.expander('Streamlit Components Static'):
stc.html("<p style='color: red;'>Streamlit is Awesome</p>")
st.markdown("<p style='color: blue;'>Streamlit is Awesome Markdown</p>", unsafe_allow_html=True)
stc.html("""
<style>
* {box-sizing: border-box}
body {font-family: Verdana, sans-serif; margin:0}
.mySlides {display: none}
img {vertical-align: middle;}
/* Slideshow container */
.slideshow-container {
max-width: 1000px;
position: relative;
margin: auto;
}
/* Next & previous buttons */
.prev, .next {
cursor: pointer;
position: absolute;
top: 50%;
width: auto;
padding: 16px;
margin-top: -22px;
color: white;
font-weight: bold;
font-size: 18px;
transition: 0.6s ease;
border-radius: 0 3px 3px 0;
user-select: none;
}
/* Position the "next button" to the right */
.next {
right: 0;
border-radius: 3px 0 0 3px;
}
/* On hover, add a black background color with a little bit see-through */
.prev:hover, .next:hover {
background-color: rgba(0,0,0,0.8);
}
/* Caption text */
.text {
color: #f2f2f2;
font-size: 15px;
padding: 8px 12px;
position: absolute;
bottom: 8px;
width: 100%;
text-align: center;
}
/* Number text (1/3 etc) */
.numbertext {
color: #f2f2f2;
font-size: 12px;
padding: 8px 12px;
position: absolute;
top: 0;
}
/* The dots/bullets/indicators */
.dot {
cursor: pointer;
height: 15px;
width: 15px;
margin: 0 2px;
background-color: #bbb;
border-radius: 50%;
display: inline-block;
transition: background-color 0.6s ease;
}
.active, .dot:hover {
background-color: #717171;
}
/* Fading animation */
.fade {
-webkit-animation-name: fade;
-webkit-animation-duration: 1.5s;
animation-name: fade;
animation-duration: 1.5s;
}
@-webkit-keyframes fade {
from {opacity: .4}
to {opacity: 1}
}
@keyframes fade {
from {opacity: .4}
to {opacity: 1}
}
/* On smaller screens, decrease text size */
@media only screen and (max-width: 300px) {
.prev, .next,.text {font-size: 11px}
}
</style>
<body>
<div class="slideshow-container">
<div class="mySlides fade">
<div class="numbertext">1 / 3</div>
<img src="https://www.w3schools.com/howto/img_nature_wide.jpg" style="width:100%">
<div class="text">Caption Text</div>
</div>
<div class="mySlides fade">
<div class="numbertext">2 / 3</div>
<img src="https://www.w3schools.com/howto/img_snow_wide.jpg" style="width:100%">
<div class="text">Caption Two</div>
</div>
<div class="mySlides fade">
<div class="numbertext">3 / 3</div>
<img src="https://www.w3schools.com/howto/img_mountains_wide.jpg" style="width:100%">
<div class="text">Caption Three</div>
</div>
<a class="prev" onclick="plusSlides(-1)">❮ AAAA</a>
<a class="next" onclick="plusSlides(1)">❯ BBBB</a>
</div>
<br>
<div style="text-align:center">
<span class="dot" onclick="currentSlide(1)"></span>
<span class="dot" onclick="currentSlide(2)"></span>
<span class="dot" onclick="currentSlide(3)"></span>
</div>
<script>
var slideIndex = 1;
showSlides(slideIndex);
function plusSlides(n) {
showSlides(slideIndex += n);
}
function currentSlide(n) {
showSlides(slideIndex = n);
}
function showSlides(n) {
var i;
var slides = document.getElementsByClassName("mySlides");
var dots = document.getElementsByClassName("dot");
if (n > slides.length) {slideIndex = 1}
if (n < 1) {slideIndex = slides.length}
for (i = 0; i < slides.length; i++) {
slides[i].style.display = "none";
}
for (i = 0; i < dots.length; i++) {
dots[i].className = dots[i].className.replace(" active", "");
}
slides[slideIndex-1].style.display = "block";
dots[slideIndex-1].className += " active";
}
</script>
</body>
""")
# SVG Testing
def render_svg(svg):
"""Renders the given svg string."""
b64 = base64.b64encode(svg.encode('utf-8')).decode("utf-8")
html = r'<div style="width:100%%;overflow:scroll;"><img src="data:image/svg+xml;base64,%s"/></div>' % b64
st.write(html, unsafe_allow_html=True)
def render_svg_example():
# text_file = open("ud.svg", "r")
# svg = text_file.read()
# text_file.close()
svg = """
<svg xmlns="http://www.w3.org/2000/svg" width="100" height="100">
<circle cx="50" cy="50" r="40" stroke="green" stroke-width="4" fill="yellow" />
</svg>
"""
st.write('## Rendering an SVG in Streamlit')
# st.write('### SVG Input')
# st.code(textwrap.dedent(svg), 'svg')
st.write('### SVG Output')
render_svg(svg)
render_svg_example()
|
1713323
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.options = cms.untracked.PSet(
numberOfStreams = cms.untracked.uint32(1)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'file:testRandomServiceMerge1.root'
),
firstRun = cms.untracked.uint32(1),
firstEvent = cms.untracked.uint32(3)
)
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('testRandomServiceTest1.root')
)
process.RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService",
t1 = cms.PSet(
initialSeed = cms.untracked.uint32(7)
),
t2 = cms.PSet(
engineName = cms.untracked.string('RanecuEngine'),
initialSeedSet = cms.untracked.vuint32(7, 7)
),
t3 = cms.PSet(
initialSeed = cms.untracked.uint32(7),
engineName = cms.untracked.string('TRandom3')
),
t4 = cms.PSet(
engineName = cms.untracked.string('HepJamesRandom'),
initialSeed = cms.untracked.uint32(7)
),
t6 = cms.PSet(
initialSeed = cms.untracked.uint32(7),
engineName = cms.untracked.string('MixMaxRng')
),
enableChecking = cms.untracked.bool(True),
restoreStateLabel = cms.untracked.string('randomEngineStateProducer')
)
process.t1 = cms.EDAnalyzer("TestRandomNumberServiceGlobal",
engineName = cms.untracked.string('HepJamesRandom'),
seeds = cms.untracked.vuint32(81),
offset = cms.untracked.uint32(0),
maxEvents = cms.untracked.uint32(15),
nStreams = cms.untracked.uint32(1),
skippedEvents = cms.untracked.vuint32(2),
seedByLumi = cms.untracked.vuint32(0, 82, 82, 202, 202)
)
process.t2 = cms.EDAnalyzer("TestRandomNumberServiceGlobal",
engineName = cms.untracked.string('RanecuEngine'),
seeds = cms.untracked.vuint32(1, 2),
offset = cms.untracked.uint32(0),
maxEvents = cms.untracked.uint32(15),
nStreams = cms.untracked.uint32(1),
skippedEvents = cms.untracked.vuint32(2),
seedByLumi = cms.untracked.vuint32(0, 2, 2, 203, 203)
)
process.t3 = cms.EDAnalyzer("TestRandomNumberServiceGlobal",
engineName = cms.untracked.string('TRandom3'),
seeds = cms.untracked.vuint32(83),
offset = cms.untracked.uint32(0),
maxEvents = cms.untracked.uint32(15),
nStreams = cms.untracked.uint32(1),
skippedEvents = cms.untracked.vuint32(2),
seedByLumi = cms.untracked.vuint32(0, 84, 84, 204, 204)
)
process.t4 = cms.EDAnalyzer("TestRandomNumberServiceGlobal",
engineName = cms.untracked.string('HepJamesRandom'),
seeds = cms.untracked.vuint32(84),
offset = cms.untracked.uint32(0),
maxEvents = cms.untracked.uint32(15),
nStreams = cms.untracked.uint32(1),
skippedEvents = cms.untracked.vuint32(2),
seedByLumi = cms.untracked.vuint32(0, 85, 85, 205, 205)
)
process.t6 = cms.EDAnalyzer("TestRandomNumberServiceGlobal",
engineName = cms.untracked.string('MixMaxRng'),
seeds = cms.untracked.vuint32(85),
offset = cms.untracked.uint32(0),
maxEvents = cms.untracked.uint32(15),
nStreams = cms.untracked.uint32(1),
skippedEvents = cms.untracked.vuint32(2),
seedByLumi = cms.untracked.vuint32(0, 86, 86, 206, 206)
)
process.p = cms.Path(process.t1+process.t2+process.t3+process.t4)
process.o = cms.EndPath(process.out)
|
1713362
|
from typing import Any
from strawberry.asgi.handlers import (
GraphQLTransportWSHandler as BaseGraphQLTransportWSHandler,
)
class GraphQLTransportWSHandler(BaseGraphQLTransportWSHandler):
async def get_context(self) -> Any:
return await self._get_context()
async def get_root_value(self) -> Any:
return await self._get_root_value()
|
1713380
|
import re
from io import BytesIO
from fabric.contrib.console import confirm
from fabric.contrib.files import sed, uncomment, exists
from fabric.decorators import task
from fabric.operations import sudo, prompt, put
from fabric.utils import abort
from fabdeb.apt import apt_install
from fabdeb.os import user_exists, service_restart, check_sudo, check_os
from fabdeb.tools import print_green
__all__ = ('install_proftpd', 'add_user_to_proftpd')
# # # COMMANDS # # #
@task
def install_proftpd():
"""
Install proftpd server
"""
check_sudo()
check_os()
if not confirm('Do you want to install proftpd?'):
return
print_green('INFO: Install proftpd...')
apt_install('proftpd', noconfirm=True)
conf_fn = '/etc/proftpd/proftpd.conf'
sudo('cp {fn} {fn}.bak'.format(fn=conf_fn), warn_only=True)
sed(conf_fn, r'UseIPv6\s+on', r'UseIPv6\t\t\t\toff\nUseReverseDNS\t\t\toff', use_sudo=True, backup='')
sn = prompt('Set ftp server name', default='MyFTPServer', validate=r'[\w\- ]+')
sed(conf_fn, r'ServerName\s+".+"', r'ServerName\t\t\t"{}"'.format(sn), use_sudo=True, backup='')
sed(conf_fn, r'TimeoutNoTransfer.+', r'TimeoutNoTransfer\t\t3600', use_sudo=True, backup='')
sed(conf_fn, r'TimeoutStalled.+', r'TimeoutStalled\t\t\t3600', use_sudo=True, backup='')
sed(conf_fn, r'TimeoutIdle.+', r'TimeoutIdle\t\t\t7200', use_sudo=True, backup='')
uncomment(conf_fn, r'#\s*DefaultRoot', use_sudo=True, backup='')
    uncomment(conf_fn, r'#\s*RequireValidShell', use_sudo=True, backup='')  # TODO: uncomment only the first occurrence instead of all
uncomment(conf_fn, r'#\s*PassivePorts', use_sudo=True, backup='')
t = (r'<Global>\n'
r' RootLogin off\n'
r'</Global>\n'
r'AuthUserFile /etc/proftpd/ftpd.passwd\n'
r'<Directory ~/>\n'
r' HideFiles "(\\\\.ftpaccess)$"\n'
r'</Directory>\n')
sed(conf_fn, r'(# Include other custom configuration files)', r'{}\n\1'.format(t), use_sudo=True, backup='')
print_green('INFO: Install proftpd... OK')
@task
def add_user_to_proftpd(username):
"""
Setup proftpd for user (user's home directory will be available via ftp)
"""
check_sudo()
check_os()
if not confirm('Do you want to setup proftpd for user "{}"?'.format(username)):
return
if not exists('/usr/sbin/proftpd'):
abort('proftpd is not installed')
print_green('INFO: Add user "{}" to proftpd...'.format(username))
if not user_exists(username):
        abort('User {} does not exist'.format(username))
t = sudo('id {}'.format(username))
uid, gid = re.search(r'uid=(\d+)\(.+gid=(\d+)\(', t).groups()
passwd_fn = '/etc/proftpd/ftpd.passwd'
sudo('ftpasswd --passwd --file={passwd} --name={user} --shell=/bin/false '
'--home=/home/{user} --uid={uid} --gid={gid}'.format(passwd=passwd_fn, user=username, uid=uid, gid=gid))
ftpaccess = ('<Limit READ WRITE DIRS>\n'
' Order deny,allow\n'
' Allowuser {user}\n'
'</Limit>\n').format(user=username).encode()
put(BytesIO(ftpaccess), '/home/{}/.ftpaccess'.format(username), use_sudo=True)
if confirm('Do you want to restart proftpd?'):
service_restart('proftpd')
print_green('INFO: Add user "{}" to proftpd... OK'.format(username))
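# Usage sketch (Fabric 1.x CLI; the host and user names below are placeholders,
# not part of this fabfile):
#   fab -H root@ftp.example.com install_proftpd
#   fab -H root@ftp.example.com add_user_to_proftpd:username=ftpuser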
|
1713383
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
from .augment import random_photometric
from .flow_util import flow_to_color
from .losses import charbonnier_loss
from .flownet import flownet
from .unsupervised import _track_image, _track_loss, FLOW_SCALE
def supervised_loss(batch, params, normalization=None):
channel_mean = tf.constant(normalization[0]) / 255.0
im1, im2, flow_gt, mask_gt = batch
im1 = im1 / 255.0
im2 = im2 / 255.0
im_shape = tf.shape(im1)[1:3]
# -------------------------------------------------------------------------
im1_photo, im2_photo = random_photometric(
[im1, im2],
noise_stddev=0.04, min_contrast=-0.3, max_contrast=0.3,
brightness_stddev=0.02, min_colour=0.9, max_colour=1.1,
min_gamma=0.7, max_gamma=1.5)
_track_image(im1_photo, 'im1_photo')
_track_image(im2_photo, 'im2_photo')
_track_image(flow_to_color(flow_gt), 'flow_gt')
_track_image(mask_gt, 'mask_gt')
# Images for neural network input with mean-zero values in [-1, 1]
im1_photo = im1_photo - channel_mean
im2_photo = im2_photo - channel_mean
flownet_spec = params.get('flownet', 'S')
full_resolution = params.get('full_res')
train_all = params.get('train_all')
# -------------------------------------------------------------------------
# FlowNet
flows_fw = flownet(im1_photo, im2_photo,
flownet_spec=flownet_spec,
full_resolution=full_resolution,
train_all=train_all)
if not train_all:
flows_fw = [flows_fw[-1]]
final_loss = 0.0
for i, net_flows in enumerate(reversed(flows_fw)):
flow_fw = net_flows[0]
if params.get('full_res'):
final_flow_fw = flow_fw * FLOW_SCALE * 4
else:
final_flow_fw = tf.image.resize_bilinear(flow_fw, im_shape) * FLOW_SCALE * 4
_track_image(flow_to_color(final_flow_fw), 'flow_pred_' + str(i))
net_loss = charbonnier_loss(final_flow_fw - flow_gt, mask_gt)
final_loss += net_loss / (2 ** i)
regularization_loss = tf.add_n(slim.losses.get_regularization_losses())
final_loss += regularization_loss
_track_loss(regularization_loss, 'loss/regularization')
_track_loss(final_loss, 'loss/combined')
return final_loss
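# --- Illustrative sketch only --------------------------------------------------
# The Charbonnier penalty imported above from .losses is the project's own
# implementation; the helper below merely shows the usual robust
# sqrt(x^2 + eps^2) form for reference and is not used by supervised_loss.
def _charbonnier_reference(diff, mask=None, epsilon=0.001):
    # element-wise robust error
    error = tf.sqrt(tf.square(diff) + epsilon ** 2)
    if mask is not None:
        # zero out contributions outside the valid-pixel mask
        error = error * mask
    return tf.reduce_mean(error)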
|
1713413
|
from ewah.hooks.sql_base import EWAHSQLBaseHook
import cx_Oracle
from typing import Optional, List, Union
class EWAHOracleSQLOperator(EWAHSQLBaseHook):
_DEFAULT_PORT = 1521
_ATTR_RELABEL = {
"user": "login",
"sid": "schema",
}
conn_name_attr = "ewah_oracle_conn_id"
default_conn_name = "ewah_oracle_default"
conn_type = "ewah_oracle"
hook_name = "EWAH Oracle Connection"
@staticmethod
def get_ui_field_behaviour() -> dict:
return {
"hidden_fields": ["extra"],
"relabeling": {
"password": "Password",
"login": "User",
"schema": "SID",
"host": "Hostname / IP",
"port": "Port (default: 1521)",
},
}
@staticmethod
def get_connection_form_widgets() -> dict:
"""Returns connection widgets to add to connection form"""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from wtforms import StringField
return {
f"extra__ewah_oracle__ssh_conn_id": StringField(
"SSH Connection ID (optional)",
widget=BS3TextFieldWidget(),
),
}
@staticmethod
def _adjust_sql(sql):
sql = sql.strip()
if sql[-1:] == ";":
sql = sql[:-1].strip()
return sql
def _get_db_conn(self):
return cx_Oracle.connect(
self.conn.user,
self.conn.password,
"{0}:{1}/{2}".format(
self.local_bind_address[0],
self.local_bind_address[1],
self.conn.sid,
),
encoding="UTF-8",
)
def _get_cursor(self):
return self.dbconn.cursor()
def _get_dictcursor(self):
class dictcur(object):
# need to monkeypatch the built-in execute function to always return a dict
def __init__(self, cursor):
self._original_cursor = cursor
def execute(self, *args, **kwargs):
# rowfactory needs to be set AFTER EACH execution!
self._original_cursor.execute(*args, **kwargs)
self._original_cursor.rowfactory = lambda *a: dict(
zip([d[0] for d in self._original_cursor.description], a)
)
# cx_Oracle's cursor's execute method returns a cursor object
# -> return the correct cursor in the monkeypatched version as well!
return self._original_cursor
def __getattr__(self, attr):
# anything other than the execute method: just go straight to the cursor
return getattr(self._original_cursor, attr)
return dictcur(self.dbconn.cursor())
def execute(
self, sql: str, params: Optional[dict] = None, commit: bool = False, cursor=None
) -> None:
params = params or {}
self.log.info(
"Executing SQL:\n\n{0}\n\nWith params:\n{1}".format(
sql,
"\n".join(
[
"{0}: {1}".format(key, str(value))
for (key, value) in params.items()
]
)
if params
else "No params!",
)
)
(cursor or self.cursor).execute(self._adjust_sql(sql), **params)
if commit:
self.commit()
def get_data_from_sql(
self, sql: str, params: Optional[dict] = None, return_dict: bool = True
) -> Union[List[list], List[dict]]:
cur = self.dictcursor if return_dict else self.cursor
self.execute(sql, params=params, cursor=cur, commit=False)
return cur.fetchall()
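# --- Illustrative sketch (not part of the hook API) ----------------------------
# The dictcur wrapper above exists because cx_Oracle resets a cursor's
# `rowfactory` whenever execute() is called again. The same idea as a tiny
# standalone helper, assuming `conn` is an already-open cx_Oracle connection
# and `sql` is a plain SELECT statement:
def _fetch_dicts_sketch(conn, sql, params=None):
    cur = conn.cursor()
    cur.execute(sql, params or {})
    # rowfactory must be (re)assigned after every execute call
    columns = [d[0] for d in cur.description]
    cur.rowfactory = lambda *row: dict(zip(columns, row))
    return cur.fetchall()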
|
1713437
|
from freezegun import freeze_time
from rest_framework import test
from waldur_mastermind.marketplace import models as marketplace_models
from .. import tasks
from . import fixtures
@freeze_time('2020-02-01')
class TaskTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.BookingFixture()
self.fixture.order_item.state = marketplace_models.OrderItem.States.EXECUTING
self.fixture.order_item.save()
def test_reject_past_booking(self):
self.fixture.resource.attributes['schedules'] = [
{
'start': '2020-01-01T02:00:00+03:00',
'end': '2020-01-15T02:00:00+03:00',
'id': '1',
},
{
'start': '2020-01-16T02:00:00+03:00',
'end': '2020-01-17T02:00:00+03:00',
'id': '2',
},
]
self.fixture.resource.save()
tasks.reject_past_bookings()
self.fixture.resource.refresh_from_db()
self.assertEqual(
self.fixture.resource.state, marketplace_models.Resource.States.TERMINATED
)
def test_do_not_reject_actual_booking(self):
self.fixture.resource.attributes['schedules'] = [
{
'start': '2020-01-01T02:00:00+03:00',
'end': '2020-01-15T02:00:00+03:00',
'id': '1',
},
{
'start': '2020-03-01T02:00:00+03:00',
'end': '2020-03-15T02:00:00+03:00',
'id': '2',
},
]
self.fixture.resource.save()
tasks.reject_past_bookings()
self.fixture.resource.refresh_from_db()
self.assertEqual(
self.fixture.resource.state, marketplace_models.Resource.States.CREATING
)
|
1713442
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='rmate',
version='1.0.3',
url='https://github.com/sclukey/rmate-python',
description='Edit files over SSH.',
long_description=long_description,
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Topic :: Utilities',
'Environment :: Console',
'Operating System :: POSIX',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
scripts=['bin/rmate'],
)
|
1713452
|
import unittest
from druzhba.db import ConnectionParams, DatabaseConfig
class DbTest(unittest.TestCase):
def test_connection_string(self):
config = DatabaseConfig(
"test_db",
"mysql",
("postgresql://test_user:test_password@test-db.prod:5439/" "test_db_name"),
)
self.assertIsNotNone(config)
def test_encoded_connection_string(self):
config = DatabaseConfig(
"test_db",
"mysql",
(
"postgresql://test_user:test_%3Fpassword@test-<EMAIL>:5439/"
"test_db_name"
),
)
params = config.get_connection_params()
self.assertIsNotNone(params)
self.assertEqual(params.password, "<PASSWORD>")
self.assertEqual(params.additional, {})
def test_unencoded_connection_string(self):
"""
'?' in the password will cause the urlparse to fail
"""
config = DatabaseConfig(
"test_db",
"mysql",
("postgresql://test_user:test_?password@test-db.prod:5439/" "test_db_name"),
)
self.assertRaises(ValueError, config.get_connection_params)
def test_additional_parameters(self):
config = DatabaseConfig(
"test_db",
"mysql",
(
"postgresql://test_user:test_password@test-db.prod:5439/"
"test_db_name?sslmode=disable&connect_timeout=60"
),
)
params = config.get_connection_params()
self.assertIsNotNone(params)
self.assertEqual(
params.additional, {"sslmode": "disable", "connect_timeout": "60"}
)
class ConnectionParamsTest(unittest.TestCase):
def test_bad_connection_params(self):
self.assertRaises(TypeError, ConnectionParams, "name", "host", "port", "user")
|
1713454
|
import ast
from typing import Dict
from boa3.model import set_internal_call
from boa3.model.builtin.interop.nativecontract import StdLibMethod
from boa3.model.variable import Variable
class MemorySearchMethod(StdLibMethod):
def __init__(self):
from boa3.model.type.type import Type
identifier = 'memory_search'
native_identifier = 'memorySearch'
args: Dict[str, Variable] = {
'mem': Variable(Type.union.build([Type.str, Type.bytes])),
'value': Variable(Type.union.build([Type.str, Type.bytes])),
'start': Variable(Type.int),
'backward': Variable(Type.bool),
}
        start_default = set_internal_call(
            ast.parse("{0}".format(Type.int.default_value)).body[0].value)
        backward_default = set_internal_call(
            ast.parse("{0}".format(Type.bool.default_value)).body[0].value)
super().__init__(identifier, native_identifier, args, defaults=[start_default, backward_default], return_type=Type.int)
|
1713473
|
from test.common_test_util import expected_result
from test.hquery.hquery_test_util import query_html_doc
def test_parentheses_boost_precedence():
assert query_html_doc('', '(2+3)*3') == expected_result('15')
assert query_html_doc('', '3*(3+2)') == expected_result('15')
assert query_html_doc('', '2+3*3 != (2+3)*3') == expected_result('true')
def test_union_operator_combines_node_sets():
html_body = """
<div>one</div>
<div>two</div>
<p>three</p>"""
assert query_html_doc(html_body, '//div | //p') == expected_result("""
<div>
one
</div>
<div>
two
</div>
<p>
three
</p>""")
def test_union_operator_produces_node_set_sorted_in_document_order():
html_body = """
<div>one</div>
<p>two</p>
<div>three</div>"""
assert query_html_doc(html_body, '//p | //div') == expected_result("""
<div>
one
</div>
<p>
two
</p>
<div>
three
</div>""")
|
1713545
|
import os
import sys
import io
import openpyxl
from .file import ArchiveFile
from .db_core import VfsDatabase, VfsNode
from .ff_adf import AdfDatabase
from .export_import_adf import adf_export_xlsx_0x0b73315d
from .util import make_dir_for_file
def process_translation_adf(vfs: VfsDatabase, adf_db: AdfDatabase, node: VfsNode):
adf = adf_db.read_node(vfs, node)
txt_buffer = adf.table_instance_values[0]['Text']
txt_buffer = [(t + 256) % 256 for t in txt_buffer]
txt_buffer = bytearray(txt_buffer)
tr = {}
with ArchiveFile(io.BytesIO(txt_buffer)) as tf:
for prs in adf.table_instance_values[0]['SortedPairs']:
tf.seek(prs['TextOffset'])
text = tf.read_strz().decode('utf-8')
tf.seek(prs['NameOffset'])
name = tf.read_strz().decode('utf-8')
tr[name] = text
if sys.platform == 'linux':
debug_file = os.path.join(vfs.working_dir, 'text_debug.txt')
make_dir_for_file(debug_file)
with open(debug_file, 'w') as dt:
for k, v in tr.items():
                buf = '{}\t{}\n'.format(k, v.replace('\n', '<br>').replace('"', '&quot;'))
dt.write(buf)
return tr
def process_codex_adf(vfs: VfsDatabase, adf_db: AdfDatabase, node: VfsNode, export_path='./digest/'):
codex_fn = adf_export_xlsx_0x0b73315d(vfs, adf_db, node, export_path=export_path, allow_overwrite=True)
codex_wb = openpyxl.load_workbook(filename=codex_fn)
cat_id = []
cat_name = []
for col in codex_wb['CollectableCategories'].columns:
c = [v.value for v in col]
if c[0] == 'id':
cat_id = c[1:]
elif c[0] == 'name':
cat_name = c[1:]
codex_id = []
codex_name = []
codex_desc = []
codex_icon = []
codex_category = []
for col in codex_wb['Collectables'].columns:
c = [v.value for v in col]
if c[0] == 'id':
codex_id = c[1:]
elif c[0] == 'name':
codex_name = c[1:]
elif c[0] == 'description':
codex_desc = c[1:]
elif c[0] == 'icon':
codex_icon = c[1:]
elif c[0] == 'collection_id':
codex_category = c[1:]
categories = dict(zip(cat_id, cat_name))
codex = {}
for cid, name, desc, icon, category in zip(codex_id, codex_name, codex_desc, codex_icon, codex_category):
if cid is not None:
codex[cid] = (name, desc, icon, category)
return categories, codex
|
1713559
|
import importlib
import pathlib
from types import ModuleType
from typing import Optional
from . import py_module
from .exceptions import InvalidPluginError
from .plugin import PluginBase
from .py_module import _parse_entry_point
def _load(module: ModuleType, func_name: str) -> PluginBase:
entry_point = getattr(module, func_name, None)
if entry_point is None or not callable(entry_point):
raise InvalidPluginError(
module.__file__, f"expected to have `{func_name}` method: {module.__file__}"
)
try:
ret = entry_point()
except Exception as e:
raise RuntimeError(
f"an error occured while loading {module.__file__}::{func_name}",
) from e
if not isinstance(ret, PluginBase):
raise InvalidPluginError(
module.__file__, f"`{func_name}` must return an instance of PluginBase"
)
return ret
def load_from_file(path: pathlib.Path) -> PluginBase:
module = py_module.load(path, "plugin")
return _load(module, "plugin")
def load_from_module(entry_point: str) -> PluginBase:
parsed = _parse_entry_point(entry_point)
if parsed is None:
raise ValueError(f"invalid entry_point: {entry_point}")
module_name, func_name = parsed
module = importlib.import_module(module_name)
return _load(module, func_name)
def load_plugin(
function: Optional[str] = None, script: Optional[pathlib.Path] = None
) -> PluginBase:
if function is None and script is None:
raise TypeError("must specify either function or script")
if function is not None and script is not None:
raise TypeError("only one of function or script must be speicifed")
if function is not None:
try:
return load_from_module(function)
except BaseException:
if script is not None:
pass # fallback
else:
raise
assert script is not None
return load_from_file(script)
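# --- Usage sketch ---------------------------------------------------------------
# Assumptions: entry points follow a "package.module:function" syntax (as
# suggested by _parse_entry_point) and a local plugin.py exposes a `plugin()`
# factory returning a PluginBase. Adjust the names to your project.
#
#   plugin = load_plugin(function="my_pkg.plugins:create_plugin")
#   plugin = load_plugin(script=pathlib.Path("plugin.py"))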
|
1713571
|
import os
import pytest
from ansys.mapdl import core as pymapdl
from ansys.mapdl.core import examples
def test_convert_no_use_function_names(tmpdir):
vm_file = examples.vmfiles["vm1"]
pyscript = str(tmpdir.mkdir("tmpdir").join("vm1.py"))
clines = pymapdl.convert_script(
vm_file, pyscript, loglevel="ERROR", use_function_names=False
)
assert clines
@pytest.mark.skipif(os.name == "nt", reason="Requires multiple instances")
def test_convert(tmpdir):
vm_file = examples.vmfiles["vm1"]
pyscript = str(tmpdir.mkdir("tmpdir").join("vm1.py"))
clines = pymapdl.convert_script(vm_file, pyscript, loglevel="ERROR")
assert clines
|
1713639
|
import os
import sys
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.simplefilter(action='ignore', category=Warning)
import torch
|
1713645
|
import time
import jimi
class _collect(jimi.action._action):
useCustomData = bool()
customData = dict()
limit = int()
def __init__(self,restrictClass=True):
self.events = []
return super(_collect, self).__init__(restrictClass)
def doAction(self,data):
try:
if "skip" in data["flowData"]:
del data["flowData"]["skip"]
return { "result" : True, "rc" : 0 }
except KeyError:
pass
if self.useCustomData:
customData = jimi.helpers.evalDict(self.customData,{"data" : data["flowData"], "eventData" : data["eventData"], "conductData" : data["conductData"], "persistentData" : data["persistentData"] })
self.events.append(customData)
else:
self.events.append(data["flowData"]["event"])
self.data = data
if self.limit != 0 and self.limit < len(self.events):
self.continueFlow()
# Returning false to stop flow continue
return { "result" : False, "rc" : 9 }
def continueFlow(self):
if self.events:
tempDataCopy = jimi.conduct.copyData(self.data)
tempDataCopy["flowData"]["event"] = self.events
tempDataCopy["flowData"]["skip"] = 1
self.events = []
tempDataCopy["flowData"]["eventStats"] = { "first" : True, "current" : 0, "total" : 1, "last" : True }
self.data["persistentData"]["system"]["conduct"].triggerHandler(self.data["flowData"]["flow_id"],tempDataCopy,flowIDType=True)
def postRun(self):
self.continueFlow()
|
1713688
|
def helper(got,expect):
if got == expect: print True
else: print False, expect, got
print "\nstr.count"
helper('abcd abcba '.count('abc'),2)
helper('abcd abcba '.count('z'),0)
helper('abcd abcba '.count('abc',1),1)
helper('abcd abcba '.count('abc',-1),0)
helper('abcd abcba '.count('abc',5),1)
helper('abcd abcba '.count('abc',-5),0)
helper('abcd abcba '.count('abc',1,8),1)
helper('abcd abcba '.count('abc',-6,-3),1)
helper('abcd abcba '.count('abc',4,-1),1)
helper('abcd abcba '.count('abc',-6,10),1)
helper('abcd abcda '.count('ad',-6,-3),0)
helper('abcd abcba '.count('a',-6,-6),0)
helper('abcd abcba '.count('a',6,-7),0)
helper('abcd abcba '.count('a',3,1),0)
helper('abcd abcba '.count('a',-100,100),3)
helper('abcd abcba '.count('a',100,-100),0)
|
1713711
|
from __future__ import absolute_import, unicode_literals
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.test import override_settings
from django.urls import reverse
from rest_framework.test import APITestCase
from documents.models import DocumentType
from documents.permissions import permission_document_view
from documents.tests.literals import (
TEST_DOCUMENT_TYPE_LABEL, TEST_SMALL_DOCUMENT_PATH
)
from permissions.classes import Permission
from permissions.models import Role
from permissions.tests.literals import TEST_ROLE_LABEL
from user_management.tests.literals import (
TEST_ADMIN_EMAIL, TEST_ADMIN_PASSWORD, TEST_ADMIN_USERNAME
)
from ..models import AccessControlList
from ..permissions import permission_acl_view
@override_settings(OCR_AUTO_OCR=False)
class ACLAPITestCase(APITestCase):
def setUp(self):
self.admin_user = get_user_model().objects.create_superuser(
username=TEST_ADMIN_USERNAME, email=TEST_ADMIN_EMAIL,
password=<PASSWORD>
)
self.client.login(
username=TEST_ADMIN_USERNAME, password=<PASSWORD>
)
self.document_type = DocumentType.objects.create(
label=TEST_DOCUMENT_TYPE_LABEL
)
with open(TEST_SMALL_DOCUMENT_PATH) as file_object:
self.document = self.document_type.new_document(
file_object=file_object
)
self.role = Role.objects.create(label=TEST_ROLE_LABEL)
self.document_content_type = ContentType.objects.get_for_model(
self.document
)
Permission.invalidate_cache()
def tearDown(self):
if hasattr(self, 'document_type'):
self.document_type.delete()
def _create_acl(self):
self.acl = AccessControlList.objects.create(
content_object=self.document,
role=self.role
)
self.acl.permissions.add(permission_document_view.stored_permission)
def test_object_acl_list_view(self):
self._create_acl()
response = self.client.get(
reverse(
'rest_api:accesscontrollist-list',
args=(
self.document_content_type.app_label,
self.document_content_type.model,
self.document.pk
)
)
)
self.assertEqual(
response.data['results'][0]['content_type']['app_label'],
self.document_content_type.app_label
)
self.assertEqual(
response.data['results'][0]['role']['label'], TEST_ROLE_LABEL
)
def test_object_acl_delete_view(self):
self._create_acl()
response = self.client.delete(
reverse(
'rest_api:accesscontrollist-detail',
args=(
self.document_content_type.app_label,
self.document_content_type.model,
self.document.pk, self.acl.pk
)
)
)
self.assertEqual(response.status_code, 204)
self.assertEqual(AccessControlList.objects.count(), 0)
def test_object_acl_detail_view(self):
self._create_acl()
response = self.client.get(
reverse(
'rest_api:accesscontrollist-detail',
args=(
self.document_content_type.app_label,
self.document_content_type.model,
self.document.pk, self.acl.pk
)
)
)
self.assertEqual(
response.data['content_type']['app_label'],
self.document_content_type.app_label
)
self.assertEqual(
response.data['role']['label'], TEST_ROLE_LABEL
)
def test_object_acl_permission_delete_view(self):
self._create_acl()
permission = self.acl.permissions.first()
response = self.client.delete(
reverse(
'rest_api:accesscontrollist-permission-detail',
args=(
self.document_content_type.app_label,
self.document_content_type.model,
self.document.pk, self.acl.pk,
permission.pk
)
)
)
self.assertEqual(response.status_code, 204)
self.assertEqual(self.acl.permissions.count(), 0)
def test_object_acl_permission_detail_view(self):
self._create_acl()
permission = self.acl.permissions.first()
response = self.client.get(
reverse(
'rest_api:accesscontrollist-permission-detail',
args=(
self.document_content_type.app_label,
self.document_content_type.model,
self.document.pk, self.acl.pk,
permission.pk
)
)
)
self.assertEqual(
response.data['pk'], permission_document_view.pk
)
def test_object_acl_permission_list_view(self):
self._create_acl()
response = self.client.get(
reverse(
'rest_api:accesscontrollist-permission-list',
args=(
self.document_content_type.app_label,
self.document_content_type.model,
self.document.pk, self.acl.pk
)
)
)
self.assertEqual(
response.data['results'][0]['pk'],
permission_document_view.pk
)
def test_object_acl_permission_list_post_view(self):
self._create_acl()
response = self.client.post(
reverse(
'rest_api:accesscontrollist-permission-list',
args=(
self.document_content_type.app_label,
self.document_content_type.model,
self.document.pk, self.acl.pk
)
), data={'permission_pk': permission_acl_view.pk}
)
self.assertEqual(response.status_code, 201)
self.assertQuerysetEqual(
ordered=False, qs=self.acl.permissions.all(), values=(
repr(permission_document_view.stored_permission),
repr(permission_acl_view.stored_permission)
)
)
def test_object_acl_post_no_permissions_added_view(self):
response = self.client.post(
reverse(
'rest_api:accesscontrollist-list',
args=(
self.document_content_type.app_label,
self.document_content_type.model,
self.document.pk
)
), data={'role_pk': self.role.pk}
)
self.assertEqual(response.status_code, 201)
self.assertEqual(
self.document.acls.first().role, self.role
)
self.assertEqual(
self.document.acls.first().content_object, self.document
)
self.assertEqual(
self.document.acls.first().permissions.count(), 0
)
def test_object_acl_post_with_permissions_added_view(self):
response = self.client.post(
reverse(
'rest_api:accesscontrollist-list',
args=(
self.document_content_type.app_label,
self.document_content_type.model,
self.document.pk
)
), data={
'role_pk': self.role.pk,
'permissions_pk_list': permission_acl_view.pk
}
)
self.assertEqual(response.status_code, 201)
self.assertEqual(
self.document.acls.first().content_object, self.document
)
self.assertEqual(
self.document.acls.first().role, self.role
)
self.assertEqual(
self.document.acls.first().permissions.first(),
permission_acl_view.stored_permission
)
|
1713716
|
import io
import pytest
import numpy
from unittest import mock
from gym.spaces import Box
def test_attributes(basic_env):
assert basic_env.action_space.n == 2 ** 2
assert basic_env.observation_space == Box(low=1., high=float('Inf'), shape=(1, 2))
assert basic_env.starting_bank == 10
assert basic_env.balance == basic_env.starting_bank
assert basic_env.current_step == 0
assert numpy.array_equal(basic_env.bet_size_matrix, numpy.ones(shape=(1, 2)))
@pytest.mark.parametrize("action,expected_reward", [numpy.array((0, 0)),
numpy.array((1, 1)),
numpy.array((2, -1)),
numpy.array((3, 0))])
def test_step(basic_env, action, expected_reward):
odds, reward, done, _ = basic_env.step(action)
assert reward == expected_reward
assert not done
assert basic_env.current_step == 1
def test_reset(basic_env):
odds, reward, done, info = basic_env.step(1)
assert reward == 1
assert basic_env.balance == basic_env.starting_bank + 1
assert not done
assert basic_env.current_step == 1
assert info['legal_bet']
assert info['results'] == 1
assert info['reward'] == 1
assert not info['done']
odds, reward, done, _ = basic_env.step(2)
assert reward == 2
assert done
basic_env.reset()
assert basic_env.balance == basic_env.starting_bank
def test_info(basic_env):
info = basic_env.create_info(1)
assert info['current_step'] == 0
numpy.testing.assert_array_equal(info['odds'], numpy.array([[1, 2]]))
assert info['verbose_action'] == [['l']]
assert info['action'] == 1
assert info['balance'] == 10
assert info['reward'] == 0
assert not info['legal_bet']
assert info['results'] is None
assert not info['done']
basic_env.pretty_print_info(info)
def test_render(basic_env):
with mock.patch('sys.stdout', new=io.StringIO()) as fake_stdout:
basic_env.render()
assert fake_stdout.getvalue() == 'Current balance at step 0: 10\n'
@pytest.mark.parametrize("action", range(4))
def test_step_when_balance_is_0(basic_env, action):
basic_env.balance = 0
odds, reward, done, _ = basic_env.step(action)
assert reward == 0
assert done
assert basic_env.current_step == 0
def test_step_illegal_action(basic_env):
basic_env.balance = 1
    odds, reward, done, _ = basic_env.step(3)  # illegal - making a double when the balance is 1
assert reward == -2
assert not done
assert basic_env.current_step == 1
@pytest.mark.parametrize("current_step,expected_results", [(0, numpy.array([[0, 1]], dtype=numpy.float64)),
(1, numpy.array([[1, 0]], dtype=numpy.float64))])
def test_get_results(basic_env, current_step, expected_results):
basic_env.current_step = current_step
results = basic_env.get_results()
assert numpy.array_equal(results, expected_results)
|
1713726
|
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(os.getcwd())))
from data import goes16s3
from tools import utils, inference_tools, plotting
from slomo import unet
import matplotlib
#matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import time
import os
import seaborn as sns
sns.set_context("paper", font_scale=1.6)
year = 2017
month = 9
day = 8
n_channels = 8
t = 1.0
product = 'ABI-L1b-RadC'
data_directory = '/nex/datapoolne/goes16'
#product = 'ABI-L1b-RadM'
#data_directory = '/nobackupp10/tvandal/data/goes16'
zoom=False
nn_model = unet.UNetMedium
discard = 64
dayofyear = dt.datetime(year, month, day).timetuple().tm_yday
multivariate = True
checkpoint = '../saved-models/1.4.1-unet-medium/9Min-%iChannels-MV/' % n_channels
if product == 'ABI-L1b-RadC':
down = 20
frame_directory = 'figures/animation-conus'
min_hour = 15
else:
down = 7
frame_directory = 'figures/animation-mesoscale'
min_hour = 16
if zoom:
frame_directory += '-zoom'
if not os.path.exists(frame_directory):
os.makedirs(frame_directory)
flownet, interpnet, warper= inference_tools.load_models(n_channels, checkpoint,
multivariate=multivariate,
nn_model=nn_model)
noaadata = goes16s3.NOAAGOESS3(product=product, channels=range(1,n_channels+1),
save_directory=data_directory, skip_connection=True)
channel_idxs = [c-1 for c in noaadata.channels]
files = noaadata.local_files(year=year, dayofyear=dayofyear)
files = files.dropna()
counter = 0
I0 = None
for j, row in enumerate(files.values):
# (2017, 251, 0, 2, 168, 'RadC')
year, dayofyear, hour, minute, dsecond, spatial = files.iloc[j].name
if (product == 'ABI-L1b-RadM') and (spatial != 'RadM1'):
continue
if (product == 'ABI-L1b-RadM') and (minute % 5 != 0):
continue
if (hour < min_hour) or (hour > 22):
I0 = None
continue
if I0 is None:
I0 = goes16s3._open_and_merge_2km(row[channel_idxs])
continue
print("Frame: {}".format(counter))
timestamp = dt.datetime(year, 1, 1, hour, minute) + dt.timedelta(days=dayofyear-1)
I1 = goes16s3._open_and_merge_2km(row[channel_idxs])
vector_data = inference_tools.single_inference_split(I0.values, I1.values, 1.,
flownet, interpnet, multivariate,
overlap=128, block_size=256+128,
discard=discard)
total_flow = vector_data['f_01'] + vector_data['delta_f_t1']
c = 0
u = total_flow[2*c] * -1.
v = total_flow[2*c+1]
RGB = I0.values[[1,2,0]]
RGB = np.transpose(RGB, (1,2,0))[discard:-discard,discard:-discard]
#RGB = I0.values[7][discard:-discard,discard:-discard]
if zoom:
u = u[180:250, 180:250]
v = v[180:250, 180:250]
RGB = RGB[180:250, 180:250]
down = 2
ax = plotting.flow_quiver_plot(u, v, down=down, vmax=0.60, background_img=RGB)
ax.text(0.07*total_flow.shape[0], 0.95*total_flow.shape[1], timestamp, fontsize=14,
color='white')
plt.savefig("{}/quiver_plot_band{}-{}-{:03d}.png".format(frame_directory, c+1, product, counter), dpi=300, pad_inches=0)
plt.show()
plt.close()
ax = plotting.plot_3channel_image(I0.values)
plt.savefig("{}/rbg-{:03d}.png".format(frame_directory, counter), dpi=300, pad_inches=0)
plt.close()
I0 = I1
counter += 1
|
1713731
|
import rospy
from datetime import datetime
import csv
import rosbag
import os
import cv2
import time
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
from sensor_msgs.msg import Image
def getCurrentTimeStamp():
now = datetime.now()
timestamp = time.mktime(now.timetuple())
return int(timestamp)
'''
Sensor: GelSight Tactile Sensor
Data: Tactile Images
Format: .jpg
'''
class GelSight:
def __init__(self, object_dir):
self.object_dir = object_dir
self.bridge = CvBridge()
self.gelsight_count = 0
self.gel_path = object_dir
# First check the usb channel for the input video using 'ls -lrth /dev/video*'
# Make the according changes in the usb_cam-test.launch for group: camera1 -> video_device/value ="/dev/video1"
# Launch the file: roslaunch usb_cam usb_cam-test.launch
print("reading in the gelsight images...")
self.gel_sub = rospy.Subscriber('/gelsight/usb_cam/image_raw', Image, self.gelSightCallback)
def gelSightCallback(self, img):
try:
self.img = self.bridge.imgmsg_to_cv2(img, 'bgr8')
        except CvBridgeError as e:
print(e)
def stopRecord(self):
self.gel_sub.unregister()
def __str__(self):
return 'GelSight'
|
1713769
|
from django.conf.urls import patterns, url
# Uncomment the next two lines to enable the admin:
urlpatterns = patterns('parks.views',
# Examples:
# returns park list
url(r'^search/$', 'get_parks', name='get_parks'),
# returns list of all neighborhood names and ids and activity names and ids
url(r'^get_neighborhoods_and_activities_list/$', 'get_neighborhoods_and_activities_list', name='get_neighborhoods_and_activities_list'),
#latest image
url(r'^get_latest_picture/$', 'get_latest_picture', name='get_latest_picture'),
# featured parks
url(r'^get_featured_parks/$', 'get_featured_parks', name='get_featured_parks'),
# returns facilities
url(r'^(?P<park_id>\d+)/facilities/$', 'get_facilities', name='get_facilities'),
# park detail page
url(r'^(?P<park_slug>[-\w]+)/$', 'park_ajax', name='park'),
# nearby parks
url(r'^(?P<park_id>\d+)/nearby_parks/$', 'get_nearby_parks', name='get_nearby_parks'),
# recommended parks
url(r'^(?P<park_id>\d+)/recommended_parks/$', 'get_recommended_parks', name='get_recommended_parks'),
)
|
1713803
|
from domHttpx.colors import Color
def tab():
print('\n')
def info(text):
print('[' + Color.CBLUE2 + 'INFO' + Color.ENDC + '] ' + text)
def success(text):
print('\n[' + Color.CGREEN2 + 'SUCCESS' + Color.ENDC + '] ' + text)
def error(text):
print('[' + Color.CRED2 + 'ERROR' + Color.ENDC + '] ' + text)
def title(title):
return f"[{Color.CCYAN2}{title}{Color.ENDC}]"
def sc_200(sc):
return f"[{Color.CGREEN2}{sc}{Color.ENDC}]"
def sc_500(sc):
return f"[{Color.CYELLOW2}{sc}{Color.ENDC}]"
def sc_other(sc):
return f"[{Color.CRED2}{sc}{Color.ENDC}]"
|
1713826
|
from run import app
# region Route Test
def test_get_routes():
request, response = app.test_client.get(
'/api/v1/api-map'
)
assert response.status == 200
# endregion
|
1713833
|
from setuptools import setup, find_packages
setup(
name="acres",
version="0.1.0",
author="<NAME>",
description="ML-based barcode sharpening.",
license="MIT",
packages=find_packages(),
install_requires=[
# When running on GCP, tensorflow is guaranteed via `runtimeVersion: "1.5"`.
# Specifying tensorflow version even seems to break GPU access.
# "tensorflow == 1.5.0",
"tensorflow",
"tqdm == 4.23",
"google-cloud-storage == 1.10.0",
]
)
|
1713859
|
import pathlib
import unittest
import numpy as np
import pandas as pd
from surgeo.models.geocode_model import GeocodeModel
class TestGeocodeModel(unittest.TestCase):
_GEOCODE_MODEL = GeocodeModel()
_GEOCODE_MODEL_TRACT = GeocodeModel(geo_level='TRACT')
_DATA_FOLDER = pathlib.Path(__file__).resolve().parents[1] / 'data'
def test_get_probabilities(self):
"""Test Geocode model versus known result"""
# Get our data and clean it
input_data = pd.read_csv(
self._DATA_FOLDER / 'geocode_input.csv',
skip_blank_lines=False,
)
# Get prob
result = self._GEOCODE_MODEL.get_probabilities(input_data['zcta5'])
# Get true result
true_result = pd.read_csv(
self._DATA_FOLDER / 'geocode_output.csv',
)
# Clean for consistency
result = result.round(4).fillna('')
true_result = true_result.round(4).fillna('')
# Check that all items in the series are equal
self.assertTrue(
result.equals(true_result)
)
def test_get_probabilities_tract(self):
"""Test Geocode model versus known result with Tracts"""
# Get our data and clean it
input_data = pd.read_csv(
self._DATA_FOLDER / 'tract_input.csv',
skip_blank_lines=False,
)
# Get prob
result = self._GEOCODE_MODEL_TRACT.get_probabilities_tract(input_data[['state','county','tract']])
# Get true result
true_result = pd.read_csv(
self._DATA_FOLDER / 'tract_output.csv',
)
# Clean for consistency
result = result.round(4).fillna('')
        true_result = true_result.round(4).fillna('')
# Check that all items in the series are equal
self.assertTrue(
result.equals(true_result)
)
if __name__ == '__main__':
unittest.main()
|
1713863
|
from blog.models import Entry, Blogmark, Quotation
from django.conf import settings
from django.core.cache import cache
def all(request):
return {
"GOOGLE_ANALYTICS_ID": settings.GOOGLE_ANALYTICS_ID,
"years_with_content": years_with_content(),
}
def years_with_content():
cache_key = "years-with-content"
years = cache.get(cache_key)
if not years:
years = list(
set(
list(Entry.objects.datetimes("created", "year"))
+ list(Blogmark.objects.datetimes("created", "year"))
+ list(Quotation.objects.datetimes("created", "year"))
)
)
years.sort()
cache.set(cache_key, years, 24 * 60 * 60)
return years
|
1713900
|
from .viz import plot_cluster_centers, plot_segmentation
__all__ = ('plot_cluster_centers', 'plot_segmentation')
|
1713933
|
from scipy.misc.pilutil import imresize
import numpy as np
__author__ = 'peter'
def resize_while_preserving_aspect_ratio(im, x_dim=None, y_dim=None):
"""
Resize an image, while preserving the aspect ratio. For this you need to specify either x_dim or y_dim.
:param im: The image: a 2D or 3D array.
:param x_dim: An integer indicating the desired size, or None, to leave it loose.
:param y_dim: An integer indicating the desired size, or None, to leave it loose.
:return: A new image whose x_dim or y_dim matches the constraint
"""
assert not (x_dim is None and y_dim is None), 'You can not leave both constraints at None!'
x_dim = float('inf') if x_dim is None else x_dim
y_dim = float('inf') if y_dim is None else y_dim
box_aspect_ratio = x_dim/float(y_dim)
image_aspect_ratio = im.shape[1] / float(im.shape[0])
if image_aspect_ratio > box_aspect_ratio: # Active constraint is width
return imresize(im, size=(int(x_dim/image_aspect_ratio+.5), x_dim))
else: # Active constraint is height
return imresize(im, size=(y_dim, int(y_dim*image_aspect_ratio+.5)))
def equalize_image_dims(list_of_images, x_dim = None, y_dim = None):
"""
Resize images so that they match roughly in size although their aspect ratio will be preserved.
:param list_of_images: A list of numpy arrays representing images (2D or 3D arrays)
    :param x_dim: Desired constraint on the horizontal size.
    :param y_dim: Desired constraint on the vertical size.
        Each of (x_dim, y_dim) can be:
        - An integer, meaning that this axis of the image will remain equal to or smaller than this number of pixels.
- None, meaning that there is no constraint along this axis (e.g. (224, None) just states that the image will be
scaled to 224 pixels in the vertical direction - the horizontal will be whatever size is needed to maintain
the aspect ratio.
- 'max': Meaning that we take the largest image size along this axis.
        - 'min': Meaning that we take the smallest image size along this axis.
The image will then be scaled so that the image size remains inside this box (although, unless the aspect ratio
matches exactly, one dimension will be smaller).
:return: Another list of images.
"""
assert not (x_dim is None and y_dim is None), 'You can not leave both constraints at None!'
if len(list_of_images)==0:
return []
x_dim = max(im.shape[1] for im in list_of_images) if x_dim=='max' else \
min(im.shape[1] for im in list_of_images) if x_dim=='min' else \
x_dim
y_dim = max(im.shape[0] for im in list_of_images) if y_dim=='max' else \
min(im.shape[0] for im in list_of_images) if y_dim=='min' else \
y_dim
new_list_of_images = [resize_while_preserving_aspect_ratio(im, x_dim=x_dim, y_dim=y_dim) for im in list_of_images]
return new_list_of_images
def resize_and_crop(im, width, height):
im_aspect = float(im.shape[0])/im.shape[1]
new_aspect = float(height)/width
if im_aspect > new_aspect: # Need to chop the top and bottom
new_height = int(width*im_aspect)
resized_im = imresize(im, (new_height, width))
        start = (new_height - height) // 2
output_im = resized_im[start:start+height, :]
else: # Need to chop the left and right.
new_width = int(height/im_aspect)
resized_im = imresize(im, (height, new_width))
        start = (new_width - width) // 2
output_im = resized_im[:, start:start+width]
assert output_im.shape[:2] == (height, width)
return output_im
def resize_image(im, width=None, height=None, mode='squeeze'):
assert isinstance(im, np.ndarray) and im.ndim in (2, 3)
if mode == 'squeeze':
im = imresize(im, size=(height, width))
elif mode == 'preserve_aspect':
im = resize_while_preserving_aspect_ratio(im, x_dim=width, y_dim=height)
elif mode == 'crop':
current_height, current_width = im.shape[:2]
        assert current_height >= height and current_width >= width, "Crop size must not exceed image size"
        row_start = (current_height - height) // 2
        col_start = (current_width - width) // 2
        im = im[row_start:row_start + height, col_start:col_start + width]
elif mode in ('resize_and_crop', 'scale_crop'):
assert height is not None and width is not None, "You need to specify both height and width. for 'scale_crop' mode"
return resize_and_crop(im, width=width, height=height)
else:
raise Exception("Unknown resize mode: '{}'".format(mode))
return im
def get_dark_edge_slice(im, cut_edges_thresh=0):
vnonzero = np.flatnonzero(im.mean(axis=2).mean(axis=1)>cut_edges_thresh)
hnonzero = np.flatnonzero(im.mean(axis=2).mean(axis=0)>cut_edges_thresh)
edge_crops = slice(vnonzero[0], vnonzero[-1]+1), slice(hnonzero[0], hnonzero[-1]+1)
return edge_crops
def cut_dark_edges(im, slices = None, cut_edges_thresh=0):
if slices is None:
slices = get_dark_edge_slice(im, cut_edges_thresh=cut_edges_thresh)
y_slice, x_slice = slices
return im[y_slice, x_slice]
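# --- Usage sketch (synthetic uint8 images; shapes chosen only for illustration) -
if __name__ == '__main__':
    demo_images = [
        (np.random.rand(120, 200, 3) * 255).astype('uint8'),
        (np.random.rand(300, 100, 3) * 255).astype('uint8'),
    ]
    # Scale every image so its width matches the widest one, keeping aspect ratios.
    equalized = equalize_image_dims(demo_images, x_dim='max')
    # Squeeze a single image to exactly 224x224, ignoring aspect ratio.
    thumb = resize_image(demo_images[0], width=224, height=224, mode='squeeze')
    print([im.shape for im in equalized], thumb.shape)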
|
1713940
|
import torch
import torch.nn as nn
import numpy as np
from gns import graph_network
from torch_geometric.nn import radius_graph
from typing import Dict
class LearnedSimulator(nn.Module):
"""Learned simulator from https://arxiv.org/pdf/2002.09405.pdf."""
def __init__(
self,
particle_dimensions: int,
nnode_in: int,
nedge_in: int,
latent_dim: int,
nmessage_passing_steps: int,
nmlp_layers: int,
mlp_hidden_dim: int,
connectivity_radius: float,
boundaries: np.ndarray,
normalization_stats: Dict,
nparticle_types: int,
particle_type_embedding_size,
device="cpu"):
"""Initializes the model.
Args:
particle_dimensions: Dimensionality of the problem.
nnode_in: Number of node inputs.
nedge_in: Number of edge inputs.
latent_dim: Size of latent dimension (128)
nmessage_passing_steps: Number of message passing steps.
nmlp_layers: Number of hidden layers in the MLP (typically of size 2).
connectivity_radius: Scalar with the radius of connectivity.
boundaries: Array of 2-tuples, containing the lower and upper boundaries
of the cuboid containing the particles along each dimensions, matching
the dimensionality of the problem.
normalization_stats: Dictionary with statistics with keys "acceleration"
and "velocity", containing a named tuple for each with mean and std
fields, matching the dimensionality of the problem.
nparticle_types: Number of different particle types.
particle_type_embedding_size: Embedding size for the particle type.
device: Runtime device (cuda or cpu).
"""
super(LearnedSimulator, self).__init__()
self._boundaries = boundaries
self._connectivity_radius = connectivity_radius
self._normalization_stats = normalization_stats
self._nparticle_types = nparticle_types
# Particle type embedding has shape (9, 16)
self._particle_type_embedding = nn.Embedding(
nparticle_types, particle_type_embedding_size)
# Initialize the EncodeProcessDecode
self._encode_process_decode = graph_network.EncodeProcessDecode(
nnode_in_features=nnode_in,
nnode_out_features=particle_dimensions,
nedge_in_features=nedge_in,
latent_dim=latent_dim,
nmessage_passing_steps=nmessage_passing_steps,
nmlp_layers=nmlp_layers,
mlp_hidden_dim=mlp_hidden_dim)
self._device = device
def forward(self):
"""Forward hook runs on class instantiation"""
pass
def _compute_graph_connectivity(
self,
node_features: torch.tensor,
nparticles_per_example: torch.tensor,
radius: float,
add_self_edges: bool = True):
"""Generate graph edges to all particles within a threshold radius
Args:
node_features: Node features with shape (nparticles, dim).
nparticles_per_example: Number of particles per example. Default is 2
examples per batch.
radius: Threshold to construct edges to all particles within the radius.
add_self_edges: Boolean flag to include self edge (default: True)
"""
# Specify examples id for particles
batch_ids = torch.cat(
[torch.LongTensor([i for _ in range(n)])
for i, n in enumerate(nparticles_per_example)]).to(self._device)
# radius_graph accepts r < radius not r <= radius
# A torch tensor list of source and target nodes with shape (2, nedges)
edge_index = radius_graph(
node_features, r=radius, batch=batch_ids, loop=add_self_edges)
# The flow direction when using in combination with message passing is
# "source_to_target"
receivers = edge_index[0, :]
senders = edge_index[1, :]
return receivers, senders
def _encoder_preprocessor(
self,
position_sequence: torch.tensor,
nparticles_per_example: torch.tensor,
particle_types: torch.tensor):
"""Extracts important features from the position sequence. Returns a tuple
of node_features (nparticles, 30), edge_index (nparticles, nparticles), and
edge_features (nparticles, 3).
Args:
position_sequence: A sequence of particle positions. Shape is
(nparticles, 6, dim). Includes current + last 5 positions
nparticles_per_example: Number of particles per example. Default is 2
examples per batch.
particle_types: Particle types with shape (nparticles).
"""
nparticles = position_sequence.shape[0]
most_recent_position = position_sequence[:, -1] # (n_nodes, 2)
velocity_sequence = time_diff(position_sequence)
# Get connectivity of the graph with shape of (nparticles, 2)
senders, receivers = self._compute_graph_connectivity(
most_recent_position, nparticles_per_example, self._connectivity_radius)
node_features = []
        # Normalized velocity sequence, merging spatial and time axes.
velocity_stats = self._normalization_stats["velocity"]
normalized_velocity_sequence = (
velocity_sequence - velocity_stats['mean']) / velocity_stats['std']
flat_velocity_sequence = normalized_velocity_sequence.view(
nparticles, -1)
# There are 5 previous steps, with dim 2
# node_features shape (nparticles, 5 * 2 = 10)
node_features.append(flat_velocity_sequence)
# Normalized clipped distances to lower and upper boundaries.
# boundaries are an array of shape [num_dimensions, 2], where the second
# axis, provides the lower/upper boundaries.
boundaries = torch.tensor(
self._boundaries, requires_grad=False).float().to(self._device)
distance_to_lower_boundary = (
most_recent_position - boundaries[:, 0][None])
distance_to_upper_boundary = (
boundaries[:, 1][None] - most_recent_position)
distance_to_boundaries = torch.cat(
[distance_to_lower_boundary, distance_to_upper_boundary], dim=1)
normalized_clipped_distance_to_boundaries = torch.clamp(
distance_to_boundaries / self._connectivity_radius, -1., 1.)
# The distance to 4 boundaries (top/bottom/left/right)
# node_features shape (nparticles, 10+4)
node_features.append(normalized_clipped_distance_to_boundaries)
# Particle type
if self._nparticle_types > 1:
particle_type_embeddings = self._particle_type_embedding(
particle_types)
node_features.append(particle_type_embeddings)
# Final node_features shape (nparticles, 30) for 2D
# 30 = 10 (5 velocity sequences*dim) + 4 boundaries + 16 particle embedding
# Collect edge features.
edge_features = []
# Relative displacement and distances normalized to radius
# with shape (nedges, 2)
# normalized_relative_displacements = (
# torch.gather(most_recent_position, 0, senders) -
# torch.gather(most_recent_position, 0, receivers)
# ) / self._connectivity_radius
normalized_relative_displacements = (
most_recent_position[senders, :] -
most_recent_position[receivers, :]
) / self._connectivity_radius
# Add relative displacement between two particles as an edge feature
# with shape (nparticles, ndim)
edge_features.append(normalized_relative_displacements)
# Add relative distance between 2 particles with shape (nparticles, 1)
# Edge features has a final shape of (nparticles, ndim + 1)
normalized_relative_distances = torch.norm(
normalized_relative_displacements, dim=-1, keepdim=True)
edge_features.append(normalized_relative_distances)
return (torch.cat(node_features, dim=-1),
torch.stack([senders, receivers]),
torch.cat(edge_features, dim=-1))
def _decoder_postprocessor(
self,
normalized_acceleration: torch.tensor,
position_sequence: torch.tensor) -> torch.tensor:
""" Compute new position based on acceleration and current position.
The model produces the output in normalized space so we apply inverse
normalization.
Args:
normalized_acceleration: Normalized acceleration (nparticles, dim).
position_sequence: Position sequence of shape (nparticles, dim).
Returns:
torch.tensor: New position of the particles.
"""
# Extract real acceleration values from normalized values
acceleration_stats = self._normalization_stats["acceleration"]
acceleration = (
normalized_acceleration * acceleration_stats['std']
) + acceleration_stats['mean']
# Use an Euler integrator to go from acceleration to position, assuming
# a dt=1 corresponding to the size of the finite difference.
most_recent_position = position_sequence[:, -1]
most_recent_velocity = most_recent_position - position_sequence[:, -2]
# TODO: Fix dt
new_velocity = most_recent_velocity + acceleration # * dt = 1
new_position = most_recent_position + new_velocity # * dt = 1
return new_position
def predict_positions(
self,
current_positions: torch.tensor,
nparticles_per_example: torch.tensor,
particle_types: torch.tensor) -> torch.tensor:
"""Predict position based on acceleration.
Args:
current_positions: Current particle positions (nparticles, dim).
nparticles_per_example: Number of particles per example. Default is 2
examples per batch.
particle_types: Particle types with shape (nparticles).
Returns:
next_positions (torch.tensor): Next position of particles.
"""
node_features, edge_index, edge_features = self._encoder_preprocessor(
current_positions, nparticles_per_example, particle_types)
predicted_normalized_acceleration = self._encode_process_decode(
node_features, edge_index, edge_features)
next_positions = self._decoder_postprocessor(
predicted_normalized_acceleration, current_positions)
return next_positions
def predict_accelerations(
self,
next_positions: torch.tensor,
position_sequence_noise: torch.tensor,
position_sequence: torch.tensor,
nparticles_per_example: torch.tensor,
particle_types: torch.tensor):
"""Produces normalized and predicted acceleration targets.
Args:
next_positions: Tensor of shape (nparticles_in_batch, dim) with the
positions the model should output given the inputs.
position_sequence_noise: Tensor of the same shape as `position_sequence`
with the noise to apply to each particle.
position_sequence: A sequence of particle positions. Shape is
(nparticles, 6, dim). Includes current + last 5 positions.
nparticles_per_example: Number of particles per example. Default is 2
examples per batch.
particle_types: Particle types with shape (nparticles).
Returns:
Tensors of shape (nparticles_in_batch, dim) with the predicted and target
normalized accelerations.
"""
# Add noise to the input position sequence.
noisy_position_sequence = position_sequence + position_sequence_noise
# Perform the forward pass with the noisy position sequence.
node_features, edge_index, edge_features = self._encoder_preprocessor(
noisy_position_sequence, nparticles_per_example, particle_types)
predicted_normalized_acceleration = self._encode_process_decode(
node_features, edge_index, edge_features)
# Calculate the target acceleration, using an `adjusted_next_position `that
# is shifted by the noise in the last input position.
next_position_adjusted = next_positions + position_sequence_noise[:, -1]
target_normalized_acceleration = self._inverse_decoder_postprocessor(
next_position_adjusted, noisy_position_sequence)
# As a result the inverted Euler update in the `_inverse_decoder` produces:
# * A target acceleration that does not explicitly correct for the noise in
# the input positions, as the `next_position_adjusted` is different
# from the true `next_position`.
# * A target acceleration that exactly corrects noise in the input velocity
# since the target next velocity calculated by the inverse Euler update
# as `next_position_adjusted - noisy_position_sequence[:,-1]`
# matches the ground truth next velocity (noise cancels out).
return predicted_normalized_acceleration, target_normalized_acceleration
def _inverse_decoder_postprocessor(
self,
next_position: torch.tensor,
position_sequence: torch.tensor):
"""Inverse of `_decoder_postprocessor`.
Args:
next_position: Tensor of shape (nparticles_in_batch, dim) with the
positions the model should output given the inputs.
position_sequence: A sequence of particle positions. Shape is
(nparticles, 6, dim). Includes current + last 5 positions.
Returns:
normalized_acceleration (torch.tensor): Normalized acceleration.
"""
previous_position = position_sequence[:, -1]
previous_velocity = previous_position - position_sequence[:, -2]
next_velocity = next_position - previous_position
acceleration = next_velocity - previous_velocity
acceleration_stats = self._normalization_stats["acceleration"]
normalized_acceleration = (
acceleration - acceleration_stats['mean']) / acceleration_stats['std']
return normalized_acceleration
def save(
self,
path: str = 'model.pt'):
"""Save model state
Args:
path: Model path
"""
torch.save(self.state_dict(), path)
def load(
self,
path: str):
"""Load model state from file
Args:
path: Model path
"""
self.load_state_dict(torch.load(path, map_location=torch.device('cpu')))
def time_diff(
position_sequence: torch.tensor) -> torch.tensor:
"""Finite difference between two input position sequence
Args:
position_sequence: Input position sequence & shape(nparticles, 6 steps, dim)
Returns:
torch.tensor: Velocity sequence
"""
return position_sequence[:, 1:] - position_sequence[:, :-1]
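# Hedged sketch (not part of the original module, assumes `torch` is imported
# above): a quick check that the dt=1 Euler update in `_decoder_postprocessor`
# and its inverse in `_inverse_decoder_postprocessor` round-trip when the
# normalization stats are the identity (mean=0, std=1).
def _euler_roundtrip_demo():
    position_sequence = torch.rand(4, 6, 2)   # (nparticles, 6, dim)
    acceleration = torch.rand(4, 2)           # "predicted" acceleration
    velocity = position_sequence[:, -1] - position_sequence[:, -2]
    next_position = position_sequence[:, -1] + velocity + acceleration  # forward Euler, dt = 1
    # invert: recover the acceleration from positions alone
    next_velocity = next_position - position_sequence[:, -1]
    recovered_acceleration = next_velocity - velocity
    assert torch.allclose(recovered_acceleration, acceleration)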
|
1713953
|
import datetime
from time import gmtime, localtime, strftime
import time
from flask import Flask
from flask import jsonify
import socket
# print a nice greeting.
def say_hello(username = "World"):
return '<p>Hello %s!</p>\n' % username
def get_date_time():
secs = int(time.time())
dtval = localtime()
return jsonify(date=strftime("%d %b %Y ", dtval),
day=strftime("%A", dtval),
timezone=strftime("%Z (%z)", dtval),
time=strftime('%X', dtval),
hostname=socket.gethostname(),
epoc=secs)
# some bits of text for the page.
header_text = '''
<html>\n<head> <title>EB Flask Test</title> </head>\n<body>'''
instructions = '''
<p><em>Hint</em>: This is a RESTful web service! Append a username
to the URL (for example: <code>/Thelonious</code>) to say hello to
someone specific.</p>\n
<h2>links</h2>
<ul>
<li><a href="./freddy">Hello Freddy</a></li>
<li><a href="./date">Get date</a></li>
<li><a href="./">home</a></li>
</ul>
'''
home_link = '<p><a href="/">Back</a></p>\n'
footer_text = '</body>\n</html>'
# EB looks for an 'application' callable by default.
application = Flask(__name__)
# add a rule for the index page.
application.add_url_rule('/', 'index', (lambda: header_text +
say_hello() + instructions + footer_text))
# add a rule when the page is accessed with a name appended to the site
# URL.
application.add_url_rule('/<username>', 'hello', (lambda username:
say_hello(username) + home_link))
application.add_url_rule('/date', 'next', get_date_time)
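# Hedged usage sketch (not part of the original app, never invoked here): hit
# the /date route with Flask's built-in test client instead of starting the
# development server.
def _demo_date_route():
    with application.test_client() as client:
        print(client.get('/date').get_json())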
# run the app.
if __name__ == "__main__":
# Setting debug to True enables debug output. This line should be
# removed before deploying a production app.
application.debug = True
application.run()
|
1714008
|
import numpy as np
import cv2
import pickle
class LoadData:
'''
Class to load the data
'''
def __init__(self, data_dir, classes, h):
'''
:param data_dir: directory where the dataset is kept
:param classes: number of classes in the dataset
:param h: height to which the label images are resized (the width is 2*h)
'''
self.data_dir = data_dir
self.classes = classes
self.h = h
def compute_class_weights(self, histogram):
'''
Helper function to compute the class weights
:param histogram: distribution of class samples
:return: the normalized histogram (class distribution)
'''
print(" original histogram " + str(histogram))
normHist = histogram / np.sum(histogram)
print(normHist)
return normHist
def readFile(self, fileName):
'''
Function to read the data
:param fileName: file that stores the image locations
:return: the normalized class histogram computed over all label images
'''
global_hist = np.zeros(self.classes, dtype=np.float32)
with open(self.data_dir + '/' + fileName, 'r') as textFile:
for line in textFile:
# we expect the text file to contain the data in following format
# <RGB Image>, <Label Image>
line_arr = line.split(',')
label_file = ((self.data_dir).strip() + '/' + line_arr[1].strip()).strip()
label_img = cv2.imread(label_file, 0)
label_img = cv2.resize(label_img, (self.h*2, self.h), interpolation=cv2.INTER_NEAREST)
hist = np.histogram(label_img, self.classes)
global_hist += hist[0]
#compute the class imbalance information
return self.compute_class_weights(global_hist)
def processData(self):
'''
main_multiscale.py calls this function
We expect train.txt and val.txt files to be inside the data directory.
:return:
'''
print('Processing training data')
return self.readFile('train.txt')
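# Hedged usage sketch (made-up numbers, not part of the original script):
# compute_class_weights simply normalizes a class histogram into a distribution.
def _class_weight_demo():
    hist = np.array([100.0, 300.0, 600.0], dtype=np.float32)
    norm = LoadData('.', 3, 256).compute_class_weights(hist)
    assert np.allclose(norm, [0.1, 0.3, 0.6])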
if __name__ == "__main__":
ld = LoadData(r'D:\DATA\cityscape', 20, 256)
print(ld.processData())
|
1714045
|
from .tasks import SetupTask
from .tasks import InvokeTask
from .tasks import MakeZipTask
from .tasks import AWSLambdaGetConfigTask
from .tasks import AWSLambdaInvokeTask
from .tasks import AWSLambdaUpdateCodeTask
|
1714064
|
import numpy as np
import scipy.signal, scipy.linalg, scipy.special
from .utils import *
from .signal import *
from .wavelets import *
__all__ = ["allpass", "leja", "sfact", "selesnick_hwlet", "evenbly_white_hwlet"]
def allpass(tau, L):
"""
Return the filter d[n] such that
A(z) = z^{-L} D(1/z) / D(z)
approximates A(z) = z^{-tau}.
The length of the filter d[n] is L+1.
"""
n = np.arange(L)
x = np.r_[1, (L - n) * (L - n - tau) / (n + 1) / (n + 1 + tau)]
return np.cumprod(x)
def leja(a):
"""
Leja ordering of given numbers:
* |a[0]| = max_j |a[j]|
* prod_{i=0}^{k-1} |a[k] - a[i]| = max_j prod_{i=0}^{k-1} |a[j] - a[i]|
When used as a preprocessing for np.poly it increases its numerical precision.
"""
n = len(a)
c = np.argmax(np.abs(a))
a[[c, 0]] = a[[0, c]]
for k in range(1, n):
A = np.abs(a[:, np.newaxis][:, [0] * k] - a[np.newaxis, :k][[0] * n, :])
A = np.prod(A, -1)
c = np.argmax(A)
a[[k, c]] = a[[c, k]]
return a
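# Hedged usage sketch (not part of the original module): Leja-order the 16th
# roots of unity before calling np.poly; both orderings should recover
# z^16 - 1, with the Leja order typically giving the smaller coefficient error.
def _leja_demo():
    r = np.exp(2j * np.pi * np.arange(16) / 16)
    truth = np.zeros(17)
    truth[0], truth[-1] = 1.0, -1.0
    err_naive = np.max(np.abs(np.poly(r) - truth))
    err_leja = np.max(np.abs(np.poly(leja(r.copy())) - truth))
    print(err_naive, err_leja)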
def sfact(h, min_phase=False, eps=1e-5):
"""
Return a mid-phase (or min-phase) spectral factorization of the polynomial h of degree 2n; i.e., a polynomial g of degree n such that
h(X) = X^n g(X) g_conj(1/X)
The min_phase parameter is ignored if h is a complex signal.
This code is inspired by Selesnick's sfactM.m and sfact.m.
"""
assert len(h) % 2 == 1, "Polynomial should have even degree."
h = np.array(h)
assert np.allclose(
h, h[::-1].conj(), atol=0
), "Coefficient sequence should be Hermitian."
isreal = np.all(np.isreal(h))
# find roots of original polynomials
roots = np.roots(h)
# classify roots on unit circle
roots_circ = roots[np.abs(np.abs(roots) - 1) < eps]
assert (
len(roots_circ) % 2 == 0
), "There should be an even number of roots of unit modulus."
if min_phase and len(roots_circ) > 0:
raise NotImplementedError
# all roots on unit circle should appear an even number of times
plus_one = np.abs(roots_circ - 1) < eps
others = ~plus_one
num_plus_one = sum(plus_one)
assert num_plus_one % 2 == 0, "The root +1 should appear an even number of times."
roots_circ_other = roots_circ[others]
roots_circ_other = roots_circ_other[np.argsort(np.angle(roots_circ_other))]
roots_circ_other = (roots_circ_other[::2] + roots_circ_other[1::2]) / 2
# collect half the +1's and half of all other roots
roots_circ = np.r_[
roots_circ_other,
[+1] * (num_plus_one // 2),
]
# roots inside unit disk (for a polynomial with real coefficients, those roots should come in complex conjugate pairs unless they are real)
roots_int = roots[np.abs(roots) <= 1 - eps]
if isreal and not min_phase:
pos_imags, reals = scipy.signal.filter_design._cplxreal(roots_int)
A1 = np.r_[pos_imags[::2], pos_imags[::2].conj()]
A2 = np.r_[pos_imags[1::2], pos_imags[1::2].conj()]
imags = np.r_[1 / A1, A2]
reals = np.r_[1 / reals[::2], reals[1::2]]
roots_int = np.r_[imags, reals]
# roots of the spectral factorization
roots = np.r_[roots_circ, roots_int]
roots = leja(roots)
# build corresponding polynomial
g = np.poly(roots)
g = g * np.sqrt(h[-1] / (g[0] * g[-1]))
if min(g) + max(g) < 0:
g = -g
# check that g is indeed a spectral factor of h
assert np.allclose(np.convolve(g, g[::-1].conj()), h, atol=0), "No spectral factor"
return g
def selesnick_hwlet(K, L, min_phase=False):
"""
Return Selesnick's Hilbert transform wavelet pair (h, g).
The parameter K determines the number of zeros at z=-1.
The parameter L determines the support of the filter implementing the fractional delay.
The length of both scaling filters is 2(K+L).
This code is inspired by Selesnick's hwlet.m.
"""
d = allpass(1 / 2, L)
# filter for z^(K+L) S(z)
s1 = scipy.special.binom(2 * K, np.arange(2 * K + 1))
s2 = np.convolve(d, d[::-1])
s = np.convolve(s1, s2)
# solve convolution system for z^(K+L-1) R(z)
A = convmtx(s, 2 * (K + L) - 1)
A = A[1::2]
b = np.zeros(2 * (K + L) - 1)
b[K + L - 1] = 1
r = np.linalg.solve(A, b)
r = (r + r[::-1]) / 2
assert np.allclose(A @ r, b)
# find spectral factor Q(z) and compute filter for z^K F(z)
q = sfact(r, min_phase=min_phase)
b = scipy.special.binom(K, np.arange(K + 1))
f = np.convolve(q, b)
h = np.convolve(f, d)
g = np.convolve(f, d[::-1])
# build orthogonal wavelet
h = orthogonal_wavelet.from_scaling_filter(signal(h))
g = orthogonal_wavelet.from_scaling_filter(signal(g))
return h, g
def evenbly_white_hwlet():
"""
Return Evenbly-White's filter pair of length 4.
"""
h_s = signal(
np.array([-0.129_409_52, 0.224_143_87, 0.836_516_3, 0.482_962_91]), start=-2
)
g_s = signal(
np.array([0.482_962_91, 0.836_516_3, 0.224_143_87, -0.129_409_52]), start=0
)
h = orthogonal_wavelet.from_scaling_filter(h_s)
g = orthogonal_wavelet.from_scaling_filter(g_s)
return h, g
|
1714079
|
import os
import tempfile
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
import random
import uuid
from concurrent.futures import ThreadPoolExecutor
import re
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvestercore.plugin_base import PluginBase
from pandaharvester.harvestermisc.cloud_openstack_utils import OS_SimpleClient
# setup base logger
baseLogger = core_utils.setup_logger('cloud_openstack_submitter')
def _init_script_replace(string, **kwarg):
new_string = string
macro_map = {
r'\$\(workerID\)': str(kwarg['workerID']),
r'\$\(batchID\)': str(kwarg['batchID']),
r'\$\(accessPoint\)': str(kwarg['accessPoint']),
}
for k, v in macro_map.items():
new_string = re.sub(k, v, new_string)
return new_string
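# Hedged usage sketch (values are made up, not part of the original plugin):
# _init_script_replace substitutes the $(workerID), $(batchID) and
# $(accessPoint) macros in an init-script template, e.g.
#   _init_script_replace('ID=$(workerID) AP=$(accessPoint)',
#                        workerID=123, batchID='b1', accessPoint='/tmp/ap')
#   # -> 'ID=123 AP=/tmp/ap'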
# make cloud initialization script
def _make_init_script(workspec, template_str):
# make logger
tmpLog = core_utils.make_logger(baseLogger, 'workerID={0}'.format(workspec.workerID), method_name='_make_init_script')
# make init tempfile
tmpFile = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='_init.sh', dir=workspec.get_access_point())
new_template_str = _init_script_replace(template_str, **workspec.__dict__)
tmpFile.write(new_template_str)
tmpFile.close()
tmpLog.debug('done')
return tmpFile.name
# Cloud Openstack submitter
class CloudOpenstackSubmitter(PluginBase):
# constructor
def __init__(self, **kwarg):
PluginBase.__init__(self, **kwarg)
self.nProcesses = 4
self.vm_client = OS_SimpleClient(auth_config_json_file=self.authConfigFile)
def _submit_a_vm(self, workspec):
# set logger
tmpLog = self.make_logger(baseLogger, 'workerID={0}'.format(workspec.workerID), method_name='_submit_a_vm')
# initial return values
tmpRetVal = (None, 'Nothing done')
# decide id
vm_name = 'harvester-vm_{0}'.format(str(uuid.uuid4()))
# decide image
vm_image_id = self.vmImageID
# decide flavor
#FIXME
if workspec.nCore == 1:
vm_flavor_id = self.jobType_vmFlavor_map['SCORE']
elif workspec.nCore == 8:
vm_flavor_id = self.jobType_vmFlavor_map['MCORE']
else:
vm_flavor_id = self.jobType_vmFlavor_map['other']
# decide userdata
with open(self.initScriptTemplate) as _f:
template_str = _f.read()
vm_userdata_file = _make_init_script(workspec, template_str)
vm_userdata = open(vm_userdata_file, 'r')
# get image and flavor
try:
vm_image = self.vm_client.nova.glance.find_image(vm_image_id)
vm_flavor = self.vm_client.nova.flavors.get(vm_flavor_id)
except Exception as _e:
errStr = 'Failed to create a VM with name={0} ; {1}'.format(vm_name, _e)
tmpLog.error(errStr)
tmpRetVal = (None, errStr)
return tmpRetVal
# create a VM
try:
self.vm_client.nova.servers.create( name=vm_name,
image=vm_image,
flavor=vm_flavor,
userdata=vm_userdata,
**self.vmCreateAttributes)
except Exception as _e:
errStr = 'Failed to create a VM with name={0} ; {1}'.format(vm_name, _e)
tmpLog.error(errStr)
tmpRetVal = (None, errStr)
else:
try:
vm_server = self.vm_client.nova.servers.list(search_opts={'name': vm_name}, limit=1)[0]
vm_id = vm_server.id
except Exception as _e:
errStr = 'Failed to create a VM with name={0} ; {1}'.format(vm_name, _e)
tmpLog.error(errStr)
tmpRetVal = (None, errStr)
else:
workspec.batchID = vm_id
tmpLog.info('Created a VM with name={vm_name} id={vm_id}'.format(vm_name=vm_name, vm_id=vm_id))
tmpRetVal = (True, '')
vm_userdata.close()
# return
return tmpRetVal
# submit workers
def submit_workers(self, workspec_list):
# set logger
tmpLog = self.make_logger(baseLogger, method_name='submit_workers')
nWorkers = len(workspec_list)
tmpLog.debug('start nWorkers={0}'.format(nWorkers))
# exec with multi-thread
with ThreadPoolExecutor(self.nProcesses) as thread_pool:
retValList = thread_pool.map(self._submit_a_vm, workspec_list)
tmpLog.debug('{0} workers submitted'.format(nWorkers))
# return
retList = list(retValList)
tmpLog.debug('done')
return retList
|
1714117
|
from torch.nn import functional as F
from models.DLCT.utils import PositionWiseFeedForward
import torch
from torch import nn
from models.DLCT.attention import MultiHeadBoxAttention as MultiHeadAttention
from ..relative_embedding import BoxRelationalEmbedding, GridRelationalEmbedding, AllRelationalEmbedding
class SelfAtt(nn.Module):
def __init__(self, d_model=512, d_k=64, d_v=64, h=8, d_ff=2048, dropout=.1, identity_map_reordering=False,
attention_module=None, attention_module_kwargs=None):
super(SelfAtt, self).__init__()
self.identity_map_reordering = identity_map_reordering
self.mhatt = MultiHeadAttention(d_model, d_k, d_v, h, dropout, identity_map_reordering=identity_map_reordering,
attention_module=attention_module,
attention_module_kwargs=attention_module_kwargs)
self.dropout = nn.Dropout(dropout)
self.lnorm = nn.LayerNorm(d_model)
self.pwff = PositionWiseFeedForward(d_model, d_ff, dropout, identity_map_reordering=identity_map_reordering)
def forward(self, queries, keys, values, relative_geometry_weights, attention_mask=None, attention_weights=None,
pos=None):
# print('-' * 50)
# print('layer input')
# print(queries[11])
q = queries + pos
k = keys + pos
att = self.mhatt(q, k, values, relative_geometry_weights, attention_mask, attention_weights)
# print('mhatt outpout')
# print(att[11])
att = self.lnorm(queries + self.dropout(att))
# print('norm out')
# print(att[11])
ff = self.pwff(att)
# print('ff out')
# print(ff[11])
# print('-' * 50)
return ff
class LCCA(nn.Module):
def __init__(self, d_model=512, d_k=64, d_v=64, h=8, d_ff=2048, dropout=.1, identity_map_reordering=False,
attention_module=None, attention_module_kwargs=None):
super(LCCA, self).__init__()
self.identity_map_reordering = identity_map_reordering
self.mhatt = MultiHeadAttention(d_model, d_k, d_v, h, dropout, identity_map_reordering=identity_map_reordering,
attention_module=attention_module,
attention_module_kwargs=attention_module_kwargs)
self.dropout = nn.Dropout(dropout)
self.lnorm = nn.LayerNorm(d_model)
self.pwff = PositionWiseFeedForward(d_model, d_ff, dropout, identity_map_reordering=identity_map_reordering)
def forward(self, queries, keys, values, relative_geometry_weights, attention_mask=None, attention_weights=None,
pos_source=None, pos_cross=None):
# print('-' * 50)
# print('layer input')
# print(queries[11])
q = queries + pos_source
k = keys + pos_cross
att = self.mhatt(q, k, values, relative_geometry_weights, attention_mask, attention_weights)
# print('mhatt outpout')
# print(att[11])
att = self.lnorm(queries + self.dropout(att))
# print('norm out')
# print(att[11])
ff = self.pwff(att)
# print('ff out')
# print(ff[11])
# print('-' * 50)
return ff
class MultiLevelEncoder(nn.Module):
def __init__(self, N, padding_idx, d_model=512, d_k=64, d_v=64, h=8, d_ff=2048, dropout=.1,
identity_map_reordering=False, attention_module=None, attention_module_kwargs=None):
super(MultiLevelEncoder, self).__init__()
self.d_model = d_model
self.dropout = dropout
self.layers_region = nn.ModuleList([SelfAtt(d_model, d_k, d_v, h, d_ff, dropout,
identity_map_reordering=identity_map_reordering,
attention_module=attention_module,
attention_module_kwargs=attention_module_kwargs)
for _ in range(N)])
self.layers_grid = nn.ModuleList([SelfAtt(d_model, d_k, d_v, h, d_ff, dropout,
identity_map_reordering=identity_map_reordering,
attention_module=attention_module,
attention_module_kwargs=attention_module_kwargs)
for _ in range(N)])
self.region2grid = nn.ModuleList([LCCA(d_model, d_k, d_v, h, d_ff, dropout,
identity_map_reordering=identity_map_reordering,
attention_module=attention_module,
attention_module_kwargs=attention_module_kwargs)
for _ in range(N)])
self.grid2region = nn.ModuleList([LCCA(d_model, d_k, d_v, h, d_ff, dropout,
identity_map_reordering=identity_map_reordering,
attention_module=attention_module,
attention_module_kwargs=attention_module_kwargs)
for _ in range(N)])
self.padding_idx = padding_idx
self.WGs = nn.ModuleList([nn.Linear(64, 1, bias=True) for _ in range(h)])
def forward(self, regions, grids, boxes, aligns, attention_weights=None, region_embed=None, grid_embed=None):
# input (b_s, seq_len, d_in)
attention_mask_region = (torch.sum(regions == 0, -1) != 0).unsqueeze(1).unsqueeze(1) # (b_s, 1, 1, seq_len)
attention_mask_grid = (torch.sum(grids == 0, -1) != 0).unsqueeze(1).unsqueeze(1) # (b_s, 1, 1, seq_len)
# box embedding
relative_geometry_embeddings = AllRelationalEmbedding(boxes)
flatten_relative_geometry_embeddings = relative_geometry_embeddings.view(-1, 64)
box_size_per_head = list(relative_geometry_embeddings.shape[:3])
box_size_per_head.insert(1, 1)
relative_geometry_weights_per_head = [l(flatten_relative_geometry_embeddings).view(box_size_per_head) for l in
self.WGs]
relative_geometry_weights = torch.cat((relative_geometry_weights_per_head), 1)
relative_geometry_weights = F.relu(relative_geometry_weights)
n_regions = regions.shape[1] # 50
n_grids = grids.shape[1] # 49
region2region = relative_geometry_weights[:, :, :n_regions, :n_regions]
grid2grid = relative_geometry_weights[:, :, n_regions:, n_regions:]
# region2grid = relative_geometry_weights[:, :, :n_regions, n_regions:]
# grid2region = relative_geometry_weights[:, :, n_regions:, :n_regions]
region2all = relative_geometry_weights[:,:,:n_regions,:]
grid2all = relative_geometry_weights[:, :, n_regions:, :]
bs = regions.shape[0]
outs = []
out_region = regions
out_grid = grids
aligns = aligns.unsqueeze(1) # bs * 1 * n_regions * n_grids
tmp_mask = torch.eye(n_regions, device=out_region.device).unsqueeze(0).unsqueeze(0)
tmp_mask = tmp_mask.repeat(bs, 1, 1, 1) # bs * 1 * n_regions * n_regions
region_aligns = (torch.cat([tmp_mask, aligns], dim=-1) == 0) # bs * 1 * n_regions *(n_regions+n_grids)
tmp_mask = torch.eye(n_grids, device=out_region.device).unsqueeze(0).unsqueeze(0)
tmp_mask = tmp_mask.repeat(bs, 1, 1, 1) # bs * 1 * n_grids * n_grids
grid_aligns = (torch.cat([aligns.permute(0, 1, 3, 2), tmp_mask], dim=-1)==0) # bs * 1 * n_grids *(n_grids+n_regions)
pos_cross = torch.cat([region_embed,grid_embed],dim=-2)
for l_region, l_grid, l_r2g, l_g2r in zip(self.layers_region, self.layers_grid, self.region2grid,
self.grid2region):
# print('encoder layer in')
# print(out[11])
# print('region self att')
out_region = l_region(out_region, out_region, out_region, region2region, attention_mask_region,
attention_weights, pos=region_embed)
# print('grid self att')
out_grid = l_grid(out_grid, out_grid, out_grid, grid2grid, attention_mask_grid, attention_weights,
pos=grid_embed)
out_all = torch.cat([out_region, out_grid], dim=1)
# print('region cross')
out_region = l_r2g(out_region, out_all, out_all, region2all, region_aligns, attention_weights,
pos_source=region_embed, pos_cross=pos_cross)
# print('grid cross')
out_grid = l_g2r(out_grid, out_all, out_all, grid2all, grid_aligns,
attention_weights, pos_source=grid_embed, pos_cross=pos_cross)
# print('encoder layer out')
# print(out[11])
# outs.append(out.unsqueeze(1))
out = torch.cat([out_region, out_grid], dim=1)
attention_mask = torch.cat([attention_mask_region, attention_mask_grid], dim=-1)
# outs = torch.cat(outs, 1)
# print('encoder out')
# print(out.view(-1)[0].item())
return out, attention_mask
class TransformerEncoder(MultiLevelEncoder):
def __init__(self, N, padding_idx, d_in=2048, **kwargs):
super(TransformerEncoder, self).__init__(N, padding_idx, **kwargs)
self.fc_region = nn.Linear(d_in, self.d_model)
self.dropout_region = nn.Dropout(p=self.dropout)
self.layer_norm_region = nn.LayerNorm(self.d_model)
self.fc_grid = nn.Linear(d_in, self.d_model)
self.dropout_grid = nn.Dropout(p=self.dropout)
self.layer_norm_grid = nn.LayerNorm(self.d_model)
def forward(self, regions, grids, boxes, aligns, attention_weights=None, region_embed=None, grid_embed=None):
mask_regions = (torch.sum(regions, dim=-1) == 0).unsqueeze(-1)
mask_grids = (torch.sum(grids, dim=-1) == 0).unsqueeze(-1)
# print('\ninput', input.view(-1)[0].item())
out_region = F.relu(self.fc_region(regions))
out_region = self.dropout_region(out_region)
out_region = self.layer_norm_region(out_region)
out_region = out_region.masked_fill(mask_regions, 0)
out_grid = F.relu(self.fc_grid(grids))
out_grid = self.dropout_grid(out_grid)
out_grid = self.layer_norm_grid(out_grid)
out_grid = out_grid.masked_fill(mask_grids, 0)
# print('out4',out[11])
return super(TransformerEncoder, self).forward(out_region, out_grid, boxes, aligns,
attention_weights=attention_weights,
region_embed=region_embed, grid_embed=grid_embed)
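# Hedged note on the masking above (shapes assumed, not part of the original
# module): a region/grid slot is treated as padding when its feature row
# contains zero entries, which is how the zero-padded inputs are produced
# upstream. A minimal reproduction of the mask construction:
#   feats = torch.zeros(2, 3, 4); feats[0, :2] = 1.0
#   mask = (torch.sum(feats == 0, -1) != 0).unsqueeze(1).unsqueeze(1)
#   # mask.shape == (2, 1, 1, 3); True marks padded positions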
|
1714121
|
from .__about__ import __version__
from .check import JfrogPlatformCheck
__all__ = ['__version__', 'JfrogPlatformCheck']
|
1714128
|
from PyQt4 import QtGui, QtCore
import maya.OpenMayaUI as mui
import sip
DOCK_OBJECT = "dock_name"
DEFAULT_DOCK = QtCore.Qt.RightDockWidgetArea
# Get Main Window
pointer = mui.MQtUtil.mainWindow()
maya_main_window = sip.wrapinstance(long(pointer), QtCore.QObject)
# Check for existing dock
existing_dock = maya_main_window.findChild(QtGui.QDockWidget, DOCK_OBJECT)
if existing_dock:
existing_dock.deleteLater()
# Create Your New DockWidget Instance here
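# Hedged example (hypothetical class, not part of the original snippet) of what
# MyDockInstance could look like; whichever class is used should set its object
# name to DOCK_OBJECT so the findChild() lookup above can locate it next time:
#
# class MyDockInstance(QtGui.QDockWidget):
#     def __init__(self, parent=maya_main_window):
#         super(MyDockInstance, self).__init__("My Tool", parent)
#         self.setObjectName(DOCK_OBJECT)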
dock_instance = MyDockInstance()
# Add to Dock
# If an existing QDockWidget is found in the default dock area, tabify the new dock with it; otherwise add it as the first dock widget
docked = False
for dock_widget in maya_main_window.findChildren(QtGui.QDockWidget):
if maya_main_window.dockWidgetArea(dock_widget) == DEFAULT_DOCK:
maya_main_window.tabifyDockWidget(dock_widget, dock_instance)
docked = True
break
if not docked:
maya_main_window.addDockWidget(DEFAULT_DOCK, dock_instance)
|
1714151
|
import can_decoder
import mdf_iter
from pathlib import Path
def setup_fs():
"""Helper function to setup the file system for the examples.
"""
from fsspec.implementations.local import LocalFileSystem
fs = LocalFileSystem()
return fs
def example_decode_using_dataframe_j1939_a():
"""Example of loading a file and using the dataframe decoder to perform bulk operations on the data.
"""
# Specify path to the DBC file containing the decoding rules.
dbc_path = Path(__file__).parent / "CSS-Electronics-SAE-J1939-DEMO.dbc"
# Import the decoding rules.
db = can_decoder.load_dbc(dbc_path)
# Create decoder.
dataframe_decoder = can_decoder.DataFrameDecoder(db)
# Setup filesystem and which log file to decode.
fs = setup_fs()
device = "LOG/EEEE0005"
log_file = "{}/00000001/00000001.MF4".format(device)
with fs.open(log_file, "rb") as handle:
# Open the file and extract a dataframe with the raw CAN records.
mdf_file = mdf_iter.MdfFile(handle)
df = mdf_file.get_data_frame()
# Decode the dataframe in a bulk operation.
decoded_result = dataframe_decoder.decode_frame(df)
print("Found a total of {} decoded messages".format(len(decoded_result)))
print(decoded_result)
return
def example_decode_using_dataframe_j1939_b():
"""Example of loading a file and using the dataframe decoder to perform bulk J1939 decoding. Uses SPNs instead
of signal names, and drops the raw data column."""
# Specify path to the DBC file containing the decoding rules.
dbc_path = Path(__file__).parent / "CSS-Electronics-SAE-J1939-DEMO.dbc"
# Import the decoding rules.
db = can_decoder.load_dbc(dbc_path, use_custom_attribute="SPN")
# Create decoder.
dataframe_decoder = can_decoder.DataFrameDecoder(db)
# Setup filesystem and which log file to decode.
fs = setup_fs()
device = "LOG/EEEE0005"
log_file = "{}/00000001/00000001.MF4".format(device)
with fs.open(log_file, "rb") as handle:
# Open the file and extract a dataframe with the raw CAN records.
mdf_file = mdf_iter.MdfFile(handle)
df = mdf_file.get_data_frame()
# Decode the dataframe in a bulk operation.
decoded_result = dataframe_decoder.decode_frame(df, columns_to_drop=["Raw Value"])
print("Found a total of {} decoded messages".format(len(decoded_result)))
print(decoded_result)
return
def example_decode_using_dataframe_obd2():
"""Example of loading a file and using the dataframe decoder to perform bulk OBD2 decoding. Drops the raw data
column and removes the service response/length signals."""
# Specify path to the DBC file containing the decoding rules.
dbc_path = Path(__file__).parent / "CSS-Electronics-OBD2-v1.3.dbc"
# Import the decoding rules.
db = can_decoder.load_dbc(dbc_path)
# Create decoder.
dataframe_decoder = can_decoder.DataFrameDecoder(db)
# Setup filesystem and which log file to decode.
fs = setup_fs()
device = "LOG/EEEE0005"
log_file = "{}/00000001/00000001.MF4".format(device)
with fs.open(log_file, "rb") as handle:
# Open the file and extract a dataframe with the raw CAN records.
mdf_file = mdf_iter.MdfFile(handle)
df = mdf_file.get_data_frame()
# Decode the dataframe in a bulk operation.
decoded_result = dataframe_decoder.decode_frame(df, columns_to_drop=["Raw Value"])
# The DBC file contains fields for response and length. Remove these.
valid_indices = ~decoded_result["Signal"].isin(["response", "length"])
decoded_result = decoded_result[valid_indices]
print("Found a total of {} decoded messages".format(len(decoded_result)))
print(decoded_result)
return
if __name__ == '__main__':
example_decode_using_dataframe_j1939_a()
example_decode_using_dataframe_j1939_b()
example_decode_using_dataframe_obd2()
pass
|
1714161
|
import anki_vector
from anki_vector.events import Events
import cv2 as cv
import numpy as np
import time
import math
# Constants
Debug = False
# milliseconds per main loop execution
MainLoopDelay = 20
HeadTilt = anki_vector.robot.MIN_HEAD_ANGLE + anki_vector.util.degrees(5.0)
LiftHeight = 0.0
class Camera:
FovH = anki_vector.util.degrees(90.0)
FovV = anki_vector.util.degrees(50.0)
PixelsH = 640.0
PixelsV = 360.0
FocalLengthH = (PixelsH / 2.0) / math.tan(FovH.radians / 2.0)
DegreesPerPixelH = FovH.degrees / PixelsH
DegreesPerPixelV = FovV.degrees / PixelsV
class Ball:
def __init__(self):
# typical globals, but can be changed per instance if required
# i.e. if more than one type and color of ball is used
# these are based on the blue ball from a LEGO Mindstorms kit
self.HsvMin = (105, 100, 10)
self.HsvMax = (120, 200, 255)
self.RadiusMax = 200.0
self.RadiusMin = 15.0 # was 25
self.RadiusScale = 38.0
self.RadiusTolerance = 0.2
self.AspectRatio = 1.0
self.AspectRatioTolerance = 0.5
self.AspectMin = (1 - self.AspectRatioTolerance) * self.AspectRatio
self.AspectMax = (1 + self.AspectRatioTolerance) * self.AspectRatio
self.TargetX = 310
self.TargetY = 70
self.found = False
self.x = 0.0
self.y = 0.0
self.center = 0.0
self.radius = 0.0
self.deltaX = 0.0
self.deltaY = 0.0
self.imageId = 0
self.distance = anki_vector.util.distance_mm(0.0)
self.angle = anki_vector.util.degrees(0.0)
self.mask = True
self.debug = True
self.testing = False
# this will change depending on the size of the ball used, so shouldn't
# really be hard coded here for a generic class...
# hack... fix later... or override
def computeDistance(self, radius):
# formula determined by measuring distance to ball versus
# radius, entering into spreadsheet, calculating a regression formula
distance = anki_vector.util.distance_mm(-174.1 * math.log(radius) + 852.5)
return distance
def computeAngle(self, x, camera):
# measuring angle is based on camera FOV
angle = anki_vector.util.radians(math.atan((x - (camera.PixelsH / 2.0)) / camera.FocalLengthH))
return angle
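    # Hedged sanity check (not executed anywhere in this script): a ball centred
    # on the right edge of the image should map to roughly half the horizontal
    # FOV, i.e. Ball().computeAngle(Camera.PixelsH, Camera).degrees ~= 45.0,
    # since FocalLengthH = (PixelsH / 2) / tan(FovH / 2) and atan(320 / 320) = 45 deg.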
# find the ball in an image
def findBall(self, imageId: int, image: np.array, maskImage: np.array, camera: Camera):
# only do this if we have a new image
if (imageId != self.imageId):
self.imageId = imageId
# Much information and some code was obtained from here:
# https://www.pyimagesearch.com/2015/09/14/ball-tracking-with-opencv/
# blur, convert to HSV, look for the ball HSV values, do some filtering
maskedImage = cv.bitwise_and(image, maskImage)
blurImage = cv.GaussianBlur(maskedImage, (11, 11), 0)
hsvImage = cv.cvtColor(blurImage, cv.COLOR_BGR2HSV)
self.trackerImage = cv.inRange(hsvImage, self.HsvMin, self.HsvMax)
self.trackerImage = cv.erode(self.trackerImage, None, iterations = 2)
self.trackerImage = cv.dilate(self.trackerImage, None, iterations = 2)
# find contours
im2, contours, hierarchy = cv.findContours(self.trackerImage.copy(), cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
# We're given a bunch of contours that might enclose the ball
# To help decide if a contour is a ball:
# - pick the largest contour
# - find the radius of the enclosing circle (scale based on distance)
# - compute the aspect ratio
# If the radius and aspect ratio are within the tolerances of a ball,
# it most likely is a ball. However, some features on the rink can
# still come close to looking like a ball... more work could be done.
if len(contours) > 0:
# find the largest contour in the mask, then use
# it to compute the minimum enclosing circle and
# centroid
c = max(contours, key=cv.contourArea)
((x, y), radius) = cv.minEnclosingCircle(c)
M = cv.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
# compute the aspect ratio
bX, bY, bW, bH = cv.boundingRect(c)
aspectRatio = float(bW) / float(bH)
deltaX = self.TargetX - int(x) # positive is to the left
deltaY = self.TargetY - int(y) # positive is away
# compute angle and distance based on radius
# formula determined by measuring distance to ball versus
# radius, entering into spreadsheet, calculating a regression formula
# measuring angle is based on camera FOV
#
# this will change depending on the size of the ball used, so shouldn't
# really be hard coded here for a generic class...
# hack... fix later...
distance = self.computeDistance(radius)
angle = self.computeAngle(x, camera)
if (self.debug):
print(f'[findBall] ({x:.0f},{y:.0f}) ({deltaX:.0f},{deltaY:.0f}) R = {radius:.2f}')
print(f'[findBall] width = {bW:.0f} height = {bH:.0f} aspect = {aspectRatio:.2f}')
print(f'[findBall] angle = {angle.degrees:.2f} distance = {distance.distance_mm:.2f}')
# perform the actual checks
if ( (self.RadiusMin < radius < self.RadiusMax) and (self.AspectMin < aspectRatio < self.AspectMax) or
self.testing ):
if (self.debug):
print('[findBall] Got it')
self.found = True
self.x = x
self.y = y
self.center = center
self.radius = radius
self.deltaX = deltaX
self.deltaY = deltaY
self.distance = distance
self.angle = angle
else:
self.found = False
# left of center is negative
def goToObject(robot: anki_vector.Robot, distance: anki_vector.util.Distance, angle: anki_vector.util.Angle):
global camera
pDistance = 1.0
pAngle = 1.0
leftSpeed = pDistance * distance.distance_mm + pAngle * angle.degrees
rightSpeed = pDistance * distance.distance_mm - pAngle * angle.degrees
#print(f'[goToPixel] {leftSpeed:.1f}:{rightSpeed:.1f}')
robot.motors.set_wheel_motors(leftSpeed, rightSpeed)
def lookAround(robot: anki_vector.Robot, scanAngleIndex: int):
ScanAngles = [0, -30, 30, -60, 60, -90, 90, -120, 120, -150, 150, 180]
if (actionsDone()):
robot.motors.set_wheel_motors(0, 0)
scanAngleIndex += 1
if (scanAngleIndex == len(ScanAngles)):
scanAngleIndex = 0
action(robot.behavior.turn_in_place(angle=anki_vector.util.degrees(ScanAngles[scanAngleIndex]),
speed=anki_vector.util.degrees(60.0),
is_absolute=True))
return scanAngleIndex
def allDone(robot: anki_vector.Robot):
print('[allDone] Cleaning up')
robot.motors.set_wheel_motors(0, 0)
cv.destroyAllWindows()
robot.disconnect()
exit()
def actionsDone():
global actionList
done = True
for i in actionList:
if not(i.done()):
done = False
return done
def action(b):
global actionList
actionList.append(b)
def main():
global actionList
actionList = []
ball = Ball()
camera = Camera()
cvImageId = 0
scanAngleIndex = 0
# open the video window
cv.namedWindow('Vector', cv.WINDOW_NORMAL)
if ball.testing:
cv.namedWindow('Tracker', cv.WINDOW_NORMAL)
# read in the mask for Vector's lift (all the way down)
vectorMaskImage = cv.imread('Vector_Mask.png')
args = anki_vector.util.parse_command_args()
# use AsyncRobot so that behaviors don't block
robot = anki_vector.AsyncRobot(args.serial, enable_camera_feed=True, default_logging=Debug)
robot.connect()
time.sleep(1)
done = False
displayImage = False
action(robot.behavior.set_head_angle(HeadTilt))
action(robot.behavior.set_lift_height(LiftHeight))
while not(actionsDone()):
pass
while not(done):
# check to see if we have a new image
if (robot.camera.latest_image_id != cvImageId):
# if we do, convert it to an OpenCV image
# and keep track of the id
pilImage = robot.camera.latest_image
cvImageId = robot.camera.latest_image_id
cvImage = cv.cvtColor(np.array(pilImage), cv.COLOR_RGB2BGR)
# display it
displayImage = True
# locate the ball (if we can see it)
ball.findBall(imageId = cvImageId, image = cvImage, maskImage = vectorMaskImage, camera = camera)
# ball overlay
if ball.found:
goToObject(robot, ball.distance, ball.angle)
# draw the circle and centroid on the frame,
# then update the list of tracked points
cv.circle(cvImage, (int(ball.x), int(ball.y)), int(ball.radius),
(0, 255, 255), 2)
cv.circle(cvImage, ball.center, 5, (0, 0, 255), -1)
else:
#scanAngleIndex = lookAround(robot, scanAngleIndex)
robot.motors.set_wheel_motors(0, 0)
# display the image with any overlays
cv.imshow('Vector', cvImage)
if ball.testing:
cv.imshow('Tracker', ball.trackerImage)
# waitKey performs the display update
# and checks to see if we hit the 'q' key
c = cv.waitKey(MainLoopDelay)
if (chr(c & 0xff) == 'q'):
done = True
allDone(robot)
if __name__ == "__main__":
main()
|
1714211
|
from PySide2.QtWidgets import QDockWidget
class QSmartDockWidget(QDockWidget):
"""
Wrapper class for dock widgets.
"""
def __init__(self, caption, parent=None, on_close=None, on_raise=None):
super().__init__(caption, parent)
self.old_size = None
self.original_min = None
self.original_max = None
self._on_close = on_close
self._on_raise = on_raise
def restore_original_size(self):
if self.original_min is None or self.original_max is None:
return
self.setMinimumWidth(self.original_min.width())
self.setMinimumHeight(self.original_min.height())
self.setMaximumWidth(self.original_max.width())
self.setMaximumHeight(self.original_max.height())
def closeEvent(self, event): # pylint: disable=unused-argument
if self._on_close is not None:
self._on_close()
self.widget().close()
def raise_(self):
if self._on_raise is not None:
self._on_raise()
super().raise_()
|
1714225
|
import cv2 as cv
import numpy as np
from skimage.measure import compare_ssim
####### Public interface functions: is_continue(), continue_frames(), cut_clips() #######
def matches(current, next):
queryImage = current
trainingImage = next # grayscale image to match against
sift = cv.xfeatures2d.SIFT_create() # create the SIFT detector
kp1, des1 = sift.detectAndCompute(queryImage, None)
kp2, des2 = sift.detectAndCompute(trainingImage, None)
FLANN_INDEX_KDTREE = 0
indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
searchParams = dict(checks=50)
flann = cv.FlannBasedMatcher(indexParams, searchParams)
# cv.imshow("queryImage", queryImage)
# cv.imshow("trainingImage", trainingImage)
if len(kp1) != 1 and len(kp2) != 1 and kp1 and kp2: # the number of keypoints must not be 1 (or zero)
matches = flann.knnMatch(des1, des2, k=2)
else:
return 0
matchesMask = [[0, 0] for i in range(len(matches))]
sum = 0 # number of successfully matched points
for i, (m, n) in enumerate(matches):
if m.distance < 0.7 * n.distance: # ratio test: is this a good match
if kp1[i].pt[1] < 210:
matchesMask[i] = [1, 0]
sum += 1
return sum
def is_continue_(current, next):
similarity = compare_ssim(current, next)
if similarity > 0.95:
return True
else:
return False
def log_(file_, **massage):
condition_ = massage
for k in massage.keys():
file_.write(k + ":{}\n".format(str(massage[k])))
return condition_
# if warning_:
# file_.write("##############warning##############\n")
def is_continue_withlog(current, next, log_file):
sum_current_matches = matches(current, current)
sum_next_matches = matches(current, next)
H1 = cv.calcHist([current], [0], None, [256], [0, 256])
H1 = cv.normalize(H1, H1, 0, 1, cv.NORM_MINMAX, -1) # normalize the histogram
H2 = cv.calcHist([next], [0], None, [256], [0, 256]) # histogram of the next frame
H2 = cv.normalize(H2, H2, 0, 1, cv.NORM_MINMAX, -1)
similarity = cv.compareHist(H1, H2, 0) # compare histogram similarity via compareHist()
similarity_ = compare_ssim(current, next)
if sum_next_matches < int(sum_current_matches * 0.1): # and sum_current_matches >= 90: # if features are plentiful but unmatched, the frames are not continuous
# H1 = cv.calcHist([current], [0], None, [256], [0, 256])
# H1 = cv.normalize(H1, H1, 0, 1, cv.NORM_MINMAX, -1) # normalize the histogram
#
# H2 = cv.calcHist([next], [0], None, [256], [0, 256]) # histogram of the next frame
# H2 = cv.normalize(H2, H2, 0, 1, cv.NORM_MINMAX, -1)
#
# similarity = cv.compareHist(H1, H2, 0) # compare histogram similarity via compareHist()
# similarity_ = compare_ssim(current, next)
if similarity + similarity_ > 1.7:
print(sum_current_matches, sum_next_matches)
print(similarity)
print(similarity_)
condition = log_(log_file, cut_class="Warning",
sum_current_matches=sum_current_matches,
sum_next_matches=sum_next_matches,
similarity=similarity,
similarity_=similarity_)
print("##############warning##############")
return True, condition
print(sum_current_matches, sum_next_matches)
print(similarity)
print(similarity_)
condition = log_(log_file, cut_class="Normal",
sum_current_matches=sum_current_matches,
sum_next_matches=sum_next_matches,
similarity=similarity,
similarity_=similarity_)
return False, condition
elif sum_next_matches < int(matches(next, next) * 0.1):
print(sum_current_matches, sum_next_matches, matches(next, next))
print(similarity)
print(similarity_)
condition = log_(log_file, cut_class="Reversed_Normal",
sum_current_matches=sum_current_matches,
sum_next_matches=sum_next_matches,
similarity=similarity,
similarity_=similarity_)
return False, condition
# elif abs(-sum_current_matches)/sum_current_matches> 0.8:
# print(sum_current_matches, sum_next_matches, matches(next, next), 2)
# return False
# elif sum_next_matches < sum_current_matches*0.05: # if features are unmatched or insufficient,
# print(sum_current_matches, sum_next_matches, 3)
# return False
else:
# print(similarity)
condition = log_(log_file, cut_class="Continue",
sum_current_matches=sum_current_matches,
sum_next_matches=sum_next_matches,
similarity=similarity,
similarity_=similarity_)
return True, condition
###################################################
################ Interface functions below ################
###################################################
def is_continue(current, next): # decide whether two input frames are continuous
sum_current_matches = matches(current, current)
sum_next_matches = matches(current, next)
if sum_next_matches < int(sum_current_matches * 0.1): # and sum_current_matches >= 90: # if features are plentiful but unmatched, the frames are not continuous
H1 = cv.calcHist([current], [0], None, [256], [0, 256])
H1 = cv.normalize(H1, H1, 0, 1, cv.NORM_MINMAX, -1) # normalize the histogram
H2 = cv.calcHist([next], [0], None, [256], [0, 256]) # histogram of the next frame
H2 = cv.normalize(H2, H2, 0, 1, cv.NORM_MINMAX, -1)
similarity = cv.compareHist(H1, H2, 0) # compare histogram similarity via compareHist()
similarity_ = compare_ssim(current, next)
if similarity + similarity_ > 1.7:
return True
return False
elif sum_next_matches < int(matches(next, next) * 0.1):
return False
else:
return True
def continue_frames(yuv_imgs): # input the frame to reconstruct plus its reference frames (e.g. 7 or 5 frames); fill in any frame that is not continuous
grey_imgs = yuv_imgs[..., 0]
imgs_len = len(grey_imgs)
for i in range(imgs_len // 2):
if is_continue(grey_imgs[imgs_len // 2 + i], grey_imgs[imgs_len // 2 + i + 1]):
pass
else:
yuv_imgs[imgs_len // 2 + i + 1] = yuv_imgs[imgs_len // 2 + i]
if is_continue(grey_imgs[imgs_len // 2 - i], grey_imgs[imgs_len // 2 - i - 1]):
pass
else:
yuv_imgs[imgs_len // 2 - i - 1] = yuv_imgs[imgs_len // 2 - i]
return yuv_imgs # return the completed result
def cut_clips(yuv_video): # split the video into continuous clips
gery_imgs = yuv_video[..., 0]
gery_imgs.dtype = np.uint8
continue_frames = []
continue_ = []
cuts = []
for j, img in enumerate(gery_imgs):
is_continue_ = is_continue(gery_imgs[j - 1], img)  # is_continue() returns a single bool
if len(continue_) == 0:
cuts.append(0)
continue_.append(yuv_video[j])
elif is_continue_:
continue_.append(yuv_video[j])
if j == len(gery_imgs) - 1:
continue_frames.append(continue_)
else:
if j == len(gery_imgs) - 1:
continue_frames.append(continue_)
continue_ = [yuv_video[j]]
cuts.append(j)
continue_frames.append(continue_)
else:
continue_frames.append(continue_)
continue_ = [yuv_video[j]]
cuts.append(j)
return continue_frames, cuts # first return value: list of clips; second: frame index of the first frame of each clip
|
1714271
|
t = int(input())
arr = []
while t:
a, b, c = map(int, input().split(" "))
arr.clear()
arr.append(a)
arr.append(b)
arr.append(c)
arr.sort()
print(arr[1])
t = t-1
|
1714314
|
from annotation_utils.linemod import Linemod_Dataset
dataset = Linemod_Dataset.load_from_path('/home/clayton/workspace/prj/data_keep/data/misc_dataset/new/LINEMOD/cat/train.json')
dataset.save_to_path('temp.json', overwrite=True)
assert Linemod_Dataset.from_dict(dataset.to_dict()) == dataset
print('Test Passed')
|
1714419
|
import json
import numpy as np
video = json.load(open('/nobackup-slow/dataset/my_xfdu/video/vis/train/instances.json'))
all_video_id = list(range(1, len(video['videos']) + 1))
train_video_id = np.random.choice(all_video_id, int(len(all_video_id) * 0.8), replace=False)
test_video_id = list(set(all_video_id) - set(train_video_id))
# num_train_video = int(len(all_video_id) * 0.8)
video_info_train = []
video_info_val = []
images_train = []
images_val = []
id_train = 1
id_val = 1
id_image_train = 1
id_image_val = 1
# breakpoint()
for info in video['videos']:
if info['id'] in train_video_id:
video_info_train.append({'id': id_train, 'name': info['file_names'][0].split('/')[0]})
for index in range(len(info['file_names'])):
image_info = {'file_name': info['file_names'][index],
'height': info['height'], 'width': info['width'], 'id': id_image_train, 'video_id': id_train, 'frame_id': index}
images_train.append(image_info)
id_image_train += 1
id_train += 1
else:
video_info_val.append({'id': id_val, 'name': info['file_names'][0].split('/')[0]})
for index in range(len(info['file_names'])):
image_info = {'file_name': info['file_names'][index],
'height': info['height'], 'width': info['width'], 'id': id_image_val, 'video_id': id_val, 'frame_id': index}
images_val.append(image_info)
id_image_val += 1
id_val += 1
'''
>>> data['images'][101]
{'file_name': 'b1c66a42-6f7d68ca/b1c66a42-6f7d68ca-0000102.jpg',
'height': 720, 'width': 1280, 'id': 102, 'video_id': 1, 'frame_id': 101}
'''
# breakpoint()
annotation_train = []
annotation_val = []
id_train = 1
id_train_image = 1
id_val = 1
id_val_image = 1
pre_video_train_id = -1 #video['annotations'][0]['video_id']
pre_video_test_id = -1
for index1 in range(len(video['annotations'])):
if video['annotations'][index1]['video_id'] in train_video_id:
if pre_video_train_id != video['annotations'][index1]['video_id']:
cur_video_ann = {}
for index in range(len(video['annotations'][index1]['bboxes'])):
cur_video_ann[index] = []
for index in range(len(video['annotations'][index1]['bboxes'])):
cur_video_ann[index].append([video['annotations'][index1]['bboxes'][index],
video['annotations'][index1]['areas'][index],
video['annotations'][index1]['iscrowd'],
video['annotations'][index1]['category_id']])
# annotation_train.append({'id':id_train, 'image_id'})
if index1 == len(video['annotations']) - 1 or video['annotations'][index1]['video_id'] != video['annotations'][index1+1]['video_id']:
for key in list(cur_video_ann.keys()):
instance_id = 1
# print(id_train_image)
for item in cur_video_ann[key]:
if item[0] is None:
annotation_train.append({'id': id_train, 'image_id': id_train_image, 'category_id': item[3],
'instances_id': instance_id,
'bdd100k_id': 0, 'occluded': False, 'truncated': False,
'bbox': item[0], 'area': item[1], 'iscrowd': item[2], 'ignore': 1
})
else:
annotation_train.append({'id': id_train, 'image_id': id_train_image, 'category_id': item[3],
'instances_id': instance_id,
'bdd100k_id': 0, 'occluded': False, 'truncated': False,
'bbox': item[0], 'area': item[1], 'iscrowd': item[2], 'ignore': 0
})
instance_id += 1
id_train += 1
id_train_image += 1
pre_video_train_id = video['annotations'][index1]['video_id']
else:
if pre_video_test_id != video['annotations'][index1]['video_id']:
cur_video_ann = {}
for index in range(len(video['annotations'][index1]['bboxes'])):
cur_video_ann[index] = []
for index in range(len(video['annotations'][index1]['bboxes'])):
cur_video_ann[index].append([video['annotations'][index1]['bboxes'][index],
video['annotations'][index1]['areas'][index],
video['annotations'][index1]['iscrowd'],
video['annotations'][index1]['category_id']])
# annotation_train.append({'id':id_train, 'image_id'})
if index1 == len(video['annotations']) - 1 or video['annotations'][index1]['video_id'] != video['annotations'][index1+1]['video_id']:
for key in list(cur_video_ann.keys()):
instance_id = 1
for item in cur_video_ann[key]:
if item[0] is None:
annotation_val.append({'id': id_val, 'image_id': id_val_image, 'category_id': item[3],
'instances_id': instance_id,
'bdd100k_id': 0, 'occluded': False, 'truncated': False,
'bbox': item[0], 'area': item[1], 'iscrowd': item[2], 'ignore': 1
})
else:
annotation_val.append({'id': id_val, 'image_id': id_val_image, 'category_id': item[3],
'instances_id': instance_id,
'bdd100k_id': 0, 'occluded': False, 'truncated': False,
'bbox': item[0], 'area': item[1], 'iscrowd': item[2], 'ignore': 0
})
instance_id += 1
id_val += 1
id_val_image += 1
pre_video_test_id = video['annotations'][index1]['video_id']
'''
{'id': 301, 'image_id': 11, 'category_id': 3,
'instance_id': 25, 'bdd100k_id': '00122086',
'occluded': True, 'truncated': False,
'bbox': [664.2417908674067, 367.9733233708366, 36.698191808020056, 28.229378313861503],
'area': 1035.9671399832512, 'iscrowd': 0, 'ignore': 0,
'segmentation':
[[664.2417908674067, 367.9733233708366, 664.2417908674067, 396.2027016846981,
700.9399826754268, 396.2027016846981, 700.9399826754268, 367.9733233708366]]}
'''
# breakpoint()
new_annotation = dict()
new_annotation['categories'] = video['categories']
new_annotation['videos'] = video_info_train
new_annotation['images'] = images_train
new_annotation['annotations'] = annotation_train
json.dump(new_annotation, open('/nobackup-slow/dataset/my_xfdu/video/vis/train/instances_train1.json', 'w'))
new_annotation['videos'] = video_info_val
new_annotation['images'] = images_val
new_annotation['annotations'] = annotation_val
json.dump(new_annotation, open('/nobackup-slow/dataset/my_xfdu/video/vis/train/instances_test1.json', 'w'))
#
# # postprocessing.
train_data = json.load(open('/nobackup-slow/dataset/my_xfdu/video/vis/train/instances_train1.json'))
not_none_image_id = []
for ann in train_data['annotations']:
if ann['bbox'] is not None:
not_none_image_id.append(ann['image_id'])
full_image_id = list(range(1, train_data['annotations'][-1]['image_id']+1))
none_image_id_train = list(set(full_image_id).difference(set(not_none_image_id)))
test_data = json.load(open('/nobackup-slow/dataset/my_xfdu/video/vis/train/instances_test1.json'))
not_none_image_id = []
for ann in test_data['annotations']:
if ann['bbox'] is not None:
not_none_image_id.append(ann['image_id'])
full_image_id = list(range(1, test_data['annotations'][-1]['image_id']+1))
none_image_id_test = list(set(full_image_id).difference(set(not_none_image_id)))
# breakpoint()
video_info_train = []
video_info_val = []
images_train = []
images_val = []
id_train = 1
id_val = 1
id_image_train = 1
id_image_val = 1
id_real_image_train = 1
id_real_image_val = 1
for info in video['videos']:
if info['id'] in train_video_id:
video_info_train.append({'id': id_train, 'name': info['file_names'][0].split('/')[0]})
temp = 0
for index in range(len(info['file_names'])):
if id_image_train not in none_image_id_train:
image_info = {'file_name': info['file_names'][index],
'height': info['height'], 'width': info['width'], 'id': id_real_image_train,
'video_id': id_train, 'frame_id': temp}
temp += 1
images_train.append(image_info)
id_real_image_train += 1
id_image_train += 1
id_train += 1
else:
video_info_val.append({'id': id_val, 'name': info['file_names'][0].split('/')[0]})
temp = 0
for index in range(len(info['file_names'])):
if id_image_val not in none_image_id_test:
image_info = {'file_name': info['file_names'][index],
'height': info['height'], 'width': info['width'], 'id': id_real_image_val,
'video_id': id_val, 'frame_id': temp}
temp += 1
images_val.append(image_info)
id_real_image_val += 1
id_image_val += 1
id_val += 1
'''
>>> data['images'][101]
{'file_name': 'b1c66a42-6f7d68ca/b1c66a42-6f7d68ca-0000102.jpg',
'height': 720, 'width': 1280, 'id': 102, 'video_id': 1, 'frame_id': 101}
'''
# breakpoint()
annotation_train = []
annotation_val = []
id_train = 1
id_train_image = 1
id_val = 1
id_val_image = 1
id_real_val_image = 1
id_real_train_image = 1
pre_video_train_id = -1 #video['annotations'][0]['video_id']
pre_video_test_id = -1
for index1 in range(len(video['annotations'])):
if video['annotations'][index1]['video_id'] in train_video_id:
if pre_video_train_id != video['annotations'][index1]['video_id']:
cur_video_ann = {}
for index in range(len(video['annotations'][index1]['bboxes'])):
cur_video_ann[index] = []
for index in range(len(video['annotations'][index1]['bboxes'])):
cur_video_ann[index].append([video['annotations'][index1]['bboxes'][index],
video['annotations'][index1]['areas'][index],
video['annotations'][index1]['iscrowd'],
video['annotations'][index1]['category_id']])
# annotation_train.append({'id':id_train, 'image_id'})
if index1 == len(video['annotations']) - 1 or video['annotations'][index1]['video_id'] != video['annotations'][index1+1]['video_id']:
for key in list(cur_video_ann.keys()):
instance_id = 1
# print(id_train_image)
for item in cur_video_ann[key]:
if item[0] is not None:
annotation_train.append({'id': id_train, 'image_id': id_real_train_image, 'category_id': item[3],
'instances_id': instance_id,
'bdd100k_id': 0, 'occluded': False, 'truncated': False,
'bbox': item[0], 'area': item[1], 'iscrowd': item[2], 'ignore': 0
})
instance_id += 1
id_train += 1
if id_train_image not in none_image_id_train:
id_real_train_image += 1
id_train_image += 1
pre_video_train_id = video['annotations'][index1]['video_id']
else:
if pre_video_test_id != video['annotations'][index1]['video_id']:
cur_video_ann = {}
for index in range(len(video['annotations'][index1]['bboxes'])):
cur_video_ann[index] = []
for index in range(len(video['annotations'][index1]['bboxes'])):
cur_video_ann[index].append([video['annotations'][index1]['bboxes'][index],
video['annotations'][index1]['areas'][index],
video['annotations'][index1]['iscrowd'],
video['annotations'][index1]['category_id']])
# annotation_train.append({'id':id_train, 'image_id'})
if index1 == len(video['annotations']) - 1 or video['annotations'][index1]['video_id'] != video['annotations'][index1+1]['video_id']:
for key in list(cur_video_ann.keys()):
instance_id = 1
for item in cur_video_ann[key]:
if item[0] is not None:
annotation_val.append({'id': id_val, 'image_id': id_real_val_image, 'category_id': item[3],
'instances_id': instance_id,
'bdd100k_id': 0, 'occluded': False, 'truncated': False,
'bbox': item[0], 'area': item[1], 'iscrowd': item[2], 'ignore': 0
})
instance_id += 1
id_val += 1
if id_val_image not in none_image_id_test:
id_real_val_image += 1
id_val_image += 1
pre_video_test_id = video['annotations'][index1]['video_id']
new_annotation = dict()
new_annotation['categories'] = video['categories']
new_annotation['videos'] = video_info_train
new_annotation['images'] = images_train
new_annotation['annotations'] = annotation_train
json.dump(new_annotation, open('/nobackup-slow/dataset/my_xfdu/video/vis/train/instances_train1.json', 'w'))
new_annotation['videos'] = video_info_val
new_annotation['images'] = images_val
new_annotation['annotations'] = annotation_val
json.dump(new_annotation, open('/nobackup-slow/dataset/my_xfdu/video/vis/train/instances_test1.json', 'w'))
# breakpoint()
|
1714449
|
import torch.utils.data as data
import os
import os.path
from scipy.ndimage import imread
import numpy as np
from skimage import color
import random
#from matlab_imresize.imresize import *
# import matlab_imresize
from skimage import img_as_float
from skimage.measure import compare_ssim,compare_psnr
def BayesSR_loader(root, im_path, video_name, task, task_param,
# DownResizer=None,UpResizer=None,
input_frame_size = ( 128,128,3)):
# if task == 'denoise':
# root_input = os.path.join(root,'input',im_path)
# else:
root_input = os.path.join(root,video_name, 'bicubic')
root_target =os.path.join(root,video_name, 'original')
# path_y_ = os.path.join(root,'target_ours',im_path)
# frame_prefix = im_path
path = []
im = []
target = []
cur_idex = int(im_path[-6:-4])
for i in range(0,7):
temp = os.path.join(root_input, im_path[:-6]+
str((cur_idex-3+i)).zfill(2)+im_path[-4:])
path.append(temp)
# if video_name == 'foliage':
# print("")
for i in range(0,7):
if os.path.exists(path[i]):
im.append(imread(path[i]))
else:
im.append(imread(path[3]))
target = imread(os.path.join(root_target,im_path ))
if task == 'sr':
# for i in range(0,7):
# step 1: turn to low resolution
#temp = img_as_float(im[i])
# ratio = task_param[0]
#temp = imresize(temp,1/ratio)
#temp = convertDouble2Byte(temp)
# temp = im[i]
# step 2: preprocess for network
# temp = img_as_float(temp)
# temp = imresize(temp,ratio)
# temp = UpResizer.imresize(temp)
# temp = convertDouble2Byte(temp)
# im[i] = temp
print(compare_psnr(im[3],target))
elif task=='denoise':
# for i in range(0,7):
# temp = im[i]
#sigma = task_param[0]
#gaussian_noise = np.clip(np.random.normal(0.0, sigma * 255.0, input_frame_size), 0, 255).astype("uint8")
#temp = np.clip(temp.astype("uint32") + gaussian_noise.astype("uint32"), 0, 255).astype("uint8")
#b = np.clip((np.random.rand(input_frame_size[0],input_frame_size[1], input_frame_size[2]) * 255.0), 0, 255).astype("uint8")
#p = task_param[1]
#tm = np.random.rand(input_frame_size[0], input_frame_size[1] ) <= p
#tm = np.stack((tm, tm, tm), axis=2)
#temp[tm] = b[tm]
#im[i] = temp
print(compare_psnr(im[3],target))
pass
elif task== 'deblock':
for i in range(0,7):
temp = img_as_float(im[i])
raise("not implemented for deblock")
for i in range(0,7):
im[i] = np.transpose(im[i],(2,0,1))
target= np.transpose(target,(2,0,1))
return [im[i].astype("float32")/255.0 for i in range(0,7)], target.astype("float32")/255.0
class ListDataset(data.Dataset):
def __init__(self, root, path_list,videos_list,
task = 'sr', task_param = [4.0], loader=BayesSR_loader): #transform=None, target_transform=None, co_transform=None,
self.root = root
self.videos_list = videos_list
self.path_list = path_list
# self.transform = transform
# self.target_transform = target_transform
# self.co_transform = co_transform
self.loader = loader
self.task = task
self.task_param = task_param
print("task is " + task," with parameter " )
print(task_param)
# if task == 'sr':
# self.DownResizer = matlab_imresize.Imresize((256,448,3),1/task_param[0])
# self.UpResizer = matlab_imresize.Imresize((256/task_param[0],448/task_param[0],3),task_param[0])
# self.DownResizer
# self.task_param += self.UpResizer
def __getitem__(self, index):
path = self.path_list[index]
video = self.videos_list[index]
# print(path)
Xs,y, = self.loader(self.root, path,video, self.task, self.task_param)
return Xs,y,path,video
def __len__(self):
return len(self.path_list)
|
1714456
|
from __future__ import absolute_import, division, print_function
def run():
try: import numpy
except ImportError: print("None")
else: print(numpy.__version__)
if (__name__ == "__main__"):
run()
|
1714476
|
import os
import pytest
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics.gan.inception_score import InceptionScore
def calculate_inception_score(p_yx):
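    # Inception Score: IS = exp( E_x[ KL(p(y|x) || p(y)) ] ), where p(y) is the
    # marginal class distribution estimated by averaging p(y|x) over the batch.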
p_y = torch.unsqueeze(p_yx.mean(axis=0), 0)
kl_d = torch.kl_div(torch.log(p_y), p_yx)
sum_kl_d = kl_d.sum(axis=1)
avg_kl_d = torch.mean(sum_kl_d)
is_score = torch.exp(avg_kl_d)
return is_score
def test_inception_score():
p_yx = torch.rand(20, 10)
m = InceptionScore(num_features=10, feature_extractor=torch.nn.Identity())
m.update(p_yx)
assert pytest.approx(calculate_inception_score(p_yx)) == m.compute()
p_yx = torch.rand(20, 3, 299, 299)
m = InceptionScore()
m.update(p_yx)
assert isinstance(m.compute(), float)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_device_mismatch_cuda():
p_yx = torch.rand(20, 10).to("cpu")
m = InceptionScore(num_features=10, feature_extractor=torch.nn.Identity().to("cpu"), device="cuda")
m.update(p_yx)
assert pytest.approx(calculate_inception_score(p_yx)) == m.compute()
def test_wrong_inputs():
with pytest.raises(ValueError, match=r"Argument num_features must be greater to zero, got:"):
InceptionScore(num_features=-1, feature_extractor=torch.nn.Identity()).update(torch.rand(2, 0))
with pytest.raises(ValueError, match=r"feature_extractor output must be a tensor of dim 2, got: 1"):
InceptionScore(num_features=1000, feature_extractor=torch.nn.Identity()).update(torch.rand(3))
with pytest.raises(ValueError, match=r"Batch size should be greater than one, got: 0"):
InceptionScore(num_features=1000, feature_extractor=torch.nn.Identity()).update(torch.rand(0, 0))
with pytest.raises(ValueError, match=r"num_features returned by feature_extractor should be 1000, got: 0"):
InceptionScore(num_features=1000, feature_extractor=torch.nn.Identity()).update(torch.rand(2, 0))
with pytest.raises(
NotComputableError, match=r"InceptionScore must have at least one example before it can be computed."
):
InceptionScore(num_features=1000, feature_extractor=torch.nn.Identity()).compute()
with pytest.raises(ValueError, match=r"Argument num_features must be provided, if feature_extractor is specified."):
InceptionScore(feature_extractor=torch.nn.Identity())
def _test_distrib_integration(device):
from ignite.engine import Engine
rank = idist.get_rank()
torch.manual_seed(12)
def _test(metric_device):
n_iters = 60
s = 16
offset = n_iters * s
n_probabilities = 10
y = torch.rand(offset * idist.get_world_size(), n_probabilities)
def update(_, i):
return y[i * s + rank * offset : (i + 1) * s + rank * offset, :]
engine = Engine(update)
m = InceptionScore(num_features=n_probabilities, feature_extractor=torch.nn.Identity(), device=metric_device)
m.attach(engine, "InceptionScore")
engine.run(data=list(range(n_iters)), max_epochs=1)
assert "InceptionScore" in engine.state.metrics
assert pytest.approx(calculate_inception_score(y)) == m.compute()
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_gpu(local_rank, distributed_context_single_node_nccl):
device = torch.device(f"cuda:{local_rank}")
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_cpu(distributed_context_single_node_gloo):
device = torch.device("cpu")
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_cpu(distributed_context_multi_node_gloo):
device = torch.device("cpu")
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gpu(distributed_context_multi_node_nccl):
device = torch.device(f"cuda:{distributed_context_multi_node_nccl['local_rank']}")
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
1714484
|
import os
import pickle
import numpy as np
from scipy.io import loadmat
from sklearn.preprocessing import normalize
from sklearn.metrics import (f1_score, accuracy_score, precision_recall_fscore_support,
precision_recall_curve, auc, roc_curve)
from imblearn.metrics import classification_report_imbalanced
def evaluate_f1(y, y_pred, pos_label=1):
precision, recall, f1, support = precision_recall_fscore_support(y, y_pred, pos_label=pos_label)
return precision, recall, f1
def evaluate_macro_f1(y, y_pred, pos_label=1):
f1 = f1_score(y, y_pred, pos_label=pos_label, average='macro')
return f1
def evaluate_auc_prc(y, pred):
precision, recall, thresholds = precision_recall_curve(y, pred)
aucprc = auc(recall, precision)
return aucprc
def evaluate_auc_roc(y, pred):
fpr, tpr, thresholds = roc_curve(y, pred)
roc_auc = auc(fpr, tpr)
return roc_auc
def evaluate_f2(y, y_pred):
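    # NOTE: despite the name, this computes the F-beta score with beta = 0.5 for the
    # positive class, i.e. precision is weighted more heavily than recall.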
precision, recall, f1, support = precision_recall_fscore_support(y, y_pred, pos_label=1)
#print classification_report(y, y_pred)
f2 = (1+0.5**2)*(precision[1]*recall[1])/(0.5**2*precision[1]+recall[1])
return f2
def load_imb_Gaussian(data_dir):
train = pickle.load(open(os.path.join(data_dir, 'train.pkl'), 'rb'), encoding='bytes')
train_x, train_y = train[b'x'], train[b'y']
valid = pickle.load(open(os.path.join(data_dir, 'valid.pkl'), 'rb'), encoding='bytes')
valid_x, valid_y = valid[b'x'], valid[b'y']
test = pickle.load(open(os.path.join(data_dir, 'test.pkl'), 'rb'), encoding='bytes')
test_x, test_y = test[b'x'], test[b'y']
return train_x, train_y, valid_x, valid_y, test_x, test_y
def load_imb_Credit_Fraud(data_dir):
train = pickle.load(open(os.path.join(data_dir, 'train.pkl'), 'rb'), encoding='bytes')
train_x, train_y = train[b'x'], train[b'y']
valid = pickle.load(open(os.path.join(data_dir, 'valid.pkl'), 'rb'), encoding='bytes')
valid_x, valid_y = valid[b'x'], valid[b'y']
test = pickle.load(open(os.path.join(data_dir, 'test.pkl'), 'rb'), encoding='bytes')
test_x, test_y = test[b'x'], test[b'y']
return train_x, train_y, valid_x, valid_y, test_x, test_y
def load_imb_Page(data_dir):
train = pickle.load(open(os.path.join(data_dir, 'train.pkl'), 'rb'), encoding='bytes')
train_x, train_y = train[b'x'], train[b'y']
valid = pickle.load(open(os.path.join(data_dir, 'valid.pkl'), 'rb'), encoding='bytes')
valid_x, valid_y = valid[b'x'], valid[b'y']
test = pickle.load(open(os.path.join(data_dir, 'test.pkl'), 'rb'), encoding='bytes')
test_x, test_y = test[b'x'], test[b'y']
print(train_x.shape)
return train_x, train_y, valid_x, valid_y, test_x, test_y
def load_imb_Vehicle(data_dir):
train = pickle.load(open(data_dir, 'rb'), encoding='bytes')
train_x, train_y = train[b'x'], train[b'y']
return train_x, train_y #valid_x, valid_y
def load_checker_board(data_dir):
train = pickle.load(open(data_dir, 'rb'), encoding='bytes')
train_x, train_y = train[b'x'], train[b'y']
return train_x, train_y
def load_imb_data(data_dir):
train = pickle.load(open(data_dir, 'rb'), encoding='bytes')
train_x, train_y = train[b'x'], train[b'y']
return train_x, train_y
|
1714488
|
import panel as pn
import param
class Dashboard(param.Parameterized):
file_selector = param.FileSelector()
progress = pn.widgets.Progress(active=False)
@param.depends("file_selector", watch=True)
def activate_upload_status(self):
self.progress.active = True
dashboard = Dashboard()
pn.Column(dashboard.progress, pn.Param(dashboard.param.file_selector)).servable()
|
1714508
|
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import random
from transformers import AutoTokenizer
from tqdm import tqdm
from utils.generic_utils import read
class MMDialDataset(Dataset):
def __init__(self, data, tokenizer):
super().__init__()
self.data = data
self.tokenizer = tokenizer
def __len__(self):
return self.data.__len__()
def __getitem__(self, i):
context = MMDialDataset.extract(self.data[i], '<|context|>', keep_tokens=True)
labels = self.data[i][len(context):]
# belief = MMDialDataset.extract(labels, '<|belief|>')
# action = MMDialDataset.extract(labels, '<|action|>')
# response = MMDialDataset.extract(labels, '<|response|>')
ret = self.tokenizer(self.data[i], truncation=True, return_tensors='pt')
context_tokenized = self.tokenizer(context, truncation=True, return_tensors='pt')
ret['context_input_ids'] = context_tokenized['input_ids']
ret['context_attention_mask'] = context_tokenized['attention_mask']
labels_tokenized = self.tokenizer(labels, truncation=True, return_tensors='pt')
ret['labels'] = labels_tokenized['input_ids']
ret['labels_len'] = ret['labels'].shape[-1]
ret['id'] = i
return ret
@classmethod
def get_token_text(cls, token):
return token.replace('<', '').replace('>', '').replace('|', '').strip()
@classmethod
def extract(cls, text, begin_token, end_token=None, keep_tokens=False):
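        # Return the text span delimited by begin_token and its end token (defaulting to
        # '<|endof<name>|>'); with keep_tokens=True the delimiter tokens themselves are kept.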
end_token = end_token or f'<|endof{MMDialDataset.get_token_text(begin_token)}|>'
begin_idx = text.find(begin_token)
end_idx = text.find(end_token)
if begin_idx == -1:
return ''
elif end_idx == -1:
return text[begin_idx + len(begin_token):].strip() if not keep_tokens else text[begin_idx:]
return text[begin_idx + len(begin_token): end_idx].strip() if not keep_tokens else text[begin_idx: end_idx + len(end_token)]
@classmethod
def create_data(cls, paths, tokenizer_or_transformer_model, split=(1,), shuffle=True):
assert sum(split) == 1
data = []
for path in paths:
data.extend(read(path))
if isinstance(tokenizer_or_transformer_model, str):
tokenizer = AutoTokenizer.from_pretrained(tokenizer_or_transformer_model)
else:
tokenizer = tokenizer_or_transformer_model
if shuffle:
random.shuffle(data)
splits = []
begin_idx = 0
for i, s in enumerate(split):
if i == len(split) - 1:
end_idx = len(data)
else:
end_idx = int(begin_idx + len(data) * s)
splits.append(MMDialDataset(data[begin_idx: end_idx], tokenizer=tokenizer))
begin_idx = end_idx
return splits[0] if len(split) == 1 else splits
if __name__ == '__main__':
dataset = MMDialDataset.create_data(['resources/gpt2/resources/test.inp'], 'gpt2')
for sample in dataset:
print(sample)
input()
|
1714517
|
import requests
from url_normalize import url_normalize
from assert_functions import assert_valid_response, assert_invalid_response
from values import api_url, auth_header, ArticlesValues as V
def test_not_authorized():
r = requests.get(api_url + f"/articles")
assert r.status_code == 401
def test_route():
r = requests.get(api_url + f"/articles", headers=auth_header)
assert_valid_response(r, V.result_fields)
assert len(r.json()["result"]) == 10
def test_existing_id():
article_id = V.existing_article_id
r = requests.get(api_url + f"/articles/{article_id}", headers=auth_header)
assert_valid_response(r, V.result_fields, dict_result=True)
def test_nonexisting_id():
article_id = V.nonexisting_article_id
r = requests.get(api_url + f"/articles/{article_id}", headers=auth_header)
assert_invalid_response(r)
def test_existing_urls():
urls = V.existing_urls
for u in urls:
r = requests.get(api_url + f"/articles?url={u}", headers=auth_header)
assert_valid_response(r, V.result_fields)
for article in r.json()["result"]:
normalized_u = url_normalize(u)
assert (
article["url"] == normalized_u or article["redirect_to"] == normalized_u
)
def test_nonexisting_urls():
urls = V.nonexisting_urls
for u in urls:
r = requests.get(api_url + f"/articles?url={u}", headers=auth_header)
assert_invalid_response(r)
|
1714542
|
import torch
from torch_geometric.data import Batch, Data
from torch_geometric.nn import ChebConv, GCNConv, MessagePassing
class MyConv(MessagePassing):
def forward(self, x, edge_index):
return self.propagate(edge_index, x=x)
def test_static_graph():
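    # Static-graph mode: one shared edge_index is applied to a batch of node feature
    # matrices stacked along dim 0; setting conv.node_dim = 1 makes message passing
    # treat dim 1 as the node dimension.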
edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
x1, x2 = torch.randn(3, 8), torch.randn(3, 8)
data1 = Data(edge_index=edge_index, x=x1)
data2 = Data(edge_index=edge_index, x=x2)
batch = Batch.from_data_list([data1, data2])
x = torch.stack([x1, x2], dim=0)
for conv in [MyConv(), GCNConv(8, 16), ChebConv(8, 16, K=2)]:
out1 = conv(batch.x, batch.edge_index)
assert out1.size(0) == 6
conv.node_dim = 1
out2 = conv(x, edge_index)
assert out2.size()[:2] == (2, 3)
assert torch.allclose(out1, out2.view(-1, out2.size(-1)))
|
1714554
|
from pathlib import Path
import io
import os
import pty
import subprocess
from test_driver.logger import rootlog
class VLan:
"""This class handles a VLAN that the run-vm scripts identify via its
    number handle. The network's lifetime equals the object's lifetime.
"""
nr: int
socket_dir: Path
process: subprocess.Popen
pid: int
fd: io.TextIOBase
def __repr__(self) -> str:
return f"<Vlan Nr. {self.nr}>"
def __init__(self, nr: int, tmp_dir: Path):
self.nr = nr
self.socket_dir = tmp_dir / f"vde{self.nr}.ctl"
# TODO: don't side-effect environment here
os.environ[f"QEMU_VDE_SOCKET_{self.nr}"] = str(self.socket_dir)
rootlog.info("start vlan")
pty_master, pty_slave = pty.openpty()
self.process = subprocess.Popen(
["vde_switch", "-s", self.socket_dir, "--dirmode", "0700"],
stdin=pty_slave,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False,
)
self.pid = self.process.pid
self.fd = os.fdopen(pty_master, "w")
self.fd.write("version\n")
        # TODO: the Perl version checks whether this can be read from
        # and, if not, dies. We could hang here forever. Fix it.
assert self.process.stdout is not None
self.process.stdout.readline()
if not (self.socket_dir / "ctl").exists():
rootlog.error("cannot start vde_switch")
rootlog.info(f"running vlan (pid {self.pid})")
def __del__(self) -> None:
rootlog.info(f"kill vlan (pid {self.pid})")
self.fd.close()
self.process.terminate()
|
1714590
|
import setuptools
setuptools.setup(
name='evolutionary-optimization',
version='0.1',
    packages=setuptools.find_packages(include=['evopt.*']),
url='',
license='',
author='<NAME>',
author_email='',
description='Evolutionary Optimization',
install_requires=[
'matplotlib',
'numpy'
]
)
|
1714622
|
import itertools
import os, random
import statistics
import matplotlib
import matplotlib.pyplot as plt
from analyze_entropy import comp_entropy
from analyze_prob_attn import compute_idf, get_ban_positions
# from data_collection import CUR_DIR, PROB_META_DIR, spec_name, MODEL_NAME, DATA_NAME
from util import convert_enc_attn, parse_arg
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
font_size = 14
matplotlib.font_manager._rebuild()
GLOBAL_FIGURE_WIDTH = 8
dpi = 800
# plt.rcParams["font.weight"] = "light"
plt.rcParams.update({'font.size': 14})
plt.rcParams['font.family'] = 'DejaVu Sans'
dir_datadrive = '/mnt/data0/jcxu/data/prob_gpt'
# Density: x: entropy, two plots: is bigram or not
FIG_SIZE_x = GLOBAL_FIGURE_WIDTH
def get_ys(t, logits, BOS_token=0):
# for one time step, get the tokens last_inp, cur_inp, cur_pred, and next_pred
cur_pred = logits[t]
try:
next_pred = logits[t + 1]
except IndexError:
next_pred = None
if t - 2 >= 0:
last_inp = logits[t - 2]
elif t - 2 == -1:
last_inp = BOS_token
else:
last_inp = None
if t - 1 >= 0:
cur_inp = logits[t - 1]
elif t - 1 == -1:
cur_inp = BOS_token
else:
cur_inp = None
return last_inp, cur_inp, cur_pred, next_pred
from collections import Counter
def truncate_attention_cell(attn_distb, input_doc, idf_ban_pos, tgt_prob_mass=0.9) -> Counter:
# for each attention distribution, remove the idf ban tokens (positions), get the accumulated prob up to prob_mass.
# return
sorts = np.argsort(attn_distb, axis=-1, kind=None, order=None)[::-1]
cum_prob_mass = 0
cnt = Counter()
for topk in sorts:
prob_mass = attn_distb[topk]
cum_prob_mass += prob_mass
if topk not in idf_ban_pos:
cnt[input_doc[topk]] = prob_mass
if cum_prob_mass > tgt_prob_mass or prob_mass < 0.01:
break
return cnt
def _y_entropy_step(attn_lle, input_doc, idf_ban_pos):
num_layer, num_head, src_len = attn_lle.shape
all_attns = Counter()
for l in range(num_layer):
for h in range(num_head):
cell_attn = truncate_attention_cell(attn_lle[l][h], input_doc, idf_ban_pos=idf_ban_pos)
all_attns = all_attns + cell_attn
return all_attns
def retrieve_tok_val(cnter, token):
try:
v = cnter[token]
except:
v = 0
return v
import matplotlib
import matplotlib.pyplot as plt
def plot_hist(val_ent_pairs, title):
weights = [p[0] for p in val_ent_pairs]
xs = [p[1] for p in val_ent_pairs]
plt.hist(xs, bins=20, weights=weights, density=True)
plt.xlabel('Pred Ent')
plt.ylabel('Cum Attn')
plt.title(title)
plt.grid(True)
plt.show()
import seaborn as sns
def get_statistics(matrix):
result = [0 for _ in range(len(matrix))]
for idx, row in enumerate(matrix):
try:
m = statistics.mean(row)
except:
m = 0
print("NO DATA!")
result[idx] = m
return result
def proceed_data(segs, val_ent_pairs, step_size=0.5):
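    # Bucket the per-step tuples by prediction entropy (bin width = step_size) and
    # return the mean of each recorded quantity per bin.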
cat_bins = [[[] for _ in range(segs)] for _ in range(5)]
for p in val_ent_pairs:
last_inp, cur_inp, cur_pred, next_pred, pred_ent, atte_ent = p
# attn_val, ent, attn_e = p[0], p[1], p[2]
cat = int(pred_ent // step_size)
try:
cat_bins[0][cat].append(last_inp)
cat_bins[1][cat].append(cur_inp)
cat_bins[2][cat].append(cur_pred)
cat_bins[3][cat].append(next_pred)
cat_bins[4][cat].append(atte_ent)
except:
pass
last_inp_mean = get_statistics(cat_bins[0])
cur_inp_mean = get_statistics(cat_bins[1])
cur_pred_mean = get_statistics(cat_bins[2])
next_pred_mean = get_statistics(cat_bins[3])
atte_ent_mean = get_statistics(cat_bins[4])
return last_inp_mean, cur_inp_mean, cur_pred_mean, next_pred_mean, atte_ent_mean
def read_stack_data(last_inp_mean, cur_inp_mean, cur_pred_mean, next_pred_mean, seps=10):
bar0 = last_inp_mean
bar1 = cur_inp_mean
bar2 = cur_pred_mean
bar3 = next_pred_mean
from operator import add
bar01 = np.add(bar0, bar1).tolist()
bar012 = np.add(bar01, bar2).tolist()
# x = list(range(10))
return bar0, bar1, bar2, bar3, bar01, bar012
def plot_single_line(this_fig, spec_config, input_data, step_size=0.5, ent_max=5,
show_x_ticks=False, show_y_ticks=True, data_name="", model_name="", ymin=2, ymax=5):
segs = np.arange(0, ent_max, step_size).tolist()
# colorblind = sns.color_palette("coolwarm", 10)[::-1]
last_inp_mean, cur_inp_mean, cur_pred_mean, next_pred_mean, atte_ent_mean = input_data
axes = this_fig.add_subplot(spec_config)
sns.lineplot(x=list(np.arange(0, 5, step_size)), y=atte_ent_mean, markers=True, dashes=False)
# axes = sns.boxplot(x=x, y=y, palette=colorblind, showfliers=False)
axes.xaxis.set_major_locator(MultipleLocator(1))
axes.xaxis.set_major_formatter(FormatStrFormatter('%d'))
# For the minor ticks, use no labels; default NullFormatter.
axes.xaxis.set_minor_locator(MultipleLocator(0.5))
# axes.get_ylim()
# axes.set_ylim(ymin, ymax)
if show_x_ticks:
# x_vals = [m * step_size for m in xticks]
        # axes.set_xticklabels(x_vals, rotation='vertical')
pass
else:
plt.setp(axes.get_xticklabels(), visible=False)
# if not show_y_ticks:
# plt.setp(axes.get_yticklabels(), visible=False)
if data_name != "":
axes.set_ylabel(data_name)
else:
axes.set_ylabel("")
if model_name != "":
axes.set_title(model_name)
return axes
def plot_single_box(this_fig, spec_config, input_data, step_size=0.5, ent_max=5,
show_x_ticks=False, show_y_ticks=True, ylim=0.8, data_name="", model_name="", show_legend=False):
segs = np.arange(0, ent_max, step_size).tolist()
# colorblind = sns.color_palette("coolwarm", 10)[::-1]
last_inp_mean, cur_inp_mean, cur_pred_mean, next_pred_mean, atte_ent_mean = input_data
bar0, bar1, bar2, bar3, bar01, bar012 = read_stack_data(last_inp_mean, cur_inp_mean, cur_pred_mean, next_pred_mean)
colorblind = sns.color_palette("coolwarm", 4)
# colorblind = sns.color_palette("Set2")
# colorblind = sns.color_palette()
catnames = ['$y_{t-2}$', '$y_{t-1}$',
'$y_{t}$', '$y_{t+1}$']
linewidth = 1.5
axes = this_fig.add_subplot(spec_config)
x = list(np.arange(0, 5, 0.5))
axes.bar(x, bar0, color=colorblind[0],
# edgecolor=colorblind[0],linewidth=linewidth,
label=catnames[0], width=step_size,
# hatch='/'
)
axes.bar(x, bar1, bottom=bar0,
# edgecolor='white', linewidth=1,
label=catnames[1], width=step_size,
# hatch='-',
facecolor=colorblind[1],
# histtype='step', facecolor='g',
# alpha=0.75
# ,hatch='-'
)
axes.bar(x, bar2, bottom=bar01,
# edgecolor=colorblind[3], linewidth=0,
label=catnames[2], width=step_size, facecolor=colorblind[3],
# histtype='step',
# hatch='|'
# ,hatch='|'
)
axes.bar(x, bar3, bottom=bar012, color=colorblind[2], label=catnames[3], width=step_size,
# edgecolor=colorblind[2], linewidth=linewidth,
# hatch='\\'
)
# axes = sns.boxplot(x=x, y=y, palette=colorblind, showfliers=False)
axes.xaxis.set_major_locator(MultipleLocator(1))
axes.xaxis.set_major_formatter(FormatStrFormatter('%d'))
if show_legend:
axes.legend(ncol=2, frameon=False)
# For the minor ticks, use no labels; default NullFormatter.
axes.xaxis.set_minor_locator(MultipleLocator(0.5))
axes.set_ylim(0, ylim)
if show_x_ticks:
# x_vals = [m * step_size for m in xticks]
        # axes.set_xticklabels(x_vals, rotation='vertical')
pass
else:
plt.setp(axes.get_xticklabels(), visible=False)
if not show_y_ticks:
plt.setp(axes.get_yticklabels(), visible=False)
if data_name != "":
axes.set_ylabel(data_name)
else:
axes.set_ylabel("")
if model_name != "":
axes.set_title(model_name)
return axes
def plot_box(val_ent_pairs, title=None, step_size=.25):
# max_pred_ent = max([p[1] for p in val_ent_pairs])
# segs = np.linspace(0, max_pred_ent + 0.1, num=20).tolist()
segs = np.arange(0, 8, step_size).tolist()
colorblind = sns.color_palette("coolwarm", 10)[::-1]
bins = [[] for _ in range(len(segs))]
x, y = [], []
for p in val_ent_pairs:
v, ent = p[0], p[1]
cat = int(ent // step_size)
try:
bins[cat].append(v)
x.append(cat)
y.append(v)
except:
pass
fig1, ax1 = plt.subplots()
ax1.set_title(title)
ax1 = sns.violinplot(x=x, y=y, cut=0, palette=colorblind, inner='quartile')
# ax1.set_xticks( np.arange(0, 8, step_size).tolist())
# ax1.set_xticklabels(np.arange(0, 8, step_size).tolist())
return ax1
def plot_single_scatter(val_ent_pairs, title):
y_attn_frac = [m[0] for m in val_ent_pairs]
x_pred_ent = [m[1] for m in val_ent_pairs]
ax = sns.jointplot(x=x_pred_ent, y=y_attn_frac, kind="hex", color="#4CB391")
# ax = sns.scatterplot(x=x_pred_ent,y=y_attn_frac)
#
# sns.histplot(x=x_pred_ent, y=y_attn_frac, bins=50, pthresh=.1, cmap="mako")
# sns.kdeplot(x=x_pred_ent, y=y_attn_frac, levels=5,linewidths=1)
# ax.set_title(title)
plt.show()
return ax
def analyze_attention_y_entropy(max_time_step, attn_tlle, pred_distribution, input_doc, ban_positions, logits, nuc,
top_p):
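    # For each decoding step: compute the prediction entropy, aggregate the
    # (idf-filtered) attention over all layers and heads, and record the fraction of
    # attention mass on y_{t-2}, y_{t-1}, y_t and y_{t+1}, plus the attention entropy.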
# T = attn_tlle.shape[0]
# data_pairs = [[], [], [], []]
data = []
for t in range(max_time_step):
try:
t_pred_ent = comp_entropy(pred_distribution[t], nuc, top_p)
last_inp, cur_inp, cur_pred, next_pred = get_ys(t, logits)
all_attns_counter = _y_entropy_step(attn_tlle[t], input_doc, ban_positions)
total_attn_val = sum(all_attns_counter.values())
all_attention = list(all_attns_counter.values())
np_attn = np.asarray(all_attention) / total_attn_val
attn_ent = comp_entropy(np_attn)
last_inp_val = retrieve_tok_val(all_attns_counter, last_inp)
cur_inp_val = retrieve_tok_val(all_attns_counter, cur_inp)
cur_pred_val = retrieve_tok_val(all_attns_counter, cur_pred)
next_pred_val = retrieve_tok_val(all_attns_counter, next_pred)
# data_pairs[0].append((last_inp_val / total_attn_val, t_pred_ent))
# data_pairs[1].append((cur_inp_val / total_attn_val, t_pred_ent))
# data_pairs[2].append((cur_pred_val / total_attn_val, t_pred_ent))
data.append((last_inp_val / total_attn_val, cur_inp_val / total_attn_val,
cur_pred_val / total_attn_val, next_pred_val / total_attn_val,
t_pred_ent, attn_ent))
except:
pass
# data_pairs[3].append((next_pred_val / total_attn_val, t_pred_ent))
return data
import pickle
import numpy as np
from scipy.stats import entropy
import matplotlib.gridspec as gridspec
import multiprocessing
def detect_useless_ids(indices):
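    # Keep only the leading run of indices in which each kept index is more than
    # 5 positions after the previously kept one; stop at the first violation.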
last = -100
good_indices = []
for x in indices:
if x - 5 > last:
last = x
good_indices.append(x)
else:
break
return good_indices
def process_data_single(args, f, eos_token_ids):
print("running")
BOS_TOKEN = 0
with open(os.path.join(args.cur_dir, f), 'rb') as fd:
data = pickle.load(fd)
attentions, pred_distb, logits, input_doc = data['attentions'], data['pred_distributions'], data['logits'], \
data['input_doc']
timesteps = len(attentions)
attentions_tlle = convert_enc_attn(attentions, merge_layer_head=False) # T,L,L,E
attention_tle = convert_enc_attn(attentions, merge_layer_head=True) # T,L,E
document_len = input_doc.shape[0]
    input_doc = input_doc.astype(int).tolist()
logits = logits.tolist()
indices = [i for i, x in enumerate(logits) if x in eos_token_ids]
good_indices = detect_useless_ids(indices)
if good_indices:
max_t = good_indices[-1]
else:
max_t = attentions_tlle.shape[0]
# dec_inp_logits = [BOS_TOKEN] + logits[:-1]
pred_distb = np.exp(pred_distb) # time step, vocab size
# pred_ent = entropy(pred_distb, axis=-1)
idf_flag = compute_idf(attention_tle[:max_t]) # E
ban_positions = get_ban_positions(idf_flag)
# ban_positions = []
data_pairs = analyze_attention_y_entropy(max_t, attentions_tlle, pred_distb, input_doc, ban_positions, logits,
args.nucleus, args.nuc_prob)
return data_pairs
from itertools import product
def plot_stack_vocab(cnndm_peg, xsum_peg, cnndm_bart, xsum_bart):
fig = plt.figure(figsize=(FIG_SIZE_x, FIG_SIZE_x - 4))
spec2 = gridspec.GridSpec(ncols=2, nrows=2, figure=fig)
plot_single_box(this_fig=fig, spec_config=spec2[0, 0], input_data=cnndm_peg, show_x_ticks=False,
show_y_ticks=True, data_name="CNN/DM", model_name="PEGASUS", ylim=0.7)
plot_single_box(this_fig=fig, spec_config=spec2[0, 1], input_data=cnndm_bart, show_x_ticks=False,
show_y_ticks=False, model_name="BART", ylim=0.7)
plot_single_box(this_fig=fig, spec_config=spec2[1, 0], input_data=xsum_peg, show_x_ticks=True, show_y_ticks=True,
data_name='XSum', ylim=0.4)
plot_single_box(this_fig=fig, spec_config=spec2[1, 1], input_data=xsum_bart, show_x_ticks=True,
show_y_ticks=False, ylim=0.4, show_legend=True)
fig.text(0.5, 0.01, 'Prediction Entropy', ha='center', fontsize=font_size)
fig.text(0.0, 0.5, 'Vocab Projected Attention', va='center', rotation='vertical', fontsize=font_size)
fig.tight_layout()
plt.savefig(f"x_pred_ent_y_attn_frac.pdf", dpi=dpi, bbox_inches='tight')
plt.show()
plt.close()
def run_one_fig(spec, args, num_samples=300):
print(f"--{spec}--")
CUR_DIR = os.path.join(args.prob_meta_dir, spec)
args.cur_dir = CUR_DIR
files = os.listdir(CUR_DIR)
random.shuffle(files)
files = files[:num_samples]
BOS_TOKEN = 0
print(args.spec_name)
if 'pegasus' in args.model_name:
from transformers import PegasusTokenizer
bpe_tokenizer = PegasusTokenizer.from_pretrained(args.model_name)
EOS_TOK_IDs = [106, bpe_tokenizer.eos_token_id, 2] # <n>
elif 'gpt' in args.model_name:
from transformers import GPT2Tokenizer
bpe_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
EOS_TOK_IDs = [bpe_tokenizer.eos_token_id]
elif 'bart' in args.model_name:
from transformers import BartTokenizer
bpe_tokenizer = BartTokenizer.from_pretrained(args.model_name)
EOS_TOK_IDs = [bpe_tokenizer.eos_token_id]
else:
raise NotImplementedError
# process_data_single(args, files[0], eos_token_ids=EOS_TOK_IDs)
len_samples = len(files)
cpu_cnt = multiprocessing.cpu_count()
with multiprocessing.Pool(processes=cpu_cnt) as pool:
results = pool.starmap(process_data_single, zip([args] * len_samples, files, [EOS_TOK_IDs] * len_samples))
output = list(itertools.chain.from_iterable(results))
print(f"Samples: {len(output)}")
output = proceed_data(10, output)
return output
def plot_ant_entropy(cnndm_peg, xsum_peg, cnndm_bart, xsum_bart):
fig = plt.figure(figsize=(FIG_SIZE_x, FIG_SIZE_x - 5))
step_size = 0.5
d = {'PEG$_{C}$': cnndm_peg[-1],
'PEG-X': xsum_peg[-1],
'BART-C': cnndm_bart[-1],
'BART-X': xsum_bart[-1],
}
# df = pd.DataFrame(data=d)
ax = fig.add_subplot(1, 1, 1)
# line1 = sns.lineplot(x=list(np.arange(0, 5, step_size)), y=cnndm_peg[-1], label='PEG$_{C}$', markers='x')
plt.plot(list(np.arange(0, 5, step_size)), cnndm_peg[-1], label='PEG$_{C}$', marker='+',
# color='k'
)
plt.plot(list(np.arange(0, 5, step_size)), xsum_peg[-1], label='PEG$_{X}$', marker='x',
# color='k'
)
plt.plot(list(np.arange(0, 5, step_size)), cnndm_bart[-1], label='BART$_{C}$', ls='--', marker='+',
# color='k'
)
plt.plot(list(np.arange(0, 5, step_size)), xsum_bart[-1], label='BART$_{X}$', ls='--', marker='x',
# color='k'
)
plt.legend(loc='best', ncol=2, frameon=False)
# spec2 = gridspec.GridSpec(ncols=2, nrows=2, figure=fig)
# plot_single_line(this_fig=fig, spec_config=spec2[0, 0], input_data=cnndm_peg, show_x_ticks=False,
# show_y_ticks=True, data_name="CNN/DM", model_name="PEGASUS", ymin=2, ymax=4
# )
# plot_single_line(this_fig=fig, spec_config=spec2[0, 1], input_data=cnndm_bart, show_x_ticks=False,
# show_y_ticks=False, model_name="BART", ymin=2, ymax=4)
# plot_single_line(this_fig=fig, spec_config=spec2[1, 0], input_data=xsum_peg, show_x_ticks=True, show_y_ticks=True,
# data_name='XSUM', ymin=2.5, ymax=4)
# plot_single_line(this_fig=fig, spec_config=spec2[1, 1], input_data=xsum_bart, show_x_ticks=True,
# show_y_ticks=False, ymin=2.5, ymax=4)
ax.set_ylabel('Attention Entropy')
ax.set_xlabel('Prediction Entropy')
ax.xaxis.set_major_locator(MultipleLocator(0.5))
# ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
# For the minor ticks, use no labels; default NullFormatter.
# plt.xaxis.set_minor_locator(MultipleLocator(0.5))
fig.tight_layout()
plt.savefig(f"atten_entropy.pdf", dpi=dpi, bbox_inches='tight')
# fig.text(0.5, 0.01, 'Prediction Entropy', ha='center')
# fig.text(0.0, 0.5, '', va='center', rotation='vertical')
plt.show()
plt.close()
import pandas as pd
if __name__ == '__main__':
args = parse_arg()
print("Looking at attention")
if 'pegasus' in args.model_name:
from transformers import PegasusTokenizer
bpe_tokenizer = PegasusTokenizer.from_pretrained(args.model_name)
EOS_TOK_IDs = [106, bpe_tokenizer.eos_token_id] # <n>
BOS_TOK_ID = 0
else:
raise NotImplementedError
cnndm_peg = "d_cnn_dailymail-m_googlepegasuscnn_dailymail-full1"
xsum_peg = "d_xsum-m_googlepegasusxsum-full1"
cnndm_bart = "d_cnn_dailymail-m_facebookbartlargecnn-full1"
xsum_bart = 'd_xsum-m_facebookbartlargexsum-full1'
xsum_bart_out = run_one_fig(xsum_bart, args)
cnndm_peg_out = run_one_fig(cnndm_peg, args)
xsum_peg_out = run_one_fig(xsum_peg, args)
cnndm_bart_out = run_one_fig(cnndm_bart, args)
# df = pd.DataFrame(data=xsum_bart_out)
# plot_stack_vocab(cnndm_peg_out, xsum_peg_out, cnndm_bart_out, xsum_bart_out)
plot_stack_vocab(cnndm_peg_out, xsum_peg_out, cnndm_bart_out, xsum_bart_out)
plot_ant_entropy(cnndm_peg_out, xsum_peg_out, cnndm_bart_out, xsum_bart_out)
# plot_box(all_data_pairs[0], 'last_inp')
# plot_box(all_data_pairs[1], 'cur_inp')
# plot_box(all_data_pairs[2], 'cur_pred')
# plot_box(all_data_pairs[3], 'next_pred')
|
1714645
|
class Controller(object):
@classmethod
def from_flatbuffers(cls, raw_ctrl):
return cls(
id=raw_ctrl.Id(),
address=raw_ctrl.Address().decode(),
name=raw_ctrl.Name().decode(),
settings={
'powered': raw_ctrl.Powered(),
'connectable': raw_ctrl.Connectable(),
'discoverable': raw_ctrl.Discoverable(),
'low_energy': raw_ctrl.LowEnergy(),
'advertising': raw_ctrl.Advertising(),
}
)
def __init__(self, id, address, name=None, settings=None):
self.id = id
self.address = address
self.name = name
if settings is None:
settings = {}
self.powered = settings.get('powered', False)
self.connectable = settings.get('connectable', False)
self.discoverable = settings.get('discoverable', False)
self.low_energy = settings.get('low_energy', False)
self.advertising = settings.get('advertising', False)
def __str__(self):
return self.__repr__()
def __repr__(self):
return "<Controller id={}, address={}, name={}>".format(self.id, self.address, self.name)
|
1714648
|
from aiopg.sa import create_engine
import datetime
import psycopg2
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import (
Column, Integer, String, DateTime, ForeignKey, Text, Boolean)
from sqlalchemy.orm import relationship
from sqlalchemy.schema import CreateTable, DropTable
Base = declarative_base()
class Endpoint(Base):
__tablename__ = 'endpoint'
id = Column(Integer, primary_key=True)
hash = Column(String(256), unique=True)
when = Column(DateTime, nullable=False, default=datetime.datetime.utcnow)
live = Column(Boolean, default=True)
access_logs = relationship(
'AccessLog', back_populates='endpoint', cascade='all, delete')
responses = relationship(
'Response', back_populates='endpoint', cascade='all, delete')
def __repr__(self):
        return '<Endpoint(id="{}", hash="{}")>'.format(self.id, self.hash)
class AccessLog(Base):
__tablename__ = 'access_log'
id = Column(Integer, primary_key=True)
request = Column(Text, default='')
response = Column(Text, default='')
endpoint_id = Column(Integer, ForeignKey('endpoint.id'))
when = Column(DateTime, nullable=False, default=datetime.datetime.utcnow)
endpoint = relationship('Endpoint', back_populates='access_logs')
def __repr__(self):
return '<AccessLog(id="{}", endpoint="{}")>'.format(
self.id, self.endpoint)
class Response(Base):
__tablename__ = 'response'
id = Column(Integer, primary_key=True)
headers = Column(Text, default='{}')
status_code = Column(Integer, default=200)
body = Column(Text, default='')
endpoint_id = Column(Integer, ForeignKey('endpoint.id'))
endpoint = relationship('Endpoint', back_populates='responses')
def __repr__(self):
return '<Response(id="{}", endpoint="{}")>'.format(
self.id, self.endpoint)
async def delete_tables(pg, tables):
async with pg.acquire() as conn:
for table in reversed(tables):
drop_expr = DropTable(table)
try:
return await conn.execute(drop_expr)
except psycopg2.ProgrammingError:
pass
async def prepare_tables(pg):
tables = [Endpoint.__table__, AccessLog.__table__, Response.__table__]
# await delete_tables(pg, tables)
async with pg.acquire() as conn:
for table in tables:
try:
create_expr = CreateTable(table)
await conn.execute(create_expr)
except psycopg2.ProgrammingError:
pass
async def init_pg(app):
db_conf = app['config']['database']
engine = await create_engine(
database=db_conf['name'],
user=db_conf['user'],
        password=db_conf['password'],
host=db_conf['host'],
port=db_conf['port'])
await prepare_tables(engine)
app['db'] = engine
async def close_pg(app):
app['db'].close()
await app['db'].wait_closed()
del app['db']
|
1714651
|
from prml.nn.array.array import Array, array, asarray
from prml.nn.array.reshape import reshape_method
from prml.nn.function import broadcast, broadcast_to
Array.reshape = reshape_method
|
1714674
|
import unittest
from katas.kyu_7.naughty_or_nice import naughty_or_nice
class NaughtyOrNiceTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(naughty_or_nice({
'January': {
'1': 'Naughty', '2': 'Nice', '3': 'Naughty', '4': 'Nice',
'5': 'Nice', '6': 'Nice', '7': 'Naughty', '8': 'Nice',
'9': 'Nice', '10': 'Naughty', '11': 'Nice', '12': 'Nice',
'13': 'Nice', '14': 'Naughty', '15': 'Naughty',
'16': 'Naughty', '17': 'Nice', '18': 'Nice', '19': 'Naughty',
'20': 'Nice', '21': 'Naughty', '22': 'Nice', '23': 'Naughty',
'24': 'Nice', '25': 'Naughty', '26': 'Nice', '27': 'Nice',
'28': 'Naughty', '29': 'Nice', '30': 'Nice', '31': 'Nice'
},
'February': {
'1': 'Nice', '2': 'Naughty', '3': 'Nice', '4': 'Nice',
'5': 'Nice', '6': 'Nice', '7': 'Nice', '8': 'Nice',
'9': 'Naughty', '10': 'Naughty', '11': 'Naughty',
'12': 'Nice', '13': 'Nice', '14': 'Naughty', '15': 'Naughty',
'16': 'Nice', '17': 'Nice', '18': 'Naughty', '19': 'Nice',
'20': 'Nice', '21': 'Nice', '22': 'Nice', '23': 'Nice',
'24': 'Naughty', '25': 'Naughty', '26': 'Nice',
'27': 'Naughty', '28': 'Nice'
},
'March': {
'1': 'Nice', '2': 'Naughty', '3': 'Nice', '4': 'Nice',
'5': 'Nice', '6': 'Naughty', '7': 'Nice', '8': 'Nice',
'9': 'Nice', '10': 'Naughty', '11': 'Naughty', '12': 'Nice',
'13': 'Naughty', '14': 'Naughty', '15': 'Naughty',
'16': 'Nice', '17': 'Nice', '18': 'Nice', '19': 'Naughty',
'20': 'Nice', '21': 'Naughty', '22': 'Naughty', '23': 'Nice',
'24': 'Nice', '25': 'Nice', '26': 'Nice', '27': 'Nice',
'28': 'Naughty', '29': 'Nice', '30': 'Nice', '31': 'Naughty'
},
'April': {
'1': 'Naughty', '2': 'Naughty', '3': 'Nice', '4': 'Nice',
'5': 'Nice', '6': 'Naughty', '7': 'Naughty', '8': 'Nice',
'9': 'Nice', '10': 'Nice', '11': 'Nice', '12': 'Nice',
'13': 'Naughty', '14': 'Nice', '15': 'Naughty',
'16': 'Naughty', '17': 'Nice', '18': 'Naughty', '19': 'Nice',
'20': 'Naughty', '21': 'Naughty', '22': 'Nice', '23': 'Nice',
'24': 'Naughty', '25': 'Nice', '26': 'Naughty',
'27': 'Naughty', '28': 'Nice', '29': 'Nice', '30': 'Nice'
},
'May': {
'1': 'Nice', '2': 'Naughty', '3': 'Naughty', '4': 'Nice',
'5': 'Nice', '6': 'Nice', '7': 'Naughty', '8': 'Nice',
'9': 'Nice', '10': 'Nice', '11': 'Naughty', '12': 'Naughty',
'13': 'Naughty', '14': 'Naughty', '15': 'Nice',
'16': 'Naughty', '17': 'Naughty', '18': 'Nice', '19': 'Nice',
'20': 'Nice', '21': 'Nice', '22': 'Nice', '23': 'Naughty',
'24': 'Naughty', '25': 'Nice', '26': 'Nice', '27': 'Nice',
'28': 'Naughty', '29': 'Naughty', '30': 'Naughty',
'31': 'Nice'
},
'June': {
'1': 'Naughty', '2': 'Nice', '3': 'Naughty', '4': 'Nice',
'5': 'Naughty', '6': 'Nice', '7': 'Nice', '8': 'Nice',
'9': 'Nice', '10': 'Naughty', '11': 'Naughty', '12': 'Nice',
'13': 'Nice', '14': 'Naughty', '15': 'Nice', '16': 'Naughty',
'17': 'Naughty', '18': 'Naughty', '19': 'Nice', '20': 'Nice',
'21': 'Nice', '22': 'Nice', '23': 'Nice', '24': 'Nice',
'25': 'Nice', '26': 'Nice', '27': 'Nice', '28': 'Nice',
'29': 'Naughty', '30': 'Nice'
},
'July': {
'1': 'Nice', '2': 'Nice', '3': 'Nice', '4': 'Naughty',
'5': 'Nice', '6': 'Nice', '7': 'Nice', '8': 'Nice',
'9': 'Naughty', '10': 'Nice', '11': 'Nice', '12': 'Naughty',
'13': 'Nice', '14': 'Naughty', '15': 'Nice', '16': 'Nice',
'17': 'Naughty', '18': 'Nice', '19': 'Naughty', '20': 'Nice',
'21': 'Nice', '22': 'Nice', '23': 'Nice', '24': 'Naughty',
'25': 'Naughty', '26': 'Nice', '27': 'Naughty',
'28': 'Naughty', '29': 'Nice', '30': 'Nice', '31': 'Nice'
},
'August': {
'1': 'Naughty', '2': 'Nice', '3': 'Naughty', '4': 'Nice',
'5': 'Nice', '6': 'Nice', '7': 'Nice', '8': 'Nice',
'9': 'Naughty', '10': 'Naughty', '11': 'Nice',
'12': 'Naughty', '13': 'Nice', '14': 'Naughty', '15': 'Nice',
'16': 'Nice', '17': 'Naughty', '18': 'Nice', '19': 'Naughty',
'20': 'Nice', '21': 'Nice', '22': 'Naughty', '23': 'Naughty',
'24': 'Naughty', '25': 'Naughty', '26': 'Nice', '27': 'Nice',
'28': 'Nice', '29': 'Naughty', '30': 'Naughty', '31': 'Nice'
},
'September': {
'1': 'Naughty', '2': 'Nice', '3': 'Naughty', '4': 'Nice',
'5': 'Nice', '6': 'Nice', '7': 'Nice', '8': 'Naughty',
'9': 'Naughty', '10': 'Nice', '11': 'Naughty',
'12': 'Naughty', '13': 'Nice', '14': 'Naughty', '15': 'Nice',
'16': 'Nice', '17': 'Nice', '18': 'Nice', '19': 'Nice',
'20': 'Naughty', '21': 'Nice', '22': 'Nice', '23': 'Nice',
'24': 'Nice', '25': 'Nice', '26': 'Naughty', '27': 'Nice',
'28': 'Nice', '29': 'Naughty', '30': 'Nice'
},
'October': {
'1': 'Nice', '2': 'Naughty', '3': 'Naughty', '4': 'Naughty',
'5': 'Naughty', '6': 'Nice', '7': 'Nice', '8': 'Naughty',
'9': 'Nice', '10': 'Nice', '11': 'Naughty', '12': 'Nice',
'13': 'Nice', '14': 'Nice', '15': 'Nice', '16': 'Nice',
'17': 'Naughty', '18': 'Naughty', '19': 'Nice', '20': 'Nice',
'21': 'Naughty', '22': 'Nice', '23': 'Nice', '24': 'Naughty',
'25': 'Nice', '26': 'Nice', '27': 'Nice', '28': 'Naughty',
'29': 'Naughty', '30': 'Nice', '31': 'Nice'
},
'November': {
'1': 'Naughty', '2': 'Nice', '3': 'Naughty', '4': 'Nice',
'5': 'Nice', '6': 'Nice', '7': 'Nice', '8': 'Nice',
'9': 'Nice', '10': 'Nice', '11': 'Nice', '12': 'Naughty',
'13': 'Naughty', '14': 'Naughty', '15': 'Naughty',
'16': 'Nice', '17': 'Naughty', '18': 'Nice', '19': 'Nice',
'20': 'Nice', '21': 'Naughty', '22': 'Naughty', '23': 'Nice',
'24': 'Naughty', '25': 'Naughty', '26': 'Nice', '27': 'Nice',
'28': 'Nice', '29': 'Nice', '30': 'Naughty'
},
'December': {
'1': 'Nice', '2': 'Nice', '3': 'Nice', '4': 'Naughty',
'5': 'Nice', '6': 'Naughty', '7': 'Nice', '8': 'Naughty',
'9': 'Nice', '10': 'Naughty', '11': 'Naughty',
'12': 'Naughty', '13': 'Naughty', '14': 'Naughty',
'15': 'Naughty', '16': 'Nice', '17': 'Nice', '18': 'Nice',
'19': 'Naughty', '20': 'Nice', '21': 'Naughty',
'22': 'Naughty', '23': 'Nice', '24': 'Nice', '25': 'Naughty',
'26': 'Nice', '27': 'Nice', '28': 'Nice', '29': 'Nice',
'30': 'Nice', '31': 'Nice'
}
}), 'Nice!')
|
1714678
|
import numpy as np
import torch
def parameter_number(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def normal2unit(vertices: "(vertice_num, 3)"):
"""
Return: (vertice_num, 3) => normalized into unit sphere
"""
center = vertices.mean(dim= 0)
vertices -= center
distance = vertices.norm(dim= 1)
vertices /= distance.max()
return vertices
def rotate(points, degree: float, axis: int):
"""Rotate along upward direction"""
rotate_matrix = torch.eye(3)
theta = (degree/360)*2*np.pi
cos = np.cos(theta)
sin = np.sin(theta)
axises = [0, 1, 2]
assert axis in axises
axises.remove(axis)
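    # Fill in a 2D rotation in the plane spanned by the two remaining axes.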
rotate_matrix[axises[0], axises[0]] = cos
rotate_matrix[axises[0], axises[1]] = -sin
rotate_matrix[axises[1], axises[0]] = sin
rotate_matrix[axises[1], axises[1]] = cos
points = points @ rotate_matrix
return points
class Transform():
def __init__(self,
normal: bool,
shift: float = None,
scale: float = None,
rotate: float = None,
axis: int = 0,
random:bool= False):
self.normal = normal
self.shift = shift
self.scale = scale
self.rotate = rotate
self.axis = axis
self.random = random
def __call__(self, points: "(point_num, 3)"):
if self.normal:
points = normal2unit(points)
if self.shift:
shift = self.shift
if self.random:
shift = (torch.rand(3)*2 - 1) * self.shift
points += shift
if self.scale:
scale = self.scale
points *= scale
if self.rotate:
degree = self.rotate
if self.random:
degree = (torch.rand(1).item()*2 - 1) * self.rotate
points = rotate(points, degree, self.axis)
return points
def test():
points = torch.randn(1024, 3)
transform = Transform(normal= True, scale= 10.0, axis= 1, random= True)
points = transform(points)
print(points.size())
if __name__ == '__main__':
test()
|
1714697
|
import unittest
from typing import Callable
from danlp.datasets.wiki_ann import _wikiann_process_func
from danlp.datasets.word_sim import _word_sim_process_func
from danlp.download import MODELS, download_model, DATASETS, download_dataset, _unzip_process_func, _check_process_func
from danlp.models.embeddings import _process_downloaded_embeddings, _process_embeddings_for_spacy
class TestDownload(unittest.TestCase):
def test_all_downloadable_files_has_checksums(self):
for model, data in MODELS.items():
self.assertIn('size', data, msg="Model {}".format(model))
self.assertIn('md5_checksum', data)
self.assertIn('file_extension', data)
for dataset, data in DATASETS.items():
self.assertIn('size', data, msg="Dataset {}".format(dataset))
self.assertIn('md5_checksum', data)
self.assertIn('file_extension', data)
def test_download_fails_with_wrong_title(self):
with self.assertRaises(ValueError):
download_model('do.not.exists.wv')
with self.assertRaises(ValueError):
download_dataset('do.not.exists.zip')
def test_process_functions(self):
process_functions = [
_process_downloaded_embeddings,
_process_embeddings_for_spacy,
_unzip_process_func,
_wikiann_process_func,
_word_sim_process_func
]
for proc_func in process_functions:
self.assertIsInstance(proc_func, Callable)
try:
_check_process_func(proc_func)
except AssertionError:
self.fail("{} does not have the correct arguments".format(proc_func))
if __name__ == '__main__':
unittest.main()
|
1714803
|
import os
import random
import pathlib
import logging
import datetime
import argparse
import sys
import simplejson as json
import pandas as pd
import sentencepiece as spm
import pickle
from collections import namedtuple
# logging.basicConfig(format='%(levelname)s: %(message)s',
# level=logging.DEBUG)
MetaData = namedtuple('MetaData',['support','query'])
SEED = 7
def init_logger(log_path, name="meta-data-prep"):
root = logging.getLogger()
root.setLevel(logging.DEBUG)
logfile = os.path.join(log_path, "%s-%s.log" % (name, datetime.datetime.today()))
fileHandler = logging.FileHandler(logfile)
fileHandler.setLevel(logging.INFO)
root.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.DEBUG)
root.addHandler(consoleHandler)
logging.debug("Logging to %s" % logfile)
def sents_reader(file):
f = open(file, encoding='utf8')
lines = f.readlines()
f.close()
return [i.strip() for i in lines]
def read_para_data(domain, seen_status, data_dir):
path_prefix = os.path.join(data_dir, domain, 'split', 'clean_tok', domain + '.de-en.')
source_lang = sents_reader(path_prefix + 'en')
target_lang = sents_reader(path_prefix + 'de')
score_df = None
if seen_status == 'seen':
score_df = pd.read_csv(os.path.join(data_dir, domain, 'split', 'clean_tok', 'grading_info.csv'))
assert len(score_df) == len(source_lang)
assert len(source_lang) == len(target_lang) # check data integrity
para_data = list(zip(source_lang, target_lang))
return para_data, score_df
def build_corpus_info(para_data, score_df):
src_tokens = []
tgt_tokens = []
for idx, para_sent in enumerate(para_data):
en_sent, de_sent = para_sent
src_tokens.append(len(en_sent.split()))
tgt_tokens.append(len(de_sent.split()))
if score_df is not None: # exclude unseen cases
assert len(src_tokens) == len(score_df['diff_score'])
diff_score = score_df['diff_score']
domain_info_df = pd.DataFrame({
'src_tokens': src_tokens,
'tgt_tokens': tgt_tokens,
'diff_score': diff_score
}, index=list(range(0, len(para_data))))
else:
domain_info_df = pd.DataFrame({
'src_tokens': src_tokens,
'tgt_tokens': tgt_tokens,
}, index=list(range(0, len(para_data))))
return domain_info_df
def split_task_by_tokens(info_df, sample_tokens, used_index=[]):
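    # Greedily draw shuffled rows (skipping indices in used_index) until the accumulated
    # source-token count reaches sample_tokens; return the sampled indices.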
shuffle_df = info_df.sample(frac=1, random_state=SEED)
cur_tokens = 0
sampled_index = []
for idx, row in shuffle_df.iterrows():
if cur_tokens >= sample_tokens:
break
if idx not in used_index:
sampled_index.append(idx)
cur_tokens += row['src_tokens']
# logging tokens
    logging.info('sampled_len: {}, sampled_tokens: {}, target_tokens: {}'.format(
len(sampled_index), cur_tokens, sample_tokens
))
return sampled_index
def split_dev_data(all_indices, used_indices):
remain_indices = list(set(all_indices).difference(used_indices))
logging.info('[Remaining Sentences] All: {} | Meta-Used: {} | Dev: {}'.format(
len(all_indices),
len(used_indices),
len(remain_indices),
))
return remain_indices
def dump_dict_to_file(name_emb, emb_filename):
json.dump(name_emb, open(os.path.join(emb_filename, 'meta_split_info.json'), "w"))
pickle.dump(name_emb, open(os.path.join(emb_filename, 'meta_split_info.pkl'), "wb"))
def write_sents(para_list, meta_split_dir, set_type, source_lang='en', target_lang='de', scores=None):
src_writer = open(os.path.join(meta_split_dir, set_type + '.' + source_lang), 'w', encoding='utf8')
tgt_writer = open(os.path.join(meta_split_dir, set_type + '.' + target_lang), 'w', encoding='utf8')
score_writer = open(os.path.join(meta_split_dir, set_type + '.' + 'score'), 'w', encoding='utf8')
if scores is not None:
assert len(scores) == len(para_list)
for idx, para_tuple in enumerate(para_list):
src_sent, tgt_sent = para_tuple
src_writer.write(src_sent + '\n')
tgt_writer.write(tgt_sent + '\n')
if scores is not None:
score_writer.write(str(scores[idx]) + '\n')
def write_dev_data(para_data, domain_sample_info, split_dir, split_type, spm_model, is_spm=False):
dev_sents = [para_data[i] for i in domain_sample_info['meta-dev']]
meta_split_dir = os.path.join(split_dir, split_type)
pathlib.Path(meta_split_dir).mkdir(parents=True, exist_ok=True)
write_sents(dev_sents, meta_split_dir, 'dev', 'en', 'de')
if is_spm:
spm_dir = meta_split_dir + '-spm'
pathlib.Path(spm_dir).mkdir(parents=True, exist_ok=True)
sp = spm.SentencePieceProcessor()
sp.Load(spm_model)
spm_dev = []
for _, para_tuple in enumerate(dev_sents):
en, de = para_tuple
en_spm = ' '.join(sp.EncodeAsPieces(en))
de_spm = ' '.join(sp.EncodeAsPieces(de))
spm_dev.append((en_spm, de_spm))
write_sents(spm_dev, spm_dir, 'dev', 'en', 'de')
def write_split_data(para_data, domain_sample_info, split_dir, split_type, spm_model, is_spm=False):
if split_type not in ['meta-train', 'meta-test', 'meta-dev']:
        raise ValueError('Unsupported meta split type')
meta_split_dir = os.path.join(split_dir, split_type)
pathlib.Path(meta_split_dir).mkdir(parents=True, exist_ok=True)
split_indices = domain_sample_info[split_type]
support_sents = [para_data[i] for i in split_indices.support]
query_sents = [para_data[i] for i in split_indices.query]
if domain_sample_info['status'] == 'seen':
scores = domain_sample_info[split_type + '-score']
write_sents(support_sents, meta_split_dir, 'support', 'en', 'de', scores=scores.support)
write_sents(query_sents, meta_split_dir, 'query', 'en', 'de', scores=scores.query)
else:
write_sents(support_sents, meta_split_dir, 'support', 'en', 'de')
write_sents(query_sents, meta_split_dir, 'query', 'en', 'de')
if is_spm:
spm_dir = meta_split_dir + '-spm'
pathlib.Path(spm_dir).mkdir(parents=True, exist_ok=True)
sp = spm.SentencePieceProcessor()
sp.Load(spm_model)
spm_support = []
spm_query = []
for _, para_tuple in enumerate(support_sents):
en, de = para_tuple
en_spm = ' '.join(sp.EncodeAsPieces(en))
de_spm = ' '.join(sp.EncodeAsPieces(de))
spm_support.append((en_spm, de_spm))
for _, para_tuple in enumerate(query_sents):
en, de = para_tuple
en_spm = ' '.join(sp.EncodeAsPieces(en))
de_spm = ' '.join(sp.EncodeAsPieces(de))
spm_query.append((en_spm, de_spm))
if domain_sample_info['status'] == 'seen':
scores = domain_sample_info[split_type + '-score']
write_sents(spm_support, spm_dir, 'support', 'en', 'de', scores=scores.support)
write_sents(spm_query, spm_dir, 'query', 'en', 'de', scores=scores.query)
else:
write_sents(spm_support, spm_dir, 'support', 'en', 'de')
write_sents(spm_query, spm_dir, 'query', 'en', 'de')
# Split the domain data into D_meta-train & D_meta-test
def meta_dataset_split(meta_train_task_N, meta_test_task_N, domains, support_tokens, query_tokens, split_dir, spm_model, data_dir):
domain_type = []
for key in domains.keys():
domain_type += [(i,key) for i in domains[key]]
for domain,seen_status in domain_type:
used_data_indices = []
logging.info("[Domain]:" + domain + "| [Type] " + seen_status + "| Start creating split...")
        # Per-domain output directory (keep the base split_dir unchanged across domains).
        domain_split_dir = os.path.join(split_dir, domain, 'meta_split')
domain_sample_info = {
'status': seen_status,
'support': str(support_tokens),
'query': str(query_tokens),
'meta-train-task': meta_train_task_N,
'meta-train': None,
'meta-train-score': None,
'meta-test-task': meta_test_task_N,
'meta-test': None,
'meta-test-score': None,
'meta-dev': None
}
para_data, score_df = read_para_data(domain, seen_status, data_dir)
domain_info_df = build_corpus_info(para_data, score_df)
# Must keep same for meta-test dataset
logging.info("[Meta Test Support]")
mtest_support = split_task_by_tokens(domain_info_df, meta_test_task_N * support_tokens)
logging.info("[Meta Test Query]")
used_data_indices += mtest_support
mtest_query = split_task_by_tokens(
info_df=domain_info_df,
sample_tokens=meta_test_task_N * query_tokens,
used_index=used_data_indices
)
used_data_indices += mtest_query
domain_sample_info['meta-test'] = MetaData(support=mtest_support, query=mtest_query)
# Rest ~ Meta-Train
if seen_status == 'seen': # Only seen domains need D_meta-train
domain_sample_info['meta-test-score'] = MetaData(
support=[domain_info_df.loc[i].diff_score for i in mtest_support],
query=[domain_info_df.loc[i].diff_score for i in mtest_query]
)
logging.info("[Meta Train Support]")
mtrain_support = split_task_by_tokens(
info_df=domain_info_df,
sample_tokens=meta_train_task_N * support_tokens,
used_index=used_data_indices
)
used_data_indices += mtrain_support
logging.info("[Meta Train Query]")
mtrain_query = split_task_by_tokens(
info_df=domain_info_df,
sample_tokens=meta_train_task_N * query_tokens,
used_index=used_data_indices
)
used_data_indices += mtrain_query
domain_sample_info['meta-train'] = MetaData(support=mtrain_support, query=mtrain_query)
domain_sample_info['meta-train-score'] = MetaData(
support=[domain_info_df.loc[i].diff_score for i in mtrain_support],
query=[domain_info_df.loc[i].diff_score for i in mtrain_query]
)
            write_split_data(para_data, domain_sample_info, domain_split_dir, 'meta-train', spm_model, is_spm=True)
# Save sampling info
logging.info("Check unused data for meta-dev")
mdev = split_dev_data(all_indices=list(range(0, len(para_data))), used_indices=used_data_indices)
domain_sample_info['meta-dev'] = mdev
logging.info("Save domain info csv & dataset info json")
        pathlib.Path(domain_split_dir).mkdir(parents=True, exist_ok=True)
        domain_info_df.to_csv(os.path.join(domain_split_dir, 'domain_info_all.csv'))
        dump_dict_to_file(domain_sample_info, domain_split_dir)
        # Write sampled tok file & SPM file
        logging.info("Write static files to {}".format(domain_split_dir))
        write_split_data(para_data, domain_sample_info, domain_split_dir, 'meta-test', spm_model, is_spm=True)
        write_dev_data(para_data, domain_sample_info, domain_split_dir, 'meta-dev', spm_model, is_spm=True)
logging.info("========Done Current Domain========")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Adaptation distance scoring.')
parser.add_argument('--data-path', help='domain corpus', required=True)
parser.add_argument('--split-dir', help='path to save meta data split', required=True)
parser.add_argument('--spm-model', help='sentencepiece model path', required=True)
parser.add_argument('--k-support', metavar='N', type=int, help='support set tokens (K)', required=True)
parser.add_argument('--k-query', metavar='N', type=int, help='query set tokens (K)', required=True)
parser.add_argument('--meta_train_task', metavar='N', type=int, help='number of meta-train tasks', required=True)
parser.add_argument('--meta_test_task', metavar='N', type=int, help='number of meta-test tasks', required=True)
parser.add_argument('--unseen-domains', nargs="+", default=["bible"], help='unseen domains',required=True)
    parser.add_argument('--seen-domains', nargs="+", default=["emea"], help='seen domains', required=True)
args = parser.parse_args()
DATA_DIR = args.data_path
SPLIT_DIR = args.split_dir
SPM_MODEL_PATH = args.spm_model
init_logger(SPLIT_DIR)
domains = {
'unseen': args.unseen_domains,
'seen': args.seen_domains
}
k_support = args.k_support
k_query = args.k_query
meta_train_task_N = args.meta_train_task
meta_test_task_N = args.meta_test_task
logging.info("[Domains]: {}".format(domains['unseen'] + domains['seen']))
logging.info("[Toknes]: support-{}k, query-{}k".format(k_support, k_query))
meta_dataset_split(meta_train_task_N=meta_train_task_N,
meta_test_task_N=meta_test_task_N,
domains=domains,
support_tokens=k_support * 1000,
query_tokens=k_query * 1000,
split_dir=SPLIT_DIR,
spm_model=SPM_MODEL_PATH,
data_dir=DATA_DIR
)
|
1714812
|
from typing import Tuple
import numpy as np
from qiskit.opflow.primitive_ops.pauli_sum_op import PauliSumOp
from qc_grader.grade import grade_and_submit
from qc_grader.util import paulisumop_to_json
criteria: dict = {}
def grade_lab4_ex1(matmult_result: complex) -> None:
grade_and_submit(matmult_result, 'lab4', 'ex1')
def grade_lab4_ex2(shot_result: complex) -> None:
grade_and_submit(shot_result, 'lab4', 'ex2')
def grade_lab4_ex3(H_tfi: PauliSumOp) -> None:
answer = {
'qubit_op': paulisumop_to_json(H_tfi)
}
grade_and_submit(answer, 'lab4', 'ex3')
def grade_lab4_ex4(tfi_result: float) -> None:
grade_and_submit(tfi_result, 'lab4', 'ex4')
|
1714825
|
import torch.nn as nn
class FlattenLayer(nn.Module):
def __init__(self):
super(FlattenLayer, self).__init__()
def forward(self, inputs):
return inputs.view(inputs.size(0), -1)
|
1714836
|
import datetime
from sqlalchemy import text
from sqlalchemy.orm import relationship
from sqlalchemy import Integer, Column, String, DateTime, ForeignKey
from urllib.parse import urlparse
from pygmy.database.base import Model
from pygmy.database.dbutil import dbconnection, utcnow
class ClickMeta(Model):
__tablename__ = 'clickmeta'
id = Column(Integer, primary_key=True, autoincrement=True)
# TODO AMIT: should be enum
link_id = Column(Integer, ForeignKey('link.id'))
link = relationship(
'Link', back_populates='clickmeta', foreign_keys=[link_id])
country = Column(String(5), nullable=True)
referrer = Column(String(100), nullable=True)
created_at = Column(DateTime(timezone=False), server_default=utcnow())
class ClickMetaManager:
"""Click meta manager"""
def __init__(self):
self.clickmeta = None
self._oldest_link_date = None
self._referrer_aggregate = None
self._country_aggregate = None
self._date_aggregate = None
@staticmethod
def _format_referrer(referrer):
ref = urlparse(referrer)
if not ref.netloc:
return None
referrer = "{}://{}{}".format(
ref.scheme or 'http', ref.netloc, ref.path)
if len(referrer) > 100:
return None
return referrer
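    # Hedged examples of the normalization above (values are illustrative):
    #   _format_referrer("https://example.com/page?q=1#frag") -> "https://example.com/page"
    #   _format_referrer("not-a-url") -> None  (no netloc)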
@staticmethod
def _date_display_format(base):
date_format_map = dict(minute="%H:%M",
hour="%H:00",
day="%m-%d",
month="%Y-%m-%d")
return date_format_map.get(base)
@staticmethod
def psql_date_format(date_str):
psql_format = date_str
date_arg_map = {
'%Y': 'YYYY',
'%m': 'MM',
'%d': 'DD',
'%H': 'HH24',
'%M': 'MI',
'%S': 'SS'
}
for dt_arg, dt_val in date_arg_map.items():
psql_format = psql_format.replace(dt_arg, dt_val)
return psql_format
@staticmethod
def mysql_date_format(date_str):
psql_format = date_str
date_arg_map = {
'%M': '%i',
}
for dt_arg, dt_val in date_arg_map.items():
psql_format = psql_format.replace(dt_arg, dt_val)
return psql_format
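    # Hedged examples of the strftime-to-SQL conversions above (illustrative):
    #   psql_date_format("%Y-%m-%d") -> "YYYY-MM-DD"   (for TO_CHAR)
    #   psql_date_format("%H:%M")    -> "HH24:MI"
    #   mysql_date_format("%H:%M")   -> "%H:%i"        (for DATE_FORMAT)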
@property
def past_30th_date(self):
d = (datetime.datetime.utcnow() - datetime.timedelta(days=30))
day_30 = d.replace(microsecond=0).strftime("%Y-%m-%d %H:%M:%S")
return day_30
@dbconnection
def add(self, db, link_id, country, referrer, user_agent=None):
referrer = self._format_referrer(referrer)
insert_values = dict(link_id=link_id,
country=country,
referrer=referrer)
if db.bind.name == 'mysql':
insert_values['created_at'] = datetime.datetime.utcnow()
self.clickmeta = ClickMeta(**insert_values)
db.add(self.clickmeta)
db.commit()
return self.clickmeta
@dbconnection
def get_by_link(self, db, link_id):
"""Pass link id and get clickmeta object"""
        return db.query(ClickMeta).filter_by(link_id=link_id).all()
@dbconnection
def link_hit_count(self, db, params):
"""Get hit count for the passed link id
:param db:
:param params:
:return:
"""
qry = text("SELECT COUNT(*) FROM clickmeta WHERE link_id=:link_id")
return db.execute(qry, params).first()[0]
@dbconnection
def country_aggregate(self, db, params):
"""Country aggregate for the passed link id
:param db:
:param params:
:return:
"""
if self._country_aggregate is None:
qry = text("""
SELECT COALESCE(country, 'others'), COUNT(*)
From clickmeta
WHERE link_id=:link_id
GROUP BY country
ORDER BY count(*) DESC""")
self._country_aggregate = dict(db.execute(qry, params).fetchall())
return self._country_aggregate
@dbconnection
def referrer_aggregate(self, db, params):
"""Referrer aggregate for the passed link id
:param db:
:param params:
:return:
"""
if self._referrer_aggregate is None:
qry = text("""
SELECT COALESCE(referrer, 'others'), COUNT(*) FROM clickmeta
WHERE link_id=:link_id
GROUP BY referrer
ORDER BY count(*) DESC""")
self._referrer_aggregate = dict(db.execute(qry, params).fetchall())
return self._referrer_aggregate
@dbconnection
def date_aggregate(self, db, date_part, params, base):
"""Datetime aggregation for a link id.
:param db:
:param date_part:
:param params:
:param base:
:return:
"""
if self._date_aggregate is None:
qry = text("""
SELECT {} AS click_on, COUNT(*)
FROM clickmeta
WHERE link_id=:link_id AND created_at >= :created_at
GROUP BY click_on
ORDER BY click_on DESC
""".format(date_part))
data = db.execute(qry, params).fetchall()
            if base == 'month':
                self._date_aggregate = dict(
                    (datetime.datetime.strptime(
                        k, '%Y-%m-%d').strftime('%d %b, %Y'), val)
                    for k, val in data)
            elif base == 'day':
                self._date_aggregate = dict(
                    (datetime.datetime.strptime(
                        k, '%m-%d').strftime('%d %b'), val)
                    for k, val in data)
            else:
                self._date_aggregate = dict(data)
return self._date_aggregate
@dbconnection
def oldest_click_date(self, db, params):
"""Get the oldest click info date for a link.
:param db:
:param params:
:return: date str
"""
if self._oldest_link_date is None:
qry = text("""
SELECT created_at FROM clickmeta
WHERE link_id=:link_id
ORDER BY created_at ASC
LIMIT 1
""")
link_date = db.execute(qry, params).first()
if link_date is None:
return None
link_date = link_date[0]
if isinstance(link_date, datetime.datetime):
link_date = link_date.strftime("%Y-%m-%d %H:%M:%S")
self._oldest_link_date = link_date
return self._oldest_link_date
@dbconnection
def click_stats(self, db, link_id):
"""Function to get click aggregation data based on:
1. Country
2. Referrer
3. Datetime: It can be based on minute, hour, or days depending
on how old the link clickstats are.
:param db:
:param link_id:
:return: dict
"""
bind_param = dict(link_id=link_id,
created_at=self.past_30th_date)
assert db.bind.name in ['sqlite', 'mysql', 'postgresql']
db_date_function_mapper = dict(sqlite='STRFTIME',
postgresql='TO_CHAR',
mysql='DATE_FORMAT')
date_func = db_date_function_mapper.get(db.bind.name)
# if None, it means no record found for the link
oldest_link_date = self.oldest_click_date(bind_param)
if oldest_link_date is None:
return {}
base = time_base(oldest_link_date)
date_format = self._date_display_format(base)
if db.bind.name == 'postgresql':
date_format = self.psql_date_format(date_format)
date_part = "{}(created_at, '{}')".format(date_func, date_format)
elif db.bind.name == 'mysql':
date_format = self.mysql_date_format(date_format)
date_part = "{}(created_at, '{}')".format(date_func, date_format)
else:
date_part = "{}('{}', created_at)".format(date_func, date_format)
return dict(
hits=self.link_hit_count(bind_param),
country_hits=self.country_aggregate(bind_param),
referrer_hits=self.referrer_aggregate(bind_param),
timestamp_hits=self.date_aggregate(date_part, bind_param, base),
time_base=base
)
def time_base(date_str):
"""Returns base for the time string `date_str` passed.
:param date_str: str
:return: str
"""
old_link_date = datetime.datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S')
time_delta = datetime.datetime.utcnow() - old_link_date
delta_seconds = time_delta.total_seconds()
if (delta_seconds / (60*60)) < 1:
return 'minute'
elif (delta_seconds / (60*60*24)) < 1:
return 'hour'
elif (delta_seconds / (60*60*24*30)) < 1:
return 'day'
else:
return 'month'
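# Hedged examples (relative to utcnow, so the dates are illustrative):
#   a click first seen ~45 minutes ago -> 'minute'
#   a click first seen ~5 hours ago    -> 'hour'
#   a click first seen ~10 days ago    -> 'day'
#   anything older than ~30 days       -> 'month'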
|
1714905
|
import os
import sys
import pytest
from django.core.exceptions import ImproperlyConfigured
from .utils import render_template
# Needed for the custom filter tests
sys.path.append(os.path.dirname(__file__))
@pytest.mark.django_db
def test_with_custom_filter_simple(simple_content, settings):
settings.TINYCONTENT_FILTER = 'utils.toupper'
assert "THIS IS A TEST." == render_template(
"{% tinycontent_simple 'foobar' %}"
)
@pytest.mark.django_db
def test_with_custom_filter_complex(simple_content, settings):
settings.TINYCONTENT_FILTER = 'utils.toupper'
assert "THIS IS A TEST." == render_template(
"{% tinycontent 'foobar' %}"
"Not found."
"{% endtinycontent %}"
)
@pytest.mark.django_db
def test_with_custom_filter_simple_with_html(html_content, settings):
settings.TINYCONTENT_FILTER = 'utils.toupper'
assert "<STRONG>&</STRONG>" == render_template(
"{% tinycontent_simple 'html' %}"
)
@pytest.mark.django_db
def test_with_custom_filter_complex_with_html(html_content, settings):
settings.TINYCONTENT_FILTER = 'utils.toupper'
assert "<STRONG>&</STRONG>" == render_template(
"{% tinycontent 'html' %}"
"Not found."
"{% endtinycontent %}"
)
@pytest.mark.django_db
def test_with_bad_custom_filter(simple_content, settings):
settings.TINYCONTENT_FILTER = 'utils.ohnothisisfake'
with pytest.raises(ImproperlyConfigured):
render_template("{% tinycontent_simple 'foobar' %}")
@pytest.mark.django_db
def test_with_chained_custom_filters(simple_content, settings):
settings.TINYCONTENT_FILTER = [
'utils.toupper',
'utils.truncate_ten',
'utils.reverse',
]
assert "A SI SIHT" == render_template(
"{% tinycontent_simple 'foobar' %}"
)
|
1714968
|
from production.envs.time_calc import *
"""
Heuristic Decision Agents
"""
class Decision_Heuristic(object):
def __init__(self, env, statistics, parameters, resources, agents, agents_resource):
self.statistics = statistics
self.parameters = parameters
self.resources = resources
self.agents = agents
self.env = env
self.agents_resource = agents_resource
agents.update({'Decision_Heuristic_Transp' : []})
agents.update({'Decision_Heuristic_Machine' : []})
def act(self, states):
raise NotImplementedError
def get_next_machine_min_buffer_fill(self, order, statistics, parameters, resources):
"""Return next machine for processing with smallest buffer fill if multiple machines are in same machine group. Used for Transp-Heuristics."""
result_machine = None
        min_buffer_fill = float('inf')
if order.get_next_step().type == "sink":
return order.get_next_step(), min_buffer_fill
for mach in [x for x in resources['machines'] if order.get_next_step().machine_group == x.machine_group]:
if len(mach.buffer_in) < min_buffer_fill:
result_machine = mach
min_buffer_fill = len(mach.buffer_in)
return result_machine, min_buffer_fill
class Decision_Heuristic_Transp_NJF(Decision_Heuristic):
"""Selects the next transportation order for a transportation agent based on the distance to the order pickup location"""
def __init__(self, env, statistics, parameters, resources, agents, agents_resource):
super(self.__class__, self).__init__(env=env, statistics=statistics, parameters=parameters, resources=resources, agents=agents, agents_resource=agents_resource)
agents['Decision_Heuristic_Transp'].append(self)
print("NJF_Transp_Decision created")
def act(self, states):
        if states is None:
return None, None
result_order = None
result_dest = None
min_distance = float('inf')
for order in states:
if order.get_next_step().is_free_machine_group() and not order.reserved:
distance = self.parameters['TRANSP_TIME'][self.agents_resource.current_location.id][order.current_location.id]
if distance < min_distance:
min_distance = distance
result_order = order
result_dest, _ = self.get_next_machine_min_buffer_fill(order=order, statistics=self.statistics, parameters=self.parameters, resources=self.resources)
result_order = states.pop(states.index(result_order))
result_order.reserved = True
return result_order, result_dest
class Decision_Heuristic_Transp_EMPTY(Decision_Heuristic):
"""Selects the next transportation order for a transportation agent based on the total order waiting time"""
def __init__(self, env, statistics, parameters, resources, agents, agents_resource):
super(self.__class__, self).__init__(env=env, statistics=statistics, parameters=parameters, resources=resources, agents=agents, agents_resource=agents_resource)
agents['Decision_Heuristic_Transp'].append(self)
print("EMPTY_Transp_Decision created")
def act(self, states):
        if states is None:
return None, None
result_order = None
result_dest = None
        min_fill_level = float('inf')
for order in states:
if order.get_next_step().is_free_machine_group() and not order.reserved:
dest, fill_level = self.get_next_machine_min_buffer_fill(order=order, statistics=self.statistics, parameters=self.parameters, resources=self.resources)
if fill_level < min_fill_level:
min_fill_level = fill_level
result_order = order
result_dest = dest
result_order = states.pop(states.index(result_order))
result_order.reserved = True
return result_order, result_dest
class Decision_Heuristic_Transp_FIFO(Decision_Heuristic):
"""Selects the next transportation order for a transportation agent based on the total order waiting time"""
def __init__(self, env, statistics, parameters, resources, agents, agents_resource):
super(self.__class__, self).__init__(env=env, statistics=statistics, parameters=parameters, resources=resources, agents=agents, agents_resource=agents_resource)
agents['Decision_Heuristic_Transp'].append(self)
print("FIFO_Transp_Decision created")
def act(self, states):
        if states is None:
return None, None
for order in sorted(states, key=lambda x: x.id, reverse=False): # FIFO sort based on ID
if order.get_next_step().is_free_machine_group() and not order.reserved:
order = states.pop(states.index(order))
order.reserved = True
mach, _ = self.get_next_machine_min_buffer_fill(order=order, statistics=self.statistics, parameters=self.parameters, resources=self.resources)
return order, mach
return None, None
class Decision_Heuristic_Machine_FIFO(Decision_Heuristic):
"""Selects the next processing order for a machine agent based on the total order waiting time"""
def __init__(self, env, statistics, parameters, resources, agents, agents_resource):
super(self.__class__, self).__init__(env=env, statistics=statistics, parameters=parameters, resources=resources, agents=agents, agents_resource=agents_resource)
agents['Decision_Heuristic_Machine'].append(self)
print("FIFO_Machine_Decision created")
def act(self, states):
        if states is None:
return None
for order in states:
return [order]
|
1715009
|
import os
from dotenv import load_dotenv
load_dotenv()
def getenv(id, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are str."""
return os.getenv(id, default=default)
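# Minimal usage sketch (assumes a .env file or exported variables; names are illustrative):
#     DATABASE_URL = getenv("DATABASE_URL", "sqlite:///local.db")
#     DEBUG = getenv("DEBUG", "false").lower() == "true"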
|
1715053
|
from django.contrib import admin
from .models import Post, Like, Dislike, PostComment
admin.site.register(Post)
admin.site.register(Dislike)
admin.site.register(Like)
admin.site.register(PostComment)
|
1715059
|
from django.db.backends.base.creation import BaseDatabaseCreation
import sys
class DatabaseCreation(BaseDatabaseCreation):
def _get_test_db_name(self):
return self.connection.settings_dict['TEST']['NAME']
def _create_test_db(self, verbosity, autoclobber, keepdb=False):
test_database_name = self._get_test_db_name()
self.connection.db_name = test_database_name
if keepdb:
return test_database_name
try:
self.connection.schema_editor().create_db(test_database_name)
except Exception as e:
if keepdb:
return test_database_name
sys.stderr.write(
"Got an error creating the test non-database backend: %s\n" % e)
if not autoclobber:
                confirm = input(
"Type 'yes' if you would like to try deleting the test "
"non-database backend '%s', or 'no' to cancel: " % test_database_name)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test non-db backend for alias %s..." % (
self._get_database_display_str(verbosity, test_database_name),
))
self.connection.schema_editor().delete_db(test_database_name)
self.connection.schema_editor().create_db(test_database_name)
except Exception as e:
sys.stderr.write(
"Got an error recreating the test non-db backend: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
return test_database_name
def _destroy_test_db(self, test_database_name, verbosity):
self.connection.schema_editor().delete_db(test_database_name)
def test_db_signature(self):
"""
Returns a tuple with elements of self.connection.settings_dict (a
DATABASES setting value) that uniquely identify a database
accordingly to the RDBMS particularities.
"""
settings_dict = self.connection.settings_dict
return (
settings_dict['ENGINE'],
settings_dict['NAME']
)
|
1715072
|
from rich import box
from rich.table import Table
from rich import print
from nubia import context
from nubia.internal.commands.builtin import Exit
from nubia.internal.commands.help import HelpCommand
from nubia.internal.exceptions import UnknownCommand, CommandError
class _Exit(Exit):
cmd = ["exit"]
def __init__(self):
super(Exit, self).__init__()
def run_interactive(self, cmd, args, raw):
ctx = context.get_context()
ctx.console.set_alt_screen(False)
raise EOFError()
class _Help(HelpCommand):
cmds = {"help": HelpCommand.HELP}
def __init__(self):
super(HelpCommand, self).__init__()
# Overwrite function to use Rich instead of PrettyTable. Also remove built-in table
def run_interactive(self, _0, args, _2):
if args:
args = args.split()
try:
cmd_instance = self.registry.find_command(args[0])
if not cmd_instance:
raise UnknownCommand("Command `{}` is " "unknown".format(args[0]))
else:
help_msg = cmd_instance.get_help(args[0].lower(), *args)
print(help_msg)
except CommandError as e:
                print("[red]{}[/red]".format(e))
return 1
else:
table = Table(style="#52311A", header_style="bold #8E562E", box=box.ROUNDED)
table.add_column("Command", style="yellow")
table.add_column("Description", style="green")
commands = {
cmd_name: cmd
for cmd in self.registry.get_all_commands()
for cmd_name in cmd.get_command_names()
}
for cmd_name in sorted(commands):
cmd = commands[cmd_name]
cmd_help = cmd.get_help(cmd_name)
table.add_row(cmd_name, cmd_help)
print(table)
return 0
|
1715093
|
if sm.hasQuestCompleted(31351): # Clean up 7
sm.warp(240092100, 2)
elif sm.hasQuestCompleted(31348):
sm.warp(240092101, 2)
else:
sm.warp(240092100, 2)
|
1715100
|
import csv
import json
from functools import reduce
from tqdm import tqdm
file = open("report.txt", "w")
def show(text = ""):
print(text)
file.write(text + "\n")
print("Loading config...")
config = json.load(open("config.json"))
do_usda = config["usda"]
do_foodon = config["foodon"]
do_recipes = config["recipes"]
do_links = config["links"]
print("Loading files...")
if do_usda:
print("Loading USDA data...")
usda_nutrdef = list(csv.reader(open("data/NUTR_DEF.txt", encoding="latin-1")))
usda_fooddes = list(csv.reader(open("data/FOOD_DES.txt", encoding="latin-1")))
usda_csv = list(csv.reader(open("data/usda-pairs.csv")))
if do_foodon:
print("Loading FoodOn data...")
foodon_csv = list(csv.reader(open("data/foodon-pairs.csv")))
if do_recipes:
print("Loading Recipe1M data...")
layer1 = json.load(open("data/layer1.json"))
det_ingrs = json.load(open("data/det_ingrs.json"))
if do_links:
print("Loading linkage data...")
usda_links = open("data/usda-links.trig").readlines()
foodon_links = open("data/foodon-links.trig").readlines()
print("All data loaded.")
print()
print("Processing...")
if do_usda:
print("Doing USDA...")
usda_entities = len(usda_fooddes)
usda_nutr_count = len(usda_nutrdef)
usda_entities_used = len(usda_csv) - 1
if do_foodon:
print("Doing foodon...")
foodon_entities = len(foodon_csv) - 1
print("Doing recipe1m...")
if do_recipes:
recipe1m_recipes = len(layer1)
recipe1m_ings = 0
recipe1m_ings_uniq_set = set()
print("Processing the layer1 file...")
for recipe in tqdm(layer1):
recipe1m_ings += len(recipe["ingredients"])
for ing in recipe["ingredients"]:
recipe1m_ings_uniq_set.add(ing["text"])
recipe1m_ings_uniq = len(recipe1m_ings_uniq_set)
recipe1m_ings_valid = 0
recipe1m_ings_resolved_set = set()
print("Processing the det_ingrs file...")
for recipe in tqdm(det_ingrs):
for i in range(len(recipe["valid"])):
if recipe["valid"][i]:
recipe1m_ings_valid += 1
recipe1m_ings_resolved_set.add(recipe["ingredients"][i]["text"])
recipe1m_ings_resolved = len(recipe1m_ings_resolved_set)
if do_links:
print("Doing links...")
usda_link_count = 0
for line in usda_links:
if "owl:equivalentClass" in line:
usda_link_count += 1
foodon_link_count = 0
for line in foodon_links:
if "owl:equivalentClass" in line:
foodon_link_count += 1
print("Done processing.")
print()
if do_usda:
show("USDA: ")
show("Number of USDA entities: {0}".format(usda_entities))
show("Number of USDA nutrient types: {0}".format(usda_nutr_count))
show("Number of USDA entities considered: {0}".format(usda_entities_used))
if do_foodon:
show()
show("FoodOn: ")
show("Number of FoodOn entities considered: {0}".format(foodon_entities))
if do_recipes:
show()
show("Recipes: ")
show("Number of recipes: {0}".format(recipe1m_recipes))
show("Number of ingredients: {0}".format(recipe1m_ings))
show("Number of valid ingredients: {0}".format(recipe1m_ings_valid))
show("Percentage of valid ingredients: {0:.2f}%".format(100 * recipe1m_ings_valid/ recipe1m_ings))
show("Number of unique ingredients: {0}".format(recipe1m_ings_uniq))
show("Number of resolved ingredient names: {0}".format(recipe1m_ings_resolved))
if do_links:
show()
show("Linkages:")
show("Number of USDA links made: {0}".format(usda_link_count))
show("Number of FoodOn links made: {0}".format(foodon_link_count))
if do_recipes and do_links:
show()
show("Additional linkage stats: ")
show("Percentage ingredients linked to USDA: {0:.2f}%".format(100 * usda_link_count / recipe1m_ings_resolved))
show("Percentage ingredients linked to FoodOn: {0:.2f}%".format(100 * foodon_link_count / recipe1m_ings_resolved))
|
1715146
|
import abc
from typing import Iterable, Optional, Type, Union
from .adapters import (TensorAdapter, _numpy_adapter, _pytorch_adapter)
from .layout import (TensorLayout, _dataclass_layout)
from .utils import hybridmethod
class DataClassTensorMixin(abc.ABC):
@hybridmethod
def to_numpy(cls,
self,
obj=None,
*,
tensor_layout: Optional[Type[TensorLayout]] = None,
dtype = None,
batch: bool = False,
batch_size: Optional[int] = None):
layout = tensor_layout or cls.tensor_layout()
return _to_tensor(_numpy_adapter,
layout,
obj or self,
dtype=cls._resolve_dtype(dtype),
batch=batch,
batch_size=batch_size)
@classmethod
def from_numpy(cls,
tensor,
*,
tensor_layout: Optional[Type[TensorLayout]]=None,
batch: bool = False,
batch_size: Optional[int] = None):
return _from_tensor(_numpy_adapter,
tensor_layout or cls.tensor_layout(),
tensor,
batch=batch,
batch_size=batch_size)
@hybridmethod
def to_torch(cls,
self,
obj=None,
*,
tensor_layout: Optional[Type[TensorLayout]] = None,
dtype = None,
batch: bool = False,
batch_size: Optional[int] = None):
layout = tensor_layout or cls.tensor_layout()
return _to_tensor(_pytorch_adapter,
layout,
obj or self,
dtype=cls._resolve_dtype(dtype),
batch=batch,
batch_size=batch_size)
@classmethod
def from_torch(cls,
tensor,
*,
tensor_layout: Optional[Type[TensorLayout]] = None,
batch: bool = False,
batch_size: Optional[int] = None):
return _from_tensor(_pytorch_adapter,
tensor_layout or cls.tensor_layout(),
tensor,
batch=batch,
batch_size=batch_size)
@classmethod
def tensor_layout(cls):
return _dataclass_layout(cls)
@classmethod
def _resolve_dtype(cls, dtype):
return dtype or cls._default_tensor_dtype or "float32"
def dataclass_tensor(_cls=None, *, dtype="float32"):
"""
Based on the code in the `dataclasses` module to handle optional-parens
decorators. See example below:
@dataclass_tensor
@dataclass_tensor(dtype="int64")
class Example:
...
"""
def wrap(cls):
return _process_class(cls, dtype)
if _cls is None: return wrap
return wrap(_cls)
def _process_class(cls, dtype):
cls.to_numpy = hybridmethod(DataClassTensorMixin.to_numpy.__func__)
cls.from_numpy = classmethod(DataClassTensorMixin.from_numpy.__func__)
cls.to_torch = hybridmethod(DataClassTensorMixin.to_torch.__func__)
cls.from_torch = classmethod(DataClassTensorMixin.from_torch.__func__)
cls.tensor_layout = classmethod(DataClassTensorMixin.tensor_layout.__func__)
cls._default_tensor_dtype = dtype
cls._resolve_dtype = classmethod(DataClassTensorMixin._resolve_dtype.__func__)
DataClassTensorMixin.register(cls)
return cls
def config(shape: Optional[Iterable[int]]):
return {"shape": shape}
def _to_tensor(adapter: TensorAdapter,
layout: Type[TensorLayout],
val,
*,
dtype="float",
batch: bool = False,
batch_size: Optional[int] = None):
batch = batch or batch_size is not None
shape = len(layout)
if batch:
batch_size = batch_size or (len(val) if hasattr(val, "__len__") else 0)
if batch_size == 0:
val = list(val)
batch_size = len(val)
shape = (batch_size, shape)
tensor = adapter.zeros(shape, dtype=dtype)
if not batch:
layout.write(adapter, 0, tensor, val)
else:
for i, vi in enumerate(val):
layout.write(adapter, 0, tensor[i], vi)
return tensor
def _from_tensor(adapter: TensorAdapter,
layout: Type[TensorLayout],
tensor,
*,
batch: bool = False,
batch_size: Optional[int] = None):
batch = batch or batch_size is not None
if not batch:
return layout.read(adapter, 0, tensor)
batch_size = batch_size or (len(tensor) if hasattr(tensor, "__len__") else 0)
result = [None]*batch_size
if batch_size != 0:
for i, t in enumerate(tensor):
result[i] = layout.read(adapter, 0, t)
else:
for t in tensor:
result.append(layout.read(adapter, 0, t))
return result
|
1715197
|
import unittest
from numpy.testing import assert_allclose
import numpy as np
from sklearn.datasets import load_iris, make_multilabel_classification, load_diabetes
from skelm import ELMRegressor, ELMClassifier
import warnings
from sklearn.exceptions import DataDimensionalityWarning, DataConversionWarning
class TestAcceptance(unittest.TestCase):
def setUp(self) -> None:
self.data_class = load_iris(return_X_y=True)
self.data_ml = make_multilabel_classification()
self.data_reg = load_diabetes(return_X_y=True)
warnings.simplefilter("ignore", DataDimensionalityWarning)
warnings.simplefilter("ignore", DataConversionWarning)
def test_SineWave_Solves(self):
"""A highly non-linear regression problem, with added strong noise.
"""
X = np.linspace(-1, 1, num=1000)[:, None]
Y = np.sin(16 * X) * X + 0.2*np.random.randn(1000)[:, None]
elm = ELMRegressor(random_state=0)
elm.fit(X, Y)
Yt = elm.predict(X)
MSE = np.mean((Y - Yt) ** 2)
self.assertLess(MSE, 0.3)
def test_Xor_OneNeuron_Solved(self):
"""ELM should be able to solve XOR problem.
"""
X = np.array([[0, 0],
[1, 1],
[1, 0],
[0, 1]])
Y = np.array([1, 1, -1, -1])
elm = ELMClassifier(n_neurons=3, random_state=0)
elm.fit(X, Y)
Yh = elm.predict(X)
self.assertGreater(Yh[0], 0)
self.assertGreater(Yh[1], 0)
self.assertLess(Yh[2], 0)
self.assertLess(Yh[3], 0)
def test_ELMClassifier_ReportedScore_ActuallyIsClassificationScore(self):
X, Y = self.data_class
Yr = np.vstack((Y == 0, Y == 1, Y == 2)).T
elm_c = ELMClassifier(random_state=0).fit(X, Y)
elm_r = ELMRegressor(random_state=0).fit(X, Yr)
Yc_hat = elm_c.predict(X)
Yr_hat = elm_r.predict(X).argmax(1)
assert_allclose(Yc_hat, Yr_hat)
def test_ELMClassifier_MultilabelClassification_Works(self):
X, Y = self.data_ml
elm_c = ELMClassifier(random_state=0).fit(X, Y)
elm_r = ELMRegressor(random_state=0).fit(X, Y)
Yc_hat = elm_c.predict(X)
Yr_hat = (elm_r.predict(X) >= 0.5).astype(int)
assert_allclose(Yc_hat, Yr_hat)
def test_RegularizationL2_DifferentValue_ChangesPrediction(self):
X, Y = self.data_reg
Yh_1 = ELMRegressor(alpha=1e-7, random_state=0).fit(X, Y).predict(X)
Yh_2 = ELMRegressor(alpha=1e+3, random_state=0).fit(X, Y).predict(X)
self.assertFalse(np.allclose(Yh_1, Yh_2))
def test_Default_SetNumberOfNeurons(self):
X, y = self.data_reg
elm5 = ELMRegressor(n_neurons=5, random_state=0).fit(X, y)
elm50 = ELMRegressor(n_neurons=50, random_state=0).fit(X, y)
score5 = elm5.score(X, y)
score50 = elm50.score(X, y)
self.assertGreater(score50, score5)
self.assertGreater(score50, 0.33)
|
1715245
|
import bpy
import sys
import mathutils as mathU
# boop = 'D:/PycharmProjects/Lobster/src/'
boop = "/Users/maxbaylis/Lobster/src/"
if not (boop in sys.path):
sys.path.append(boop)
from ..BlenderAPI.BlenderObjects import to_quaternion
from .. import BlenderAPI as bld
import unittest
class BlenderCameraTest(unittest.TestCase):
def setUp(self):
# delete all objects
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete()
def test_create_no_reference(self):
num_objects_before = len(bpy.data.objects)
obj = bld.BlenderCamera()
num_objects_after = len(bpy.data.objects)
self.assertGreater(num_objects_after, num_objects_before, 'Number of objects did not increase!')
self.assertTrue('Camera' in bpy.data.objects.keys())
def tearDown(self):
# delete all objects
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete()
def test_spin(self):
cam = bld.BlenderCamera()
q = cam.get_rot()
focal_origin = mathU.Vector([0, 0, -1])
t = q.to_matrix()
focal_axis = t * focal_origin
focal_axis.normalize()
# test positive
q_rot = to_quaternion(90, *focal_axis)
q_new = q*q_rot
cam.spin(90)
self.assertEqual(q_new, cam.get_rot(), 'Camera spin not correct!')
# test negative
q_rot = to_quaternion(-90, *focal_axis)
q_new = q * q_rot
cam.spin(-90)
self.assertEqual(q_new, cam.get_rot(), 'Camera spin not correct!')
# test zero
q_rot = to_quaternion(0, *focal_axis)
q_new = q * q_rot
cam.spin(0)
self.assertEqual(q_new, cam.get_rot(), 'Camera spin not correct!')
def test_face_towards(self):
# instantiate camera at arbitrary location
cam = bld.BlenderCamera(location=(2.0,4.0,-1.0))
# face origin
cam.face_towards(0.0,0.0,0.0)
q = cam.get_rot()
focal_origin = mathU.Vector([0, 0, -1])
t = q.to_matrix()
focal_axis = t * focal_origin
focal_axis.normalize()
cam_loc_norm = cam.reference.location
cam_loc_norm.normalize()
# camera location should be parallel to vector of focal axis
self.assertAlmostEqual(cam_loc_norm[0], -focal_axis[0], places=5)
self.assertAlmostEqual(cam_loc_norm[1], -focal_axis[1], places=5)
self.assertAlmostEqual(cam_loc_norm[2], -focal_axis[2], places=5)
# face random
cam.face_towards(20.0, -10.0, 0.0)
q = cam.get_rot()
focal_origin = mathU.Vector([0, 0, -1])
t = q.to_matrix()
focal_axis = t * focal_origin
focal_axis.normalize()
cam_loc_norm = cam.reference.location - mathU.Vector((20.0, -10.0, 0.0))
cam_loc_norm.normalize()
# camera location should be parallel to vector of focal axis
self.assertAlmostEqual(cam_loc_norm[0], -focal_axis[0], places=5)
self.assertAlmostEqual(cam_loc_norm[1], -focal_axis[1], places=5)
self.assertAlmostEqual(cam_loc_norm[2], -focal_axis[2], places=5)
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(BlenderCameraTest)
success = unittest.TextTestRunner().run(suite).wasSuccessful()
|
1715277
|
import argparse
import datetime
import functools
import re
import sys
import traceback
import typing
import holidays
import github
class PennHolidays(holidays.UnitedStates):
def _populate(self, year):
super()._populate(year)
# See https://github.com/greenelab/scrum/issues/114
for day in range(26, 32):
self[datetime.date(year, 12, day)] = 'Special Winter Vacation'
holiday_names = {
'Independence Day',
'Labor Day',
'Thanksgiving',
'Christmas Day',
"New Year's Day",
'<NAME>, <NAME>',
'Memorial Day',
'Special Winter Vacation',
}
penn_holidays = PennHolidays()
def get_today() -> datetime.date:
"""
Returns the datetime.date for today. Needed since tests cannot mock a
builtin type: http://stackoverflow.com/a/24005764/4651668
"""
return datetime.date.today()
def is_holiday(date: datetime.date) -> bool:
"""
Return True or False for whether a date is a holiday
"""
name = penn_holidays.get(date)
if not name:
return False
name = name.replace(' (Observed)', '')
return name in holiday_names
def is_workday(date) -> bool:
"""
Return boolean for whether a date is a workday.
"""
if date.weekday() in holidays.WEEKEND:
return False
if is_holiday(date):
return False
return True
@functools.lru_cache()
def issue_title_to_date(title: str) -> typing.Optional[datetime.date]:
"""
Return a datetime.date object from a Scrum issue title.
"""
pattern = re.compile(r'([0-9]{4})-([0-9]{2})-([0-9]{2}):')
match = pattern.match(title)
if not match:
return None
return datetime.date(*map(int, match.groups()))
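# Hedged examples of the title parsing above (dates are illustrative):
#   issue_title_to_date("2021-03-15: e-scrum for Monday, March 15, 2021") -> datetime.date(2021, 3, 15)
#   issue_title_to_date("Unrelated issue title") -> None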
def close_old_issues(issues, lifespan: int):
"""
Close scrum issues older than the number of days specified by lifespan.
"""
lifespan = datetime.timedelta(days=lifespan)
today = get_today()
for issue in issues:
if issue.state == 'closed':
continue
title = issue.title
date = issue_title_to_date(title)
if not date:
continue
if today - date > lifespan:
print('Closing', title, file=sys.stderr)
try:
issue.edit(state='closed')
except Exception:
print('Closing issue failed:\n{}'.format(traceback.format_exc()), file=sys.stderr)
def create_scrum_issue(
repo: github.Repository.Repository,
date: datetime.date,
previous_issue: github.Issue.Issue = None,
) -> typing.Optional[github.Issue.Issue]:
"""
Create a scrum issue for the given date.
If not None, previous_issue is used to set an issue body
that refers to the previous issue.
"""
kwargs = {'title': f"{date}: e-scrum for {date:%A, %B %-d, %Y}"}
if previous_issue:
        kwargs['body'] = 'Preceding e-scrum in {}.'.format(previous_issue.html_url)
print('Creating {title!r}'.format(**kwargs), file=sys.stderr)
try:
return repo.create_issue(**kwargs)
except Exception:
        print('Creating issue failed:\n{}'.format(traceback.format_exc()), file=sys.stderr)
def get_future_dates_without_issues(issues, workdays_ahead: int = 2):
"""
Look through issues and yield the dates of future workdays (includes today)
that don't have open issues.
"""
future_dates = set(get_upcoming_workdays(workdays_ahead))
future_dates -= {issue_title_to_date(x.title) for x in issues}
return sorted(future_dates)
def get_upcoming_workdays(workdays_ahead: int = 2) -> typing.Iterator[datetime.date]:
"""
Return a generator of the next number of workdays specified by
workdays_ahead. The current day is yielded first, if a workday,
and does not count as one of workdays_ahead.
"""
date = get_today()
if is_workday(date):
yield date
i = 0
while i < workdays_ahead:
date += datetime.timedelta(days=1)
if is_workday(date):
yield date
i += 1
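# Hedged usage sketch (results depend on the current date and the holiday calendar):
#     upcoming = list(get_upcoming_workdays(workdays_ahead=2))
#     # e.g. on a Friday this could yield [Friday, Monday, Tuesday]; on a Saturday only the
#     # next two workdays are yielded, since today itself is skipped.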
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--username', default='scrum-lord')
parser.add_argument(
'--token', help='GitHub personal access token for --username')
parser.add_argument('--repository', default='greenelab/scrum')
parser.add_argument('--lifespan', type=int, default=7)
parser.add_argument('--workdays-ahead', type=int, default=2)
parser.add_argument('--upkeep-file', type=str, default='uptime.txt')
args = parser.parse_args()
gh = github.Github(args.username, args.token)
user = gh.get_user()
# Get greenelab/scrum repository. Could not find a better way
repo, = [
repo for repo in user.get_repos()
if repo.full_name == args.repository
]
# Get open issues
open_issues = list(repo.get_issues(state='open'))
# Close old issues
close_old_issues(open_issues, args.lifespan)
# Get n most recent issues (open or closed), where n = 10 + --workdays-ahead
# to help ensure the most recent existing e-scrum issue is included even when other
# non e-scrum issues exist
issues = repo.get_issues(state='all', sort='number', direction='desc')
issues = issues[:min(10 + args.workdays_ahead, issues.totalCount)]
date_issue_pairs = [(issue_title_to_date(issue.title), issue) for issue in issues]
# Filter issues that are not scrum entries
filtered_date_issue_pairs = [(date, issue) for date, issue in date_issue_pairs if date]
# Issue objects are not comparable, so we need to sort by date only
date_issue_pairs = sorted(filtered_date_issue_pairs, key=lambda x: x[0])
# Detect previous issue for creation of the first upcoming issue
previous_issue = None
if date_issue_pairs:
_, previous_issue = date_issue_pairs[-1]
# Create upcoming issues
dates = get_future_dates_without_issues(issues, args.workdays_ahead)
for date in dates:
previous_issue = create_scrum_issue(repo, date, previous_issue)
# Create a small, meaningless change to keep Github Actions from disabling
# the repo for inactivity
with open(args.upkeep_file) as in_file:
message = in_file.readline().strip()
days = int(message.split(' ')[3])
days += 1
new_message = "It has been "
new_message += str(days)
new_message += " days since I last had to tinker with the scrum bot.\n"
with open(args.upkeep_file, 'w') as out_file:
out_file.write(new_message)
|
1715283
|
import tensorflow as tf
from ocnn import softmax_loss, softmax_accuracy
class ShapeLoss:
def __init__(self, flags, reuse=False):
self.flags = flags
self.reuse = reuse
def _def_memory(self, channel):
with tf.variable_scope('shape_memory'):
self.memory = tf.get_variable('memory',
shape=[self.flags.inst_num, channel], trainable=False,
initializer=tf.contrib.layers.xavier_initializer())
def forward(self, feature):
with tf.variable_scope('shape_cls', reuse=self.reuse):
self._def_memory(int(feature.shape[1]))
self.feature = tf.nn.l2_normalize(feature, axis=1)
logit = tf.matmul(self.feature, self.memory, transpose_a=False, transpose_b=True)
logit = tf.div(logit, self.flags.sigma)
return logit
def loss(self, logit, shape_id):
self.shape_id = shape_id # this is the ground-truth label
with tf.name_scope('shape_loss'):
loss = softmax_loss(logit, self.shape_id, self.flags.inst_num)
accu = softmax_accuracy(logit, self.shape_id)
return loss, accu
def update_memory(self, solver):
# update memory bank after solver
with tf.control_dependencies([solver]):
with tf.name_scope('update_shape_memory'):
momentum = self.flags.momentum
weight = tf.gather(self.memory, self.shape_id)
weight = self.feature * momentum + weight * (1 - momentum)
weight = tf.nn.l2_normalize(weight, 1)
memory = tf.scatter_update(self.memory, self.shape_id, weight)
return memory
def knn_accuracy(self, logit, label_test, label_train, class_num=10, K=200):
with tf.name_scope('knn_accu'):
one_hot_train = tf.one_hot(label_train, depth=class_num)
top_k_values, top_k_indices = tf.nn.top_k(logit, k=K) # k nearest points
top_k_label = tf.gather(one_hot_train, top_k_indices) # gather label
weight = tf.expand_dims(tf.exp(top_k_values), axis=-1) # predict
weighted_label = tf.multiply(top_k_label, weight)
sum_up_predictions = tf.reduce_sum(weighted_label, axis=1)
label_pred = tf.argmax(sum_up_predictions, axis=1)
accu = label_accuracy(label_pred, label_test)
return accu
class PointLoss:
def __init__(self, flags, reuse=False):
self.flags = flags
self.reuse = reuse
def _def_memory(self, channel):
with tf.variable_scope('point_memory'):
self.memory = tf.get_variable('memory', trainable=False,
shape=[self.flags.inst_num, self.flags.seg_num, channel],
initializer=tf.contrib.layers.xavier_initializer())
def forward(self, feature, shape_id, obj_segment, batch_size):
self.shape_id = shape_id
self.obj_segment = obj_segment
self.batch_size = batch_size
with tf.variable_scope('point_cls', reuse=self.reuse):
self._def_memory(int(feature.shape[1]))
self.feature = tf.nn.l2_normalize(feature, axis=1)
# split the feature
node_nums = tf.segment_sum(tf.ones_like(obj_segment), obj_segment)
node_nums = tf.reshape(node_nums, [self.batch_size])
features = tf.split(self.feature, node_nums)
# gather memory bank
out = [None] * self.batch_size
for i in range(self.batch_size):
out[i] = tf.matmul(features[i], self.memory[shape_id[i], :, :],
transpose_a=False, transpose_b=True)
# logit
logit = tf.concat(out, axis=0)
logit = tf.div(logit, self.flags.sigma)
return logit
def loss(self, logit, point_id):
self.point_id = point_id
with tf.name_scope('point_loss'):
# point_mask = point_id > -1 # filter label -1
# logit = tf.boolean_mask(logit, point_mask)
# point_id = tf.boolean_mask(point_id, point_mask)
loss = softmax_loss(logit, point_id, self.flags.seg_num)
accu = softmax_accuracy(logit, point_id)
return loss, accu
def update_memory(self, solver):
# update memory bank after solver
with tf.control_dependencies([solver]):
with tf.name_scope('update_point_memory'):
feature = self.feature
seg_num, point_id = self.flags.seg_num, self.point_id
# point_mask = point_id > -1 # filter label -1
point_id = point_id + (self.obj_segment * seg_num)
# point_id = tf.boolean_mask(point_id, point_mask)
# feature = tf.boolean_mask(feature, point_mask)
batch_size = self.batch_size
feature = tf.unsorted_segment_mean(feature, point_id, seg_num*batch_size)
feature = tf.nn.l2_normalize(feature, axis=1)
feature = tf.reshape(feature, [batch_size, seg_num, -1])
momentum = self.flags.momentum
weight = tf.gather(self.memory, self.shape_id)
weight = feature * momentum + weight * (1 - momentum)
weight = tf.nn.l2_normalize(weight, axis=2)
memory = tf.scatter_update(self.memory, self.shape_id, weight)
return memory
|
1715284
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from linearmodels import PanelOLS
#import data
data = pd.read_csv("fraserDataWithRGDPPC.csv", index_col=[0, 1],
                   parse_dates=True)
# create list of each index set from multi index
years = list(sorted(set(data.index.get_level_values('Year'))))
country = list(sorted(set(data.index.get_level_values('ISO_Code'))))
#choose variables that will be plotted for each year in scatter
plot_vars = ["Sound Money", "Government Consumption",
"RGDP Per Capita","Quartile"]
# Normalize income so that 1 represents the maximum value of RGDP Per Capita
# This will allow dot to be easily adjusted
data["RGDP Per Capita"] = data["RGDP Per Capita"] / max(data["RGDP Per Capita"]) * 1000
# Panel OLS
reg_data = data[["RGDP Per Capita", "Sound Money", "Government Consumption",
"SUMMARY INDEX"]].dropna()
x = reg_data[["Sound Money", "Government Consumption",
"SUMMARY INDEX"]]
y = reg_data[["RGDP Per Capita"]]
mod = PanelOLS(y, x, entity_effects=True, time_effects=False)
res = mod.fit(cov_type='clustered', cluster_entity=True)
print(res.summary)
|
1715320
|
import torch
class ObservationBuffer:
def __init__(self, num_envs, num_obs, include_history_steps, device):
self.num_envs = num_envs
self.num_obs = num_obs
self.include_history_steps = include_history_steps
self.device = device
self.num_obs_total = num_obs * include_history_steps
self.obs_buf = torch.zeros(self.num_envs, self.num_obs_total, device=self.device, dtype=torch.float)
def reset(self, reset_idxs, new_obs):
self.obs_buf[reset_idxs] = new_obs.repeat(1, self.include_history_steps)
def insert(self, new_obs):
# Shift observations back.
self.obs_buf[:, : self.num_obs * (self.include_history_steps - 1)] = self.obs_buf[:,self.num_obs : self.num_obs * self.include_history_steps]
# Add new observation.
self.obs_buf[:, -self.num_obs:] = new_obs
def get_obs_vec(self, obs_ids):
"""Gets history of observations indexed by obs_ids.
Arguments:
obs_ids: An array of integers with which to index the desired
observations, where 0 is the latest observation and
include_history_steps - 1 is the oldest observation.
"""
obs = []
for obs_id in reversed(sorted(obs_ids)):
slice_idx = self.include_history_steps - obs_id - 1
obs.append(self.obs_buf[:, slice_idx * self.num_obs : (slice_idx + 1) * self.num_obs])
return torch.cat(obs, dim=-1)
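# Hedged usage sketch (shapes are illustrative; assumes CPU tensors):
#     buf = ObservationBuffer(num_envs=2, num_obs=4, include_history_steps=3, device="cpu")
#     buf.insert(torch.ones(2, 4))
#     stacked = buf.get_obs_vec([0, 1, 2])   # shape (2, 12), oldest observation first, newest last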
|