| id | content |
|---|---|
11486305
|
import gettext
import sys
from termcolor import colored
from server.player import Player
from utils import debug, warning, info, DEBUG
from web_admin.constants import *
_ = gettext.gettext
class Server:
def __init__(self, web_admin, database, game, name):
self.name = name
self.database = database
self.web_admin = web_admin
self.game_password = None
self.game = game
self.trader_time = False
self.players = []
self.rejects = []
        # The initial game's record data is discarded because some of it
        # may have been missed
self.record_games = True
def supported_mode(self):
return self.web_admin.supported_mode(self.game.game_type)
def stop(self):
pass
def get_player_by_username(self, username):
matched_players = 0
matched_player = None
for player in self.players:
# Unidentifiable players have no steam_id
if username in player.username and player.steam_id:
matched_players += 1
matched_player = player
if matched_players != 1:
return None
return matched_player
def get_player_by_key(self, player_key):
for player in self.players:
if player.player_key == player_key:
return player
return None
def get_player_by_sid(self, sid):
matched_players = 0
matched_player = None
for player in self.players:
if sid in player.steam_id:
matched_players += 1
matched_player = player
if matched_players != 1:
return None
return matched_player
def set_game_password(self, password):
self.game_password = password
self.web_admin.set_game_password(password)
def toggle_game_password(self):
self.web_admin.toggle_game_password()
def write_all_players(self):
for player in self.players:
self.database.save_player(player)
def write_game_map(self):
self.database.save_game_map(self.game.game_map)
def set_difficulty(self, difficulty):
self.web_admin.set_difficulty(difficulty)
def set_length(self, length):
self.web_admin.set_length(length)
def disable_password(self):
self.web_admin.set_game_password()
def enable_password(self, password=None):
if password:
self.web_admin.set_game_password(password)
else:
self.web_admin.set_game_password(self.game_password)
def get_maps(self, active_only=False):
if active_only:
return self.web_admin.get_active_maps()
else:
return self.web_admin.get_maps()
def find_map(self, search_title):
matches = 0
matched_title = None
for map_title in self.get_maps():
if search_title.upper() in map_title.upper():
matches += 1
matched_title = map_title
if matches != 1:
return None
return matched_title
def change_map(self, new_map):
matched_title = self.find_map(new_map)
if not matched_title:
return None
self.web_admin.set_map(matched_title)
def kick_player(self, username):
player = self.get_player_by_username(username)
if not player:
player = self.get_player_by_sid(username)
if not player:
return False
self.web_admin.kick_player(player.player_key)
return player.username
def ban_player(self, username):
player = self.get_player_by_username(username)
if not player:
player = self.get_player_by_sid(username)
if not player:
return False
self.web_admin.ban_player(player.steam_id, player.player_key)
return player.username
def restart_map(self):
self.change_map(self.game.game_map.title)
def change_game_type(self, mode):
self.web_admin.set_game_type(mode)
def event_player_join(self, player):
        if player.username in self.rejects:
            return
        identity = self.web_admin.get_player_identity(player.username)
# Reject unidentifiable players
if not identity['steam_id']:
debug("Rejected player: {}".format(player.username))
self.rejects.append(player.username)
return
new_player = Player(player.username, player.perk)
new_player.kills = player.kills
new_player.dosh = player.dosh
new_player.ip = identity['ip']
new_player.country = identity['country']
new_player.country_code = identity['country_code']
new_player.steam_id = identity['steam_id']
new_player.player_key = identity['player_key']
self.database.load_player(new_player)
new_player.sessions += 1
self.players.append(new_player)
if DEBUG:
message = _("Player {} ({}) joined {} from {}").format(
new_player.username, new_player.steam_id, self.name,
new_player.country
)
else:
message = _("Player {} joined {} from {}") \
.format(new_player.username, self.name, new_player.country)
print(colored(
message.encode("utf-8").decode(sys.stdout.encoding), 'cyan'
))
info(
"{} (SteamID: {})".format(message, new_player.steam_id),
display=False
)
self.web_admin.chat.handle_message(
"internal_command",
"!player_join " + new_player.username,
USER_TYPE_INTERNAL
)
def event_player_quit(self, player):
self.players.remove(player)
self.database.save_player(player)
message = _("Player {} left {}") \
.format(player.username, self.name)
print(colored(
message.encode("utf-8").decode(sys.stdout.encoding), 'cyan'
))
info(
"{} (SteamID: {})".format(message, player.steam_id),
display=False
)
self.web_admin.chat.handle_message("internal_command",
"!player_quit " + player.username,
USER_TYPE_INTERNAL)
def event_player_death(self, player):
player.total_deaths += 1
message = _("Player {} died on {}").format(player.username, self.name)
print(colored(
message.encode("utf-8").decode(sys.stdout.encoding), 'red'
))
def event_new_game(self):
if self.game.game_type in GAME_TYPE_DISPLAY:
display_name = GAME_TYPE_DISPLAY[self.game.game_type]
else:
display_name = GAME_TYPE_UNKNOWN
message = _("New game on {}, map: {}, mode: {}") \
.format(self.name, self.game.game_map.name, display_name)
print(colored(
message.encode("utf-8").decode(sys.stdout.encoding), 'magenta'
))
self.database.load_game_map(self.game.game_map)
self.game.new_game()
if self.game.game_type == GAME_TYPE_ENDLESS:
self.game.game_map.plays_endless += 1
elif self.game.game_type == GAME_TYPE_SURVIVAL:
self.game.game_map.plays_survival += 1
elif self.game.game_type == GAME_TYPE_SURVIVAL_VS:
self.game.game_map.plays_survival_vs += 1
elif self.game.game_type == GAME_TYPE_WEEKLY:
self.game.game_map.plays_weekly += 1
else:
warning(_("Unknown game_type {}").format(self.game.game_type))
self.game.game_map.plays_other += 1
self.rejects = []
self.web_admin.chat.handle_message("internal_command", "!new_game",
USER_TYPE_INTERNAL)
def event_end_game(self, victory=False):
debug(_("End game on {}, map: {}, mode: {}, victory: {}").format(
self.name, self.game.game_map.title, self.game.game_type,
str(victory)
))
self.write_game_map()
self.database.save_map_record(self.game, len(self.players), victory)
def event_wave_start(self):
self.web_admin.chat.handle_message("internal_command",
"!new_wave " + str(self.game.wave),
USER_TYPE_INTERNAL)
if self.game.wave > self.game.game_map.highest_wave:
self.game.game_map.highest_wave = self.game.wave
for player in self.players:
player.wave_kills = 0
player.wave_dosh = 0
def event_wave_end(self):
pass
def event_trader_open(self):
self.trader_time = True
command = "!t_open {}".format(self.game.wave)
self.web_admin.chat.handle_message("internal_command", command,
USER_TYPE_INTERNAL)
def event_trader_close(self):
self.trader_time = False
command = "!t_close {}".format(self.game.wave)
self.web_admin.chat.handle_message("internal_command", command,
USER_TYPE_INTERNAL)
|
11486319
|
from datetime import datetime
from pulsar.api import PermissionDenied, Http404
from lux.core import http_assert
from lux.ext.rest import RestRouter, route
from lux.models import Schema, fields, ValidationError
from lux.ext.odm import Model
from . import ensure_service_user, IdSchema
URI = 'registrations'
email_templates = {
"subject": {
1: "registration/activation_email_subject.txt",
2: "registration/password_email_subject.txt"
},
"message": {
1: "registration/activation_email.txt",
2: "registration/password_email.txt"
}
}
class RegistrationSchema(Schema):
user = fields.Nested('UserSchema')
class Meta:
model = URI
class PasswordSchema(Schema):
"""Schema for checking a password is input correctly
"""
password = fields.Password(required=True, minLength=5, maxLength=128)
password_repeat = fields.Password(required=True)
def post_load(self, data):
password = data['password']
password_repeat = data.pop('password_repeat', None)
if password != password_repeat:
raise ValidationError('Passwords did not match')
class UserCreateSchema(PasswordSchema):
username = fields.Slug(required=True, minLength=2, maxLength=30)
email = fields.Email(required=True)
class RegistrationModel(Model):
@property
def type(self):
return self.metadata.get('type', 1)
def create_instance(self, session, data):
data['active'] = False
user = self.app.auth.create_user(session, **data)
# send_email_confirmation(request, reg)
return user
def update_model(self, request, instance, data, session=None, **kw):
if not instance.id:
return super().update_model(request, instance, data,
session=session, **kw)
reg = self.instance(instance).obj
http_assert(reg.type == self.type, Http404)
self.update_registration(request, reg, data, session=session)
return {'success': True}
def update_registration(self, request, reg, data, session=None):
with self.session(request, session=session) as session:
user = reg.user
user.active = True
session.add(user)
session.delete(reg)
class RegistrationCRUD(RestRouter):
"""
---
summary: Registration to the API
tags:
- authentication
- registration
"""
model = RegistrationModel("registrations", RegistrationSchema)
@route(default_response_schema=[RegistrationSchema])
def get(self, request):
"""
---
summary: List registration objects
responses:
200:
description: List of registrations matching filters
"""
return self.model.get_list_response(request)
@route(default_response=201,
default_response_schema=RegistrationSchema,
body_schema=UserCreateSchema)
def post(self, request, **kw):
"""
---
summary: Create a new registration
"""
ensure_service_user(request)
return self.model.create_response(request, **kw)
@route('<id>/activate', path_schema=IdSchema)
def post_activate(self, request):
"""
---
summary: Activate a user from a registration ID
        description: Clients should POST to this endpoint once they are
            satisfied that the user has confirmed his/her identity.
            This is a one-time-only operation.
responses:
204:
description: Activation was successful
400:
description: Bad Token
401:
description: Token missing or expired
404:
description: Activation id not found
"""
ensure_service_user(request)
model = self.get_model(request)
with model.session(request) as session:
reg = self.get_instance(request, session=session)
if reg.expiry < datetime.utcnow():
raise PermissionDenied('registration token expired')
reg.user.active = True
session.add(reg.user)
model.delete_model(request, reg, session=session)
request.response.status_code = 204
return request.response
def send_email_confirmation(request, reg, email_subject=None,
email_message=None):
"""Send an email to user
"""
user = reg.user
if not user.email:
return
app = request.app
token = ensure_service_user(request)
site = token.get('url')
reg_url = token.get('registration_url')
psw_url = token.get('password_reset_url')
ctx = {'auth_key': reg.id,
'register_url': reg_url,
'reset_password_url': psw_url,
'expiration': reg.expiry,
'email': user.email,
'site_uri': site}
email_subject = email_subject or email_templates['subject'][reg.type]
email_message = email_message or email_templates['message'][reg.type]
subject = app.render_template(email_subject, ctx)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
body = app.render_template(email_message, ctx)
user.email_user(app, subject, body)
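
if __name__ == '__main__':
    # Hedged client-side sketch (hypothetical host, token, and response
    # fields): how a service could drive the two endpoints documented above,
    # first creating a registration and then activating it.
    import requests
    api = 'https://example.com/registrations'       # hypothetical deployment URL
    auth = {'Authorization': 'Bearer <service-token>'}  # hypothetical token
    reg = requests.post(api, json={'username': 'alice',
                                   'email': 'alice@example.com',
                                   'password': 'secret123',
                                   'password_repeat': 'secret123'},
                        headers=auth).json()
    # One-time activation once the user's identity has been confirmed
    requests.post('{}/{}/activate'.format(api, reg['id']), headers=auth)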
|
11486338
|
import os
import glob
from data import common
import numpy as np
import torch.utils.data as data
class SRData(data.Dataset):
def __init__(self, args, train=True):
self.args = args
self.train = train
self.scale = args.scale
        self._set_filesystem(args.dir_data_root)  # args.dir_data_root: /home/xxx/xxx/data/
        # _set_filesystem must run before get_arr(), which globs self.dir_hr
        self.arr = self.get_arr()
        # self.arr = np.fromfile('arr.dat', dtype=int)
        data_range = [r.split('-') for r in args.data_range.split('/')]  # ex: self.data_range: 1-400/401-432
        data_range = data_range[0] if train else data_range[1]
        self.begin, self.end = list(map(int, data_range))
self.images_hr, self.images_lr = self._scan() # get file list
if train:
n_patches = args.batch_size * args.test_every # args.batch_size: 16, args.test_every: 1000
self.repeat = max(n_patches // len(self.images_hr), 1) if len(self.images_hr) > 0 else 0
    # The functions below are used to prepare images
def _scan(self):
names_hr = sorted(glob.glob(os.path.join(self.dir_hr, '*.dat')))
names_lr = sorted(glob.glob(os.path.join(self.dir_lr, '*.dat')))
names_hr = np.array(names_hr)[self.arr] # shuffle
names_hr = names_hr[self.begin - 1: self.end]
if self.args.apply_feild_data:
names_lr = np.array(names_lr)
else:
names_lr = np.array(names_lr)[self.arr]
names_lr = names_lr[self.begin - 1: self.end]
return names_hr, names_lr
def _set_filesystem(self, dir_data_root):
self.root_path = dir_data_root
self.dir_hr = os.path.join(self.root_path, self.args.dir_hr)
if not self.args.apply_feild_data:
self.dir_lr = os.path.join(self.root_path, self.args.dir_lr)
else:
self.dir_lr = self.args.dir_lr
def __getitem__(self, idx):
lr, hr, filename = self._load_file(idx)
pair, params = common.normal(lr, hr)
pair = common.set_channel(*pair)
pair = self.get_patch(*pair)
pair_t = common.np2Tensor(*pair)
return pair_t[0], pair_t[1], filename, params
def __len__(self):
if self.train:
return len(self.images_hr) * self.repeat
else:
return len(self.images_hr)
def _load_file(self, idx):
idx = idx % len(self.images_hr) if self.train else idx
f_hr = self.images_hr[idx]
f_lr = self.images_lr[idx]
filename, _ = os.path.splitext(os.path.basename(f_lr)) # without suffix
lr = np.fromfile(f_lr, dtype=np.float32)
hr = np.fromfile(f_hr, dtype=np.float32)
if not self.args.apply_feild_data:
lr = lr.reshape((128,128))
else:
shape = [int(x) for x in filename.split('_')[1].split('x')]
lr = lr.reshape(shape)
hr = hr.reshape((256,256))
lr = np.rot90(lr, 3)
hr = np.rot90(hr, 3)
return lr, hr, filename
def get_patch(self, lr, hr):
scale = self.scale
if self.train:
lr, hr = common.get_patch(
lr, hr,
patch_size=self.args.patch_size,
scale=scale
)
lr, hr = common.augment(lr, hr)
return lr, hr
def get_arr(self):
names_hr = sorted(glob.glob(os.path.join(self.dir_hr, '*.dat')))
l = len(names_hr)
arr = np.arange(l)
np.random.shuffle(arr)
return arr
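
if __name__ == "__main__":
    # Illustration of the data_range convention noted above (hypothetical
    # value): "1-400/401-432" means train files 1-400 and test files 401-432.
    train_part, test_part = [r.split('-') for r in "1-400/401-432".split('/')]
    print(list(map(int, train_part)), list(map(int, test_part)))  # [1, 400] [401, 432]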
|
11486339
|
import copy
from typing import Any, Tuple, Union
from django.forms import Select
from django.forms.fields import CallableChoiceIterator, Field
from django.utils.translation import gettext_lazy as _
class AjaxChoiceField(Field):
widget = Select
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
}
def __init__(self, *, choices: Tuple = (), **kwargs: Any) -> None:
super().__init__(**kwargs)
self.choices = choices
def __deepcopy__(self, memo: dict) -> Field:
result = super().__deepcopy__(memo)
result._choices = copy.deepcopy(self._choices, memo)
return result
def _get_choices(self) -> Union[CallableChoiceIterator, list]:
return self._choices
def _set_choices(self, value: Any) -> None:
# Setting choices also sets the choices on the widget.
# choices can be any iterable, but we call list() on it because
# it will be consumed more than once.
if callable(value):
value = CallableChoiceIterator(value)
else:
value = list(value)
self._choices = self.widget.choices = value
choices = property(_get_choices, _set_choices)
def to_python(self, value: Any) -> str:
"""Return a string."""
if value in self.empty_values:
return ''
return str(value)
def validate(self, value: Any) -> None:
"""Validate that the input is in self.choices."""
super().validate(value)
def valid_value(self, value: Any) -> bool:
"""Check to see if the provided value is a valid choice."""
text_value = str(value)
for k, v in self.choices:
if isinstance(v, (list, tuple)):
# This is an optgroup, so look inside the group for options
for k2, v2 in v:
if value == k2 or text_value == str(k2):
return True
else:
if value == k or text_value == str(k):
return True
return False
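
if __name__ == "__main__":
    # Hedged demo (hypothetical choices): valid_value accepts plain pairs
    # and optgroup-style nested pairs alike. Standalone use needs minimal
    # Django settings configured before the form machinery is exercised.
    from django.conf import settings
    settings.configure(USE_I18N=False)
    field = AjaxChoiceField(choices=[
        ("1", "One"),
        ("colors", (("2", "Red"), ("3", "Blue"))),  # an optgroup entry
    ])
    print(field.valid_value("3"))  # True: found inside the optgroup
    print(field.valid_value("9"))  # False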
|
11486365
|
import datetime as dt
import itertools
import multiprocessing as mp
import pickle
import platform
import select
import socket
import sys
import time
import pytest
from mbedtls import hashlib
from mbedtls.exceptions import TLSError
from mbedtls.pk import RSA
from mbedtls.tls import *
from mbedtls.tls import _BaseConfiguration as BaseConfiguration
from mbedtls.tls import _DTLSCookie as DTLSCookie
from mbedtls.tls import _enable_debug_output
from mbedtls.tls import _PSKSToreProxy as PSKStoreProxy
from mbedtls.tls import _set_debug_level
from mbedtls.tls import TLSSession
from mbedtls.x509 import CRT, CSR, BasicConstraints
try:
from contextlib import suppress
except ImportError:
# Python 2.7
from contextlib2 import suppress
try:
FileNotFoundError
except NameError:
# Python 2.7
FileNotFoundError = OSError
class Client:
def __init__(self, cli_conf, proto, srv_address, srv_hostname):
super().__init__()
self.cli_conf = cli_conf
self.proto = proto
self.srv_address = srv_address
self.srv_hostname = srv_hostname
self._sock = None
def __enter__(self):
self.start()
return self
def __exit__(self, *exc_info):
self.stop()
def __del__(self):
self.stop()
@property
def context(self):
if self._sock is None:
return None
return self._sock.context
def do_handshake(self):
if not self._sock:
return
self._sock.do_handshake()
def echo(self, buffer, chunksize):
if not self._sock:
return
view = memoryview(buffer)
received = bytearray()
for idx in range(0, len(view), chunksize):
part = view[idx : idx + chunksize]
amt = self._sock.send(part)
received += self._sock.recv(2 << 13)
return received
def start(self):
if self._sock:
self.stop()
self._sock = ClientContext(self.cli_conf).wrap_socket(
socket.socket(socket.AF_INET, self.proto),
server_hostname=self.srv_hostname,
)
self._sock.connect(self.srv_address)
def stop(self):
if not self._sock:
return
with suppress(TLSError, OSError):
self._sock.close()
self._sock = None
def restart(self):
self.stop()
self.start()
class Server:
def __init__(self, srv_conf, proto, conn_q):
super().__init__()
self.srv_conf = srv_conf
self.proto = proto
self.conn_q = conn_q
self._sock = None
def __enter__(self):
self.start()
return self
def __exit__(self, *exc_info):
self.stop()
def __del__(self):
self.stop()
@property
def context(self):
if self._sock is None:
return None
return self._sock.context
def start(self):
if self._sock:
self.stop()
self._sock = ServerContext(self.srv_conf).wrap_socket(
socket.socket(socket.AF_INET, self.proto)
)
self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._sock.bind(
("127.0.0.1" if platform.system() == "Windows" else "", 0)
)
if self.proto == socket.SOCK_STREAM:
self._sock.listen(1)
self.conn_q.put(self._sock.getsockname())
def stop(self):
if not self._sock:
return
with suppress(TLSError, OSError):
self._sock.close()
self._sock = None
def run(self, conn_handler):
with self:
{
TLSConfiguration: self._run_tls,
DTLSConfiguration: self._run_dtls,
}[type(self.srv_conf)](conn_handler)
def _run_tls(self, conn_handler):
assert self._sock
conn, addr = self._sock.accept()
try:
conn.do_handshake()
except TLSError:
conn.close()
return
try:
conn_handler(conn)
finally:
conn.close()
def _run_dtls(self, conn_handler):
assert self._sock
cli, addr = self._sock.accept()
cli.setcookieparam(addr[0].encode("ascii"))
with pytest.raises(HelloVerifyRequest):
cli.do_handshake()
            # Swap in the newly accepted connection and close the old
            # listening-side socket.
            _, (cli, addr) = cli, cli.accept()
            _.close()
cli.setcookieparam(addr[0].encode("ascii"))
try:
cli.do_handshake()
except TLSError:
cli.close()
return
try:
conn_handler(cli)
finally:
cli.close()
class EchoHandler:
def __init__(self, stop_ev, packet_size=4096):
self.stop_ev = stop_ev
self.packet_size = packet_size
def __call__(self, conn):
while not self.stop_ev.is_set():
readable, _, err = select.select([conn], [], [], 0.1)
if err:
break
for _ in readable:
# We use `send()` instead of `sendto()` for DTLS as well
# because the DTLS socket is connected.
received = conn.recv(self.packet_size)
sent = conn.send(received)
class TestPickle:
@pytest.fixture
def session(self):
return TLSSession()
@pytest.fixture(params=[TLSConfiguration, DTLSConfiguration])
def conf(self, request):
return request.param()
@pytest.fixture(params=[ClientContext, ServerContext])
def context(self, request, conf):
return request.param(conf)
@pytest.fixture
def identity(self):
return lambda obj: pickle.loads(pickle.dumps(obj))
@pytest.fixture
def tls_wrapped_buffer(self, context):
return TLSWrappedBuffer(context)
@pytest.fixture
def tls_wrapped_socket(self, tls_wrapped_buffer):
return TLSWrappedSocket(socket.socket(), tls_wrapped_buffer)
def test_session(self, session):
with pytest.raises(TypeError) as excinfo:
pickle.dumps(session)
assert str(excinfo.value).startswith("cannot pickle")
def test_configuration(self, conf, identity):
assert conf == identity(conf)
def test_context(self, context, identity):
with pytest.raises(TypeError) as excinfo:
pickle.dumps(context)
assert str(excinfo.value).startswith("cannot pickle")
def test_tls_wrapped_buffer(self, tls_wrapped_buffer):
with pytest.raises(TypeError) as excinfo:
pickle.dumps(tls_wrapped_buffer)
assert str(excinfo.value).startswith("cannot pickle")
def test_tls_wrapped_socket(self, tls_wrapped_socket):
# Python socket.socket is not pickable.
with pytest.raises(TypeError) as excinfo:
pickle.dumps(tls_wrapped_socket)
assert str(excinfo.value).startswith("cannot pickle")
class TestPSKStoreProxy:
@pytest.fixture
def psk_store(self):
return {"client": b"the secret key"}
@pytest.fixture
def proxy(self, psk_store):
return PSKStoreProxy(psk_store)
@pytest.mark.parametrize("repr_", (repr, str), ids=lambda f: f.__name__)
def test_repr(self, repr_, psk_store):
assert isinstance(repr_(psk_store), str)
def test_unwrap(self, proxy, psk_store):
assert proxy.unwrap() == psk_store
def test_eq(self, proxy, psk_store):
for k, v in psk_store.items():
assert proxy[k] == v
def test_len(self, proxy, psk_store):
assert len(proxy) == len(psk_store)
class TestTLSVersion:
@pytest.mark.parametrize("version", TLSVersion)
def test_major(self, version):
assert version.major() == 3
def test_minor(self):
# assert TLSVersion.SSLv3.minor() == 0
assert TLSVersion.TLSv1.minor() == 1
assert TLSVersion.TLSv1_1.minor() == 2
assert TLSVersion.TLSv1_2.minor() == 3
@pytest.mark.parametrize("version", TLSVersion)
def test_from_major_minor(self, version):
assert (
TLSVersion.from_major_minor(version.major(), version.minor())
is version
)
@pytest.mark.parametrize(
"version", [TLSVersion.MINIMUM_SUPPORTED, TLSVersion.MAXIMUM_SUPPORTED]
)
def test_minmax_supported(self, version):
assert version in TLSVersion
class TestDTLSVersion:
@pytest.mark.parametrize("version", DTLSVersion)
def test_major(self, version):
assert version.major() == 3
def test_minor(self):
assert DTLSVersion.DTLSv1_0.minor() == 2
assert DTLSVersion.DTLSv1_2.minor() == 3
@pytest.mark.parametrize("version", DTLSVersion)
def test_from_major_minor(self, version):
assert (
DTLSVersion.from_major_minor(version.major(), version.minor())
is version
)
@pytest.mark.parametrize(
"version",
[DTLSVersion.MINIMUM_SUPPORTED, DTLSVersion.MAXIMUM_SUPPORTED],
)
def test_minmax_supported(self, version):
assert version in DTLSVersion
class TestTLSRecordHeader:
@pytest.fixture(params=TLSRecordHeader.RecordType)
def record_type(self, request):
return request.param
@pytest.fixture(params=TLSVersion)
def version(self, request):
return request.param
@pytest.fixture
def length(self):
return 42
@pytest.fixture
def header(self, record_type, version, length):
return TLSRecordHeader(record_type, version, length)
@pytest.mark.parametrize("repr_", (repr, str), ids=lambda f: f.__name__)
def test_repr(self, repr_, record_type):
assert isinstance(repr_(record_type), str)
def test_hash(self, record_type):
assert isinstance(hash(record_type), int)
def test_accessors(self, header, record_type, version, length):
assert len(header) == 5
assert header.record_type is record_type
assert header.version is version
assert header.length == length
def test_serialization(self, header):
serialized = bytes(header)
assert isinstance(serialized, bytes)
assert len(serialized) == 5
assert TLSRecordHeader.from_bytes(serialized) == header
class TestTLSSession:
@pytest.fixture
def session(self):
return TLSSession()
def test_repr(self, session):
assert isinstance(repr(session), str)
class Chain:
@pytest.fixture(scope="class")
def now(self):
return dt.datetime.utcnow()
@pytest.fixture(scope="class")
def digestmod(self):
return hashlib.sha256
@pytest.fixture(scope="class")
def ca0_key(self):
ca0_key = RSA()
ca0_key.generate()
return ca0_key
@pytest.fixture(scope="class")
def ca1_key(self):
ca1_key = RSA()
ca1_key.generate()
return ca1_key
@pytest.fixture(scope="class")
def ee0_key(self):
ee0_key = RSA()
ee0_key.generate()
return ee0_key
@pytest.fixture(scope="class")
def ca0_crt(self, ca0_key, digestmod, now):
ca0_csr = CSR.new(ca0_key, "CN=Trusted CA", digestmod())
return CRT.selfsign(
ca0_csr,
ca0_key,
not_before=now,
not_after=now + dt.timedelta(days=90),
serial_number=0x123456,
basic_constraints=BasicConstraints(True, -1),
)
@pytest.fixture(scope="class")
def ca1_crt(self, ca1_key, ca0_crt, ca0_key, digestmod, now):
ca1_csr = CSR.new(ca1_key, "CN=Intermediate CA", digestmod())
return ca0_crt.sign(
ca1_csr,
ca0_key,
now,
now + dt.timedelta(days=90),
0x234567,
basic_constraints=BasicConstraints(True, -1),
)
@pytest.fixture(scope="class")
def ee0_crt(self, ee0_key, ca1_crt, ca1_key, digestmod, now):
ee0_csr = CSR.new(ee0_key, "CN=End Entity", digestmod())
return ca1_crt.sign(
ee0_csr, ca1_key, now, now + dt.timedelta(days=90), 0x345678
)
@pytest.fixture(scope="class")
def certificate_chain(self, ee0_crt, ca1_crt, ee0_key):
return (ee0_crt, ca1_crt), ee0_key
class TestTrustStore(Chain):
@pytest.fixture
def store(self):
return TrustStore.system()
@pytest.mark.parametrize("repr_", (repr, str), ids=lambda f: f.__name__)
def test_repr(self, repr_, store):
assert isinstance(repr_(store), str)
def test_eq(self, store):
other = TrustStore(store)
assert store is not other
assert store == other
def test_bool(self, store):
assert not TrustStore()
assert store
def test_len(self, store):
assert len(store) != 0
def test_iter(self, store):
assert store[0] != store[1]
for n, crt in enumerate(store, start=1):
assert crt in store
assert n == len(store)
def test_add_existing_certificate(self, store):
length = len(store)
store.add(store[0])
assert len(store) == length
def test_add_new_certificate(self, store, ca0_crt):
length = len(store)
store.add(ca0_crt)
assert len(store) == length + 1
class TestDTLSCookie:
@pytest.fixture
def cookie(self):
return DTLSCookie()
def test_generate_does_not_raise(self, cookie):
cookie.generate()
def test_timeout(self, cookie):
assert cookie.timeout == 60
cookie.timeout = 1000
assert cookie.timeout == 1000
class _BaseConfiguration(Chain):
@pytest.fixture
def conf(self):
raise NotImplementedError
@pytest.fixture
def version(self):
raise NotImplementedError
@pytest.mark.parametrize("repr_", (repr, str), ids=lambda f: f.__name__)
def test_repr(self, repr_, conf):
assert isinstance(repr_(conf), str)
@pytest.mark.parametrize("validate", [True, False])
def test_set_validate_certificates(self, conf, validate):
conf_ = conf.update(validate_certificates=validate)
assert conf_.validate_certificates is validate
@pytest.mark.parametrize("chain", [((), None), None])
def test_set_certificate_chain(self, conf, chain, certificate_chain):
if chain is None:
chain = certificate_chain
conf_ = conf.update(certificate_chain=chain)
assert conf_.certificate_chain == chain
@pytest.mark.parametrize("ciphers", (ciphers_available(),))
def test_set_ciphers(self, conf, ciphers):
conf_ = conf.update(ciphers=ciphers)
assert conf_.ciphers == ciphers
@pytest.mark.parametrize(
"inner_protocols",
[[], (), [NextProtocol.H2, NextProtocol.H2C], [b"h2", b"h2c", b"ftp"]],
)
def test_set_inner_protocols(self, conf, inner_protocols):
conf_ = conf.update(inner_protocols=inner_protocols)
assert conf_.inner_protocols == tuple(
NextProtocol(_) for _ in inner_protocols
)
def test_lowest_supported_version(self, conf, version):
conf_ = conf.update(lowest_supported_version=version)
assert conf_.lowest_supported_version is version
def test_highest_supported_version(self, conf, version):
conf_ = conf.update(highest_supported_version=version)
assert conf_.highest_supported_version is version
@pytest.mark.parametrize("store", [TrustStore.system()])
def test_trust_store(self, conf, store):
conf_ = conf.update(trust_store=store)
assert store
assert conf_.trust_store == store
@pytest.mark.parametrize("callback", [None])
def test_set_sni_callback(self, conf, callback):
assert conf.sni_callback is None
@pytest.mark.parametrize("psk", [None, ("client", b"the secret key")])
def test_psk(self, conf, psk):
assert conf.pre_shared_key is None
conf_ = conf.update(pre_shared_key=psk)
assert conf_.pre_shared_key == psk
@pytest.mark.parametrize(
"psk_store", [None, {"client": b"the secret key"}]
)
def test_psk_store(self, conf, psk_store):
assert conf.pre_shared_key_store is None
conf_ = conf.update(pre_shared_key_store=psk_store)
assert conf_.pre_shared_key_store == psk_store
class TestTLSConfiguration(_BaseConfiguration):
@pytest.fixture
def conf(self):
return TLSConfiguration()
@pytest.fixture(params=TLSVersion)
def version(self, request):
return request.param
class TestDTLSConfiguration(_BaseConfiguration):
@pytest.fixture
def conf(self):
return DTLSConfiguration()
@pytest.fixture(params=DTLSVersion)
def version(self, request):
return request.param
@pytest.mark.parametrize("anti_replay", [True, False])
def test_set_anti_replay(self, conf, anti_replay):
assert conf.anti_replay is True
conf_ = conf.update(anti_replay=anti_replay)
assert conf_.anti_replay is anti_replay
@pytest.mark.parametrize(
"hs_min, hs_max", [(1, 60), (42, 69), (4.2, 6.9), (42.0, 69.0)]
)
def test_handshake_timeout_minmax(self, conf, hs_min, hs_max):
assert conf.handshake_timeout_min == 1.0
assert conf.handshake_timeout_max == 60.0
conf_ = conf.update(
handshake_timeout_min=hs_min,
handshake_timeout_max=hs_max,
)
assert conf_.handshake_timeout_min == hs_min
assert conf_.handshake_timeout_max == hs_max
@pytest.mark.parametrize(
"hs_min, hs_max", [(None, None), (1, None), (None, 60)]
)
def test_handshake_timeout_default(self, conf, hs_min, hs_max):
conf_ = conf.update(
handshake_timeout_min=hs_min,
handshake_timeout_max=hs_max,
)
        assert conf_.handshake_timeout_min == (hs_min or 1.0)
        assert conf_.handshake_timeout_max == (hs_max or 60.0)
class TestBaseContext:
@pytest.fixture(params=[Purpose.SERVER_AUTH, Purpose.CLIENT_AUTH])
def purpose(self, request):
return request.param
@pytest.fixture(params=[TLSConfiguration, DTLSConfiguration])
def conf(self, request):
return request.param()
@pytest.fixture(params=[ServerContext, ClientContext])
def context(self, conf, request):
cls = request.param
return cls(conf)
def test_repr(self, context):
assert isinstance(repr(context), str)
def test_get_configuration(self, context, conf):
assert conf
assert context.configuration is conf
def test_selected_npn_protocol(self, context):
assert context._selected_npn_protocol() is None
def test_cipher(self, context):
assert context._cipher() is None
def test_get_channel_binding(self, context):
assert context._get_channel_binding() is None
# def test_negotiated_tls_version(self, context):
# assert context._negotiated_tls_version() is TLSVersion.SSLv3
@pytest.fixture
def tls_wrapped_buffer(self, context):
return TLSWrappedBuffer(context)
@pytest.mark.parametrize("repr_", (repr, str), ids=lambda f: f.__name__)
def test_repr_tls_wrapped_buffer(self, repr_, tls_wrapped_buffer):
assert isinstance(repr_(tls_wrapped_buffer), str)
class TestClientContext(TestBaseContext):
@pytest.fixture(params=[None, "hostname", "localhost"])
def hostname(self, request):
return request.param
@pytest.fixture
def context(self, conf, hostname):
return ClientContext(conf)
def test_context(self, context):
assert isinstance(context, ClientContext)
def test_hostname(self, context, hostname):
_ = context.wrap_buffers(hostname)
assert context._hostname == hostname
def test_wrap_buffers(self, context):
assert isinstance(context.wrap_buffers(None), TLSWrappedBuffer)
class TestServerContext(TestBaseContext):
@pytest.fixture
def context(self, conf):
return ServerContext(conf)
def test_context(self, context):
assert isinstance(context, ServerContext)
def test_wrap_buffers(self, context):
assert isinstance(context.wrap_buffers(), TLSWrappedBuffer)
PSK_AUTHENTICATION_CIPHERS = (
"TLS-ECDHE-PSK-WITH-AES-256-CBC-SHA",
"TLS-ECDHE-PSK-WITH-AES-128-CBC-SHA",
"TLS-DHE-PSK-WITH-AES-256-CBC-SHA",
"TLS-DHE-PSK-WITH-AES-128-CBC-SHA",
"TLS-RSA-PSK-WITH-AES-256-CBC-SHA",
"TLS-RSA-PSK-WITH-AES-128-CBC-SHA",
"TLS-PSK-WITH-AES-256-CBC-SHA",
"TLS-PSK-WITH-AES-128-CBC-SHA",
)
def generate_configs(*configs):
for conf, versions in configs:
for version in versions:
yield conf, version
class TestCommunication(Chain):
@pytest.fixture(
params=generate_configs(
(TLSConfiguration, TLSVersion), (DTLSConfiguration, DTLSVersion)
)
)
def configs(self, request):
return request.param
@pytest.fixture
def conf_cls(self, configs):
assert issubclass(configs[0], BaseConfiguration)
return configs[0]
@pytest.fixture
def version(self, configs):
assert isinstance(configs[1], (TLSVersion, DTLSVersion))
return configs[1]
@pytest.fixture
def version_min(self, conf_cls):
return {
TLSConfiguration: TLSVersion.MINIMUM_SUPPORTED,
DTLSConfiguration: DTLSVersion.MINIMUM_SUPPORTED,
}[conf_cls]
@pytest.fixture
def proto(self, conf_cls):
return {
TLSConfiguration: socket.SOCK_STREAM,
DTLSConfiguration: socket.SOCK_DGRAM,
}[conf_cls]
@pytest.fixture
def srv_conf(
self,
conf_cls,
version,
version_min,
trust_store,
certificate_chain,
srv_psk,
ciphers,
):
return conf_cls(
trust_store=trust_store,
certificate_chain=certificate_chain,
lowest_supported_version=version_min,
highest_supported_version=version,
ciphers=ciphers,
pre_shared_key_store=srv_psk,
validate_certificates=False,
)
@pytest.fixture
def cli_conf(
self, conf_cls, version, version_min, trust_store, cli_psk, ciphers
):
return conf_cls(
trust_store=trust_store,
lowest_supported_version=version_min,
highest_supported_version=version,
ciphers=ciphers,
pre_shared_key=cli_psk,
validate_certificates=True,
)
@pytest.fixture(params=[4])
def debug(self, srv_conf, cli_conf, request):
_enable_debug_output(srv_conf)
_enable_debug_output(cli_conf)
_set_debug_level(request.param)
@pytest.fixture(scope="class", params=[None])
def ciphers(self, request):
return request.param
@pytest.fixture(scope="class", params=["End Entity"])
def srv_hostname(self, request):
return request.param
@pytest.fixture(scope="class", params=[None])
def cli_psk(self, request):
return request.param
@pytest.fixture(scope="class", params=[None])
def srv_psk(self, request):
return request.param
@pytest.fixture(params=[False])
def buffer(self, request, randbytes):
return randbytes(5 * 16 * 1024)
@pytest.fixture(scope="class")
def trust_store(self, ca0_crt):
store = TrustStore()
store.add(ca0_crt)
return store
@pytest.fixture
def server(self, srv_conf, version, proto):
conn_q = mp.SimpleQueue()
stop_ev = mp.Event()
srv = Server(srv_conf, proto, conn_q)
runner = mp.Process(target=srv.run, args=(EchoHandler(stop_ev),))
runner.start()
yield conn_q.get()
stop_ev.set()
runner.join()
@pytest.fixture
def client(self, server, srv_hostname, cli_conf, proto):
return Client(cli_conf, proto, server, srv_hostname)
@pytest.mark.timeout(10)
@pytest.mark.usefixtures("server")
@pytest.mark.parametrize(
"srv_hostname", ["Wrong End Entity"], indirect=True
)
def test_host_name_verification_failure(self, client, srv_hostname):
with pytest.raises(TLSError), client:
client.do_handshake()
@pytest.mark.timeout(10)
@pytest.mark.usefixtures("server")
@pytest.mark.parametrize(
"ciphers", [PSK_AUTHENTICATION_CIPHERS], indirect=True
)
@pytest.mark.parametrize(
"srv_psk", [{"client": b"the secret key"}], indirect=True
)
@pytest.mark.parametrize(
"cli_psk", [("client", b"the secret key")], indirect=True
)
@pytest.mark.parametrize("chunksize", [1024])
def test_psk_authentication_success(self, client, buffer, chunksize):
with client:
client.do_handshake()
assert client.echo(buffer, chunksize) == buffer
@pytest.mark.timeout(10)
@pytest.mark.usefixtures("server")
@pytest.mark.parametrize(
"ciphers", [PSK_AUTHENTICATION_CIPHERS], indirect=True
)
@pytest.mark.parametrize(
"srv_psk",
[
{"client": b"another key"},
{"another client": b"the secret key"},
{"another client": b"another key"},
],
indirect=True,
)
@pytest.mark.parametrize(
"cli_psk", [("client", b"the secret key")], indirect=True
)
def test_psk_authentication_failure(self, client):
with pytest.raises(TLSError), client:
client.do_handshake()
@pytest.mark.timeout(10)
@pytest.mark.usefixtures("server")
@pytest.mark.parametrize("ciphers", (ciphers_available(),), indirect=True)
@pytest.mark.parametrize("chunksize", [1024])
def test_client_server(self, client, buffer, chunksize):
with client:
while True:
try:
client.do_handshake()
except (WantReadError, WantWriteError):
pass
except TLSError:
client.restart()
else:
break
assert client.echo(buffer, chunksize) == buffer
@pytest.mark.timeout(10)
@pytest.mark.usefixtures("server")
@pytest.mark.parametrize("ciphers", (ciphers_available(),), indirect=True)
def test_session_caching(self, client, cli_conf):
session = TLSSession()
with client:
while True:
try:
client.do_handshake()
except (WantReadError, WantWriteError):
pass
except (ConnectionError, TLSError):
client.restart()
else:
break
session.save(client.context)
new_context = session.resume(cli_conf)
assert isinstance(new_context, ClientContext)
assert new_context._verified
|
11486378
|
from .client import Client, NotAPage
from .utils import get_url_host
from collections import deque
from re import search
class Crawler(object):
""" Generator which systematically searches through a site. """
    def __init__(self, entry_point, whitelist, client=None, blacklist=frozenset()):
        """
        Parameters:
        entry_point - where to start the search.
        whitelist - which hosts are allowed to be crawled.
        client - a Client object to use; a default one is created if omitted.
        blacklist - regex patterns for URLs that must not be crawled.
        """
self.whitelist = whitelist
self.blacklist = blacklist
self.entry_point = entry_point
self.visited_pages = set()
self.to_visit = deque()
if client is None:
self.client = Client()
else:
self.client = client
def __iter__(self):
self.to_visit.append(self.entry_point)
while self.to_visit:
url = self.to_visit.pop()
if not get_url_host(url) in self.whitelist:
continue
if any(search(x, url) for x in self.blacklist):
continue
url_without_hashbang, _, _ = url.partition("#")
if url_without_hashbang in self.visited_pages:
continue
self.visited_pages.add(url_without_hashbang)
try:
page = self.client.download_page(url)
except NotAPage:
continue
yield page
self.to_visit.extend(page.get_links())
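
if __name__ == "__main__":
    # Hedged usage sketch (hypothetical site and patterns): crawl pages on
    # example.com, skipping any URL that matches a blacklist regex.
    crawler = Crawler(
        entry_point="http://example.com/",
        whitelist={"example.com"},
        blacklist={r"\.pdf$"},
    )
    for page in crawler:
        print(page)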
|
11486414
|
import os
import random
import pickle
import numpy as np
from sonopy import mfcc_spec
from scipy.io.wavfile import read
from tqdm import tqdm
class IntegerEncode:
"""Encodes labels into integers
Args:
labels (list): shape (n_samples, strings)
"""
def __init__(self, labels):
# reserve 0 for blank label
self.char2index = {"-": 0, "pad": 1}
self.index2char = {0: "-", 1: "pad"}
self.grapheme_count = 2
self.process(labels)
self.max_label_seq = 6
def process(self, labels):
"""builds the encoding values for labels
Args:
labels (list): shape (n_samples, strings)
"""
strings = "".join(labels)
for s in strings:
if s not in self.char2index:
self.char2index[s] = self.grapheme_count
self.index2char[self.grapheme_count] = s
self.grapheme_count += 1
def convert_to_ints(self, label):
"""Convert into integers
Args:
label (str): string to encode
Returns:
list: shape (max_label_seq)
"""
y = []
for char in label:
y.append(self.char2index[char])
if len(y) < self.max_label_seq:
diff = self.max_label_seq - len(y)
pads = [self.char2index["pad"]] * diff
y += pads
return y
def save(self, file_path):
"""Save integer encoder model as a pickle file
Args:
file_path (str): path to save pickle object
"""
file_name = os.path.join(file_path, "int_encoder.pkl")
with open(file_name, "wb") as f:
pickle.dump(self.__dict__, f)
def normalize(values):
"""Normalize values to mean 0 and std 1
Args:
values (np.array): shape (frame_len, features)
Returns:
np.array: normalized features
"""
return (values - np.mean(values)) / np.std(values)
class GoogleSpeechCommand:
"""Data set can be found here
https://www.kaggle.com/c/tensorflow-speech-recognition-challenge/data
"""
def __init__(self, data_path="speech_data/speech_commands_v0.01", sr=16000):
self.data_path = data_path
self.labels = [
"right",
"eight",
"cat",
"tree",
"bed",
"happy",
"go",
"dog",
"no",
"wow",
"nine",
"left",
"stop",
"three",
"sheila",
"one",
"bird",
"zero",
"seven",
"up",
"marvin",
"two",
"house",
"down",
"six",
"yes",
"on",
"five",
"off",
"four",
]
self.intencode = IntegerEncode(self.labels)
self.sr = sr
self.max_frame_len = 225
def get_data(self, progress_bar=True):
"""Currently returns mfccs and integer encoded data
Returns:
(list, list):
                inputs shape (sample_size, frame_len, mfcc_features)
                targets shape (sample_size, max_label_seq), right-padded with "pad"
"""
pg = tqdm if progress_bar else lambda x: x
inputs, targets = [], []
meta_data = []
for labels in self.labels:
path = os.listdir(os.path.join(self.data_path, labels))
for audio in path:
audio_path = os.path.join(self.data_path, labels, audio)
meta_data.append((audio_path, labels))
random.shuffle(meta_data)
for md in pg(meta_data):
audio_path = md[0]
labels = md[1]
_, audio = read(audio_path)
mfccs = mfcc_spec(
audio,
self.sr,
window_stride=(160, 80),
fft_size=512,
num_filt=20,
num_coeffs=13,
)
mfccs = normalize(mfccs)
diff = self.max_frame_len - mfccs.shape[0]
mfccs = np.pad(mfccs, ((0, diff), (0, 0)), "constant")
inputs.append(mfccs)
target = self.intencode.convert_to_ints(labels)
targets.append(target)
return inputs, targets
@staticmethod
def save_vectors(file_path, x, y):
"""saves input and targets vectors as x.npy and y.npy
Args:
file_path (str): path to save numpy array
x (list): inputs
y (list): targets
"""
x_file = os.path.join(file_path, "x")
y_file = os.path.join(file_path, "y")
np.save(x_file, np.asarray(x))
np.save(y_file, np.asarray(y))
@staticmethod
def load_vectors(file_path):
"""load inputs and targets
Args:
file_path (str): path to load targets from
Returns:
inputs, targets: np.array, np.array
"""
x_file = os.path.join(file_path, "x.npy")
y_file = os.path.join(file_path, "y.npy")
inputs = np.load(x_file)
targets = np.load(y_file)
return inputs, targets
if __name__ == "__main__":
gs = GoogleSpeechCommand()
inputs, targets = gs.get_data()
gs.save_vectors("./speech_data", inputs, targets)
gs.intencode.save("./speech_data")
print("preprocessed and saved")
|
11486419
|
import re
# NB: inside a character class "\b" matches a backspace (0x08), not a word
# boundary, so this matches a word boundary followed by a tab or a backspace.
matcher = re.compile(r"\b[\t\b]")
def match_data(data):
return bool(matcher.match(data))
|
11486423
|
import bson, struct
import itertools
from bson.errors import InvalidBSON
# Helper functions for working with BSON files created using mongodump
def bson_iter(bson_file):
"""
Takes a file handle to a .bson file and returns an iterator for each
doc in the file. This will not load all docs into memory.
with open('User.bson', 'rb') as bs:
active_users = filter(bson_iter(bs), "type", "active")
"""
while True:
size_str = bson_file.read(4)
if not len(size_str):
break
obj_size = struct.unpack("<i", size_str)[0]
obj = bson_file.read(obj_size - 4)
if obj[-1] != "\x00":
raise InvalidBSON("bad eoo")
yield bson._bson_to_dict(size_str + obj, dict, True)[0]
def _deep_get(obj, field):
parts = field.split(".")
if len(parts) == 1:
return obj.get(field)
    # Walk down through the intermediate documents one level at a time
    last_value = obj
    for part in parts[0:-1]:
        last_value = last_value.get(part)
        if not last_value:
            return False
if isinstance(last_value, dict):
return last_value.get(parts[-1])
else:
return getattr(last_value, parts[-1])
def groupby(iterator, field):
"""
    Returns a dictionary with the keys being the values of the field to
    group by, and the values a list of the docs in each group.
    This is useful for converting a list of docs into a dict by _id,
    for example.
"""
groups = {}
for k, g in itertools.groupby(iterator, lambda x: _deep_get(x, field)):
items = groups.setdefault(k, [])
for item in g:
items.append(item)
return groups
def filter(iterator, field, value):
"""
Takes an iterator and returns only the docs that have a field == value.
The field can be a nested field like a.b.c and it will descend into the
embedded documents.
"""
    # A generator expression replaces Python-2-only itertools.ifilter while
    # keeping the same lazy semantics.
    return (x for x in iterator if _deep_get(x, field) == value)
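
if __name__ == '__main__':
    # Hedged demo on plain dicts (no .bson file needed). Because groupby()
    # accumulates with setdefault, the input does not need to be pre-sorted
    # by the group field.
    docs = [{'type': 'active', 'meta': {'n': 1}},
            {'type': 'banned', 'meta': {'n': 2}},
            {'type': 'active', 'meta': {'n': 3}}]
    print(_deep_get(docs[0], 'meta.n'))          # 1
    print(sorted(groupby(docs, 'type')))         # ['active', 'banned']
    print(list(filter(docs, 'type', 'active')))  # the two active docs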
|
11486442
|
from __future__ import print_function
import numpy
import scipy
import scipy.linalg
import irbasis
from itertools import product
from .internal import *
from .two_point_basis import *
class ThreePoint(object):
def __init__(self, Lambda, beta, cutoff = 1e-8, augmented=True):
self._Lambda = Lambda
self._beta = beta
self._Bf = Basis(irbasis.load('F', Lambda), beta, cutoff)
if augmented:
self._Bb = Basis(augmented_basis_b(irbasis.load('B', Lambda)), beta, cutoff)
else:
self._Bb = Basis(irbasis.load('B', Lambda), beta, cutoff)
        # DG: the below is tantamount to using a larger cutoff
        # for one of the bases
self._Nl = min(self._Bf.dim, self._Bb.dim)
@property
def beta(self):
return self._beta
@property
def Nl(self):
return self._Nl
@property
def basis_beta_f(self):
return self._Bf
@property
def basis_beta_b(self):
return self._Bb
def normalized_S(self):
Nl = self._Nl
svec = numpy.zeros((3, Nl, Nl))
sf = numpy.array([self._Bf.Sl(l) / self._Bf.Sl(0) for l in range(Nl)])
sb = numpy.array([self._Bb.Sl(l) / self._Bb.Sl(0) for l in range(Nl)])
svec[0, :, :] = sf[:, None] * sf[None, :]
svec[1, :, :] = sb[:, None] * sf[None, :]
svec[2, :, :] = sf[:, None] * sb[None, :]
return svec
def projector_to_matsubara_vec(self, n1_n2_vec):
"""
Return a projector from IR to Matsubara frequencies
"""
n_f = []
n_b = []
for i in range(len(n1_n2_vec)):
n1 = n1_n2_vec[i][0]
n2 = n1_n2_vec[i][1]
n_f.append(n1)
n_f.append(n2)
n_b.append(n1 + n2 + 1)
self._Bf._precompute_Unl(n_f)
self._Bb._precompute_Unl(n_b)
r = []
for i in range(len(n1_n2_vec)):
r.append(self.projector_to_matsubara(n1_n2_vec[i][0], n1_n2_vec[i][1]))
return r
def projector_to_matsubara(self, n1, n2):
"""
Return a projector from IR to a Matsubara frequency
"""
M = numpy.zeros((3, self._Nl, self._Nl), dtype=complex)
# Note: with this signature, einsum does not actually perform any summation
M[0, :, :] = numpy.einsum('i,j->ij', self._get_Unl_f(n1), self._get_Unl_f(n2))
M[1, :, :] = numpy.einsum('i,j->ij', self._get_Unl_b(n1+n2+1), self._get_Unl_f(n2))
M[2, :, :] = numpy.einsum('i,j->ij', self._get_Unl_f(n1), self._get_Unl_b(n1+n2+1))
return M
def sampling_points_matsubara(self, whichl):
"""
Return sampling points in two-fermion-frequency convention
"""
sp_o_f = 2*sampling_points_matsubara(self._Bf, whichl) + 1
sp_o_b = 2*sampling_points_matsubara(self._Bb, whichl)
sp_o = []
Nf = len(sp_o_f)
Nb = len(sp_o_b)
# Fermion, Fermion
for i, j in product(range(Nf), repeat=2):
sp_o.append((sp_o_f[i], sp_o_f[j]))
# Boson, Fermion
for i, j in product(range(Nb), range(Nf)):
o1 = sp_o_b[i] - sp_o_f[j]
o2 = sp_o_f[j]
sp_o.append((o1, o2))
sp_o.append((o2, o1))
conv = lambda x: tuple(map(o_to_matsubara_idx_f, x))
return list(map(conv, list(set(sp_o))))
def _get_Unl_f(self, n):
return self._Bf.compute_Unl([n])[:,0:self._Nl].reshape((self._Nl))
def _get_Unl_b(self, n):
return self._Bb.compute_Unl([n])[:,0:self._Nl].reshape((self._Nl))
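
if __name__ == '__main__':
    # Hedged usage sketch; assumes the irbasis data files for Lambda=1e3 are
    # available with the irbasis package. Values are illustrative only.
    tp = ThreePoint(Lambda=1e3, beta=10.0)
    # One projector per mixed representation, each block of shape (Nl, Nl),
    # for the fermionic frequency pair (n1, n2) = (0, 0).
    M = tp.projector_to_matsubara(0, 0)
    print(M.shape, tp.Nl)  # (3, Nl, Nl), Nl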
|
11486445
|
import tensorflow as tf
from tfsnippet.utils import (add_name_arg_doc, is_tensor_object, get_shape,
InputSpec, get_static_shape)
from .shape_utils import broadcast_to_shape
from .type_utils import convert_to_tensor_and_cast
__all__ = ['pixelcnn_2d_sample']
@add_name_arg_doc
def pixelcnn_2d_sample(fn, inputs, height, width, channels_last=True,
start=0, end=None, back_prop=False,
parallel_iterations=1, swap_memory=False,
name=None):
"""
Sample output from a PixelCNN 2D network, pixel-by-pixel.
Args:
fn: `(i: tf.Tensor, inputs: tuple[tf.Tensor]) -> tuple[tf.Tensor]`,
the function to derive the outputs of PixelCNN 2D network at
iteration `i`. `inputs` are the pixel-by-pixel outputs gathered
through iteration `0` to iteration `i - 1`. The iteration index
`i` may range from `0` to `height * width - 1`.
inputs (Iterable[tf.Tensor]): The initial input tensors.
All the tensors must be at least 4-d, with identical shape.
height (int or tf.Tensor): The height of the outputs.
width (int or tf.Tensor): The width of the outputs.
        channels_last (bool): Whether or not the channel axis is the last
            axis in `input` (i.e., the data format is "NHWC").
start (int or tf.Tensor): The start iteration, default `0`.
end (int or tf.Tensor): The end (exclusive) iteration.
Default `height * width`.
back_prop, parallel_iterations, swap_memory: Arguments passed to
:func:`tf.while_loop`.
Returns:
tuple[tf.Tensor]: The final outputs.
"""
from tfsnippet.layers.convolutional.utils import validate_conv2d_input
# check the arguments
def to_int(t):
if is_tensor_object(t):
return convert_to_tensor_and_cast(t, dtype=tf.int32)
return int(t)
height = to_int(height)
width = to_int(width)
inputs = list(inputs)
if not inputs:
raise ValueError('`inputs` must not be empty.')
inputs[0], _, _ = validate_conv2d_input(
inputs[0], channels_last=channels_last, arg_name='inputs[0]')
input_spec = InputSpec(shape=get_static_shape(inputs[0]))
for i, input in enumerate(inputs[1:], 1):
inputs[i] = input_spec.validate('inputs[{}]'.format(i), input)
# do pixelcnn sampling
with tf.name_scope(name, default_name='pixelcnn_2d_sample', values=inputs):
# the total size, start and end index
total_size = height * width
start = convert_to_tensor_and_cast(start, dtype=tf.int32)
if end is None:
end = convert_to_tensor_and_cast(total_size, dtype=tf.int32)
else:
end = convert_to_tensor_and_cast(end, dtype=tf.int32)
# the mask shape
if channels_last:
mask_shape = [height, width, 1]
else:
mask_shape = [height, width]
if any(is_tensor_object(t) for t in mask_shape):
mask_shape = tf.stack(mask_shape, axis=0)
# the input dynamic shape
input_shape = get_shape(inputs[0])
# the pixelcnn sampling loop
def loop_cond(idx, _):
return idx < end
def loop_body(idx, inputs):
inputs = tuple(inputs)
# prepare for the output mask
selector = tf.reshape(
tf.concat(
[tf.ones([idx], dtype=tf.uint8),
tf.zeros([1], dtype=tf.uint8),
tf.ones([total_size - idx - 1], dtype=tf.uint8)],
axis=0
),
mask_shape
)
selector = tf.cast(broadcast_to_shape(selector, input_shape),
dtype=tf.bool)
# obtain the outputs
outputs = list(fn(idx, inputs))
if len(outputs) != len(inputs):
raise ValueError('The length of outputs != inputs: {} vs {}'.
format(len(outputs), len(inputs)))
# mask the outputs
for i, (input, output) in enumerate(zip(inputs, outputs)):
input_dtype = inputs[i].dtype.base_dtype
output_dtype = output.dtype.base_dtype
if output_dtype != input_dtype:
raise TypeError(
'`outputs[{idx}].dtype` != `inputs[{idx}].dtype`: '
'{output} vs {input}'.
format(idx=i, output=output_dtype, input=input_dtype)
)
outputs[i] = tf.where(selector, input, output)
return idx + 1, tuple(outputs)
i0 = start
_, outputs = tf.while_loop(
cond=loop_cond,
body=loop_body,
loop_vars=(i0, tuple(inputs)),
back_prop=back_prop,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
)
return outputs
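
if __name__ == '__main__':
    # Hedged usage sketch: `fn` below is an identity stand-in, not a real
    # PixelCNN; a real `fn` would run the network and sample pixel `i`.
    # It only illustrates the calling convention described in the docstring.
    images = tf.zeros([2, 8, 8, 1])
    outputs = pixelcnn_2d_sample(
        lambda i, inputs: inputs,  # hypothetical network stand-in
        [images], height=8, width=8, channels_last=True,
    )
    with tf.Session() as sess:
        print(sess.run(outputs)[0].shape)  # (2, 8, 8, 1)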
|
11486509
|
def _MakeType(name):
return _TypeMeta(name, (Type,), {})
class _TypeMeta(type):
def __getitem__(self, args):
if not isinstance(args, tuple):
args = (args,)
name = '{}[{}]'.format(
str(self),
', '.join(map(str, args))
)
return _MakeType(name)
def __str__(self):
return self.__name__
__all__ = [
# Super-special typing primitives.
'Any',
'Callable',
'ClassVar',
'Generic',
'Optional',
'Tuple',
'Type',
'TypeVar',
'Union',
# ABCs (from collections.abc).
'AbstractSet', # collections.abc.Set.
'GenericMeta', # subclass of abc.ABCMeta and a metaclass
# for 'Generic' and ABCs below.
'ByteString',
'Container',
'ContextManager',
'Hashable',
'ItemsView',
'Iterable',
'Iterator',
'KeysView',
'Mapping',
'MappingView',
'MutableMapping',
'MutableSequence',
'MutableSet',
'Sequence',
'Sized',
'ValuesView',
# The following are added depending on presence
# of their non-generic counterparts in stdlib:
'Awaitable',
'AsyncIterator',
'AsyncIterable',
'Coroutine',
'Collection',
'AsyncGenerator',
# AsyncContextManager
# Structural checks, a.k.a. protocols.
'Reversible',
'SupportsAbs',
'SupportsBytes',
'SupportsComplex',
'SupportsFloat',
'SupportsInt',
'SupportsRound',
# Concrete collection types.
'Counter',
'Deque',
'Dict',
'DefaultDict',
'List',
'Set',
'FrozenSet',
'NamedTuple', # Not really a type.
'Generator',
# One-off things.
'AnyStr',
'cast',
'get_type_hints',
'NewType',
'no_type_check',
'no_type_check_decorator',
'overload',
'Text',
'TYPE_CHECKING',
'ChainMap',
'NoReturn',
]
def NewType(name, typ):
return _MakeType(name)
def TypeVar(name, *types, bound=None):
return _MakeType(name)
def cast(typ, val):
return val
def get_type_hints(obj, globals=None, locals=None):
return {}
def overload(function):
return function
def no_type_check(function):
return function
def no_type_check_decorator(function):
return function
TYPE_CHECKING = False
class Type(metaclass=_TypeMeta):
pass
class Any(Type):
pass
class Callable(Type):
pass
class ClassVar(Type):
pass
class Generic(Type):
pass
class Optional(Type):
pass
class Tuple(Type):
pass
class Union(Type):
pass
class AbstractSet(Type):
pass
class GenericMeta(Type):
pass
class ByteString(Type):
pass
class Container(Type):
pass
class ContextManager(Type):
pass
class Hashable(Type):
pass
class ItemsView(Type):
pass
class Iterable(Type):
pass
class Iterator(Type):
pass
class KeysView(Type):
pass
class Mapping(Type):
pass
class MappingView(Type):
pass
class MutableMapping(Type):
pass
class MutableSequence(Type):
pass
class MutableSet(Type):
pass
class Sequence(Type):
pass
class Sized(Type):
pass
class ValuesView(Type):
pass
class Awaitable(Type):
pass
class AsyncIterator(Type):
pass
class AsyncIterable(Type):
pass
class Coroutine(Type):
pass
class Collection(Type):
pass
class AsyncGenerator(Type):
pass
class AsyncContextManager(Type):
pass
class Reversible(Type):
pass
class SupportsAbs(Type):
pass
class SupportsBytes(Type):
pass
class SupportsComplex(Type):
pass
class SupportsFloat(Type):
pass
class SupportsInt(Type):
pass
class SupportsRound(Type):
pass
class Counter(Type):
pass
class Deque(Type):
pass
class Dict(Type):
pass
class DefaultDict(Type):
pass
class List(Type):
pass
class Set(Type):
pass
class FrozenSet(Type):
pass
class NamedTuple(Type):
pass
class Generator(Type):
pass
class AnyStr(Type):
pass
class Text(Type):
pass
class ChainMap(Type):
pass
class NoReturn(Type):
pass
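
if __name__ == '__main__':
    # Hedged demo of the stub's behaviour: subscripting any stub type just
    # mints a fresh, descriptively named type via _TypeMeta.__getitem__.
    print(Dict[Text, Any])         # Dict[Text, Any]
    print(Optional[List[Any]])     # Optional[List[Any]]
    print(NewType('UserId', Any))  # UserId (the target type is ignored)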
|
11486525
|
import json
from hbconfig import Config
from .route import MsgRouter
from .slack.resource import MsgResource
from .slack.slackbot import SlackerAdapter
from .utils.logger import Logger
class MsgListener(object):
def __init__(self) -> None:
self.router = MsgRouter()
self.slackbot = SlackerAdapter()
self.logger = Logger().get_logger()
def handle(self, msg: str) -> None:
self.msg = json.loads(msg)
self.handle_message()
self.handle_presence_change()
self.handle_dnd_change()
    def handle_message(self) -> None:
if self.is_message():
is_bot = self.is_bot()
if not self.is_self() and not is_bot:
self.handle_user_message()
elif is_bot and self.is_webhook():
self.handle_webhook_message()
else:
pass
    def handle_user_message(self) -> None:
try:
self.router.message_route(
text=self.msg["text"],
user=self.msg["user"],
channel=self.msg["channel"],
direct=self.is_direct(),
)
except Exception as e:
self.logger.error(f"USER Listener Error: {e}")
self.logger.exception("user")
self.slackbot.send_message(text=MsgResource.ERROR)
    def handle_webhook_message(self) -> None:
try:
self.router.message_route(
text=self.make_full_text(), direct=self.is_direct(), webhook=True
)
except Exception as e:
self.logger.error(f"Webhook Listener Error: {e}")
self.logger.exception("webhook")
self.slackbot.send_message(text=MsgResource.ERROR)
def is_message(self, msg=None) -> bool:
if msg is None:
msg = self.msg
msg_type = msg.get("type", None)
if msg_type == "message":
return True
else:
return False
def is_self(self, msg=None) -> bool:
if msg is None:
msg = self.msg
if msg.get("user", None) == self.slackbot.get_bot_id():
return True
else:
return False
def is_bot(self, msg=None) -> bool:
if msg is None:
msg = self.msg
if "bot_id" in msg:
return True
subtype = msg.get("subtype", None)
if subtype == "bot_message":
return True
if subtype == "message_changed":
message = msg.get("message", None)
if "bot_id" in message:
return True
return False
def is_webhook(self, msg=None) -> bool:
if msg is None:
msg = self.msg
if (
msg.get("username", None) == "IFTTT"
or self.msg.get("username", None) == "incoming-webhook"
):
return True
else:
return False
def is_direct(self, msg=None) -> bool:
if msg is None:
msg = self.msg
text = msg.get("text", "$#")
channel = msg.get("channel", "")
slack_bot_id = self.slackbot.get_bot_id()
if (
f"<@{slack_bot_id}>" in text
or channel.startswith("D")
or any(
[
text.lower().startswith(t.lower())
for t in Config.bot.get("TRIGGER", ["키노야", "Hey kino"])
]
)
):
return True
else:
return False
def make_full_text(self) -> str:
if self.msg.get("text", None):
return self.msg["text"]
else:
text = ""
for attachment in self.msg["attachments"]:
text += attachment["text"]
return text
def parse_attachments(self):
pass
    def handle_presence_change(self) -> None:
if self.is_presence() and not self.is_self():
try:
self.router.presence_route(
user=self.msg["user"], presence=self.msg["presence"]
)
except Exception as e:
self.logger.error(f"Presence Listener Error: {e}")
self.logger.exception("presence")
self.slackbot.send_message(text=MsgResource.ERROR)
def is_presence(self, msg=None) -> bool:
if msg is None:
msg = self.msg
msg_type = msg.get("type", None)
if msg_type == "presence_change":
return True
else:
return False
    def handle_dnd_change(self) -> None:
if self.is_dnd_updated_user() and not self.is_self():
try:
self.router.dnd_route(dnd=self.msg["dnd_status"])
except Exception as e:
self.logger.error(f"dnd_change Listener Error: {e}")
self.logger.exception("dnd")
self.slackbot.send_message(text=MsgResource.ERROR)
def is_dnd_updated_user(self, msg=None) -> bool:
if msg is None:
msg = self.msg
msg_type = msg.get("type", None)
if msg_type == "dnd_updated_user":
return True
else:
return False
# TODO : user_change ex) 'status_text': 'In a meeting'
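# A minimal example of the kind of Slack RTM event this listener handles
# (illustrative payload, not taken from the original project; only the keys
# actually read above — type, user, channel, text — are shown):
#
#   {"type": "message", "user": "U123", "channel": "D456", "text": "Hey kino status"}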
|
11486571
|
from .base import AugerBaseApi
from ..exceptions import AugerException
class AugerReviewAlertItemApi(AugerBaseApi):
"""Auger Review Alert Item API."""
def __init__(self, ctx, endpoint_api):
super(AugerReviewAlertItemApi, self).__init__(ctx, endpoint_api)
|
11486608
|
import torch
from .losses import Loss
def bt_loss(h1: torch.Tensor, h2: torch.Tensor, lambda_, batch_norm=True, eps=1e-15, *args, **kwargs):
batch_size = h1.size(0)
feature_dim = h1.size(1)
if lambda_ is None:
lambda_ = 1. / feature_dim
if batch_norm:
z1_norm = (h1 - h1.mean(dim=0)) / (h1.std(dim=0) + eps)
z2_norm = (h2 - h2.mean(dim=0)) / (h2.std(dim=0) + eps)
c = (z1_norm.T @ z2_norm) / batch_size
else:
c = h1.T @ h2 / batch_size
    off_diagonal_mask = ~torch.eye(feature_dim, dtype=torch.bool, device=c.device)
loss = (1 - c.diagonal()).pow(2).sum()
loss += lambda_ * c[off_diagonal_mask].pow(2).sum()
return loss
class BarlowTwins(Loss):
def __init__(self, lambda_: float = None, batch_norm: bool = True, eps: float = 1e-5):
self.lambda_ = lambda_
self.batch_norm = batch_norm
self.eps = eps
def compute(self, anchor, sample, pos_mask, neg_mask, *args, **kwargs) -> torch.FloatTensor:
loss = bt_loss(anchor, sample, self.lambda_, self.batch_norm, self.eps)
return loss.mean()
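# A minimal usage sketch (illustrative; assumes two augmented views already
# embedded into [batch_size, feature_dim] tensors):
#
#   h1, h2 = torch.randn(32, 128), torch.randn(32, 128)
#   loss = bt_loss(h1, h2, lambda_=None)  # lambda_ defaults to 1/feature_dim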
|
11486616
|
import sys
import os
def init(cfg, args):
import ete3
ncbi = ete3.NCBITaxa()
ncbi.update_taxonomy_database()
    print('NCBI Taxonomy database is installed in {}.'.format(ncbi.dbfile))
|
11486617
|
import pytest
import os
import sys
import sqlite3
from primrose.configuration.configuration import Configuration
from primrose.readers.sqlite_reader import SQLiteReader
from primrose.data_object import DataObject, DataObjectResponseType
def test_necessary_config():
assert isinstance(SQLiteReader.necessary_config({}), set)
assert len(SQLiteReader.necessary_config({})) == 2
def test_read():
config = {
"implementation_config": {
"reader_config": {
"mynode": {
"class": "SQLiteReader",
"filename": "test/test_sqlite.db",
"query_json": [{"query": "test/test_sqlite.sql"}],
"destinations": [],
}
}
}
}
filename = "test/test_sqlite.db"
if os.path.exists(filename):
os.remove(filename)
conn = sqlite3.connect(filename)
c = conn.cursor()
c.execute("create table test(firstname text, lastname text);")
c.execute(
"insert into test(firstname, lastname) values('joe', 'doe'), ('mary','poppins');"
)
conn.commit()
conn.close()
configuration = Configuration(
config_location=None, is_dict_config=True, dict_config=config
)
reader = SQLiteReader(configuration, "mynode")
data_object = DataObject(configuration)
data_object, terminate = reader.run(data_object)
assert not terminate
dd = data_object.get("mynode", rtype=DataObjectResponseType.KEY_VALUE.value)
assert "query_0" in dd
df = dd["query_0"]
assert df is not None
assert df.shape == (2, 2)
if os.path.exists(filename):
os.remove(filename)
|
11486644
|
from flask import Flask, jsonify
app = Flask(__name__)
@app.route('/')
def hello_world():
print('receive request')
return jsonify({'content': 'hello world'})
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8891)
|
11486662
|
import import_declare_test
import sys
import json
from splunklib import modularinput as smi
class EXAMPLE_INPUT_TWO(smi.Script):
def __init__(self):
super(EXAMPLE_INPUT_TWO, self).__init__()
def get_scheme(self):
scheme = smi.Scheme('example_input_two')
scheme.description = 'Example Input Two'
scheme.use_external_validation = True
scheme.streaming_mode_xml = True
scheme.use_single_instance = True
scheme.add_argument(
smi.Argument(
'name',
title='Name',
description='Name',
required_on_create=True
)
)
scheme.add_argument(
smi.Argument(
'interval',
required_on_create=True,
)
)
scheme.add_argument(
smi.Argument(
'account',
required_on_create=True,
)
)
scheme.add_argument(
smi.Argument(
'input_two_multiple_select',
required_on_create=True,
)
)
scheme.add_argument(
smi.Argument(
'input_two_checkbox',
required_on_create=False,
)
)
scheme.add_argument(
smi.Argument(
'input_two_radio',
required_on_create=False,
)
)
scheme.add_argument(
smi.Argument(
'use_existing_checkpoint',
required_on_create=False,
)
)
scheme.add_argument(
smi.Argument(
'start_date',
required_on_create=False,
)
)
scheme.add_argument(
smi.Argument(
'example_help_link',
required_on_create=False,
)
)
return scheme
def validate_input(self, definition):
return
def stream_events(self, inputs, ew):
input_items = [{'count': len(inputs.inputs)}]
for input_name, input_item in inputs.inputs.items():
input_item['name'] = input_name
input_items.append(input_item)
event = smi.Event(
data=json.dumps(input_items),
sourcetype='example_input_two',
)
ew.write_event(event)
if __name__ == '__main__':
exit_code = EXAMPLE_INPUT_TWO().run(sys.argv)
sys.exit(exit_code)
|
11486676
|
from .strategy_logger import RLStrategyLogger
from .interactive_logging import TqdmWriteInteractiveLogger
from .tensorboard_logger import TensorboardLogger
__all__ = ["RLStrategyLogger",
"TqdmWriteInteractiveLogger", "TensorboardLogger"]
|
11486693
|
from decimal import Decimal
from unittest import TestCase
from src.NumberUtils import value_to_string, value_to_decimal, value_to_scaled_integer, scaled_integer_to_decimal
class TestNumberUtils(TestCase):
def test_value_to_decimal_none(self):
self.assertEqual(None, value_to_decimal(None))
def test_value_to_decimal_string(self):
self.assertEqual(Decimal('3.251'), value_to_decimal('3.251'))
self.assertEqual(Decimal('3.3'), value_to_decimal('3.251', 1))
def test_value_to_decimal_decimal(self):
self.assertEqual(Decimal('3.251'), value_to_decimal(Decimal('3.251')))
self.assertEqual(Decimal('3.3'), value_to_decimal(Decimal('3.251'), 1))
def test_value_to_decimal_float(self):
# self.assertEqual(Decimal('3.251'), value_to_decimal(3.251)) # fail: 3.2509999999999998898658759572
self.assertEqual(Decimal('3.251'), value_to_decimal(3.251, 3))
self.assertEqual(Decimal('3.3'), value_to_decimal(3.251, 1))
def test_value_to_decimal_integer(self):
        self.assertEqual(Decimal('3'), value_to_decimal(3))
        self.assertEqual(Decimal('0'), value_to_decimal(0))
def test_value_to_decimal_invalid_empty(self):
self.assertRaises(ValueError, value_to_decimal, '')
def test_value_to_decimal_invalid_multiple_points(self):
self.assertRaises(ValueError, value_to_decimal, '3.25.1')
def test_value_to_decimal_invalid_comma(self):
self.assertRaises(ValueError, value_to_decimal, '3,251')
def test_value_to_string_none(self):
self.assertEqual(None, value_to_string(None, 10))
def test_value_to_string_string(self):
self.assertEqual('3.2510000000000000000000000000', value_to_string('3.251')) # 28 decimals is default
self.assertEqual('0', value_to_string('0.3', 0))
self.assertEqual('3.3', value_to_string('3.251', 1))
def test_value_to_string_float(self):
# self.assertEqual('3.251', value_to_string(3.251)) # fail: 3.2509999999999998898658759572
self.assertEqual('3.251', value_to_string(3.251, 3))
self.assertEqual('3.3', value_to_string(3.251, 1))
def test_value_to_string(self):
self.assertEqual('3.2510000000000000000000000000', value_to_string(Decimal('3.251'))) # 28 decimals is default
self.assertEqual('3.3', value_to_string(Decimal('3.251'), 1))
def test_value_to_scaled_integer_none(self):
self.assertEqual(None, value_to_scaled_integer(None))
def test_value_to_scaled_integer(self):
self.assertEqual(32510000000000000000000000000, value_to_scaled_integer('3.251')) # 28 decimals is default
self.assertEqual(3251, value_to_scaled_integer('3.251', 3))
self.assertEqual(33, value_to_scaled_integer('3.251', 1))
def test_scaled_integer_to_decimal_none(self):
self.assertEqual(None, scaled_integer_to_decimal(None))
def test_scaled_integer_to_decimal(self):
self.assertEqual(Decimal('3.251'),
scaled_integer_to_decimal(32510000000000000000000000000)) # 28 decimals is default
self.assertEqual(Decimal('3.251'), scaled_integer_to_decimal(3251, 3))
self.assertEqual(Decimal('0.251'), scaled_integer_to_decimal(251, 3))
self.assertEqual(Decimal('0'), scaled_integer_to_decimal(0, 2))
def test_scaled_integer_to_decimal_round(self):
        # self.assertEqual(33, value_to_scaled_integer(3.25, 1)) # fail
self.assertEqual(33, value_to_scaled_integer(3.251, 1))
|
11486698
|
from .common import random_str
import time
def test_system_app_creator(admin_mc, admin_system_pc, remove_resource):
client = admin_mc.client
provider_name = random_str()
access = random_str()
secret = random_str()
globaldns_provider = \
client.create_global_dns_provider(
name=provider_name,
rootDomain="example.com",
route53ProviderConfig={
'accessKey': access,
'secretKey': secret})
remove_resource(globaldns_provider)
app = wait_for_system_app(
admin_system_pc.client,
"systemapp-"+globaldns_provider.name)
    # the creator ID of a system app is not exposed via the API
assert app.creatorId != globaldns_provider.creatorId
def wait_for_system_app(client, name, timeout=60):
start = time.time()
interval = 0.5
apps = client.list_app(name=name)
while len(apps.data) != 1:
if time.time() - start > timeout:
print(apps)
            raise Exception('Timeout waiting for system app')
time.sleep(interval)
interval *= 2
apps = client.list_app(name=name)
return apps.data[0]
|
11486724
|
import numpy as np
from pathlib import Path
import warnings
from pynwb import NWBHDF5IO, validate, TimeSeries
from pynwb.image import ImageSeries
from pynwb.testing import TestCase
class TestReadOldVersions(TestCase):
expected_errors = {
'1.0.2_str_experimenter.nwb': [("root/general/experimenter (general/experimenter): incorrect shape - expected "
"an array of shape '[None]', got non-array data 'one experimenter'")],
'1.0.3_str_experimenter.nwb': [("root/general/experimenter (general/experimenter): incorrect shape - expected "
"an array of shape '[None]', got non-array data 'one experimenter'")],
'1.0.2_str_pub.nwb': [("root/general/related_publications (general/related_publications): incorrect shape "
"- expected an array of shape '[None]', got non-array data 'one publication'")],
'1.0.3_str_pub.nwb': [("root/general/related_publications (general/related_publications): incorrect shape "
"- expected an array of shape '[None]', got non-array data 'one publication'")],
}
def test_read(self):
"""Test reading and validating all NWB files in the same folder as this file.
This folder contains NWB files generated by previous versions of NWB using the script
src/pynwb/testing/make_test_files.py
"""
dir_path = Path(__file__).parent
nwb_files = dir_path.glob('*.nwb')
for f in nwb_files:
with self.subTest(file=f.name):
with NWBHDF5IO(str(f), 'r') as io:
errors = validate(io)
io.read()
if errors:
for e in errors:
if f.name in self.expected_errors and str(e) not in self.expected_errors[f.name]:
warnings.warn('%s: %s' % (f.name, e))
# TODO uncomment below when validation errors have been fixed
# raise Exception('%d validation error(s). See warnings.' % len(errors))
def test_read_timeseries_no_data(self):
"""Test that a TimeSeries written without data is read with data set to the default value."""
f = Path(__file__).parent / '1.5.1_timeseries_no_data.nwb'
with NWBHDF5IO(str(f), 'r') as io:
read_nwbfile = io.read()
np.testing.assert_array_equal(read_nwbfile.acquisition['test_timeseries'].data, TimeSeries.DEFAULT_DATA)
def test_read_timeseries_no_unit(self):
"""Test that an ImageSeries written without unit is read with unit set to the default value."""
f = Path(__file__).parent / '1.5.1_timeseries_no_unit.nwb'
with NWBHDF5IO(str(f), 'r') as io:
read_nwbfile = io.read()
self.assertEqual(read_nwbfile.acquisition['test_timeseries'].unit, TimeSeries.DEFAULT_UNIT)
def test_read_imageseries_no_data(self):
"""Test that an ImageSeries written without data is read with data set to the default value."""
f = Path(__file__).parent / '1.5.1_imageseries_no_data.nwb'
with NWBHDF5IO(str(f), 'r') as io:
read_nwbfile = io.read()
np.testing.assert_array_equal(read_nwbfile.acquisition['test_imageseries'].data, ImageSeries.DEFAULT_DATA)
def test_read_imageseries_no_unit(self):
"""Test that an ImageSeries written without unit is read with unit set to the default value."""
f = Path(__file__).parent / '1.5.1_imageseries_no_unit.nwb'
with NWBHDF5IO(str(f), 'r') as io:
read_nwbfile = io.read()
self.assertEqual(read_nwbfile.acquisition['test_imageseries'].unit, ImageSeries.DEFAULT_UNIT)
|
11486742
|
import oneflow as flow
import logging
from otrans.data import PAD, BLK, EOS
from otrans.recognize.base import Recognizer
class CTCRecognizer(Recognizer):
def __init__(
self,
model,
lm=None,
lm_weight=0.1,
ngram_lm=None,
beam_width=5,
idx2unit=None,
ngpu=1,
mode="greedy",
alpha=0.1,
beta=0.0,
):
super().__init__(model, idx2unit, lm, lm_weight, ngpu)
self.beam_width = beam_width
self.mode = mode
if self.mode == "beam":
import ctcdecode_edited as ctcdecode
# import ctcdecode
vocab_list = [self.idx2unit[i] for i in range(len(idx2unit))]
self.ctcdecoder = ctcdecode.CTCBeamDecoder(
vocab_list,
beam_width=self.beam_width,
blank_id=BLK,
model_path=ngram_lm,
alpha=alpha,
beta=beta,
log_probs_input=True,
num_processes=10,
)
def recognize(self, inputs, inputs_length):
if self.mode == "greedy":
results = self.recognize_greedy(inputs, inputs_length)
elif self.mode == "beam":
results = self.recognize_beam(inputs, inputs_length)
else:
            raise ValueError(f"unsupported recognize mode: {self.mode}")
return self.translate(results)
def recognize_greedy(self, inputs, inputs_length):
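        # Greedy CTC-style decoding: take the arg-max token per frame, then
        # collapse consecutive repeats and skip PAD tokens.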
log_probs, length = self.model.inference(inputs, inputs_length)
_, preds = log_probs.topk(self.beam_width, dim=-1)
results = []
for b in range(log_probs.size(0)):
pred = []
last_k = PAD
for i in range(int(length[b])):
k = int(preds[b][i][0])
if k == last_k or k == PAD:
last_k = k
continue
else:
last_k = k
pred.append(k)
results.append(pred)
return results
def recognize_beam(self, inputs, inputs_length):
log_probs, length = self.model.inference(inputs, inputs_length)
beam_results, beam_scores, _, out_seq_len = self.ctcdecoder.decode(
log_probs.cpu(), seq_lens=length.cpu()
)
best_results = beam_results[:, 0]
batch_length = out_seq_len[:, 0]
results = []
for b in range(log_probs.size(0)):
length = int(batch_length[b])
tokens = [int(i) for i in best_results[b, :length]]
results.append(tokens)
return results
|
11486781
|
import ast
from typing import cast
from typing import Iterable
from typing import List
from typing import Tuple
from typing import Type
from typing import Union
from tokenize_rt import Offset
from tokenize_rt import Token
from pyupgrade._ast_helpers import ast_to_offset
from pyupgrade._ast_helpers import is_name_attr
from pyupgrade._data import register
from pyupgrade._data import State
from pyupgrade._data import TokenFunc
from pyupgrade._data import Version
from pyupgrade._token_helpers import Block
def _find_if_else_block(tokens: List[Token], i: int) -> Tuple[Block, Block]:
if_block = Block.find(tokens, i)
i = if_block.end
while tokens[i].src != 'else':
i += 1
else_block = Block.find(tokens, i, trim_end=True)
return if_block, else_block
def _find_elif(tokens: List[Token], i: int) -> int:
while tokens[i].src != 'elif': # pragma: no cover (only for <3.8.1)
i -= 1
return i
def _fix_py3_block(i: int, tokens: List[Token]) -> None:
if tokens[i].src == 'if':
if_block = Block.find(tokens, i)
if_block.dedent(tokens)
del tokens[if_block.start:if_block.block]
else:
if_block = Block.find(tokens, _find_elif(tokens, i))
if_block.replace_condition(tokens, [Token('NAME', 'else')])
def _fix_py2_block(i: int, tokens: List[Token]) -> None:
if tokens[i].src == 'if':
if_block, else_block = _find_if_else_block(tokens, i)
else_block.dedent(tokens)
del tokens[if_block.start:else_block.block]
else:
j = _find_elif(tokens, i)
if_block, else_block = _find_if_else_block(tokens, j)
del tokens[if_block.start:else_block.start]
def _fix_py3_block_else(i: int, tokens: List[Token]) -> None:
if tokens[i].src == 'if':
if_block, else_block = _find_if_else_block(tokens, i)
if_block.dedent(tokens)
del tokens[if_block.end:else_block.end]
del tokens[if_block.start:if_block.block]
else:
j = _find_elif(tokens, i)
if_block, else_block = _find_if_else_block(tokens, j)
del tokens[if_block.end:else_block.end]
if_block.replace_condition(tokens, [Token('NAME', 'else')])
def _eq(test: ast.Compare, n: int) -> bool:
return (
isinstance(test.ops[0], ast.Eq) and
isinstance(test.comparators[0], ast.Num) and
test.comparators[0].n == n
)
def _compare_to_3(
test: ast.Compare,
op: Union[Type[ast.cmpop], Tuple[Type[ast.cmpop], ...]],
minor: int = 0,
) -> bool:
if not (
isinstance(test.ops[0], op) and
isinstance(test.comparators[0], ast.Tuple) and
len(test.comparators[0].elts) >= 1 and
all(isinstance(n, ast.Num) for n in test.comparators[0].elts)
):
return False
# checked above but mypy needs help
ast_elts = cast('List[ast.Num]', test.comparators[0].elts)
# padding a 0 for compatibility with (3,) used as a spec
elts = tuple(e.n for e in ast_elts) + (0,)
return elts[:2] == (3, minor) and all(n == 0 for n in elts[2:])
@register(ast.If)
def visit_If(
state: State,
node: ast.If,
parent: ast.AST,
) -> Iterable[Tuple[Offset, TokenFunc]]:
min_version: Version
if state.settings.min_version == (3,):
min_version = (3, 0)
else:
min_version = state.settings.min_version
assert len(min_version) >= 2
if (
min_version >= (3,) and (
# if six.PY2:
is_name_attr(node.test, state.from_imports, 'six', ('PY2',)) or
# if not six.PY3:
(
isinstance(node.test, ast.UnaryOp) and
isinstance(node.test.op, ast.Not) and
is_name_attr(
node.test.operand,
state.from_imports,
'six',
('PY3',),
)
) or
# sys.version_info == 2 or < (3,)
# or < (3, n) or <= (3, n) (with n<m)
(
isinstance(node.test, ast.Compare) and
is_name_attr(
node.test.left,
state.from_imports,
'sys',
('version_info',),
) and
len(node.test.ops) == 1 and (
_eq(node.test, 2) or
_compare_to_3(node.test, ast.Lt, min_version[1]) or
any(
_compare_to_3(node.test, (ast.Lt, ast.LtE), minor)
for minor in range(min_version[1])
)
)
)
)
):
if node.orelse and not isinstance(node.orelse[0], ast.If):
yield ast_to_offset(node), _fix_py2_block
elif (
min_version >= (3,) and (
# if six.PY3:
is_name_attr(node.test, state.from_imports, 'six', ('PY3',)) or
# if not six.PY2:
(
isinstance(node.test, ast.UnaryOp) and
isinstance(node.test.op, ast.Not) and
is_name_attr(
node.test.operand,
state.from_imports,
'six',
('PY2',),
)
) or
# sys.version_info == 3 or >= (3,) or > (3,)
# sys.version_info >= (3, n) (with n<=m)
# or sys.version_info > (3, n) (with n<m)
(
isinstance(node.test, ast.Compare) and
is_name_attr(
node.test.left,
state.from_imports,
'sys',
('version_info',),
) and
len(node.test.ops) == 1 and (
_eq(node.test, 3) or
_compare_to_3(node.test, (ast.Gt, ast.GtE)) or
_compare_to_3(node.test, ast.GtE, min_version[1]) or
any(
_compare_to_3(node.test, (ast.Gt, ast.GtE), minor)
for minor in range(min_version[1])
)
)
)
)
):
if node.orelse and not isinstance(node.orelse[0], ast.If):
yield ast_to_offset(node), _fix_py3_block_else
elif not node.orelse:
yield ast_to_offset(node), _fix_py3_block
|
11486797
|
import json
import re
from datetime import datetime
from typing import NamedTuple, Iterable
import os
import itertools
class TvShow(NamedTuple):
file_path: str
show: str
network: str
recorded_datetime: datetime
def _parse_dir(dir) -> Iterable[TvShow]:
    pattern = re.compile(r'(.+)_(.+)_(\d{4})_(\d\d)_(\d\d)_(\d\d)_(\d\d)_(\d\d)\.wtv')
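    # Expected filename shape (illustrative example, not from the original source):
    #   "MyShow_MyNetwork_2020_01_31_20_30_00.wtv"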
for file in os.listdir(dir):
m = pattern.search(file)
if m:
filepath = os.path.join(dir, file)
show = m.group(1)
network = m.group(2)
recorded_datetime = datetime(year=int(m.group(3)), month=int(m.group(4)), day=int(m.group(5)),
hour=int(m.group(6)), minute=int(m.group(7)), second=int(m.group(8)))
yield TvShow(filepath, show, network, recorded_datetime)
def run_delete(config_file, dry_run=True):
"""
Deletes WTV files in a directory based on a configuration file
:param config_file:
:param dry_run:
:return:
"""
with open(config_file, 'r') as file:
config = json.load(file)
dir = config['directory']
tv_shows_config = config['tv_shows']
key = lambda x: x.show
for show, files in itertools.groupby(sorted(_parse_dir(dir), key=key), key=key):
if show in tv_shows_config:
files = list(files)
max_count = tv_shows_config[show]
num_to_delete = len(files) - max_count
if max_count > 0 and num_to_delete > 0:
files.sort(key=lambda x: x.recorded_datetime)
to_delete = files[:num_to_delete]
for d in to_delete:
print('{}'.format(os.path.basename(d.file_path)))
if not dry_run:
os.remove(d.file_path)
|
11486828
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.datasets import make_moons, make_circles
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import f1_score
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
class Adaboost_Demonstration:
def __init__(self, X, y, learning_rate=1.):
"""
输入的X为N*2矩阵, y为一维向量, y的值只能取1或-1
:param X: 数据点
:param y: 数据点标记
"""
self.X = X
self.y = y
        # Apply a decay (learning rate) to each weak classifier to avoid overfitting
self.learning_rate = learning_rate
        # Number of samples
self.num_samples = len(self.X)
        # Initialize the sample weights uniformly
self.sample_weight = np.full(self.num_samples, 1 / self.num_samples)
        # Python list storing all the weak classifier objects
self.classifiers = []
        # Error rate recorded at every step
self.errors_list = []
        # Weights (alphas) of the weak classifiers. The weak classifier itself is
        # sklearn's DecisionTreeClassifier with max_depth=1, i.e. a one-level tree
        # known as a decision stump (see __next__).
        self.alphas = []
def predict(self, data=None, labels=None, reduction="sign"):
"""
预测数据点的分类
:param reduction: "sign"对弱分类的线性加权组合取符号, "mean"取平均
"""
if data is None:
data = self.X
labels = self.y
        # Weighted linear combination of the weak classifiers' outputs
predictions = np.zeros([len(data)]).astype("float")
for classifier, alpha in zip(self.classifiers, self.alphas):
predictions += alpha * classifier.predict(data)
        # Take the sign of the result
if reduction == "sign":
predictions = np.sign(predictions)
        # Average the result
elif reduction == "mean":
predictions /= len(self.classifiers)
        # Report the F1 score when labels are available
if labels is not None and reduction == "sign":
f1 = f1_score(predictions, labels)
return predictions, f1
else:
return predictions
def contour_plot(self, data=None, labels=None, interval=0.2, title="adaboost",
mode="3d"):
"""
等高线图可视化
:param interval: 等高线图网格的间隔
:param title: 等高线图的标题
:param mode: 可选3D或2D可视化
"""
if data is None:
data = self.X
labels = self.y
if labels is None:
labels = np.ones([len(data)])
        # Build the mesh grid
x_min, x_max = data[:, 0].min() - .5, data[:, 0].max() + .5
y_min, y_max = data[:, 1].min() - .5, data[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, interval), np.arange(y_min, y_max, interval))
        # Stack the grid's X and Y coordinates for the contour computation
X_grid = np.concatenate([np.expand_dims(np.ravel(xx), axis=-1),
np.expand_dims(np.ravel(yy), axis=-1)], axis=-1)
        # X_grid has shape [batch (number of grid points), 2]
        # Compute the decision boundary (contour values)
Z_grid = self.predict(data=X_grid, reduction="mean")
Z_grid = Z_grid.reshape(xx.shape)
        # Visualization
if mode == "3d":
            # Scatter plot of the data points
scatter = go.Scatter3d(x=data[:, 0], y=data[:, 1], z=self.predict(data=data, reduction="mean"),
mode='markers',
marker=dict(color=labels, size=5, symbol='circle',
line=dict(color='rgb(204, 204, 204)', width=1),
opacity=0.9))
            # 3D contour surface
surface = go.Surface(x=xx, y=yy, z=Z_grid, opacity=0.9)
plot_data = [scatter, surface]
layout = go.Layout(title=title)
            # Set the camera viewpoint
camera = dict(up=dict(x=0, y=0, z=1),
center=dict(x=0, y=0, z=0),
eye=dict(x=1, y=1, z=0.8))
fig = go.Figure(data=plot_data, layout=layout)
fig['layout'].update(scene=dict(camera=camera))
iplot(fig, image="png", filename=title)
if mode == "2d":
            # Contours
plt.contourf(xx, yy, Z_grid, cmap=plt.cm.RdBu, alpha=.8)
            # Scatter points
plt.scatter(data[:, 0], data[:, 1], c=labels,
cmap=ListedColormap(['#FF0000', '#0000FF']), edgecolors='k')
plt.title(title)
plt.show()
def __next__(self, reduction="mean", plot=True, plot_mode="2d"):
        # Define the weak classifier (a decision stump)
# classifier = DecisionTreeClassifier(
# max_depth=2,min_samples_split=20,
# min_samples_leaf=5)
classifier = DecisionTreeClassifier(max_depth=1)
        # Fit the weak classifier on the weighted samples
classifier.fit(self.X, self.y, sample_weight=self.sample_weight)
        # The weak classifier's predictions on the data, i.e. h(x)
predictions = classifier.predict(self.X)
        # Compute the weighted error rate
        error_rate = np.average(predictions != self.y, weights=self.sample_weight)
        # Compute alpha = learning_rate * 0.5 * ln((1 - error) / error)
alpha = self.learning_rate * (np.log((1 - error_rate) / error_rate)) / 2
        # Update the sample weights for step t+1: w *= exp(-alpha * y * h(x))
self.sample_weight *= np.exp(-alpha * self.y * predictions)
        # Normalize; the normalization factor Z is sum(self.sample_weight)
self.sample_weight /= np.sum(self.sample_weight)
        # Record the current weak classifier
self.classifiers.append(classifier)
        # Record the current weak classifier's weight
self.alphas.append(alpha)
        # Compute the F1 score
_, f1 = self.predict()
        # Plot
if plot:
return self.contour_plot(
title="adaboost step " + str(len(self.classifiers)) + " f1 score: {:.2f}".format(f1), mode=plot_mode)
else:
return f1
if __name__ == '__main__':
    # Smoke test
X, y = make_moons(n_samples=300, noise=0.2, random_state=3)
y[np.where(y == 0)] = -1
model = Adaboost_Demonstration(X, y)
for i in range(100):
model.__next__(plot=False)
model.contour_plot(mode="2d")
|
11486850
|
import gzip
import json
import os
from typing import Sequence, Optional
from allenact_plugins.robothor_plugin.robothor_task_samplers import (
ObjectNavDatasetTaskSampler,
)
def create_debug_dataset_from_train_dataset(
scene: str,
target_object_type: Optional[str],
episodes_subset: Sequence[int],
train_dataset_path: str,
base_debug_output_path: str,
):
downloaded_episodes = os.path.join(
train_dataset_path, "episodes", scene + ".json.gz"
)
assert os.path.exists(downloaded_episodes), (
"'{}' doesn't seem to exist or is empty. Make sure you've downloaded to download the appropriate"
" training dataset with"
" datasets/download_navigation_datasets.sh".format(downloaded_episodes)
)
# episodes
episodes = ObjectNavDatasetTaskSampler.load_dataset(
scene=scene, base_directory=os.path.join(train_dataset_path, "episodes")
)
if target_object_type is not None:
ids = {
"{}_{}_{}".format(scene, target_object_type, epit)
for epit in episodes_subset
}
else:
ids = {"{}_{}".format(scene, epit) for epit in episodes_subset}
debug_episodes = [ep for ep in episodes if ep["id"] in ids]
assert len(ids) == len(debug_episodes), (
f"Number of input ids ({len(ids)}) does not equal"
f" number of output debug tasks ({len(debug_episodes)})"
)
# sort by episode_ids
debug_episodes = [
idep[1]
for idep in sorted(
[(int(ep["id"].split("_")[-1]), ep) for ep in debug_episodes],
key=lambda x: x[0],
)
]
assert len(debug_episodes) == len(episodes_subset)
episodes_dir = os.path.join(base_debug_output_path, "episodes")
os.makedirs(episodes_dir, exist_ok=True)
episodes_file = os.path.join(episodes_dir, scene + ".json.gz")
json_str = json.dumps(debug_episodes)
json_bytes = json_str.encode("utf-8")
with gzip.GzipFile(episodes_file, "w") as fout:
fout.write(json_bytes)
assert os.path.exists(episodes_file)
if __name__ == "__main__":
CURRENT_PATH = os.getcwd()
SCENE = "FloorPlan_Train1_1"
TARGET = "Television"
EPISODES = [0, 7, 11, 12]
BASE_OUT = os.path.join(CURRENT_PATH, "datasets", "robothor-objectnav", "debug")
create_debug_dataset_from_train_dataset(
scene=SCENE,
target_object_type=TARGET,
episodes_subset=EPISODES,
train_dataset_path=os.path.join(
CURRENT_PATH, "datasets", "robothor-objectnav", "train"
),
base_debug_output_path=BASE_OUT,
)
|
11486954
|
import traceback
import re
# any length > 0 of the following: { X, numbers, hybrid e.g. (U/R), WUBRGC }
mana_pattern = re.compile(
    r'(X|' r'\d|' r'(\([WUBRGC2]/[WUBRGC]\))|' r'[WUBRGC])+')
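# Illustrative strings the pattern accepts (examples, not from the original source):
# '3', 'X', '2WW', '(U/R)(U/R)', 'XXG'.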
def get_card_from_user_input(player, string):
"""Convert a user input (naming a card in a zone) to an actual game object
b 2 --> 2nd(3rd) card on battlefield
b Grizzly Bear --> Grizzly Bear on battlefield
s 0 --> 1st card on stack (from top)
h -1 --> last card in hand
og 3 --> 3rd(4th) card on opponent's graveyard
"""
if not string:
return None
if string[0] == 'o': # opponent; continue parsing rest of string
string = string[1:]
player = player.game.opponent(player)
if string[0] == 'p':
return player
if string[0] == 'b':
zone = player.battlefield
elif string[0] == 's':
zone = player.game.stack
elif string[0] == 'h':
zone = player.hand
elif string[0] == 'g':
zone = player.graveyard
else:
return None
try:
i = int(string[2:])
if i < len(zone):
return zone[i]
else:
return None
except ValueError:
return zone.get_card_by_name(string[2:])
def choose_targets(source):
# TODO: ensure boolean/card return values of this func
# is being parsed correctly.
if source.target_criterias is None:
return True
# No valid targets
if not source.has_valid_target():
return False
targets_chosen = []
for criteria, prompt in zip(source.target_criterias, source.target_prompts):
# keep choosing until we get a valid target
# TODO: allow optional targeting;
# TODO: if no valid target available, fizzles
card = None
try:
while not card:
answer = source.controller.make_choice(prompt)
card = get_card_from_user_input(source.controller, answer)
if card is None: continue
if not criteria(source, card):
card = None
        except Exception:
traceback.print_exc()
return False
targets_chosen.append(card)
return targets_chosen
def parse_targets(criterias):
for i, v in enumerate(criterias):
if v == 'creature':
criterias[i] = lambda self, p: p.is_permanent and p.is_creature
if v == 'your creature':
criterias[i] = lambda self, p: p.is_permanent and p.is_creature and p.controller == self.controller
if v == 'other creature':
criterias[i] = lambda self, p: p.is_permanent and p.is_creature and p != self
if v == 'your other creature':
criterias[i] = lambda self, p: (p.is_permanent and p.is_creature
and p.controller == self.controller and p != self)
if v == 'opponent creature':
criterias[i] = lambda self, p: p.is_creature and p.controller != self.controller
if v == 'opponent':
criterias[i] = lambda self, p: p.is_player and p != self.controller
if v == 'player':
criterias[i] = lambda self, p: p.is_player
if v == 'creature or player':
criterias[i] = (lambda self, p: p.is_player
or (p.is_creature and p.is_permanent))
if v == 'spell':
criterias[i] = lambda self, s: s.is_spell
if v == 'instant or sorcery spell':
criterias[i] = lambda self, s: s.is_spell and (s.is_instant or s.is_sorcery)
return criterias
def parse_ability_costs(cost):
_costs = cost.split(', ')
costs = []
if 'T' in _costs:
costs.append("self.tap() and not self.is_summoning_sick")
for itm in _costs:
if mana_pattern.match(itm):
costs.append("self.controller.pay('%s')" % itm)
        if re.match(r'[pP]ay [\dX]+ life', itm):
            costs.append("self.controller.pay(life=%s)" %
                         re.search(r'[\dX]+', itm).group(0))
if itm == 'Sacrifice ~':
costs.append("self.sacrifice()")
# elif other costs
costs = " and ".join(costs)
return costs
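# A sketch of the expected output (illustrative; the generated cost strings are
# presumably eval-ed later in the card's context):
#
#   parse_ability_costs('2R, T')
#   # -> "self.tap() and not self.is_summoning_sick and self.controller.pay('2R')"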
|
11486962
|
from model_mommy import mommy
from django.shortcuts import resolve_url
from django.test import TestCase
from meupet.models import Kind, Pet, PetStatus, StatusGroup
class StatusGroupView(TestCase):
def setUp(self):
self.status_group = StatusGroup.objects.create(slug="test", name="Name")
initial_status = mommy.make(PetStatus, final=False, group=self.status_group)
final_status = mommy.make(PetStatus, final=True, group=self.status_group)
self.kind = Kind.objects.create(kind="Kind")
mommy.make(Pet, status=initial_status, kind=self.kind)
mommy.make(Pet, status=final_status, kind=self.kind)
def test_get_status_group_list(self):
resp = self.client.get(resolve_url("meupet:pet_list", self.status_group.slug, self.kind.id))
self.assertEqual(200, resp.status_code)
self.assertTemplateUsed(resp, "meupet/pet_list.html")
def test_list_all_pets(self):
resp = self.client.get(resolve_url("meupet:pet_list", self.status_group.slug, self.kind.slug))
self.assertEqual(200, resp.status_code)
self.assertEqual(2, len(resp.context["pets"]))
self.assertTemplateUsed(resp, "meupet/pet_list.html")
|
11486990
|
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import Q
from ..models import Parking
def fill_normalized_reg_nums(apps, schema_editor):
parking_model = apps.get_model('parkings', 'Parking')
parkings_to_process = parking_model.objects.filter(
Q(normalized_reg_num=None) | Q(normalized_reg_num=''))
for parking in parkings_to_process:
parking.normalized_reg_num = Parking.normalize_reg_num(
parking.registration_number)
parking.save(update_fields=['normalized_reg_num'])
class Migration(migrations.Migration):
dependencies = [
('parkings', '0014_normalized_reg_num'),
]
operations = [
migrations.RunPython(
code=fill_normalized_reg_nums,
reverse_code=migrations.RunPython.noop),
]
|
11487002
|
import logging
import unittest
import time
from requests.auth import HTTPProxyAuth
import configcatclient
from configcatclient import ConfigCatClientException
logging.basicConfig(level=logging.INFO)
_SDK_KEY = '<KEY>'
class DefaultTests(unittest.TestCase):
def test_without_sdk_key(self):
try:
configcatclient.create_client(None)
self.fail('Expected ConfigCatClientException')
except ConfigCatClientException:
pass
def test_client_works(self):
client = configcatclient.create_client(_SDK_KEY)
self.assertEqual('This text came from ConfigCat', client.get_value('keySampleText', 'default value'))
client.stop()
def test_get_all_keys(self):
client = configcatclient.create_client(_SDK_KEY)
keys = client.get_all_keys()
self.assertEqual(5, len(keys))
self.assertTrue('keySampleText' in keys)
def test_force_refresh(self):
client = configcatclient.create_client(_SDK_KEY)
self.assertEqual('This text came from ConfigCat', client.get_value('keySampleText', 'default value'))
client.force_refresh()
self.assertEqual('This text came from ConfigCat', client.get_value('keySampleText', 'default value'))
client.stop()
class AutoPollTests(unittest.TestCase):
def test_without_sdk_key(self):
try:
configcatclient.create_client_with_auto_poll(None)
self.fail('Expected ConfigCatClientException')
except ConfigCatClientException:
pass
def test_client_works(self):
client = configcatclient.create_client_with_auto_poll(_SDK_KEY)
self.assertEqual('This text came from ConfigCat', client.get_value('keySampleText', 'default value'))
client.stop()
def test_client_works_valid_base_url(self):
client = configcatclient.create_client_with_auto_poll(_SDK_KEY, base_url='https://cdn.configcat.com')
self.assertEqual('This text came from ConfigCat', client.get_value('keySampleText', 'default value'))
client.stop()
def test_client_works_valid_base_url_trailing_slash(self):
client = configcatclient.create_client_with_auto_poll(_SDK_KEY, base_url='https://cdn.configcat.com/')
self.assertEqual('This text came from ConfigCat', client.get_value('keySampleText', 'default value'))
client.stop()
def test_client_works_invalid_base_url(self):
client = configcatclient.create_client_with_auto_poll(_SDK_KEY, base_url='https://invalidcdn.configcat.com')
self.assertEqual('default value', client.get_value('keySampleText', 'default value'))
client.stop()
def test_client_works_invalid_proxy(self):
proxies = {'https': '0.0.0.0:0'}
proxy_auth = HTTPProxyAuth("test", "test")
client = configcatclient.create_client_with_auto_poll(_SDK_KEY, proxies=proxies, proxy_auth=proxy_auth)
self.assertEqual('default value', client.get_value('keySampleText', 'default value'))
client.stop()
def test_force_refresh(self):
client = configcatclient.create_client_with_auto_poll(_SDK_KEY)
self.assertEqual('This text came from ConfigCat', client.get_value('keySampleText', 'default value'))
client.force_refresh()
self.assertEqual('This text came from ConfigCat', client.get_value('keySampleText', 'default value'))
client.stop()
def test_wrong_param(self):
client = configcatclient.create_client_with_auto_poll(_SDK_KEY, 0, -1)
time.sleep(2)
self.assertEqual('This text came from ConfigCat', client.get_value('keySampleText', 'default value'))
client.stop()
class LazyLoadingTests(unittest.TestCase):
def test_without_sdk_key(self):
try:
configcatclient.create_client_with_lazy_load(None)
self.fail('Expected ConfigCatClientException')
except ConfigCatClientException:
pass
def test_client_works(self):
client = configcatclient.create_client_with_lazy_load(_SDK_KEY)
self.assertEqual('This text came from ConfigCat', client.get_value('keySampleText', 'default value'))
client.stop()
def test_client_works_valid_base_url(self):
client = configcatclient.create_client_with_lazy_load(_SDK_KEY, base_url='https://cdn.configcat.com')
self.assertEqual('This text came from ConfigCat', client.get_value('keySampleText', 'default value'))
client.stop()
def test_client_works_invalid_base_url(self):
client = configcatclient.create_client_with_lazy_load(_SDK_KEY, base_url='https://invalidcdn.configcat.com')
self.assertEqual('default value', client.get_value('keySampleText', 'default value'))
client.stop()
def test_wrong_param(self):
client = configcatclient.create_client_with_lazy_load(_SDK_KEY, 0)
self.assertEqual('This text came from ConfigCat', client.get_value('keySampleText', 'default value'))
client.stop()
class ManualPollingTests(unittest.TestCase):
def test_without_sdk_key(self):
try:
configcatclient.create_client_with_manual_poll(None)
self.fail('Expected ConfigCatClientException')
except ConfigCatClientException:
pass
def test_client_works(self):
client = configcatclient.create_client_with_manual_poll(_SDK_KEY)
self.assertEqual('default value', client.get_value('keySampleText', 'default value'))
client.force_refresh()
self.assertEqual('This text came from ConfigCat', client.get_value('keySampleText', 'default value'))
client.stop()
def test_client_works_valid_base_url(self):
        client = configcatclient.create_client_with_manual_poll(_SDK_KEY, base_url='https://cdn.configcat.com')
client.force_refresh()
self.assertEqual('This text came from ConfigCat', client.get_value('keySampleText', 'default value'))
client.stop()
def test_client_works_invalid_base_url(self):
client = configcatclient.create_client_with_manual_poll(_SDK_KEY, base_url='https://invalidcdn.configcat.com')
client.force_refresh()
self.assertEqual('default value', client.get_value('keySampleText', 'default value'))
client.stop()
if __name__ == '__main__':
unittest.main()
|
11487045
|
from django.template.base import TemplateSyntaxError
from django.test import SimpleTestCase
from django.utils.lorem_ipsum import COMMON_P, WORDS
from ..utils import setup
class LoremTagTests(SimpleTestCase):
@setup({'lorem1': '{% lorem 3 w %}'})
def test_lorem1(self):
output = self.engine.render_to_string('lorem1')
self.assertEqual(output, 'lorem ipsum dolor')
@setup({'lorem_random': '{% lorem 3 w random %}'})
def test_lorem_random(self):
output = self.engine.render_to_string('lorem_random')
words = output.split(' ')
self.assertEqual(len(words), 3)
for word in words:
self.assertIn(word, WORDS)
@setup({'lorem_default': '{% lorem %}'})
def test_lorem_default(self):
output = self.engine.render_to_string('lorem_default')
self.assertEqual(output, COMMON_P)
@setup({'lorem_syntax_error': '{% lorem 1 2 3 4 %}'})
def test_lorem_syntax(self):
msg = "Incorrect format for 'lorem' tag"
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string('lorem_syntax_error')
@setup({'lorem_multiple_paragraphs': '{% lorem 2 p %}'})
def test_lorem_multiple_paragraphs(self):
output = self.engine.render_to_string('lorem_multiple_paragraphs')
self.assertEqual(output.count('<p>'), 2)
@setup({'lorem_incorrect_count': '{% lorem two p %}'})
def test_lorem_incorrect_count(self):
output = self.engine.render_to_string('lorem_incorrect_count')
self.assertEqual(output.count('<p>'), 1)
|
11487109
|
from __future__ import absolute_import
import functools
from voluptuous import Schema, ALLOW_EXTRA, Invalid
from voluptuous.schema_builder import PREVENT_EXTRA
class S(Schema):
def __init__(self, *args, **kwargs):
super(S, self).__init__(*args, required=kwargs.pop('required', True), **kwargs)
self.error = None
def _validate(self, other):
try:
self(other)
except Invalid as e:
self.error = e # cache error
return False
else:
return True
def __eq__(self, other):
self.extra = PREVENT_EXTRA
return self._validate(other)
def __le__(self, other):
self.extra = ALLOW_EXTRA
return self._validate(other)
Exact = functools.partial(S, extra=PREVENT_EXTRA)
Partial = functools.partial(S, extra=ALLOW_EXTRA)
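# A minimal usage sketch (illustrative): exact comparison forbids extra keys,
# partial comparison allows them.
#
#   Exact({'a': int}) == {'a': 1}            # True
#   Exact({'a': int}) == {'a': 1, 'b': 2}    # False (extra key)
#   Partial({'a': int}) <= {'a': 1, 'b': 2}  # True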
|
11487115
|
import tensorflow as tf
from utils.models.relational_memory import RelationalMemory
from tensorflow.python.ops import tensor_array_ops, control_flow_ops
from utils.ops import *
embedding_size = 16
filter_sizes = [3, 3, 3, 3, 3]
num_filters = [128, 256, 128, 256, 512]
num_blocks = [2, 2, 2, 2]
cnn_initializer = tf.keras.initializers.he_normal()
fc_initializer = tf.truncated_normal_initializer(stddev=0.05)
# The generator network based on the Relational Memory
def generator(x_real, temperature, vocab_size, batch_size, seq_len, gen_emb_dim, mem_slots, head_size, num_heads,
hidden_dim, start_token):
start_tokens = tf.constant([start_token] * batch_size, dtype=tf.int32)
output_size = mem_slots * head_size * num_heads
# build relation memory module
g_embeddings = tf.get_variable('g_emb', shape=[vocab_size, gen_emb_dim],
initializer=create_linear_initializer(vocab_size))
gen_mem = RelationalMemory(mem_slots=mem_slots, head_size=head_size, num_heads=num_heads)
g_output_unit = create_output_unit(output_size, vocab_size)
# initial states
init_states = gen_mem.initial_state(batch_size)
# ---------- generate tokens and approximated one-hot results (Adversarial) ---------
gen_o = tensor_array_ops.TensorArray(dtype=tf.float32, size=seq_len, dynamic_size=False, infer_shape=True)
gen_x = tensor_array_ops.TensorArray(dtype=tf.int32, size=seq_len, dynamic_size=False, infer_shape=True)
gen_x_onehot_adv = tensor_array_ops.TensorArray(dtype=tf.float32, size=seq_len, dynamic_size=False,
infer_shape=True) # generator output (relaxed of gen_x)
# the generator recurrent module used for adversarial training
def _gen_recurrence(i, x_t, h_tm1, gen_o, gen_x, gen_x_onehot_adv):
mem_o_t, h_t = gen_mem(x_t, h_tm1) # hidden_memory_tuple
o_t = g_output_unit(mem_o_t) # batch x vocab, logits not probs
gumbel_t = add_gumbel(o_t)
next_token = tf.stop_gradient(tf.argmax(gumbel_t, axis=1, output_type=tf.int32))
next_token_onehot = tf.one_hot(next_token, vocab_size, 1.0, 0.0)
x_onehot_appr = tf.nn.softmax(tf.multiply(gumbel_t, temperature)) # one-hot-like, [batch_size x vocab_size]
# x_tp1 = tf.matmul(x_onehot_appr, g_embeddings) # approximated embeddings, [batch_size x emb_dim]
x_tp1 = tf.nn.embedding_lookup(g_embeddings, next_token) # embeddings, [batch_size x emb_dim]
gen_o = gen_o.write(i, tf.reduce_sum(tf.multiply(next_token_onehot, x_onehot_appr), 1)) # [batch_size], prob
gen_x = gen_x.write(i, next_token) # indices, [batch_size]
gen_x_onehot_adv = gen_x_onehot_adv.write(i, x_onehot_appr)
return i + 1, x_tp1, h_t, gen_o, gen_x, gen_x_onehot_adv
# build a graph for outputting sequential tokens
_, _, _, gen_o, gen_x, gen_x_onehot_adv = control_flow_ops.while_loop(
cond=lambda i, _1, _2, _3, _4, _5: i < seq_len,
body=_gen_recurrence,
loop_vars=(tf.constant(0, dtype=tf.int32), tf.nn.embedding_lookup(g_embeddings, start_tokens),
init_states, gen_o, gen_x, gen_x_onehot_adv))
gen_o = tf.transpose(gen_o.stack(), perm=[1, 0]) # batch_size x seq_len
gen_x = tf.transpose(gen_x.stack(), perm=[1, 0]) # batch_size x seq_len
gen_x_onehot_adv = tf.transpose(gen_x_onehot_adv.stack(), perm=[1, 0, 2]) # batch_size x seq_len x vocab_size
# ----------- pre-training for generator -----------------
x_emb = tf.transpose(tf.nn.embedding_lookup(g_embeddings, x_real), perm=[1, 0, 2]) # seq_len x batch_size x emb_dim
g_predictions = tensor_array_ops.TensorArray(dtype=tf.float32, size=seq_len, dynamic_size=False, infer_shape=True)
ta_emb_x = tensor_array_ops.TensorArray(dtype=tf.float32, size=seq_len)
ta_emb_x = ta_emb_x.unstack(x_emb)
    # the generator recurrent module used for pre-training
def _pretrain_recurrence(i, x_t, h_tm1, g_predictions):
mem_o_t, h_t = gen_mem(x_t, h_tm1)
o_t = g_output_unit(mem_o_t)
g_predictions = g_predictions.write(i, tf.nn.softmax(o_t)) # batch_size x vocab_size
x_tp1 = ta_emb_x.read(i)
return i + 1, x_tp1, h_t, g_predictions
# build a graph for outputting sequential tokens
_, _, _, g_predictions = control_flow_ops.while_loop(
cond=lambda i, _1, _2, _3: i < seq_len,
body=_pretrain_recurrence,
loop_vars=(tf.constant(0, dtype=tf.int32), tf.nn.embedding_lookup(g_embeddings, start_tokens),
init_states, g_predictions))
g_predictions = tf.transpose(g_predictions.stack(),
perm=[1, 0, 2]) # batch_size x seq_length x vocab_size
    # pre-training loss: token-level cross-entropy (teacher forcing / MLE)
pretrain_loss = -tf.reduce_sum(
tf.one_hot(tf.to_int32(tf.reshape(x_real, [-1])), vocab_size, 1.0, 0.0) * tf.log(
tf.clip_by_value(tf.reshape(g_predictions, [-1, vocab_size]), 1e-20, 1.0)
)
) / (seq_len * batch_size)
return gen_x_onehot_adv, gen_x, pretrain_loss, gen_o
def discriminator(x_onehot, batch_size, seq_len, vocab_size, dis_emb_dim, num_rep, sn, is_train=True):
# ============= Embedding Layer =============
d_embeddings = tf.get_variable('d_emb', shape=[vocab_size, dis_emb_dim],
initializer=create_linear_initializer(vocab_size))
input_x_re = tf.reshape(x_onehot, [-1, vocab_size])
emb_x_re = tf.matmul(input_x_re, d_embeddings)
emb_x = tf.reshape(emb_x_re, [batch_size, seq_len, dis_emb_dim]) # batch_size x seq_len x dis_emb_dim
emb_x_expanded = tf.expand_dims(emb_x, 2) # batch_size x seq_len x 1 x emd_dim
# ============= First Convolution Layer =============
with tf.variable_scope("conv-0"):
conv0 = tf.layers.conv2d(
emb_x_expanded,
filters=num_filters[0],
kernel_size=[filter_sizes[0], 1],
kernel_initializer=cnn_initializer,
activation=tf.nn.relu)
# ============= Convolution Blocks =============
conv1 = conv_block(conv0, 1, max_pool=True, is_train=is_train)
conv2 = conv_block(conv1, 2, max_pool=False, is_train=is_train)
# conv3 = conv_block(conv2, 3, max_pool=False, is_train=is_train)
#
# conv4 = conv_block(conv3, 4, max_pool=False, is_train=is_train)
# ============= k-max Pooling =============
h = tf.transpose(tf.squeeze(conv2), [0, 2, 1])
top_k = tf.nn.top_k(h, k=1, sorted=False).values
h_flat = tf.reshape(top_k, [batch_size, -1])
# ============= Fully Connected Layers =============
# fc1_out = tf.layers.dense(h_flat, 2048, activation=tf.nn.relu, kernel_initializer=fc_initializer)
#
# fc2_out = tf.layers.dense(fc1_out, 2048, activation=tf.nn.relu, kernel_initializer=fc_initializer)
logits = tf.layers.dense(h_flat, 1, activation=None, kernel_initializer=fc_initializer)
logits = tf.squeeze(logits, -1) # batch_size
return logits
def conv_block(input, i, max_pool=True, is_train=True):
with tf.variable_scope("conv-block-%s" % i):
# Two "conv-batch_norm-relu" layers.
for j in range(2):
with tf.variable_scope("conv-%s" % j):
# convolution
conv = tf.layers.conv2d(
input,
filters=num_filters[i],
kernel_size=[filter_sizes[i], 1],
kernel_initializer=cnn_initializer,
activation=None)
# batch normalization
conv = tf.layers.batch_normalization(conv, training=is_train)
# relu
conv = tf.nn.relu(conv)
if max_pool:
# Max pooling
pool = tf.layers.max_pooling2d(
conv,
pool_size=(3, 1),
strides=(2, 1),
padding="SAME")
return pool
else:
return conv
|
11487128
|
from datetime import datetime
from pytz import UTC
from grouper.settings import Settings
from grouper.templating import BaseTemplateEngine
def mock_utcnow():
# type: () -> datetime
return datetime(2015, 8, 11, 12, tzinfo=UTC)
def test_expires_when_str():
# type: () -> None
assert BaseTemplateEngine.expires_when_str(None) == "Never", "no datetime means no expires"
for date, expected, msg in [
(datetime(2015, 8, 11, 11, 00, 00, 0), "Expired", "long before should expire"),
(datetime(2015, 8, 11, 12, 00, 00, 0), "Expired", "same time should expire"),
        (datetime(2015, 8, 11, 11, 59, 59, 0), "Expired", "one second before should expire"),
        (datetime(2015, 8, 11, 12, 0, 0, 100), "Expired", "milliseconds should be ignored"),
        (datetime(2015, 8, 11, 12, 0, 1, 0), "1 second", "singular second"),
        (datetime(2015, 8, 11, 12, 0, 2, 0), "2 seconds", "plural seconds"),
(datetime(2015, 8, 11, 12, 1, 2, 0), "1 minute", "ignore lower periods"),
(datetime(2016, 8, 11, 12, 1, 2, 0), "1 year", "ignore lower periods"),
]:
utcdate = date.replace(tzinfo=UTC)
assert BaseTemplateEngine.expires_when_str(utcdate, utcnow_fn=mock_utcnow) == expected, msg
assert BaseTemplateEngine.expires_when_str(date, utcnow_fn=mock_utcnow) == expected, (
msg + " (no tzinfo)"
)
def test_long_ago_str():
# type: () -> None
for date, expected, msg in [
(datetime(2015, 8, 11, 11, 0, 0, 0), "1 hour ago", "long before should expire"),
(datetime(2015, 8, 11, 12, 0, 0, 0), "now", "now"),
(datetime(2015, 8, 11, 11, 59, 59, 100), "now", "milliseconds should be ignored"),
(datetime(2015, 8, 11, 11, 59, 0, 0), "1 minute ago", "1 minute"),
        (datetime(2015, 8, 11, 11, 58, 0, 0), "2 minutes ago", "plural minutes"),
(datetime(2015, 8, 11, 12, 0, 1, 0), "in the future", "in the future"),
]:
utcdate = date.replace(tzinfo=UTC)
assert BaseTemplateEngine.long_ago_str(utcdate, utcnow_fn=mock_utcnow) == expected, msg
assert BaseTemplateEngine.long_ago_str(date, utcnow_fn=mock_utcnow) == expected, (
msg + " (no tzinfo)"
)
def test_print_date():
# type: () -> None
settings = Settings()
settings.date_format = "%Y-%m-%d %I:%M %p"
setattr(settings, "timezone", "US/Pacific") # work around mypy confusion
template_engine = BaseTemplateEngine(settings, "grouper.fe")
for date_, expected, msg in [
(datetime(2015, 8, 11, 18, tzinfo=UTC), "2015-08-11 11:00 AM", "from datetime object"),
(datetime(2015, 8, 11, 18, 0, 10, 10, tzinfo=UTC), "2015-08-11 11:00 AM", "ignore sec/ms"),
(datetime(2015, 8, 11, 18, 0, 10, 10), "2015-08-11 11:00 AM", "add tzinfo if needed"),
]:
assert template_engine.print_date(date_) == expected, msg
|
11487141
|
class BinarySearchTree:
def __init__(self):
self.root = None
self.size = 0
def insert(self, item):
self.root = self._insert(self.root, item)
self.size += 1
def _insert(self, root, item):
if not root:
root = TreeNode(item)
return root
if item < root.val:
root.left = self._insert(root.left, item)
else:
root.right = self._insert(root.right, item)
return root
def find(self, item):
return self._find(self.root, item)
def _find(self, root, item):
if not root:
return False
if item > root.val:
return self._find(root.right, item)
elif item < root.val:
return self._find(root.left, item)
else:
return True
def delete(self, key):
def _delete(root, key):
if not root:
return
if key > root.val:
root.right = _delete(root.right, key)
elif key < root.val:
root.left = _delete(root.left, key)
else:
if not root.right:
return root.left
else:
p = root.right
while p.left:
p = p.left
root.val = p.val
root.right = _delete(root.right, p.val)
return root
        # Rebind the root so that deleting the root node itself takes effect.
        self.root = _delete(self.root, key)
class TreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
if __name__ == '__main__':
bst = BinarySearchTree()
bst.insert(12)
bst.insert(9)
bst.insert(13)
print(bst.size)
print(bst.find(11))
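    # Illustrative: delete a node and confirm the lookup now fails.
    bst.delete(9)
    print(bst.find(9))  # False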
|
11487149
|
import logging
import os
import re
import tempfile
import time
import math
import mock
import torch
from torch import nn
from torch.utils.data import DataLoader
import tensorflow.compat.v1 as tf
import cpu.logger as logger
from cpu.hooks import EvalHook, HookBase
from cpu.trainer import Trainer, MetricStorage
tf.disable_v2_behavior()
class _SimpleModel(nn.Module):
def __init__(self, sleep_sec=0):
super().__init__()
self.fc = nn.Linear(3, 3)
self.sleep_sec = sleep_sec
def forward(self, x):
if self.sleep_sec > 0:
time.sleep(self.sleep_sec)
return {"loss": x.sum() + sum([x.mean() for x in self.parameters()])}
class _SimpleDataset:
def __init__(self):
self.data = torch.rand(10, 3)
def __len__(self):
return 10
def __getitem__(self, index):
return self.data[index]
# a random but fixed dataset reused across the whole testing phase
_simple_dataset = _SimpleDataset()
def _reset_logger():
cpu_logger = logging.Logger.manager.loggerDict["cpu"]
if hasattr(cpu_logger, "handlers"):
cpu_logger.handlers = []
logger.logger_initialized.clear()
def _create_new_trainer(
max_epochs=10,
log_period=1,
checkpoint_period=1,
work_dir="work_dir",
max_num_checkpoints=None,
enable_amp=False,
device="cpu",
):
_reset_logger()
model = _SimpleModel().to(device)
optimizer = torch.optim.SGD(model.parameters(), 0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3)
data_loader = DataLoader(_simple_dataset)
trainer = Trainer(
model,
optimizer,
lr_scheduler,
data_loader,
max_epochs=max_epochs,
log_period=log_period,
checkpoint_period=checkpoint_period,
work_dir=work_dir,
max_num_checkpoints=max_num_checkpoints,
enable_amp=enable_amp,
)
return trainer
def test_basic_run_with_log_period():
with tempfile.TemporaryDirectory() as dir:
trainer = _create_new_trainer(log_period=4, work_dir=dir)
trainer.train()
# check counter
assert trainer.cur_iter == trainer.max_iters - 1
assert trainer.inner_iter == trainer.epoch_len - 1
assert trainer.epoch == trainer.max_epochs - 1
assert trainer.lr_scheduler.last_iter == trainer.max_iters
log_file = os.path.join(dir, "log_rank0.txt")
assert os.path.exists(log_file)
log_content = open(log_file).readlines()
assert len(log_content) != 0
# check lr/logging period/ETA
for epoch in range(10):
            if epoch < 3:
lr = 0.1
elif epoch < 6:
lr = 0.01
elif epoch < 9:
lr = 0.001
else:
lr = 0.0001
cnt = 0
for line in log_content:
if f"Epoch: [{epoch}]" in line:
assert f"lr: {lr:.4g}" in line
assert "[3/9]" in line or "[7/9]" in line
assert "ETA: " in line
cnt += 1
assert cnt == 2
def test_basic_run_without_log_period():
with tempfile.TemporaryDirectory() as dir:
trainer = _create_new_trainer(work_dir=dir)
trainer.train()
# check counter
assert trainer.cur_iter == trainer.max_iters - 1
assert trainer.inner_iter == trainer.epoch_len - 1
assert trainer.epoch == trainer.max_epochs - 1
assert trainer.lr_scheduler.last_iter == trainer.max_iters
log_file = os.path.join(dir, "log_rank0.txt")
assert os.path.exists(log_file)
log_content = open(log_file).readlines()
assert len(log_content) != 0
# check lr/ETA
for epoch in range(10):
            if epoch < 3:
lr = 0.1
elif epoch < 6:
lr = 0.01
elif epoch < 9:
lr = 0.001
else:
lr = 0.0001
iter = 0
for line in log_content:
if f"Epoch: [{epoch}][{iter}/9]" in line:
iter += 1
assert f"lr: {lr:.4g}" in line
assert "ETA: " in line
assert iter == 10
def test_tensorboard_logging():
class SimpleHook(HookBase):
def after_iter(self) -> None:
self.log(self.trainer.cur_iter, metric1=self.trainer.cur_iter, smooth=False)
with tempfile.TemporaryDirectory() as dir:
trainer = _create_new_trainer(max_epochs=9, work_dir=dir)
test_func = mock.Mock(return_value={"metric2": 3.0})
trainer.register_hooks([EvalHook(3, test_func), SimpleHook()])
trainer.train()
tb_log_file = os.listdir(os.path.join(dir, "tb_logs"))
assert len(tb_log_file) == 1
tb_log_file = os.path.join(dir, "tb_logs", tb_log_file[0])
lrs = []
metric1s = []
metric2s = []
for event in tf.train.summary_iterator(tb_log_file):
for value in event.summary.value:
if value.tag == "lr":
lrs.append(value.simple_value)
if value.tag == "metric1":
metric1s.append(value.simple_value)
if value.tag == "metric2":
metric2s.append(value.simple_value)
assert len(lrs) == 90
true_lrs = [0.1] * 30 + [0.01] * 30 + [0.001] * 30
assert sum([lrs[i] - true_lrs[i] for i in range(90)]) < 1e-7
assert len(metric1s) == 90
assert len(metric2s) == 3
# with log_period
with tempfile.TemporaryDirectory() as dir:
trainer = _create_new_trainer(max_epochs=9, work_dir=dir, log_period=4)
test_func = mock.Mock(return_value={"metric2": 3.0})
trainer.register_hooks([EvalHook(3, test_func), SimpleHook()])
trainer.train()
tb_log_file = os.listdir(os.path.join(dir, "tb_logs"))
assert len(tb_log_file) == 1
tb_log_file = os.path.join(dir, "tb_logs", tb_log_file[0])
lrs = []
metric1s = []
metric2s = []
for event in tf.train.summary_iterator(tb_log_file):
for value in event.summary.value:
if value.tag == "lr":
lrs.append(value.simple_value)
if value.tag == "metric1":
metric1s.append(value.simple_value)
if value.tag == "metric2":
metric2s.append(value.simple_value)
assert len(lrs) == 27
true_lrs = [0.1] * 9 + [0.01] * 9 + [0.001] * 9
        assert sum(abs(lrs[i] - true_lrs[i]) for i in range(27)) < 1e-7
assert len(metric1s) == 27
assert len(metric2s) == 3
def test_checkpoint_and_resume():
for enable_amp in [True, False]:
for device in ["cuda", "cpu"]:
if device == "cuda" and not torch.cuda.is_available():
continue
if enable_amp and device == "cpu":
continue
with tempfile.TemporaryDirectory() as dir1:
trainer = _create_new_trainer(
max_epochs=4,
work_dir=dir1,
checkpoint_period=3,
enable_amp=enable_amp,
device=device,
)
trainer.train()
                assert abs(trainer.lr - 0.01) < 1e-7
assert trainer.lr_scheduler.last_iter == 40
# test periodically checkpointing
for should_ckpt_epoch in [2, 3]:
assert os.path.exists(
os.path.join(dir1, f"checkpoints/epoch_{should_ckpt_epoch}.pth")
)
assert os.path.exists(os.path.join(dir1, "checkpoints/latest.pth"))
total_losses = trainer.metric_storage._history["total_loss"]._history
epoch_3_smoothed_losses = []
for line in open(os.path.join(dir1, "log_rank0.txt")):
if "Epoch: [3]" not in line:
continue
res = re.findall(r"total_loss: \S+", line)
epoch_3_smoothed_losses.append(res[0])
# resume training from the "epoch_2.pth"
with tempfile.TemporaryDirectory() as dir2:
trainer = _create_new_trainer(
max_epochs=4,
work_dir=dir2,
checkpoint_period=3,
enable_amp=enable_amp,
device=device,
)
trainer.resume_or_load(os.path.join(dir1, "checkpoints/epoch_2.pth"))
                assert abs(trainer.lr - 0.01) < 1e-7
assert trainer.lr_scheduler.last_iter == 30
trainer.train()
# test periodically checkpointing
assert os.path.exists(os.path.join(dir2, "checkpoints/epoch_3.pth"))
assert os.path.exists(os.path.join(dir2, "checkpoints/latest.pth"))
total_losses_resume = trainer.metric_storage._history["total_loss"]._history
epoch_3_smoothed_losses_resume = []
for line in open(os.path.join(dir2, "log_rank0.txt")):
if "Epoch: [3]" not in line:
continue
res = re.findall(r"total_loss: \S+", line)
epoch_3_smoothed_losses_resume.append(res[0])
# If the model/optimizer/lr_scheduler resumes correctly,
# the training losses should be the same.
for loss1, loss2 in zip(total_losses, total_losses_resume):
if device == "cpu":
assert loss1 == loss2
else:
assert abs(loss1 - loss2) < 1e-6
# If the metric storage resumes correctly,
# the training smoothed losses should be the same too.
for loss1, loss2 in zip(
epoch_3_smoothed_losses, epoch_3_smoothed_losses_resume
):
assert loss1 == loss2
def test_eval_hook():
with tempfile.TemporaryDirectory() as dir:
for total_epochs, period, eval_count in [(30, 15, 2), (31, 15, 3), (20, 0, 1)]:
test_func = mock.Mock(return_value={"metric": 3.0})
trainer = _create_new_trainer(max_epochs=total_epochs, work_dir=dir)
trainer.register_hooks([EvalHook(period, test_func)])
trainer.train()
assert test_func.call_count == eval_count
def test_checkpoint_hook():
with tempfile.TemporaryDirectory() as dir:
trainer = _create_new_trainer(max_epochs=10, work_dir=dir, max_num_checkpoints=3)
trainer.train()
for epoch in range(10):
if epoch < 7:
assert not os.path.exists(os.path.join(dir, f"checkpoints/epoch_{epoch}.pth"))
else:
assert os.path.exists(os.path.join(dir, f"checkpoints/epoch_{epoch}.pth"))
def test_metric_storage():
# without smooth
metric_storage = MetricStorage(window_size=4)
metric_storage.update(0, loss=0.7, accuracy=0.1, smooth=False)
metric_storage.update(1, loss=0.6, accuracy=0.2, smooth=False)
metric_storage.update(2, loss=0.4, accuracy=0.3, smooth=False)
metric_storage.update(3, loss=0.3, accuracy=0.7, smooth=False)
assert metric_storage.values_maybe_smooth["loss"] == (3, 0.3)
assert metric_storage.values_maybe_smooth["accuracy"] == (3, 0.7)
assert abs(metric_storage["loss"].global_avg - 0.5) < 1e-7
assert metric_storage["accuracy"].global_avg == 0.325
metric_storage.update(4, loss=0.5, accuracy=0.6, smooth=False)
metric_storage.update(5, loss=0.1, accuracy=0.8, smooth=False)
assert metric_storage.values_maybe_smooth["loss"] == (5, 0.1)
assert metric_storage.values_maybe_smooth["accuracy"] == (5, 0.8)
assert metric_storage["loss"].global_avg == 2.6 / 6
assert metric_storage["accuracy"].global_avg == 0.45
# with smooth
metric_storage = MetricStorage(window_size=4)
metric_storage.update(0, loss=0.7, accuracy=0.1)
metric_storage.update(1, loss=0.6, accuracy=0.2)
metric_storage.update(2, loss=0.4, accuracy=0.3)
metric_storage.update(3, loss=0.3, accuracy=0.7)
assert metric_storage.values_maybe_smooth["loss"][0] == 3
assert abs(metric_storage.values_maybe_smooth["loss"][1] - 0.5) < 1e-7
assert metric_storage.values_maybe_smooth["accuracy"] == (3, 1.3 / 4)
assert abs(metric_storage["loss"].global_avg - 0.5) < 1e-7
assert metric_storage["accuracy"].global_avg == 0.325
metric_storage.update(4, loss=0.5, accuracy=0.6)
metric_storage.update(5, loss=0.1, accuracy=0.8)
assert metric_storage.values_maybe_smooth["loss"] == (5, 1.3 / 4)
assert math.isclose(metric_storage.values_maybe_smooth["accuracy"][0], 5)
assert math.isclose(metric_storage.values_maybe_smooth["accuracy"][1], 2.4 / 4)
assert metric_storage["loss"].global_avg == 2.6 / 6
assert metric_storage["accuracy"].global_avg == 0.45
|
11487165
|
from scipy.stats import pearsonr, spearmanr
import torch
import torch.nn.functional as F
from .evaluator import Evaluator
class SICKEvaluator(Evaluator):
def get_scores(self):
self.model.eval()
num_classes = self.dataset_cls.NUM_CLASSES
test_kl_div_loss = 0
predictions = []
true_labels = []
for batch in self.data_loader:
# Select embedding
sent1, sent2 = self.get_sentence_embeddings(batch)
output = self.model(sent1, sent2, batch.ext_feats, batch.dataset.word_to_doc_cnt, batch.sentence_1_raw, batch.sentence_2_raw)
test_kl_div_loss += F.kl_div(output, batch.label, size_average=False).item()
predict_classes = batch.label.new_tensor(torch.arange(1, num_classes + 1)).expand(self.batch_size, num_classes)
# handle last batch which might have smaller size
if len(predict_classes) != len(batch.sentence_1):
predict_classes = batch.label.new_tensor(torch.arange(1, num_classes + 1)).expand(len(batch.sentence_1), num_classes)
true_labels.append((predict_classes * batch.label.detach()).sum(dim=1))
predictions.append((predict_classes * output.detach().exp()).sum(dim=1))
del output
predictions = torch.cat(predictions)
true_labels = torch.cat(true_labels)
mse = F.mse_loss(predictions, true_labels).item()
test_kl_div_loss /= len(batch.dataset.examples)
predictions = predictions.cpu().numpy()
true_labels = true_labels.cpu().numpy()
pearson_r = pearsonr(predictions, true_labels)[0]
spearman_r = spearmanr(predictions, true_labels)[0]
return [pearson_r, spearman_r, mse, test_kl_div_loss], ['pearson_r', 'spearman_r', 'mse', 'KL-divergence loss']
def get_final_prediction_and_label(self, batch_predictions, batch_labels):
num_classes = self.dataset_cls.NUM_CLASSES
predict_classes = batch_labels.new_tensor(torch.arange(1, num_classes + 1)).expand(batch_predictions.size(0), num_classes)
predictions = (predict_classes * batch_predictions.exp()).sum(dim=1)
true_labels = (predict_classes * batch_labels).sum(dim=1)
return predictions, true_labels
|
11487170
|
from wasmite import WasmiteCase, WasmModule
from wasmite import FunctionTypes, Function, Global, Value, main
from wasmite import I32, I64
def sum(x: int, y: int) -> int:
""" python function to be imported into WASM """
return x + y
class Test(WasmiteCase):
    # Create a variable to hold all the functions from a specific wasm file.
module = WasmModule("test_wasm.wasm")
    # Import a Python function into WASM.
    # Type annotations on the imported function are required.
module.register("math", {
"sum": Function(module.store, sum),
"seven": Global(module.store, Value.i32(7), mutable=True)
})
# start up the module and return the exports (this is mandatory)
exports = module.get_exports()
def test_add(self):
# test add function
result = self.exports.add(1,2)
self.assertEqual(result, 3)
def test_sub(self):
# test the sub function
result = self.exports.sub(2,2)
self.assertEqual(result, 0)
def test_args_add(self):
# check the types for results and parameter of the function "add"
# param is I32, I32 and result is I32
add_function = self.exports.add
        self.assertTypes(add_function, FunctionTypes([I32, I32], [I32]))
def test_import_sum(self):
# test the imported python function sum.
sum_function = self.exports.addsum(5,2)
self.assertEqual(sum_function, 7)
def test_global_read(self):
# test reading value of global
read_seven = self.exports.read_global()
self.assertEqual(read_seven, 7)
def test_global_write(self):
# test writing value of global
self.exports.write_global(5)
read_seven = self.exports.read_global()
self.assertEqual(read_seven, 5)
# Hi don't forget to add me
if __name__ == "__main__":
main()
|
11487175
|
from django.core.management.base import BaseCommand
from pyjobs.core.models import Job
from pyjobs.marketing.triggers import send_feedback_collection_email
class Command(BaseCommand):
def handle(self, *args, **options):
jobs = Job().get_jobs_to_get_feedback()
for job in jobs:
send_feedback_collection_email(job)
return "True"
|
11487208
|
from django.apps.config import AppConfig
from djangoseo.models import setup
class SeoConfig(AppConfig):
name = 'djangoseo'
def ready(self):
setup()
|
11487241
|
from plugin.core.libraries.helpers.path import PathHelper
from plugin.core.libraries.helpers.storage import StorageHelper
from plugin.core.libraries.helpers.system import SystemHelper
|
11487305
|
import sys
from pathlib import Path
import pandas as pd
from neuro.points.points_to_brainrender import (
main as points_to_brainrender_run,
)
points_dir = Path("tests", "data", "points")
cellfinder_out = points_dir / "cellfinder_out.xml"
brainrender_file = points_dir / "brainrender.h5"
def test_points_to_brainrender(tmpdir):
tmpdir = Path(tmpdir)
brainrender_file_test = tmpdir / "brainrender_test.h5"
args = [
"points_to_brainrender",
str(cellfinder_out),
str(brainrender_file_test),
"-x",
"10",
"-y",
"10",
"-z",
"10",
"--max-z",
"13200",
"--hdf-key",
"df",
]
sys.argv = args
points_to_brainrender_run()
assert (
(
pd.read_hdf(brainrender_file_test, key="df")
== pd.read_hdf(brainrender_file, key="df")
)
.all()
.all()
)
|
11487363
|
import numpy as np
import theano
import theano.tensor as T
import math
# library with theano PDF functions
c = - 0.5 * math.log(2*math.pi)
def normal(x, mean, sd):
return c - T.log(T.abs_(sd)) - (x - mean)**2 / (2 * sd**2)
def normal2(x, mean, logvar):
return c - logvar/2 - (x - mean)**2 / (2 * T.exp(logvar))
def standard_normal(x):
return c - x**2 / 2
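# Minimal usage sketch (added for illustration; assumes theano is installed):
# compile standard_normal into a callable and check it against the closed form
# log N(x; 0, 1) = -0.5 * log(2 * pi) - x**2 / 2.
if __name__ == "__main__":
    x_sym = T.dvector('x')
    log_pdf = theano.function([x_sym], standard_normal(x_sym))
    vals = np.array([0.0, 1.0])
    assert np.allclose(log_pdf(vals), c - vals ** 2 / 2)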
|
11487388
|
from keras.models import load_model
from keras.optimizers import SGD, Adam
from skimage.io import imshow
from cnnlevelset.pascalvoc_util import PascalVOC
from cnnlevelset.localizer import Localizer
from cnnlevelset.generator import pascal_datagen, pascal_datagen_singleobj
from cnnlevelset import config as cfg
import sys
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
tf.python.control_flow_ops = tf  # legacy compatibility workaround for old Keras + TensorFlow versions
nb_epoch = 160
pascal = PascalVOC(voc_dir=cfg.PASCAL_PATH)
if len(sys.argv) > 1:
if sys.argv[1] == 'test':
X_img_test, X_test, y_test = pascal.get_test_data(10, random=True)
localizer = Localizer(model_path=cfg.MODEL_PATH)
cls_preds, bbox_preds = localizer.predict(X_test)
for img, y, cls_pred, bbox_pred in zip(X_img_test, y_test, cls_preds, bbox_preds):
label = pascal.idx2label[np.argmax(cls_pred)]
print(label)
img = img.reshape(224, 224, 3)
imshow(pascal.draw_bbox(img, bbox_pred))
plt.show()
sys.exit(0)
X_train, y_train = pascal.load_features_trainset()
y_cls = y_train[:, :, 0]
y_reg = y_train[:, :, 1:]
idxes = np.argmax(y_cls, axis=1)
y_reg = y_reg[range(y_train.shape[0]), idxes]
y_train = [y_cls, y_reg]
localizer = Localizer()
localizer.train(X_train, y_train, nb_epoch=nb_epoch)
|
11487412
|
import backbone
import utils
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
class BaselineTrain(nn.Module):
def __init__(self, model_func, num_class, loss_type = 'softmax'):
super(BaselineTrain, self).__init__()
self.feature = model_func()
if loss_type == 'softmax':
self.classifier = nn.Linear(self.feature.final_feat_dim, num_class)
self.classifier.bias.data.fill_(0)
elif loss_type == 'dist': #Baseline ++
self.classifier = backbone.distLinear(self.feature.final_feat_dim, num_class)
self.loss_type = loss_type #'softmax' #'dist'
self.num_class = num_class
self.loss_fn = nn.CrossEntropyLoss()
        self.DBval = False  # only set True for CUB dataset, see issue #31
def forward(self,x):
x = Variable(x.cuda())
out = self.feature.forward(x)
scores = self.classifier.forward(out)
return scores
def forward_loss(self, x, y):
scores = self.forward(x)
y = Variable(y.cuda())
return self.loss_fn(scores, y )
def train_loop(self, epoch, train_loader, optimizer):
print_freq = 10
avg_loss=0
for i, (x,y) in enumerate(train_loader):
optimizer.zero_grad()
loss = self.forward_loss(x, y)
loss.backward()
optimizer.step()
avg_loss = avg_loss+loss.item()
if i % print_freq==0:
#print(optimizer.state_dict()['param_groups'][0]['lr'])
print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f}'.format(epoch, i, len(train_loader), avg_loss/float(i+1) ))
def test_loop(self, val_loader):
if self.DBval:
return self.analysis_loop(val_loader)
else:
return -1 #no validation, just save model during iteration
def analysis_loop(self, val_loader, record = None):
class_file = {}
for i, (x,y) in enumerate(val_loader):
x = x.cuda()
x_var = Variable(x)
feats = self.feature.forward(x_var).data.cpu().numpy()
labels = y.cpu().numpy()
for f, l in zip(feats, labels):
if l not in class_file.keys():
class_file[l] = []
class_file[l].append(f)
for cl in class_file:
class_file[cl] = np.array(class_file[cl])
DB = DBindex(class_file)
print('DB index = %4.2f' %(DB))
return 1/DB #DB index: the lower the better
def DBindex(cl_data_file):
    #For the definition of the Davies-Bouldin index (DBindex), see https://en.wikipedia.org/wiki/Davies%E2%80%93Bouldin_index
    #The DB index represents the intra-class variation of the data
    #As baseline/baseline++ do not train a few-shot classifier during training, this is an alternative metric for evaluating the validation set
    #Empirically, this only works for the CUB dataset but not for the miniImagenet dataset
class_list = cl_data_file.keys()
cl_num= len(class_list)
cl_means = []
stds = []
DBs = []
for cl in class_list:
cl_means.append( np.mean(cl_data_file[cl], axis = 0) )
stds.append( np.sqrt(np.mean( np.sum(np.square( cl_data_file[cl] - cl_means[-1]), axis = 1))))
mu_i = np.tile( np.expand_dims( np.array(cl_means), axis = 0), (len(class_list),1,1) )
mu_j = np.transpose(mu_i,(1,0,2))
mdists = np.sqrt(np.sum(np.square(mu_i - mu_j), axis = 2))
for i in range(cl_num):
DBs.append( np.max([ (stds[i]+ stds[j])/mdists[i,j] for j in range(cl_num) if j != i ]) )
return np.mean(DBs)
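# Sanity-check sketch for DBindex (added for illustration): two tight,
# well-separated clusters should yield a small DB index, i.e. a large 1/DB score.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    toy = {0: rng.randn(20, 5) * 0.1, 1: rng.randn(20, 5) * 0.1 + 10.0}
    print('toy DB index = %.4f' % DBindex(toy))  # expected to be well below 1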
|
11487416
|
from rknn.api import RKNN
import cv2
import numpy as np
import re
import math
import random
import matplotlib.pylab as plt
def caffe2rknn(caffe_proto,caffe_weight,rknn_model):
print("start export")
rknn=RKNN(verbose=True)
ret=rknn.load_caffe(model=caffe_proto,
proto="caffe",
blobs=caffe_weight)
rknn.config(channel_mean_value='127.5 127.5 127.5 128.0',
reorder_channel='2 1 0',
#reorder_channel='0 1 2',
#need_horizontal_merge=True
)
ret = rknn.build(do_quantization=False)
#ret = rknn.build(do_quantization=True)
ret=rknn.export_rknn(export_path=rknn_model)
print("export finished")
def run_ssd(img_path,priorbox_path):
#caffe_proto="./MobileNetSSD_deploy.prototxt"
caffe_proto= "./MobileNetSSD_deploy_truncated.prototxt"
caffe_weight="./MobileNetSSD_deploy10695.caffemodel"
rknn_model="./pedestrian_ssd.rknn"
caffe2rknn(caffe_proto,caffe_weight,rknn_model)
print("run ssd")
rknn=RKNN(verbose=True)
ret=rknn.load_rknn(path=rknn_model)
ret=rknn.init_runtime()
#ret = rknn.init_runtime(target='rk1808', device_id='012345789AB')
img=cv2.imread(img_path)
img=cv2.resize(img,(300,300))
print("shape:",img.shape)
outlen=7668 #change to your model
priorbox=[]
with open(priorbox_path) as f:
for line in f:
arr=line.strip().split(",")
priorbox=list(map(float,arr))
priorbox=np.reshape(np.array(priorbox),(2,outlen))
outputs = rknn.inference(inputs=[img])#,data_format="nchw",data_type="float32"
print("pb:",priorbox.shape,priorbox)
print("loc:",outputs[0].shape,outputs[0])
print("conf:",outputs[1].shape,outputs[1])
NUM_RESULTS=outlen//4
NUM_CLASSES=2
box_priors= priorbox[0].reshape((NUM_RESULTS,4))
box_var = priorbox[1].reshape((NUM_RESULTS,4))
loc = outputs[0].reshape((NUM_RESULTS, 4))
conf = outputs[1].reshape((NUM_RESULTS, NUM_CLASSES))
#compute softmax
conf = [[x/(x+y),y/(x+y)] for x,y in np.exp(conf)]
# Post Process
for i in range(0, NUM_RESULTS):
pb = box_priors[i]
lc = loc[i]
var= box_var[i]
pb_w = pb[2] - pb[0]
pb_h = pb[3] - pb[1]
        pb_cx = (pb[0] + pb[2]) * 0.5
        pb_cy = (pb[1] + pb[3]) * 0.5
        bbox_cx = var[0] * lc[0] * pb_w + pb_cx
        bbox_cy = var[1] * lc[1] * pb_h + pb_cy
        bbox_w = math.exp(var[2] * lc[2]) * pb_w
        bbox_h = math.exp(var[3] * lc[3]) * pb_h
        xmin = bbox_cx - bbox_w * 0.5
        ymin = bbox_cy - bbox_h * 0.5
        xmax = bbox_cx + bbox_w * 0.5
        ymax = bbox_cy + bbox_h * 0.5
        xmin *= 300 #input width
        ymin *= 300 #input height
        xmax *= 300 #input width
        ymax *= 300 #input height
        score = conf[i][1]
if score > 0.9:
print("score:",score)
cv2.rectangle(img, (int(xmin), int(ymin)), (int(xmax), int(ymax)),(0, 0, 255), 3)
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))  # OpenCV loads BGR; convert to RGB for matplotlib
plt.show()
print("ssd finished")
if __name__=="__main__":
img_path="test.jpeg"
priorbox_path="priorbox_flatten.txt"
run_ssd(img_path,priorbox_path)
|
11487466
|
import numpy as np
import pytest
#===============================================================================
@pytest.mark.parametrize( 'n', [5, 10, 13] )
def test_lsmr_tridiagonal( n ):
"""
Test generic LSMR algorithm on tridiagonal linear system.
Parameters
----------
n : int
Dimension of linear system (number of rows = number of columns).
"""
from psydac.linalg.iterative_solvers import lsmr
#---------------------------------------------------------------------------
# PARAMETERS
#---------------------------------------------------------------------------
# Build generic non-singular matrix
sdiag = np.random.random( n - 1 )
diag = np.random.random( n )
A = np.diag(sdiag,-1) + np.diag(diag,0) + np.diag(sdiag,1)
# Build exact solution: here with random values in [-1,1]
xe = 2.0 * np.random.random( n ) - 1.0
# Tolerance for success: L2-norm of error in solution
tol = 1e-10
#---------------------------------------------------------------------------
# TEST
#---------------------------------------------------------------------------
# Title
print()
print( "="*80 )
print( "SERIAL TEST: solve linear system A*x = b using lsmr" )
print( "="*80 )
print()
# Manufacture right-hand-side vector from exact solution
b = A.dot( xe )
    # Solve linear system using LSMR
x, info = lsmr( A, A.T, b, tol=1e-13, verbose=True )
# Verify correctness of calculation: L2-norm of error
res = A.dot(x)-b
res_norm = np.linalg.norm( res )
#---------------------------------------------------------------------------
# TERMINAL OUTPUT
#---------------------------------------------------------------------------
print()
print( 'A =', A, sep='\n' )
print( 'b =', b )
print( 'x =', x )
print( 'xe =', xe )
print( 'info =', info )
print()
print( "-"*40 )
print( "L2-norm of error in solution = {:.2e}".format( res_norm ) )
if res_norm < tol:
print( "PASSED" )
else:
print( "FAIL" )
print( "-"*40 )
#---------------------------------------------------------------------------
# PYTEST
#---------------------------------------------------------------------------
assert res_norm < tol
|
11487490
|
import asyncio
import logging
from ether import calldata, ethrpc
from maintainer import config
from ether.ether_types import Receipt
from ether.transactions import UnsignedEthTx
from typing import Any, cast, Dict, Iterator, List, Optional
logger = logging.getLogger('root.summa_relay.shared_eth')
GWEI = 1000000000
DEFAULT_GAS = 500_000
DEFAULT_GAS_PRICE = 2 * GWEI
CONNECTION: ethrpc.BaseRPC
NONCE: Iterator[int] # yields ints, takes no sends
def _nonce(i: int) -> Iterator[int]:
'''Infinite generator for nonces'''
index = i
while 1:
yield index
index += 1
async def init() -> None:
'''Set up a connection to the interwebs'''
global CONNECTION
c = config.get()
network = c['NETWORK']
project_id = c['PROJECT_ID']
uri = c['ETHER_URL']
force_https = project_id != ''
logger.info(f'contract is {c["CONTRACT"]}')
CONNECTION = ethrpc.get_client(
network=network,
infura_key=project_id,
uri=uri,
logger=logger.getChild('ethrpc'),
force_https=force_https)
await CONNECTION.open()
if c['PRIVKEY'] is None and c['GETH_UNLOCK'] is None:
        logger.warning(
            'No ethereum privkey found in env config. Txns will error')
else:
global NONCE
address = cast(str, c['ETH_ADDRESS'])
n = await CONNECTION.get_nonce(address)
NONCE = _nonce(n)
logger.info(f'nonce is {n}')
async def close_connection() -> None:
try:
global CONNECTION
await CONNECTION.close()
except NameError:
pass
async def sign_and_broadcast(
tx: UnsignedEthTx,
ignore_result: bool = False) -> None:
'''Sign an ethereum transaction and broadcast it to the network'''
c = config.get()
privkey = c['PRIVKEY']
address = c['ETH_ADDRESS']
unlock_code = c['GETH_UNLOCK']
if privkey is None and unlock_code is None:
raise RuntimeError('Attempted to sign tx without access to key')
if privkey is None:
logger.debug('signing with ether node')
await CONNECTION._RPC(
'personal_unlockAccount',
[address, unlock_code])
tx_id = await CONNECTION.send_transaction(cast(str, address), tx)
else:
logger.debug('signing with local key')
signed = tx.sign(cast(bytes, privkey))
serialized = signed.serialize_hex()
tx_id = await CONNECTION.broadcast(serialized)
logger.info(f'dispatched transaction {tx_id}')
if not ignore_result:
asyncio.ensure_future(_track_tx_result(tx_id))
def make_call_tx(
contract: str,
abi: List[Dict[str, Any]],
method: str,
args: List[Any],
nonce: int,
value: int = 0,
gas: int = DEFAULT_GAS,
gas_price: int = DEFAULT_GAS_PRICE) -> UnsignedEthTx:
'''
    Builds an unsigned contract-call transaction
Args:
contract (str): address of contract being called
abi (dict): contract ABI
method (str): the name of the method to call
args (list): the arguments to the method call
nonce (int): the account nonce for the txn
value (int): ether in wei
gas_price (int): the price of gas in wei or gwei
Returns:
(UnsignedEthTx): the unsigned tx object
'''
logger.debug(f'making tx call {method} on {contract} '
f'with value {value} and {len(args)} args')
gas_price = _adjust_gas_price(gas_price)
chainId = config.get()['CHAIN_ID']
data = calldata.call(
method,
args,
abi)
txn = UnsignedEthTx(
to=contract,
value=value,
gas=gas,
gasPrice=gas_price,
nonce=nonce,
data=data,
chainId=chainId)
return txn
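# Illustrative usage sketch (added; not from the original module). The ABI
# entry and addresses below are hypothetical, and we assume calldata.call
# accepts a standard Ethereum JSON ABI list:
#   transfer_abi = [{"name": "transfer", "type": "function",
#                    "inputs": [{"name": "to", "type": "address"},
#                               {"name": "value", "type": "uint256"}],
#                    "outputs": [{"name": "", "type": "bool"}]}]
#   tx = make_call_tx(contract="0x" + "00" * 20, abi=transfer_abi,
#                     method="transfer", args=["0x" + "11" * 20, 1],
#                     nonce=next(NONCE))  # NONCE is available after init()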
def _adjust_gas_price(gas_price: int) -> int:
'''
We accept gas price in GWEI or in WEI.
This adjusts, and ensures we error if it's high.
Args:
gas_price (int): the user-provided gas price
Returns:
(int): the adjusted price
'''
if gas_price < GWEI:
gas_price = gas_price * GWEI
if gas_price > 1000 * GWEI:
logger.error('rejecting high gas price')
raise ValueError(
'very high gas price detected: {} gwei'.format(gas_price / GWEI))
return gas_price
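# Worked examples of the adjustment (added note, derived from the code above):
#   _adjust_gas_price(2)          -> 2 * GWEI  (below 1 GWEI, so read as gwei)
#   _adjust_gas_price(3 * GWEI)   -> 3 * GWEI  (already wei, passed through)
#   _adjust_gas_price(2000 * GWEI) raises ValueError (above the 1000 gwei cap)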
async def _track_tx_result(tx_id: str) -> None:
    '''Keep track of the result of a transaction by polling every 30 seconds'''
receipt_or_none: Optional[Receipt] = None
for _ in range(20):
await asyncio.sleep(30)
receipt_or_none = await CONNECTION.get_tx_receipt(tx_id)
if receipt_or_none is not None:
break
if receipt_or_none is None:
raise RuntimeError(f'No receipt after 10 minutes: {tx_id}')
receipt = cast(Receipt, receipt_or_none)
logger.info(f'Receipt for {tx_id} status is {receipt["status"]}')
if receipt['status'] != '0x1':
raise RuntimeError(f'Failed tx: {receipt["transactionHash"]}')
|
11487493
|
import os
import subprocess
from unittest import mock
import pytest
from briefcase.exceptions import BriefcaseCommandError
from briefcase.platforms.macOS.app import macOSAppRunCommand
def test_run_app(first_app_config, tmp_path, monkeypatch):
"""A macOS app can be started."""
command = macOSAppRunCommand(base_path=tmp_path)
command.subprocess = mock.MagicMock()
log_stream_process = mock.MagicMock()
command.subprocess.Popen.return_value = log_stream_process
monkeypatch.setattr(
"briefcase.platforms.macOS.get_process_id_by_command", lambda *a, **kw: 100
)
monkeypatch.setattr(
"briefcase.platforms.macOS.is_process_dead", lambda *a, **kw: False
)
command.run_app(first_app_config)
# Calls were made to start the app and to start a log stream.
bin_path = command.binary_path(first_app_config)
sender = bin_path / "Contents" / "MacOS" / "First App"
command.subprocess.Popen.assert_called_with(
[
"log",
"stream",
"--style",
"compact",
"--predicate",
f'senderImagePath=="{sender}"'
f' OR (processImagePath=="{sender}"'
' AND senderImagePath=="/usr/lib/libffi.dylib")',
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=1,
)
command.subprocess.run.assert_called_with(
["open", "-n", os.fsdecode(bin_path)], check=True
)
command.subprocess.stream_output.assert_called_with(
"log stream", log_stream_process, stop_func=mock.ANY
)
command.subprocess.cleanup.assert_called_with("log stream", log_stream_process)
def test_run_app_failed(first_app_config, tmp_path):
"""If there's a problem started the app, an exception is raised."""
command = macOSAppRunCommand(base_path=tmp_path)
command.subprocess = mock.MagicMock()
log_stream_process = mock.MagicMock()
command.subprocess.Popen.return_value = log_stream_process
command.subprocess.run.side_effect = subprocess.CalledProcessError(
cmd=["open", "-n", os.fsdecode(command.binary_path(first_app_config))],
returncode=1,
)
with pytest.raises(BriefcaseCommandError):
command.run_app(first_app_config)
# Calls were made to start the app and to start a log stream.
bin_path = command.binary_path(first_app_config)
sender = bin_path / "Contents" / "MacOS" / "First App"
command.subprocess.Popen.assert_called_with(
[
"log",
"stream",
"--style",
"compact",
"--predicate",
f'senderImagePath=="{sender}"'
f' OR (processImagePath=="{sender}"'
' AND senderImagePath=="/usr/lib/libffi.dylib")',
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=1,
)
command.subprocess.run.assert_called_with(
["open", "-n", os.fsdecode(bin_path)], check=True
)
# No attempt was made to stream the log; but there was a cleanup
command.subprocess.stream_output.assert_not_called()
command.subprocess.cleanup.assert_called_with("log stream", log_stream_process)
def test_run_app_find_pid_failed(first_app_config, tmp_path, monkeypatch, capsys):
"""If after app is started, its pid is not found, do not stream output."""
command = macOSAppRunCommand(base_path=tmp_path)
command.subprocess = mock.MagicMock()
log_stream_process = mock.MagicMock()
command.subprocess.Popen.return_value = log_stream_process
monkeypatch.setattr(
"briefcase.platforms.macOS.get_process_id_by_command", lambda *a, **kw: None
)
command.run_app(first_app_config)
# Calls were made to start the app and to start a log stream.
bin_path = command.binary_path(first_app_config)
sender = bin_path / "Contents" / "MacOS" / "First App"
command.subprocess.Popen.assert_called_with(
[
"log",
"stream",
"--style",
"compact",
"--predicate",
f'senderImagePath=="{sender}"'
f' OR (processImagePath=="{sender}"'
' AND senderImagePath=="/usr/lib/libffi.dylib")',
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=1,
)
command.subprocess.run.assert_called_with(
["open", "-n", os.fsdecode(bin_path)], check=True
)
assert capsys.readouterr().out == (
"\n"
"[first-app] Starting app...\n"
"\n"
"Unable to find process for app first-app to start log streaming.\n"
)
command.subprocess.stream_output.assert_not_called()
command.subprocess.cleanup.assert_called_with("log stream", log_stream_process)
|
11487494
|
from django.contrib.auth.models import User
from rest_framework import routers, serializers, viewsets
from registration.models import UserInfo
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ['url', 'username']
class UserInfoSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = UserInfo
fields = ['user', 'url', 'small_intro', 'profile_pic']
|
11487545
|
import os
import threading
import numpy as np
import shutil
from math import exp
from PIL import Image
import matplotlib.pyplot as plt
from network import VGG19
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.autograd import Variable
from torchvision.utils import save_image
class dehazing_loss(nn.Module):
def __init__(self, coeff_l1=1.0, coeff_cl=0.5, coeff_ssim=0.1):
super(dehazing_loss, self).__init__()
self.content_loss = ContentLoss()
self.coeff_l1 = coeff_l1
self.coeff_cl = coeff_cl # content loss coefficient
self.coeff_ssim = coeff_ssim # ssim loss coefficient
self.ssim = 0 if self.coeff_ssim == 0 else SSIM(window_size=11)
def forward(self, input_wo_brelu, target):
input = input_wo_brelu.clone().clamp(0, 1)
loss = self.coeff_l1 * F.l1_loss(input, target) + self.coeff_cl * self.content_loss(input, target) \
+ self.coeff_ssim * (1 - ssim(input, target))
return loss
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
return gauss/gauss.sum()
def create_window(window_size, channel, sigma=1.5):  # sigma is a parameter so Gaussiansmoothing's sigma=5 call works
    _1D_window = gaussian(window_size, sigma).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
return window
def _ssim(img1, img2, window, window_size, channel, size_average = True):
mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
def __init__(self, window_size = 11, size_average = True):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = 1
self.window = create_window(window_size, self.channel)
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.data.type() == img1.data.type():
window = self.window
else:
window = create_window(self.window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
self.window = window
self.channel = channel
return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
def ssim(img1, img2, window_size = 11, size_average = True):
(_, channel, _, _) = img1.size()
window = create_window(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim(img1, img2, window, window_size, channel, size_average)
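# Sanity check (added note): for identical images SSIM is exactly 1, since
# 2*mu1_mu2 + C1 == mu1_sq + mu2_sq + C1 and 2*sigma12 + C2 == sigma1_sq +
# sigma2_sq + C2 when img1 == img2. For example:
#   img = torch.rand(1, 3, 32, 32)
#   assert abs(ssim(img, img).item() - 1.0) < 1e-6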
class ContentLoss(nn.Module):
r"""
Perceptual loss, VGG-based
https://arxiv.org/abs/1603.08155
https://github.com/dxyang/StyleTransfer/blob/master/utils.py
"""
def __init__(self, weights=[1.0, 1.0, 1.0, 1.0, 1.0]):
super().__init__()
self.add_module('vgg', VGG19().cuda())
self.criterion = torch.nn.L1Loss().cuda()
self.weights = weights
def __call__(self, x, y):
# Compute features
x_vgg, y_vgg = self.vgg(x), self.vgg(y)
content_loss = 0.0
content_loss += self.weights[0] * self.criterion(x_vgg['relu1_1'], y_vgg['relu1_1'])
content_loss += self.weights[1] * self.criterion(x_vgg['relu2_1'], y_vgg['relu2_1'])
content_loss += self.weights[2] * self.criterion(x_vgg['relu3_1'], y_vgg['relu3_1'])
content_loss += self.weights[3] * self.criterion(x_vgg['relu4_1'], y_vgg['relu4_1'])
content_loss += self.weights[4] * self.criterion(x_vgg['relu5_1'], y_vgg['relu5_1'])
return content_loss
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def Gaussiansmoothing(img, channel=3, window_size = 11):
window = create_window(window_size, channel, sigma=5)
if img.is_cuda:
window = window.cuda(img.get_device())
window = window.type_as(img)
pad = window_size//2
padded_img = F.pad(img, (pad, pad, pad, pad), mode='reflect')
x_smooth = F.conv2d(padded_img, window, padding=0, groups=channel)
return x_smooth, img - x_smooth
def psnr(output, target):
"""
Computes the PSNR.
1 means the maximum value of intensity(255)
"""
psnr = 0
output_temp = output.clone().clamp(0, 1)
with torch.no_grad():
mse = torch.mean((output_temp - target)**2, dim=(1, 2, 3))
psnr = 10 * torch.log10(1 / mse)
psnr = torch.mean(psnr).item()
return psnr
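# Worked example (added note): with intensities in [0, 1], a constant error of
# 0.1 gives mse = 0.01 per sample and psnr = 10 * log10(1 / 0.01) = 20 dB:
#   out = torch.zeros(1, 3, 8, 8)
#   tgt = torch.full((1, 3, 8, 8), 0.1)
#   assert abs(psnr(out, tgt) - 20.0) < 1e-4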
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
"""
Saves the serialized current checkpoint
Params
state =
"""
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, os.path.join(os.path.dirname(filename), 'model_best.pth.tar'))
def adjust_learning_rate(args, optimizer, epoch, prev_lr):
"""
Sets the learning rate to the initial LR decayed by 10 every 30 epochs
"""
if args.lr_mode == 'step':
lr = args.lr * (0.5 ** (epoch // args.step))
elif args.lr_mode == 'poly':
lr = args.lr * (1 - epoch / args.epochs) ** 0.9
elif args.lr_mode == None:
return optimizer.param_groups[0]['lr']
else:
raise ValueError('Unknown lr mode {}'.format(args.lr_mode))
if lr != prev_lr:
print('Learning rate has changed!')
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def save_output_images(predictions, filenames, output_dir):
"""
Saves a given (B x C x H x W) into an image file.
If given a mini-batch tensor, will save the tensor as a grid of images.
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir, exist_ok=True)
for ind in range(len(filenames)):
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = os.path.split(fn)[0]
if not os.path.exists(out_dir):
os.makedirs(out_dir, exist_ok=True)
pred = predictions[ind]
save_image(pred, fn)
def draw_curves(training_loss, training_score, validation_loss, validation_score, epoch, save_dir='./curves'):
fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True)
x = np.arange(1, epoch+1, step=1)
axes[0].plot(x, training_loss, label='train', alpha=0.8)
axes[0].plot(x, validation_loss, label='val', alpha=0.8)
axes[0].set_xlim(0, epoch+1)
axes[0].set_xlabel("Epochs")
axes[0].set_ylim(0, 1.2)
axes[0].set_ylabel('Losses')
axes[0].legend()
axes[0].grid()
axes[1].plot(x, training_score, label='train', alpha=0.8)
axes[1].plot(x, validation_score, label='val', alpha=0.8)
axes[1].set_xlim(0, epoch+1)
axes[1].set_xlabel("Epochs")
axes[1].set_ylim(5, 25.0)
axes[1].set_ylabel('Scores')
axes[1].legend()
axes[1].grid()
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
plt.savefig(os.path.join(save_dir, 'epoch_{:04d}_curve.png'.format(epoch)))
plt.close('all')
|
11487598
|
from ..models import SequencingMachine, MACHINE_MODEL_HISEQ2000, INDEX_WORKFLOW_A
class SetupSequencingMachineMixin:
def setUp(self):
super().setUp()
self.hiseq2000 = SequencingMachine.objects.create(
project=self.project,
vendor_id="Hxxxxxxxx",
label="Test machine",
machine_model=MACHINE_MODEL_HISEQ2000,
slot_count=2,
dual_index_workflow=INDEX_WORKFLOW_A,
)
# Additional data for posting to API
self.post_data = {
"vendor_id": "Hbbbbbbb",
"label": "API created machine",
"machine_model": MACHINE_MODEL_HISEQ2000,
"slot_count": 2,
"dual_index_workflow": INDEX_WORKFLOW_A,
}
def make_machine(self):
return SequencingMachine.objects.create(
project=self.project,
vendor_id="Hyyyyyyyy",
label="Another test machine",
description="This is to be found",
machine_model=MACHINE_MODEL_HISEQ2000,
slot_count=2,
dual_index_workflow=INDEX_WORKFLOW_A,
)
|
11487631
|
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from pylmnn import LargeMarginNearestNeighbor as LMNN
# Load a data set
X, y = load_iris(return_X_y=True)
# Split in training and testing set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.7, stratify=y, random_state=42)
# Set up the hyperparameters
k_train, k_test, n_components, max_iter = 3, 3, X.shape[1], 180
# Instantiate the metric learner
lmnn = LMNN(n_neighbors=k_train, max_iter=max_iter, n_components=n_components)
# Train the metric learner
lmnn.fit(X_train, y_train)
# Fit the nearest neighbors classifier
knn = KNeighborsClassifier(n_neighbors=k_test)
knn.fit(lmnn.transform(X_train), y_train)
# Compute the k-nearest neighbor test accuracy after applying the learned transformation
lmnn_acc = knn.score(lmnn.transform(X_test), y_test)
print('LMNN accuracy on test set of {} points: {:.4f}'.format(X_test.shape[0], lmnn_acc))
|
11487653
|
from functools import lru_cache
from pytezos.rpc.contract import Contract
from pytezos.rpc.node import RpcQuery
class Context(RpcQuery):
def __init__(self, *args, **kwargs):
super(Context, self).__init__(*args, **kwargs)
def __call__(self, *args, **kwargs):
return self._node.get(f'{self._path}/raw/json?depth=1', cache=self._cache)
@property
@lru_cache(maxsize=None)
def contracts(self):
"""
Attention: very slow method
:return: list of Contracts
"""
return RpcQuery(
path=f'{self._path}/contracts',
node=self._node,
child_class=Contract,
**self._kwargs
)
|
11487730
|
import numpy as np
import pytest
import scipy
from packaging import version
if version.parse(scipy.__version__) >= version.parse('1.7.0'):
from alibi_detect.cd import CVMDrift
n, n_test = 500, 200
np.random.seed(0)
n_features = [2] # TODO - test 1D case once BaseUnivariateDrift updated
tests_cvmdrift = list(n_features)
n_tests = len(tests_cvmdrift)
@pytest.fixture
def cvmdrift_params(request):
return tests_cvmdrift[request.param]
@pytest.mark.skipif(version.parse(scipy.__version__) < version.parse('1.7.0'),
reason="Requires scipy version >= 1.7.0")
@pytest.mark.parametrize('cvmdrift_params', list(range(n_tests)), indirect=True)
def test_cvmdrift(cvmdrift_params):
n_feat = cvmdrift_params
# Reference data
x_ref = np.random.normal(0, 1, size=(n, n_feat)).squeeze() # squeeze to test vec input in 1D case
# Instantiate detector
cd = CVMDrift(x_ref=x_ref, p_val=0.05)
# Test predict on reference data
x_h0 = x_ref.copy()
preds = cd.predict(x_h0, return_p_val=True)
assert preds['data']['is_drift'] == 0 and (preds['data']['p_val'] >= cd.p_val).any()
# Test predict on heavily drifted data
x_h1 = np.random.normal(2, 2, size=(n, n_feat)).squeeze()
preds = cd.predict(x_h1, drift_type='batch')
assert preds['data']['is_drift'] == 1
assert preds['data']['distance'].min() >= 0.
|
11487731
|
from app.functions.firestore import is_doc_exist
from app.models.firestore import Task
from typing import Any, List
from pydantic import BaseModel, validator
class AnnotationResult(BaseModel):
id: str
name: str
email: str
data: Any
result_data: Any
order_index: int
user_id: str
user_task_id: str
annotation_id: str
created_at: Any
updated_at: Any
class ResponseTaskResult(BaseModel):
task: Task
annotations: List[AnnotationResult]
class RequestTaskResult(BaseModel):
task_id: str
@validator("task_id")
def task_id_is_exist(cls, v):
if not is_doc_exist("tasks", v):
raise ValueError(f"task_id: {v} is not found.")
return v
|
11487777
|
from django.contrib.auth.models import User
from rest_framework import generics, permissions
from rest_framework.response import Response
from rest_framework.views import APIView
from .models import UserProfile
from .serializers import UserDetailsSerializer, UserProfileSerializer
class EmailExistsCheck(APIView):
throttle_classes = ()
permission_classes = ()
def post(self, request, *args, **kwargs):
content = {}
try:
email = request.data.get('email')
User.objects.get(email=email)
content = {'email': email}
except User.DoesNotExist:
pass
return Response(content)
class UserExistsCheck(APIView):
throttle_classes = ()
permission_classes = ()
def post(self, request, *args, **kwargs):
content = {}
try:
username = request.data.get('username')
User.objects.get(username=username)
content = {'user': username}
except User.DoesNotExist:
pass
return Response(content)
class UserProfileDetailsView(generics.RetrieveUpdateAPIView):
"""
View for accessing a user's Profile.
"""
serializer_class = UserProfileSerializer
permission_classes = (permissions.IsAuthenticated,)
def get_object(self):
return self.get_queryset()
def get_queryset(self):
user, created = UserProfile.objects.get_or_create(
owner=self.request.user)
return user
class UserDetailsView(generics.RetrieveUpdateAPIView):
"""
Reads and updates UserModel fields
Accepts GET, PUT, PATCH methods.
Default accepted fields: username, first_name, last_name
Default display fields: pk, username, email, first_name, last_name
Read-only fields: pk, email
Returns UserModel fields.
NOTE: This is an override of django-rest-auth's default UserDetailsView
in order to use our custom UserDetailsSerializer class for the serializer.
"""
serializer_class = UserDetailsSerializer
permission_classes = (permissions.IsAuthenticated,)
def get_object(self):
return self.request.user
|
11487800
|
import figurefirst as fifi
layout = fifi.svg_to_axes.FigureLayout('example_negative_labels.svg')
layout.make_mplfigures()
layout.fig.set_facecolor('None')
ex = layout.axes['ex']
ex.plot([1, 2], [3, 4])
fifi.mpl_functions.adjust_spines(ex, spines='left', yticks=[-1, -2])
layout.insert_figures('panels', cleartarget=True)
layout.write_svg('negative_labels_output.svg')
|
11487801
|
import json
import random
import re
from os import path
import asyncio
from copy import deepcopy
from aiocqhttp import Event
from nonebot import CommandSession, get_bot, on_command
from nonebot.permission import *
from utils_bot.command_ops import force_private
from utils_bot.logging import logger
from utils_bot.typing import Union
FAILED_MSG = 'View keyword pairs: view [group id] [mode]\n'\
             'Add a single reply: add [group id] [mode]\n'\
             'Delete a single reply: del [group id] [mode]\n'\
             'Delete a whole group: delall [group id]\n'\
             'Mode: full match-1, inclusive match-2, regex match-3'
__plugin_name__ = 'Custom group-chat replies (private)'
__plugin_usage__ = r'''feature: custom replies
Reads custom replies from a json file and responds in group chats.
You can PM the bot to add or delete replies; the changes take effect immediately.
''' + FAILED_MSG
# define reply data file path
DATA_PATH = path.join(path.dirname(__file__), 'group_data.json')
DEFAULT_GROUP_DICT: dict = {
"full_match": {},
"inclusive_match": {},
"regex_match" : {}
}
def load_data() -> dict:
if not path.exists(DATA_PATH):
with open(DATA_PATH, 'w') as datafile:
json.dump({
"global": DEFAULT_GROUP_DICT
}, datafile, indent=4)
with open(DATA_PATH) as datafile:
return json.load(datafile)
# load reply data
REPLIES: dict = load_data()
# acquires global event monitor
bot = get_bot()
# auto reply in group chats
def process_var(ctx: Event, myText: str) -> str:
'process whether the reply keyword contains variables.'
if not re.search(r'{SENDER_.+]', myText):
return myText
# define pointer constants
SENDER_ID: str = str(ctx['sender']['user_id'])
SENDER_NICK: str = ctx['sender']['nickname']
SENDER_CARD: str = ctx['sender']['card']
SENDER_ROLE: str = ctx['sender']['role']
SENDER_TITLE: str = ctx['sender']['title']
# when you chose python, you gave up efficiency...
return myText.replace('{SENDER_ID]', SENDER_ID).\
replace('{SENDER_NICK]', SENDER_NICK).\
replace('{SENDER_CARD]', SENDER_CARD).\
replace('{SENDER_ROLE]', SENDER_ROLE).\
replace('{SENDER_TITLE]', SENDER_TITLE)
class _Get_Out(Exception):
pass
@bot.on_message('group')
async def handle_keyword_reply(ctx: Event):
currentGroupId: int = ctx['group_id']
textReceived: str = ctx['raw_message']
toSend: Union[str, None] = None
# config specific to groups is prioritized
for groupId in (str(currentGroupId), 'global'):
try:
# handles full_match
mayReply = REPLIES[groupId]['full_match'].get(textReceived, None)
if mayReply is not None:
toSend = process_var(ctx, random.choice(mayReply))
raise _Get_Out
# handles inclusive_match
for keyword, reply in REPLIES[groupId]['inclusive_match'].items():
if keyword in textReceived:
toSend = process_var(ctx, random.choice(reply))
raise _Get_Out
# handles regex_match
for keyword, reply in REPLIES[groupId]['regex_match'].items():
if re.search(keyword, textReceived):
toSend = process_var(ctx, random.choice(reply))
raise _Get_Out
except KeyError:
# REPLIES[str(groupId)] may not exist, go to global
pass
except _Get_Out:
break
if toSend is not None:
        # wait a few seconds before sending the message
await asyncio.sleep(random.randint(1,5))
await bot.send_group_msg(group_id=currentGroupId, message=toSend)
# superuser can modify replies
class keyword_ops:
'operations on keyword dict'
def __init__(self, repliesDict,
order='', groupId='', mode='', keyword='', reply=''):
"""
:param order: 'add', 'del', 'delall' or 'view'
:param groupId: digits or 'global'
:param mode: '1', '2' or '3'
:param keyword: keyword
:param reply: reply
"""
        if order not in ('add', 'del', 'delall', 'view'):
            raise Exception("Invalid command\n" + FAILED_MSG)
        if not groupId.isdecimal() and groupId != 'global':
            raise Exception("Invalid group id\n" + FAILED_MSG)
if order != 'delall':
if mode == '1':
self.modeStr: str = 'full_match'
elif mode == '2':
self.modeStr: str = 'inclusive_match'
elif mode == '3':
self.modeStr: str = 'regex_match'
else:
raise Exception("模式错误\n" + FAILED_MSG)
        self.repliesDict: dict = deepcopy(repliesDict)
self.order: str = order
self.groupId: str = groupId
self.mode: str = mode
self.keyword: str = keyword
self.reply: str = reply
def add(self):
        self.repliesDict.setdefault(self.groupId, deepcopy(DEFAULT_GROUP_DICT))  # deepcopy: don't share the mutable default across groups
modifyPlace = self.repliesDict[self.groupId][self.modeStr]
modifyPlace.setdefault(self.keyword, [])
modifyPlace[self.keyword].append(self.reply)
def dele(self):
del self.repliesDict[self.groupId][self.modeStr][self.keyword]
def delall(self):
if self.groupId == 'global':
            self.repliesDict['global'] = deepcopy(DEFAULT_GROUP_DICT)
else:
del self.repliesDict[self.groupId]
@staticmethod
def backup():
from shutil import copyfile
copyfile(DATA_PATH, path.join(path.dirname(__file__), 'group_data_bak.json'))
def rewrite(self):
with open(DATA_PATH, 'w') as datafile:
json.dump(self.repliesDict, datafile, indent=4, ensure_ascii=False)
def send(self):
return self.repliesDict
def view_keywords(self) -> str:
'NOTHROW. returns the keyword pairs for group --> mode'
try:
pos: dict = self.repliesDict[self.groupId][self.modeStr]
res: str = f'{self.groupId}, {self.modeStr}\n'
for k, v in pos.items():
if len(v) > 10:
v = v[:10] + ['...']
res += '"{}": "{}"\n'.format(k, '"; "'.join(v))
if pos == {}:
                res += '(empty)'
res = res.rstrip('\n')
except KeyError:
            res = 'Invalid group id?'
return res
def modify_keywords(self) -> dict:
'modifies the keyword json file, returns the new keyword dict'
try:
if self.order == 'add':
self.add()
elif self.order == 'del':
self.dele()
elif self.order == 'delall':
self.delall()
except KeyError:
            raise Exception('Keyword table unavailable, or no such keyword')
try:
self.backup()
except Exception as exc:
            raise Exception('An error occurred; current settings were not changed. ' + str(exc))
self.rewrite()
return self.send()
# keyword interface
@on_command('群关键字', permission=SUPERUSER)
@force_private
async def keyword_mod(session: CommandSession):
order, groupId, mode = '', '', ''
iniParam: list = session.get('iniParam')
try:
order = iniParam[0]
groupId = iniParam[1]
mode = iniParam[2]
except Exception:
pass
logger.info(f'keyword modification called: {order}; {groupId}; {mode}')
##################################################################
global REPLIES
# get keyword mod object, reuse if this command is recalled and continued
if not session.state.get('keymod_obj'):
try:
keymod = keyword_ops(REPLIES, order, groupId, mode)
session.state['keymod_obj'] = keymod
except Exception as exc:
session.finish(str(exc))
else:
keymod = session.state['keymod_obj']
# VIEW order
if order == 'view':
session.finish(keymod.view_keywords())
# MODIFY order
if order in ['add', 'del']:
        keymod.keyword = session.get('keyword', prompt='Enter the keyword')
if order == 'add':
            keymod.reply = session.get('reply', prompt='Enter the reply')
try:
REPLIES = keymod.modify_keywords()
session.finish('success!')
except Exception as exc:
session.finish(str(exc))
@keyword_mod.args_parser
@force_private
async def keyword_mod_arg(session: CommandSession):
argStripped = session.current_arg_text.strip()
if session.is_first_run:
if argStripped:
session.state['iniParam'] = argStripped.split(' ')
# if no arg given at first
else:
session.finish(FAILED_MSG)
elif session.current_key == 'keyword':
session.state['keyword'] = argStripped
elif session.current_key == 'reply':
session.state['reply'] = argStripped
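# Illustrative sketch (added; the fake event below is hypothetical): process_var
# substitutes the {SENDER_*] placeholders (note the deliberate "]" closing
# delimiter used consistently by this plugin) with fields of the sender:
#   fake_ctx = {'sender': {'user_id': 42, 'nickname': 'neko', 'card': '',
#                          'role': 'member', 'title': ''}}
#   assert process_var(fake_ctx, 'hi {SENDER_NICK]') == 'hi neko'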
|
11487851
|
from django.conf.urls import url
from apps.ui_test.views import ui_test,ui_test_result,ui_package,ui_show
from apps.ui_task.views import ui_task_simple
urlpatterns = [
url(r'^UITest$', ui_test.uiTestPage,name="UITest"),
url(r'^userIsFileShowSubPage$', ui_test.userIsFileShowSubPage, name="userIsFileShowSubPage"),
url(r'^createDir$', ui_test.createDir, name="createDir"),
url(r'^fileExists$', ui_test.fileExists, name="fileExists"),
url(r'^fileUpload$', ui_test.fileUpload, name="fileUpload"),
url(r'^checkFileList$', ui_test.checkFileList, name="checkFileList"),
url(r'^runTest$', ui_test.runTest, name="runTest"),
url(r'^downLoadExcel$', ui_test.file_download, name="downLoadExcel"),
url(r'^file_delete$', ui_test.file_delete, name="file_delete"),
url(r'^uiTestResultPage$', ui_test_result.ui_test_result_page, name="uiTestResultPage"),
url(r'^uiTestResultListCheck$', ui_test_result.ui_test_resultListCheck, name="uiTestResultListCheck"),
    # UI test result endpoints
url(r'^ui_report$', ui_test_result.ui_report, name="ui_report"),
url(r'^cancelUiTask$', ui_test_result.cancelExecuteUiTask, name="cancelUiTask"),
url(r'^checkUiTestLog$', ui_test_result.checkUiTestLog, name="checkUiTestLog"),
url(r'^uiAddSimpleTaskPage$', ui_task_simple.uiAddSimpleTaskPage, name="uiAddSimpleTaskPage"),
url(r'^uiSaveSimpleTask$', ui_task_simple.saveSimpleTask, name="uiSaveSimpleTask"),
url(r'^uiShowSimpleTask$', ui_task_simple.show_ui_simple_task_page, name="uiShowSimpleTask"),
url(r'^uiTaskShowresultListCheck$', ui_task_simple.show_ui_test_resultListCheck, name="uiTaskShowresultListCheck"),
url(r'^uiSimpleTaskExecute$', ui_task_simple.executeSimpleTask, name="uiSimpleTaskExecute"),
url(r'^ui_operationTask$', ui_task_simple.ui_operationTask, name="ui_operationTask"),
url(r'^getTaskForTaskId$', ui_task_simple.getTaskForTaskId, name="getTaskForTaskId"),
url(r'^uiDelSimpleTask$', ui_task_simple.delSimpleTask, name="uiDelSimpleTask"),
url(r'^uiAgainRunTask$', ui_test_result.againRunTask, name="uiAgainRunTask"),
url(r'^uiAppPackagePage$', ui_package.appPackagePage, name="uiAppPackagePage"),
url(r'^appPackageIsExist$', ui_package.appPackageIsExist, name="appPackageIsExist"),
url(r'^uiAppPackage$', ui_package.addPackage, name="uiAppPackage"),
url(r'^uiAppPackageCheckPage$', ui_package.appPackageCheckPage, name="uiAppPackageCheckPage"),
url(r'^uiAppPackageCheckSubPage$', ui_package.appPackageCheckSunPage, name="uiAppPackageCheckSubPage"),
url(r'^uiAppPackageDel$', ui_package.delAppPackage, name="delAppPackage"),
url(r'^editAppPackagePage$', ui_package.editAppPackagePage, name="editAppPackagePage"),
url(r'^getAppPackage$', ui_package.getAppPackage, name="getAppPackage"),
url(r'^saveEditAppPackage$', ui_package.saveEditAppPackage, name="saveEditAppPackage"),
url(r'^uploadAPK$', ui_package.uploadAPK),
url(r'^uploadAPP$', ui_package.uploadAPP),
url(r'^ui/showTaskExecuteProgressing$', ui_show.uiShowProgressing, name="ui_showTaskExecuteProgressing"),
url(r'^ui/showUiTaskProgressIndex$', ui_show.uiShowPorgressIndex, name="ui_showUiTaskProgressIndex"),
url(r'^ui/showUiTaskProgressData$', ui_show.uiGetTaskProgressData, name="ui_uiGetTaskProgressData"),
url(r'^ui/UITaskGetTaskFotTaskId$', ui_task_simple.getTaskRunDetailsForTaskId, name="UITaskGetTaskFotTaskId"),
url(r'^ui/addPageObject$', ui_task_simple.addPageObject, name="addPageObject"),
url(r'^ui/getPageObject$', ui_task_simple.getPageObject, name="getPageObject"),
url(r'^ui/getPageObjectForId$', ui_task_simple.getPageObjectForId, name="getPageObjectForId"),
url(r'^ui/editPageObject$', ui_task_simple.editPageObject, name="editPageObject"),
url(r'^ui/delPageObject$', ui_task_simple.delPageObject, name="delPageObject"),
url(r'^ui/resetPageObject$', ui_task_simple.resetPageObject, name="resetPageObject"),
]
|
11487975
|
import struct
import time
import random
import hashlib
import socket
version = struct.pack("i", 70002)
services = struct.pack("Q", 0)
timestamp = struct.pack("q", int(time.time()))  # "q" expects an int, not a float
addr_recv_services = struct.pack("Q", 0) #services
addr_recv_ip = struct.pack(">16s", b"127.0.0.1")
addr_recv_port = struct.pack(">H", 8333)
addr_trans_services = struct.pack("Q", 0) #services
addr_trans_ip = struct.pack(">16s", b"127.0.0.1")
addr_trans_port = struct.pack(">H", 8333)
nonce = struct.pack("Q", random.getrandbits(64))
user_agent_bytes = struct.pack("B", 0)
starting_height = struct.pack("i", 395292)
relay = struct.pack("?", False)
payload = (version + services + timestamp + addr_recv_services + addr_recv_ip
           + addr_recv_port + addr_trans_services + addr_trans_ip
           + addr_trans_port + nonce + user_agent_bytes + starting_height + relay)
magic = bytes.fromhex("F9BEB4D9")  # Python 3 replacement for "F9BEB4D9".decode("hex")
command = b"version" + 5 * b"\x00"  # command field is 12 bytes, null-padded
length = struct.pack("I", len(payload))
check = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
msg = magic + command + length + check + payload
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
HOST = "192.168.3.11"
PORT = 8333
s.connect((HOST, PORT))
s.send(msg)
s.recv(1024)
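# Framing note (added): every Bitcoin P2P message is a 24-byte header followed
# by the payload:
#   magic (4 bytes) | command (12 bytes, null-padded) | payload length
#   (4 bytes, little-endian) | checksum (first 4 bytes of double-SHA256 of the
#   payload)
# Protocol version 70002 includes the trailing BIP 37 relay flag built above.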
|
11488003
|
import torch
from torch import nn
import torch.nn.functional as F
@torch.no_grad()
def label2onehot(label):
onehot = torch.eye(7).to(label.device)
return onehot[label].permute(0, 3, 1, 2)
def tensor_erode(bin_img, ksize=5):
B, C, H, W = bin_img.shape
pad = (ksize - 1) // 2
bin_img = F.pad(bin_img, [pad, pad, pad, pad], mode='constant', value=0)
patches = bin_img.unfold(dimension=2, size=ksize, step=1)
patches = patches.unfold(dimension=3, size=ksize, step=1)
# B x C x H x W x k x k
eroded, _ = patches.reshape(B, C, H, W, -1).min(dim=-1)
return eroded
def tensor_dilate(bin_img, ksize=5):
B, C, H, W = bin_img.shape
pad = (ksize - 1) // 2
bin_img = F.pad(bin_img, [pad, pad, pad, pad], mode='constant', value=0)
patches = bin_img.unfold(dimension=2, size=ksize, step=1)
patches = patches.unfold(dimension=3, size=ksize, step=1)
# B x C x H x W x k x k
eroded, _ = patches.reshape(B, C, H, W, -1).max(dim=-1)
return eroded
def tensor_close(bin_img, ksize=3):
out = tensor_dilate(bin_img, ksize)
out = tensor_erode(out, ksize)
return out
class OhemCELoss(nn.Module):
def __init__(self, thresh, n_min, ignore_lb=255, *args, **kwargs):
super(OhemCELoss, self).__init__()
self.thresh = -torch.log(torch.tensor(thresh, dtype=torch.float)).cuda()
self.n_min = n_min
self.ignore_lb = ignore_lb
self.criteria = nn.CrossEntropyLoss(ignore_index=ignore_lb, reduction='none')
def forward(self, logits, labels):
loss = self.criteria(logits, labels).view(-1)
loss, _ = torch.sort(loss, descending=True)
if loss[self.n_min] > self.thresh:
loss = loss[loss > self.thresh]
else:
loss = loss[:self.n_min]
return torch.mean(loss)
class OhemBCELoss(nn.Module):
def __init__(self, thresh, n_min):
super(OhemBCELoss, self).__init__()
self.n_min = n_min
self.criteria = nn.BCEWithLogitsLoss(reduction='none')
self.thresh = -torch.log(torch.tensor(thresh, dtype=torch.float)).cuda()
def forward(self, logits, labels):
loss = self.criteria(logits, labels).view(-1)
loss, _ = torch.sort(loss, descending=True)
if loss[self.n_min] > self.thresh:
loss = loss[loss > self.thresh]
else:
loss = loss[: self.n_min]
return torch.mean(loss)
class DiceLoss(nn.Module):
def __init__(self):
super().__init__()
#self.activation = nn.Sigmoid()
def forward(self, pr, gt, eps=1e-7):
#pr = self.activation(pr)
tp = torch.sum(gt * pr)
fp = torch.sum(pr) - tp
fn = torch.sum(gt) - tp
score = (2 * tp + eps) / (2 * tp + fn + fp + eps)
return 1 - score
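# Sanity check (added note): a perfect prediction gives tp == sum(gt) and
# fp == fn == 0, so the Dice score is 1 and the loss is 0:
#   gt = torch.ones(2, 1, 4, 4)
#   assert DiceLoss()(gt.clone(), gt).item() < 1e-6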
|
11488065
|
import pytest
TEST_CASES = [
(
"Adresa este str. Principală nr. 5.",
["Adresa", "este", "str.", "Principală", "nr.", "5", "."],
),
("Teste, etc.", ["Teste", ",", "etc."]),
("Lista, ș.a.m.d.", ["Lista", ",", "ș.a.m.d."]),
("Și d.p.d.v. al...", ["Și", "d.p.d.v.", "al", "..."]),
# number tests
("Clasa a 4-a.", ["Clasa", "a", "4-a", "."]),
("Al 12-lea ceas.", ["Al", "12-lea", "ceas", "."]),
]
@pytest.mark.parametrize("text,expected_tokens", TEST_CASES)
def test_ro_tokenizer_handles_testcases(ro_tokenizer, text, expected_tokens):
tokens = ro_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
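# Hypothetical conftest.py fixture (not in this file): the tests above assume a
# `ro_tokenizer` fixture is provided elsewhere. A blank Romanian spaCy pipeline's
# tokenizer is one plausible definition, since Token.text and Token.is_space are
# spaCy Token attributes:
#
#   import pytest
#   import spacy
#
#   @pytest.fixture
#   def ro_tokenizer():
#       return spacy.blank("ro").tokenizer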
|
11488119
|
import os
from dotenv.main import load_dotenv
load_dotenv()
# Bot setup
PREFIX = "!"
BOT_NAME = "DevProTips"
BOT_TOKEN = os.getenv("DISCORD_TOKEN", "")
# Discord Guild ID
GUILD_ID = int(os.getenv("GUILD_ID", ""))
# Discord Channel IDs
INTRO_CHANNEL_ID = int(os.getenv("INTRO_CHANNEL_ID", ""))
RULES_CHANNEL_ID = int(os.getenv("RULES_CHANNEL_ID", ""))
BOT_LOG_CHANNEL_ID = int(os.getenv("BOT_LOG_CHANNEL_ID", ""))
YOUTUBE_VIDEOS_CHANNEL_ID = int(os.getenv("YOUTUBE_VIDEOS_CHANNEL_ID", ""))
# Discord Role IDs
CONTENT_CREATOR_ROLE_ID = int(os.getenv("CONTENT_CREATOR_ROLE_ID", ""))
DEVELOPER_ROLE_ID = int(os.getenv("DEVELOPER_ROLE_ID", ""))
SUBSCRIBER_ROLE_ID = int(os.getenv("SUBSCRIBER_ROLE_ID", ""))
MEMBER_ROLE_ID = int(os.getenv("MEMBER_ROLE_ID", ""))
UNASSIGNED_ROLE_ID = int(os.getenv("UNASSIGNED_ROLE_ID", ""))
YOUTUBE_PING_ROLE_ID = int(os.getenv("YOUTUBE_PING_ROLE_ID", ""))
# Discord Message IDs
RULES_MESSAGE_ID = int(os.getenv("RULES_MESSAGE_ID", ""))
# YouTube Channel ID
YT_CHANNEL_ID = os.getenv("YT_CHANNEL_ID", "")
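# Note (not in the original file): each int(...) cast above raises ValueError
# when its variable is unset, because int("") is invalid. The .env picked up by
# load_dotenv() therefore has to define every key read here, e.g. with
# placeholder values:
#
#   DISCORD_TOKEN=your-token-here
#   GUILD_ID=123456789012345678
#   INTRO_CHANNEL_ID=123456789012345678
#   (and so on for every other *_ID variable above)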
|
11488121
|
import pickle
import numpy as np
from nltk.tokenize import word_tokenize
from utils.seq2seq.learning_core_20180401 import Seq2seqCore
from utils.io.tools import data_indexer
def feeder_generator(raw_inputs, token_idx_dict, pretrained_dict):
result_dict = dict()
max_length = 0
for one_entry in raw_inputs:
if len(one_entry) > max_length:
max_length = len(one_entry)
x_data = list()
pretrained_x_data = list()
x_length = list()
y_length = list()
for one_entry in raw_inputs:
one_pretrained_x = list()
for one_original_token in one_entry:
try:
one_pretrained_x.append(pretrained_dict[one_original_token])
except Exception:
one_pretrained_x.append(np.zeros([200, ], dtype=np.float32))
x_length.append(len(one_entry))
x_data.append(data_indexer(one_entry, token_idx_dict) + [0] * (max_length + 2 - len(one_entry)))
pretrained_x_data.append(np.concatenate((np.array(one_pretrained_x, dtype=np.float32),
np.zeros([max_length + 2 - len(one_entry), 200], dtype=np.float32)),
axis=0))
y_length.append(5)
result_dict["encoder_length"] = np.array(x_length, dtype=np.int32)
result_dict["decoder_length"] = np.array(y_length, dtype=np.int32)
result_dict["encoder_input"] = np.array(x_data, dtype=np.int32)
result_dict["encoder_pretrained"] = np.array(pretrained_x_data, dtype=np.float32)
return result_dict
def s2s_end2end(filename):
pkl_name = "data/split/20180405.pkl"
token_idx_dict, idx_token_dict, ontology_idx_dict, idx_ontology_dict, pretrained_dict, train_token, test_token, train_ontology, test_ontology = pickle.load(
open(pkl_name, "rb"))
embedding_matrix = pickle.load(open("data/preprocessed/20180405_graph.pkl", "rb"))
s2s_core = Seq2seqCore(gpu_device=0,
encoder_vocab_size=len(token_idx_dict),
decoder_vocab_size=len(ontology_idx_dict),
pretrained_graph_embedding=embedding_matrix)
raw_inputs = list()
for one_line in open(filename):
one_line = one_line.strip()
one_line = one_line.lower()
assert len(one_line.split("\t")) == 2
one_line = one_line.split("\t")[0]
one_line_list = word_tokenize(one_line)
raw_inputs.append(one_line_list)
input_dict = feeder_generator(raw_inputs, token_idx_dict, pretrained_dict)
for epoch_number in range(1, 300):
print(epoch_number)
s2s_core.load("models/20180401/pso/%05d.tfmodel"%epoch_number)
predict_result = s2s_core.predict(input_dict)
predict_list = predict_result.sample_id[:, 0:3].tolist()
f_w = open("data/output/pso_attention_pretrained_graph_%03d.txt"%epoch_number, "w")
for one_predict_result in predict_list:
one_predict_tuple = data_indexer(one_predict_result, idx_ontology_dict)
f_w.write("%s\n" % (" ".join(one_predict_tuple)))
f_w.close()
if __name__ == "__main__":
filename = "data/test/20180405.txt"
s2s_end2end(filename)
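# Input format assumed by s2s_end2end, inferred from the assert above: each line
# of the file is "<utterance>\t<label>" and only the utterance before the tab is
# used. A hypothetical line:
#   who directed this movie?\tdirector_of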
|
11488125
|
from ..mapper import PropertyMapper, ApiInterfaceBase
from ..mapper.types import Timestamp, AnyType
from .chaining_info import ChainingInfo
__all__ = ['ChainingSuggestion', 'ChainingSuggestionInterface']
class ChainingSuggestionInterface(ApiInterfaceBase):
chaining_info: ChainingInfo
profile_chaining_secondary_label: AnyType
class ChainingSuggestion(PropertyMapper, ChainingSuggestionInterface):
pass
|
11488145
|
import os
import pandas
import numpy
import pickle
import warnings
from pathlib import Path
from cytoolz import partial
from . import convert
from . import describe
def do_nothing(data):
"""
A function that does nothing.
Args:
Anything
Returns:
Anything
"""
return data
def deduplicate(data):
"""
Adds the values from any duplicated genes.
Args:
data (pandas.DataFrame ~ (num_samples, num_genes))
Returns:
pandas.DataFrame
"""
return data.groupby(data.columns, axis=1).sum()
def impute(data, scale=0.5):
"""
Replace any zeros in each row with a fraction of the smallest non-zero
value in the corresponding row.
Args:
data (pandas.DataFrame ~ (num_samples, num_genes))
scale (optional; float)
Returns:
imputed data (pandas.DataFrame ~ (num_samples, num_genes))
"""
v = scale * data[data > 0].min(axis=1)
data_fill = data.fillna(0)
return data_fill + (data_fill == 0).multiply(v, axis=0)
class Normalizer(object):
"""
Tools to change units of expression data, primarily to convert to TPM.
Attributes:
gene_lengths (DataFrame): bp lengths for genes.
"""
def __init__(self, identifier='symbol'):
"""
Tools to normalize expression data and transform into TPM.
Args:
identifier (str)
Returns:
Normalizer
"""
# read the gene lengths
p = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'gtex')
gene_info = pandas.read_csv(os.path.join(p, 'gene_info.csv'), sep='\t')
gene_info.set_index('gene_id', inplace=True)
self.gene_lengths = gene_info['bp_length']
# clean the ensemble gene ids
self.gene_lengths.index = convert.clean_ensembl_ids(self.gene_lengths.index)
self.gene_lengths = self.gene_lengths[~self.gene_lengths.index.duplicated(keep='first')]
# convert the gene ids
self.converter = None
if identifier != 'ensembl_gene_id':
self.converter = convert.IDConverter('ensembl_gene_id', identifier)
self.gene_lengths.index = self.converter.convert_list(list(self.gene_lengths.index))
self.describer = describe.Describer(identifier)
# drop any NaN and duplicate ids
self.gene_lengths = self.gene_lengths[~self.gene_lengths.index.isnull()]
self.gene_lengths = self.gene_lengths[~self.gene_lengths.index.duplicated(keep='first')]
def _get_common_genes(self, gene_list):
"""
Get a set of identifiers that occur in GTEx and, therefore,
have gene lengths.
Args:
gene_list (List[str])
Returns:
common_genes (List[str])
"""
if gene_list is None:
# reindex to all of the gtex genes
return list(self.gene_lengths.index)
# select the genes in the gene_list that also occur in gtex
common_genes = [gene for gene in gene_list if gene in self.gene_lengths.index]
# warn the user about any genes that are not in gtex and are being dropped
missing_genes = list(set(gene_list) - set(common_genes))
if len(missing_genes) > 0:
warnings.warn("Could not find identifiers: {}".format(missing_genes))
return common_genes
def reindex(self, data, gene_list=None):
"""
Reindexes the dataframe so that it has the same genes as the gtex
dataset from recount.
Args:
data (pandas.DataFrame ~ (num_samples, num_genes)): any expression data
gene_list (List[str]): a list of gene ids
Returns:
pandas.DataFrame ~ (num_samples, num_common_genes)
"""
common_genes = self._get_common_genes(gene_list)
common = data.reindex(columns=common_genes)
common.fillna(0, inplace=True)
return common
def tpm_from_rpkm(self, data, gene_list=None, imputer=do_nothing):
"""
Transform data from RPKM to TPM.
Unless a gene list is specified, genes are reindex to GTEx:
- Any genes from GTEx that are not in data.columns are set to zero.
- Any genes not present in GTEx are dropped.
Takes an optional imputation method applied after reindexing.
Args:
data (pandas.DataFrame ~ (num_samples, num_genes)): RPKM data
gene_list (optional; List[str]): a list of gene ids
imputer (optional; callable)
Returns:
pandas.DataFrame
"""
subset = imputer(self.reindex(data, gene_list))
return 10**6 * subset.divide(subset.sum(axis=1), axis='index')
def tpm_from_counts(self, data, gene_list=None, imputer=do_nothing):
"""
Transform data from counts to TPM.
Unless a gene list is specified, genes are reindex to GTEx:
- Any genes from GTEx that are not in data.columns are set to zero.
- Any genes not present in GTEx are dropped.
Takes an optional imputation method applied after reindexing.
Args:
data (pandas.DataFrame ~ (num_samples, num_genes)): count data
gene_list (optional; List[str]): a list of gene ids
imputer (optional; callable)
Returns:
pandas.DataFrame
"""
subset = imputer(self.reindex(data, gene_list))
normed = subset.divide(self.gene_lengths[subset.columns], axis='columns')
return 10**6 * normed.divide(normed.sum(axis=1), axis='rows')
def tpm_from_subset(self, data, gene_list=None, imputer=do_nothing):
"""
Renormalize a subset of genes already in TPM.
Unless a gene list is specified, genes are reindex to GTEx:
- Any genes from GTEx that are not in data.columns are set to zero.
- Any genes not present in GTEx are dropped.
Takes an optional imputation method applied after reindexing.
Args:
data (pandas.DataFrame ~ (num_samples, num_genes)): TPM data
gene_list (optional; List[str]): a list of gene ids
imputer (optional; callable)
Returns:
pandas.DataFrame
"""
return self.tpm_from_rpkm(data, gene_list, imputer)
def clr_from_tpm(self, data, gene_list=None, imputer=do_nothing):
"""
Compute the centered log ratio transform of data in TPM format.
Unless a gene list is specified, genes are reindex to GTEx:
- Any genes from GTEx that are not in data.columns are set to zero.
- Any genes not present in GTEx are dropped.
Takes an optional imputation method applied after reindexing.
Args:
data (pandas.DataFrame ~ (num_samples, num_genes)): TPM data
gene_list (optional; List[str]): a list of gene ids
imputer (optional; callable)
Returns:
pandas.DataFrame ~ (num_samples, num_genes)
"""
imputed = self.tpm_from_subset(data, gene_list, imputer)
log_transformed = numpy.log(imputed)
return log_transformed.subtract(log_transformed.mean(axis=1), axis=0)
def tpm_from_clr(self, data, gene_list=None):
"""
Compute data in TPM format from centered log ratio transformed data.
Unless a gene list is specified, genes are reindex to GTEx:
- Any genes from GTEx that are not in data.columns are set to zero.
- Any genes not present in GTEx are dropped.
Args:
data (pandas.DataFrame ~ (num_samples, num_genes)): CLR data
gene_list (optional; List[str]): a list of gene ids
Returns:
pandas.DataFrame ~ (num_samples, num_genes)
"""
return self.tpm_from_rpkm(numpy.exp(data), gene_list)
def alr_from_tpm(self, data, reference_genes, gene_list=None,
imputer=do_nothing):
"""
Compute the additive log ratio transform of data in TPM format.
This transform normalizes by the geometric mean of the reference genes,
and drops the reference genes from the data set.
Args:
data (pandas.DataFrame ~ (num_samples, num_genes)): TPM data
reference_genes (List[str]): a list of gene ids to use as the
references in the ALR transform
gene_list (optional; List[str]): a list of gene ids
imputer (optional; callable)
Returns:
pandas.DataFrame ~ (num_samples, num_genes - num_reference_genes)
"""
common_genes = self._get_common_genes(gene_list)
common_references = [gene for gene in reference_genes if gene in common_genes]
genes_to_keep = [gene for gene in common_genes if gene not in common_references]
imputed = self.tpm_from_subset(data, genes_to_keep + common_references, imputer)
log_transformed = numpy.log(imputed)
refs = log_transformed[common_references].mean(axis=1)
return log_transformed[genes_to_keep].subtract(refs, axis=0)
def z_score_from_clr(self, data, tissues, gene_list=None):
"""
Compute the z-score of the clr'd tpm data relative to healthy tissue
in GTEx.
Args:
data (pandas.DataFrame ~ (num_samples, num_genes)): CLR data
tissues (pandas.Series) ~ (num_samples)): tissues of data samples
gene_list (optional; List[str]): a list of gene ids
Returns:
pandas.DataFrame ~ (num_samples, num_genes - num_reference_genes)
"""
# get the clr tissue stats from GTEx
mean_clr = self.describer.tissue_stats['mean_clr']
std_clr = self.describer.tissue_stats['std_clr']
# convert gene IDs from Ensembl to the identifier, if needed
# duplicates are dropped!
if self.converter is not None:
mean_clr.index = self.converter.convert_list(mean_clr.index)
mean_clr = mean_clr[mean_clr.index.notnull()]
mean_clr = mean_clr[~mean_clr.index.duplicated(keep='first')]
std_clr.index = self.converter.convert_list(std_clr.index)
std_clr = std_clr[std_clr.index.notnull()]
std_clr = std_clr[~std_clr.index.duplicated(keep='first')]
if gene_list is None:
gene_list = data.columns
mean_clr = mean_clr.reindex(gene_list)
std_clr = std_clr.reindex(gene_list)
mean_expression = mean_clr[tissues].transpose().set_index(tissues.index)
std_expression = std_clr[tissues].transpose().set_index(tissues.index)
data_subset = self.reindex(data, gene_list)
return (data_subset - mean_expression)/std_expression
def ordinalize(self, data, cutoffs, min_value=0):
"""
Convert data into ordinal values given cutoffs between ordinal boundaries.
Returns the same type as the input data.
Example:
If cutoffs = [-2, 2] and min_value = -1, then
[[-3.2, 1.4, -0.7] [[-1, 0, 0]
[ 2.5, -0.8, 6.1] -> [ 1, 0, 1]
[-1.9, -4.5, 3.7]] [ 0, -1, 1]]
Args:
data (pandas.DataFrame ~ (num_samples, num_genes)): any expression data
cutoffs (List[float]): cutoffs between ordinal boundaries.
No lower or upper bounds should be given, e.g. to binarize this
argument should be a list with 1 value.
Returns:
pandas.DataFrame ~ (num_samples, num_genes): ordinal values,
typed as the input data.
"""
ordinalizer = partial(numpy.searchsorted, cutoffs)
return data.apply(ordinalizer).astype(data.dtypes) + min_value
class RemoveUnwantedVariation(object):
"""
The RUV-2 algorithm.
Attributes:
hk_genes (List[str]): a list of housekeeping gene names used in fitting.
means (pandas.Series): the means of each gene from the training data.
U (optional; numpy array ~ (num_training_samples, num_factors)):
left eigenvectors from SVD of housekeeping genes in training set
L (optional; numpy array ~ (num_factors,))
eigenvalues from SVD of housekeeping genes in training set
Vt (optional; numpy_array ~ (num_factors, num_hk_genes)
right eigenvectors from SVD of housekeeping genes in training set
"""
def __init__(self, center=True, hk_genes=None, means=None, U=None, L=None, Vt=None):
"""
Perform the 2-step Remove Unwanted Variation (RUV-2) algorithm
defined in:
"Correcting gene expression data when neither the unwanted variation nor the
factor of interest are observed."
Biostatistics 17.1 (2015): 16-28.
<NAME>, <NAME>, and <NAME>.
The algorithm is modified slightly so that batch correction can be
applied out-of-sample.
Args:
center (optional; bool): whether to center the gene means in the fit.
hk_genes (optional; List[str]): list of housekeeping genes
means (optional; numpy array ~ (num_genes,))
U (optional; numpy array ~ (num_training_samples, num_factors))
L (optional; numpy array ~ (num_factors,))
Vt (optional; numpy_array ~ (num_factors, num_hk_genes))
Returns:
RemoveUnwantedVariation
"""
self.center = center
self.hk_genes = hk_genes
self.means = means
self.U = U
self.L = L
self.Vt = Vt
def _is_fit(self):
"""
Check if the batch effect transformation has been fit.
Args:
None
Returns:
bool
"""
return (self.hk_genes is not None) and \
(self.means is not None) and \
(self.U is not None) and \
(self.L is not None) and \
(self.Vt is not None)
def _cutoff_svd(self, matrix, variance_cutoff=1, num_components=None):
"""
Compute the singular value decomposition of a matrix and get rid
of any singular vectors below a cumulative variance threshold.
Args:
matrix (numpy array): the data
variance_cutoff (float): retains only elements of L that contribute
to the cumulative fractional variance up to the cutoff.
num_components (int): the maximum number of components of L to use.
If None, no additional constraint is applied.
Returns:
U, L, Vt where M = U L V^{T}
"""
U, L, Vt = numpy.linalg.svd(matrix, full_matrices=False)
# trim eigenvalues close to 0, exploit the fact that L is ordered
L = L[:(~numpy.isclose(L, 0)).sum()]
cumul_variance_fracs = numpy.cumsum(L**2) / numpy.sum(L**2)
max_components = len(L) if num_components is None else num_components
L_cutoff = min(max_components,
1+numpy.searchsorted(cumul_variance_fracs, variance_cutoff))
return U[:, :L_cutoff], L[:L_cutoff], Vt[:L_cutoff, :]
def fit(self, data, hk_genes, variance_cutoff=0.9, num_components=None):
"""
Perform a singular value decomposition of the housekeeping genes to
fit the transform.
Suppose that we measure data on the expression of N genes in M samples
and store these (after CLR transformation) in a matrix Y \in R^{M, N}.
We consider a linear model Y = X B + W A + noise where
X \in R^{M, Q} are some unobserved, but biologically interesting, factors
B \in R^{Q, N} describes how the genes are coupled to the interesting factors
W \in R^{M, K} are some unobserved and uninteresting factors
A \in R^{K, N} describes how the genes are coupled to the uninteresting factors
We assume that there are some housekeeping genes Y_c for which we are
sure that B_c = 0. That is, the housekeeping genes are not coupled to
any biologically interesting factors. Therefore, we have Y_c = W A_c + noise.
Let Y_c = U L V^{T} be the singular value decomposition of Y_c. Then,
we can estimate W = U L. Additionally, A_c = V^{T}.
Now, if we fix W and assume that X B = 0 for all genes then we can
estimate A = W^+ Y = (W W^{T})^{-1} W^{T} Y.
This matrix stores K patterns of variation that are
usually not biologically interesting.
Args:
data (pandas.DataFrame ~ (num_samples, num_genes)): clr transformed
expression data
hk_genes (List[str]): list of housekeeping genes
variance_cutoff (float): the cumulative variance cutoff on SVD
eigenvalues of Y_c (the variance fraction of the factors).
num_components (int): the maximum number of components K to use.
If None, all components are used (up to the variance cutoff).
Returns:
None
"""
self.means = data.mean(axis=0)
# restrict to available housekeeping genes
self.hk_genes = [gene for gene in hk_genes if gene in data.columns]
# center the data along genes
if self.center:
housekeeping = data[self.hk_genes] - self.means[self.hk_genes]
else:
housekeeping = data[self.hk_genes]
self.U, self.L, self.Vt = self._cutoff_svd(housekeeping, variance_cutoff,
num_components)
def _delta(self, W, data_centered, penalty):
"""
Compute the corrections for RUV2.
Args:
W (numpy array ~ (num_samples, num_factors))
data_centered (pandas.DataFrame ~ (num_samples, num_genes))
penalty (float)
Returns:
delta (numpy array ~ (num_samples, num_genes))
"""
penalty_term = penalty * numpy.eye(W.shape[1])
J = numpy.linalg.inv(penalty_term + numpy.dot(W.T, W))
return numpy.dot(W, numpy.dot(J, numpy.dot(W.T, data_centered)))
def transform(self, data, penalty=0):
"""
Perform the 2-step Remove Unwanted Variation (RUV-2) algorithm.
The `fit` method estimates the matrix
A \in R^{K, N} which describes how the genes are coupled to the
uninteresting factors
We can estimate the activity of these factors from a new dataset \tilde{Y}
by using the housekeeping genes on this new dataset and computing
\tilde{W} = \tilde{Y}_c A_c^{+}. Since A_c = V^{T} from the SVD,
the right pseudoinverse A_c^{+} = A_c^{T}.
Finally, we can subtract \tilde{W} A from the data,
\tilde{Y} - \tilde{W} A.
Essentially, we are removing the components of the data that project
onto the pre-defined axes of uninteresting variation.
Args:
data (pandas.DataFrame ~ (num_samples, num_genes)): clr transformed
expression data
penalty (float): regularization on the regression step
Returns:
batch corrected data (pandas.DataFrame ~ (num_samples, num_genes))
"""
assert self._is_fit(), "RUV has not been fit!"
if self.center:
data_trans = data - self.means
else:
data_trans = data
W = numpy.dot(data_trans[self.hk_genes], self.Vt.T)
return data - self._delta(W, data_trans, penalty)
def fit_transform(self, data, hk_genes, penalty=0, variance_cutoff=0.9,
num_components=None):
"""
Perform the 2-step Remove Unwanted Variation (RUV-2) algorithm.
Args:
data (pandas.DataFrame ~ (num_samples, num_genes)): clr transformed
expression data
hk_genes (List[str]): list of housekeeping genes
penalty (float): regularization on the regression step
variance_cutoff (float): the cumulative variance cutoff on SVD
eigenvalues of Y_c.
num_components (int): the maximum number of components K to use.
If None, all components are used (up to the variance cutoff).
Returns:
batch corrected data (pandas.DataFrame ~ (num_samples, num_genes))
"""
self.fit(data, hk_genes, variance_cutoff, num_components)
return self.transform(data, penalty)
def save(self, filename, overwrite_existing=False):
"""
Save the RUV object to filename.
Args:
filename (string): absolute path to save file
overwrite_existing (bool): whether or not to overwrite existing file
Returns:
None
"""
path = Path(filename)
assert overwrite_existing or not path.exists(), \
"Must allow overwriting existing files"
with open(filename, 'wb') as f:
pickle.dump(self, f)
@classmethod
def load(cls, filename):
"""
Create an RUV from a saved object.
Args:
filename (str)
Returns:
RemoveUnwantedVariation
"""
with open(filename, 'rb') as f:
return pickle.load(f)
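# A hedged end-to-end sketch (not part of the original module), using toy data
# and made-up gene names. It fits RUV-2 on a training matrix of CLR values with
# the first five genes playing the housekeeping role, then applies the same
# correction out-of-sample, which is the modification the class docstring
# describes.
if __name__ == "__main__":
    rng = numpy.random.RandomState(0)
    genes = ["g{}".format(i) for i in range(20)]
    train = pandas.DataFrame(rng.randn(50, 20), columns=genes)
    test = pandas.DataFrame(rng.randn(10, 20), columns=genes)
    ruv = RemoveUnwantedVariation()
    corrected_train = ruv.fit_transform(train, hk_genes=genes[:5], penalty=1.0)
    corrected_test = ruv.transform(test, penalty=1.0)  # reuses the fitted factors
    print(corrected_train.shape, corrected_test.shape)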
|
11488150
|
import sys
import uarray as ua
from . import _vjp_diffs, _jvp_diffs
from ._uarray_plug import DiffArrayBackend
from ._core import defvjp, defvjp_argnum, defjvp, defjvp_argnum, def_linear
from ._diff_array import DiffArray, JVPDiffArray, VJPDiffArray
__all__ = [
"DiffArrayBackend",
"DiffArray",
"JVPDiffArray",
"defvjp",
"defvjp_argnum",
"VJPDiffArray",
"defjvp",
"defjvp_argnum",
"def_linear",
]
|
11488169
|
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
from config import config
from train import *
from data_utils import *
from model import *
import argparse
class ArgumentError(RuntimeError):
pass
def parse_arg(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--mode", required=True, help="Choose running mode: \nextract - extract faces, \ntrain - train the model with the config specified in config.py, \neval - run evaluation on the dataset")
parser.add_argument("--trained_weights", type=str,
default="weights/glamor-net/Model",
help="/path/to/model_weights in tf2 saved format")
parser.add_argument("--dataset_type", type=str,
help="execution on the dataset type: train, val or test")
parser.add_argument("--resume", type=str,
default = "",
help="resume training from /path/to/weights or latest checkpoint by input 'last' value")
global args
args = parser.parse_args(argv)
#print(args.__dict__)
mode = args.mode
if mode == "extract":
if not args.dataset_type or args.dataset_type not in ['train','val','test']:
raise ArgumentError("extract dataset_type must be one of: train, val, or test")
elif mode == "train":
pass
elif mode == "eval": #Default is evaluating on test set
if not args.dataset_type:
args.dataset_type= "test"
if args.dataset_type not in ['train','val','test']:
raise ArgumentError("evaluation dataset_type must be one of: train, val, or test")
if not args.trained_weights:
raise ArgumentError("trained_weights are required in evaluation")
else:
raise ValueError('Command not found! The supported mode is [extract, train, eval]')
def run_extract():
extract_faces(args.dataset_type)
def run_train():
model = get_model()
train_dataset = get_train_dataset()
val_dataset = None
if config.val_images and config.val_crop:
val_dataset = get_eval_dataset("val")
optimizer = get_optimizer(train_dataset)
if args.resume:
if args.resume == "last":
model = train(model, optimizer, train_dataset, val_dataset=val_dataset, epochs = config.epochs, load_checkpoint=True)
else:
print("Load weight from: " + args.resume)
model.load_weights(args.resume)
model = train(model, optimizer, train_dataset, val_dataset=val_dataset, epochs=config.epochs,
load_checkpoint=False)
else:
print("Training from scratch.")
model = train(model, optimizer, train_dataset, val_dataset=val_dataset, epochs= config.epochs, load_checkpoint=False)
def run_eval():
model = get_model()
print("Loading model weights...")
model.load_weights(args.trained_weights)
print("Model weights loaded!")
eval_dataset = get_eval_dataset(kind=args.dataset_type)
eval(model, eval_dataset)
if __name__ == "__main__":
parse_arg()
mode= args.mode
if mode == "extract":
run_extract()
elif mode == "train":
run_train()
elif mode == "eval": # Default is evaluating on test set
run_eval()
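# Example invocations (not in the original file; the script name is a
# placeholder for however this module is launched):
#   python main.py --mode extract --dataset_type train
#   python main.py --mode train --resume last
#   python main.py --mode eval --trained_weights weights/glamor-net/Model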
|
11488186
|
import argparse
import glob
import os
from tqdm import tqdm
import pandas as pd
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--glue_dir", type=str, default="/mnt/nvme/glue")
args = parser.parse_args()
test_files = glob.glob(os.path.join(args.glue_dir, "*", "test.tsv"))
# assumed completion of the original truncated read: load each task's
# tab-separated test split and report its shape
for test_file in tqdm(test_files):
    df = pd.read_csv(test_file, sep="\t")
    print(test_file, df.shape)
if __name__ == "__main__":
    main()
|
11488187
|
def test_sp_reg_back(sim):
return sim.reg_value(29) == sim.reg_value(9)
def test_addition(sim):
return sim.reg_value(8) == (0x45678901 + 0xABBACEED +
0x12 + 0x98 + 0xFFFF)
|
11488194
|
import os
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
from tensorboardX import SummaryWriter
import sys
import time
import json
from qanet.tvqanet import TVQANet
from tvqa_dataset import TVQADataset, pad_collate, prepare_inputs
from config import BaseOptions
import logging
logging.basicConfig()
def mask_logits(target, mask):
return target * mask
def IOFSM(selection_greedy, targets, ts_target, ts_target_mask):
bsz = targets.size(0)
img_len = selection_greedy.size(1)
selection_greedy = selection_greedy.view(bsz, 5, -1)
selection_greedy = selection_greedy[torch.arange(bsz, dtype=torch.long), targets] #(N, Li)
label = torch.zeros(bsz, img_len).cuda()
st_list = ts_target["st"].tolist()
ed_list = ts_target["ed"].tolist()
for idx, (st, ed) in enumerate(zip(st_list, ed_list)):
label[idx, st:ed+1] = 1
label_inv = (label != 1).float()
rewards_greedy_inv = (selection_greedy * label_inv * ts_target_mask).sum(-1) / (label_inv * ts_target_mask).sum(-1)
loss = 1 + rewards_greedy_inv - ((selection_greedy * label).sum(-1) / label.sum(-1))
return loss.sum(), rewards_greedy_inv.sum(), ((selection_greedy * label).sum(-1) / label.sum(-1)).sum()
def binaryCrossEntropy(max_statement_sm_sigmoid, targets, ts_target, ts_target_mask):
bsz = targets.size(0)
max_statement_sm_sigmoid = max_statement_sm_sigmoid.view(bsz, 5, -1)
img_len = max_statement_sm_sigmoid.size(2)
max_statement_sm_sigmoid = max_statement_sm_sigmoid[torch.arange(bsz, dtype=torch.long), targets]
label = torch.zeros(bsz, img_len).cuda()
st_list = ts_target["st"].tolist()
ed_list = ts_target["ed"].tolist()
for idx, (st, ed) in enumerate(zip(st_list, ed_list)):
label[idx, st:ed+1] = 1
loss = nn.functional.binary_cross_entropy_with_logits(max_statement_sm_sigmoid, label, reduction="none")
loss = mask_logits(loss, ts_target_mask).sum()
loss *= 0.1
return loss
def balanced_binaryCrossEntropy(max_statement_sm_sigmoid, targets, ts_target, ts_target_mask):
bsz = targets.size(0)
max_statement_sm_sigmoid = max_statement_sm_sigmoid.view(bsz, 5, -1)
img_len = max_statement_sm_sigmoid.size(2)
max_statement_sm_sigmoid = max_statement_sm_sigmoid[torch.arange(bsz, dtype=torch.long), targets] #(N, Li)
label = torch.zeros(bsz, img_len).cuda()
st_list = ts_target["st"].tolist()
ed_list = ts_target["ed"].tolist()
for idx, (st, ed) in enumerate(zip(st_list, ed_list)):
label[idx, st:ed+1] = 1
label_inv = (label != 1).float()
loss = nn.functional.binary_cross_entropy_with_logits(max_statement_sm_sigmoid, label, reduction="none")
loss_p = mask_logits(loss, label).sum(-1) / label.sum(-1)
loss_n = mask_logits(loss, label_inv * ts_target_mask).sum(-1) / (label_inv * ts_target_mask).sum(-1)
loss = loss_p + loss_n
return loss.sum()
def train(opt, dset, model, criterion, optimizer, epoch, previous_best_acc):
dset.set_mode("train")
model.train()
train_loader = DataLoader(dset, batch_size=opt.bsz, shuffle=True,
collate_fn=pad_collate, num_workers=opt.num_workers, pin_memory=True)
train_loss = []
train_loss_iofsm = []
train_loss_accu = []
train_loss_ts = []
train_loss_cls = []
valid_acc_log = ["batch_idx\tacc\tacc1\tacc2"]
train_corrects = []
torch.set_grad_enabled(True)
max_len_dict = dict(
max_sub_l=opt.max_sub_l,
max_vid_l=opt.max_vid_l,
max_vcpt_l=opt.max_vcpt_l,
max_qa_l=opt.max_qa_l,
max_dc_l=opt.max_dc_l,
)
timer_dataloading = time.time()
for batch_idx, batch in tqdm(enumerate(train_loader)):
timer_start = time.time()
model_inputs, targets, qids = prepare_inputs(batch, max_len_dict=max_len_dict, device=opt.device)
try:
timer_start = time.time()
outputs, max_statement_sm_sigmoid_ = model(model_inputs)
max_statement_sm_sigmoid, max_statement_sm_sigmoid_selection = max_statement_sm_sigmoid_
temporal_loss = balanced_binaryCrossEntropy(max_statement_sm_sigmoid, targets, model_inputs["ts_label"], model_inputs["ts_label_mask"])
cls_loss = criterion(outputs, targets)
iofsm_loss, _, _ = IOFSM(max_statement_sm_sigmoid_selection, targets, model_inputs["ts_label"], model_inputs["ts_label_mask"])
att_loss_accu = 0
loss = cls_loss + temporal_loss + iofsm_loss
timer_start = time.time()
loss.backward(retain_graph=False)
torch.nn.utils.clip_grad_norm_(model.parameters(), opt.clip)
optimizer.step()
optimizer.zero_grad()
train_loss.append(loss.data.item())
train_loss_iofsm.append(float(iofsm_loss))
train_loss_ts.append(float(temporal_loss))
train_loss_cls.append(cls_loss.item())
pred_ids = outputs.data.max(1)[1]
train_corrects += pred_ids.eq(targets.data).tolist()
except RuntimeError as e:
if "out of memory" in str(e):
print("WARNING: ran out of memory, skipping batch")
else:
print("RuntimeError {}".format(e))
sys.exit(1)
if batch_idx % opt.log_freq == 0:
niter = epoch * len(train_loader) + batch_idx
if batch_idx == 0:
train_acc = 0
train_loss = 0
train_loss_iofsm = 0
train_loss_ts = 0
train_loss_cls = 0
else:
train_acc = sum(train_corrects) / float(len(train_corrects))
train_loss = sum(train_loss) / float(len(train_corrects))
train_loss_iofsm = sum(train_loss_iofsm) / float(len(train_corrects))
train_loss_cls = sum(train_loss_cls) / float(len(train_corrects))
train_loss_ts = sum(train_loss_ts) / float(len(train_corrects))
valid_acc, valid_loss, qid_corrects, valid_acc1, valid_acc2, submit_json_val = \
validate(opt, dset, model, criterion, mode="valid")
valid_log_str = "%02d\t%.4f\t%.4f\t%.4f" % (batch_idx, valid_acc, valid_acc1, valid_acc2)
valid_acc_log.append(valid_log_str)
if valid_acc > previous_best_acc:
with open("best_github.json", 'w') as cqf:
json.dump(submit_json_val, cqf)
previous_best_acc = valid_acc
if epoch >= 10:
torch.save(model.state_dict(), os.path.join("./results/best_valid_to_keep", "best_github_7420.pth"))
print("Epoch {:02d} [Train] acc {:.4f} loss {:.4f} loss_iofsm {:.4f} loss_ts {:.4f} loss_cls {:.4f}"
"[Val] acc {:.4f} loss {:.4f}"
.format(epoch, train_acc, train_loss, train_loss_iofsm, train_loss_ts, train_loss_cls,
valid_acc, valid_loss))
torch.set_grad_enabled(True)
model.train()
dset.set_mode("train")
train_corrects = []
train_loss = []
train_loss_iofsm = []
train_loss_ts = []
train_loss_cls = []
timer_dataloading = time.time()
with open(os.path.join(opt.results_dir, "valid_acc.log"), "a") as f:
f.write("\n".join(valid_acc_log) + "\n")
return previous_best_acc
def validate(opt, dset, model, criterion, mode="valid"):
dset.set_mode(mode)
torch.set_grad_enabled(False)
model.eval()
valid_loader = DataLoader(dset, batch_size=opt.test_bsz, shuffle=False,
collate_fn=pad_collate, num_workers=opt.num_workers, pin_memory=True)
submit_json_val = {}
valid_qids = []
valid_loss = []
valid_corrects = []
max_len_dict = dict(
max_sub_l=opt.max_sub_l,
max_vid_l=opt.max_vid_l,
max_vcpt_l=opt.max_vcpt_l,
max_qa_l=opt.max_qa_l,
max_dc_l=opt.max_dc_l,
)
for val_idx, batch in enumerate(valid_loader):
model_inputs, targets, qids = prepare_inputs(batch, max_len_dict=max_len_dict, device=opt.device)
outputs, _= model(model_inputs)
loss = criterion(outputs, targets)
valid_qids += [int(x) for x in qids]
valid_loss.append(loss.data.item())
pred_ids = outputs.data.max(1)[1]
for qdix, q_id in enumerate(model_inputs['qid']):
q_id_str = str(q_id)
submit_json_val[q_id_str] = int(pred_ids[qdix].item())
valid_corrects += pred_ids.eq(targets.data).tolist()
acc_1st, acc_2nd = 0., 0.
valid_acc = sum(valid_corrects) / float(len(valid_corrects))
valid_loss = sum(valid_loss) / float(len(valid_corrects))
qid_corrects = ["%d\t%d" % (a, b) for a, b in zip(valid_qids, valid_corrects)]
return valid_acc, valid_loss, qid_corrects, acc_1st, acc_2nd, submit_json_val
def main():
opt = BaseOptions().parse()
torch.manual_seed(opt.seed)
cudnn.benchmark = False
cudnn.deterministic = True
np.random.seed(opt.seed)
dset = TVQADataset(opt)
opt.vocab_size = len(dset.word2idx)
model = TVQANet(opt)
if opt.device.type == "cuda":
print("CUDA enabled.")
if len(opt.device_ids) > 1:
print("Use multi GPU", opt.device_ids)
model = torch.nn.DataParallel(model, device_ids=opt.device_ids, output_device=0) # use multi GPU
model.to(opt.device)
# model.load_state_dict(torch.load("./path/best_release_7420.pth"))
criterion = nn.CrossEntropyLoss(reduction="sum").to(opt.device)
optimizer = torch.optim.Adam(
filter(lambda p: p.requires_grad, model.parameters()),
lr=opt.lr,
weight_decay=opt.wd)
best_acc = 0.
start_epoch = 0
early_stopping_cnt = 0
early_stopping_flag = False
for epoch in range(start_epoch, opt.n_epoch):
if not early_stopping_flag:
niter = epoch * np.ceil(len(dset) / float(opt.bsz))
cur_acc = train(opt, dset, model, criterion, optimizer, epoch, best_acc)
is_best = cur_acc > best_acc
best_acc = max(cur_acc, best_acc)
if not is_best:
early_stopping_cnt += 1
if early_stopping_cnt >= opt.max_es_cnt:
early_stopping_flag = True
else:
early_stopping_cnt = 0
else:
print("=> early stop with valid acc %.4f" % best_acc)
break
if epoch == 10:
for g in optimizer.param_groups:
g['lr'] = 0.0002
return opt.results_dir.split("/")[1]
if __name__ == "__main__":
results_dir = main()
|
11488225
|
from datetime import datetime
from flask import Blueprint, render_template, g, request, current_app, session, redirect, url_for, flash, abort, json
from flask_login import login_user, logout_user, current_user, login_required
from flask_babel import gettext
from urllib.parse import urlparse
from random import randrange
import time
mod = Blueprint('general', __name__, url_prefix='/<lang_code>')
from dataviva import app, db, babel, view_cache, data_viva_apis, s3_host, s3_bucket
from dataviva.apps.general.forms import AccessForm
from dataviva.apps.general.models import Short
from dataviva.apps.user.models import User
from dataviva.apps.news.models import Publication
from dataviva.apps.blog.models import Post
from dataviva.apps.contact.forms import ContactForm
from dataviva.apps.user.forms import SignupForm
from dataviva.apps.user.forms import LoginForm
from dataviva.api.attrs.models import Bra, Hs, Cbo, Cnae, Course_hedu
from dataviva.translations.dictionary import dictionary
from dataviva.api.stats.helper import stats_list, make_items
from dataviva.utils.cached_query import cached_query, api_cache_key
from dataviva.utils.gzip_data import gzipped
from config import ACCOUNTS, DEBUG, API_BASE_URL
#utils
# from dataviva.utils.send_mail import send_mail
###############################
# General functions for ALL views
# ---------------------------
@app.before_request
def before_request():
g.user = current_user
g.accounts = True if ACCOUNTS in [
"True", "true", "Yes", "yes", "Y", "y", 1] else False
g.color = "#af1f24"
g.api_url = API_BASE_URL
g.dictionary = json.dumps(dictionary())
g.attr_version = 17
g.production = False if DEBUG else True
g.contact_form = ContactForm()
g.signup_form = SignupForm()
g.signin_form = LoginForm()
g.s3_host = s3_host
g.s3_bucket = s3_bucket
if request.endpoint != 'static':
url = urlparse(request.url)
url_path = url.path.split('/')
g.locale = get_locale(lang=url_path[1])
# Check if the user is logged in, if so give the global object
# a reference to the user from DB
# if g.user.is_authenticated:
# g.user.last_seen = datetime.utcnow()
# db.session.add(g.user)
# db.session.commit()
if url_path[1] not in data_viva_apis:
if g.locale not in url_path:
if url.query:
new_url = "{}://{}/{}{}?{}".format(
url.scheme, url.netloc, g.locale, url.path, url.query)
else:
new_url = "{}://{}/{}{}".format(url.scheme,
url.netloc, g.locale, url.path)
return redirect(new_url)
@mod.url_value_preprocessor
def pull_lang_code(endpoint, values):
g.locale = values.pop('lang_code')
@mod.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', get_locale())
@babel.localeselector
def get_locale(lang=None):
supported_langs = current_app.config['LANGUAGES'].keys()
new_lang = request.accept_languages.best_match(supported_langs, "en")
# user = getattr(g, 'user', None)
user = current_user
if lang:
if lang in supported_langs:
new_lang = lang
# if user.is_authenticated:
# # set users preferred lang
# user.language = new_lang
# db.session.add(user)
# db.session.commit()
else:
session['locale'] = new_lang
else:
current_locale = getattr(g, 'locale', None)
# return new_lang
if current_locale:
new_lang = current_locale
elif user.is_authenticated:
user_preferred_lang = getattr(user, 'language', None)
if user_preferred_lang and user_preferred_lang in supported_langs:
new_lang = user_preferred_lang
else:
# set users preferred lang
user.language = new_lang
#db.session.add(user)
# db.session.commit()
elif 'locale' in session:
new_lang = session['locale']
else:
session['locale'] = new_lang
return new_lang
@babel.timezoneselector
def get_timezone():
user = getattr(g, 'user', None)
if user is not None:
return user.timezone
###############################
# General views
# ---------------------------
@app.after_request
def after_request(response):
return response
@mod.route('/', methods=['GET'])
def home():
g.page_type = 'home'
news_publications = Publication.query.filter(
    Publication.active, Publication.show_home, Publication.language == g.locale).all()
blog_posts = Post.query.filter(
    Post.active, Post.show_home, Post.language == g.locale).all()
news_publications += blog_posts
all_publications = sorted(
news_publications, key=lambda x: x.publish_date, reverse=True)
if len(all_publications) > 6:
all_publications = all_publications[0:6]
return render_template("general/index.html", publications=all_publications)
@mod.route('/inicie-uma-pesquisa/', methods=['GET'])
def search():
g.page_type = 'search'
return render_template("general/browse_categories.html")
@mod.route('/close/')
def close():
return render_template("general/close.html")
@mod.route('/upgrade/')
def upgrade():
return render_template("general/upgrade.html")
@mod.route('/None/')
def none_page():
return redirect(url_for('general.home'))
###############################
# Set language views
# ---------------------------
@mod.route('/set_lang/<lang>')
def set_lang(lang):
g.locale = get_locale(lang)
return redirect(request.args.get('next') or
request.referrer or
url_for('general.home'))
###############################
# Handle shortened URLs
# ---------------------------
@mod.route('/<slug>/')
def redirect_short_url(slug):
short = Short.query.filter_by(slug=slug).first_or_404()
short.clicks += 1
# db.session.add(short)
db.session.commit()
return redirect(short.long_url)
# ###############################
# # 404 view
# # ---------------------------
if not DEBUG:
@app.errorhandler(Exception)
@app.errorhandler(404)
@app.errorhandler(500)
@mod.route('/413/')
def page_not_found(e="413"):
error = str(e).split(":")[0]
try:
    error_code = int(error)
except ValueError:
    error = "500"
    error_code = 500
request_info = {
"Date": datetime.today().ctime(),
"IP": request.remote_addr,
"Method": request.method,
"URL": request.url,
"Data": request.data
}
headers = list(request.headers)
g.page_type = "error"
sabrina = {}
sabrina["outfit"] = "lab"
sabrina["face"] = "scared"
sabrina["hat"] = None
return render_template('general/error.html',
error=error, sabrina=sabrina), error_code
@mod.route('/error/')
def error():
g.page_type = "error"
sabrina = {}
sabrina["outfit"] = "lab"
sabrina["face"] = "scared"
sabrina["hat"] = None
error = "500"
error_code = int(error)
return render_template('general/error.html',
error=error, sabrina=sabrina), error_code
|
11488259
|
import pandas.testing as tm
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import fletcher as fr
@pytest.mark.parametrize("continuous", [True, False])
def test_read_parquet(tmpdir, continuous):
str_arr = pa.array(["a", None, "c"], pa.string())
int_arr = pa.array([1, None, -2], pa.int32())
bool_arr = pa.array([True, None, False], pa.bool_())
table = pa.Table.from_arrays([str_arr, int_arr, bool_arr], ["str", "int", "bool"])
pq.write_table(table, "df.parquet")
result = fr.read_parquet("df.parquet", continuous=continuous)
expected = fr.pandas_from_arrow(table, continuous=continuous)
tm.assert_frame_equal(result, expected)
|
11488271
|
import argparse, os, os.path
import simpleamt
if __name__ == '__main__':
parser = argparse.ArgumentParser(parents=[simpleamt.get_parent_parser()])
parser.add_argument('--html_template', required=True)
parser.add_argument('--rendered_html', required=True)
args = parser.parse_args()
env = simpleamt.get_jinja_env(args.config)
template = env.get_template(args.html_template)
html = template.render({'input': ''})
with open(args.rendered_html, 'w') as f:
f.write(html)
|
11488288
|
import inspect
import json
import os
from typing import Optional
import pytest
from jury import Jury, load_metric
from tests.jury import EXPECTED_OUTPUTS
_TEST_METRICS = [
load_metric("accuracy"),
load_metric("bertscore", compute_kwargs={"model_type": "albert-base-v1", "device": "cpu"}),
load_metric("bleu"),
load_metric("f1"),
load_metric("meteor"),
load_metric("precision"),
load_metric("recall"),
load_metric("rouge"),
load_metric("sacrebleu"),
load_metric("squad"),
]
_STR_TEST_METRIC = "bleu"
_LIST_STR_TEST_METRICS = [
"accuracy",
"bertscore",
"bleu",
"f1",
"meteor",
"precision",
"recall",
"rouge",
"sacrebleu",
"squad",
]
_LIST_DICT_TEST_METRICS = [
{"path": "accuracy"},
{"path": "bertscore", "compute_kwargs": {"model_type": "albert-base-v1"}},
{"path": "bleu", "resulting_name": "bleu-1", "compute_kwargs": {"max_order": 1}},
{"path": "bleu", "resulting_name": "bleu-2", "compute_kwargs": {"max_order": 2}},
{"path": "f1", "resulting_name": "F1"},
{"path": "meteor", "resulting_name": "METEOR"},
{"path": "precision"},
{"path": "recall"},
{"path": "rouge"},
{"path": "sacrebleu"},
{"path": "squad"},
]
_LIST_MIXED_TEST_METRICS = [
"accuracy",
"bertscore",
"bleu",
{"path": "f1"},
{"path": "meteor"},
{"path": "precision"},
"recall",
"rouge",
{"path": "sacrebleu"},
{"path": "squad"},
]
_DATASETS_METRICS = "wer"
_TEST_METRICS_SEQUENCE_CLASSIFICATION = [
{"path": "accuracy", "task": "sequence-classification"},
{"path": "f1", "task": "sequence-classification"},
{"path": "precision", "task": "sequence-classification"},
{"path": "recall", "task": "sequence-classification"},
]
@pytest.fixture(scope="package")
def predictions():
return ["There is a cat on the mat.", "Look! a wonderful day."]
@pytest.fixture(scope="package")
def references():
return ["The cat is playing on the mat.", "Today is a wonderful day"]
@pytest.fixture
def predictions_sequence_classification():
return [0, 2, 1, 0, 0, 1]
@pytest.fixture
def references_sequence_classification():
return [0, 1, 2, 0, 1, 2]
@pytest.fixture
def multiple_predictions_sequence_classification():
return [[0], [1, 2], [0], [1], [0], [1, 2]]
@pytest.fixture
def multiple_references_sequence_classification():
return [[0, 2], [1, 0], [0, 1], [0], [0], [1, 2]]
@pytest.fixture(scope="function")
def inconsistent_predictions():
return ["There is a cat on the mat."]
@pytest.fixture(scope="function")
def single_prediction_array():
return [["the cat is on the mat"], ["Look! a wonderful day."]]
@pytest.fixture(scope="function")
def multiple_predictions_empty():
return [
[],
["Look! what a wonderful day, today.", "Today is a very wonderful day"],
]
@pytest.fixture(scope="function")
def multiple_references_empty():
return [
["the cat is playing on the mat.", "The cat plays on the mat."],
["Today is a wonderful day", "The weather outside is wonderful."],
]
@pytest.fixture(scope="package")
def multiple_predictions():
return [
["the cat is on the mat", "There is cat playing on mat"],
["Look! what a wonderful day, today.", "Today is a very wonderful day"],
]
@pytest.fixture(scope="package")
def multiple_references():
return [
["the cat is playing on the mat.", "The cat plays on the mat."],
["Today is a wonderful day", "The weather outside is wonderful."],
]
@pytest.fixture(scope="module")
def jury():
return Jury(metrics=_TEST_METRICS)
@pytest.fixture(scope="function")
def jury_concurrent():
return Jury(metrics=_TEST_METRICS, run_concurrent=True)
@pytest.fixture(scope="function")
def jury_str():
return Jury(metrics=_STR_TEST_METRIC)
@pytest.fixture(scope="function")
def jury_list_str():
return Jury(metrics=_LIST_STR_TEST_METRICS)
@pytest.fixture(scope="function")
def jury_list_dict():
return Jury(metrics=_LIST_DICT_TEST_METRICS)
@pytest.fixture(scope="function")
def jury_list_mixed():
return Jury(metrics=_LIST_MIXED_TEST_METRICS)
@pytest.fixture(scope="function")
def jury_datasets():
return Jury(metrics=_DATASETS_METRICS)
@pytest.fixture(scope="function")
def jury_sequence_classification():
return Jury(metrics=_TEST_METRICS_SEQUENCE_CLASSIFICATION)
def get_expected_output(prefix: Optional[str] = None):
def json_load(path: str):
with open(path, "r") as jf:
content = json.load(jf)
return content
def wrapper(fn, *args, **kwargs):
module_name = os.path.basename(inspect.getfile(fn)).replace(".py", "")
path = os.path.join(EXPECTED_OUTPUTS, prefix, f"{module_name}.json")
test_name = fn.__name__.replace("output_", "")
fn.output = json_load(path)[test_name]
return fn
if prefix is None:
prefix = ""
return wrapper
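# Hypothetical usage sketch for get_expected_output (not in the original file).
# The decorator loads EXPECTED_OUTPUTS/<prefix>/<test module>.json and attaches
# the entry keyed by the function's name with the "output_" prefix stripped:
#
#   @get_expected_output(prefix="metrics")
#   def output_bleu():
#       pass
#   # output_bleu.output == <module>.json contents under the "bleu" key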
|
11488306
|
from .base import *
from .abs import *
from .sort import *
from .slice import *
from .augment import *
from .maxpool2d import *
from .dequantization_uniform import *
from .dequantization_variational import *
|
11488308
|
import hashlib
import json
import uuid
import traceback
import random, string
import urllib.parse
import re
import requests
import pyotp
from flask import Blueprint
from flask import redirect
from flask import request, Response, render_template
from db import mysqlconn,mysqldb,redisdb
import myAuth
#init
admin = Blueprint("admin",__name__,template_folder='templates', static_folder='static')
private_plugin = []
sql = "SELECT * FROM private_message_plugin"
private_plugin_list = mysqldb.execute(sql)
data = mysqldb.fetchall()
for plugin in data:
plugin_label = {"bname":plugin[1],
"name":plugin[0],
"state":plugin[2]}
private_plugin.append(plugin_label)
group_plugin = []
sql = "SELECT * FROM group_message_plugin"
private_plugin_list = mysqldb.execute(sql)
data = mysqldb.fetchall()
for plugin in data:
plugin_label = {"bname":plugin[1],
"name":plugin[0]}
group_plugin.append(plugin_label)
def replace_cq2html(message:str,nohtml=False):
image = re.search("\[CQ:image.*]",message)
if image is None:
return message
else:
image=image.group()
if nohtml:
return message.replace(image,"[图片]")
url = re.search("url=.*",image).group()[4:-1]
new_img= f"<img src='{url}'>"
new_message = message.replace(image, new_img)
#print(new_img,new_message)
return new_message
def check(user,password):
md5 = hashlib.md5()
md5.update(f"{password}{user[2]}".encode("utf-8"))
return md5.hexdigest() == user[1]
def random_password():
src = string.ascii_letters + string.digits
list_passwd_all = random.sample(src, 5)  # take 5 random characters from letters and digits
list_passwd_all.extend(random.sample(string.digits, 1))  # guarantee at least one digit
list_passwd_all.extend(random.sample(string.ascii_lowercase, 1))  # guarantee at least one lowercase letter
list_passwd_all.extend(random.sample(string.ascii_uppercase, 1))  # guarantee at least one uppercase letter
random.shuffle(list_passwd_all)  # shuffle the character order
str_passwd = ''.join(list_passwd_all)  # join the list into a string
return str_passwd
def log_data(target="*"):
data = []
name = []
if target == "*":
mysqldb.execute(f"SELECT date_format(days,'%m月%d日'),num FROM `daily_event_num` LIMIT 5")
else:
mysqldb.execute(f"SELECT date_format(days,'%m月%d日'),num FROM `daily_friendadd_num` LIMIT 5")
getdata = mysqldb.fetchall()
for line in getdata:
name.append(line[0])
data.append(line[1])
return name, data
@admin.route("/plugin/main/home")
@myAuth.cold_login_auth(redisdb)
def home():
return render_template("homepage.html")
@admin.route("/")
@myAuth.cold_login_auth(redisdb)
def frame():
data = json.loads(redisdb.get(request.cookies["token"]))
return render_template("frame.html",
private_plugin=private_plugin,
group_plugin=group_plugin,
username=data["id"])
@admin.route("/plugin/main/index/state")
@myAuth.cold_login_auth(redisdb)
def now_state():
friend_chat_name, friend_chat_data = log_data(target="friendAdd")
message_chat_name, message_chat_data = log_data()
sql = "SELECT * FROM message WHERE res='none'"
message_num = mysqldb.execute(sql)
return render_template("now_state.html",
friend_chat_name=friend_chat_name,
friend_chat_data=friend_chat_data,
message_chat_name=message_chat_name,
message_chat_data=message_chat_data,
message_num=message_num)
@admin.route("/plugin/main/index/message")
@myAuth.cold_login_auth(redisdb)
def message_list():
sql = "SELECT * FROM message AS a WHERE a.send_time IN ( SELECT MAX( send_time ) FROM message GROUP BY sender ) ORDER BY send_time DESC"
mysqldb.execute(sql)
data = mysqldb.fetchall()
messages =[]
for line in data:
message={"sender_id":line[1],
"detail":replace_cq2html(line[3],nohtml=True),
"time":line[2]}
messages.append(message)
return render_template("message_list.html",messages=messages)
@admin.route("/plugin/main/index/re_import")
@myAuth.cold_login_auth(redisdb, auth=0)
def re_import_page():
return render_template("re_import.html")
@admin.route("/plugin/main/index/groupadd")
@myAuth.cold_login_auth(redisdb,auth=0)
def group_add_page():
return render_template("group_add.html")
@admin.route("/plugin/main/index/group_plugin")
@myAuth.cold_login_auth(redisdb, auth=1)
def group_plugin_page():
sql ="SELECT * FROM `group`"
mysqldb.execute(sql)
data = mysqldb.fetchall()
groups=[]
for line in data:
group={"g_id":line[0],
"g_name":line[1],
"plugins":[]}
if line[0] == "*":
group["g_id"] = "all"
groups.append(group)
sql = """
SELECT a.plugin_name,a.plugin_bname,a.package_name,c.g_id FROM group_message_plugin as a LEFT JOIN (
SELECT * FROM group_message_plugin_activate as b WHERE b.g_id = %s
) as c on c.plugin_name = a.plugin_name
"""
#print(groups)
for i in range(len(groups)):
now_group_id = groups[i]["g_id"]
if now_group_id == "all":
now_group_id = "*"
mysqldb.execute(sql,(now_group_id))
data = mysqldb.fetchall()
for line in data:
package = {
"bname":line[1],
"name":line[0],
"package_name":line[2]
}
if isinstance(line[3],str):
package["bstate"]="启用"
else:
package["bstate"]="停用"
groups[i]["plugins"].append(package)
#print(groups)
return render_template("group_plugin_list.html",groups=groups)
@admin.route("/plugin/main/index/private_plugin")
@myAuth.cold_login_auth(redisdb, auth=1)
def private_plugin_page():
sql="SELECT * FROM private_message_plugin"
mysqldb.execute(sql)
data = mysqldb.fetchall()
plugins=[]
for line in data:
plugin={"name":line[0],
"bname":line[1],
"package_name":line[2],
"state":line[3]}
if line[3] ==0:
plugin["bstate"]="启用"
elif line[3]==1:
plugin["bstate"]="临时停用"
else:
plugin["bstate"]="永久停用"
plugins.append(plugin)
return render_template("private_plugin_list.html",plugins=plugins)
@admin.route('/dialogue/<qid>')
@myAuth.cold_login_auth(redisdb)
def dialogue(qid):
sql = "SELECT * FROM message WHERE sender=%s ORDER BY send_time ASC"
state = mysqldb.execute(sql, (qid))
if state > 0:
data = mysqldb.fetchall()
dialogues=[]
for line in data:
dialogue = {"detail":replace_cq2html(line[3]),
"send_time":line[2],
"res":line[4],
"res_time":line[6]}
dialogues.append(dialogue)
sql = "SELECT a.qid,b.* FROM qid2sid as a LEFT JOIN stdinfo as b on a.sid=b.sid WHERE a.qid=%s"
state = mysqldb.execute(sql, (qid))
if state >0:
have_info=True
data = mysqldb.fetchall()
sender = {"qid": qid,
"name":data[0][2],
"sid":data[0][1],
"major":data[0][5],
"school":data[0][4],
"sex":data[0][3]}
if isinstance(data[0][6],str):
sender["phone"] = data[0][6]
else:
sender["phone"] = "未绑定"
else:
have_info=False
sender={"qid":qid}
return render_template("dialogue.html", have_info=have_info, sender=sender, dialogues=dialogues)
else:
return "<h1>无该对话</h1>"
@admin.route('/user/<user_id>', methods=["GET"])
@myAuth.cold_login_auth(redisdb)
def user_home(user_id):
userdata = json.loads(redisdb.get(request.cookies["token"]))
operator_id = userdata["id"]
operator_auth = userdata["auth_class"]
if operator_id == user_id:
operator_otpkey = userdata["OTP_key"]
# the provisioning account label was redacted (<EMAIL>) in the source; the
# operator id stands in so the otpauth URI is still well-formed
otp_url = pyotp.totp.TOTP(operator_otpkey).provisioning_uri(str(operator_id), issuer_name="qq_bot_service")
return render_template("user_personal.html",s_id=operator_id,usertype=operator_auth,otpuri=otp_url)
else:
if operator_auth > 0:
return "<h1>无权访问</h1>"
else:
sql = "SELECT * FROM admin WHERE id=%s"
mysqldb.execute(sql,(user_id))
data = mysqldb.fetchall()[0]
return render_template("user_root.html",s_id=data[0],usertype=data[-2])
@admin.route("/plugin/main/index/user")
@myAuth.cold_login_auth(redisdb,auth=0)
def user_list():
sql = "SELECT admin.id,admin.auth_class FROM admin"
mysqldb.execute(sql)
users_list = mysqldb.fetchall()
users = []
for line in users_list:
user = {"userid":line[0],
"type":line[1]}
users.append(user)
#print(users)
return render_template("user_list.html",users=users)
@admin.route("/plugin/main/index/useradd")
@myAuth.cold_login_auth(redisdb,auth=0)
def user_add():
return render_template("user_add.html")
@admin.route("/plugin/main/API/v1/reset_by_admin.json",methods=["POST"])
@myAuth.cold_login_auth(redisdb,auth=0)
def reset_by_admin():
target_id = request.form["s_id"]
newpassword = request.form["newpassword"]  # field name restored around the redaction marker
otppassword = request.form["otppassword"]
data = json.loads(redisdb.get(request.cookies["token"]))
otp_key = data["OTP_key"]
sql = "SELECT * FROM admin where id = %s"
mysqldb.execute(sql, (target_id))
user_data = mysqldb.fetchall()[0]
otp_checker = pyotp.TOTP(otp_key)
#print(otp_checker.now(),otppassword)
if otp_checker.verify(otppassword):
md5 = hashlib.md5()
md5.update(f"{newpassword}{user_data[2]}".encode("utf-8"))
md5_passwd = md5.hexdigest()
try:
sql = "UPDATE admin SET password=%s WHERE id=%s"
mysqldb.execute(sql,(md5_passwd,target_id))
mysqlconn.commit()
return "success"
except Exception as e:
print(e)
return "fail"
else:
return "fail"
@admin.route("/plugin/main/API/v1/group_add.js",methods=["POST"])
@myAuth.cold_login_auth(redisdb,auth=0)
def group_add():
group_id=request.form["group_id"]
bname = request.form["bname"]
#print(group_id,bname)
sql = "INSERT INTO `group` VALUES (%s,%s)"
try:
mysqldb.execute(sql,(group_id,bname))
mysqlconn.commit()
except Exception as e:
return json.dumps({"state":"fail","message":"error"})
return json.dumps({"state":"success","info":"成功"})
@admin.route("/plugin/main/API/v1/res_dialogue.json",methods=["POST"])
@myAuth.cold_login_auth(redisdb)
def send_private_message():
operator = json.loads(redisdb.get(request.cookies["token"]))
qid = request.form["qid"]
message = request.form["message"]
#print(qid,message)
sql = "SELECT * FROM message WHERE sender=%s"
data = mysqldb.execute(sql,(qid))
if data > 0:
sql = "SELECT * FROM message WHERE sender=%s and res='none'"
state = mysqldb.execute(sql,(qid))
#print(state)
if state >0:
sql = "UPDATE message SET res='ignore',res_time=NOW(),res_user_id=%s WHERE res='none' and sender=%s"
mysqldb.execute(sql, (operator["id"],qid))
sql = "UPDATE message SET res=%s WHERE sender=%s ORDER BY send_time DESC LIMIT 1"
mysqldb.execute(sql,(message,qid))
else:
uid = str(uuid.uuid4())
sql = "INSERT INTO message values(%s,%s,NOW(),'Dialogue_Detail_None',%s,%s,NOW())"
mysqldb.execute(sql,(uid,qid,message,operator["id"]))
data = {"qid":qid,
"message":message}
res = requests.post("http://127.0.0.1:5000/API/send_private_message.json", data).json()
#print(res)
if res["state"] == "success":
mysqlconn.commit()
return json.dumps({"state":"success","message":""})
else:
return json.dumps({"state":"fail","message":"系统出错"})
else:
return json.dumps({"state":"fail","message":"无留言可以回复"})
@admin.route("/plugin/main/API/v1/private_plugin.json",methods=["POST"])
@myAuth.cold_login_auth(redisdb,auth=1)
def private_plugin_state():
plugin_name = request.form["plugin_name"]
state = int(request.form["state"])
if state == 0:
bstate = "启用"
elif state == 1:
bstate = "临时停用"
else:
bstate = "永久停用"
#print(plugin_name,state)
sql = f"UPDATE private_message_plugin SET active={int(state)} WHERE plugin_name=%s"
try:
#print(sql)
state = mysqldb.execute(sql,(plugin_name))
if state == 1:
mysqlconn.commit()
else:
return json.dumps({"state":"fail","message":"无内容修改"})
except Exception as e:
return json.dumps({"state":"fail","message":"服务器异常"})
return json.dumps({"state":"success","bstate":bstate})
@admin.route("/plugin/main/API/v1/group_plugin.json",methods=["POST"])
@myAuth.cold_login_auth(redisdb,auth=1)
def private_group_state():
gid = request.form["group_id"]
if gid == "all":
gid = "*"
plugin_name = request.form["plugin_name"]
state = int(request.form["state"])
if state == 0:
bstate = "启用"
elif state == 1:
bstate = "停用"
#print(gid,plugin_name,state)
if gid == "*" and state == 0:
#print("a")
sql = "DELETE FROM group_message_plugin_activate WHERE plugin_name = %s"
mysqldb.execute(sql, (plugin_name))
sql = "INSERT INTO group_message_plugin_activate values(%s,%s)"
mysqldb.execute(sql, (gid, plugin_name))
mysqlconn.commit()
return json.dumps({"state":"success","bstate":bstate})
elif gid == "*" and state ==1:
#print("b")
sql = "DELETE FROM group_message_plugin_activate WHERE plugin_name = %s"
mysqldb.execute(sql, (plugin_name))
mysqlconn.commit()
return json.dumps({"state": "success", "bstate": bstate})
elif state == 0:
sql = 'SELECT * FROM group_message_plugin_activate WHERE (plugin_name = %s and g_id = "*")'
exist = mysqldb.execute(sql, (plugin_name))
if exist > 0:
#print("c")
return json.dumps({"state":"success","bstate":"已全局打开"})
else:
#print("d")
sql = 'SELECT * FROM group_message_plugin_activate WHERE (plugin_name = %s and g_id = %s)'
exist = mysqldb.execute(sql, (plugin_name,gid))
if exist >0:
return json.dumps({"state": "success", "bstate": bstate})
sql = "INSERT INTO group_message_plugin_activate values(%s,%s)"
mysqldb.execute(sql, (gid, plugin_name))
mysqlconn.commit()
return json.dumps({"state": "success", "bstate": bstate})
else:
sql = 'SELECT * FROM group_message_plugin_activate WHERE (plugin_name = %s and g_id = "*")'
exist = mysqldb.execute(sql,(plugin_name))
if exist >0:
#print("e")
return json.dumps({"state":"success","bstate":"已全局打开,请在全局中设置"})
else:
#print("f")
sql = "DELETE FROM group_message_plugin_activate WHERE (plugin_name = %s and g_id = %s) "
mysqldb.execute(sql, (plugin_name, gid))
mysqlconn.commit()
return json.dumps({"state": "success", "bstate": bstate})
@admin.route("/plugin/main/API/v1/re_import.json")
@myAuth.cold_login_auth(redisdb,auth=0)
def re_import_api():
try:
data = requests.get("http://127.0.0.1:5000/API/reflash.json").json()
except Exception as e:
return json.dumps({"state":"fail","message":"error"})
if data["state"]:
return json.dumps({"state":"success","info":"成功"})
@admin.route("/plugin/main/API/v1/reset_by_self.json",methods=["POST"])
@myAuth.cold_login_auth(redisdb)
def reset_by_user():
oldpassword = request.form["oldpassword"]
newpassword = request.form["newpassword"]
data = json.loads(redisdb.get(request.cookies["token"]))
user_id = data["id"]
sql = "SELECT * FROM admin where id = %s"
mysqldb.execute(sql, (user_id))
user_data = mysqldb.fetchall()[0]
if check(user_data,oldpassword):
md5 = hashlib.md5()
md5.update(f"{newpassword}{user_data[2]}".encode("utf-8"))
md5_passwd = md5.hexdigest()
try:
sql = "UPDATE admin SET password=%s WHERE id=%s"
mysqldb.execute(sql,(md5_passwd,user_id))
mysqlconn.commit()
return json.dumps({"state": "success","message":"修改成功"})
except Exception as e:
return json.dumps({"state": "fail","message":"服务器异常"})
else:
return json.dumps({"state": "fail","message":"密码有误"})
@admin.route("/plugin/main/API/v1/new_user.json",methods=["POST"])
@myAuth.cold_login_auth(redisdb,auth=0)
def new_user():
data = json.loads(redisdb.get(request.cookies["token"]))
otpkey = data["OTP_key"]
user_id = request.form["s_id"]
user_auth = request.form["auth"]
otp_passwd = request.form["otppassword"]
otp_checker = pyotp.TOTP(otpkey)
if otp_checker.verify(otp_passwd):
user_otp_key = pyotp.random_base32()
user_salt = str(uuid.uuid4())
md5 = hashlib.md5()
        new_random_password = str(uuid.uuid4())  # the original generator was redacted; uuid4 is assumed here, matching the salt above
md5.update(f"{new_random_password}{user_salt}".encode("utf-8"))
md5_passwd = md5.hexdigest()
try:
sql = f"INSERT INTO admin values (%s,%s,%s,{int(user_auth)},%s)"
mysqldb.execute(sql,(user_id,md5_passwd,user_salt,user_otp_key))
mysqlconn.commit()
return json.dumps({"state": "success", "password": <PASSWORD>})
except Exception as e:
print(e)
return json.dumps({"state": "fail","message":"该用户已存在"})
return json.dumps({"state":"fail","message":"动态密码错误或超时"})
@admin.route("login", methods=["GET","POST"])
def login():
if request.method == "GET":
resp = render_template("login_admin.html")
else:
mysqlconn.ping(reconnect=True)
target = request.args.get("redirect")
if target is None:
target = "/"
target = urllib.parse.unquote(target)
#print(target)
username = request.form["username"]
passwd = request.form["password"]
exist = mysqldb.execute("SELECT * from admin WHERE id=%s",username)
state = False
if exist:
user = mysqldb.fetchall()[0]
state = check(user, passwd)
if state:
cookies = str(uuid.uuid4())
redisdb.set(cookies, json.dumps({"id": user[0],
"auth_class": user[3],
"OTP_key": user[4]}))
resp = Response(render_template("redirect.html",target=target))
resp.set_cookie("token",cookies)
else:
resp = render_template("login_admin.html",color='red',message='密码错误或用户不存在')
return resp
@admin.route("logout")
@myAuth.cold_login_auth(redisdb)
def logout():
redisdb.delete(request.cookies["token"])
resp = Response(render_template("redirect.html", target="login"))
resp.delete_cookie("token")
return resp
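# A hedged sketch of the check() helper used in login() and reset_by_user()
# (its definition lives earlier in this file). Given how passwords are written
# above (md5 of password + salt, with admin rows laid out as
# (id, password, salt, auth_class, OTP_key)), it presumably looks like:
# def check(user_row, password):
#     md5 = hashlib.md5()
#     md5.update(f"{password}{user_row[2]}".encode("utf-8"))
#     return md5.hexdigest() == user_row[1]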
if __name__ == '__main__':
from flask import Flask
app = Flask(__name__)
sql = "SELECT * FROM private_message_plugin"
private_plugin_list = mysqldb.execute(sql)
data = mysqldb.fetchall()
for plugin in data:
exec(f"from {plugin[2]} import blueprint as blueprint")
app.register_blueprint(blueprint, url_prefix=f"/plugin/private/{plugin[0]}")
app.register_blueprint(admin,url_prefix="/")
app.run("localhost",8080)
|
11488408
|
from enum import Enum
import torch.nn as nn
class ActivationEnum(Enum):
RELU = nn.ReLU()
SIGMOID = nn.Sigmoid()
TANH = nn.Tanh()
PRELU = nn.PReLU()
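# A short usage sketch (assumes `import torch`; the calling code is hypothetical):
# members can be looked up by name, e.g. from a config string, and the wrapped
# module applied like any nn.Module.
# act = ActivationEnum['RELU'].value      # an nn.ReLU() instance
# y = act(torch.tensor([-1.0, 2.0]))      # -> tensor([0., 2.])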
|
11488421
|
from .browser_integration import *
stop_macro_js = """
storage = []
localStorage.removeItem('__bi_tracking_events');
localStorage.removeItem('__bi_counter');
localStorage.removeItem('__bi_event_time');
for (var p in localStorage) {
if (p.substr(0, 10) === '__bi_event') {
storage.push(JSON.parse(localStorage[p]));
localStorage.removeItem(p);
}
}
return storage;
"""
class BrowserIntegrationStopCommand(sublime_plugin.WindowCommand):
plugin_name = "Stop recording"
plugin_description = "Stop recording browser interaction," \
" and collect macro data."
@staticmethod
def visible():
return browser.connected() and browser.recording
@require_browser
@async
def run(self):
@async
def cancel_macro():
if sublime.ok_cancel_dialog("If you cancel the macro saving, "
"all its data will be lost. "
"There is no way to recover "
"this data later. "
"Are you sure you want to "
"forget this macro?",
"Yes, I wan't to forget the macro!"):
return
stop_macro()
@async
def stop_macro(name='untitled'):
view = self.window.new_file()
# view.set_syntax_file('Packages/JSON/JSON.tmLanguage')
view.set_name(name + '.macro')
view.run_command('insert_into_view', {'text': macro})
browser.recording = False
storage = browser.execute(stop_macro_js)
if storage:
with loading('Collecting events.'):
storage.sort(key=lambda x: x['idx'])
macro = sublime.encode_value(storage, pretty=True)
self.window.show_input_panel('Macro name:', '', stop_macro,
None, None)
else:
warning('No events recorded.')
|
11488450
|
import torch
import math
from torch import nn, Tensor
from torch.nn import functional as F
from .backbones import MiT
from .backbones.layers import trunc_normal_
from .heads import SegFormerHead
segformer_settings = {
'B0': 256, # head_dim
'B1': 256,
'B2': 768,
'B3': 768,
'B4': 768,
'B5': 768
}
class SegFormer(nn.Module):
def __init__(self, variant: str = 'B0', num_classes: int = 19) -> None:
super().__init__()
self.backbone = MiT(variant)
self.decode_head = SegFormerHead(self.backbone.embed_dims, segformer_settings[variant], num_classes)
self.apply(self._init_weights)
def _init_weights(self, m: nn.Module) -> None:
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
def init_pretrained(self, pretrained: str = None) -> None:
if pretrained:
self.backbone.load_state_dict(torch.load(pretrained, map_location='cpu'), strict=False)
def forward(self, x: Tensor) -> Tensor:
y = self.backbone(x)
y = self.decode_head(y) # 4x reduction in image size
y = F.interpolate(y, size=x.shape[2:], mode='bilinear', align_corners=False) # to original image shape
return y
if __name__ == '__main__':
model = SegFormer('B0', 150)
model.load_state_dict(torch.load('checkpoints/pretrained/segformer/segformer.b0.ade.pth', map_location='cpu'))
x = torch.zeros(1, 3, 512, 512)
y = model(x)
print(y.shape)
|
11488462
|
from dataclasses import dataclass
from apischema import identity, serialize, serializer
from apischema.conversions import Conversion
@dataclass
class RGB:
red: int
green: int
blue: int
@serializer
@property
def hexa(self) -> str:
return f"#{self.red:02x}{self.green:02x}{self.blue:02x}"
assert serialize(RGB, RGB(0, 0, 0)) == "#000000"
# dynamic conversion used to bypass the registered one
assert serialize(RGB, RGB(0, 0, 0), conversion=identity) == {
"red": 0,
"green": 0,
"blue": 0,
}
# Expanded bypass form
assert serialize(
RGB, RGB(0, 0, 0), conversion=Conversion(identity, source=RGB, target=RGB)
) == {"red": 0, "green": 0, "blue": 0}
|
11488633
|
import logging as log
'''
data.py
Utilities for reading from corpuses or word files.
'''
def read_sentence_data(filename, token_delim=' '):
'''
    read_sentence_data: reads data from a line-delimited sentence file.
filename: path to file
    return: a list of sentences, each a list of tokens.
'''
lines = []
with open(filename, 'r') as f:
for line in f:
line = line.strip()
toks = line.split(token_delim)
            # drop empty tokens (filter(None, ...) removes empty strings)
toks = list(filter(None, toks))
lines.append(toks)
log.debug('Finished reading %s.' % filename)
return lines
def read_word_data(filename):
'''
read_word_data: reads data from a line-delimited word file.
filename: path to file
return: list of strings
'''
lines = []
with open(filename, 'r') as f:
for line in f:
line = line.strip()
lines.append(line)
log.debug('Finished reading %s.' % filename)
return lines
def write_sentence_data(filename, lines):
'''
write_sentence_data: writes data, each item newline delimited
filename: path to file
lines: list of list of tokens
return: True on success
'''
with open(filename, 'w') as f:
for line in lines:
s = ' '.join(line).strip()
f.write('%s\n' % s)
log.debug('Finished writing to %s.' % filename)
return True
def write_parallel_data(file_src, file_trg, lines_src_trg):
'''
write_parallel_data: writes paired data to newline delimited file
file_src: path to output src file
file_trg: path to output trg file
lines_src_trg: list of tuple of (tok list, tok list)
return: True on success
'''
with open(file_src, 'w') as f_src:
with open(file_trg, 'w') as f_trg:
for src, trg in lines_src_trg:
s_src = ' '.join(src).strip()
s_trg = ' '.join(trg).strip()
f_src.write('%s\n' % s_src)
f_trg.write('%s\n' % s_trg)
log.debug('Finished writing to %s and %s.' % (file_src, file_trg))
return True
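# A minimal round-trip sketch of the helpers above ('/tmp/sents.txt' is a
# hypothetical path):
# lines = [['the', 'cat'], ['sat']]
# write_sentence_data('/tmp/sents.txt', lines)
# assert read_sentence_data('/tmp/sents.txt') == lines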
|
11488683
|
import os
from pycsp3.solvers.solver import SolverProcess #, SolverPy4J
CHOCO_DIR = os.sep.join(__file__.split(os.sep)[:-1]) + os.sep
CHOCO_CP = CHOCO_DIR + "choco-parsers-4.10.5-jar-with-dependencies.jar"
class Choco(SolverProcess):
def __init__(self):
super().__init__(
name="Choco-solver",
command="java -cp " + CHOCO_CP + " org.chocosolver.parser.xcsp.ChocoXCSP", cp=CHOCO_CP
)
def parse_general_options(self, string_options, dict_options, dict_simplified_options):
args_solver = ""
tl = -1
if "limit_time" in dict_simplified_options:
tl = dict_simplified_options["limit_time"]
args_solver += " -limit=[" + str(tl) + ("s" if tl != -1 else "")
free = False
if "limit_runs" in dict_simplified_options:
args_solver += "," + dict_simplified_options["limit_runs"] + "runs"
free = True
if "limit_sols" in dict_simplified_options:
args_solver += "," + dict_simplified_options["limit_sols"] + "sols"
free = True
args_solver += "]"
if "varheuristic" in dict_simplified_options:
dict_simplified_options["varh"] = dict_simplified_options["varHeuristic"]
if "varh" in dict_simplified_options:
v = dict_simplified_options["varh"]
if v == "dom/wdeg":
v = "domwdeg"
if v not in ["input", "dom", "rand", "ibs", "impact", "abs", "activity", "chs", "domwdeg"]:
print("heuristic " + v + " not implemented in Choco")
else:
args_solver += " -varh=" + v
free = True
if "valheuristic" in dict_simplified_options:
dict_simplified_options["valh"] = dict_simplified_options["valHeuristic"]
if "valh" in dict_simplified_options:
v = dict_simplified_options["valh"]
if v not in ["min", "med", "max", "rand", "best", ]:
print("heuristic " + v + " not implemented in Choco")
else:
args_solver += " -valh=" + v
free = True
if "lastConflict" in dict_simplified_options:
dict_simplified_options["lc"] = dict_simplified_options["lastConflict"]
if "lc" in dict_simplified_options:
args_solver += " -lc=" + (dict_simplified_options["lc"] if dict_simplified_options["lc"] else "1")
free = True
if "cos" in dict_simplified_options:
args_solver += " -cos"
free = True
if "last" in dict_simplified_options:
args_solver += " -last"
free = True
if "restarts_type" in dict_simplified_options:
rt = dict_simplified_options["restarts_type"]
args_solver += " -restarts=[" + rt + ","
if "restarts_cutoff" in dict_simplified_options:
args_solver += dict_simplified_options["restarts_cutoff"] + ","
else:
print("Choco needs 'restarts_cutoff' to be set when 'restarts_type' is set.")
if rt == "geometric":
if "restarts_factor" in dict_simplified_options:
args_solver += dict_simplified_options["restarts_gfactor"] + ","
else:
print("Choco needs 'restarts_gfactor' to be set when 'geometric' is declared.")
if "restarts_factor" in dict_simplified_options:
args_solver += dict_simplified_options["restarts_factor"] + ","
else:
print("Choco needs 'restarts_factor' to be set when 'restarts_type' is set.")
free = True
else:
if "restarts_cutoff" in dict_simplified_options \
or "restarts_factor" in dict_simplified_options \
or "restarts_gfactor" in dict_simplified_options:
print("Choco needs 'restarts_type' to be set when 'restarts_cutoff' "
"or 'restarts_factor' or 'restarts_gfactor' is set.")
if "lb" in dict_simplified_options or "ub" in dict_simplified_options:
print(" Bounding objective not implemented in Choco")
if free: # required when some solving options are defined
args_solver += " -f"
if "seed" in dict_simplified_options:
args_solver += " -seed=" + dict_simplified_options["seed"]
if "verbose" in dict_simplified_options:
print(" Verbose log not implemented in Choco")
if "trace" in dict_simplified_options:
print(" Saving trace into a file not implemented in Choco")
return args_solver
# class ChocoPy4J(SolverPy4J): # TODO in progress
# def __init__(self):
# cp = CHOCO_CP + os.pathsep + CHOCO_DIR + "../py4j0.10.8.1.jar" + os.pathsep + CHOCO_DIR + " ChocoSolverPy4J"
# super().__init__(name="Choco-solver", command="java -cp " + cp, cp=CHOCO_CP)
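# A hedged example of the option translation above (exact behaviour depends on
# SolverProcess): a dict like {"limit_time": "10", "varh": "dom"} produces an
# argument string containing " -limit=[10s]" and " -varh=dom", and because a
# search option was set, " -f" (free search) is appended as well.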
|
11488702
|
from __future__ import print_function
import logging
import glob
import sys
from .config import *
def process_fileinfo(file_info, config):
filenames = []
for line in file_info:
# A line describing a file in the form:
# raw_file [output_file [cur_line cur_token [other_annotations]]]
parts = line.split()
next_part = 0
# Input data
raw_file = parts[0]
next_part += 1
# Output file name
output_file = raw_file + ".annotations"
if len(parts) > next_part:
output_file = parts[next_part]
next_part += 1
# Start somewhere other than the top
d = Document(raw_file)
position = Span(config.annotation, d)
if len(parts) > next_part:
position_text = []
depth = 0
first = True
while depth > 0 or first:
for char in parts[next_part]:
if char == '(': depth += 1
if char == ')': depth -= 1
position_text.append(parts[next_part])
first = False
next_part += 1
position_text = ' '.join(position_text)
span = eval(position_text)
position = Span(config.annotation, d, span)
# Additional annotations (used when comparing annotations)
annotations = []
if len(parts) > next_part:
annotations = parts[next_part:]
filenames.append((raw_file, position, output_file, annotations))
# Check files exist (or do not exist if being created)
missing = []
extra = []
for raw_file, _, output_file, annotations in filenames:
if len(glob.glob(raw_file)) == 0:
missing.append(raw_file)
if not config.args.overwrite:
if len(glob.glob(output_file)) != 0:
extra.append(output_file)
for annotation in annotations:
if len(glob.glob(annotation)) == 0:
missing.append(annotation)
if len(missing) > 0 or len(extra) > 0:
error = "Input filename list has the following errors:"
if len(missing) > 0:
error += "\nUnable to open:\n" + '\n'.join(missing)
if len(extra) > 0:
error += "\nAnnotation file already exists (use '-o' to ignore):\n" + '\n'.join(extra)
raise Exception(error)
return filenames
class Document(object):
"""Storage for the raw text data."""
# TODO: Think about maintaining whitespace variations, probably just by
# removing the .strip() below and then adjusting token selection to skip
# blank tokens
def __init__(self, filename):
self.raw_text = open(filename).read()
self.lines = self.raw_text.split("\n")
self.search_cache = {}
self.tokens = []
self.first_char = None
self.last_char = None
for line in self.raw_text.split("\n"):
cur = []
self.tokens.append(cur)
for token in line.strip().split():
if self.first_char is None:
self.first_char = (len(self.tokens) - 1, 0, 0)
self.last_char = (len(self.tokens) - 1, len(cur), len(token) - 1)
cur.append(token)
assert self.first_char is not None, "Empty document: {}".format(filename)
def get_3tuple(self, partial, start):
if len(partial) == 3:
return partial
elif len(partial) == 0:
if start: return self.first_char
else: return self.last_char
else:
line = partial[0]
token = 0
if len(partial) == 1 and (not start):
token = len(self.tokens[line]) - 1
elif len(partial) == 2:
token = partial[1]
char = 0
if not start:
char = len(self.tokens[line][token]) - 1
return (line, token, char)
def matches(self, text):
if text not in self.search_cache:
positions = []
self.search_cache[text] = positions
for line_no, line in enumerate(self.lines):
if text in line:
parts = line.split(text)
ctoken, cchar = 0, 0
options = []
for part in parts[:-1]:
# Advance
for char in part:
if char == ' ':
ctoken += 1
cchar = 0
else:
cchar += 1
positions.append((line_no, ctoken, cchar))
for char in text:
if char == ' ':
ctoken += 1
cchar = 0
else:
cchar += 1
return self.search_cache[text]
def get_moved_pos(self, pos, right=0, down=0, maxjump=False, skip_blank=True):
"""Calculate a shifted version of a given a position in this document.
Co-ordinates are (line number, token number, character number), with
(0,0, 0) as the top left, tokens increasing left to right and lines
increasing top to bottom.
"""
if len(pos) == 0: # This is the whole document, can't move
return pos
elif len(pos) == 1: # This is a line
npos = pos[0]
# Interpret left/right as also being up/down for lines
if down == 0 and right != 0:
down = right
if maxjump:
if down < 0: npos = self.first_char[0]
elif down > 0: npos = self.last_char[0]
else:
# Shift incrementally so we can optionally only count lines
# that have tokens.
shift = down
delta = 1 if shift > 0 else -1
while shift != 0 and self.first_char[0] <= npos + delta <= self.last_char[0]:
npos += delta
if (not skip_blank) or len(self.tokens[npos]) > 0:
shift -= delta
return (npos,)
elif len(pos) == 2: # Moving a token
nline = pos[0]
ntok = pos[1]
# Vertical movement
if maxjump:
# We always want to be on a token, so go to the first or last
# line with one.
if down < 0: nline = self.first_char[0]
elif down > 0: nline = self.last_char[0]
else:
# Shift incrementally so we can optionally only count lines
# that have tokens.
shift = down
delta = 1 if shift > 0 else -1
while shift != 0 and self.first_char[0] <= nline + delta <= self.last_char[0]:
nline += delta
if (not skip_blank) or len(self.tokens[nline]) > 0:
shift -= delta
# Horizontal movement
ntok = min(ntok, len(self.tokens[nline]) - 1)
if maxjump:
if right < 0: ntok = 0
elif right > 0: ntok = len(self.tokens[nline]) - 1
else:
shift = right
delta = 1 if shift > 0 else -1
while shift != 0:
if delta == -1 and nline == self.first_char[0] and ntok == self.first_char[1]:
break
if delta == 1 and nline == self.last_char[0] and ntok == self.last_char[1]:
break
if 0 <= ntok + delta < len(self.tokens[nline]):
ntok += delta
else:
# Go forward/back to a line with tokens. Note, we know
# there are later/earlier lines, since otherwise we
# would have been at the last_char/first_char
# position.
nline += delta
while len(self.tokens[nline]) == 0:
nline += delta
ntok = 0 if delta > 0 else len(self.tokens[nline]) - 1
shift -= delta
return (nline, ntok)
else: # Moving a character
# Vertical movement
nline = pos[0]
if maxjump:
# We always want to be on a character, so go to the first or
# last line with one.
if down < 0: nline = self.first_char[0]
elif down > 0: nline = self.last_char[0]
else:
# Shift incrementally because we only want to count lines that
# have characters.
shift = down
delta = 1 if shift > 0 else -1
while shift != 0 and self.first_char[0] <= nline + delta <= self.last_char[0]:
nline += delta
if (not skip_blank) or len(self.tokens[nline]) > 0:
shift -= delta
# Horizontal movement
ntok = min(len(self.tokens[nline]) - 1, pos[1])
nchar = min(len(self.tokens[nline][ntok]) - 1, pos[2])
if maxjump:
if right < 0:
ntok = 0
nchar = 0
elif right > 0:
ntok = len(self.tokens[nline]) - 1
nchar = len(self.tokens[nline][ntok]) - 1
else:
shift = right
delta = 1 if shift > 0 else -1
while shift != 0:
if delta == -1 and \
nline == self.first_char[0] and \
ntok == self.first_char[1] and \
                            nchar == self.first_char[2]:
break
if delta == 1 and \
nline == self.last_char[0] and \
ntok == self.last_char[1] and \
                            nchar == self.last_char[2]:
break
if 0 <= nchar + delta < len(self.tokens[nline][ntok]):
nchar += delta
elif delta < 0 and ntok > 0:
ntok -= 1
nchar = len(self.tokens[nline][ntok]) - 1
elif delta > 0 and ntok < len(self.tokens[nline]) - 1:
ntok += 1
nchar = 0
else:
# Go forward/back to a line with tokens. Note, we know
# there are later/earlier lines, since otherwise we
# would have been at the last_char/first_char
# position.
if nline + delta <= self.last_char[0]:
nline += delta
while len(self.tokens[nline]) == 0:
nline += delta
ntok = 0 if delta > 0 else len(self.tokens[nline]) - 1
nchar = 0 if delta > 0 else len(self.tokens[nline][ntok]) - 1
shift -= delta
return (nline, ntok, nchar)
def get_next_pos(self, pos):
if len(pos) == 0:
return pos
elif len(pos) == 1:
return self.get_moved_pos(pos, 0, 1)
else:
return self.get_moved_pos(pos, 1, 0)
def get_previous_pos(self, pos):
if len(pos) == 0:
return pos
elif len(pos) == 1:
return self.get_moved_pos(pos, 0, -1)
else:
return self.get_moved_pos(pos, -1, 0)
###class SpanCompare(Enum):
### smaller = 0
### smaller_left = 1
### overlap_end = 2
### overlap_right = 3
### cover = 4
### left_inside = 5
### equal = 6
### left_overlap = 7
### inside = 8
### inside_right = 9
### overlap_start = 10
### right_larger = 11
### larger = 12
### one_smaller = 13
### one_left = 14
### one_inside = 15
### one_right = 16
### one_larger = 17
### smaller_one = 18
### smaller_match = 19
### cover_one = 20
### equal_one = 21
### match_larger = 22
### larger_one = 23
### smaller_one_one = 24
### larger_one_one = 25
value_from_comparisons = {
# s0s1 e0e1 s0e1 e0s1 s0e0 s1e1 SpanCompare |-------|
(1, 1, 1, 1, 1, 1): "smaller", # .--.
(1, 1, 1, 0, 1, 1): "smaller_left", # .-----|
(1, 1, 1, -1, 1, 1): "overlap_end", # .---------.
(1, 0, 1, -1, 1, 1): "overlap_right", # .-------------|
(1, -1, 1, -1, 1, 1): "cover", # .-------------------.
(0, 1, 1, -1, 1, 1): "left_inside", # |---.
(0, 0, 1, -1, 1, 1): "equal", # |-------|
(0, -1, 1, -1, 1, 1): "left_overlap", # |-------------.
(-1, 1, 1, -1, 1, 1): "inside", # .-.
(-1, 0, 1, -1, 1, 1): "inside_right", # .---|
(-1, -1, 1, -1, 1, 1): "overlap_start", # .---------.
(-1, -1, 0, -1, 1, 1): "right_larger", # |-----.
(-1, -1, -1, -1, 1, 1): "larger", # .--.
(1, 1, 1, 1, 0, 1): "one_smaller", # .
(0, 1, 1, 0, 0, 1): "one_left", # |
(-1, 1, 1, -1, 0, 1): "one_inside", # .
(-1, 0, 0, -1, 0, 1): "one_right", # |
(-1, -1, -1, -1, 0, 1): "one_larger", # .
# Now consider cases where the second has width 0
# |
(1, 1, 1, 1, 1, 0): "smaller_one", # .--.
(1, 0, 1, 0, 1, 0): "smaller_match", # .-----|
(1, -1, 1, -1, 1, 0): "cover_one", # .-----------.
(0, 0, 0, 0, 0, 0): "equal_one", # |
(0, -1, 0, -1, 1, 0): "match_larger", # |-----.
(-1, -1, -1, -1, 1, 0): "larger_one", # .--.
(1, 1, 1, 1, 0, 0): "smaller_one_one", # .
(-1, -1, -1, -1, 0, 0): "larger_one_one", # .
}
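# A worked example (token scope assumed): for a = tokens 1..2 and b = tokens 0..3
# on the same line, the six comparisons evaluate to
# (s0s1, e0e1, s0e1, e0s1, s0e0, s1e1) = (-1, 1, 1, -1, 1, 1),
# which the table above maps to "inside": a lies strictly inside b.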
span_compare_ge = {
"left_inside", "equal", "left_overlap",
"inside", "inside_right", "overlap_start",
"right_larger", "larger", "one_left",
"one_inside", "one_right", "one_larger",
"equal_one", "match_larger", "larger_one",
"larger_one_one"
}
span_compare_le = {
"smaller", "smaller_left", "overlap_end",
"overlap_right", "left_inside", "equal",
"inside", "inside_right", "one_smaller",
"one_left", "one_inside", "one_right",
"smaller_one", "smaller_match", "equal_one",
"smaller_one_one"
}
class Span(object):
"""A continuous span of text.
All annotations are on spans, some of which just happen to have a single element."""
def __init__(self, scope, doc, span=None):
self.start = None
self.end = None
self.doc = doc
self.scope = scope
# Most of the time a span will be provided to start from.
if span is None:
first = self.doc.first_char
if scope == 'character':
self.start = (first[0], first[1], first[2])
elif scope == 'token':
self.start = (first[0], first[1])
elif scope == 'line':
self.start = (first[0],)
elif scope == 'document':
self.start = ()
else:
raise Exception("Invalid scope")
self.end = self.start
else:
# Check it has the right length
length = None
if scope == 'character': length = 3
elif scope == 'token': length = 2
elif scope == 'line': length = 1
elif scope == 'document': length = 0
else: raise Exception("Invalid scope")
# TODO: Add a check that this position is valid for this doc
if type(span) == int and length == 1:
self.start = (span,)
self.end = (span,)
elif type(span) == tuple:
if len(span) == 0:
self.start, self.end = (), ()
else:
if type(span[0]) == int:
assert len(span) == length, "Invalid item: got {} not {}".format(len(span), length)
self.start = span
self.end = span
else:
assert len(span[0]) == len(span[1]) == length, "Invalid item: got {} not {}".format(len(span), length)
self.start = span[0]
self.end = span[1]
else:
                assert len(span.start) == len(span.end) == length, "Invalid item: got {} not {}".format(len(span.start), length)
self.start = span.start
self.end = span.end
def _compare_tuples(self, a, b):
# Returns a number that is the kind of delta going from a to b
# (positive, negative, or zero)
if len(a) == 0 or len(b) == 0:
return 0
if a[0] == b[0]:
if len(a) == 1 or len(b) == 1:
return 0
if a[1] == b[1]:
if len(a) == 2 or len(b) == 2:
return 0
if a[2] == b[2]:
return 0
elif a[2] < b[2]:
return 1
else:
return -1
elif a[1] < b[1]:
return 1
else:
return -1
elif a[0] < b[0]:
return 1
else:
return -1
def __hash__(self):
return hash((self.start, self.end))
def __eq__(self, other):
if type(self) != type(other):
return False
elif self._compare_tuples(self.start, other.start) != 0:
return False
elif self._compare_tuples(self.end, other.end) != 0:
return False
else:
return True
def __lt__(self, other):
assert type(self) == type(other)
comp = self._compare_tuples(self.start, other.start)
if comp == 0:
return self._compare_tuples(self.end, other.end) == 1
return comp == 1
def __ne__(self, other):
return not self.__eq__(other)
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __gt__(self, other):
return not self.__le__(other)
def __ge__(self, other):
return not self.__lt__(other)
def __repr__(self):
return "Span({}, {})".format(self.start, self.end)
def __str__(self):
return str((self.start, self.end))
def compare(self, other):
'''Compares two spans and returns a SpanCompare.'''
assert type(self) == type(other)
s0 = self.doc.get_3tuple(self.start, True)
e0 = self.doc.get_3tuple(self.end, False)
s1 = other.doc.get_3tuple(other.start, True)
e1 = other.doc.get_3tuple(other.end, False)
s0s1 = self._compare_tuples(s0, s1)
s0e1 = self._compare_tuples(s0, e1)
e0s1 = self._compare_tuples(e0, s1)
e0e1 = self._compare_tuples(e0, e1)
s0e0 = self._compare_tuples(s0, e0)
s1e1 = self._compare_tuples(s1, e1)
return value_from_comparisons[s0s1, e0e1, s0e1, e0s1, s0e0, s1e1]
def to_3tuple(self):
if self.scope == 'character':
return self
        start = self.doc.get_3tuple(self.start, True)
        end = self.doc.get_3tuple(self.end, False)
return Span('character', self.doc, (start, end))
def search(self, query, direction=None, count=1, maxjump=False):
options = self.doc.matches(query)
logging.debug(options)
ans = None
for option in options:
comp = self._compare_tuples(self.start, option)
if comp < 0 and direction == 'previous':
ans = option
elif comp > 0:
if direction == 'next':
ans = option
break
if ans is None:
return self
else:
return Span(self.scope, self.doc, ans[:len(self.start)])
def edited(self, direction=None, change=None, distance=1, maxjump=False):
"""Change this span, either moving both ends or only one.
direction is left, right, up, down, next or previous
change is move, expand, or contract
distance is an integer, with negative numbers meaning max
"""
### logging.debug("{} {} {} {}".format(self, direction, change, distance))
new_start = self.start
new_end = self.end
if direction == 'next':
new_start = self.doc.get_next_pos(self.start)
new_end = self.doc.get_next_pos(self.end)
return Span(self.scope, self.doc, (new_start, new_end))
elif direction == 'previous':
new_start = self.doc.get_previous_pos(self.start)
new_end = self.doc.get_previous_pos(self.end)
return Span(self.scope, self.doc, (new_start, new_end))
right = 0
down = 0
if direction == 'left':
right = -distance
elif direction == 'right':
right = distance
elif direction == 'up':
down = -distance
elif direction == 'down':
down = distance
if change == 'contract':
            right *= -1
            down *= -1
if change == "move":
nstart = self.doc.get_moved_pos(new_start, right, down, maxjump)
nend = self.doc.get_moved_pos(new_end, right, down, maxjump)
### logging.debug("From {} and {} to {} and {}".format(self.start, self.end, nstart, nend))
# Only move if it will change both (otherwise it is a shift).
if nstart != self.start and nend != self.end:
new_start = nstart
new_end = nend
else:
### logging.debug("From {} do {} {} {} {} {} {}".format(self, direction, change, distance, maxjump, right, down))
move_start = direction == "left" or direction == "up"
to_move = new_end
if move_start:
to_move = new_start
moved = self.doc.get_moved_pos(to_move, right, down, maxjump)
nstart = new_start
nend = new_end
if move_start: nstart = moved
else: nend = moved
if self._compare_tuples(nstart, nend) >= 0:
new_start = nstart
new_end = nend
ans = Span(self.scope, self.doc, (new_start, new_end))
### logging.debug("Returning {}".format(ans))
return ans
# How to do coreference resolution annotation:
# - Normal mode is selecting a position using the edit function
# - Switch to link mode and then toggle between mentions including this one (to indicate no prior link)
class Item(object):
"""One or more spans and a set of labels.
This is used in Datum to keep track of annotations, and used in View to determine the current appearance."""
def __init__(self, doc, init_span=None, init_label=None):
self.doc = doc
self.spans = []
if type(init_span) == list:
self.spans += init_span
elif init_span is not None:
self.spans.append(init_span)
self.labels = set()
if type(init_label) == set:
self.labels.update(init_label)
elif init_label is not None:
self.labels.add(init_label)
def __eq__(self, other):
return self.spans == other.spans and self.labels == other.labels and self.doc == other.doc
def __str__(self):
labels = []
for label in self.labels:
labels.append(str(label))
labels = ' '.join(labels)
spans = '[' + ', '.join([str(s) for s in self.spans]) +']'
if len(self.spans) == 1:
spans = str(self.spans[0])
if self.spans[0].start == self.spans[0].end:
spans = str(self.spans[0].start)
if len(self.spans[0].start) == 1:
spans = str(self.spans[0].start[0])
elif len(self.spans) > 1:
all_single = True
for s in self.spans:
if s.start != s.end:
all_single = False
if all_single:
spans = str([s.start for s in self.spans])
if len(self.spans[0].start) == 1:
spans = " ".join([str(s.start[0]) for s in self.spans])
return "{} - {}".format(spans, labels)
def get_spans(text, doc, config):
# TODO: allow for <filename>:data
spans = []
if text[0] in '[(':
spans = eval(text.strip())
if type(spans) == int:
spans = [(spans,)]
elif type(spans) == tuple:
spans = [spans]
elif type(spans) == list:
if len(spans) == 0:
spans = [()]
elif type(spans[0]) == int:
spans = [(s,) for s in spans]
else:
for num in text.split():
spans.append((int(num),))
return [Span(config.annotation, doc, s) for s in spans]
def get_labels(text, config):
labels = set()
if config.annotation_type == 'categorical':
for label in text.strip().split():
labels.add(label)
else:
assert len(text.strip().split()) == 0, text
return labels
def read_annotation_file(config, filename, doc):
items = []
if len(glob.glob(filename)) == 1:
for line in open(filename):
# Each line is:
# [spans] - [labels]
fields = line.strip().split()
spans = get_spans(line.split('-')[0], doc, config)
labels = get_labels('-'.join(line.split('-')[1:]), config)
items.append(Item(doc, spans, labels))
logging.info("Read {}".format(filename))
return items
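# A hypothetical annotation file line at token scope (format: spans - labels):
#   (3, 5) - verb
# get_spans() parses "(3, 5)" into a single token-level Span, and with a
# categorical config get_labels() yields {'verb'}.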
class Datum(object):
"""Storage for a single file's data and annotations.
Note, the structure of storage depends on the annotation type."""
def __init__(self, filename, config, output_file, other_annotation_files):
self.filename = filename
self.config = config
self.output_file = output_file
self.doc = Document(filename)
logging.info("Reading data from "+ self.output_file)
self.annotations = read_annotation_file(config, self.output_file, self.doc)
self.other_annotation_files = other_annotation_files
self.other_annotations = []
for filename in other_annotation_files:
self.other_annotations.append(read_annotation_file(config, filename, self.doc))
# Working this out is a once-off expensive process
self.disagreements = []
all_item_counts = {}
hash_to_item = {}
for annotations in self.other_annotations:
for item in annotations:
h = hash((tuple(item.spans), tuple(item.labels)))
hash_to_item[h] = item
if h not in all_item_counts:
all_item_counts[h] = 0
all_item_counts[h] += 1
for h, count in all_item_counts.items():
self.disagreements.append((hash_to_item[h],len(self.other_annotations) - count))
def get_next_self_link(self, cursor, linking_pos, direction, moving_link):
if moving_link:
self_links = set()
for item in self.annotations:
if max(item.spans) == min(item.spans):
self_links.add(min(item.spans))
position = linking_pos.edited(direction)
prev = None
while position != prev:
if position in self_links:
return position
prev = position
position = position.edited(direction)
return linking_pos
else:
return cursor
def get_next_unannotated(self, cursor, linking_pos, direction, moving_link):
if moving_link:
annotated = set()
for item in self.annotations:
span = max(item.spans)
annotated.add(span)
position = linking_pos.edited(direction)
prev = None
while position in annotated and position != prev:
prev = position
position = position.edited(direction)
if position not in annotated:
return position
return linking_pos
else:
return cursor
def get_next_disagreement(self, cursor, linking_pos, direction, moving_link, cycle=True):
best = None
first = None
last = None
for item, count in self.disagreements:
if moving_link:
if count > 0:
span = max(item.spans)
if first is None or span > first:
first = span
if last is None or span < last:
last = span
if direction == 'next' and span > linking_pos:
if best is None or span < best:
best = span
elif direction == 'previous' and span < linking_pos:
if best is None or span > best:
best = span
else:
has_link = False
for span in item.spans:
if span == linking_pos:
has_link = True
if has_link:
for span in item.spans:
if span <= linking_pos:
if first is None or span < first:
first = span
if last is None or span > last:
last = span
if direction == 'next' and span > cursor:
if best is None or span < best:
best = span
elif direction == 'previous' and span < cursor:
if best is None or span > best:
best = span
if best is None and cycle:
if direction == 'next':
best = first
else:
best = last
return best
def get_all_markings(self, cursor, linking_pos):
ans = {}
# Set colors for cursor and linking pos
pos = cursor.start
while True:
ans.setdefault(pos, []).append('cursor')
if pos == cursor.end:
break
pos = self.doc.get_next_pos(pos)
# Handle the case of a space
if len(pos) == 2 or (len(pos) == 3 and pos[2] == 0):
ans.setdefault((pos[0], pos[1], -1), []).append('cursor')
if linking_pos is not None:
pos = linking_pos.start
while True:
ans.setdefault(pos, []).append('link')
if pos == linking_pos.end:
break
pos = self.doc.get_next_pos(pos)
# Handle the case of a space
if len(pos) == 2 or (len(pos) == 3 and pos[2] == 0):
ans.setdefault((pos[0], pos[1], -1), []).append('link')
# Set item colors
for item in self.annotations:
# Get the standard color for this item based on its label
base_labels = []
if self.config.annotation_type == 'categorical':
# For categorical use the configuration set
for key in item.labels:
if key in self.config.labels:
base_labels.append(key)
else:
base_labels.append("label:"+ key)
elif self.config.annotation_type == 'link':
# For links potentially indicate it is linked
if not self.config.args.do_not_show_linked:
base_labels.append('linked')
is_self_link = len(item.spans) == 2 and item.spans[0] == item.spans[1]
has_link = False
for span in item.spans:
if span == linking_pos:
has_link = True
for span in item.spans:
pos = span.start
while True:
cur = ans.setdefault(pos, [])
for label in base_labels:
cur.append(label)
if len(item.spans) > 1 and has_link:
cur.append('ref')
if is_self_link:
cur.append('self-link')
if pos == span.end:
break
pos = self.doc.get_next_pos(pos)
# Handle the case of a space
if len(pos) == 2 or (len(pos) == 3 and pos[2] == 0):
cur = ans.setdefault((pos[0], pos[1], -1), [])
for label in base_labels:
cur.append(label)
if len(item.spans) > 1 and has_link:
cur.append('ref')
# Now do disagreement colours.
for item, count in self.disagreements:
# Get the standard color for this item based on its label
base_labels = []
if self.config.annotation_type == 'categorical':
for key in item.labels:
if key in self.config.labels:
base_labels.append("compare-{}-{}".format(count, key))
else:
base_labels.append("compare-label-{}-{}".format(count, key))
has_link = False
for span in item.spans:
if span == linking_pos:
has_link = True
ref_label = "compare-ref-{}-{}".format(has_link, count)
max_span = max(item.spans)
for span in item.spans:
pos = span.start
while True:
cur = ans.setdefault(pos, [])
for label in base_labels:
cur.append(label)
# TODO: Record the span too
if len(item.spans) > 1:
if span == max_span:
cur.append(ref_label +"-last")
else:
cur.append(ref_label +"-earlier")
if pos == span.end:
break
pos = self.doc.get_next_pos(pos)
# Handle the case of a space
if len(pos) == 2 or (len(pos) == 3 and pos[2] == 0):
cur = ans.setdefault((pos[0], pos[1], -1), [])
for label in base_labels:
cur.append(label)
if len(item.spans) > 1 and has_link:
if span == max_span:
cur.append(ref_label +"-last")
else:
cur.append(ref_label +"-earlier")
return ans
def next_match(self, span, text, reverse=False):
return self.doc.next_match(span, text, reverse)
def get_item_with_spans(self, spans, any_present=False):
items = []
for item in self.annotations:
match = 0
for span in item.spans:
if span in spans:
match += 1
rev_match = 0
for span in spans:
if span in item.spans:
rev_match += 1
if len(item.spans) == len(spans) == match == rev_match:
items.append(item)
elif any_present and match > 0:
items.append(item)
return items
def modify_annotation(self, spans, label=None):
# TODO: switch link to be like the old style
to_edit = self.get_item_with_spans(spans)
if len(to_edit) == 0:
# No item with these spans exists, create it
nspans = [Span(self.config.annotation, self.doc, s) for s in spans]
item = Item(self.doc, nspans, label)
self.annotations.append(item)
else:
for item in to_edit:
# Modify existing item
if label is None:
if len(item.labels) == 0:
self.annotations.remove(item)
elif label in item.labels:
item.labels.remove(label)
if len(item.labels) == 0:
self.annotations.remove(item)
else:
item.labels.add(label)
def remove_annotation(self, spans):
permissive = self.config.annotation_type == 'link'
for item in self.get_item_with_spans(spans, permissive):
self.annotations.remove(item)
def write_out(self, filename=None):
out_filename = self.output_file
if filename is not None:
out_filename = filename
out = open(out_filename, 'w')
for item in self.annotations:
print(str(item), file=out)
out.close()
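# A minimal usage sketch (file contents are hypothetical; the relative import
# above means this module is normally used as part of its package):
# doc = Document('example.txt')        # e.g. "The cat sat\non the mat\n"
# span = Span('token', doc)            # starts at the first token, (0, 0)
# span = span.edited('right', 'move')  # moves to token (0, 1), i.e. "cat"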
|
11488723
|
from locatable import Node, Teacher, ExamCenter # AAHNIK 2020
from my_io import generateSaveAndPlot, plot_connections # AAHNIK 2020
from my_algo import sort, connect # AAHNIK 2020
def main(n):
centers = generateSaveAndPlot(ExamCenter, 20, 'ExamCenters')
vacancy = ExamCenter.total_vacancy(centers)
count = int((vacancy*n)//1) # no of teachers
teachers = generateSaveAndPlot(Teacher, count, 'Teachers')
# no of teachers must be greater than no of vacancies
origin = Node(0)
origin.x, origin.y = 50, 50
sort(centers, origin)
connect(teachers, centers)
plot_connections(centers)
if __name__ == "__main__":
main(4) # no of teachers is about 4 times the total vacancy of exam centers
# AAHNIK 2020
|
11488756
|
from tinder_api_sms import *
from features import *
import pprint
import datetime
import string
printer = pprint.PrettyPrinter(indent=4)
#recs = get_recommendations()["results"]
count = "50"
match_dict = all_matches(count)
date = str(datetime.datetime.now()).replace(" ", "")
#print(date)
self_dict = get_self()
selfId = self_dict['_id']
matches = match_dict["data"]["matches"]
# find the last message in each match that we didn't send
for user in matches:
    userId = user['_id']
    if user['messages']:
        lastMessage = user['messages'][-1]
        if lastMessage['from'] != selfId:
            message = lastMessage['message']
            printer.pprint(message)
|
11488767
|
import numpy as np
import h5py
import copy
import pandas
import os
import imp
import pickle
import scipy.interpolate
import warnings
import time
import matplotlib.pyplot as plt
import inspect
import types
import multi_tracker_analysis as mta  # assumed import: mta.bag2hdf5 is called below but was never imported ('warnings' was imported twice here)
def get_filenames(path, contains, does_not_contain=['~', '.pyc']):
cmd = 'ls ' + '"' + path + '"'
ls = os.popen(cmd).read()
all_filelist = ls.split('\n')
try:
all_filelist.remove('')
except:
pass
filelist = []
for i, filename in enumerate(all_filelist):
if contains in filename:
fileok = True
for nc in does_not_contain:
if nc in filename:
fileok = False
if fileok:
filelist.append( os.path.join(path, filename) )
return filelist
def get_filename(path, contains, does_not_contain=['~', '.pyc']):
filelist = get_filenames(path, contains, does_not_contain)
if len(filelist) == 1:
return filelist[0]
elif len(filelist) > 0 and 'bgimg' in contains:
pick = sorted(filelist)[-1]
print('Found multiple background images, using ' + str(pick))
return pick
else:
print (filelist)
print ('Found too many, or too few files')
return None
def load_bag_as_hdf5(bag, skip_messages=[]):
output_fname = bag.split('.')[0] + '.hdf5'
print (output_fname)
if not os.path.exists(output_fname):
mta.bag2hdf5.bag2hdf5( bag,
output_fname,
max_strlen=200,
skip_messages=skip_messages)
metadata = h5py.File(output_fname, 'r')
return metadata
class Trajectory(object):
def __init__(self, pd, objid, functions=None):
self.pd = pd[pd['objid']==objid]
for column in self.pd.columns:
self.__setattr__(column, self.pd[column].values)
if functions is not None:
self.__attach_analysis_functions__(functions)
def __getitem__(self, key): # trajec attributes can be accessed just like a dictionary this way
return self.__getattribute__(key)
class Dataset(object):
def __init__(self, pd, path=None, save=False, convert_to_units=False, annotations=None):
'''
highly recommended to provide directory path
convert_to_units requires that path is given, and that path contains a config file, which has attributes:
- pixels_per_mm (or pixels_per_cm, etc)
- position_zero = [x, y] # the x and y pixels of position zero
- frames_per_second defined
'''
self.pd = pd
self.keys = []
self.__processed_trajecs__ = {}
self.save = save
self.path = path
self.annotations = annotations
self.units = {'length': 'pixels', 'speed': 'pixels per frame'}
if path is not None:
if convert_to_units:
self.load_config()
pixels_per_unit_key = []
for key in self.config.__dict__.keys():
if 'pixels_per_' in key:
pixels_per_unit_key = key
self.units['length'] = key.split('pixels_per_')[1]
self.units['speed'] = self.units['length'] + ' per second'
break
self.pixels_per_unit = self.config.__dict__[pixels_per_unit_key]
self.frames_per_second = self.config.frames_per_second
self.convert_to_units()
self.set_dataset_filename()
if save:
self.load_keys()
self.copy_trajectory_objects_to_dataset()
del(self.pd)
self.pd = None
print()
print ('Dataset loaded as a stand alone object - to save your dataset, use: ')
print ('dataset.save_dataset()')
print()
print (' -- OR -- ')
print()
print ('del (dataset.config)')
print ('import pickle')
print ('f = open(dataset.dataset_filename, "w+")')
print( 'pickle.dump(dataset, f)')
print ('f.close()')
def set_dataset_filename(self):
raw_data_filename = get_filename(self.path, 'trackedobjects.hdf5')
self.dataset_filename = raw_data_filename.split('trackedobjects.hdf5')[0] + 'trackedobjects_dataset.pickle'
def convert_to_units(self):
self.pd.position_x = (self.pd.position_x-self.config.position_zero[0])/float(self.pixels_per_unit)
self.pd.position_y = (self.pd.position_y-self.config.position_zero[1])/float(self.pixels_per_unit)
self.pd.speed = self.pd.speed/float(self.pixels_per_unit)*self.frames_per_second
self.pd.velocity_x = self.pd.velocity_x/float(self.pixels_per_unit)*self.frames_per_second
self.pd.velocity_y = self.pd.velocity_y/float(self.pixels_per_unit)*self.frames_per_second
def load_config(self):
self.config = load_config_from_path(self.path)
def save_dataset(self):
try:
del(self.config)
except:
pass
f = open(self.dataset_filename, "w+")
pickle.dump(self, f)
f.close()
def trajec(self, key):
if self.pd is not None:
trajec = Trajectory(self.pd, key)
return trajec
else:
return self.trajecs[key]
#raise ValueError('This is a saved dataset, use dict access: Dataset.trajecs[key] for data')
def framestamp_to_timestamp(self, frame):
        t = self.pd.loc[frame]['time_epoch']  # .loc replaces the removed DataFrame.ix
try:
return t.iloc[0]
except:
return t
def timestamp_to_framestamp(self, t):
first_time = self.pd['time_epoch'].values[0]
first_frame = self.pd['frames'].values[0]
last_time = self.pd['time_epoch'].values[-1]
last_frame = self.pd['frames'].values[-1]
func = scipy.interpolate.interp1d([first_time, last_time],[first_frame, last_frame])
return int(func(t))
def load_keys(self, keys=None):
if self.annotations is None:
if keys is None:
self.keys = np.unique(self.pd.objid).tolist()
else:
self.keys = keys
else:
self.keys = []
for key, note in self.annotations.items():
if 'confirmed' in note['notes']:
self.keys.append(key)
def copy_trajectory_objects_to_dataset(self):
self.trajecs = {}
for key in self.keys:
trajec = copy.copy( Trajectory(self.pd, key) )
self.trajecs.setdefault(key, trajec)
def calculate_function_for_all_trajecs(self, function):
for key, trajec in self.trajecs.items():
function(trajec)
def remove_zero_length_objects(self):
if 'trajecs' in self.__dict__:
for key, trajec in self.trajecs.items():
if len(trajec.speed) == 0:
try:
del(self.trajecs[key])
except:
pass
try:
self.keys.remove(key)
except:
pass
for key in self.keys:
if key not in self.trajecs.keys():
self.keys.remove(key)
else:
            warnings.warn('remove_zero_length_objects only works on copied datasets')
def has_zero_length_objects(self):
if 'trajecs' in self.__dict__:
for key, trajec in self.trajecs.items():
if len(trajec.speed) == 0:
return True
for key in self.keys:
if key not in self.trajecs.keys():
return True
return False
else:
            warnings.warn('has_zero_length_objects only works on copied datasets')
def load_dataset_from_path(path, load_saved=False, convert_to_units=True, use_annotations=True):
'''
load_saved only recommended for reasonably sized datasets, < 500 mb
convert_to_units - see Dataset; converts pixels and frames to mm (or cm) and seconds, based on config
'''
if load_saved:
data_filename = get_filename(path, 'trackedobjects_dataset.pickle')
if data_filename is not None:
print (data_filename)
delete_cut_join_instructions_filename = get_filename(path, 'delete_cut_join_instructions.pickle')
epoch_time_when_dcjif_modified = os.path.getmtime(delete_cut_join_instructions_filename)
epoch_time_when_dataset_modified = os.path.getmtime(data_filename)
if epoch_time_when_dcjif_modified > epoch_time_when_dataset_modified:
print ('Delete cut join instructions modified - recalculating new dataset')
else:
f = open(data_filename)
dataset = pickle.load(f)
f.close()
# check path
if dataset.path != path: # an issue if the files get moved around
dataset.path = path
dataset.set_dataset_filename()
if dataset.has_zero_length_objects():
dataset.remove_zero_length_objects()
dataset.save_dataset()
print ('Loaded cached dataset last modified: ')
print (time.localtime(epoch_time_when_dataset_modified))
print()
return dataset
else:
print ('Could not find cached dataset in path: ')
print (path)
print (' Loading dataset from raw data now...')
data_filename = get_filename(path, 'trackedobjects.hdf5')
pd, config = load_and_preprocess_data(data_filename)
if use_annotations:
annotations_file = open(get_filename(path, 'annotations'))
annotations = pickle.load(annotations_file)
annotations_file.close()
else:
annotations = None
dataset = Dataset(pd, path=path,
save=load_saved,
convert_to_units=convert_to_units,
annotations=annotations) # if load_saved is True, copy the dataset, so it can be cached
if load_saved:
dataset.remove_zero_length_objects()
dataset.save_dataset()
return dataset
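# Typical usage, hedged (requires a directory containing trackedobjects.hdf5 and
# a matching config_<identifiercode>.py; the path below is hypothetical):
# dataset = load_dataset_from_path('/path/to/data', load_saved=True)
# trajec = dataset.trajec(dataset.keys[0])
# plt.plot(trajec.position_x, trajec.position_y)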
def load_data_as_pandas_dataframe_from_hdf5_file(filename, attributes=None):
if '.pickle' in filename:
pd = pandas.read_pickle(filename)
return pd
try:
data = h5py.File(filename, 'r', swmr=True)['data']
except ValueError:
data = h5py.File(filename, 'r', swmr=False)['data']
if attributes is None:
attributes = { 'objid' : 'objid',
'time_epoch_secs' : 'header.stamp.secs',
'time_epoch_nsecs' : 'header.stamp.nsecs',
'position_x' : 'position.x',
'position_y' : 'position.y',
'measurement_x' : 'measurement.x',
'measurement_y' : 'measurement.y',
'velocity_x' : 'velocity.x',
'velocity_y' : 'velocity.y',
'angle' : 'angle',
'frames' : 'header.frame_id',
'area' : 'size',
}
index = data['header.frame_id'].flat
d = {}
for attribute, name in attributes.items():
d.setdefault(attribute, data[name].flat)
pd = pandas.DataFrame(d, index=index)
#pd = pd.drop(pd.index==[0]) # delete 0 frames (frames with no data)
pd = pd[pd.frames!=0] # new pandas doesn't work with above line
pd = calc_additional_columns(pd)
# pd_subset = pd[pd.objid==key]
return pd
def load_and_preprocess_data(hdf5_filename):
'''
requires that a configuration file be found in the same directory as the hdf5 file, with the same prefix
returns: pandas dataframe, processed according to configuration file, and the configuration file instance
'''
if 'trackedobjects' not in hdf5_filename:
print ('File is not a trackedobjects file, looking for a trackedobjects file in this directory')
fname = get_filename(hdf5_filename, 'trackedobjects.hdf5')
if fname is not None:
hdf5_filename = fname
print ('Found: ', fname)
else:
raise ValueError('Could not find trackedobjects.hdf5 file')
pd = load_data_as_pandas_dataframe_from_hdf5_file(hdf5_filename, attributes=None)
hdf5_basename = os.path.basename(hdf5_filename)
directory = os.path.dirname(hdf5_filename)
identifiercode = hdf5_basename.split('_trackedobjects')[0]
config_filename = 'config_' + identifiercode + '.py'
config_filename = get_filename(directory, config_filename)
if config_filename is not None:
Config = imp.load_source('Config', config_filename)
config = Config.Config(directory, identifiercode)
        if 'preprocess_data_function' in config.__dict__:
pd = config.__getattribute__('preprocess_data_function')(pd)
else:
config = None
return pd, config
def load_config_from_path(path):
config_filename = get_filename(path, 'config')
try:
hdf5_file = os.path.basename(get_filename(path, 'trackedobjects.hdf5'))
identifiercode = hdf5_file.split('_trackedobjects')[0]
except:
config_file_basename = os.path.basename(config_filename)
identifiercode = config_file_basename.split('config_')[1].split('.py')[0]
print ('identifiercode: ', identifiercode)
if config_filename is not None:
Config = imp.load_source('Config', config_filename)
config = Config.Config(path, identifiercode)
else:
config = None
return config
def load_data_selection_from_path(path):
filename = get_filename(path, contains='dataframe_')
pd = pandas.read_pickle(filename)
config = load_config_from_path(os.path.dirname(path))
return pd, config
def find_instructions_related_to_objid(instructions, objid):
for i, instruction in enumerate(instructions):
if 'new_objid' in instruction.keys():
if objid == instruction['new_objid']:
print (i)
if 'objids' in instruction.keys():
if objid in instruction['objids']:
print (i)
def mass_delete(pd, objids_to_delete):
print('Mass deleting objects as requested...')
pd = pd[~pd['objid'].isin(objids_to_delete)]
return pd
def delete_cut_join_trajectories_according_to_instructions(pd, instructions, interpolate_joined_trajectories=True):
if type(instructions) is str:
f = open(instructions)
instructions = pickle.load(f)
f.close()
elif type(instructions) is not list:
instructions = [instructions]
def get_proper_order_of_objects(dataset, keys):
trajecs = []
ts = []
goodkeys = []
for key in keys:
trajec = dataset.trajec(key)
if len(trajec.speed) > 0:
trajecs.append(trajec)
ts.append(trajec.time_epoch[0])
goodkeys.append(key)
order = np.argsort(ts)
return np.array(goodkeys)[order]
def get_indices_to_use_for_interpolation(key1, key2):
length_key1 = len(dataset.trajec(key1).position_x)
first_index_key1 = np.max( [length_key1-4, 0] )
indices_key1 = np.arange( first_index_key1, length_key1 )
length_key2 = len(dataset.trajec(key2).position_x)
last_index_key2 = np.min( [length_key2, 0+4] )
indices_key2 = np.arange( 0, last_index_key2 )
return indices_key1, indices_key2
for instruction in instructions:
if instruction['action'] == 'delete':
#pass
if type(instruction['objid']) == list:
pd = mass_delete(pd, instruction['objid'])
else:
pd = pd[pd.objid!=instruction['objid']]
elif instruction['action'] == 'cut':
mask = (pd['objid']==instruction['objid']) & (pd['frames']>instruction['cut_frame_global'])
pd.loc[mask,'objid'] = instruction['new_objid']
elif instruction['action'] == 'join':
if interpolate_joined_trajectories is False:
for key in instruction['objids']:
mask = pd['objid']==key
if 'new_objid' in instruction.keys():
print ('*** ASSIGNING NEW OBJID: ', instruction['new_objid'])
pd.loc[mask,'objid'] = instruction['new_objid']
else:
warnings.warn("Warning: using old join method; not using unique objid numbers")
pd.loc[mask,'objid'] = instruction['objids'][0]
elif interpolate_joined_trajectories is True:
dataset = Dataset(pd)
keys = get_proper_order_of_objects(dataset, instruction['objids'])
for k, key in enumerate(keys[0:-1]):
dataset = Dataset(pd)
last_frame = dataset.trajec(keys[k]).frames[-1]
first_frame = dataset.trajec(keys[k+1]).frames[0]
if first_frame <= last_frame: # overlap between objects, keep the second object's data, since the first is likely bad kalman projections
mask = np.invert( (pd['objid']==keys[k]) & (pd['frames']>=first_frame) )
pd = pd[mask]
else:
frames_to_interpolate = np.arange(last_frame+1, first_frame)
if len(frames_to_interpolate) > 0:
indices_key1, indices_key2 = get_indices_to_use_for_interpolation(keys[k], keys[k+1])
x = np.hstack((dataset.trajec(keys[k]).frames[indices_key1], dataset.trajec(keys[k+1]).frames[indices_key2]))
new_pd_dict = {attribute: None for attribute in pd.columns}
new_pd_dict.setdefault('interpolated', None)
if 'data_to_add' in instruction.keys():
data_to_add_frames = []
data_to_add_x = []
data_to_add_y = []
for index, data_to_add in enumerate(instruction['data_to_add']):
frame_for_data_to_add = dataset.timestamp_to_framestamp(data_to_add[0])
print( frame_for_data_to_add, last_frame, first_frame)
if frame_for_data_to_add > last_frame and frame_for_data_to_add < first_frame:
data_to_add_frames.append(frame_for_data_to_add)
data_to_add_x.append(data_to_add[1])
data_to_add_y.append(data_to_add[2])
order = np.argsort(data_to_add_frames)
data_to_add_frames = np.array(data_to_add_frames)[order]
data_to_add_x = np.array(data_to_add_x)[order]
data_to_add_y = np.array(data_to_add_y)[order]
for attribute in pd.columns:
if attribute == 'objid':
attribute_values = [keys[0] for f in frames_to_interpolate]
elif attribute == 'frames':
attribute_values = frames_to_interpolate
else:
y = np.hstack((dataset.trajec(keys[k])[attribute][indices_key1], dataset.trajec(keys[k+1])[attribute][indices_key2]))
if 'data_to_add' in instruction.keys():
if 'position' in attribute:
x_with_added_data = np.hstack((x, data_to_add_frames))
if attribute == 'position_x':
y_with_added_data = np.hstack((y, data_to_add_y))
elif attribute == 'position_y':
y_with_added_data = np.hstack((y, data_to_add_x))
order = np.argsort(x_with_added_data)
x_with_added_data = x_with_added_data[order]
y_with_added_data = y_with_added_data[order]
func = scipy.interpolate.interp1d(x_with_added_data,y_with_added_data)
else:
func = scipy.interpolate.interp1d(x,y)
else:
func = scipy.interpolate.interp1d(x,y)
attribute_values = func(frames_to_interpolate)
new_pd_dict[attribute] = attribute_values
interpolated_values = np.ones_like(new_pd_dict['position_x'])
new_pd_dict['interpolated'] = interpolated_values
#return pd, new_pd_dict, frames_to_interpolate
new_pd = pandas.DataFrame(new_pd_dict, index=frames_to_interpolate)
pd = pandas.concat([pd, new_pd])
pd = pd.sort_index()
for key in instruction['objids']:
mask = pd['objid']==key
if 'new_objid' in instruction.keys():
print ('*** ASSIGNING NEW OBJID: ', key, ' to : ', instruction['new_objid'])
pd.loc[mask,'objid'] = instruction['new_objid']
else:
warnings.warn("Warning: using old join method; not using unique objid numbers")
pd.loc[mask,'objid'] = instruction['objids'][0]
return pd
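# A minimal sketch of the instruction formats handled above (keys inferred
# from the branches of the function; the objid values are hypothetical):
# instructions = [
#     {'action': 'delete', 'objid': 12},
#     {'action': 'delete', 'objid': [3, 7, 9]},  # mass delete
#     {'action': 'cut', 'objid': 5, 'cut_frame_global': 1200, 'new_objid': 5001},
#     {'action': 'join', 'objids': [8, 14], 'new_objid': 5002},
# ]
# pd = delete_cut_join_trajectories_according_to_instructions(pd, instructions)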
def calc_additional_columns(pd):
pd['time_epoch'] = pd['time_epoch_secs'] + pd['time_epoch_nsecs']*1e-9
pd['speed'] = np.linalg.norm( [pd['velocity_x'], pd['velocity_y']], axis=0 )
return pd
def framestamp_to_timestamp(pd, frame):
    # .ix was removed from pandas; .loc performs the same label-based lookup
    return pd.loc[frame]['time_epoch'].iloc[0]
def timestamp_to_framestamp(pd, t):
    pd_subset = pd[pd['time_epoch_secs']==np.floor(t)]
    # return the frame (index label) whose timestamp is closest to t
    return pd_subset.index[np.argmin(np.abs(pd_subset['time_epoch'] - t))]
def pixels_to_units(pd, pixels_per_unit, center=[0,0]):
attributes = ['speed',
'position_x',
'position_y',
'velocity_x',
'velocity_y',
]
for attribute in attributes:
pd[attribute] = pd[attribute] / pixels_per_unit
return pd
def load_multiple_datasets_into_single_pandas_data_frame(filenames, sync_frames=None):
'''
filenames - list of hdf5 files to load, full path name
sync_frames - list of frames, one for each filename, these sync_frames will all be set to zero
defaults to using first frame for each dataset as sync
'''
pds = [load_data_as_pandas_dataframe_from_hdf5_file(filename) for filename in filenames]
if sync_frames is None:
sync_frames = [np.min(pd.frames) for pd in pds]
for i, pd in enumerate(pds):
pd.index -= sync_frames[i]
pd.frames -= sync_frames[i]
combined_pd = pandas.concat(pds)
return combined_pd
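# Example usage sketch (hypothetical file names): frames are shifted so that
# each dataset's sync frame becomes frame 0 before concatenation.
# combined = load_multiple_datasets_into_single_pandas_data_frame(
#     ['day1_trackedobjects.hdf5', 'day2_trackedobjects.hdf5'],
#     sync_frames=[100, 250])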
def cull_short_trajectories(pd, min_length=4):
key_length_dict = get_objid_lengths(pd)
keys, lengths = zip(*key_length_dict.items())
keys = list(keys)
lengths = list(lengths)
indices = np.where(np.array(lengths)>min_length)[0]
keys_ok = np.array(keys)[indices]
culled_pd = pd.query('objid in @keys_ok')
return culled_pd
def compare_objids_from_two_dataframes(pd1, pd2):
objids_1 = np.unique(pd1.objid.values)
objids_2 = np.unique(pd2.objid.values)
unique_to_1 = [k for k in objids_1 if k not in objids_2]
unique_to_2 = [k for k in objids_2 if k not in objids_1]
return unique_to_1, unique_to_2
def cull_trajectories_that_do_not_cover_much_ground(pd, min_distance_travelled=10, print_keys=False):
distance_travelled = pd.speed.groupby(pd.objid).agg('sum')
indices = np.where(distance_travelled > min_distance_travelled)[0]
objids = distance_travelled.index[indices]
indices_where_object_acceptable = pd.objid.isin(objids)
culled_pd = pd[indices_where_object_acceptable]
return culled_pd
def cull_trajectories_that_do_not_cover_much_x_or_y_distance(pd, min_distance_travelled=10):
min_x = pd.position_x.groupby(pd.objid).agg('min')
max_x = pd.position_x.groupby(pd.objid).agg('max')
distance_travelled = max_x - min_x
indices = np.where(distance_travelled > min_distance_travelled)[0]
objids = distance_travelled.index[indices]
indices_where_object_acceptable = pd.objid.isin(objids)
culled_pd = pd[indices_where_object_acceptable]
pd = culled_pd
min_y = pd.position_y.groupby(pd.objid).agg('min')
max_y = pd.position_y.groupby(pd.objid).agg('max')
distance_travelled = max_y - min_y
indices = np.where(distance_travelled > min_distance_travelled)[0]
objids = distance_travelled.index[indices]
indices_where_object_acceptable = pd.objid.isin(objids)
culled_pd = pd[indices_where_object_acceptable]
return culled_pd
def get_objid_lengths(pd, objid_attribute='objid'):
    keys = np.unique(pd[objid_attribute])
    # np.bincount assumes the objid values are non-negative integers
    lengths = np.bincount(pd[objid_attribute])
true_lengths = lengths[np.nonzero(lengths)[0]]
key_length_dict = dict(zip(keys,true_lengths))
return key_length_dict
def remove_rows_above_speed_threshold(pd, speed_threshold=10):
q = 'speed < ' + str(speed_threshold)
return pd.query(q)
def remove_objects_that_never_exceed_minimum_speed(pd, speed_threshold=1):
speeds = pd.speed.groupby(pd.objid).max()
keysok = np.where(speeds.values > speed_threshold)
objidsok = speeds.iloc[keysok].index
pd_q = pd.query('objid in @objidsok')
return pd_q
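# A typical cleaning pipeline sketch (thresholds are illustrative, not defaults):
# pd = calc_additional_columns(pd)
# pd = cull_short_trajectories(pd, min_length=10)
# pd = cull_trajectories_that_do_not_cover_much_ground(pd, min_distance_travelled=20)
# pd = remove_rows_above_speed_threshold(pd, speed_threshold=50)
# pd = remove_objects_that_never_exceed_minimum_speed(pd, speed_threshold=2)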
|
11488786
|
import pytest
from weaverbird.backends.sql_translator.metadata import SqlQueryMetadataManager
from weaverbird.backends.sql_translator.types import SQLQuery
@pytest.fixture
def query():
return SQLQuery(
query_name='SELECT_STEP_0',
transformed_query='WITH SELECT_STEP_0 AS (SELECT * FROM products)',
selection_query='SELECT TOTO, RAICHU, FLORIZARRE FROM SELECT_STEP_0',
metadata_manager=SqlQueryMetadataManager(
tables_metadata={'TABLE1': {'TOTO': 'text', 'RAICHU': 'int', 'FLORIZARRE': 'text'}},
),
)
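# A minimal sketch of a test consuming the fixture above; the assertions
# simply mirror the values the fixture constructs.
def test_query_fixture_fields(query):
    assert query.query_name == 'SELECT_STEP_0'
    assert query.selection_query.startswith('SELECT')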
|
11488799
|
import os
import cv2
import argparse
import numpy as np
from PIL import Image
from tqdm import tqdm
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from src.models.modnet import MODNet
torch_transforms = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
def matting(video, result, alpha_matte=False, fps=30):
# video capture
vc = cv2.VideoCapture(video)
if vc.isOpened():
rval, frame = vc.read()
else:
rval = False
if not rval:
print('Failed to read the video: {0}'.format(video))
exit()
num_frame = vc.get(cv2.CAP_PROP_FRAME_COUNT)
h, w = frame.shape[:2]
if w >= h:
rh = 512
rw = int(w / h * 512)
else:
rw = 512
rh = int(h / w * 512)
rh = rh - rh % 32
rw = rw - rw % 32
# video writer
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video_writer = cv2.VideoWriter(result, fourcc, fps, (w, h))
print('Start matting...')
    with tqdm(range(int(num_frame))) as t:
for c in t:
frame_np = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # the interpolation flag must be passed by keyword; the third
            # positional argument of cv2.resize is dst, not interpolation
            frame_np = cv2.resize(frame_np, (rw, rh), interpolation=cv2.INTER_AREA)
frame_PIL = Image.fromarray(frame_np)
frame_tensor = torch_transforms(frame_PIL)
frame_tensor = frame_tensor[None, :, :, :]
if GPU:
frame_tensor = frame_tensor.cuda()
with torch.no_grad():
_, _, matte_tensor = modnet(frame_tensor, True)
matte_tensor = matte_tensor.repeat(1, 3, 1, 1)
matte_np = matte_tensor[0].data.cpu().numpy().transpose(1, 2, 0)
if alpha_matte:
view_np = matte_np * np.full(frame_np.shape, 255.0)
else:
view_np = matte_np * frame_np + (1 - matte_np) * np.full(frame_np.shape, 255.0)
view_np = cv2.cvtColor(view_np.astype(np.uint8), cv2.COLOR_RGB2BGR)
view_np = cv2.resize(view_np, (w, h))
video_writer.write(view_np)
            rval, frame = vc.read()
video_writer.release()
print('Save the result video to {0}'.format(result))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--video', type=str, required=True, help='input video file')
parser.add_argument('--result-type', type=str, default='fg', choices=['fg', 'matte'],
help='matte - save the alpha matte; fg - save the foreground')
parser.add_argument('--fps', type=int, default=30, help='fps of the result video')
print('Get CMD Arguments...')
args = parser.parse_args()
if not os.path.exists(args.video):
print('Cannot find the input video: {0}'.format(args.video))
exit()
print('Load pre-trained MODNet...')
pretrained_ckpt = './pretrained/modnet_webcam_portrait_matting.ckpt'
modnet = MODNet(backbone_pretrained=False)
modnet = nn.DataParallel(modnet)
GPU = True if torch.cuda.device_count() > 0 else False
if GPU:
print('Use GPU...')
modnet = modnet.cuda()
modnet.load_state_dict(torch.load(pretrained_ckpt))
else:
print('Use CPU...')
modnet.load_state_dict(torch.load(pretrained_ckpt, map_location=torch.device('cpu')))
modnet.eval()
result = os.path.splitext(args.video)[0] + '_{0}.mp4'.format(args.result_type)
alpha_matte = True if args.result_type == 'matte' else False
matting(args.video, result, alpha_matte, args.fps)
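# Example invocation sketch (hypothetical script and video paths):
#   python video_matting.py --video demo.mp4 --result-type matte --fps 30
# which writes the alpha matte video to demo_matte.mp4 next to the input.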
|
11488808
|
from mock import patch
from common.utils.haystack import rebuild_index
from allegation.factories import OfficerFactory
from common.tests.core import SimpleTestCase
from twitterbot.factories import ResponseTemplateFactory
from twitterbot.services.twitter_bot_responses_service import TwitterBotResponsesService
class TwitterBotResponsesServiceTestCase(SimpleTestCase):
def setUp(self):
ResponseTemplateFactory(response_type='officer', message='{{obj.display_name}}')
ResponseTemplateFactory(response_type='investigator', message='{{obj.name}}')
def test_build_responses(self):
officer = OfficerFactory(officer_first='Jason', officer_last='<NAME>')
# Temporary disable investigator response since mobile version doesn't have investigator page yet
# investigator = InvestigatorFactory(name='<NAME>')
names = {
'<NAME>': [],
'<NAME>': []
}
rebuild_index()
responses = TwitterBotResponsesService(names).build_responses()
responses_str = [x.message for x in responses]
responses_str.should.contain(officer.display_name)
# Temporary disable investigator response since mobile version doesn't have investigator page yet
# responses_str.should.contain(investigator.name)
def test_limit_responses_return(self):
with patch('twitterbot.services.responses.officers.OfficerResponses.build_responses',
return_value=list(range(20))):
responses = TwitterBotResponsesService({}).build_responses()
len(responses).should.equal(10)
|
11488835
|
from runners.python import Submission
class DavidSubmission(Submission):
    def run(self, s):
        # :param s: input in string format
        # :return: solution flag
        # Spinlock: step through a growing circular buffer, inserting the
        # values 1..2017, then return the value just after the final insertion.
        steps = int(s)
        iterations = 2017
        buffer = [0]
        pos = 0
        for i in range(1, iterations+1):
            pos = (pos+steps) % i  # step around the current buffer of length i
            buffer = buffer[:pos+1] + [i] + buffer[pos+1:]  # insert i after pos
            pos += 1  # pos now points at the value just inserted
        return buffer[pos+1]
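# Example sketch (assumes the Submission runner passes the puzzle input to run):
# DavidSubmission().run('3')  # returns 638 for the example step size of 3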
|
11488849
|
import foohid
import struct
import time
keyboard = (
    0x05, 0x01,  # Usage Page (Generic Desktop)
    0x09, 0x06,  # Usage (Keyboard)
    0xa1, 0x01,  # Collection (Application)
    0x05, 0x07,  #   Usage Page (Keyboard/Keypad)
    0x19, 0xe0,  #   Usage Minimum (224, Left Control)
    0x29, 0xe7,  #   Usage Maximum (231, Right GUI)
    0x15, 0x00,  #   Logical Minimum (0)
    0x25, 0x01,  #   Logical Maximum (1)
    0x75, 0x01,  #   Report Size (1)
    0x95, 0x08,  #   Report Count (8)
    0x81, 0x02,  #   Input (Data, Variable, Absolute) -- modifier byte
    0x95, 0x01,  #   Report Count (1)
    0x75, 0x08,  #   Report Size (8)
    0x81, 0x01,  #   Input (Constant) -- reserved byte
    0x95, 0x05,  #   Report Count (5)
    0x75, 0x01,  #   Report Size (1)
    0x05, 0x08,  #   Usage Page (LEDs)
    0x19, 0x01,  #   Usage Minimum (1)
    0x29, 0x05,  #   Usage Maximum (5)
    0x91, 0x02,  #   Output (Data, Variable, Absolute) -- LED report
    0x95, 0x01,  #   Report Count (1)
    0x75, 0x03,  #   Report Size (3)
    0x91, 0x01,  #   Output (Constant) -- LED report padding
    0x95, 0x06,  #   Report Count (6)
    0x75, 0x08,  #   Report Size (8)
    0x15, 0x00,  #   Logical Minimum (0)
    0x25, 0x65,  #   Logical Maximum (101)
    0x05, 0x07,  #   Usage Page (Keyboard/Keypad)
    0x19, 0x00,  #   Usage Minimum (0)
    0x29, 0x65,  #   Usage Maximum (101)
    0x81, 0x00,  #   Input (Data, Array) -- six-key rollover array
    0x09, 0x00,  #   Usage (Undefined)
    0x75, 0x08,  #   Report Size (8)
    0x95, 0x01,  #   Report Count (1)
    0x15, 0x00,  #   Logical Minimum (0)
    0x25, 0x7f,  #   Logical Maximum (127)
    0xb1, 0x02,  #   Feature (Data, Variable, Absolute)
    0xc0         # End Collection
)
try:
    foohid.destroy("FooHID simple keyboard")
except Exception:
    # ignore the error if the virtual device does not exist yet
    pass
foohid.create("FooHID simple keyboard", struct.pack('{0}B'.format(len(keyboard)), *keyboard), "SN 123", 2, 3)
try:
while True:
# press "a" key
foohid.send("FooHID simple keyboard", struct.pack('8B', 0, 0, 4, 0, 0, 0, 0, 0))
time.sleep(0.1)
foohid.send("FooHID simple keyboard", struct.pack('8B', 0, 0, 0, 0, 0, 0, 0, 0))
time.sleep(0.5)
except KeyboardInterrupt:
# make sure key is unpressed before exiting
foohid.send("FooHID simple keyboard", struct.pack('8B', 0, 0, 0, 0, 0, 0, 0, 0))
foohid.destroy("FooHID simple keyboard")
|
11488897
|
import pandas as pd
import numpy as np
import json
import requests
def _read_usgs_json(text):
data = pd.DataFrame()
for i in range(len(text['value']['timeSeries'])):
try:
            # variableDescription is used as the column name;
            # sourceInfo/siteName would be an alternative
            site_name = text['value']['timeSeries'][i]['variable']['variableDescription']
site_data = pd.DataFrame(text['value']['timeSeries'][i]['values'][0]['value'])
site_data.set_index('dateTime', drop=True, inplace=True)
site_data.index = pd.to_datetime(site_data.index, utc=True)
site_data.rename(columns={'value': site_name}, inplace=True)
site_data[site_name] = pd.to_numeric(site_data[site_name])
site_data.index.name = None
del site_data['qualifiers']
data = data.combine_first(site_data)
        except Exception:
            # skip any series that is missing the expected fields
            pass
return data # we could also extract metadata and return that here
def read_usgs_file(file_name):
"""
Reads a USGS JSON data file (from https://waterdata.usgs.gov/nwis)
Parameters
----------
file_name : str
Name of USGS JSON data file
Returns
-------
data : pandas DataFrame
Data indexed by datetime with columns named according to the parameter's
variable description
"""
with open(file_name) as json_file:
text = json.load(json_file)
data = _read_usgs_json(text)
return data
def request_usgs_data(station, parameter, start_date, end_date,
data_type='Daily', proxy=None, write_json=None):
"""
Loads USGS data directly from https://waterdata.usgs.gov/nwis using a
GET request
The request URL prints to the screen.
Parameters
----------
station : str
USGS station number (e.g. '08313000')
parameter : str
        USGS parameter ID (e.g. '00060' for Discharge, cubic feet per second)
start_date : str
Start date in the format 'YYYY-MM-DD' (e.g. '2018-01-01')
end_date : str
End date in the format 'YYYY-MM-DD' (e.g. '2018-12-31')
data_type : str
Data type, options include 'Daily' (return the mean daily value) and
'Instantaneous'.
proxy : dict or None
To request data from behind a firewall, define a dictionary of proxy settings,
for example {"http": 'localhost:8080'}
write_json : str or None
Name of json file to write data
Returns
-------
data : pandas DataFrame
Data indexed by datetime with columns named according to the parameter's
variable description
"""
assert data_type in ['Daily', 'Instantaneous'], 'data_type must be Daily or Instantaneous'
if data_type == 'Daily':
data_url = 'https://waterservices.usgs.gov/nwis/dv'
        api_query = '/?format=json&sites='+station+ \
            '&startDT='+start_date+'&endDT='+end_date+ \
            '&statCd=00003'+ \
            '&parameterCd='+parameter+'&siteStatus=all'
else:
data_url = 'https://waterservices.usgs.gov/nwis/iv'
        api_query = '/?format=json&sites='+station+ \
            '&startDT='+start_date+'&endDT='+end_date+ \
            '&parameterCd='+parameter+'&siteStatus=all'
print('Data request URL: ', data_url+api_query)
response = requests.get(url=data_url+api_query,proxies=proxy)
text = json.loads(response.text)
if write_json is not None:
with open(write_json, 'w') as outfile:
json.dump(text, outfile)
data = _read_usgs_json(text)
return data
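# Example request sketch, reusing the station and parameter IDs from the
# docstring examples above:
# data = request_usgs_data('08313000', '00060', '2018-01-01', '2018-12-31',
#                          data_type='Daily')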
|
11488927
|
import random
def private_key(p):
    # note: the random module is not cryptographically secure;
    # adequate for an exercise, not for real key generation
    return random.randint(2, p-1)
def public_key(p, g, private):
return pow(g, private, p)
def secret(p, public, private):
return pow(public, private, p)
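# A minimal sketch of a full exchange (p and g are small demo values, not
# production-grade parameters): both parties derive the same shared secret.
if __name__ == '__main__':
    p, g = 23, 5
    alice_private = private_key(p)
    bob_private = private_key(p)
    alice_public = public_key(p, g, alice_private)
    bob_public = public_key(p, g, bob_private)
    assert secret(p, bob_public, alice_private) == secret(p, alice_public, bob_private)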
|
11488957
|
from rest_framework import serializers
from modelchimp.models.invitation import Invitation
class InvitationSerializer(serializers.ModelSerializer):
class Meta:
model = Invitation
fields = '__all__'
|
11488965
|
import argparse
from argparse import Namespace
import importlib
import os
import time
from typing import Dict, Tuple
from pyspark_k8s_boilerplate.config import cfg
from pyspark_k8s_boilerplate.utils.log import logger
def get_args() -> Namespace:
"""Get arguments passed to pyspark entrypoint."""
parser = argparse.ArgumentParser(description=f"Run a {cfg.app_name} job")
parser.add_argument('--job', type=str, required=True, dest='job_name',
help="The Name of the job module you want to run")
parser.add_argument('--job-args', nargs='*', dest='job_args',
help="extra args to send to the job, for instance:"
" jobs=prep, jobs=train")
arguments = parser.parse_args()
logger.info("Called with arguments %s" % arguments)
return arguments
def get_job_args(arguments: Namespace) -> Tuple[Dict[str, str],
Dict[str, str]]:
"""Get any additional job arguments associated with a given spark job."""
environment = {
'JOB-ARGS': ' '.join(arguments.job_args) if arguments.job_args else ''
}
if arguments.job_args:
job_args_tuples = [arg_str.split('=') for arg_str in
arguments.job_args]
logger.info('job_args_tuples: %s' % job_args_tuples)
job_args = {a[0]: a[1] for a in job_args_tuples}
else:
job_args = {}
logger.info('\nRunning job %s...\nenvironment is %s\n'
% (arguments.job_name, environment))
return job_args, environment
def run_job(args: Namespace, job_args: Dict[str, str]) -> None:
"""
Run the desired pyspark job with any indicated module and job arguments.
"""
job_module = importlib.import_module(args.job_name)
start = time.time()
job_module.execute(**job_args) # type: ignore
end = time.time()
total = end - start
logger.info("\nExecution of job %s took %s minutes."
% (args.job_name, str(round((total / 60), 2))))
if __name__ == "__main__":
args = get_args()
job_args, env = get_job_args(args)
os.environ.update(env)
run_job(args, job_args)
# TODO spark history server
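# Example invocation sketch (hypothetical entrypoint and job module names):
#   spark-submit main.py --job jobs.prep --job-args date=2021-01-01 env=dev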
|
11488968
|
annotation_path = "/home/all/datasets/caltech/extract/train03_new/set04_V003_I01376.txt"
import pydatatool as pdt
# anno = pdt.caltech.load_txt(annotation_path)
# param = pdt.caltech.get_default_filter()
# anns, annId, objId = pdt.caltech.txt2coco(4,3,1376,anno)
pth = "/home/all/datasets/caltech/extract/train03_new"
annotations_train_10x, image_ids_train, annId_str, objId_str = pdt.caltech.txts2cocos(pth, 0, 0, {})
pdt.caltech.save_coco(annotations_train_10x, image_ids_train, '../output/json/caltech_train_10x_new.json')
pth = "/home/all/datasets/caltech/extract/test_1x_new"
annotations_test_1x, image_ids_test, annId_str, objId_str = pdt.caltech.txts2cocos(pth, annId_str, objId_str, {})
pdt.caltech.save_coco(annotations_test_1x, image_ids_test, '../output/json/caltech_test_1x_new.json')
igandic = [a['ignore']==a['iscrowd'] for a in annotations_test_1x]
print(all(igandic))
print(len(image_ids_train)==42782)
print(len(image_ids_test)==4024)
|