code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from types import GeneratorType
from satori.objects import Object, Argument
from satori.events.misc import flattenCoroutine
from satori.events.protocol import Command, ProtocolError
class Scheduler(Object):
"""Interface. Chooses which Client to run next.
"""
def next(self):
"""Return the next Client to handle.
"""
raise NotImplementedError()
def add(self, client):
"""Add a Client to this Scheduler.
"""
raise NotImplementedError()
def remove(self, client):
"""Remove a Client from this Scheduler.
"""
raise NotImplementedError()
class Client(Object):
"""Abstract. Base for client implementations.
"""
@Argument('scheduler', type=Scheduler)
def __init__(self, scheduler):
self.scheduler = scheduler
def sendResponse(self, response):
"""Send a response to this Client.
"""
raise NotImplementedError()
def recvCommand(self):
"""Receive the next command from this Client.
"""
raise NotImplementedError()
def disconnect(self):
"""Disconnect this Client.
"""
raise NotImplementedError()
class CoroutineClient(Client):
"""In-process Client implemented as a coroutine.
"""
@Argument('coroutine', type=GeneratorType)
def __init__(self, coroutine):
self.coroutine = flattenCoroutine(coroutine)
self.response = None
self.scheduler.add(self)
def sendResponse(self, response):
"""Send a response to this Client.
The response is saved and delivered to the coroutine on the next call to
recvCommand().
"""
if self.response is not None:
raise ProtocolError(
"sendResponse() called twice without an intervening recvCommand()")
self.response = response
self.scheduler.add(self)
def recvCommand(self):
"""Receive the next command from this Client.
"""
response = self.response
self.response = None
if isinstance(response, Exception):
command = self.coroutine.throw(response)
else:
command = self.coroutine.send(response)
if not isinstance(command, Command):
raise ProtocolError("received object is not a Command")
return command
def disconnect(self):
"""Disconnect this Client.
"""
self.response = ProtocolError() | satori.events/satori/events/client.py | from types import GeneratorType
from satori.objects import Object, Argument
from satori.events.misc import flattenCoroutine
from satori.events.protocol import Command, ProtocolError
class Scheduler(Object):
"""Interface. Chooses which Client to run next.
"""
def next(self):
"""Return the next Client to handle.
"""
raise NotImplementedError()
def add(self, client):
"""Add a Client to this Scheduler.
"""
raise NotImplementedError()
def remove(self, client):
"""Remove a Client from this Scheduler.
"""
raise NotImplementedError()
class Client(Object):
"""Abstract. Base for client implementations.
"""
@Argument('scheduler', type=Scheduler)
def __init__(self, scheduler):
self.scheduler = scheduler
def sendResponse(self, response):
"""Send a response to this Client.
"""
raise NotImplementedError()
def recvCommand(self):
"""Receive the next command from this Client.
"""
raise NotImplementedError()
def disconnect(self):
"""Disconnect this Client.
"""
raise NotImplementedError()
class CoroutineClient(Client):
"""In-process Client implemented as a coroutine.
"""
@Argument('coroutine', type=GeneratorType)
def __init__(self, coroutine):
self.coroutine = flattenCoroutine(coroutine)
self.response = None
self.scheduler.add(self)
def sendResponse(self, response):
"""Send a response to this Client.
The response is saved and delivered to the coroutine on the next call to
recvCommand().
"""
if self.response is not None:
raise ProtocolError(
"sendResponse() called twice without an intervening recvCommand()")
self.response = response
self.scheduler.add(self)
def recvCommand(self):
"""Receive the next command from this Client.
"""
response = self.response
self.response = None
if isinstance(response, Exception):
command = self.coroutine.throw(response)
else:
command = self.coroutine.send(response)
if not isinstance(command, Command):
raise ProtocolError("received object is not a Command")
return command
def disconnect(self):
"""Disconnect this Client.
"""
self.response = ProtocolError() | 0.776665 | 0.137764 |
import asyncio
import logging
from pycoinnet.InvItem import InvItem, ITEM_TYPE_TX
class TxHandler:
def __init__(self, inv_collector, tx_store, tx_validator=lambda tx: True):
self.inv_collector = inv_collector
self.q = inv_collector.new_inv_item_queue()
self.tx_store = tx_store
self._validator_handle = asyncio.Task(self._run(tx_validator))
def add_peer(self, peer):
"""
Call this method when a peer comes online and you want to keep its mempool
in sync with this mempool.
"""
@asyncio.coroutine
def _run_getdata(next_message):
while True:
name, data = yield from next_message()
inv_items = data["items"]
not_found = []
txs_found = []
for inv_item in inv_items:
if inv_item.item_type != ITEM_TYPE_TX:
continue
tx = self.tx_store.get(inv_item.data)
if tx:
txs_found.append(tx)
else:
not_found.append(inv_item)
if not_found:
peer.send_msg("notfound", items=not_found)
for tx in txs_found:
peer.send_msg("tx", tx=tx)
@asyncio.coroutine
def _run_mempool(next_message):
try:
name, data = yield from next_message()
inv_items = [InvItem(ITEM_TYPE_TX, tx.hash()) for tx in self.tx_store.values()]
logging.debug("sending inv of %d item(s) in response to mempool", len(inv_items))
if len(inv_items) > 0:
peer.send_msg("inv", items=inv_items)
# then we exit. We don't need to handle this message more than once.
except EOFError:
pass
next_getdata = peer.new_get_next_message_f(lambda name, data: name == 'getdata')
peer.add_task(_run_getdata(next_getdata))
next_mempool = peer.new_get_next_message_f(lambda name, data: name == 'mempool')
peer.add_task(_run_mempool(next_mempool))
peer.send_msg("mempool")
def add_tx(self, tx):
"""
Add a transaction to the mempool and advertise it to peers so it can
propogate throughout the network.
"""
the_hash = tx.hash()
if the_hash not in self.tx_store:
self.tx_store[the_hash] = tx
self.inv_collector.advertise_item(InvItem(ITEM_TYPE_TX, the_hash))
@asyncio.coroutine
def _run(self, tx_validator):
while True:
inv_item = yield from self.q.get()
if inv_item.item_type != ITEM_TYPE_TX:
continue
self.inv_collector.fetch_validate_store_item_async(inv_item, self.tx_store, tx_validator) | pycoinnet/peergroup/TxHandler.py | import asyncio
import logging
from pycoinnet.InvItem import InvItem, ITEM_TYPE_TX
class TxHandler:
def __init__(self, inv_collector, tx_store, tx_validator=lambda tx: True):
self.inv_collector = inv_collector
self.q = inv_collector.new_inv_item_queue()
self.tx_store = tx_store
self._validator_handle = asyncio.Task(self._run(tx_validator))
def add_peer(self, peer):
"""
Call this method when a peer comes online and you want to keep its mempool
in sync with this mempool.
"""
@asyncio.coroutine
def _run_getdata(next_message):
while True:
name, data = yield from next_message()
inv_items = data["items"]
not_found = []
txs_found = []
for inv_item in inv_items:
if inv_item.item_type != ITEM_TYPE_TX:
continue
tx = self.tx_store.get(inv_item.data)
if tx:
txs_found.append(tx)
else:
not_found.append(inv_item)
if not_found:
peer.send_msg("notfound", items=not_found)
for tx in txs_found:
peer.send_msg("tx", tx=tx)
@asyncio.coroutine
def _run_mempool(next_message):
try:
name, data = yield from next_message()
inv_items = [InvItem(ITEM_TYPE_TX, tx.hash()) for tx in self.tx_store.values()]
logging.debug("sending inv of %d item(s) in response to mempool", len(inv_items))
if len(inv_items) > 0:
peer.send_msg("inv", items=inv_items)
# then we exit. We don't need to handle this message more than once.
except EOFError:
pass
next_getdata = peer.new_get_next_message_f(lambda name, data: name == 'getdata')
peer.add_task(_run_getdata(next_getdata))
next_mempool = peer.new_get_next_message_f(lambda name, data: name == 'mempool')
peer.add_task(_run_mempool(next_mempool))
peer.send_msg("mempool")
def add_tx(self, tx):
"""
Add a transaction to the mempool and advertise it to peers so it can
propogate throughout the network.
"""
the_hash = tx.hash()
if the_hash not in self.tx_store:
self.tx_store[the_hash] = tx
self.inv_collector.advertise_item(InvItem(ITEM_TYPE_TX, the_hash))
@asyncio.coroutine
def _run(self, tx_validator):
while True:
inv_item = yield from self.q.get()
if inv_item.item_type != ITEM_TYPE_TX:
continue
self.inv_collector.fetch_validate_store_item_async(inv_item, self.tx_store, tx_validator) | 0.316158 | 0.1011 |
import argparse
import json
import pika
import platform
import sys
import time
from datetime import date
from gpiozero import CPUTemperature
parser = argparse.ArgumentParser()
# Requires a json file for authentication paramters
# debug is optional
parser.add_argument('-j','--json', required=True,
metavar='json_file', help="JSON file containing the mqtt information.")
parser.add_argument('-d','--debug', action='store_true', help="Show debugging messages on the command line")
args = parser.parse_args()
#print(args)
hostname = platform.node()
def debug_print(msg):
if args.debug:
print(f'DEBUG: {msg}')
#End debug_print
def current_time():
now = time.localtime()
now_hour = str(time.strftime("%H", now))
now_min = str(time.strftime("%M", now))
now_sec = str(time.strftime("%S", now))
nowdate = date.today()
return f'{nowdate} {now_hour}:{now_min}'
#End current_time()
def load_json(filename):
with open(filename) as f:
data = json.load(f)
f.close()
if 'user' in data.keys():
debug_print('user = ' + data['user'])
if 'passwd' in data.keys():
debug_print('passwd = ' + data['passwd'])
if 'ip' in data.keys():
debug_print('ip = ' + data['ip'])
return(data)
#End load_json
def send_mqtt(user,passwd,ip,port,mqtt_queue,message):
credentials = pika.PlainCredentials(user, passwd)
parameters = pika.ConnectionParameters(ip,port,'/',credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(queue=mqtt_queue)
channel.basic_publish(exchange='',routing_key=mqtt_queue,body=message)
connection.close()
#End send_mqtt
def cpu_temp():
cpuc = CPUTemperature()
cpuf = (int(cpuc.temperature) * (9/5) +32)
cpufs = str(f'{cpuf:.1f}')
debug_print(cpufs)
return(cpufs)
#End cpu_temp
if __name__ == '__main__':
mqtt_data = load_json(args.json)
ftemp = cpu_temp()
now = current_time()
body = f'{now},{hostname},{ftemp}'
send_mqtt(mqtt_data['user'],mqtt_data['passwd'],mqtt_data['ip'],5672,'cpu_temperature',body) | mqtt_temp_client.py |
import argparse
import json
import pika
import platform
import sys
import time
from datetime import date
from gpiozero import CPUTemperature
parser = argparse.ArgumentParser()
# Requires a json file for authentication paramters
# debug is optional
parser.add_argument('-j','--json', required=True,
metavar='json_file', help="JSON file containing the mqtt information.")
parser.add_argument('-d','--debug', action='store_true', help="Show debugging messages on the command line")
args = parser.parse_args()
#print(args)
hostname = platform.node()
def debug_print(msg):
if args.debug:
print(f'DEBUG: {msg}')
#End debug_print
def current_time():
now = time.localtime()
now_hour = str(time.strftime("%H", now))
now_min = str(time.strftime("%M", now))
now_sec = str(time.strftime("%S", now))
nowdate = date.today()
return f'{nowdate} {now_hour}:{now_min}'
#End current_time()
def load_json(filename):
with open(filename) as f:
data = json.load(f)
f.close()
if 'user' in data.keys():
debug_print('user = ' + data['user'])
if 'passwd' in data.keys():
debug_print('passwd = ' + data['passwd'])
if 'ip' in data.keys():
debug_print('ip = ' + data['ip'])
return(data)
#End load_json
def send_mqtt(user,passwd,ip,port,mqtt_queue,message):
credentials = pika.PlainCredentials(user, passwd)
parameters = pika.ConnectionParameters(ip,port,'/',credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(queue=mqtt_queue)
channel.basic_publish(exchange='',routing_key=mqtt_queue,body=message)
connection.close()
#End send_mqtt
def cpu_temp():
cpuc = CPUTemperature()
cpuf = (int(cpuc.temperature) * (9/5) +32)
cpufs = str(f'{cpuf:.1f}')
debug_print(cpufs)
return(cpufs)
#End cpu_temp
if __name__ == '__main__':
mqtt_data = load_json(args.json)
ftemp = cpu_temp()
now = current_time()
body = f'{now},{hostname},{ftemp}'
send_mqtt(mqtt_data['user'],mqtt_data['passwd'],mqtt_data['ip'],5672,'cpu_temperature',body) | 0.238373 | 0.063193 |
from optparse import make_option
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand, CommandError, NoArgsCommand
from django.utils import translation
from django.utils.translation import get_language_info
import sys
from fluent_pages.models import UrlNode
from parler.utils.context import switch_language
class Command(NoArgsCommand):
"""
Generate rewrite/redirect rules for the web server to redirect a single unmaintained
language to another one.
"""
help = "Find all pages of a given language, and redirect to the canonical version."
args = "language"
option_list = BaseCommand.option_list + (
make_option('--format', default='nginx', help='Choose the output format, defaults to "nginx"'),
make_option('--site', default=int(settings.SITE_ID), help="Choose the site ID to "),
make_option('--from'),
make_option('--host'),
make_option('--to', default=settings.LANGUAGE_CODE),
)
def handle(self, *args, **options):
site = options['site']
host = options['host']
from_lang = options['from']
to_lang = options['to']
if not from_lang:
raise CommandError("Provide a --from=.. language to redirect for")
if not host:
host = Site.objects.get_current().domain
if '://' not in host:
host = "http://{0}".format(host)
from_name = get_language_info(from_lang)['name']
to_name = get_language_info(to_lang)['name']
with translation.override(from_lang):
qs = (UrlNode.objects
.parent_site(site)
.non_polymorphic()
.translated(to_lang)
.order_by('translations___cached_url'))
if not qs:
raise CommandError("No URLs found for site {0} in {1}".format(site, from_name))
self.stdout.write('# Redirecting all translated {0} URLs to the {1} site\n'.format(from_name, to_name))
self.stdout.write("# Generated using {0}".format(" ".join(sys.argv)))
for page in qs:
from_url = page.default_url
with switch_language(page, to_lang):
to_url = page.get_absolute_url()
if from_url == to_url:
continue
if from_url.endswith('/'):
from_regexp = from_url.rstrip('/')
from_rule = "~ ^{0}(/|$)".format(from_regexp)
else:
from_regexp = from_url
from_rule = "= {0}".format(from_regexp)
if page.plugin.urls:
self.stdout.write("location {0} {{ rewrite ^{1}(.*)$ {2}{3}$1; }}\n".format(
from_rule, from_regexp, host, to_url.rstrip('/')
))
else:
self.stdout.write("location {0} {{ return 301 {1}{2}; }}\n".format(
from_rule, host, to_url
))
# Final redirect for all identical URLs
self.stdout.write("\n# Redirect all remaining and identical URls:\n")
self.stdout.write("location / {{ rewrite ^/(.*)$ {0}/$1 permanent; }}\n".format(host)) | fluent_pages/management/commands/make_language_redirects.py | from optparse import make_option
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand, CommandError, NoArgsCommand
from django.utils import translation
from django.utils.translation import get_language_info
import sys
from fluent_pages.models import UrlNode
from parler.utils.context import switch_language
class Command(NoArgsCommand):
"""
Generate rewrite/redirect rules for the web server to redirect a single unmaintained
language to another one.
"""
help = "Find all pages of a given language, and redirect to the canonical version."
args = "language"
option_list = BaseCommand.option_list + (
make_option('--format', default='nginx', help='Choose the output format, defaults to "nginx"'),
make_option('--site', default=int(settings.SITE_ID), help="Choose the site ID to "),
make_option('--from'),
make_option('--host'),
make_option('--to', default=settings.LANGUAGE_CODE),
)
def handle(self, *args, **options):
site = options['site']
host = options['host']
from_lang = options['from']
to_lang = options['to']
if not from_lang:
raise CommandError("Provide a --from=.. language to redirect for")
if not host:
host = Site.objects.get_current().domain
if '://' not in host:
host = "http://{0}".format(host)
from_name = get_language_info(from_lang)['name']
to_name = get_language_info(to_lang)['name']
with translation.override(from_lang):
qs = (UrlNode.objects
.parent_site(site)
.non_polymorphic()
.translated(to_lang)
.order_by('translations___cached_url'))
if not qs:
raise CommandError("No URLs found for site {0} in {1}".format(site, from_name))
self.stdout.write('# Redirecting all translated {0} URLs to the {1} site\n'.format(from_name, to_name))
self.stdout.write("# Generated using {0}".format(" ".join(sys.argv)))
for page in qs:
from_url = page.default_url
with switch_language(page, to_lang):
to_url = page.get_absolute_url()
if from_url == to_url:
continue
if from_url.endswith('/'):
from_regexp = from_url.rstrip('/')
from_rule = "~ ^{0}(/|$)".format(from_regexp)
else:
from_regexp = from_url
from_rule = "= {0}".format(from_regexp)
if page.plugin.urls:
self.stdout.write("location {0} {{ rewrite ^{1}(.*)$ {2}{3}$1; }}\n".format(
from_rule, from_regexp, host, to_url.rstrip('/')
))
else:
self.stdout.write("location {0} {{ return 301 {1}{2}; }}\n".format(
from_rule, host, to_url
))
# Final redirect for all identical URLs
self.stdout.write("\n# Redirect all remaining and identical URls:\n")
self.stdout.write("location / {{ rewrite ^/(.*)$ {0}/$1 permanent; }}\n".format(host)) | 0.410284 | 0.081483 |
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from match_info.match_info import MatchInfo, MatchInfoConfig
import pytest
class FakeBot:
def __init__(self):
self.match_info: MatchInfo = None
self.new_game_called = 0
self.new_game_with_mmr_called = 0
self.game_ended_called = 0
self.websocket_broadcast_called = 0
async def on_new_game(self, match_info: MatchInfo):
self.new_game_called += 1
async def on_new_game_with_mmr(self, match_info: MatchInfo):
self.new_game_with_mmr_called += 1
async def on_game_ended(self, match_info: MatchInfo):
self.game_ended_called += 1
async def on_rewind(self, match_info: MatchInfo):
pass
async def websocket_broadcast_json(self, payload: str):
self.websocket_broadcast_called += 1
menu_game_data = {
"isReplay": False,
"displayTime": 4.0,
"players": [
{"id": 1, "name": "BuRny", "type": "user", "race": "Prot", "result": "Undecided"},
{"id": 2, "name": "<NAME> (Very Easy)", "type": "computer", "race": "random", "result": "Undecided"},
],
}
menu_ui_data = {
"activeScreens": [
"ScreenBackgroundSC2/ScreenBackgroundSC2",
"ScreenNavigationSC2/ScreenNavigationSC2",
"ScreenForegroundSC2/ScreenForegroundSC2",
"ScreenMultiplayer/ScreenMultiplayer",
]
}
game_game_data = {
"isReplay": False,
"displayTime": 4.0,
"players": [
{"id": 1, "name": "BuRny", "type": "user", "race": "Prot", "result": "Undecided"},
{"id": 2, "name": "<NAME> (Very Easy)", "type": "computer", "race": "random", "result": "Undecided"},
],
}
game_ui_data = {"activeScreens": []}
replay_game_data = {
"isReplay": True,
"displayTime": 4.0,
"players": [
{"id": 1, "name": "BuRny", "type": "user", "race": "Prot", "result": "Undecided"},
{"id": 2, "name": "<NAME> (Very Easy)", "type": "computer", "race": "random", "result": "Undecided"},
],
}
replay_ui_data = {"activeScreens": []}
@pytest.mark.asyncio
async def test_match_info_menu():
fake_bot = FakeBot()
match_info = MatchInfo(bot=fake_bot)
# Emulate menu state
async def get_ui_data():
return menu_ui_data
async def get_game_data():
return menu_game_data
match_info.get_ui_data = get_ui_data
match_info.get_game_data = get_game_data
# Try to detect if a new game was started
await match_info.update_variables()
# Check if variables changed properly
assert match_info.game_location == "menu"
assert match_info.bot.new_game_called == 0
assert match_info.bot.new_game_with_mmr_called == 0
assert match_info.bot.game_ended_called == 0
assert match_info.bot.websocket_broadcast_called == 0
assert match_info.new_game_started is False
assert match_info.valid_game is False
assert match_info.end_of_game_detected is False
@pytest.mark.asyncio
async def test_match_info_menu_to_game():
fake_bot = FakeBot()
match_info = MatchInfo(bot=fake_bot)
match_info.DEBUG_MODE = True
# Emulate menu state
async def get_game_data():
return game_game_data
async def get_ui_data():
return game_ui_data
match_info.get_game_data = get_game_data
match_info.get_ui_data = get_ui_data
# Try to detect if a new game was started
await match_info.update_variables()
# Check that functions were called properly
assert match_info.game_location == "game"
assert match_info.bot.new_game_called == 1
assert match_info.bot.new_game_with_mmr_called == 1
assert match_info.bot.game_ended_called == 0
# After the MMR was gathered, the info will be sent via websockets to the overlay
assert match_info.bot.websocket_broadcast_called == 1
assert match_info.new_game_started is True
assert match_info.valid_game is True
assert match_info.end_of_game_detected is False
@pytest.mark.asyncio
async def test_match_info_game_to_replay():
fake_bot = FakeBot()
match_info = MatchInfo(bot=fake_bot)
match_info.DEBUG_MODE = True
# Emulate menu state
match_info.game_location = "game"
async def get_game_data():
return replay_game_data
async def get_ui_data():
return replay_ui_data
match_info.get_game_data = get_game_data
match_info.get_ui_data = get_ui_data
# Try to detect if a new game was started
await match_info.update_variables()
# Check that functions were called properly
assert match_info.game_location == "replay"
assert match_info.bot.new_game_called == 0
assert match_info.bot.new_game_with_mmr_called == 0
assert match_info.bot.game_ended_called == 0
assert match_info.bot.websocket_broadcast_called == 0
assert match_info.new_game_started is False
assert match_info.valid_game is False
assert match_info.end_of_game_detected is False
@pytest.mark.asyncio
async def test_match_info_game_to_menu():
fake_bot = FakeBot()
match_info = MatchInfo(bot=fake_bot)
match_info.DEBUG_MODE = True
# Emulate game state
match_info.game_location = "game"
async def get_game_data():
return menu_game_data
async def get_ui_data():
return menu_ui_data
match_info.get_game_data = get_game_data
match_info.get_ui_data = get_ui_data
# Try to detect if a new game was started
await match_info.update_variables()
# Check that functions were called properly
assert match_info.game_location == "menu"
assert match_info.bot.new_game_called == 0
assert match_info.bot.new_game_with_mmr_called == 0
assert match_info.bot.game_ended_called == 1
assert match_info.bot.websocket_broadcast_called == 0
assert match_info.new_game_started is False
assert match_info.valid_game is False
assert match_info.end_of_game_detected is True | test/test_match_info.py | import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from match_info.match_info import MatchInfo, MatchInfoConfig
import pytest
class FakeBot:
def __init__(self):
self.match_info: MatchInfo = None
self.new_game_called = 0
self.new_game_with_mmr_called = 0
self.game_ended_called = 0
self.websocket_broadcast_called = 0
async def on_new_game(self, match_info: MatchInfo):
self.new_game_called += 1
async def on_new_game_with_mmr(self, match_info: MatchInfo):
self.new_game_with_mmr_called += 1
async def on_game_ended(self, match_info: MatchInfo):
self.game_ended_called += 1
async def on_rewind(self, match_info: MatchInfo):
pass
async def websocket_broadcast_json(self, payload: str):
self.websocket_broadcast_called += 1
menu_game_data = {
"isReplay": False,
"displayTime": 4.0,
"players": [
{"id": 1, "name": "BuRny", "type": "user", "race": "Prot", "result": "Undecided"},
{"id": 2, "name": "<NAME> (Very Easy)", "type": "computer", "race": "random", "result": "Undecided"},
],
}
menu_ui_data = {
"activeScreens": [
"ScreenBackgroundSC2/ScreenBackgroundSC2",
"ScreenNavigationSC2/ScreenNavigationSC2",
"ScreenForegroundSC2/ScreenForegroundSC2",
"ScreenMultiplayer/ScreenMultiplayer",
]
}
game_game_data = {
"isReplay": False,
"displayTime": 4.0,
"players": [
{"id": 1, "name": "BuRny", "type": "user", "race": "Prot", "result": "Undecided"},
{"id": 2, "name": "<NAME> (Very Easy)", "type": "computer", "race": "random", "result": "Undecided"},
],
}
game_ui_data = {"activeScreens": []}
replay_game_data = {
"isReplay": True,
"displayTime": 4.0,
"players": [
{"id": 1, "name": "BuRny", "type": "user", "race": "Prot", "result": "Undecided"},
{"id": 2, "name": "<NAME> (Very Easy)", "type": "computer", "race": "random", "result": "Undecided"},
],
}
replay_ui_data = {"activeScreens": []}
@pytest.mark.asyncio
async def test_match_info_menu():
fake_bot = FakeBot()
match_info = MatchInfo(bot=fake_bot)
# Emulate menu state
async def get_ui_data():
return menu_ui_data
async def get_game_data():
return menu_game_data
match_info.get_ui_data = get_ui_data
match_info.get_game_data = get_game_data
# Try to detect if a new game was started
await match_info.update_variables()
# Check if variables changed properly
assert match_info.game_location == "menu"
assert match_info.bot.new_game_called == 0
assert match_info.bot.new_game_with_mmr_called == 0
assert match_info.bot.game_ended_called == 0
assert match_info.bot.websocket_broadcast_called == 0
assert match_info.new_game_started is False
assert match_info.valid_game is False
assert match_info.end_of_game_detected is False
@pytest.mark.asyncio
async def test_match_info_menu_to_game():
fake_bot = FakeBot()
match_info = MatchInfo(bot=fake_bot)
match_info.DEBUG_MODE = True
# Emulate menu state
async def get_game_data():
return game_game_data
async def get_ui_data():
return game_ui_data
match_info.get_game_data = get_game_data
match_info.get_ui_data = get_ui_data
# Try to detect if a new game was started
await match_info.update_variables()
# Check that functions were called properly
assert match_info.game_location == "game"
assert match_info.bot.new_game_called == 1
assert match_info.bot.new_game_with_mmr_called == 1
assert match_info.bot.game_ended_called == 0
# After the MMR was gathered, the info will be sent via websockets to the overlay
assert match_info.bot.websocket_broadcast_called == 1
assert match_info.new_game_started is True
assert match_info.valid_game is True
assert match_info.end_of_game_detected is False
@pytest.mark.asyncio
async def test_match_info_game_to_replay():
fake_bot = FakeBot()
match_info = MatchInfo(bot=fake_bot)
match_info.DEBUG_MODE = True
# Emulate menu state
match_info.game_location = "game"
async def get_game_data():
return replay_game_data
async def get_ui_data():
return replay_ui_data
match_info.get_game_data = get_game_data
match_info.get_ui_data = get_ui_data
# Try to detect if a new game was started
await match_info.update_variables()
# Check that functions were called properly
assert match_info.game_location == "replay"
assert match_info.bot.new_game_called == 0
assert match_info.bot.new_game_with_mmr_called == 0
assert match_info.bot.game_ended_called == 0
assert match_info.bot.websocket_broadcast_called == 0
assert match_info.new_game_started is False
assert match_info.valid_game is False
assert match_info.end_of_game_detected is False
@pytest.mark.asyncio
async def test_match_info_game_to_menu():
fake_bot = FakeBot()
match_info = MatchInfo(bot=fake_bot)
match_info.DEBUG_MODE = True
# Emulate game state
match_info.game_location = "game"
async def get_game_data():
return menu_game_data
async def get_ui_data():
return menu_ui_data
match_info.get_game_data = get_game_data
match_info.get_ui_data = get_ui_data
# Try to detect if a new game was started
await match_info.update_variables()
# Check that functions were called properly
assert match_info.game_location == "menu"
assert match_info.bot.new_game_called == 0
assert match_info.bot.new_game_with_mmr_called == 0
assert match_info.bot.game_ended_called == 1
assert match_info.bot.websocket_broadcast_called == 0
assert match_info.new_game_started is False
assert match_info.valid_game is False
assert match_info.end_of_game_detected is True | 0.374333 | 0.315578 |
import re
import json
from itertools import count
import datetime
from collections import defaultdict
from rdflib import Graph, Namespace, OWL, Literal, URIRef, BNode, XSD, RDFS, RDF
from rdfalchemy import rdfSubject, rdfSingle, rdfMultiple
bio = Namespace("http://purl.org/vocab/bio/0.1/")
schema = Namespace('http://schema.org/')
void = Namespace('http://rdfs.org/ns/void#')
foaf = Namespace('http://xmlns.com/foaf/0.1/')
sem = Namespace('http://semanticweb.cs.vu.nl/2009/11/sem/')
pnv = Namespace('https://w3id.org/pnv#')
JSONFILE = 'repertorium_van_ambtsdragers.json'
import sys
sys.path.append("../amsterdam-corporate-group-portraits-rdf")
from ga import *
def parseFunctionInfo(functionInfo, person, organizationSubEventDict,
identifier):
if functionInfo['role']:
term = functionInfo['role'].title().replace(' ', '')
roleTypePerson = RoleType(gaRoleType.term(term),
subClassOf=ga.Role,
label=[functionInfo['role']])
else:
roleTypePerson = RoleType(gaRoleType.term('Unknown'),
subClassOf=ga.Role,
label=["?"])
earliestBeginTimeStamp = functionInfo['hasEarliestBeginTimeStamp']
latestBeginTimeStamp = functionInfo['hasLatestBeginTimeStamp']
earliestEndTimeStamp = functionInfo['hasEarliestEndTimeStamp']
latestEndTimeStamp = functionInfo['hasLatestEndTimeStamp']
beginTimeStamp = functionInfo['hasBeginTimeStamp']
endTimeStamp = functionInfo['hasEndTimeStamp']
beginYearLabel = datetime.datetime.fromisoformat(
earliestBeginTimeStamp).year if earliestBeginTimeStamp else "?"
endYearLabel = datetime.datetime.fromisoformat(
latestEndTimeStamp).year if latestEndTimeStamp else "?"
earliestBeginTimeStamp = Literal(
earliestBeginTimeStamp,
datatype=XSD.date) if earliestBeginTimeStamp else None
latestBeginTimeStamp = Literal(
latestBeginTimeStamp,
datatype=XSD.date) if latestBeginTimeStamp else None
earliestEndTimeStamp = Literal(
earliestEndTimeStamp,
datatype=XSD.date) if earliestEndTimeStamp else None
latestEndTimeStamp = Literal(
latestEndTimeStamp, datatype=XSD.date) if latestEndTimeStamp else None
beginTimeStamp = Literal(beginTimeStamp,
datatype=XSD.date) if beginTimeStamp else None
endTimeStamp = Literal(endTimeStamp,
datatype=XSD.date) if endTimeStamp else None
if functionInfo['organization'].get('uri'):
term = URIRef(functionInfo['organization']['uri'])
elif functionInfo['organization']['name']:
term = functionInfo['organization']['name'].title().replace(' ', '')
term = gaOrganization.term(term)
else:
term = None
organization = Organization(
term,
label=[
Literal(functionInfo['organization']['name']
or "Onbekende organisatie",
lang='nl')
])
functionEvent = Event(
identifier,
label=[
Literal(
f"{person.label[0]} als {roleTypePerson.label[0]} bij {functionInfo['organization']['name']} ({beginYearLabel}-{endYearLabel})",
lang='nl'),
Literal(
f"{person.label[0]} as {roleTypePerson.label[0]} at {functionInfo['organization']['name']} ({beginYearLabel}-{endYearLabel})",
lang='en')
],
participationOf=[person, organization],
hasEarliestBeginTimeStamp=earliestBeginTimeStamp,
hasLatestBeginTimeStamp=latestBeginTimeStamp,
hasEarliestEndTimeStamp=earliestEndTimeStamp,
hasLatestEndTimeStamp=latestEndTimeStamp,
hasBeginTimeStamp=beginTimeStamp,
hasEndTimeStamp=endTimeStamp)
rolePerson = SpecificRoleType(
None,
type=roleTypePerson,
carriedIn=functionEvent,
carriedBy=person,
label=[
Literal(
f"{person.label[0]} in de rol van {roleTypePerson.label[0]}",
lang='nl'),
Literal(
f"{person.label[0]} in the role of {roleTypePerson.label[0]}",
lang='en')
])
roleTypeOrganization = SpecificRoleType(
None,
type=gaRoleType.AdministrativeOrganization
if functionInfo['organization']['name'] else gaRoleType.Unknown,
carriedIn=functionEvent,
carriedBy=organization,
label=[
Literal(
f"{functionInfo['organization']['name']} in de rol van Administratieve organisatie",
lang='nl'),
Literal(
f"{functionInfo['organization']['name']} in the role of Administrative organization",
lang='en')
])
organizationSubEventDict[organization].append(functionEvent)
return functionEvent, organizationSubEventDict
def _yearOrUnknown(isoDate):
    """Return the year of an ISO date string, or "?" when missing or "?"."""
    if isoDate and isoDate != "?":
        return datetime.datetime.fromisoformat(isoDate).year
    return "?"


def _eventYearLabel(dateInfo):
    """Human-readable year (or "ca. begin-end" range) for an event date dict.

    Prefers the exact 'hasTimeStamp'; otherwise derives a range from the
    earliest-begin / latest-end bounds, collapsing to a single year when
    both bounds agree.
    """
    if dateInfo['hasTimeStamp']:
        return datetime.datetime.fromisoformat(dateInfo['hasTimeStamp']).year
    begin = _yearOrUnknown(dateInfo['hasEarliestBeginTimeStamp'])
    end = _yearOrUnknown(dateInfo['hasLatestEndTimeStamp'])
    if begin == end:
        return begin
    return f"ca. {begin}-{end}"


def toRdf(filepath: str, target: str):
    """Convert the office-holder JSON at `filepath` to Turtle at `target`.

    Builds Person, Birth, Death and function Event resources per person,
    then one timeline Event per organization grouping its sub-events, and
    serializes the resulting graph.
    """
    g = rdfSubject.db = Graph()

    organizationSubEventDict = defaultdict(list)

    with open(filepath) as infile:
        data = json.load(infile)

    for p in data:
        pn = PersonName(None,
                        givenName=p['name']['givenName'],
                        baseSurname=p['name']['baseSurname'],
                        surnamePrefix=p['name']['surnamePrefix'],
                        literalName=p['name']['literalName'])

        birth = Birth(URIRef(p['uri'] + '#birth'), **p['birthDate'])
        birthYearLabel = _eventYearLabel(p['birthDate'])
        birth.label = [
            Literal(
                f"Geboorte van {p['name']['literalName']} ({birthYearLabel})",
                lang='nl'),
            Literal(f"Birth of {p['name']['literalName']} ({birthYearLabel})",
                    lang='en')
        ]
        roleBorn = Born(
            None,
            carriedIn=birth,
            label=[
                Literal(f"{p['name']['literalName']} in de rol van geborene",
                        lang='nl'),
                Literal(f"{p['name']['literalName']} in the role of born",
                        lang='en')
            ])

        death = Death(URIRef(p['uri'] + '#death'), **p['deathDate'])
        deathYearLabel = _eventYearLabel(p['deathDate'])
        # Bug fixed: the Dutch death label previously said "Geboorte van"
        # (birth), and the death year was computed into the reused
        # birthYearLabel variable.
        death.label = [
            Literal(
                f"Overlijden van {p['name']['literalName']} ({deathYearLabel})",
                lang='nl'),
            Literal(f"Death of {p['name']['literalName']} ({deathYearLabel})",
                    lang='en')
        ]
        roleDeceased = Deceased(
            None,
            carriedIn=death,
            label=[
                Literal(f"{p['name']['literalName']} in de rol van overledene",
                        lang='nl'),
                Literal(f"{p['name']['literalName']} in the role of deceased",
                        lang='en')
            ])

        lifeEvents = [birth, death]
        person = Person(URIRef(p['uri']),
                        hasName=[pn],
                        label=[pn.literalName],
                        birth=birth,
                        death=death)
        birth.principal = person
        birth.participationOf = [person]
        roleBorn.carriedBy = person
        death.principal = person
        death.participationOf = [person]
        roleDeceased.carriedBy = person

        for n, function in enumerate(p['functions'], 1):
            identifier = URIRef(p['uri'] + f'#event-{n}')
            functionEvent, organizationSubEventDict = parseFunctionInfo(
                function, person, organizationSubEventDict, identifier)
            lifeEvents.append(functionEvent)
        person.participatesIn = lifeEvents

    ## Organizations: merge the sub-events of Organization instances that
    ## share a resource URI, then attach one timeline Event per organization.
    organizationResUri2label = dict()
    organizationResUriSubEventDict = defaultdict(list)
    for organization, subEvents in organizationSubEventDict.items():
        organizationResUriSubEventDict[organization.resUri] += subEvents
        organizationResUri2label[organization.resUri] = organization.label[0]

    for organization, subEvents in organizationResUriSubEventDict.items():
        organizationEvent = Event(
            URIRef(str(organization) + '#event'),
            participationOf=[organization],
            subEvent=subEvents,
            label=[
                Literal(
                    f"Tijdlijn van {organizationResUri2label[organization]}",
                    lang='nl'),
                Literal(
                    f"Timeline of {organizationResUri2label[organization]}",
                    lang='en')
            ])
        for e in subEvents:
            e.subEventOf = organizationEvent
        Organization(organization).participatesIn = [organizationEvent
                                                     ] + subEvents

    # Bug fixed: the OWL prefix was bound as 'owĺ' (mojibake) before.
    g.bind('foaf', foaf)
    g.bind('schema', schema)
    g.bind('void', void)
    g.bind('owl', OWL)
    g.bind('xsd', XSD)
    g.bind('sem', sem)
    g.bind('bio', bio)
    g.bind('pnv', pnv)
    g.bind('ga', ga)

    print(f"Serializing to {target}")
    g.serialize(target, format='turtle')
def main():
    """CLI entry point: convert the ambtsdragers JSON to Turtle."""
    toRdf(filepath=JSONFILE, target='ambtsdragers.ttl')


if __name__ == "__main__":
    main()
import json
from itertools import count
import datetime
from collections import defaultdict
from rdflib import Graph, Namespace, OWL, Literal, URIRef, BNode, XSD, RDFS, RDF
from rdfalchemy import rdfSubject, rdfSingle, rdfMultiple
bio = Namespace("http://purl.org/vocab/bio/0.1/")
schema = Namespace('http://schema.org/')
void = Namespace('http://rdfs.org/ns/void#')
foaf = Namespace('http://xmlns.com/foaf/0.1/')
sem = Namespace('http://semanticweb.cs.vu.nl/2009/11/sem/')
pnv = Namespace('https://w3id.org/pnv#')
JSONFILE = 'repertorium_van_ambtsdragers.json'
import sys
sys.path.append("../amsterdam-corporate-group-portraits-rdf")
from ga import *
def parseFunctionInfo(functionInfo, person, organizationSubEventDict,
                      identifier):
    """Build an Event (plus role resources) for one function a person held.

    Creates the role type, the organization, the function Event and two
    SpecificRoleType resources that link person and organization into the
    event, appends the event to organizationSubEventDict[organization],
    and returns (functionEvent, organizationSubEventDict).
    """
    # Role type for the person: a named RoleType, or 'Unknown' when absent.
    if functionInfo['role']:
        term = functionInfo['role'].title().replace(' ', '')
        roleTypePerson = RoleType(gaRoleType.term(term),
                                  subClassOf=ga.Role,
                                  label=[functionInfo['role']])
    else:
        roleTypePerson = RoleType(gaRoleType.term('Unknown'),
                                  subClassOf=ga.Role,
                                  label=["?"])
    earliestBeginTimeStamp = functionInfo['hasEarliestBeginTimeStamp']
    latestBeginTimeStamp = functionInfo['hasLatestBeginTimeStamp']
    earliestEndTimeStamp = functionInfo['hasEarliestEndTimeStamp']
    latestEndTimeStamp = functionInfo['hasLatestEndTimeStamp']
    beginTimeStamp = functionInfo['hasBeginTimeStamp']
    endTimeStamp = functionInfo['hasEndTimeStamp']
    # Year labels for the human-readable event label ("?" when unknown).
    beginYearLabel = datetime.datetime.fromisoformat(
        earliestBeginTimeStamp).year if earliestBeginTimeStamp else "?"
    endYearLabel = datetime.datetime.fromisoformat(
        latestEndTimeStamp).year if latestEndTimeStamp else "?"
    # Wrap each ISO string as an xsd:date Literal (None when absent).
    earliestBeginTimeStamp = Literal(
        earliestBeginTimeStamp,
        datatype=XSD.date) if earliestBeginTimeStamp else None
    latestBeginTimeStamp = Literal(
        latestBeginTimeStamp,
        datatype=XSD.date) if latestBeginTimeStamp else None
    earliestEndTimeStamp = Literal(
        earliestEndTimeStamp,
        datatype=XSD.date) if earliestEndTimeStamp else None
    latestEndTimeStamp = Literal(
        latestEndTimeStamp, datatype=XSD.date) if latestEndTimeStamp else None
    beginTimeStamp = Literal(beginTimeStamp,
                             datatype=XSD.date) if beginTimeStamp else None
    endTimeStamp = Literal(endTimeStamp,
                           datatype=XSD.date) if endTimeStamp else None
    # Organization term: explicit URI > name-derived term > None (blank node).
    if functionInfo['organization'].get('uri'):
        term = URIRef(functionInfo['organization']['uri'])
    elif functionInfo['organization']['name']:
        term = functionInfo['organization']['name'].title().replace(' ', '')
        term = gaOrganization.term(term)
    else:
        term = None
    organization = Organization(
        term,
        label=[
            Literal(functionInfo['organization']['name']
                    or "Onbekende organisatie",
                    lang='nl')
        ])
    functionEvent = Event(
        identifier,
        label=[
            Literal(
                f"{person.label[0]} als {roleTypePerson.label[0]} bij {functionInfo['organization']['name']} ({beginYearLabel}-{endYearLabel})",
                lang='nl'),
            Literal(
                f"{person.label[0]} as {roleTypePerson.label[0]} at {functionInfo['organization']['name']} ({beginYearLabel}-{endYearLabel})",
                lang='en')
        ],
        participationOf=[person, organization],
        hasEarliestBeginTimeStamp=earliestBeginTimeStamp,
        hasLatestBeginTimeStamp=latestBeginTimeStamp,
        hasEarliestEndTimeStamp=earliestEndTimeStamp,
        hasLatestEndTimeStamp=latestEndTimeStamp,
        hasBeginTimeStamp=beginTimeStamp,
        hasEndTimeStamp=endTimeStamp)
    # The two SpecificRoleType locals are never read again; constructing them
    # adds the role triples to the graph as a side effect.
    rolePerson = SpecificRoleType(
        None,
        type=roleTypePerson,
        carriedIn=functionEvent,
        carriedBy=person,
        label=[
            Literal(
                f"{person.label[0]} in de rol van {roleTypePerson.label[0]}",
                lang='nl'),
            Literal(
                f"{person.label[0]} in the role of {roleTypePerson.label[0]}",
                lang='en')
        ])
    roleTypeOrganization = SpecificRoleType(
        None,
        type=gaRoleType.AdministrativeOrganization
        if functionInfo['organization']['name'] else gaRoleType.Unknown,
        carriedIn=functionEvent,
        carriedBy=organization,
        label=[
            Literal(
                f"{functionInfo['organization']['name']} in de rol van Administratieve organisatie",
                lang='nl'),
            Literal(
                f"{functionInfo['organization']['name']} in the role of Administrative organization",
                lang='en')
        ])
    organizationSubEventDict[organization].append(functionEvent)
    return functionEvent, organizationSubEventDict
def _yearOrUnknown(isoDate):
    """Return the year of an ISO date string, or "?" when missing or "?"."""
    if isoDate and isoDate != "?":
        return datetime.datetime.fromisoformat(isoDate).year
    return "?"


def _eventYearLabel(dateInfo):
    """Human-readable year (or "ca. begin-end" range) for an event date dict.

    Prefers the exact 'hasTimeStamp'; otherwise derives a range from the
    earliest-begin / latest-end bounds, collapsing to a single year when
    both bounds agree.
    """
    if dateInfo['hasTimeStamp']:
        return datetime.datetime.fromisoformat(dateInfo['hasTimeStamp']).year
    begin = _yearOrUnknown(dateInfo['hasEarliestBeginTimeStamp'])
    end = _yearOrUnknown(dateInfo['hasLatestEndTimeStamp'])
    if begin == end:
        return begin
    return f"ca. {begin}-{end}"


def toRdf(filepath: str, target: str):
    """Convert the office-holder JSON at `filepath` to Turtle at `target`.

    Builds Person, Birth, Death and function Event resources per person,
    then one timeline Event per organization grouping its sub-events, and
    serializes the resulting graph.
    """
    g = rdfSubject.db = Graph()

    organizationSubEventDict = defaultdict(list)

    with open(filepath) as infile:
        data = json.load(infile)

    for p in data:
        pn = PersonName(None,
                        givenName=p['name']['givenName'],
                        baseSurname=p['name']['baseSurname'],
                        surnamePrefix=p['name']['surnamePrefix'],
                        literalName=p['name']['literalName'])

        birth = Birth(URIRef(p['uri'] + '#birth'), **p['birthDate'])
        birthYearLabel = _eventYearLabel(p['birthDate'])
        birth.label = [
            Literal(
                f"Geboorte van {p['name']['literalName']} ({birthYearLabel})",
                lang='nl'),
            Literal(f"Birth of {p['name']['literalName']} ({birthYearLabel})",
                    lang='en')
        ]
        roleBorn = Born(
            None,
            carriedIn=birth,
            label=[
                Literal(f"{p['name']['literalName']} in de rol van geborene",
                        lang='nl'),
                Literal(f"{p['name']['literalName']} in the role of born",
                        lang='en')
            ])

        death = Death(URIRef(p['uri'] + '#death'), **p['deathDate'])
        deathYearLabel = _eventYearLabel(p['deathDate'])
        # Bug fixed: the Dutch death label previously said "Geboorte van"
        # (birth), and the death year was computed into the reused
        # birthYearLabel variable.
        death.label = [
            Literal(
                f"Overlijden van {p['name']['literalName']} ({deathYearLabel})",
                lang='nl'),
            Literal(f"Death of {p['name']['literalName']} ({deathYearLabel})",
                    lang='en')
        ]
        roleDeceased = Deceased(
            None,
            carriedIn=death,
            label=[
                Literal(f"{p['name']['literalName']} in de rol van overledene",
                        lang='nl'),
                Literal(f"{p['name']['literalName']} in the role of deceased",
                        lang='en')
            ])

        lifeEvents = [birth, death]
        person = Person(URIRef(p['uri']),
                        hasName=[pn],
                        label=[pn.literalName],
                        birth=birth,
                        death=death)
        birth.principal = person
        birth.participationOf = [person]
        roleBorn.carriedBy = person
        death.principal = person
        death.participationOf = [person]
        roleDeceased.carriedBy = person

        for n, function in enumerate(p['functions'], 1):
            identifier = URIRef(p['uri'] + f'#event-{n}')
            functionEvent, organizationSubEventDict = parseFunctionInfo(
                function, person, organizationSubEventDict, identifier)
            lifeEvents.append(functionEvent)
        person.participatesIn = lifeEvents

    ## Organizations: merge the sub-events of Organization instances that
    ## share a resource URI, then attach one timeline Event per organization.
    organizationResUri2label = dict()
    organizationResUriSubEventDict = defaultdict(list)
    for organization, subEvents in organizationSubEventDict.items():
        organizationResUriSubEventDict[organization.resUri] += subEvents
        organizationResUri2label[organization.resUri] = organization.label[0]

    for organization, subEvents in organizationResUriSubEventDict.items():
        organizationEvent = Event(
            URIRef(str(organization) + '#event'),
            participationOf=[organization],
            subEvent=subEvents,
            label=[
                Literal(
                    f"Tijdlijn van {organizationResUri2label[organization]}",
                    lang='nl'),
                Literal(
                    f"Timeline of {organizationResUri2label[organization]}",
                    lang='en')
            ])
        for e in subEvents:
            e.subEventOf = organizationEvent
        Organization(organization).participatesIn = [organizationEvent
                                                     ] + subEvents

    # Bug fixed: the OWL prefix was bound as 'owĺ' (mojibake) before.
    g.bind('foaf', foaf)
    g.bind('schema', schema)
    g.bind('void', void)
    g.bind('owl', OWL)
    g.bind('xsd', XSD)
    g.bind('sem', sem)
    g.bind('bio', bio)
    g.bind('pnv', pnv)
    g.bind('ga', ga)

    print(f"Serializing to {target}")
    g.serialize(target, format='turtle')
def main():
    """CLI entry point: convert the ambtsdragers JSON to Turtle."""
    toRdf(filepath=JSONFILE, target='ambtsdragers.ttl')


if __name__ == "__main__":
    main()
from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from dotenv import load_dotenv
import json
import datetime
import os
import requests
load_dotenv()
DATABASE_PASSWORD = os.getenv("DATABASE_PASSWORD")
API_URL = os.getenv("API_URL")
travel_catalog_url = API_URL + ":5001/catalog_items"
app = Flask(__name__)
app.config['JSON_SORT_KEYS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://traval:' + \
DATABASE_PASSWORD + \
'@traval.clkje4jkvizo.ap-southeast-1.rds.amazonaws.com:3306/traval_orders'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ENGINE_OPTIONS'] = {
'pool_size': 20,
'pool_recycle': 3600
}
CORS(app)
db = SQLAlchemy(app)
class Order(db.Model):
    """An order placed by a user; its line items live in `order_items`."""
    __tablename__ = 'orders'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, nullable=False)
    # NOTE(review): this attribute shadows the stdlib `datetime` module name
    # inside the class body.
    datetime = db.Column(db.DateTime, nullable=False)
    status = db.Column(db.String(20), nullable=False)

    def __init__(self, id, user_id, datetime, status):
        self.id = id
        self.user_id = user_id
        self.datetime = datetime
        self.status = status

    def json(self):
        """Return a JSON-serializable dict representation of this order."""
        return {"id": self.id, "user_id": self.user_id, "datetime": self.datetime, "status": self.status}
class OrderItem(db.Model):
    """A line item belonging to an order.

    Only id, order_id, item_id, price and quantity are persisted columns;
    title, photo_urls (and later description, review, voucher_guid) are
    transient attributes attached at read time from other services.
    """
    __tablename__ = 'order_items'
    id = db.Column(db.Integer, primary_key=True)
    order_id = db.Column(db.Integer, nullable=False)
    item_id = db.Column(db.Integer, nullable=False)
    price = db.Column(db.Float(precision=2), nullable=False)
    quantity = db.Column(db.Integer, nullable=False)

    def __init__(self, id, order_id, item_id, title, price, quantity, photo_urls):
        self.id = id
        self.order_id = order_id
        self.item_id = item_id
        self.title = title  # transient, not a column
        self.price = price
        self.quantity = quantity
        self.photo_urls = photo_urls  # transient, not a column

    def json(self):
        """Serializable dict; includes voucher_guid/review only when attached."""
        json_string = {"id": self.id, "item_id": self.item_id, "title": self.title, "price": self.price, "quantity": self.quantity, "photo_urls": self.photo_urls}
        if hasattr(self, 'voucher_guid'):
            json_string.update({"voucher_guid": self.voucher_guid})
        if hasattr(self, 'review'):
            json_string.update({"review": self.review})
        return json_string
class CartItem(db.Model):
    """A (user_id, item_id) cart row; only quantity is stored beyond the keys."""
    __tablename__ = 'cart_items'
    user_id = db.Column(db.Integer, primary_key=True, nullable=False)
    item_id = db.Column(db.Integer, primary_key=True, nullable=False)
    quantity = db.Column(db.Integer, nullable=False)

    def __init__(self, user_id, item_id, title, price, quantity):
        self.user_id = user_id
        self.item_id = item_id
        self.title = title  # transient — added on-the-fly from the catalog
        self.price = price  # transient — added on-the-fly from the catalog
        self.quantity = quantity

    def json(self):
        """Serializable dict; title/price must have been attached beforehand."""
        return {"item_id": self.item_id, "title": self.title, "price": self.price, "quantity": self.quantity}
def retrieve_order_by_ID_in_json(id):
    """Look up one order by primary key; JSON body, or 404 when absent."""
    match = Order.query.filter_by(id=id).first()
    if match is None:
        return jsonify({"status": "error", "message": "Order not found."}), 404
    return jsonify(match.json())
def retrieve_order_in_json():
    """Return every order as {"order_items": [...]}; 404 when there are none.

    Bug fixed: the original truth-tested the wrapper dict (always truthy,
    even with an empty list inside), so the 404 branch was unreachable.
    """
    orders = [item.json() for item in Order.query.all()]
    if orders:
        return jsonify({"order_items": orders})
    return jsonify({"status": "error", "message": "Order not found."}), 404
# ADD or Update Cart Item
@app.route("/orders/cart/update", methods=['POST'])
def add_cart_item():
    """Set the quantity of a cart item; quantity 0 removes the row.

    Expects JSON with user_id, item_id and quantity. Returns the echoed
    payload with 201 on success, or an error JSON with 500 on failure.
    """
    data = request.get_json()
    # Handle empty JSON query
    if not data:
        return jsonify({"status": "error", "message": "No cart item found in the request."}), 500
    if int(data["quantity"]) < 0:
        return jsonify({"status": "error", "message": "Item quantity cannot be less than 0."}), 500
    cart_item = CartItem(title=None, price=None, **data)
    try:
        if int(data["quantity"]) == 0:
            # Quantity zero means removal.
            CartItem.query.filter_by(user_id=data["user_id"]).filter_by(item_id=data["item_id"]).delete()
        else:
            # Insert, or update when the row already exists.
            db.session.merge(cart_item)
        # Bug fixed: commit was only reached on the merge branch, so
        # deletions were never persisted. Commit on both paths.
        db.session.commit()
    except Exception:
        # Bug fixed: `except error:` referenced an undefined name and would
        # itself raise NameError instead of returning the error response.
        db.session.rollback()
        return jsonify({"status": "error", "message": "An error occurred when adding the item to cart."}), 500
    return jsonify({"user_id": data["user_id"], "item_id": data["item_id"], "quantity": data["quantity"]}), 201
# Clear cart
@app.route("/orders/cart/clear", methods=['POST'])
def clear_cart():
    """Delete every cart row belonging to the posted user_id."""
    data = request.get_json()
    # Handle empty JSON query
    if not data:
        return jsonify({"status": "error", "message": "No JSON data received in the request."}), 500
    if 'user_id' not in data:
        return jsonify({"status": "error", "message": "The key user_id is not found in the request."}), 500
    try:
        CartItem.query.filter_by(user_id=data["user_id"]).delete()
        db.session.commit()
    except Exception:
        # Bug fixed: `except error:` referenced an undefined name and raised
        # NameError instead of producing the error response.
        db.session.rollback()
        return jsonify({"status": "error", "message": "An error occurred when clearing the user's cart"}), 500
    return jsonify({"status": "success", "message": "Cart is cleared."}), 200
# List all cart items by user
@app.route("/orders/cart/<string:user_id>")
def get_cart_items_by_user_id(user_id):
    """Return the user's cart contents plus the cart's total price."""
    cart = get_cart_items(user_id)
    grand_total = sum(entry.price * entry.quantity for entry in cart)
    payload = {
        "items": [entry.json() for entry in cart],
        "total_price": grand_total,
    }
    return jsonify(payload), 200
# Helper: fetch cart rows and enrich them from the catalog service
def get_cart_items(user_id):
    """Return the user's CartItem rows, enriched in place.

    Attaches title, description and price fetched from the catalog
    service to each row before returning the list.
    """
    rows = CartItem.query.filter_by(user_id=user_id).all()
    for row in rows:
        response = requests.get(f"{travel_catalog_url}/{row.item_id}")
        catalog = json.loads(response.text)
        row.title = catalog["title"]
        row.description = catalog["description"]
        row.price = catalog["price"]
    return rows
# UC1
# Creating order
""" Test POST with this format
{
"user_id": "2",
"status": "Pending Payment",
"items": {
"item_id": "3",
"quantity": "4",
}
}
"""
@app.route("/orders", methods=['POST'])
def create_order():
    """Create an order (status 'Pending Payment') plus its order items.

    Expects JSON: {"user_id": ..., "items": [{"item_id": ..., "quantity": ...}]}.
    Prices are snapshotted from the catalog service at creation time.
    """
    data = request.get_json()
    # Handle empty JSON query
    if not data:
        return jsonify({"status": "error", "message": "No order details!"}), 500
    # Validate before touching the database.
    if len(data['items']) <= 0:
        return jsonify({"status": "error", "message": "Unable to create order as there are no items in this order list."}), 500
    time_now = datetime.datetime.now()  # order creation timestamp
    order = Order(id=None, user_id=data['user_id'], datetime=time_now, status='Pending Payment')
    try:
        db.session.add(order)
        # Commit now so the auto-generated order.id is available for items.
        db.session.commit()
        for item in data["items"]:
            r = requests.get(travel_catalog_url + "/" + str(item['item_id']))
            catalog_item = json.loads(r.text)
            price = catalog_item['price']
            order_item = OrderItem(id=None, title=None, photo_urls=None,
                                   order_id=order.id, item_id=item['item_id'],
                                   price=price, quantity=item['quantity'])
            db.session.add(order_item)
        db.session.commit()
    except Exception as error:
        # Bug fixed: the rollback was commented out, leaving the session in a
        # failed state (and a header-only order) after an item error.
        db.session.rollback()
        print(error)
        return jsonify({"status": "error", "message": "An error occurred when creating the order."}), 500
    return jsonify(order.json()), 201
@app.route("/orders/<string:id>", methods=['POST'])
def update_order(id):
    """Update an existing order's status from the posted JSON."""
    data = request.get_json()
    # Handle empty JSON query
    if not data:
        return jsonify({"status": "error", "message": "No order details were given."}), 500
    try:
        # NOTE(review): this transient Order only carries id/status; the JSON
        # returned below therefore has null user_id and datetime — confirm
        # callers do not rely on those fields from this endpoint.
        order = Order(id=id, status=data['status'], user_id=None, datetime=None)
        db.session.query(Order).filter_by(id=order.id).update({"status": order.status})
        db.session.commit()
    except Exception as error:
        return jsonify({"status": "error", "message": "An error occurred when updating the order."}), 500
    return jsonify(order.json()), 201
# UC2
# View all information of an order, enriched with catalog data
@app.route("/orders/<string:id>")
def view_order(id):
    """Return one order with its enriched items and total price (404 if absent)."""
    order = Order.query.filter_by(id=id).first()
    if order is None:
        return jsonify({"message": "Order not found."}), 404
    enriched = get_order_items(id)
    response = {
        "id": order.id,
        "user_id": order.user_id,
        "datetime": order.datetime,
        "status": order.status,
        "total_price": sum(e.price * e.quantity for e in enriched),
        "items": [e.json() for e in enriched],
    }
    return jsonify(response), 200
def get_order_items(order_id):
    """Return the order's OrderItem rows enriched from three services.

    Mutates each OrderItem in place: attaches title/description/photo_urls
    from the catalog service, the item's review (if any) from the reviews
    service, and a voucher GUID (if any) from the voucher service.
    """
    items = OrderItem.query.filter_by(order_id=order_id).all()
    for item in items:
        item_id = str(item.item_id)
        voucher_guid = get_voucher(str(item.id)) # Get by order item ID rather than item ID
        # extract json data from catalog
        r = requests.get(travel_catalog_url + "/" + item_id)
        data = json.loads(r.text)
        item.title = data["title"]
        item.description = data["description"]
        item.photo_urls = data["photo_urls"]
        # Grab review of this item (reviews service keys on order item ID)
        review = None
        r = requests.get(API_URL + ":5005/reviews/order_item_id/" + str(item.id)) # Using order item ID here
        data = json.loads(r.text)
        if data:
            review = data
        item.review = review
        # Attach the voucher only when one exists (json() checks hasattr)
        if voucher_guid:
            item.voucher_guid = voucher_guid
    return items
def get_voucher(order_item_id):
    """Return the voucher GUID for an order item, or None when absent."""
    response = requests.get(API_URL + ":5004/vouchers/item/" + order_item_id)
    payload = json.loads(response.text)
    return payload.get("guid")
# UC3
# Retrieve all paid orders for a user
@app.route("/orders/user/<string:user_id>")
def get_paid_orders_by_user(user_id):
    """List every order of this user whose status is 'Paid', with items."""
    paid_orders = Order.query.filter_by(user_id=user_id).filter_by(status='Paid').all()
    result = []
    for o in paid_orders:
        enriched = get_order_items(o.id)
        result.append({
            "id": o.id,
            "user_id": o.user_id,
            "datetime": o.datetime,
            "status": o.status,
            "total_price": sum(e.price * e.quantity for e in enriched),
            "items": [e.json() for e in enriched],
        })
    return jsonify(result), 200
# Get order item by ID
@app.route("/orders/item/<string:id>")
def get_order_item(id):
    """Return a single order item, enriched with catalog metadata (404 if absent)."""
    item = OrderItem.query.filter_by(id=id).first()
    if item is None:
        return jsonify({"message": "An order item is not found with this ID."}), 404
    response = requests.get(travel_catalog_url + "/" + str(item.item_id))
    catalog = json.loads(response.text)
    item.title = catalog["title"]
    item.description = catalog["description"]
    item.photo_urls = catalog["photo_urls"]
    return jsonify(item.json()), 200
# Get entire order details by order item ID
@app.route("/orders/item/<string:id>/all")
def get_order_details_by_order_item_id(id):
    """Return the full order (with enriched items) containing this order item."""
    anchor = OrderItem.query.filter_by(id=id).first()
    if anchor is None:
        return jsonify({"message": "An order item is not found with this ID."}), 404
    order = Order.query.filter_by(id=anchor.order_id).first()
    enriched = get_order_items(anchor.order_id)
    payload = {
        "id": order.id,
        "user_id": order.user_id,
        "datetime": order.datetime,
        "status": order.status,
        "total_price": sum(e.price * e.quantity for e in enriched),
        "items": [e.json() for e in enriched],
    }
    return jsonify(payload), 200
# Get all orders
@app.route("/orders")
def get_all_orders():
    """Return every order in the system, each with enriched items and a total."""
    payload = []
    for order in Order.query.all():
        enriched = get_order_items(order.id)
        payload.append({
            "id": order.id,
            "user_id": order.user_id,
            "datetime": order.datetime,
            "status": order.status,
            "total_price": sum(e.price * e.quantity for e in enriched),
            "items": [e.json() for e in enriched],
        })
    return jsonify(payload), 200
if __name__ == '__main__':
    # Dev server entry point for the orders service.
    app.run(host='0.0.0.0', port=5002, debug=True)
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from dotenv import load_dotenv
import json
import datetime
import os
import requests
load_dotenv()
DATABASE_PASSWORD = os.getenv("DATABASE_PASSWORD")
API_URL = os.getenv("API_URL")
travel_catalog_url = API_URL + ":5001/catalog_items"
app = Flask(__name__)
app.config['JSON_SORT_KEYS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://traval:' + \
DATABASE_PASSWORD + \
'@traval.clkje4jkvizo.ap-southeast-1.rds.amazonaws.com:3306/traval_orders'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ENGINE_OPTIONS'] = {
'pool_size': 20,
'pool_recycle': 3600
}
CORS(app)
db = SQLAlchemy(app)
class Order(db.Model):
__tablename__ = 'orders'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, nullable=False)
datetime = db.Column(db.DateTime, nullable=False)
status = db.Column(db.String(20), nullable=False)
def __init__(self, id, user_id, datetime, status):
self.id = id
self.user_id = user_id
self.datetime = datetime
self.status = status
def json(self):
return {"id": self.id, "user_id": self.user_id, "datetime": self.datetime, "status": self.status}
class OrderItem(db.Model):
__tablename__ = 'order_items'
id = db.Column(db.Integer, primary_key=True)
order_id = db.Column(db.Integer, nullable=False)
item_id = db.Column(db.Integer, nullable=False)
price = db.Column(db.Float(precision=2), nullable=False)
quantity = db.Column(db.Integer, nullable=False)
def __init__(self, id, order_id, item_id, title, price, quantity, photo_urls):
self.id = id
self.order_id = order_id
self.item_id = item_id
self.title = title
self.price = price
self.quantity = quantity
self.photo_urls = photo_urls
def json(self):
json_string = {"id": self.id, "item_id": self.item_id, "title": self.title, "price": self.price, "quantity": self.quantity, "photo_urls": self.photo_urls}
if hasattr(self, 'voucher_guid'):
json_string.update({"voucher_guid": self.voucher_guid})
if hasattr(self, 'review'):
json_string.update({"review": self.review})
return json_string
class CartItem(db.Model):
__tablename__ = 'cart_items'
user_id = db.Column(db.Integer, primary_key=True, nullable=False)
item_id = db.Column(db.Integer, primary_key=True, nullable=False)
quantity = db.Column(db.Integer, nullable=False)
def __init__(self, user_id, item_id, title, price, quantity):
self.user_id = user_id
self.item_id = item_id
self.title = title # Added on-the-fly
self.price = price # Added on-the-fly
self.quantity = quantity
def json(self):
return {"item_id": self.item_id, "title": self.title, "price": self.price, "quantity": self.quantity}
def retrieve_order_by_ID_in_json(id):
order_item = Order.query.filter_by(id=id).first()
if order_item:
return jsonify(order_item.json())
return jsonify({"status": "error", "message": "Order not found."}), 404
def retrieve_order_in_json():
order_item = {"order_items": [item.json() for item in Order.query.all()]}
if order_item:
return jsonify(order_item)
return jsonify({"status": "error", "message": "Order not found."}), 404
# ADD or Update Cart Item
@app.route("/orders/cart/update", methods=['POST'])
def add_cart_item():
    """Set the quantity of a cart item; quantity 0 removes the row.

    Expects JSON with user_id, item_id and quantity. Returns the echoed
    payload with 201 on success, or an error JSON with 500 on failure.
    """
    data = request.get_json()
    # Handle empty JSON query
    if not data:
        return jsonify({"status": "error", "message": "No cart item found in the request."}), 500
    if int(data["quantity"]) < 0:
        return jsonify({"status": "error", "message": "Item quantity cannot be less than 0."}), 500
    cart_item = CartItem(title=None, price=None, **data)
    try:
        if int(data["quantity"]) == 0:
            # Quantity zero means removal.
            CartItem.query.filter_by(user_id=data["user_id"]).filter_by(item_id=data["item_id"]).delete()
        else:
            # Insert, or update when the row already exists.
            db.session.merge(cart_item)
        # Bug fixed: commit was only reached on the merge branch, so
        # deletions were never persisted. Commit on both paths.
        db.session.commit()
    except Exception:
        # Bug fixed: `except error:` referenced an undefined name and would
        # itself raise NameError instead of returning the error response.
        db.session.rollback()
        return jsonify({"status": "error", "message": "An error occurred when adding the item to cart."}), 500
    return jsonify({"user_id": data["user_id"], "item_id": data["item_id"], "quantity": data["quantity"]}), 201
# Clear cart
@app.route("/orders/cart/clear", methods=['POST'])
def clear_cart():
    """Delete every cart row belonging to the posted user_id."""
    data = request.get_json()
    # Handle empty JSON query
    if not data:
        return jsonify({"status": "error", "message": "No JSON data received in the request."}), 500
    if 'user_id' not in data:
        return jsonify({"status": "error", "message": "The key user_id is not found in the request."}), 500
    try:
        CartItem.query.filter_by(user_id=data["user_id"]).delete()
        db.session.commit()
    except Exception:
        # Bug fixed: `except error:` referenced an undefined name and raised
        # NameError instead of producing the error response.
        db.session.rollback()
        return jsonify({"status": "error", "message": "An error occurred when clearing the user's cart"}), 500
    return jsonify({"status": "success", "message": "Cart is cleared."}), 200
# List all card items by user
@app.route("/orders/cart/<string:user_id>")
def get_cart_items_by_user_id(user_id):
items = get_cart_items(user_id)
total_price = sum([item.price * item.quantity for item in items])
items = [item.json() for item in items]
#store in dictionary
reply = {
"items": items,
"total_price": total_price
}
return jsonify(reply), 200
# Helper function to get cart items (with title, desc and price)
def get_cart_items(user_id):
items = CartItem.query.filter_by(user_id=user_id).all()
for item in items:
item_id = str(item.item_id)
# extract json data from catalog
r = requests.get(travel_catalog_url + "/" + item_id)
data = json.loads(r.text)
item.title = data["title"]
item.description = data["description"]
item.price = data["price"]
return items
# UC1
# Creating order
""" Test POST with this format
{
"user_id": "2",
"status": "Pending Payment",
"items": {
"item_id": "3",
"quantity": "4",
}
}
"""
@app.route("/orders", methods=['POST'])
def create_order():
    """Create an order (status 'Pending Payment') plus its order items.

    Expects JSON: {"user_id": ..., "items": [{"item_id": ..., "quantity": ...}]}.
    Prices are snapshotted from the catalog service at creation time.
    """
    data = request.get_json()
    # Handle empty JSON query
    if not data:
        return jsonify({"status": "error", "message": "No order details!"}), 500
    # Validate before touching the database.
    if len(data['items']) <= 0:
        return jsonify({"status": "error", "message": "Unable to create order as there are no items in this order list."}), 500
    time_now = datetime.datetime.now()  # order creation timestamp
    order = Order(id=None, user_id=data['user_id'], datetime=time_now, status='Pending Payment')
    try:
        db.session.add(order)
        # Commit now so the auto-generated order.id is available for items.
        db.session.commit()
        for item in data["items"]:
            r = requests.get(travel_catalog_url + "/" + str(item['item_id']))
            catalog_item = json.loads(r.text)
            price = catalog_item['price']
            order_item = OrderItem(id=None, title=None, photo_urls=None,
                                   order_id=order.id, item_id=item['item_id'],
                                   price=price, quantity=item['quantity'])
            db.session.add(order_item)
        db.session.commit()
    except Exception as error:
        # Bug fixed: the rollback was commented out, leaving the session in a
        # failed state (and a header-only order) after an item error.
        db.session.rollback()
        print(error)
        return jsonify({"status": "error", "message": "An error occurred when creating the order."}), 500
    return jsonify(order.json()), 201
@app.route("/orders/<string:id>", methods=['POST'])
def update_order(id):
data = request.get_json()
# Handle empty JSON query
if not data:
return jsonify({"status": "error", "message": "No order details were given."}), 500
try:
order = Order(id=id, status=data['status'], user_id=None, datetime=None)
db.session.query(Order).filter_by(id=order.id).update({"status": order.status})
db.session.commit()
except Exception as error:
return jsonify({"status": "error", "message": "An error occurred when updating the order."}), 500
return jsonify(order.json()), 201
# USC2
# view all information of order + catalog
@app.route("/orders/<string:id>")
def view_order(id):
order = Order.query.filter_by(id=id).first()
if not order:
return jsonify({"message": "Order not found."}), 404
items = get_order_items(id)
total_price = sum([item.price * item.quantity for item in items])
items = [item.json() for item in items]
#store in dictionary
reply = {
"id": order.id,
"user_id": order.user_id,
"datetime": order.datetime,
"status": order.status,
#"currency": "SGD",
"total_price": total_price,
"items": items
}
return jsonify(reply), 200
def get_order_items(order_id):
    """Fetch all items of an order and enrich them with catalog, review and
    voucher data pulled from the other microservices.

    :param order_id: ID of the parent order
    :return: list of OrderItem ORM objects with extra attributes
             (title, description, photo_urls, review, voucher_guid) set
             dynamically on each instance
    """
    items = OrderItem.query.filter_by(order_id=order_id).all()
    # NOTE(review): no error handling here — a failed catalog/review request
    # (or non-JSON response) raises and propagates to the caller; confirm
    # callers rely on their own try/except for this case.
    for item in items:
        item_id = str(item.item_id)
        voucher_guid = get_voucher(str(item.id)) # Get by order item ID rather than item ID
        # extract json data from catalog
        r = requests.get(travel_catalog_url + "/" + item_id)
        catalog_data = json.loads(r.text) if False else None  # placeholder removed below
def get_voucher(order_item_id):
    """Return the voucher GUID attached to an order item, or None."""
    response = requests.get(API_URL + ":5004/vouchers/item/" + order_item_id)
    payload = json.loads(response.text)
    if 'guid' in payload:
        return payload["guid"]
    return None
# UC3
# Retrieving all paid orders by user ID
@app.route("/orders/user/<string:user_id>")
def get_paid_orders_by_user(user_id):
    """List every order of a user whose status is 'Paid'."""
    # Show only paid orders
    paid_orders = Order.query.filter_by(user_id=user_id).filter_by(status='Paid').all()
    payload = []
    for current in paid_orders:
        order_items = get_order_items(current.id)
        payload.append({
            "id": current.id,
            "user_id": current.user_id,
            "datetime": current.datetime,
            "status": current.status,
            #"currency": "SGD",
            "total_price": sum(i.price * i.quantity for i in order_items),
            "items": [i.json() for i in order_items],
        })
    return jsonify(payload), 200
# Get order item by ID
@app.route("/orders/item/<string:id>")
def get_order_item(id):
    """Return a single order item enriched with catalog metadata."""
    item = OrderItem.query.filter_by(id=id).first()
    if item is None:
        return jsonify({"message": "An order item is not found with this ID."}), 404
    response = requests.get(travel_catalog_url + "/" + str(item.item_id))
    catalog = json.loads(response.text)
    item.title = catalog["title"]
    item.description = catalog["description"]
    item.photo_urls = catalog["photo_urls"]
    return jsonify(item.json()), 200
# Get entire order details by order item ID
@app.route("/orders/item/<string:id>/all")
def get_order_details_by_order_item_id(id):
    """Resolve an order item ID to the full details of its parent order."""
    item = OrderItem.query.filter_by(id=id).first()
    if item is None:
        return jsonify({"message": "An order item is not found with this ID."}), 404
    order = Order.query.filter_by(id=item.order_id).first()
    order_items = get_order_items(item.order_id)
    #store in dictionary
    reply = {
        "id": order.id,
        "user_id": order.user_id,
        "datetime": order.datetime,
        "status": order.status,
        #"currency": "SGD",
        "total_price": sum(i.price * i.quantity for i in order_items),
        "items": [i.json() for i in order_items],
    }
    return jsonify(reply), 200
# Get all orders
@app.route("/orders")
def get_all_orders():
    """Return every order in the system with its items and total price."""
    payload = []
    for current in Order.query.all():
        order_items = get_order_items(current.id)
        payload.append({
            "id": current.id,
            "user_id": current.user_id,
            "datetime": current.datetime,
            "status": current.status,
            #"currency": "SGD",
            "total_price": sum(i.price * i.quantity for i in order_items),
            "items": [i.json() for i in order_items],
        })
    return jsonify(payload), 200
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5002, debug=True) | 0.438785 | 0.098512 |
from __future__ import division
import glob
import os
import matplotlib
#matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from stats import plot_result_difference_bars
try:
import cPickle as pickle
except:
import pickle
def plot_stats(prefix, stats_file, val_file, mean_interval=10, plot_graphs=True, save_graphs=False):
    """Backward-compatible alias for plot_stats_single_figure."""
    plot_stats_single_figure(prefix, stats_file, val_file, mean_interval,
                             plot_graphs=plot_graphs, save_graphs=save_graphs)
def plot_stats_single_figure(prefix, stats_file, val_file, mean_interval=10, plot_graphs=True, save_graphs=False):
    """Render learning statistics and validation results as one 4-row figure.

    :param prefix: file-name prefix used when saving figures
    :param stats_file: pickle containing the training statistics dict
    :param val_file: pickle containing the validation results
    :param mean_interval: number of schedules aggregated per x-axis step
    :param plot_graphs: show the figure interactively
    :param save_graphs: write the figure to <prefix>_learning.{pgf,png}
    """
    if not plot_graphs and not save_graphs:
        print('Set at least one of plot_graphs and save_graphs to True')
        return
    sns.set_style('whitegrid')
    with open(stats_file, 'rb') as f:  # close the pickle instead of leaking the handle
        stats = pickle.load(f)
    fig, ax = plt.subplots(4)
    (qax, rax, vax1, vax2) = ax
    x = range(1, int(len(stats['scenarios']) / mean_interval) + 1)
    # The 'missed'/'rewards' aggregates previously computed here were dead
    # code (their only consumer, plot_napfd_metrics, is commented out) and
    # crashed in np.polyfit on a length mismatch whenever mean_interval > 1.
    plot_results_line_graph(stats, 'napfd', mean_interval, qax, x)
    if 'comparison' in stats:
        plot_result_difference_bars(stats, 'napfd', rax, x)
    else:
        plot_results_line_graph(stats, 'rewards', mean_interval, rax, x)
    with open(val_file, 'rb') as f:
        val_res = pickle.load(f)
    plot_validation(val_res, lambda res: res['napfd'], 'Validation Results', 'NAPFD', vax1)
    plot_validation(val_res, lambda res: res['detected'] / (res['detected'] + res['missed']) if (res['detected'] + res['missed']) > 0 else 1,
                    'Validation Results', 'Failures Detected (in %)', vax2)
    if plot_graphs:
        plt.show()
    if save_graphs:
        fig.savefig('%s_learning.pgf' % prefix, bbox_inches='tight')
        fig.savefig('%s_learning.png' % prefix, bbox_inches='tight')
    plt.close('all')
def plot_stats_separate_figures(prefix, stats_file, val_file, mean_interval=10, plot_graphs=False, save_graphs=False):
    """Render the same statistics as plot_stats_single_figure, but as four
    individual figures: quality, reward, validation NAPFD, validation failures.

    :param prefix: file-name prefix used when saving figures
    :param stats_file: pickle containing the training statistics dict
    :param val_file: pickle containing the validation results
    :param mean_interval: number of schedules aggregated per x-axis step
    :param plot_graphs: draw/show the figures interactively
    :param save_graphs: write each figure as <prefix>_*.{pgf,png}
    """
    if not plot_graphs and not save_graphs:
        print('Set at least one of plot_graphs and save_graphs to True')
        return
    sns.set_style('whitegrid')
    sns.set_context('paper')
    with open(stats_file, 'rb') as f:  # close the pickle instead of leaking the handle
        stats = pickle.load(f)
    failure_count = np.add(stats['detected'], stats['missed'])
    x = range(1, int(len(stats['scenarios']) / mean_interval) + 1)
    # Fraction of failures missed per schedule (0 when nothing failed).
    perc_missed = [m / fc if fc > 0 else 0 for (m, fc) in zip(stats['missed'], failure_count)]
    # NOTE(review): mean_values fits against x, whose length only matches
    # these per-schedule series when mean_interval == 1 — confirm callers.
    mean_missed, missed_fit = mean_values(x, perc_missed, mean_interval)
    mean_reward, reward_fit = mean_values(x, stats['rewards'], mean_interval)
    # Figure 1: failure-detection quality.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plot_napfd_metrics([r[3] for r in stats['result']], mean_interval, mean_missed, missed_fit, ax, x)
    if plot_graphs:
        plt.draw()
    if save_graphs:
        fig.savefig('%s_quality.pgf' % prefix, bbox_inches='tight', transparent=True)
        fig.savefig('%s_quality.png' % prefix, bbox_inches='tight')
    # Figure 2: reward.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plot_reward(mean_interval, mean_reward, ax, reward_fit, x)
    if plot_graphs:
        plt.draw()
    if save_graphs:
        fig.savefig('%s_reward.pgf' % prefix, bbox_inches='tight')
        fig.savefig('%s_reward.png' % prefix, bbox_inches='tight')
    with open(val_file, 'rb') as f:
        val_res = pickle.load(f)
    # Figure 3: validation NAPFD.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plot_validation(val_res, lambda res: res['napfd'], 'Validation Results', 'NAPFD', ax)
    if plot_graphs:
        plt.draw()
    if save_graphs:
        fig.savefig('%s_validation_napfd.pgf' % prefix, bbox_inches='tight')
        fig.savefig('%s_validation_napfd.png' % prefix, bbox_inches='tight')
    # Figure 4: percentage of failures detected.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plot_validation(val_res, lambda res: res['detected'] / (res['detected'] + res['missed']) if (res['detected'] + res['missed']) > 0 else 1,
                    'Validation Results', 'Failures Detected (in %)', ax)
    if plot_graphs:
        plt.draw()
    if save_graphs:
        fig.savefig('%s_validation_failures.pgf' % prefix, bbox_inches='tight')
        fig.savefig('%s_validation_failures.png' % prefix, bbox_inches='tight')
    if plot_graphs:
        plt.show()  # Keep window open
    else:
        plt.close('all')
def plot_results_line_graph(stats, metric, mean_interval, qax, x, include_comparison=True):
    """Plot the smoothed metric curve (plus optional comparison baselines),
    scaled to percent, on the given axes."""
    if include_comparison and 'comparison' in stats:
        for baseline in stats['comparison']:
            baseline_values, _ = mean_values(x, stats['comparison'][baseline][metric], mean_interval)
            qax.plot(x, baseline_values * 100, label=baseline)
    series, _ = mean_values(x, stats[metric], mean_interval)
    qax.plot(x, series * 100, label=metric)
    qax.set_ylim([0, 100])
    qax.legend(ncol=2)
    qax.set_xlim([1, max(x)])
def plot_napfd_metrics(afpd, mean_interval, mean_missed, missed_fit, qax, x):
    """Plot NAPFD and percent-missed curves with their polynomial trends.

    :param afpd: per-schedule NAPFD values (fractions in [0, 1])
    :param mean_interval: aggregation window size (shown in the title only)
    :param mean_missed: smoothed percent-missed series
    :param missed_fit: polynomial trend function for mean_missed
    :param qax: target matplotlib axes
    :param x: shared x positions
    """
    mean_afpd, afpd_fit = mean_values(x, afpd, mean_interval)
    # Values are fractions; scale by 100 to display as percentages.
    qax.plot(x, mean_afpd * 100, label='NAPFD', color='blue')
    qax.plot(x, afpd_fit(x) * 100, color='black')
    qax.plot(x, mean_missed * 100, label='Percent Missed', color='green')
    qax.plot(x, missed_fit(x) * 100, color='black')
    qax.set_ylim([0, 100])
    qax.legend(ncol=2)
    qax.set_xlim([1, max(x)])
    qax.set_title('Failure Detection (averaged over %d schedules)' % mean_interval)
def plot_reward(mean_interval, mean_reward, rax, reward_fit, x):
    """Draw the smoothed reward curve together with its polynomial trend."""
    rax.plot(x, mean_reward, label='Reward', color='red')
    rax.plot(x, reward_fit(x), color='black')
    rax.set_xlim([1, max(x)])
    rax.set_title('Reward (averaged over %d schedules)' % mean_interval)
    rax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
def plot_validation(val_res, res_fun, title, ylabel, ax=None):
    """Box-plot a per-step validation metric.

    :param val_res: dict of validation records (column -> list, incl. 'step')
    :param res_fun: function mapping one row to a scalar metric value
    :param title: axes title
    :param ylabel: y-axis label
    :param ax: target axes; defaults to the current axes
    """
    if ax is None:  # explicit None check instead of relying on Axes truthiness
        ax = plt.gca()
    df = pd.DataFrame.from_dict(val_res)
    # NOTE(review): raw=True passes rows as ndarrays, yet the callers' res_fun
    # indexes rows by column name — this relies on legacy pandas apply()
    # behavior; confirm against the pinned pandas version.
    res_df = df.apply(res_fun, raw=True, axis=1)
    res_df.name = 'res'
    ydat = pd.concat([df, res_df], axis=1)
    sns.boxplot(data=ydat, x='step', y='res', palette=sns.color_palette(n_colors=1), ax=ax)
    ax.set_title(title)
    ax.set_ylabel(ylabel)
def mean_values(x, y, mean_interval):
    """Return *y* as an ndarray together with a degree-6 polynomial trend.

    The original window-averaging over mean_interval is disabled; the raw
    series is returned unchanged and mean_interval is currently unused.
    """
    #mean_y = np.mean(np.array(y).reshape(-1, mean_interval), axis=1)
    series = np.array(y)
    coefficients = np.polyfit(x, series, 6)
    return series, np.poly1d(coefficients)
def pickle_to_dataframe(pickle_file):
    """Load a pickled mapping and convert it into a DataFrame."""
    loaded = pd.read_pickle(pickle_file)
    return pd.DataFrame.from_dict(loaded)
def print_failure_detection(result_dir, file_prefixes):
    """Aggregate and print all *_stats.p pickles under result_dir for the
    given file-name prefixes.

    :param result_dir: directory containing the stats pickles
    :param file_prefixes: iterable of file-name prefixes to collect
    """
    frames = []
    for fp in file_prefixes:
        searchpath = os.path.join(result_dir, fp)
        files = glob.glob(searchpath + '_*_stats.p')
        frames.append(pd.concat([pickle_to_dataframe(f) for f in files]))
    # pd.concat replaces the deprecated DataFrame.append accumulation.
    df = pd.concat(frames) if frames else pd.DataFrame()
    print(df)  # `print df` was Python-2-only syntax and broke Python 3
if __name__ == '__main__':
    # Example invocation: plot the learning/validation pickles of a single
    # experiment run (mean_interval=1 keeps the series un-aggregated).
    stats_file = 'tableau_iofrol_timerank_lr0.3_as5_n1000_eps0.1_hist3_tableau_stats.p'
    val_file = 'tableau_iofrol_timerank_lr0.3_as5_n1000_eps0.1_hist3_tableau_val.p'
    mean_interval = 1
    plot_stats_single_figure('tableau', stats_file, val_file, mean_interval, plot_graphs=True, save_graphs=False)
    #plot_stats_separate_figures('netq', stats_file, val_file, mean_interval, plot_graphs=False, save_graphs=True)
#print_failure_detection('evaluation', ['heur_sort', 'heur_weight', 'random']) | plot_stats.py | from __future__ import division
import glob
import os
import matplotlib
#matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from stats import plot_result_difference_bars
try:
import cPickle as pickle
except:
import pickle
def plot_stats(prefix, stats_file, val_file, mean_interval=10, plot_graphs=True, save_graphs=False):
plot_stats_single_figure(prefix, stats_file, val_file, mean_interval, plot_graphs, save_graphs)
def plot_stats_single_figure(prefix, stats_file, val_file, mean_interval=10, plot_graphs=True, save_graphs=False):
if not plot_graphs and not save_graphs:
print('Set at least one of plot_graphs and save_graphs to True')
return
sns.set_style('whitegrid')
stats = pickle.load(open(stats_file, 'rb'))
fig, ax = plt.subplots(4)
(qax, rax, vax1, vax2) = ax
failure_count = np.add(stats['detected'], stats['missed'])
x = range(1, int(len(stats['scenarios']) / mean_interval) + 1)
perc_missed = [m / fc if fc > 0 else 0 for (m, fc) in zip(stats['missed'], failure_count)]
mean_missed, missed_fit = mean_values(x, perc_missed, mean_interval)
mean_reward, reward_fit = mean_values(x, stats['rewards'], mean_interval)
plot_results_line_graph(stats, 'napfd', mean_interval, qax, x)
#plot_napfd_metrics(afpd, mean_interval, mean_missed, missed_fit, qax, x)
if 'comparison' in stats:
plot_result_difference_bars(stats, 'napfd', rax, x)
else:
plot_results_line_graph(stats, 'rewards', mean_interval, rax, x)
val_res = pickle.load(open(val_file, 'rb'))
plot_validation(val_res, lambda res: res['napfd'], 'Validation Results', 'NAPFD', vax1)
plot_validation(val_res, lambda res: res['detected'] / (res['detected'] + res['missed']) if (res['detected'] + res['missed']) > 0 else 1,
'Validation Results', 'Failures Detected (in %)', vax2)
# plt.tight_layout()
if plot_graphs:
plt.show()
if save_graphs:
fig.savefig('%s_learning.pgf' % prefix, bbox_inches='tight')
fig.savefig('%s_learning.png' % prefix, bbox_inches='tight')
plt.close('all')
def plot_stats_separate_figures(prefix, stats_file, val_file, mean_interval=10, plot_graphs=False, save_graphs=False):
if not plot_graphs and not save_graphs:
print('Set at least one of plot_graphs and save_graphs to True')
return
sns.set_style('whitegrid')
sns.set_context('paper')
stats = pickle.load(open(stats_file, 'rb'))
failure_count = np.add(stats['detected'], stats['missed'])
x = range(1, int(len(stats['scenarios']) / mean_interval) + 1)
perc_missed = [m / fc if fc > 0 else 0 for (m, fc) in zip(stats['missed'], failure_count)]
mean_missed, missed_fit = mean_values(x, perc_missed, mean_interval)
mean_reward, reward_fit = mean_values(x, stats['rewards'], mean_interval)
fig = plt.figure()
ax = fig.add_subplot(111)
plot_napfd_metrics([r[3] for r in stats['result']], mean_interval, mean_missed, missed_fit, ax, x)
if plot_graphs:
plt.draw()
if save_graphs:
fig.savefig('%s_quality.pgf' % prefix, bbox_inches='tight', transparent=True)
fig.savefig('%s_quality.png' % prefix, bbox_inches='tight')
fig = plt.figure()
ax = fig.add_subplot(111)
plot_reward(mean_interval, mean_reward, ax, reward_fit, x)
if plot_graphs:
plt.draw()
if save_graphs:
fig.savefig('%s_reward.pgf' % prefix, bbox_inches='tight')
fig.savefig('%s_reward.png' % prefix, bbox_inches='tight')
val_res = pickle.load(open(val_file, 'rb'))
fig = plt.figure()
ax = fig.add_subplot(111)
plot_validation(val_res, lambda res: res['napfd'], 'Validation Results', 'NAPFD', ax)
if plot_graphs:
plt.draw()
if save_graphs:
fig.savefig('%s_validation_napfd.pgf' % prefix, bbox_inches='tight')
fig.savefig('%s_validation_napfd.png' % prefix, bbox_inches='tight')
fig = plt.figure()
ax = fig.add_subplot(111)
plot_validation(val_res, lambda res: res['detected'] / (res['detected'] + res['missed']) if (res['detected'] + res['missed']) > 0 else 1,
'Validation Results', 'Failures Detected (in %)', ax)
if plot_graphs:
plt.draw()
if save_graphs:
fig.savefig('%s_validation_failures.pgf' % prefix, bbox_inches='tight')
fig.savefig('%s_validation_failures.png' % prefix, bbox_inches='tight')
if plot_graphs:
plt.show() # Keep window open
else:
plt.close('all')
def plot_results_line_graph(stats, metric, mean_interval, qax, x, include_comparison=True):
if include_comparison and 'comparison' in stats:
for key in stats['comparison'].keys():
values, fitline = mean_values(x, stats['comparison'][key][metric], mean_interval)
qax.plot(x, values * 100, label=key)
#qax.plot(x, fitline(x) * 100, color='black')
values, fitline = mean_values(x, stats[metric], mean_interval)
qax.plot(x, values * 100, label=metric)
#qax.plot(x, fitline(x) * 100, color='black')
qax.set_ylim([0, 100])
qax.legend(ncol=2)
qax.set_xlim([1, max(x)])
def plot_napfd_metrics(afpd, mean_interval, mean_missed, missed_fit, qax, x):
mean_afpd, afpd_fit = mean_values(x, afpd, mean_interval)
qax.plot(x, mean_afpd * 100, label='NAPFD', color='blue')
qax.plot(x, afpd_fit(x) * 100, color='black')
qax.plot(x, mean_missed * 100, label='Percent Missed', color='green')
qax.plot(x, missed_fit(x) * 100, color='black')
qax.set_ylim([0, 100])
qax.legend(ncol=2)
qax.set_xlim([1, max(x)])
qax.set_title('Failure Detection (averaged over %d schedules)' % mean_interval)
def plot_reward(mean_interval, mean_reward, rax, reward_fit, x):
rax.plot(x, mean_reward, label='Reward', color='red')
rax.plot(x, reward_fit(x), color='black')
rax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
rax.set_xlim([1, max(x)])
rax.set_title('Reward (averaged over %d schedules)' % mean_interval)
def plot_validation(val_res, res_fun, title, ylabel, ax=None):
if not ax:
ax = plt.gca()
df = pd.DataFrame.from_dict(val_res)
res_df = df.apply(res_fun, raw=True, axis=1)
res_df.name = 'res'
ydat = pd.concat([df, res_df], axis=1)
sns.boxplot(data=ydat, x='step', y='res', palette=sns.color_palette(n_colors=1), ax=ax)
ax.set_title(title)
ax.set_ylabel(ylabel)
def mean_values(x, y, mean_interval):
#mean_y = np.mean(np.array(y).reshape(-1, mean_interval), axis=1)
mean_y = np.array(y)
z = np.polyfit(x, mean_y, 6)
f = np.poly1d(z)
return mean_y, f
def pickle_to_dataframe(pickle_file):
return pd.DataFrame.from_dict(pd.read_pickle(pickle_file))
def print_failure_detection(result_dir, file_prefixes):
df = pd.DataFrame()
for fp in file_prefixes:
searchpath = os.path.join(result_dir, fp)
files = glob.glob(searchpath + '_*_stats.p')
dfs = pd.concat([pickle_to_dataframe(f) for f in files])
df = df.append(dfs)
print df
if __name__ == '__main__':
stats_file = 'tableau_iofrol_timerank_lr0.3_as5_n1000_eps0.1_hist3_tableau_stats.p'
val_file = 'tableau_iofrol_timerank_lr0.3_as5_n1000_eps0.1_hist3_tableau_val.p'
mean_interval = 1
plot_stats_single_figure('tableau', stats_file, val_file, mean_interval, plot_graphs=True, save_graphs=False)
#plot_stats_separate_figures('netq', stats_file, val_file, mean_interval, plot_graphs=False, save_graphs=True)
#print_failure_detection('evaluation', ['heur_sort', 'heur_weight', 'random']) | 0.68679 | 0.307364 |
from unittest import TestCase
from textx import metamodel_from_file
import converter.json_converter as converter
converted_positive = """
{
"rules": [
{
"conditions": [
{
"operator": ">=",
"parameter": {
"property": "temperature",
"deviceId": "a32db207-7236-4e75-abad-7c972f4cfd18"
},
"value": 30
}
],
"name": "ruleTemp01",
"actions": [
{
"name": "TURN OFF",
"parameters": {
"deviceId": "937a7c3e-db39-4d75-b52b-b8442463761a"
}
},
{
"name": "SEND EMAIL",
"parameters": {
"content": "<b>Alert!</b><br><p>High temperature in living room!</p>",
"recipient": "<EMAIL>"
}
},
{
"name": "SEND EMAIL",
"parameters": {
"content": "<b>Alert!</b><br><p>High temperature in living room!</p>",
"recipient": "<EMAIL>"
}
}
]
}
]
}""".strip()
class JsonConverter(TestCase):
    """Unit tests for the private helpers of converter.json_converter."""

    class TestExample(object):
        """Minimal two-attribute object used as conversion input."""

        def __init__(self, a, b):
            self.a = a
            self.b = b

    def setUp(self):
        """Create a sample object and parse the example rule model."""
        self.object_example = self.TestExample("test", 0)
        self.rules_mm = metamodel_from_file("spec/rules_grammar.tx")
        self.rules = self.rules_mm.model_from_str(
            """
            RULE ruleTemp01:
            a32db207-7236-4e75-abad-7c972f4cfd18["temperature"] >= 30
            TRIGGERS
            TURN OFF 937a7c3e-db39-4d75-b52b-b8442463761a
            SEND EMAIL "<b>Alert!</b><br><p>High temperature in living room!</p>" TO "<EMAIL>"
            SEND EMAIL "<b>Alert!</b><br><p>High temperature in living room!</p>" TO "<EMAIL>"
            """)

    def test_element_creator(self):
        """A (key, value) pair becomes a one-entry dict."""
        self.assertEqual({"attribute": "value"}, converter._element_creator(("attribute", "value")))

    def test_object_handler(self):
        """Plain objects are converted attribute-by-attribute."""
        self.assertEqual({"a": "test", "b": 0}, converter._object_handler(self.object_example))

    def test_tx_filter_positive(self):
        self.assertTrue(converter._tx_filter(("valid_attribute",)))

    def test_tx_filter_negative(self):
        # textx-internal attributes (leading underscore, 'parent') are dropped.
        self.assertFalse(converter._tx_filter(("_private",)))
        self.assertFalse(converter._tx_filter(("parent",)))

    def test_list_handler(self):
        # NOTE(review): assertTrue with two arguments never fails — the first
        # argument is truthy; this was presumably meant to be assertEqual.
        self.assertTrue({"test": [{"first": "first"}, {"second": "second"}]},
                        converter._list_handler(
                            [self.TestExample("first", "first"), self.TestExample("second", "second")]))

    def test_general_handler(self):
        """Dispatch works for objects, scalars, and lists alike."""
        self.assertEqual({"a": "test", "b": "test"}, converter._general_handler(self.TestExample("test", "test")))
        self.assertEqual("test", converter._general_handler("test"))
        # NOTE(review): same assertTrue-with-two-arguments issue as above.
        self.assertTrue({"test": [{"first": "first"}, {"second": "second"}]},
                        converter._general_handler(
                            [self.TestExample("first", "first"), self.TestExample("second", "second")]))
def test_converter(self):
self.assertEqual(converted_positive, converter.convert(self.rules)) | parser/tests/test_converter.py | from unittest import TestCase
from textx import metamodel_from_file
import converter.json_converter as converter
converted_positive = """
{
"rules": [
{
"conditions": [
{
"operator": ">=",
"parameter": {
"property": "temperature",
"deviceId": "a32db207-7236-4e75-abad-7c972f4cfd18"
},
"value": 30
}
],
"name": "ruleTemp01",
"actions": [
{
"name": "TURN OFF",
"parameters": {
"deviceId": "937a7c3e-db39-4d75-b52b-b8442463761a"
}
},
{
"name": "SEND EMAIL",
"parameters": {
"content": "<b>Alert!</b><br><p>High temperature in living room!</p>",
"recipient": "<EMAIL>"
}
},
{
"name": "SEND EMAIL",
"parameters": {
"content": "<b>Alert!</b><br><p>High temperature in living room!</p>",
"recipient": "<EMAIL>"
}
}
]
}
]
}""".strip()
class JsonConverter(TestCase):
class TestExample(object):
def __init__(self, a, b):
self.a = a
self.b = b
def setUp(self):
self.object_example = self.TestExample("test", 0)
self.rules_mm = metamodel_from_file("spec/rules_grammar.tx")
self.rules = self.rules_mm.model_from_str(
"""
RULE ruleTemp01:
a32db207-7236-4e75-abad-7c972f4cfd18["temperature"] >= 30
TRIGGERS
TURN OFF 937a7c3e-db39-4d75-b52b-b8442463761a
SEND EMAIL "<b>Alert!</b><br><p>High temperature in living room!</p>" TO "<EMAIL>"
SEND EMAIL "<b>Alert!</b><br><p>High temperature in living room!</p>" TO "<EMAIL>"
""")
def test_element_creator(self):
self.assertEqual({"attribute": "value"}, converter._element_creator(("attribute", "value")))
def test_object_handler(self):
self.assertEqual({"a": "test", "b": 0}, converter._object_handler(self.object_example))
def test_tx_filter_positive(self):
self.assertTrue(converter._tx_filter(("valid_attribute",)))
def test_tx_filter_negative(self):
self.assertFalse(converter._tx_filter(("_private",)))
self.assertFalse(converter._tx_filter(("parent",)))
def test_list_handler(self):
self.assertTrue({"test": [{"first": "first"}, {"second": "second"}]},
converter._list_handler(
[self.TestExample("first", "first"), self.TestExample("second", "second")]))
def test_general_handler(self):
self.assertEqual({"a": "test", "b": "test"}, converter._general_handler(self.TestExample("test", "test")))
self.assertEqual("test", converter._general_handler("test"))
self.assertTrue({"test": [{"first": "first"}, {"second": "second"}]},
converter._general_handler(
[self.TestExample("first", "first"), self.TestExample("second", "second")]))
def test_converter(self):
self.assertEqual(converted_positive, converter.convert(self.rules)) | 0.78609 | 0.38549 |
import requests
from bs4 import BeautifulSoup
import json
import boto3
import json
from datetime import datetime
import os
import common
import match
import watchlist
import logging
# Root logger used by every Lambda handler in this module.
log = logging.getLogger()
log.setLevel(logging.INFO)
# NOTE(review): this "stop word" list is actually the tokenised opening of
# Kafka's "The Metamorphosis", not an English stop-word list — presumably
# placeholder data; confirm and replace with a real stop-word list.
stop_words = ['One', 'morning', ',', 'when', 'Gregor', 'Samsa', 'woke', 'from', 'troubled', 'dreams', ',', 'he', 'found', 'himself', 'transformed', 'in', 'his', 'bed', 'into', 'a', 'horrible', 'vermin', '.', 'He', 'lay', 'on', 'his', 'armour-like', 'back', ',', 'and', 'if', 'he', 'lifted', 'his', 'head', 'a', 'little', 'he', 'could', 'see', 'his', 'brown', 'belly', ',', 'slightly', 'domed', 'and', 'divided', 'by', 'arches', 'into', 'stiff', 'sections', '.', 'The', 'bedding', 'was', 'hardly', 'able', 'to', 'cover', 'it', 'and', 'seemed', 'ready', 'to', 'slide', 'off', 'any', 'moment', '.', 'His', 'many', 'legs', ',', 'pitifully', 'thin', 'compared', 'with', 'the', 'size', 'of', 'the', 'rest', 'of', 'him', ',', 'waved', 'about', 'helplessly', 'as', 'he', 'looked', '.', '``', 'What', "'s", 'happened', 'to']
def query_newsfeed(event, context):
    """
    Submit a request with newsfeed information to be processed.

    Web service call format:
    {
        "url":"https://comicbook.com/starwars/news/lego-star-wars-probe-droid-darth-vader-scout-trooper-helmet-order/",
        "html_tag":"article",
        "html_attribute":{
            "itemprop": "articleBody"
        },
        "options": {
            "extract_entities": "true",
            "extract_keyphrase": "true",
            "extract_sentiment": "true"
        }
    }

    :param event: see web service example
    :param context: Lambda context (unused)
    :return: HTTP-style dict carrying the message ID for the submitted job
    """
    log.info("Hello From query_newsfeed")
    # Default options:
    extract_entities = False
    extract_keyphrase = False
    extract_sentiment = False
    default_options = {
        "extract_entities": "true",
        "extract_keyphrase": "false",
        "extract_sentiment": "true"
    }
    # Fallback name: ISO-like timestamp. Fixed from "%Y-%d-%m", which swapped
    # day and month and produced non-sortable names like news_2024-31-01T...
    newsfeed_name_ts = "news_" + datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
    try:
        req_body = json.loads(event['body'])
        url = req_body.get('url')
        html_tag = req_body.get('html_tag')
        html_attribute = req_body.get('html_attribute')
        newsfeed_name = req_body.get('newsfeed_name', newsfeed_name_ts)
        options = req_body.get('options', default_options)
        # .get() tolerates partially specified options dicts; the previous
        # direct indexing turned a missing key into a generic 500.
        extract_entities = options.get("extract_entities") == "true"
        extract_keyphrase = options.get("extract_keyphrase") == "true"
        extract_sentiment = options.get("extract_sentiment") == "true"
        log.info("newsfeed_name " + newsfeed_name)
        # get secret configuration
        config = common.get_secret(os.environ['SECRET'])
        newsfeed_bucket = config['newsfeed-bucket']
        queue_name = config['incoming-newsfeed-queue']
        # scrape webpage
        scraped_text = scrape_webpage(url, html_tag, html_attribute)
        # save page text to bucket
        common.save_content_to_bucket(newsfeed_bucket, "newsfeed", newsfeed_name, ".txt", scraped_text)
        # push message to queue
        sqs_response = push_message_to_queue(queue_name, newsfeed_bucket, newsfeed_name, scraped_text, url,
                                             extract_entities, extract_keyphrase, extract_sentiment)
    except Exception as e:
        # exc_info logs the traceback; the old call passed `e` as an unused
        # lazy-formatting argument, so it never appeared in the log line.
        log.error("Error executing query_newsfeed", exc_info=True)
        return {
            'statusCode': 500,
            'body': json.dumps({
                "error": str(e)
            })
        }
    response = {
        "options": {
            "extract_entities": extract_entities,
            "extract_keyphrase": extract_keyphrase,
            "extract_sentiment": extract_sentiment
        },
        "file_generated": newsfeed_name + ".txt",
        "bucket_used": newsfeed_bucket,
        "url": url,
        "Message ID": str(sqs_response.get('MessageId', "Error"))
    }
    return {
        'statusCode': 200,
        'body': json.dumps(response)
    }
def push_message_to_queue(queue_name, newsfeed_bucket, newsfeed_name, scraped_text, url,
                          extract_entities, extract_keyphrase, extract_sentiment):
    """
    Push a newsfeed message onto the incoming SQS queue.

    :param queue_name: name of the destination queue
    :param newsfeed_bucket: bucket holding the newsfeed artifacts
    :param newsfeed_name: base name of the newsfeed (without extension)
    :param scraped_text: the scraped, cleaned page content
    :param url: URL of the original resource
    :param extract_entities: True/False
    :param extract_keyphrase: True/False
    :param extract_sentiment: True/False
    :return: the SQS response, including the message id
    """
    message_payload = {
        "bucket": newsfeed_bucket,
        "file": newsfeed_name + ".txt",
        "content": scraped_text,
        "url": url,
        "options": {
            "extract_entities": extract_entities,
            "extract_keyphrase": extract_keyphrase,
            "extract_sentiment": extract_sentiment,
        }
    }
    sqs = boto3.resource('sqs')
    queue = sqs.get_queue_by_name(QueueName=queue_name)
    log.info("Processing {0} to SQS".format(newsfeed_name))
    return queue.send_message(MessageBody=json.dumps(message_payload))
def evaluate_newsfeed(event, context):
    """
    SQS message handler that runs Comprehend analyses over queued newsfeeds
    and forwards any watchlist matches to the match logic.

    :param event: SQS event; each record body carries bucket/file/content/
                  url/options as produced by push_message_to_queue
    :param context: Lambda context (unused)
    :return: summary string with the number of processed records
    """
    log.info("Hello From process_newsfeed")
    config = common.get_secret(os.environ['SECRET'])
    # NOTE(review): these accumulators live across loop iterations — if a
    # later message disables an option, the previous message's result is
    # reused; and when an option is off, the empty string "" reaches
    # query_message_match_result, which indexes it like a dict. Confirm
    # whether queues are configured with batch size 1.
    sentiment_result = ""
    entities_result = ""
    keyphrase_result = ""
    # Iterate on messages available in the Queue
    for message in event['Records']:
        message_id = message["messageId"]
        message_body = json.loads(message["body"])
        message_file = message_body["file"]
        message_content = message_body["content"]
        message_options = message_body["options"]
        log.info("Processing file {0} with Message ID {1}.".format(message_file, message_id))
        url = message_body['url']
        try:
            client = boto3.client('comprehend')
            newsfeed_bucket = config['newsfeed-bucket']
            if message_options["extract_entities"]:
                log.info("extracting entities")
                entities_result = extract_comprehend_entities(client, message_content)
                common.save_content_to_bucket(newsfeed_bucket, "entities", message_id, ".json", entities_result, "JSON")
            if message_options["extract_keyphrase"]:
                log.info("extracting keyphrase")
                keyphrase_result = extract_comprehend_keyphrase(client, message_content)
                common.save_content_to_bucket(newsfeed_bucket, "keyphrases", message_id, ".json", keyphrase_result, "JSON")
            if message_options["extract_sentiment"]:
                log.info("extracting sentiment")
                sentiment_result = extract_comprehend_sentiment(client, message_content)
                common.save_content_to_bucket(newsfeed_bucket, "sentiments", message_id, ".json", sentiment_result, "JSON")
            results = query_message_match_result(entities_result, keyphrase_result)
            # log.info(results)
            if len(results) > 0:
                log.info("Match Found!")
                content = {
                    "results": results,
                    "url": url,
                    "sentiment": sentiment_result
                }
                # calling match logic with provided match result
                match.detect_watchlist(content)
        except Exception as e:
            # Per-message isolation: one bad record must not fail the batch.
            log.error("Error executing process_newsfeed with Message", exc_info=True)
    return "Processed {0} records.".format(len(event['Records']))
def query_message_match_result(entities_result, keyphrase_result):
    """
    Build and execute a watchlist query list for a given message and return
    the match result.

    :param entities_result: Comprehend DetectEntities response (dict with an
                            'Entities' list) — raises if passed "" (see caller)
    :param keyphrase_result: Comprehend DetectKeyPhrases response (dict with
                             a 'KeyPhrases' list)
    :return: list of matched watchlist rows (entity, entity_type,
             create_timestamp)
    """
    results = []
    query_list = []
    log.info("Processing Entities")
    entities_clean = clean_words(entities_result['Entities'])
    query_list.extend(entities_clean)
    log.info("Processing KeyPhrases")
    keyphrase_clean = clean_words(keyphrase_result['KeyPhrases'])
    query_list.extend(keyphrase_clean)
    # dict.fromkeys deduplicates while preserving first-seen order.
    query_no_duplicates = list(dict.fromkeys(query_list))
    log.info("Collected {0} queries".format(len(query_no_duplicates)))
    # Iterate on all query keys — one watchlist DB round-trip per keyword.
    for key in query_no_duplicates:
        query, params = watchlist.get_keyword_query(key)
        query_result = watchlist.execute_statement(query, params)
        if len(query_result['records']) > 0:
            log.info(query_result['records'])
            for rec in query_result['records']:
                # RDS Data API rows: positional columns wrapped in typed dicts.
                results.append({
                    'entity': rec[0]['stringValue'],
                    'entity_type': rec[1]['stringValue'],
                    'create_timestamp': rec[2]['stringValue']
                })
    log.info("Matched {0}".format(len(results)))
    return results
def clean_words(content):
    """Collect lowercase tokens from high-confidence Comprehend results,
    dropping any token present in the module-level stop_words list."""
    collected = []
    for entry in content:
        if entry['Score'] >= 0.9:
            tokens = [token.lower() for token in entry['Text'].split()]
            collected.extend(token for token in tokens if token not in stop_words)
    log.info(len(collected))
    return collected
def scrape_webpage(url, html_tag, html_attribute):
    """
    Scrape a web page using the BeautifulSoup library and an HTML qualifier.

    :param url: url of the newsfeed
    :param html_tag: html tag naming the section that holds the article
    :param html_attribute: attribute dict selecting the particular html tag
    :return: the scraped text of the html page
    """
    log.info("Scraping : {0}".format(url))
    response = requests.get(url)
    soup = BeautifulSoup(response.text, "html.parser")
    # NOTE(review): soup.find returns None when tag/attributes don't match,
    # making `article.text` raise AttributeError — confirm callers rely on
    # their surrounding try/except for that case.
    article = soup.find(html_tag, html_attribute)
    results = article.text
    log.info("Finished Scraping : {0}".format(url))
    return results
def extract_comprehend_entities(client, input_text):
    """
    Detect named entities via the Comprehend DetectEntities API.

    :param client: boto3 comprehend client instance
    :param input_text: the input text
    :return: Comprehend response
    """
    # DetectEntities caps input size; truncate to avoid
    # TextSizeLimitExceededException (see the AWS API reference).
    truncated = common.limited_text(input_text, 5000)
    return client.detect_entities(Text=truncated, LanguageCode='en')
def extract_comprehend_keyphrase(client, input_text):
    """
    Detect key phrases via the Comprehend DetectKeyPhrases API.

    :param client: boto3 comprehend client instance
    :param input_text: the input text
    :return: Comprehend response
    """
    # Truncate to the API's input size limit before calling the service.
    truncated = common.limited_text(input_text, 5000)
    return client.detect_key_phrases(Text=truncated, LanguageCode='en')
def extract_comprehend_sentiment(client, input_text):
    """
    Detect overall sentiment via the Comprehend DetectSentiment API.

    :param client: boto3 comprehend client instance
    :param input_text: the input text
    :return: Comprehend response
    """
    # Truncate to the API's input size limit before calling the service.
    truncated = common.limited_text(input_text, 5000)
    return client.detect_sentiment(Text=truncated, LanguageCode='en')
import requests
from bs4 import BeautifulSoup
import json
import boto3
import json
from datetime import datetime
import os
import common
import match
import watchlist
import logging
log = logging.getLogger()
log.setLevel(logging.INFO)
stop_words = ['One', 'morning', ',', 'when', 'Gregor', 'Samsa', 'woke', 'from', 'troubled', 'dreams', ',', 'he', 'found', 'himself', 'transformed', 'in', 'his', 'bed', 'into', 'a', 'horrible', 'vermin', '.', 'He', 'lay', 'on', 'his', 'armour-like', 'back', ',', 'and', 'if', 'he', 'lifted', 'his', 'head', 'a', 'little', 'he', 'could', 'see', 'his', 'brown', 'belly', ',', 'slightly', 'domed', 'and', 'divided', 'by', 'arches', 'into', 'stiff', 'sections', '.', 'The', 'bedding', 'was', 'hardly', 'able', 'to', 'cover', 'it', 'and', 'seemed', 'ready', 'to', 'slide', 'off', 'any', 'moment', '.', 'His', 'many', 'legs', ',', 'pitifully', 'thin', 'compared', 'with', 'the', 'size', 'of', 'the', 'rest', 'of', 'him', ',', 'waved', 'about', 'helplessly', 'as', 'he', 'looked', '.', '``', 'What', "'s", 'happened', 'to']
def query_newsfeed(event, context):
    """
    Submit a request with a newsfeed information to be processed
    Web service call format:
    {
        "url":"https://comicbook.com/starwars/news/lego-star-wars-probe-droid-darth-vader-scout-trooper-helmet-order/",
        "html_tag":"article",
        "html_attribute":{
            "itemprop": "articleBody"
        },
        "options": {
            "extract_entities": "true",
            "extract_keyphrase": "true",
            "extract_sentiment": "true"
        }
    }
    :param event: see web service example
    :param context: Lambda context (unused)
    :return: HTTP-style dict: message ID for the submitted job (200) or error body (500)
    """
    log.info("Hello From query_newsfeed")
    # Default options:
    extract_entities = False
    extract_keyphrase = False
    extract_sentiment = False
    default_options = {
        "extract_entities": "true",
        "extract_keyphrase": "false",
        "extract_sentiment": "true"
    }
    # Fallback name when the caller does not supply one.
    # NOTE(review): "%Y-%d-%m" puts day before month; looks transposed but is
    # kept verbatim to avoid changing generated object names -- confirm intent.
    newsfeed_name_ts = "news_" + datetime.now().strftime("%Y-%d-%mT%H:%M:%S")
    try:
        req_body = json.loads(event['body'])
        url = req_body.get('url')
        html_tag = req_body.get('html_tag')
        html_attribute = req_body.get('html_attribute')
        newsfeed_name = req_body.get('newsfeed_name', newsfeed_name_ts)
        options = req_body.get('options', default_options)
        # Option values arrive as the strings "true"/"false", not booleans.
        if options["extract_entities"] == "true":
            extract_entities = True
        if options["extract_keyphrase"] == "true":
            extract_keyphrase = True
        if options["extract_sentiment"] == "true":
            extract_sentiment = True
        log.info("newsfeed_name " + newsfeed_name)
        # get secret configuration
        config = common.get_secret(os.environ['SECRET'])
        newsfeed_bucket = config['newsfeed-bucket']
        queue_name = config['incoming-newsfeed-queue']
        # scrape webpage
        scraped_text = scrape_webpage(url, html_tag, html_attribute)
        # save page text to bucket
        common.save_content_to_bucket(newsfeed_bucket, "newsfeed", newsfeed_name, ".txt", scraped_text)
        # push message to queue
        sqs_response = push_message_to_queue(queue_name, newsfeed_bucket, newsfeed_name, scraped_text, url,
                                             extract_entities, extract_keyphrase, extract_sentiment)
    except Exception as e:
        # Bug fix: the exception was previously passed as a stray positional
        # argument with no %-placeholder, so it never appeared in the log.
        log.error("Error executing query_newsfeed: %s", e, exc_info=True)
        return {
            'statusCode': 500,
            'body': json.dumps({
                "error": str(e)
            })
        }
    response = {
        "options": {
            "extract_entities": extract_entities,
            "extract_keyphrase": extract_keyphrase,
            "extract_sentiment": extract_sentiment
        },
        "file_generated": newsfeed_name + ".txt",
        "bucket_used": newsfeed_bucket,
        "url": url,
        "Message ID": str(sqs_response.get('MessageId', "Error"))
    }
    return {
        'statusCode': 200,
        'body': json.dumps(response)
    }
def push_message_to_queue(queue_name, newsfeed_bucket, newsfeed_name, scraped_text, url,
                          extract_entities, extract_keyphrase, extract_sentiment):
    """
    Publish a newsfeed-processing job onto the incoming SQS queue.
    :param queue_name: the queue name to push the message
    :param newsfeed_bucket: the bucket name of the related message
    :param newsfeed_name: the name of the newsfeed
    :param scraped_text: the scraped clean content
    :param url: the url of the resource
    :param extract_entities: True/False
    :param extract_keyphrase: True/False
    :param extract_sentiment: True/False
    :return: the sqs response including the message id
    """
    # Assemble the job payload the queue consumer (evaluate_newsfeed) expects.
    payload = {
        "bucket": newsfeed_bucket,
        "file": newsfeed_name + ".txt",
        "content": scraped_text,
        "url": url,
        "options": {
            "extract_entities": extract_entities,
            "extract_keyphrase": extract_keyphrase,
            "extract_sentiment": extract_sentiment,
        }
    }
    target_queue = boto3.resource('sqs').get_queue_by_name(QueueName=queue_name)
    log.info("Processing {0} to SQS".format(newsfeed_name))
    return target_queue.send_message(MessageBody=json.dumps(payload))
def evaluate_newsfeed(event, context):
    """
    Queue Message Handler which process a newsfeed message
    :param event: SQS event ('Records' is a list of queued newsfeed jobs)
    :param context: Lambda context (unused)
    :return: summary string with the number of processed records
    """
    log.info("Hello From process_newsfeed")
    config = common.get_secret(os.environ['SECRET'])
    # Iterate on messages available in the Queue
    for message in event['Records']:
        # Bug fix: reset per-message results on every iteration. They were
        # previously initialised once, outside the loop, so a record with an
        # extraction option disabled silently reused the previous record's data.
        sentiment_result = ""
        entities_result = ""
        keyphrase_result = ""
        message_id = message["messageId"]
        message_body = json.loads(message["body"])
        message_file = message_body["file"]
        message_content = message_body["content"]
        message_options = message_body["options"]
        log.info("Processing file {0} with Message ID {1}.".format(message_file, message_id))
        url = message_body['url']
        try:
            client = boto3.client('comprehend')
            newsfeed_bucket = config['newsfeed-bucket']
            if message_options["extract_entities"]:
                log.info("extracting entities")
                entities_result = extract_comprehend_entities(client, message_content)
                common.save_content_to_bucket(newsfeed_bucket, "entities", message_id, ".json", entities_result, "JSON")
            if message_options["extract_keyphrase"]:
                log.info("extracting keyphrase")
                keyphrase_result = extract_comprehend_keyphrase(client, message_content)
                common.save_content_to_bucket(newsfeed_bucket, "keyphrases", message_id, ".json", keyphrase_result, "JSON")
            if message_options["extract_sentiment"]:
                log.info("extracting sentiment")
                sentiment_result = extract_comprehend_sentiment(client, message_content)
                common.save_content_to_bucket(newsfeed_bucket, "sentiments", message_id, ".json", sentiment_result, "JSON")
            results = query_message_match_result(entities_result, keyphrase_result)
            if len(results) > 0:
                log.info("Match Found!")
                content = {
                    "results": results,
                    "url": url,
                    "sentiment": sentiment_result
                }
                # calling match logic with provided match result
                match.detect_watchlist(content)
        except Exception:
            # Log and continue with the next record rather than failing the batch.
            log.error("Error executing process_newsfeed with Message", exc_info=True)
    return "Processed {0} records.".format(len(event['Records']))
def query_message_match_result(entities_result, keyphrase_result):
    """
    Build and execute a query list for a given message and return the match result
    :param entities_result: Comprehend DetectEntities response, or "" when the
        entities extraction step was skipped
    :param keyphrase_result: Comprehend DetectKeyPhrases response, or "" when
        the key-phrase extraction step was skipped
    :return: all passed match results
    """
    results = []
    query_list = []
    log.info("Processing Entities")
    # Robustness fix: the caller passes "" for a skipped extraction step, which
    # has no 'Entities'/'KeyPhrases' key -- treat it as an empty result set
    # instead of raising.
    entities = entities_result['Entities'] if entities_result else []
    query_list.extend(clean_words(entities))
    log.info("Processing KeyPhrases")
    keyphrases = keyphrase_result['KeyPhrases'] if keyphrase_result else []
    query_list.extend(clean_words(keyphrases))
    # dict.fromkeys removes duplicates while preserving first-seen order.
    query_no_duplicates = list(dict.fromkeys(query_list))
    log.info("Collected {0} queries".format(len(query_no_duplicates)))
    # Iterate on all query keys
    for key in query_no_duplicates:
        query, params = watchlist.get_keyword_query(key)
        query_result = watchlist.execute_statement(query, params)
        if len(query_result['records']) > 0:
            log.info(query_result['records'])
            for rec in query_result['records']:
                results.append({
                    'entity': rec[0]['stringValue'],
                    'entity_type': rec[1]['stringValue'],
                    'create_timestamp': rec[2]['stringValue']
                })
    log.info("Matched {0}".format(len(results)))
    return results
def clean_words(content, min_score=0.9):
    """
    Extract lower-cased, stop-word-filtered query words from Comprehend items.
    :param content: list of Comprehend result items, each with 'Score' and 'Text'
    :param min_score: minimum confidence an item must reach to be used
        (new optional parameter; default preserves the previous hard-coded 0.9)
    :return: flat list of candidate query words
    """
    # A set gives O(1) membership tests instead of scanning the stop-word list
    # once per word.
    stop_set = set(stop_words)
    query_list = []
    for entity in content:
        if entity['Score'] >= min_score:
            words_lowered = [w.lower() for w in entity['Text'].split()]
            query_list.extend(w for w in words_lowered if w not in stop_set)
    log.info(len(query_list))
    return query_list
def scrape_webpage(url, html_tag, html_attribute):
    """
    Scrape a web page using the BeautifulSoup library and an html qualifier.
    :param url: url of the newsfeed
    :param html_tag: html qualifier indicating the section of the news
    :param html_attribute: the matching html attribute to select a particular html tag
    :return: the scraped text of the html page
    :raises ValueError: if no element matches html_tag/html_attribute
    """
    log.info("Scraping : {0}".format(url))
    # A timeout prevents a dead host from hanging the Lambda until it times out.
    response = requests.get(url, timeout=30)
    soup = BeautifulSoup(response.text, "html.parser")
    article = soup.find(html_tag, html_attribute)
    if article is None:
        # Bug fix: previously this fell through to `article.text` and raised an
        # opaque AttributeError on None.
        raise ValueError(
            "No <{0}> element matching {1} found at {2}".format(html_tag, html_attribute, url))
    results = article.text
    log.info("Finished Scraping : {0}".format(url))
    return results
def extract_comprehend_entities(client, input_text):
    """
    Run Amazon Comprehend entity detection over a bounded slice of the text.
    :param client: boto3 comprehend client instance
    :param input_text: the input text
    :return: raw Comprehend DetectEntities response
    """
    # Truncate to stay under the Comprehend per-request size limit
    # (TextSizeLimitExceededException), see
    # https://docs.aws.amazon.com/comprehend/latest/dg/API_DetectEntities.html
    bounded_text = common.limited_text(input_text, 5000)
    return client.detect_entities(Text=bounded_text, LanguageCode='en')
def extract_comprehend_keyphrase(client, input_text):
    """
    Run Amazon Comprehend key-phrase detection over a bounded slice of the text.
    :param client: boto3 comprehend client instance
    :param input_text: the input text
    :return: raw Comprehend DetectKeyPhrases response
    """
    # Truncate to stay under the Comprehend per-request text size limit.
    bounded_text = common.limited_text(input_text, 5000)
    return client.detect_key_phrases(Text=bounded_text, LanguageCode='en')
def extract_comprehend_sentiment(client, input_text):
    """
    Calling Comprehend Sentiment API
    :param client: boto3 comprehend client instance
    :param input_text: the input text
    :return: Comprehend response
    """
    # Truncate to stay under the Comprehend per-request text size limit.
    response_sentiment = client.detect_sentiment(
        Text=common.limited_text(input_text, 5000),
        LanguageCode='en'
    )
    # Fix: the return line was corrupted by pasted dataset metadata
    # ("| 0.431824 | 0.182153 |"), which made it a syntax error.
    return response_sentiment
import os
import csv
import pickle
import numpy as np
import argparse
def get_args(argv=None):
    """
    Parse command-line options for the NTU RGB+D preprocessing script.
    :param argv: optional list of argument strings; defaults to sys.argv[1:].
        (New optional parameter for testability -- backward compatible.)
    :return: argparse.Namespace with `file_path`
    """
    parser = argparse.ArgumentParser('NTU RGB+D Dataset Preprocessing')
    parser.add_argument('--file-path', type=str, default='D:/nturgb+d_skeletons')
    # parse_args(None) reads sys.argv[1:], preserving the original behaviour.
    return parser.parse_args(argv)
def csv2pickle(data, filename, base_dir='./', data_mode='train'):
    """Serialise *data* to a pickle file named *filename* under *base_dir*.

    Note: *data_mode* is accepted for interface compatibility but is unused.
    """
    print('saving pickle ...')
    target = os.path.join(base_dir, filename)
    with open(target, "wb") as handle:
        pickle.dump(data, handle)
def load_data(filename):
    """Return every line of *filename* as a list (newline characters kept)."""
    with open(filename, 'r') as handle:
        return handle.readlines()
def save_csv(result, out_dir):
    """Write *result* (an iterable of rows) to *out_dir* as a UTF-8 CSV file."""
    with open(out_dir, 'w', encoding='utf-8', newline='') as handle:
        writer = csv.writer(handle)
        writer.writerows(result)
def main(args):
    """Split NTU RGB+D skeleton files into cross-subject train/test pickles.

    Files whose subject id appears in ``cross_val`` become training samples;
    all others become test samples.
    """
    skeleton_file_list = os.listdir(args.file_path)
    # Training subject ids for the NTU cross-subject evaluation protocol.
    cross_val = [
        1, 2, 4, 5, 8,
        9, 13, 14, 15, 16,
        17, 18, 19, 25, 27,
        28, 31, 34, 35, 38
    ]
    train_sample = {}
    test_sample = {}
    train_sample_count = 0
    test_sample_count = 0
    for file in skeleton_file_list:
        filename = file.split('.')[0]
        # Subject id sits at characters [9:12] of the filename; the action
        # label is the last three characters.
        subject = int(filename[9:12])
        label = int(filename[-3:])
        print(filename, label, subject)
        data = load_data(os.path.join(args.file_path, file))
        count = 1
        sub_sample = {}
        while len(data) != count:
            person_count = int(data[count])
            count += 1
            for index in range(person_count):
                # Bug fix 1: allocate a fresh array per person. The single
                # buffer allocated per frame was shared by every person, so
                # each sub_sample entry referenced (and was overwritten by)
                # the same array.
                # Bug fix 2: np.float was removed in NumPy 1.24; plain float
                # is the documented equivalent.
                joint = np.zeros(shape=[25, 3], dtype=float)
                joint_cnt = 2
                while joint_cnt != 27:
                    line = data[count + joint_cnt].strip().split(' ')
                    joint[joint_cnt - 2, :] = [float(n) for n in line[:3]]
                    joint_cnt += 1
                if index in sub_sample:
                    sub_sample[index].append(joint)
                else:
                    sub_sample[index] = [joint]
                count += 27
            for key, item in sub_sample.items():
                if subject in cross_val:
                    train_sample[train_sample_count] = [item, label]
                    train_sample_count += 1
                else:
                    test_sample[test_sample_count] = [item, label]
                    test_sample_count += 1
            # NOTE(review): only the first frame block of each file is parsed
            # before bailing out -- looks intentional, but worth confirming.
            break
    csv2pickle(train_sample, filename='all_train_sample.pkl')
    csv2pickle(test_sample, filename='all_test_sample.pkl')
if __name__ == '__main__':
args = get_args()
    main(args)
import csv
import pickle
import numpy as np
import argparse
def get_args():
parser = argparse.ArgumentParser('NTU RGB+D Dataset Preprocessing')
parser.add_argument('--file-path', type=str, default='D:/nturgb+d_skeletons')
return parser.parse_args()
def csv2pickle(data, filename, base_dir='./', data_mode='train'):
print('saving pickle ...')
filename = os.path.join(base_dir, filename)
with open(filename, "wb") as f:
pickle.dump(data, f)
def load_data(filename):
with open(filename, 'r') as f:
return f.readlines()
def save_csv(result, out_dir):
with open(out_dir, 'w', encoding='utf-8', newline='') as f:
wr = csv.writer(f)
for d in result:
wr.writerow(d)
def main(args):
skeleton_file_list = os.listdir(args.file_path)
cross_val = [
1, 2, 4, 5, 8,
9, 13, 14, 15, 16,
17, 18, 19, 25, 27,
28, 31, 34, 35, 38
]
train_sample = {}
test_sample = {}
train_sample_count = 0
test_sample_count = 0
for file in skeleton_file_list:
filename = file.split('.')[0]
subject = int(filename[9:12])
label = int(filename[-3:])
print(filename, label, subject)
data = load_data(os.path.join(args.file_path, file))
count = 1
sub_sample = {}
while len(data) != count:
person_count = int(data[count])
count += 1
joint = np.zeros(shape=[25, 3], dtype=np.float)
for index in range(person_count):
joint_cnt = 2
while joint_cnt != 27:
line = list(data[count + joint_cnt].strip().split(' '))
joint[joint_cnt - 2, :] = [float(n) for n in line[:3]]
joint_cnt += 1
if index in sub_sample.keys():
sub_sample[index].append(joint)
else:
sub_sample[index] = [joint]
count += 27
for key, item in sub_sample.items():
if subject in cross_val:
train_sample[train_sample_count] = [item, label]
train_sample_count += 1
else:
test_sample[test_sample_count] = [item, label]
test_sample_count += 1
break
csv2pickle(train_sample, filename='all_train_sample.pkl')
csv2pickle(test_sample, filename='all_test_sample.pkl')
if __name__ == '__main__':
args = get_args()
    main(args)
"""Remove"""
"""Template task in which you prevent something from falling so ball can roll into container."""
import numpy as np
import phyre.creator as creator_lib
import phyre.virtual_tools as vt
@creator_lib.define_task_template(
    seed=range(1000),
    version="2",
    search_params=dict(required_flags=['BALL:GOOD_STABLE'],
                       excluded_flags=['BALL:TRIVIAL'],
                       diversify_tier='ball'),
)
def build_task(C, seed):
    """Build one 'Remove' task instance.

    A ball sits on a slope; a round block on a platform blocks its path to the
    goal container on the right (layout optionally mirrored left/right).
    """
    rng = np.random.RandomState(seed=seed)
    # Randomisation ranges (virtual-tools coordinate space).
    goalWidth = [100, 200]
    goalHeight = [80, 100]
    ballRad = [7, 12]
    slopeLeft = [40, 70]
    slopeRight = [5, 10]
    slopeWidth = [200, 275]
    slopeHeight = [380, 450]
    platformLeft = [300, 400]
    platformRight = [475, 500]
    platformHeight = [200, 250]
    blockSize = [30, 40]
    pWidth = 15  ## Platform Vertical Width
    # Draw order must stay fixed so a given seed keeps producing the same task.
    flip_lr = rng.uniform(0, 1) < 0.5
    gW = rng.uniform(goalWidth[0], goalWidth[1])
    gH = rng.uniform(goalHeight[0], goalHeight[1])
    bR = rng.uniform(ballRad[0], ballRad[1])
    sL = rng.uniform(slopeLeft[0], slopeLeft[1])
    sR = rng.uniform(slopeRight[0], slopeRight[1])
    sW = rng.uniform(slopeWidth[0], slopeWidth[1])
    sH = rng.uniform(slopeHeight[0], slopeHeight[1])
    pL = rng.uniform(platformLeft[0], platformLeft[1])
    pR = rng.uniform(platformRight[0], platformRight[1])
    pH = rng.uniform(platformHeight[0], platformHeight[1])
    bS = rng.uniform(blockSize[0], blockSize[1])
    jitter = rng.uniform(0, pR - pL)
    ## Set params (unused `bLeft` removed)
    slopeVerts = [[0, sH], [0, sH + sL], [sW, sH + sR], [sW, sH]]
    if flip_lr:
        slopeVerts = vt.flip_left_right(slopeVerts)
        blockXPos = vt.flip_left_right(pL + jitter)
        ballXPos = vt.flip_left_right(bR + 5)
    else:
        blockXPos = pL + jitter
        ballXPos = bR + 5
    ## Make the world
    slopeVerts.reverse()
    slope = C.add_convex_polygon(vt.convert_phyre_tools_vertices(slopeVerts),
                                 False)
    container, _ = vt.add_container(
        C, [[vt.VT_SCALE - 5 - gW, gH], [vt.VT_SCALE - 5 - gW, 0.0],
            [vt.VT_SCALE - 5, 0.0], [vt.VT_SCALE - 5, gH]],
        10,
        False,
        True,
        flip_lr=flip_lr)
    block = C.add('dynamic ball',
                  bS / vt.VT_SCALE,
                  center_x=blockXPos * C.scene.width / vt.VT_SCALE,
                  bottom=(pH + pWidth) * C.scene.width / vt.VT_SCALE)
    platform = vt.add_box(C, [pL, pH, pR, pH + pWidth], False, flip_lr=flip_lr)
    ball = C.add('dynamic ball',
                 bR * 2 / vt.VT_SCALE,
                 center_x=ballXPos * C.scene.width / vt.VT_SCALE,
                 center_y=(sL + sH + bR) * C.scene.height / vt.VT_SCALE)
    # Goal: the ball must end up touching the container.
    C.update_task(body1=ball,
                  body2=container,
                  relationships=[C.SpatialRelationship.TOUCHING])
    # Fix: this final line was corrupted by pasted dataset metadata.
    C.set_meta(C.SolutionTier.VIRTUAL_TOOLS)
"""Template task in which you prevent something from falling so ball can roll into container."""
import numpy as np
import phyre.creator as creator_lib
import phyre.virtual_tools as vt
@creator_lib.define_task_template(
seed=range(1000),
version="2",
search_params=dict(required_flags=['BALL:GOOD_STABLE'],
excluded_flags=['BALL:TRIVIAL'],
diversify_tier='ball'),
)
def build_task(C, seed):
rng = np.random.RandomState(seed=seed)
goalWidth = [100, 200]
goalHeight = [80, 100]
ballRad = [7, 12]
slopeLeft = [40, 70]
slopeRight = [5, 10]
slopeWidth = [200, 275]
slopeHeight = [380, 450]
platformLeft = [300, 400]
platformRight = [475, 500]
platformHeight = [200, 250]
blockSize = [30, 40]
pWidth = 15 ## Platform Vertical Width
flip_lr = rng.uniform(0, 1) < 0.5
gW = rng.uniform(goalWidth[0], goalWidth[1])
gH = rng.uniform(goalHeight[0], goalHeight[1])
bR = rng.uniform(ballRad[0], ballRad[1])
sL = rng.uniform(slopeLeft[0], slopeLeft[1])
sR = rng.uniform(slopeRight[0], slopeRight[1])
sW = rng.uniform(slopeWidth[0], slopeWidth[1])
sH = rng.uniform(slopeHeight[0], slopeHeight[1])
pL = rng.uniform(platformLeft[0], platformLeft[1])
pR = rng.uniform(platformRight[0], platformRight[1])
pH = rng.uniform(platformHeight[0], platformHeight[1])
bS = rng.uniform(blockSize[0], blockSize[1])
jitter = rng.uniform(0, pR - pL)
## Set params
bLeft = (pR - pL) / 2 - bS / 2 ## Left bound of block
slopeVerts = [[0, sH], [0, sH + sL], [sW, sH + sR], [sW, sH]]
if flip_lr:
slopeVerts = vt.flip_left_right(slopeVerts)
blockXPos = vt.flip_left_right(pL + jitter)
ballXPos = vt.flip_left_right(bR + 5)
else:
blockXPos = pL + jitter
ballXPos = bR + 5
## Make the world
slopeVerts.reverse()
slope = C.add_convex_polygon(vt.convert_phyre_tools_vertices(slopeVerts),
False)
container, _ = vt.add_container(
C, [[vt.VT_SCALE - 5 - gW, gH], [vt.VT_SCALE - 5 - gW, 0.0],
[vt.VT_SCALE - 5, 0.0], [vt.VT_SCALE - 5, gH]],
10,
False,
True,
flip_lr=flip_lr)
#block = vt.add_box(C, [pL, pH + pWidth, bS + pL, pH + pWidth + bS], True)
block = C.add('dynamic ball',
bS / vt.VT_SCALE,
center_x=blockXPos * C.scene.width / vt.VT_SCALE,
bottom=(pH + pWidth) * C.scene.width / vt.VT_SCALE)
platform = vt.add_box(C, [pL, pH, pR, pH + pWidth], False, flip_lr=flip_lr)
ball = C.add('dynamic ball',
bR * 2 / vt.VT_SCALE,
center_x=ballXPos * C.scene.width / vt.VT_SCALE,
center_y=(sL + sH + bR) * C.scene.height / vt.VT_SCALE)
C.update_task(body1=ball,
body2=container,
relationships=[C.SpatialRelationship.TOUCHING])
    C.set_meta(C.SolutionTier.VIRTUAL_TOOLS)
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_VERSION_GL_4_6'
def _f( function ):
    # Bind *function* to the GL platform implementation for this extension,
    # wiring in the standard PyOpenGL error checker.
    return _p.createFunction( function,_p.PLATFORM.GL,'GL_VERSION_GL_4_6',error_checker=_errors._error_checker)
# OpenGL 4.6 enumerant values (auto-generated from the Khronos registry).
GL_CLIPPING_INPUT_PRIMITIVES=_C('GL_CLIPPING_INPUT_PRIMITIVES',0x82F6)
GL_CLIPPING_OUTPUT_PRIMITIVES=_C('GL_CLIPPING_OUTPUT_PRIMITIVES',0x82F7)
GL_COMPUTE_SHADER_INVOCATIONS=_C('GL_COMPUTE_SHADER_INVOCATIONS',0x82F5)
GL_CONTEXT_FLAG_NO_ERROR_BIT=_C('GL_CONTEXT_FLAG_NO_ERROR_BIT',0x00000008)
GL_CONTEXT_RELEASE_BEHAVIOR=_C('GL_CONTEXT_RELEASE_BEHAVIOR',0x82FB)
GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH=_C('GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH',0x82FC)
GL_FRAGMENT_SHADER_INVOCATIONS=_C('GL_FRAGMENT_SHADER_INVOCATIONS',0x82F4)
GL_GEOMETRY_SHADER_INVOCATIONS=_C('GL_GEOMETRY_SHADER_INVOCATIONS',0x887F)
GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED=_C('GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED',0x82F3)
GL_MAX_TEXTURE_MAX_ANISOTROPY=_C('GL_MAX_TEXTURE_MAX_ANISOTROPY',0x84FF)
GL_NONE=_C('GL_NONE',0)
GL_NUM_SPIR_V_EXTENSIONS=_C('GL_NUM_SPIR_V_EXTENSIONS',0x9554)
GL_PARAMETER_BUFFER=_C('GL_PARAMETER_BUFFER',0x80EE)
GL_PARAMETER_BUFFER_BINDING=_C('GL_PARAMETER_BUFFER_BINDING',0x80EF)
GL_POLYGON_OFFSET_CLAMP=_C('GL_POLYGON_OFFSET_CLAMP',0x8E1B)
GL_PRIMITIVES_SUBMITTED=_C('GL_PRIMITIVES_SUBMITTED',0x82EF)
GL_SHADER_BINARY_FORMAT_SPIR_V=_C('GL_SHADER_BINARY_FORMAT_SPIR_V',0x9551)
GL_SPIR_V_BINARY=_C('GL_SPIR_V_BINARY',0x9552)
GL_SPIR_V_EXTENSIONS=_C('GL_SPIR_V_EXTENSIONS',0x9553)
GL_TESS_CONTROL_SHADER_PATCHES=_C('GL_TESS_CONTROL_SHADER_PATCHES',0x82F1)
GL_TESS_EVALUATION_SHADER_INVOCATIONS=_C('GL_TESS_EVALUATION_SHADER_INVOCATIONS',0x82F2)
GL_TEXTURE_MAX_ANISOTROPY=_C('GL_TEXTURE_MAX_ANISOTROPY',0x84FE)
GL_TRANSFORM_FEEDBACK_OVERFLOW=_C('GL_TRANSFORM_FEEDBACK_OVERFLOW',0x82EC)
GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW=_C('GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW',0x82ED)
GL_VERTEX_SHADER_INVOCATIONS=_C('GL_VERTEX_SHADER_INVOCATIONS',0x82F0)
GL_VERTICES_SUBMITTED=_C('GL_VERTICES_SUBMITTED',0x82EE)
# Returns None; args per the _p.types decorator: GLenum mode, void* indirect,
# GLintptr drawcount, GLsizei maxdrawcount, GLsizei stride.
@_f
@_p.types(None,_cs.GLenum,ctypes.c_void_p,_cs.GLintptr,_cs.GLsizei,_cs.GLsizei)
def glMultiDrawArraysIndirectCount(mode,indirect,drawcount,maxdrawcount,stride):pass
# Returns None; args per the _p.types decorator: GLenum mode, GLenum type,
# void* indirect, GLintptr drawcount, GLsizei maxdrawcount, GLsizei stride.
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,ctypes.c_void_p,_cs.GLintptr,_cs.GLsizei,_cs.GLsizei)
def glMultiDrawElementsIndirectCount(mode,type,indirect,drawcount,maxdrawcount,stride):pass
# Returns None; args per the _p.types decorator: GLfloat factor, GLfloat units,
# GLfloat clamp.
@_f
@_p.types(None,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glPolygonOffsetClamp(factor,units,clamp):pass
# Returns None; args per the _p.types decorator: GLuint shader, GLchar* entry
# point, GLuint constant count, GLuint* constant indices, GLuint* values.
# Fix: the def line was corrupted by pasted dataset metadata ("| env/Lib/...").
@_f
@_p.types(None,_cs.GLuint,arrays.GLcharArray,_cs.GLuint,arrays.GLuintArray,arrays.GLuintArray)
def glSpecializeShader(shader,pEntryPoint,numSpecializationConstants,pConstantIndex,pConstantValue):pass
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_VERSION_GL_4_6'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_VERSION_GL_4_6',error_checker=_errors._error_checker)
GL_CLIPPING_INPUT_PRIMITIVES=_C('GL_CLIPPING_INPUT_PRIMITIVES',0x82F6)
GL_CLIPPING_OUTPUT_PRIMITIVES=_C('GL_CLIPPING_OUTPUT_PRIMITIVES',0x82F7)
GL_COMPUTE_SHADER_INVOCATIONS=_C('GL_COMPUTE_SHADER_INVOCATIONS',0x82F5)
GL_CONTEXT_FLAG_NO_ERROR_BIT=_C('GL_CONTEXT_FLAG_NO_ERROR_BIT',0x00000008)
GL_CONTEXT_RELEASE_BEHAVIOR=_C('GL_CONTEXT_RELEASE_BEHAVIOR',0x82FB)
GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH=_C('GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH',0x82FC)
GL_FRAGMENT_SHADER_INVOCATIONS=_C('GL_FRAGMENT_SHADER_INVOCATIONS',0x82F4)
GL_GEOMETRY_SHADER_INVOCATIONS=_C('GL_GEOMETRY_SHADER_INVOCATIONS',0x887F)
GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED=_C('GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED',0x82F3)
GL_MAX_TEXTURE_MAX_ANISOTROPY=_C('GL_MAX_TEXTURE_MAX_ANISOTROPY',0x84FF)
GL_NONE=_C('GL_NONE',0)
GL_NUM_SPIR_V_EXTENSIONS=_C('GL_NUM_SPIR_V_EXTENSIONS',0x9554)
GL_PARAMETER_BUFFER=_C('GL_PARAMETER_BUFFER',0x80EE)
GL_PARAMETER_BUFFER_BINDING=_C('GL_PARAMETER_BUFFER_BINDING',0x80EF)
GL_POLYGON_OFFSET_CLAMP=_C('GL_POLYGON_OFFSET_CLAMP',0x8E1B)
GL_PRIMITIVES_SUBMITTED=_C('GL_PRIMITIVES_SUBMITTED',0x82EF)
GL_SHADER_BINARY_FORMAT_SPIR_V=_C('GL_SHADER_BINARY_FORMAT_SPIR_V',0x9551)
GL_SPIR_V_BINARY=_C('GL_SPIR_V_BINARY',0x9552)
GL_SPIR_V_EXTENSIONS=_C('GL_SPIR_V_EXTENSIONS',0x9553)
GL_TESS_CONTROL_SHADER_PATCHES=_C('GL_TESS_CONTROL_SHADER_PATCHES',0x82F1)
GL_TESS_EVALUATION_SHADER_INVOCATIONS=_C('GL_TESS_EVALUATION_SHADER_INVOCATIONS',0x82F2)
GL_TEXTURE_MAX_ANISOTROPY=_C('GL_TEXTURE_MAX_ANISOTROPY',0x84FE)
GL_TRANSFORM_FEEDBACK_OVERFLOW=_C('GL_TRANSFORM_FEEDBACK_OVERFLOW',0x82EC)
GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW=_C('GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW',0x82ED)
GL_VERTEX_SHADER_INVOCATIONS=_C('GL_VERTEX_SHADER_INVOCATIONS',0x82F0)
GL_VERTICES_SUBMITTED=_C('GL_VERTICES_SUBMITTED',0x82EE)
@_f
@_p.types(None,_cs.GLenum,ctypes.c_void_p,_cs.GLintptr,_cs.GLsizei,_cs.GLsizei)
def glMultiDrawArraysIndirectCount(mode,indirect,drawcount,maxdrawcount,stride):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,ctypes.c_void_p,_cs.GLintptr,_cs.GLsizei,_cs.GLsizei)
def glMultiDrawElementsIndirectCount(mode,type,indirect,drawcount,maxdrawcount,stride):pass
@_f
@_p.types(None,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glPolygonOffsetClamp(factor,units,clamp):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLcharArray,_cs.GLuint,arrays.GLuintArray,arrays.GLuintArray)
def glSpecializeShader(shader,pEntryPoint,numSpecializationConstants,pConstantIndex,pConstantValue):pass
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
    """Initial migration: creates the ``Account`` and ``Bookmark`` tables.

    Auto-generated by Django; field definitions are kept verbatim.
    NOTE(review): the ``b"..."`` help_text values are Python 2-era bytes
    literals -- harmless to the schema, deliberately left untouched.
    (Fix applied: the final ``]`` line was corrupted by pasted dataset
    metadata; it is restored here.)
    """

    dependencies = []

    operations = [
        migrations.CreateModel(
            name="Account",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("time_created", models.DateTimeField(auto_now_add=True)),
                ("time_modified", models.DateTimeField(auto_now=True)),
                (
                    "username",
                    models.CharField(
                        help_text=b"eg, 'philgyford'", unique=True, max_length=30
                    ),
                ),
                (
                    "url",
                    models.URLField(
                        help_text=b"eg, 'https://pinboard.in/u:philgyford'",
                        max_length=255,
                    ),
                ),
                (
                    "api_token",
                    models.CharField(
                        help_text=b'From https://pinboard.in/settings/password eg, "<PASSWORD>"',  # noqa: E501
                        max_length=51,
                    ),
                ),
            ],
            options={"abstract": False},
        ),
        migrations.CreateModel(
            name="Bookmark",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("time_created", models.DateTimeField(auto_now_add=True)),
                ("time_modified", models.DateTimeField(auto_now=True)),
                ("title", models.CharField(max_length=255, blank=True)),
                (
                    "permalink",
                    models.URLField(
                        help_text=b"URL of the item on the service's website.",
                        blank=True,
                    ),
                ),
                (
                    "summary",
                    models.TextField(
                        help_text=b"eg, First paragraph of a blog post, start of the description of a photo, all of a Tweet's text, etc.",  # noqa: E501
                        blank=True,
                    ),
                ),
                (
                    "is_private",
                    models.BooleanField(
                        default=False,
                        help_text=b"If True, this item should NOT be shown on public-facing pages.",  # noqa: E501
                    ),
                ),
                (
                    "fetch_time",
                    models.DateTimeField(
                        help_text=b"The time the Raw data was last fetched.",
                        null=True,
                        blank=True,
                    ),
                ),
                (
                    "raw",
                    models.TextField(
                        help_text=b"The raw JSON from the API.", blank=True
                    ),
                ),
                (
                    "url",
                    models.TextField(
                        unique=True, validators=[django.core.validators.URLValidator()]
                    ),
                ),
                ("post_time", models.DateTimeField()),
                (
                    "description",
                    models.TextField(
                        help_text=b"The 'extended' text description.", blank=True
                    ),
                ),
                ("to_read", models.BooleanField(default=False)),
                ("shared", models.BooleanField(default=True)),
                (
                    "account",
                    models.ForeignKey(to="pinboard.Account", on_delete=models.CASCADE),
                ),
            ],
            options={"abstract": False},
        ),
    ]
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name="Account",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
("time_created", models.DateTimeField(auto_now_add=True)),
("time_modified", models.DateTimeField(auto_now=True)),
(
"username",
models.CharField(
help_text=b"eg, 'philgyford'", unique=True, max_length=30
),
),
(
"url",
models.URLField(
help_text=b"eg, 'https://pinboard.in/u:philgyford'",
max_length=255,
),
),
(
"api_token",
models.CharField(
help_text=b'From https://pinboard.in/settings/password eg, "<PASSWORD>"', # noqa: E501
max_length=51,
),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="Bookmark",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
("time_created", models.DateTimeField(auto_now_add=True)),
("time_modified", models.DateTimeField(auto_now=True)),
("title", models.CharField(max_length=255, blank=True)),
(
"permalink",
models.URLField(
help_text=b"URL of the item on the service's website.",
blank=True,
),
),
(
"summary",
models.TextField(
help_text=b"eg, First paragraph of a blog post, start of the description of a photo, all of a Tweet's text, etc.", # noqa: E501
blank=True,
),
),
(
"is_private",
models.BooleanField(
default=False,
help_text=b"If True, this item should NOT be shown on public-facing pages.", # noqa: E501
),
),
(
"fetch_time",
models.DateTimeField(
help_text=b"The time the Raw data was last fetched.",
null=True,
blank=True,
),
),
(
"raw",
models.TextField(
help_text=b"The raw JSON from the API.", blank=True
),
),
(
"url",
models.TextField(
unique=True, validators=[django.core.validators.URLValidator()]
),
),
("post_time", models.DateTimeField()),
(
"description",
models.TextField(
help_text=b"The 'extended' text description.", blank=True
),
),
("to_read", models.BooleanField(default=False)),
("shared", models.BooleanField(default=True)),
(
"account",
models.ForeignKey(to="pinboard.Account", on_delete=models.CASCADE),
),
],
options={"abstract": False},
),
    ]
import os
import argparse
import random
from tqdm import tqdm
import logging
from typing import Dict
logger = logging.getLogger(__name__)
def get_parser() -> argparse.ArgumentParser:
    """Build the command-line parser for multinomial sentence sampling."""
    parser = argparse.ArgumentParser(
        description="Sample Sentences from monolingual corpora to train tokenizer"
    )
    parser.add_argument("--datasets_path", type=str, required=True,
                        help="Path containing monolingual corpora for different languages")
    parser.add_argument("--output_path", type=str, required=True,
                        help="path to store sampled sentences")
    parser.add_argument("--alpha", type=float, default=0.3, help="multinomial alpha")
    parser.add_argument("--seed", type=int, default=10, help="random seed")
    return parser
def calc_num_samples_sentences(
    lang_num_lines: Dict[str, int], alpha: float
) -> Dict[str, int]:
    """Compute per-language sample counts via exponentiated multinomial smoothing.

    Each language's empirical probability ``p_l = n_l / N`` is re-weighted to
    ``q_l = p_l**alpha / sum_k p_k**alpha`` (alpha < 1 up-samples low-resource
    languages), and the returned count is ``round(q_l * n_l)``.

    :param lang_num_lines: mapping of language code -> number of corpus lines
    :param alpha: smoothing exponent for the multinomial distribution
    :return: mapping of language code -> number of sentences to sample
    """
    total_sentences = sum(lang_num_lines.values())
    lang_prob = {lang: n / total_sentences for lang, n in lang_num_lines.items()}
    normaliser = sum(p ** alpha for p in lang_prob.values())
    new_prob = {lang: p ** alpha / normaliser for lang, p in lang_prob.items()}
    # Direct dict lookup replaces the original O(n^2) nested scan that matched
    # every language against every other key.
    return {
        lang: round(new_prob[lang] * num_lines)
        for lang, num_lines in lang_num_lines.items()
    }
def main():
    """Sample sentences from each monolingual corpus and write them out."""
    args = get_parser().parse_args()
    random.seed(args.seed)
    logger.info("***** Sampling Sentences for Tokenizer Training *****")
    corpus_files = [
        os.path.join(args.datasets_path, name)
        for name in os.listdir(args.datasets_path)
    ]
    logger.info(f"Number of training files found: {len(corpus_files)}")
    corpus_by_lang = {}
    lines_per_lang = {}
    for path in corpus_files:
        # The language code is encoded as the file extension, e.g. "corpus.en".
        lang_code = path.split(".")[-1]
        with open(path) as handle:
            lines = handle.readlines()
        corpus_by_lang[lang_code] = lines
        lines_per_lang[lang_code] = len(lines)
    sampled_sentences = calc_num_samples_sentences(lines_per_lang, args.alpha)
    for lang in tqdm(sampled_sentences.keys()):
        logger.info(f"Number of sampled sentences for {lang} = {sampled_sentences[lang]}")
        chosen = random.sample(corpus_by_lang[lang], sampled_sentences[lang])
        target = os.path.join(args.output_path, "sampled." + lang)
        with open(target, "w") as out_file:
            out_file.writelines(chosen)
if __name__ == "__main__":
    main()
import argparse
import random
from tqdm import tqdm
import logging
from typing import Dict
logger = logging.getLogger(__name__)
def get_parser() -> argparse.ArgumentParser:
    """Build the command-line parser for the tokenizer-sentence sampling script."""
    p = argparse.ArgumentParser(
        description="Sample Sentences from monolingual corpora to train tokenizer"
    )
    p.add_argument(
        "--datasets_path",
        type=str,
        required=True,
        help="Path containing monolingual corpora for different languages",
    )
    p.add_argument("--output_path", type=str, required=True, help="path to store sampled sentences")
    p.add_argument("--alpha", type=float, default=0.3, help="multinomial alpha")
    p.add_argument("--seed", type=int, default=10, help="random seed")
    return p
def calc_num_samples_sentences(
    lang_num_lines: Dict[str, int], alpha: float
) -> Dict[str, int]:
    """Return how many sentences to draw per language after alpha-smoothing.

    The empirical language distribution is raised to `alpha`, renormalised,
    and each language's smoothed probability is multiplied by its own line
    count and rounded.
    """
    grand_total = sum(lang_num_lines.values())
    # Empirical language distribution.
    raw_prob = {code: count / grand_total for code, count in lang_num_lines.items()}
    # Normalising constant for the exponentiated probabilities.
    z = 0
    for p in raw_prob.values():
        z += p ** alpha
    smoothed = {code: (p ** alpha) / z for code, p in raw_prob.items()}
    counts = {}
    for code, n_lines in lang_num_lines.items():
        counts[code] = round(smoothed[code] * n_lines)
    return counts
def main():
    """Read all corpora under datasets_path, sample per-language sentence subsets
    (alpha-smoothed), and write one "sampled.<lang>" file per language."""
    parser = get_parser()
    args = parser.parse_args()
    random.seed(args.seed)  # reproducible sampling
    logger.info("***** Sampling Sentences for Tokenizer Training *****")
    files = [
        os.path.join(args.datasets_path, file)
        for file in os.listdir(args.datasets_path)
    ]
    logger.info(f"Number of training files found: {len(files)}")
    lang_corpus = {}     # language code -> list of corpus lines
    lang_num_lines = {}  # language code -> line count
    for file in files:
        # Language code is taken from the file extension ("corpus.<lang>") —
        # TODO confirm the dataset follows this naming convention.
        lang_code = file.split(".")[-1]
        with open(file) as f:
            txt = f.readlines()
            lang_corpus[lang_code] = txt
            lang_num_lines[lang_code] = len(txt)
    sampled_sentences = calc_num_samples_sentences(lang_num_lines, args.alpha)
    for lang in tqdm(sampled_sentences.keys()):
        logger.info(f"Number of sampled sentences for {lang} = {sampled_sentences[lang]}")
        # Sampling without replacement; size is always <= the corpus size.
        sentences = random.sample(lang_corpus[lang], sampled_sentences[lang])
        file = os.path.join(args.output_path, "sampled." + lang)
        with open(file, "w") as out_file:
            out_file.writelines(sentences)
if __name__ == "__main__":
main() | 0.671686 | 0.185172 |
import numpy as np
import andi
import csv
from tensorflow.keras.utils import Sequence
"""
Dataset generators
"""
def generate_tracks_regression(n, dimensions, min_T=5, max_T=1001):
    """
    Generate tracks for training regression model
    Parameters:
        n: number of tracks to generate
        dimensions: number of dimensions (currently only supports 1 and 2)
        min_T: minimum track length
        max_T: maximum track length (e.g. for 1001 will generate tracks up to 1000 steps)
    Returns:
        tracks_array: a numpy array of shape [n, max_T, dimensions] containing the generated tracks
        exponents: a numpy array of length n, containing the anomalous exponent value for each track
    """
    # Create tracks
    np.random.seed()  # reseed from OS entropy so repeated/parallel calls differ
    AD = andi.andi_datasets()
    # Task 1 = anomalous-exponent regression (AnDi challenge convention).
    X1, Y1, X2, Y2, X3, Y3 = AD.andi_dataset(N=n, min_T=min_T, max_T=max_T, tasks=[1], dimensions=[dimensions])
    # Per-dimension lists are indexed by (dimensions - 1).
    exponents = np.array(Y1[dimensions - 1])
    tracks = X1[dimensions - 1]
    # Package into array and preprocess
    tracks_array = package_tracks(tracks=tracks, max_T=max_T, dimensions=dimensions)
    return tracks_array, exponents
class TrackGeneratorRegression(Sequence):
    """Keras Sequence that simulates a fresh regression batch on every access."""
    def __init__(self, batches, batch_size, dimensions, min_T, max_T):
        self.batches = batches        # batches per epoch
        self.batch_size = batch_size  # tracks per batch
        self.dimensions = dimensions  # 1 or 2
        self.min_T = min_T            # minimum simulated track length
        self.max_T = max_T            # maximum simulated track length
    def __getitem__(self, item):
        # `item` is ignored: data is re-simulated on every call.
        tracks, exponents = generate_tracks_regression(self.batch_size, dimensions=self.dimensions, min_T=self.min_T,
                                                       max_T=self.max_T)
        return tracks, exponents
    def __len__(self):
        # Number of batches that make up one epoch.
        return self.batches
def generate_tracks_classification(n, dimensions, min_T=5, max_T=1001):
    """
    Generate tracks for training classification model
    Parameters:
        n: number of tracks to generate
        dimensions: number of dimensions (currently only supports 1 and 2)
        min_T: minimum track length
        max_T: maximum track length (e.g. for 1001 will generate tracks up to 1000 steps)
    Returns:
        tracks_array: a numpy array of shape [n, max_T, dimensions] containing the generated tracks
        classes: a numpy array of length n, representing the model class for each track (see andi_datasets package)
    """
    # Create tracks
    np.random.seed()  # reseed so repeated/parallel calls don't duplicate data
    AD = andi.andi_datasets()
    # Task 2 = diffusion-model classification (AnDi challenge convention).
    X1, Y1, X2, Y2, X3, Y3 = AD.andi_dataset(N=n, min_T=min_T, max_T=max_T, tasks=[2], dimensions=[dimensions])
    # Labels arrive as floats; cast to integer class ids.
    classes = np.array(Y2[dimensions - 1]).astype(int)
    tracks = X2[dimensions - 1]
    # Package into array and preprocess
    tracks_array = package_tracks(tracks=tracks, max_T=max_T, dimensions=dimensions)
    return tracks_array, classes
class TrackGeneratorClassification(Sequence):
    """Keras Sequence that simulates a fresh classification batch on every access."""
    def __init__(self, batches, batch_size, dimensions, min_T, max_T):
        self.batches = batches        # batches per epoch
        self.batch_size = batch_size  # tracks per batch
        self.dimensions = dimensions  # 1 or 2
        self.min_T = min_T            # minimum simulated track length
        self.max_T = max_T            # maximum simulated track length
    def __getitem__(self, item):
        # `item` is ignored: data is re-simulated on every call.
        tracks, classes = generate_tracks_classification(self.batch_size, dimensions=self.dimensions, min_T=self.min_T,
                                                         max_T=self.max_T)
        return tracks, classes
    def __len__(self):
        return self.batches
def generate_tracks_segmentation(n, dimensions):
    """
    Generate tracks for training segmentation model (all length 200)
    Parameters:
        n: number of tracks to generate
        dimensions: number of dimensions (currently only supports 1 and 2)
    Returns:
        tracks_array: a numpy array of shape [n, 200, dimensions] containing the generated tracks
        positions: a numpy array of length n, representing the switch point for each model
    """
    # Create tracks
    np.random.seed()  # reseed so repeated/parallel calls don't duplicate data
    AD = andi.andi_datasets()
    # Task 3 = switchpoint segmentation; min_T/max_T pin every track to length 200.
    X1, Y1, X2, Y2, X3, Y3 = AD.andi_dataset(N=n, tasks=[3], dimensions=[dimensions], min_T=200, max_T=201)
    # Column 1 of the task-3 labels holds the switchpoint; shift to 0-based index.
    positions = np.array(Y3[dimensions - 1])[:, 1].astype(int) - 1
    tracks = X3[dimensions - 1]
    # Package into array and preprocess
    tracks_array = package_tracks(tracks=tracks, max_T=200, dimensions=dimensions)
    return tracks_array, positions
class TrackGeneratorSegmentation(Sequence):
    """Keras Sequence that simulates a fresh segmentation batch on every access."""
    def __init__(self, batches, batch_size, dimensions):
        self.batches = batches        # batches per epoch
        self.batch_size = batch_size  # tracks per batch
        self.dimensions = dimensions  # 1 or 2
    def __getitem__(self, item):
        # `item` is ignored: data is re-simulated on every call.
        tracks, positions = generate_tracks_segmentation(self.batch_size, dimensions=self.dimensions)
        return tracks, positions
    def __len__(self):
        return self.batches
"""
Track processing
"""
def package_tracks(tracks, max_T, dimensions):
    """
    Convert tracks from list format (i.e. output from andi_datasets) to numpy array
    This requires shorter tracks to be padded with zeros (up to length max_T)
    Parameters:
        tracks: tracks in list format (i.e. output from andi_datasets function)
        max_T: the maximum track length
        dimensions: number of track dimensions (1 or 2 supported)
    Returns:
        tracks_array: a numpy array containing padded and preprocessed tracks
    """
    # Package into array
    n = len(tracks)
    tracks_array = np.zeros([n, max_T, dimensions])
    if dimensions == 1:
        for i, t in enumerate(tracks):
            # Right-align each track; zero padding occupies the leading entries.
            tracks_array[i, max_T - len(t):, 0] = t
    elif dimensions == 2:
        for i, t in enumerate(tracks):
            # 2D tracks arrive flattened as [x_0..x_T, y_0..y_T]; split in half.
            len_t = int(len(t) / 2)
            tracks_array[i, max_T - len_t:, 0] = t[:len_t]
            tracks_array[i, max_T - len_t:, 1] = t[len_t:]
    # Preprocess (differencing + normalisation). NOTE(review): any `dimensions`
    # other than 1/2 leaves the array all-zero and crashes in preprocess_tracks.
    tracks_array = preprocess_tracks(tracks_array)
    return tracks_array
def preprocess_tracks(tracks):
    """
    Preprocess tracks by taking the difference between successive positions and normalising (dividing by the mean step
    size) -> input to models
    Parameters:
        tracks: numpy array of tracks (output from one of the above functions)
    Returns:
        tracks_processed: a numpy array of shape [n, max_T-1, d]
    """
    # 1D tracks
    if tracks.shape[2] == 1:
        diff = np.diff(tracks[:, :, 0], axis=1)
        # Denominator counts non-zero coordinates — assumes zeros only occur as
        # left padding, not as genuine positions (TODO confirm upstream).
        meanstep = np.sum(abs(diff), axis=1) / np.sum(tracks[:, :, 0] != 0, axis=1)
        tracks_processed = np.expand_dims(diff / np.expand_dims(meanstep, axis=-1), axis=-1)
    # 2D tracks
    elif tracks.shape[2] == 2:
        dx = np.diff(tracks[:, :, 0], axis=1)
        dy = np.diff(tracks[:, :, 1], axis=1)
        # Mean Euclidean step length, with the same padding assumption as above.
        meanstep = np.expand_dims(np.sum(((dx ** 2) + (dy ** 2)) ** 0.5, axis=1) / np.sum(tracks[:, :, 0] != 0, axis=1),
                                  axis=-1)
        tracks_processed = np.dstack((dx / meanstep, dy / meanstep))
    # NOTE(review): any other channel count falls through and raises
    # UnboundLocalError on the return below.
    return tracks_processed
def split_tracks(tracks, positions, dimensions=1, max_T=200):
    """
    Split tracks according to positions (i.e. output from a segmentation model)
    Parameters:
        tracks: tracks in list format (i.e. output from andi_datasets)
        positions: per-track switchpoint indices
        dimensions: number of track dimensions (1 and 2 supported)
        max_T: maximum track length
    Returns:
        split_tracks_array: numpy array of processed split tracks, shape [n * 2, max_T-1, dimensions]
    """
    # NOTE(review): the hard-coded 199 below is the last valid index of a
    # length-200 track, i.e. it assumes max_T == 200 — confirm before calling
    # with a different max_T.
    g = 0  # can set to > 0 to exclude points in the immediate vicinity of the switchpoint
    # Two output rows per input track: row i = segment before the switchpoint
    # (right-aligned), row i+1 = segment after it (re-zeroed at the switchpoint).
    split_tracks_array = np.zeros([len(tracks) * 2, max_T, dimensions])
    # 1D tracks
    if dimensions == 1:
        i = 0  # counter
        for j, track in enumerate(tracks):
            split_tracks_array[i, max_T - max(positions[j] - g, 0):, 0] = track[:max(positions[j] - g, 0)]
            # Subtract the value at the switchpoint so the second segment starts at 0.
            split_tracks_array[i + 1, min(positions[j] + g, 199):, 0] = track[min(positions[j] + g, 199):] - track[
                min(positions[j] + g, 199)]
            i += 2
    # 2D tracks
    elif dimensions == 2:
        i = 0  # counter
        for j, track in enumerate(tracks):
            # 2D tracks arrive flattened as [x..., y...]; y is re-zeroed at its start.
            len_t = int(len(track) / 2)
            d1 = track[:len_t].flatten()
            d2 = track[len_t:].flatten() - track[len_t]
            split_tracks_array[i, max_T - max(positions[j] - g, 0):, 0] = d1[:max(positions[j] - g, 0)]
            split_tracks_array[i, max_T - max(positions[j] - g, 0):, 1] = d2[:max(positions[j] - g, 0)]
            split_tracks_array[i + 1, min(positions[j] + g, 199):, 0] = d1[min(positions[j] + g, 199):] - d1[
                min(positions[j] + g, 199)]
            split_tracks_array[i + 1, min(positions[j] + g, 199):, 1] = d2[min(positions[j] + g, 199):] - d2[
                min(positions[j] + g, 199)]
            i += 2
    # Preprocess
    split_tracks_array = preprocess_tracks(split_tracks_array)
    return split_tracks_array
"""
File handling
"""
def import_tracks(path):
    """
    Import tracks saved in the competition format. NB only imports 1D and 2D tracks
    Parameters:
        path: path to file
    Returns:
        1D and 2D trajectories in list format
    """
    # Context manager closes the handle deterministically — the previous
    # version passed open(path) straight to csv.reader and leaked it.
    X = [[], []]
    with open(path, 'r') as f:
        reader = csv.reader(f, delimiter=';', lineterminator='\n', quoting=csv.QUOTE_NONNUMERIC)
        for trajs in reader:
            # First field encodes the dimensionality; keep only 1D and 2D tracks.
            if int(trajs[0]) in [1, 2]:
                X[int(trajs[0]) - 1].append(trajs[1:])
    return X[0], X[1]
def import_labels(direc):
    """
    Import labels saved in the competition format. NB only imports 1D and 2D tracks
    For task 1 this is the exponent
    For task 2 this is the model
    For task 3 this is the switchpoint ONLY
    Parameters:
        direc: path to file
    Returns:
        Labels for 1D and 2D tracks
    """
    # Context manager closes the handle deterministically — the previous
    # version passed open(direc) straight to csv.reader and leaked it.
    Y = [[], []]
    with open(direc, 'r') as fh:
        reader = csv.reader(fh, delimiter=';', lineterminator='\n', quoting=csv.QUOTE_NONNUMERIC)
        for labels in reader:
            # First field is the dimensionality; only 1D/2D labels are kept.
            if int(labels[0]) in [1, 2]:
                Y[int(labels[0]) - 1].append(labels[1])
    return np.array(Y[0]), np.array(Y[1])
"""
Other
"""
def rolling_ave(array, window):
    """
    Apply a rolling average to a 1D array. Rolling average window specified by window parameter. Can be useful to apply
    to output of segmentation CNN, but not strictly necessary
    """
    # Pad each end with (window // 2 - 1) of its own edge columns so the output
    # keeps (approximately) the input length.
    array_padded = np.c_[array[:, :int(window / 2)][:, :-1], array, array[:, -int(window / 2):][:, :-1]]
    # Cumulative-sum trick: moving sum of `window` elements in O(n).
    cumsum = np.cumsum(array_padded, axis=1)
    # NOTE(review): the trailing "| andi_funcs.py | import numpy as np" text on
    # the line below looks like dataset-export separator residue fused onto the
    # code, not part of the program.
    return (cumsum[:, window:] - cumsum[:, :-window]) / window | andi_funcs.py | import numpy as np
import andi
import csv
from tensorflow.keras.utils import Sequence
"""
Dataset generators
"""
def generate_tracks_regression(n, dimensions, min_T=5, max_T=1001):
    """Simulate `n` tracks for anomalous-exponent regression (AnDi task 1).

    Returns the preprocessed [n, max_T, dimensions] track array together with
    the anomalous exponent of every track.
    """
    np.random.seed()  # reseed so repeated calls (e.g. worker processes) differ
    dataset = andi.andi_datasets().andi_dataset(
        N=n, min_T=min_T, max_T=max_T, tasks=[1], dimensions=[dimensions]
    )
    # andi_dataset returns (X1, Y1, X2, Y2, X3, Y3); task 1 lives in X1/Y1,
    # indexed per dimensionality.
    raw_tracks = dataset[0][dimensions - 1]
    exponents = np.array(dataset[1][dimensions - 1])
    packed = package_tracks(tracks=raw_tracks, max_T=max_T, dimensions=dimensions)
    return packed, exponents
class TrackGeneratorRegression(Sequence):
    """Keras data source: every requested index yields a freshly simulated batch."""

    def __init__(self, batches, batch_size, dimensions, min_T, max_T):
        self.batches = batches
        self.batch_size = batch_size
        self.dimensions = dimensions
        self.min_T = min_T
        self.max_T = max_T

    def __getitem__(self, item):
        # The index is irrelevant: data is re-simulated on every access.
        return generate_tracks_regression(
            self.batch_size,
            dimensions=self.dimensions,
            min_T=self.min_T,
            max_T=self.max_T,
        )

    def __len__(self):
        # Batches per epoch.
        return self.batches
def generate_tracks_classification(n, dimensions, min_T=5, max_T=1001):
    """Simulate `n` tracks for diffusion-model classification (AnDi task 2).

    Returns the preprocessed [n, max_T, dimensions] track array together with
    the integer class label of every track.
    """
    np.random.seed()  # reseed so repeated calls don't duplicate data
    out = andi.andi_datasets().andi_dataset(
        N=n, min_T=min_T, max_T=max_T, tasks=[2], dimensions=[dimensions]
    )
    # andi_dataset returns (X1, Y1, X2, Y2, X3, Y3); task 2 lives in X2/Y2.
    labels = np.array(out[3][dimensions - 1]).astype(int)
    packed = package_tracks(
        tracks=out[2][dimensions - 1], max_T=max_T, dimensions=dimensions
    )
    return packed, labels
class TrackGeneratorClassification(Sequence):
    """Keras data source that re-simulates a classification batch on every access."""

    def __init__(self, batches, batch_size, dimensions, min_T, max_T):
        self.batches = batches
        self.batch_size = batch_size
        self.dimensions = dimensions
        self.min_T = min_T
        self.max_T = max_T

    def __getitem__(self, item):
        # The requested index is ignored; each call draws a brand-new batch.
        return generate_tracks_classification(
            self.batch_size,
            dimensions=self.dimensions,
            min_T=self.min_T,
            max_T=self.max_T,
        )

    def __len__(self):
        return self.batches
def generate_tracks_segmentation(n, dimensions):
    """Simulate `n` length-200 switching tracks for segmentation (AnDi task 3).

    Returns the preprocessed [n, 200, dimensions] track array together with the
    0-based switchpoint index of every track.
    """
    np.random.seed()  # reseed so repeated calls don't duplicate data
    out = andi.andi_datasets().andi_dataset(
        N=n, tasks=[3], dimensions=[dimensions], min_T=200, max_T=201
    )
    # andi_dataset returns (X1, Y1, X2, Y2, X3, Y3); task 3 lives in X3/Y3.
    # Column 1 of the labels holds the switchpoint; shift to 0-based indexing.
    switchpoints = np.array(out[5][dimensions - 1])[:, 1].astype(int) - 1
    packed = package_tracks(
        tracks=out[4][dimensions - 1], max_T=200, dimensions=dimensions
    )
    return packed, switchpoints
class TrackGeneratorSegmentation(Sequence):
    """Keras data source yielding freshly simulated segmentation batches."""

    def __init__(self, batches, batch_size, dimensions):
        self.batches = batches
        self.batch_size = batch_size
        self.dimensions = dimensions

    def __getitem__(self, item):
        # Index unused: every access simulates a new batch of length-200 tracks.
        return generate_tracks_segmentation(self.batch_size, dimensions=self.dimensions)

    def __len__(self):
        return self.batches
"""
Track processing
"""
def package_tracks(tracks, max_T, dimensions):
    """
    Convert tracks from list format (i.e. output from andi_datasets) to numpy array
    This requires shorter tracks to be padded with zeros (up to length max_T)
    Parameters:
        tracks: tracks in list format (i.e. output from andi_datasets function)
        max_T: the maximum track length
        dimensions: number of track dimensions (1 or 2 supported)
    Returns:
        tracks_array: a numpy array containing padded and preprocessed tracks
    Raises:
        ValueError: if dimensions is not 1 or 2
    """
    if dimensions not in (1, 2):
        # Previously an unsupported value fell through silently and crashed
        # later with an UnboundLocalError inside preprocess_tracks; fail fast
        # with a clear message instead.
        raise ValueError(f"dimensions must be 1 or 2, got {dimensions}")
    # Package into array
    n = len(tracks)
    tracks_array = np.zeros([n, max_T, dimensions])
    if dimensions == 1:
        for i, t in enumerate(tracks):
            # Right-align each track; zero padding occupies the leading entries.
            tracks_array[i, max_T - len(t):, 0] = t
    else:
        for i, t in enumerate(tracks):
            # 2D tracks arrive flattened as [x_0..x_T, y_0..y_T]; split in half.
            len_t = int(len(t) / 2)
            tracks_array[i, max_T - len_t:, 0] = t[:len_t]
            tracks_array[i, max_T - len_t:, 1] = t[len_t:]
    # Preprocess (differencing + normalisation).
    tracks_array = preprocess_tracks(tracks_array)
    return tracks_array
def preprocess_tracks(tracks):
    """
    Preprocess tracks by taking the difference between successive positions and normalising (dividing by the mean step
    size) -> input to models
    Parameters:
        tracks: numpy array of tracks, shape [n, max_T, d] with d = 1 or 2
    Returns:
        tracks_processed: a numpy array of shape [n, max_T-1, d]
    Raises:
        ValueError: if the last axis is not 1 or 2
    """
    # 1D tracks
    if tracks.shape[2] == 1:
        diff = np.diff(tracks[:, :, 0], axis=1)
        # Denominator counts non-zero coordinates — assumes zeros only occur as
        # left padding, not as genuine positions (TODO confirm upstream).
        meanstep = np.sum(abs(diff), axis=1) / np.sum(tracks[:, :, 0] != 0, axis=1)
        tracks_processed = np.expand_dims(diff / np.expand_dims(meanstep, axis=-1), axis=-1)
    # 2D tracks
    elif tracks.shape[2] == 2:
        dx = np.diff(tracks[:, :, 0], axis=1)
        dy = np.diff(tracks[:, :, 1], axis=1)
        # Mean Euclidean step length, with the same padding assumption as above.
        meanstep = np.expand_dims(np.sum(((dx ** 2) + (dy ** 2)) ** 0.5, axis=1) / np.sum(tracks[:, :, 0] != 0, axis=1),
                                  axis=-1)
        tracks_processed = np.dstack((dx / meanstep, dy / meanstep))
    else:
        # Previously this fell through and raised UnboundLocalError at the
        # return; raise a descriptive error instead.
        raise ValueError(f"unsupported number of dimensions: {tracks.shape[2]}")
    return tracks_processed
def split_tracks(tracks, positions, dimensions=1, max_T=200):
    """
    Split tracks according to positions (i.e. output from a segmentation model)
    Parameters:
        tracks: tracks in list format (i.e. output from andi_datasets)
        positions: per-track switchpoint indices
        dimensions: number of track dimensions (1 and 2 supported)
        max_T: maximum track length
    Returns:
        split_tracks_array: numpy array of processed split tracks, shape [n * 2, max_T-1, dimensions]
    """
    # NOTE(review): the literal 199 below is the last valid index of a
    # length-200 track, i.e. it assumes max_T == 200 — confirm before using a
    # different max_T.
    g = 0  # can set to > 0 to exclude points in the immediate vicinity of the switchpoint
    # Two output rows per input track: row i = pre-switchpoint segment
    # (right-aligned), row i+1 = post-switchpoint segment (re-zeroed at its start).
    split_tracks_array = np.zeros([len(tracks) * 2, max_T, dimensions])
    # 1D tracks
    if dimensions == 1:
        i = 0  # counter
        for j, track in enumerate(tracks):
            split_tracks_array[i, max_T - max(positions[j] - g, 0):, 0] = track[:max(positions[j] - g, 0)]
            # Subtract the switchpoint value so the second segment starts at 0.
            split_tracks_array[i + 1, min(positions[j] + g, 199):, 0] = track[min(positions[j] + g, 199):] - track[
                min(positions[j] + g, 199)]
            i += 2
    # 2D tracks
    elif dimensions == 2:
        i = 0  # counter
        for j, track in enumerate(tracks):
            # 2D tracks arrive flattened as [x..., y...]; y is re-zeroed at its start.
            len_t = int(len(track) / 2)
            d1 = track[:len_t].flatten()
            d2 = track[len_t:].flatten() - track[len_t]
            split_tracks_array[i, max_T - max(positions[j] - g, 0):, 0] = d1[:max(positions[j] - g, 0)]
            split_tracks_array[i, max_T - max(positions[j] - g, 0):, 1] = d2[:max(positions[j] - g, 0)]
            split_tracks_array[i + 1, min(positions[j] + g, 199):, 0] = d1[min(positions[j] + g, 199):] - d1[
                min(positions[j] + g, 199)]
            split_tracks_array[i + 1, min(positions[j] + g, 199):, 1] = d2[min(positions[j] + g, 199):] - d2[
                min(positions[j] + g, 199)]
            i += 2
    # Preprocess
    split_tracks_array = preprocess_tracks(split_tracks_array)
    return split_tracks_array
"""
File handling
"""
def import_tracks(path):
    """
    Import tracks saved in the competition format. NB only imports 1D and 2D tracks
    Parameters:
        path: path to file
    Returns:
        1D and 2D trajectories in list format
    """
    rows = csv.reader(open(path, 'r'), delimiter=';', lineterminator='\n', quoting=csv.QUOTE_NONNUMERIC)
    one_dim, two_dim = [], []
    for row in rows:
        # Leading field encodes the dimensionality; 3D tracks are skipped.
        dim = int(row[0])
        if dim == 1:
            one_dim.append(row[1:])
        elif dim == 2:
            two_dim.append(row[1:])
    return one_dim, two_dim
def import_labels(direc):
    """
    Import labels saved in the competition format. NB only imports 1D and 2D tracks
    For task 1 this is the exponent
    For task 2 this is the model
    For task 3 this is the switchpoint ONLY
    Parameters:
        direc: path to file
    Returns:
        Labels for 1D and 2D tracks
    """
    # NOTE(review): the file handle passed to csv.reader is never closed.
    l = csv.reader(open(direc, 'r'), delimiter=';', lineterminator='\n', quoting=csv.QUOTE_NONNUMERIC)
    Y = [[], []]
    for labels in l:
        # First field is the dimensionality; only 1D/2D labels are kept.
        if int(labels[0]) in [1, 2]:
            Y[int(labels[0]) - 1].append(labels[1])
    return np.array(Y[0]), np.array(Y[1])
"""
Other
"""
def rolling_ave(array, window):
    """
    Apply a rolling average to a 1D array. Rolling average window specified by window parameter. Can be useful to apply
    to output of segmentation CNN, but not strictly necessary
    """
    # Pad each end with (window // 2 - 1) of its own edge columns, then use the
    # cumulative-sum trick to compute the moving mean in O(n).
    array_padded = np.c_[array[:, :int(window / 2)][:, :-1], array, array[:, -int(window / 2):][:, :-1]]
    cumsum = np.cumsum(array_padded, axis=1)
    # NOTE(review): the trailing "| 0.929632 | 0.731011 |" text on the line
    # below looks like dataset-export residue fused onto the code.
    return (cumsum[:, window:] - cumsum[:, :-window]) / window | 0.929632 | 0.731011 |
import os
import csv
from pyparsing import Word, Combine, nums, alphas, Optional, Regex
from collections import OrderedDict
class ProxifierLog(object):
    """Pyparsing-based parser for Proxifier log lines.

    A line is tokenised into timestamp, service, optional architecture tag,
    domain/ip, optional status and a free-text message; parse_log() then maps
    the variable-length token list onto fixed output fields.
    """
    def __init__(self, dataset):
        self.dataset = dataset  # dataset name; only stored, not used here
        self.proxifierlog_grammar = self.__get_proxifierlog_grammar()

    @staticmethod
    def __get_proxifierlog_grammar():
        # get proxifier grammar
        ints = Word(nums)
        date = Combine('[' + ints + '.' + ints)               # e.g. "[10.21"
        time = Combine(ints + ':' + ints + ':' + ints + ']')  # e.g. "12:33:09]"
        timestamp = date + time
        service = Word(alphas + nums + '.' + '-' + '_')       # process name, e.g. "chrome.exe"
        arch = Optional(Word('*' + nums))                     # e.g. "*64" (may be absent)
        domain_or_ip = Optional(Word('-')) + Word(alphas + nums + '.' + ':' + '-')
        status = Optional(Word(alphas + ',')) + Optional(':')
        message = Regex('.*')                                 # remainder of the line
        proxifierlog_grammar = timestamp + service + arch + domain_or_ip + status + message
        return proxifierlog_grammar

    def parse_log(self, log_line):
        # parse proxifier log entries; the number of matched tokens decides
        # which optional fields were present on this line
        parsed_proxifierlog = self.proxifierlog_grammar.parseString(log_line)
        parsed = OrderedDict()
        parsed['timestamp'] = parsed_proxifierlog[0] + ' ' + parsed_proxifierlog[1]
        parsed['service'] = parsed_proxifierlog[2]
        if len(parsed_proxifierlog) == 6:
            # Service name matched as two tokens; no arch/domain/status present.
            parsed['service'] = parsed_proxifierlog[2] + ' ' + parsed_proxifierlog[3]
            parsed['arch'] = ''
            parsed['domain_or_ip'] = ''
            parsed['status'] = ''
            parsed['message'] = ' '.join(parsed_proxifierlog[4:])
        elif len(parsed_proxifierlog) == 7:
            # No arch tag; tokens 3-4 form the domain/ip pair.
            parsed['arch'] = ''
            parsed['domain_or_ip'] = parsed_proxifierlog[3] + ' ' + parsed_proxifierlog[4]
            if parsed_proxifierlog[5].endswith(','):
                # A trailing comma marks token 5 as a status word.
                parsed['status'] = parsed_proxifierlog[5]
                parsed['message'] = parsed_proxifierlog[6]
            else:
                parsed['status'] = ''
                parsed['message'] = ' '.join(parsed_proxifierlog[5:])
        elif len(parsed_proxifierlog) == 8:
            if parsed_proxifierlog[3].startswith('*'):
                # Arch tag present (e.g. "*64"); shifts the remaining fields by one.
                parsed['arch'] = parsed_proxifierlog[3]
                parsed['domain_or_ip'] = parsed_proxifierlog[4] + ' ' + parsed_proxifierlog[5]
                if parsed_proxifierlog[6].endswith(','):
                    parsed['status'] = parsed_proxifierlog[6]
                    parsed['message'] = parsed_proxifierlog[7]
                else:
                    parsed['status'] = ''
                    parsed['message'] = ' '.join(parsed_proxifierlog[6:])
            else:
                # No arch tag: the extra token belongs to a two-token status
                # of the form "<word> :".
                parsed['arch'] = ''
                parsed['domain_or_ip'] = parsed_proxifierlog[3] + ' ' + parsed_proxifierlog[4]
                if parsed_proxifierlog[6] == ':':
                    parsed['status'] = parsed_proxifierlog[5] + ' ' + parsed_proxifierlog[6]
                    parsed['message'] = parsed_proxifierlog[7]
                else:
                    parsed['status'] = ''
                    parsed['message'] = ' '.join(parsed_proxifierlog[5:])
        elif len(parsed_proxifierlog) == 9:
            # All optional fields present: arch, two-token domain/ip, two-token status.
            parsed['arch'] = parsed_proxifierlog[3]
            parsed['domain_or_ip'] = parsed_proxifierlog[4] + ' ' + parsed_proxifierlog[5]
            parsed['status'] = parsed_proxifierlog[6] + ' ' + parsed_proxifierlog[7]
            parsed['message'] = parsed_proxifierlog[8]
        return parsed
if __name__ == '__main__':
    # Parse every Proxifier log in dataset_path and dump the parsed fields to CSV.
    dataset_path = '/home/hudan/Git/prlogparser/datasets/proxifier/'
    filenames = ['proxifier.log']
    test_file = '/home/hudan/Git/prlogparser/groundtruth/test-results/proxifier-test.csv'
    # Use a distinct name for the output handle: the original reused `f` for
    # both the CSV writer and the input log, so the CSV file was never closed
    # and the final close() targeted the (already closed) input file instead.
    out_f = open(test_file, 'w', newline='')
    writer = csv.writer(out_f)
    pl = ProxifierLog('')
    for filename in filenames:
        filename = os.path.join(dataset_path, filename)
        with open(filename, 'r') as f:
            for line in f:
                parsed_line = pl.parse_log(line)
                print(parsed_line)
                row = list(parsed_line.values())
                writer.writerow(row)
    out_f.close()
import csv
from pyparsing import Word, Combine, nums, alphas, Optional, Regex
from collections import OrderedDict
class ProxifierLog(object):
    """Parser for Proxifier log lines built on pyparsing.

    The grammar splits a line into timestamp, service, optional arch tag,
    domain/ip, optional status and message; parse_log() resolves the
    ambiguity by branching on how many tokens matched.
    """
    def __init__(self, dataset):
        self.dataset = dataset  # dataset name; stored but unused here
        self.proxifierlog_grammar = self.__get_proxifierlog_grammar()

    @staticmethod
    def __get_proxifierlog_grammar():
        # get proxifier grammar
        ints = Word(nums)
        date = Combine('[' + ints + '.' + ints)               # "[MM.DD"
        time = Combine(ints + ':' + ints + ':' + ints + ']')  # "HH:MM:SS]"
        timestamp = date + time
        service = Word(alphas + nums + '.' + '-' + '_')       # e.g. process/executable name
        arch = Optional(Word('*' + nums))                     # e.g. "*64", may be absent
        domain_or_ip = Optional(Word('-')) + Word(alphas + nums + '.' + ':' + '-')
        status = Optional(Word(alphas + ',')) + Optional(':')
        message = Regex('.*')                                 # everything else
        proxifierlog_grammar = timestamp + service + arch + domain_or_ip + status + message
        return proxifierlog_grammar

    def parse_log(self, log_line):
        # parse proxifier log entries; token count selects the field layout
        parsed_proxifierlog = self.proxifierlog_grammar.parseString(log_line)
        parsed = OrderedDict()
        parsed['timestamp'] = parsed_proxifierlog[0] + ' ' + parsed_proxifierlog[1]
        parsed['service'] = parsed_proxifierlog[2]
        if len(parsed_proxifierlog) == 6:
            # Two-token service name; no arch/domain/status on this line.
            parsed['service'] = parsed_proxifierlog[2] + ' ' + parsed_proxifierlog[3]
            parsed['arch'] = ''
            parsed['domain_or_ip'] = ''
            parsed['status'] = ''
            parsed['message'] = ' '.join(parsed_proxifierlog[4:])
        elif len(parsed_proxifierlog) == 7:
            # No arch tag; tokens 3-4 are the domain/ip pair.
            parsed['arch'] = ''
            parsed['domain_or_ip'] = parsed_proxifierlog[3] + ' ' + parsed_proxifierlog[4]
            if parsed_proxifierlog[5].endswith(','):
                # Trailing comma marks a status word.
                parsed['status'] = parsed_proxifierlog[5]
                parsed['message'] = parsed_proxifierlog[6]
            else:
                parsed['status'] = ''
                parsed['message'] = ' '.join(parsed_proxifierlog[5:])
        elif len(parsed_proxifierlog) == 8:
            if parsed_proxifierlog[3].startswith('*'):
                # Arch tag present; remaining fields shift right by one.
                parsed['arch'] = parsed_proxifierlog[3]
                parsed['domain_or_ip'] = parsed_proxifierlog[4] + ' ' + parsed_proxifierlog[5]
                if parsed_proxifierlog[6].endswith(','):
                    parsed['status'] = parsed_proxifierlog[6]
                    parsed['message'] = parsed_proxifierlog[7]
                else:
                    parsed['status'] = ''
                    parsed['message'] = ' '.join(parsed_proxifierlog[6:])
            else:
                # No arch tag; extra token means a "<word> :" two-token status.
                parsed['arch'] = ''
                parsed['domain_or_ip'] = parsed_proxifierlog[3] + ' ' + parsed_proxifierlog[4]
                if parsed_proxifierlog[6] == ':':
                    parsed['status'] = parsed_proxifierlog[5] + ' ' + parsed_proxifierlog[6]
                    parsed['message'] = parsed_proxifierlog[7]
                else:
                    parsed['status'] = ''
                    parsed['message'] = ' '.join(parsed_proxifierlog[5:])
        elif len(parsed_proxifierlog) == 9:
            # All optional fields present.
            parsed['arch'] = parsed_proxifierlog[3]
            parsed['domain_or_ip'] = parsed_proxifierlog[4] + ' ' + parsed_proxifierlog[5]
            parsed['status'] = parsed_proxifierlog[6] + ' ' + parsed_proxifierlog[7]
            parsed['message'] = parsed_proxifierlog[8]
        return parsed
if __name__ == '__main__':
    # Run the parser over each dataset log and write the fields out as CSV.
    dataset_path = '/home/hudan/Git/prlogparser/datasets/proxifier/'
    filenames = ['proxifier.log']
    test_file = '/home/hudan/Git/prlogparser/groundtruth/test-results/proxifier-test.csv'
    # Distinct handle name for the CSV output: the original rebound `f` to the
    # input log inside the loop, leaking the writer's file and re-closing the
    # already-closed input at the end.
    csv_out = open(test_file, 'w', newline='')
    writer = csv.writer(csv_out)
    pl = ProxifierLog('')
    for filename in filenames:
        filename = os.path.join(dataset_path, filename)
        with open(filename, 'r') as f:
            for line in f:
                parsed_line = pl.parse_log(line)
                print(parsed_line)
                row = list(parsed_line.values())
                writer.writerow(row)
    csv_out.close()
import json, os, sys
import numpy as np
from collections import Counter
file_path = os.path.abspath(__file__)
sys.path.append(os.path.abspath(os.path.join(file_path, "..", "..", "..")))
from code_aculat.visualize.plot_tools import plot_points
import pandas as pd
from matplotlib import pyplot as plt
def get_image2anno(json_path):
    """
    Build {index_of_image_in_images: [anno1, anno2, ...]} from a COCO json.

    Keys are positions in the `images` list (not image ids), so callers can
    index straight into `images` for the matching metadata.
    """
    with open(json_path, 'r') as f:
        coco = json.load(f)
    # Map image id -> position within the `images` list.
    id_to_index = {img['id']: pos for pos, img in enumerate(coco['images'])}
    index_to_annos = {}
    for ann in coco['annotations']:
        index_to_annos.setdefault(id_to_index[ann['image_id']], []).append(ann)
    return index_to_annos
def analyse_obs_size_after_resized(json_path, long_short_edges):
    """Plot object (w, h) sizes after images are resized with a (long, short) edge rule.

    For each (longer, shorter) pair the scale makes the short image side equal
    `shorter`, unless that would push the long side past `longer`, in which
    case the long side is clamped to `longer` instead (detector-style resize).
    Boxes are scaled accordingly and scatter-plotted, one plot per edge pair.
    """
    img_index2annos = get_image2anno(json_path)
    with open(json_path, 'r') as f:
        jf = json.load(f)
    images = jf['images']
    for edge in long_short_edges:
        longer, shorter = edge
        resized_wh = []  # scaled (w, h) of every object under this edge setting
        for key, value in img_index2annos.items():
            height = images[key]['height']
            width = images[key]['width']
            if width >= height:
                # Height is the short side; clamp if the scaled width overshoots.
                scale = shorter / height
                if int(scale * width) > longer:
                    scale = longer / width
            else:
                # Width is the short side; clamp if the scaled height overshoots.
                scale = shorter / width
                if int(scale * height) > longer:
                    scale = longer / height
            for anno in value:
                xmin, ymin, w, h = anno['bbox']
                x = int(scale * w)
                y = int(scale * h)
                resized_wh.append([x, y])
        wh_scale = np.array(resized_wh)
        # One scatter per edge setting, labelled with the short-edge length.
        plot_points([(wh_scale[:, 0], wh_scale[:, 1])], label=shorter)
def analyse_obs_size(json_path):
    """Scatter-plot object (height, width) pairs from a COCO json."""
    with open(json_path, 'r') as f:
        jf = json.load(f)
    ob_h_w = []  # one [h, w] row per annotation
    for anno in jf['annotations']:
        xmin, ymin, w, h = anno['bbox']
        ob_h_w.append([h, w])
    ob_h_w = np.array(ob_h_w)
    plot_points([(ob_h_w[:, 0], ob_h_w[:, 1])], label='ob_h_w')
def analyse_obs_ratio(json_path):
    """Bar-chart the distribution of object aspect ratios (ceil of height/width).

    Only the 15 most frequent ratio values are plotted, sorted by ratio.
    """
    with open(json_path, 'r') as f:
        jf = json.load(f)
    h_w = []
    for anno in jf['annotations']:
        xmin, ymin, w, h = anno['bbox']
        h_w.append([h, w])
    # `np.int` was removed in NumPy 1.24; the builtin `int` is the documented
    # drop-in replacement and produces the same platform integer dtype.
    h_w = np.array(h_w, dtype=int)
    ratio = np.ceil(h_w[:, 0] / h_w[:, 1])
    ratio_dict = dict(Counter(ratio))  # frequency of each (ceiled) ratio
    values = np.array(list(ratio_dict.values()))
    keys = np.array(list(ratio_dict.keys()))
    value_big = np.argsort(values)[::-1]
    number_limit = 15
    if len(value_big) > number_limit:
        value_big = value_big[:number_limit]  # keep only the 15 most frequent ratios
    keys = keys[value_big]
    values = values[value_big]
    keys_ind = np.argsort(keys)  # re-sort survivors by ratio value
    keys = keys[keys_ind]
    values = values[keys_ind]
    # x axis: ratio value, y axis: count (rendered via pandas -> matplotlib).
    array_x = keys
    array_y = values
    hw_dataframe = pd.DataFrame(array_y, columns=["h2w_ratio"])
    ax = hw_dataframe.plot(kind='bar', color="#55aacc")
    ax.set_xticklabels(array_x, rotation=0)
    plt.show()
def analyse_image_hw(json_path):
    """Scatter-plot image (height, width) pairs from a COCO json."""
    with open(json_path, 'r') as f:
        jf = json.load(f)
    h_w = []  # one [h, w] row per image
    for img in jf['images']:
        w, h = img['width'], img['height']
        h_w.append([h, w])
    h_w = np.array(h_w)
    plot_points([(h_w[:, 0], h_w[:, 1])], label='image_h_w')
def check_annos(json_path):
    """
    Validate the `annotations` entries of a COCO json: every annotation must
    carry a single-polygon segmentation and a well-formed positive bbox.

    The offending annotation is printed before raising.

    Raises:
        ValueError: on the first malformed annotation. (The original code used
        `raise "..."`, which itself fails with `TypeError: exceptions must
        derive from BaseException` instead of reporting the problem.)
    """
    with open(json_path, 'r') as f:
        jf = json.load(f)
    for anno in jf["annotations"]:
        seg = anno['segmentation']
        bbox = anno['bbox']
        if len(seg) != 1:  # must not be empty (pure detection datasets aside)
            print(anno)
            raise ValueError("segmentation field error")
        if len(bbox) != 4:
            print(anno)
            raise ValueError("bbox field length error")
        xmin, ymin, w, h = bbox
        if xmin < 0 or ymin < 0:  # top-left corner must be non-negative
            print(anno)
            raise ValueError("bbox field value error")
        if w < 1 or h < 1:  # width and height must be at least 1
            print(anno)
            raise ValueError("bbox field value error")
def analyse_num_each_class(json_path, show=True):
    """Count annotations per category_id.

    Prints the counts, optionally draws a bar chart, and returns the
    {category_id: count} dict.
    """
    # `with` closes the handle deterministically — the original
    # `json.load(open(...))` leaked the file object.
    with open(json_path, 'r') as f:
        jf = json.load(f)
    obs_dict = {}
    for ann in jf['annotations']:
        obs_dict[ann['category_id']] = obs_dict.get(ann['category_id'], 0) + 1
    print(obs_dict)
    if show:
        show_bar(obs_dict, title="ob_num_each_class")
    return obs_dict
def checkout_iterstrat_split(json_path, split_folds):
    """Verify that a k-fold split preserves per-class instance counts.

    For every fold directory under `split_folds`, the per-class counts of
    train.json + test.json must equal the counts of the full dataset at
    `json_path`. Prints {fold_dir: number_of_mismatched_classes}.
    """
    total_obs_dict = analyse_num_each_class(json_path, False)
    fold_error_dict = {}
    for dir in os.listdir(split_folds):
        fold_path = os.path.join(split_folds, dir)
        train_json = os.path.join(fold_path, 'train.json')
        test_json = os.path.join(fold_path, 'test.json')
        train_obs_dict = analyse_num_each_class(train_json, False)
        test_obs_dict = analyse_num_each_class(test_json, False)
        error_num = 0
        for key in total_obs_dict.keys():
            try:
                if total_obs_dict[key] != train_obs_dict[key] + test_obs_dict[key]:
                    error_num += 1
            except Exception as e:
                # A class missing entirely from train or test (KeyError) also
                # counts as one mismatch.
                error_num += 1
        fold_error_dict[dir] = error_num
    print(fold_error_dict)
def show_bar(key_num_dic, title=None):
    """Draw a bar chart for a {key: count} dict; `title` labels the data column."""
    unique_name = list(key_num_dic.keys())
    "得到每类对象的数量"
    unique_count = list(key_num_dic.values())
    # Keys on the x axis, counts on the y axis.
    "类别名称为x轴,对应的数量为y轴"
    array_x = unique_name
    array_y = unique_count
    # pandas plotting wraps matplotlib under the hood.
    wh_dataframe = pd.DataFrame(array_y, columns=[title])
    ax = wh_dataframe.plot(kind='bar', color="#55aacc")
    ax.set_xticklabels(array_x, rotation=0)
    plt.show()
def stastic_ann_per_image(json_path):
    """Bar-chart the distribution of annotation counts per image (via pycocotools)."""
    from pycocotools.coco import COCO
    coco = COCO(json_path)
    imgids2num_ann = {}  # image id -> number of annotations
    img_ids = coco.getImgIds()
    for im_id in img_ids:
        num_ann = len(coco.getAnnIds(imgIds=im_id))
        if num_ann > 10:
            # Log unusually crowded images for manual inspection.
            print(coco.loadImgs(im_id))
        imgids2num_ann[im_id] = num_ann
    stastic_num = {}  # annotation count -> number of images with that count
    for key, value in imgids2num_ann.items():
        if value not in stastic_num:
            stastic_num[value] = 0
        stastic_num[value] += 1
    show_bar(stastic_num, title="ob_num_per_image")
def check_empty_coco(json_path):
valid_imgids=[]
with open(json_path,'r') as f:
jf=json.load(f)
for ann in jf['annotations']:
if ann['image_id'] not in valid_imgids:
valid_imgids.append(ann['image_id'])
empty_imgids=[]
for img in jf['images']:
if img['id'] not in valid_imgids:
empty_imgids.append(img['id'])
print('empty num is {} ,image ids {}'.format(len(empty_imgids),empty_imgids)) | data_analyse/data_analyse_coco.py | import json, os, sys
import numpy as np
from collections import Counter
file_path = os.path.abspath(__file__)
sys.path.append(os.path.abspath(os.path.join(file_path, "..", "..", "..")))
from code_aculat.visualize.plot_tools import plot_points
import pandas as pd
from matplotlib import pyplot as plt
def get_image2anno(json_path):
    """Map each image's index in ``images`` to the list of its annotations.

    Builds {img_index_in_images: [anno1, anno2, ...], ...} so image info can be
    looked up directly from the ``images`` field by index.

    Returns:
        dict: image index -> list of annotation dicts.
    """
    with open(json_path, 'r') as f:
        coco = json.load(f)
    # image id -> position of that image inside the 'images' list
    id_to_index = {img['id']: idx for idx, img in enumerate(coco['images'])}
    index_to_annos = {}
    for ann in coco['annotations']:
        index_to_annos.setdefault(id_to_index[ann['image_id']], []).append(ann)
    return index_to_annos
def analyse_obs_size_after_resized(json_path, long_short_edges):
    """Plot object widths/heights after each image is resized to fixed (long, short) edges."""
    index_to_annos = get_image2anno(json_path)
    with open(json_path, 'r') as f:
        coco = json.load(f)
    images = coco['images']
    for longer, shorter in long_short_edges:
        resized_wh = []
        for img_index, annos in index_to_annos.items():
            height = images[img_index]['height']
            width = images[img_index]['width']
            # scale the short side up to `shorter`, but never let the long
            # side exceed `longer` (standard keep-aspect-ratio resize)
            if width >= height:
                scale = shorter / height
                if int(scale * width) > longer:
                    scale = longer / width
            else:
                scale = shorter / width
                if int(scale * height) > longer:
                    scale = longer / height
            for anno in annos:
                _, _, w, h = anno['bbox']
                resized_wh.append([int(scale * w), int(scale * h)])
        wh = np.array(resized_wh)
        plot_points([(wh[:, 0], wh[:, 1])], label=shorter)
def analyse_obs_size(json_path):
    """Collect object (height, width) pairs from a COCO dataset and plot them."""
    with open(json_path, 'r') as f:
        coco = json.load(f)
    # bbox layout is [xmin, ymin, w, h]; keep (h, w) to match the plot axes
    hw_pairs = np.array([[ann['bbox'][3], ann['bbox'][2]]
                         for ann in coco['annotations']])
    plot_points([(hw_pairs[:, 0], hw_pairs[:, 1])], label='ob_h_w')
def analyse_obs_ratio(json_path):
    """Plot a bar chart of object aspect ratios (ceil(height / width)).

    Keeps only the 15 most frequent ratio values, then orders the bars by
    the ratio value itself.
    """
    with open(json_path, 'r') as f:
        jf = json.load(f)
    h_w = []
    for anno in jf['annotations']:
        xmin, ymin, w, h = anno['bbox']
        h_w.append([h, w])
    # np.int was removed in NumPy 1.24; the builtin int is the documented replacement
    h_w = np.array(h_w, dtype=int)
    ratio = np.ceil(h_w[:, 0] / h_w[:, 1])
    ratio_dict = dict(Counter(ratio))  # frequency of each ratio value
    values = np.array(list(ratio_dict.values()))
    keys = np.array(list(ratio_dict.keys()))
    value_big = np.argsort(values)[::-1]
    number_limit = 15
    if len(value_big) > number_limit:
        value_big = value_big[:number_limit]  # keep only the 15 most frequent
    keys = keys[value_big]
    values = values[value_big]
    keys_ind = np.argsort(keys)  # re-sort the survivors by ratio value
    keys = keys[keys_ind]
    values = values[keys_ind]
    # ratio values on the x axis, their counts on the y axis
    array_x = keys
    array_y = values
    hw_dataframe = pd.DataFrame(array_y, columns=["h2w_ratio"])  # pandas wraps matplotlib here
    ax = hw_dataframe.plot(kind='bar', color="#55aacc")
    ax.set_xticklabels(array_x, rotation=0)
    plt.show()
def analyse_image_hw(json_path):
    """Collect (height, width) pairs of all images and plot them."""
    with open(json_path, 'r') as f:
        coco = json.load(f)
    pairs = [[img['height'], img['width']] for img in coco['images']]
    pairs = np.array(pairs)
    plot_points([(pairs[:, 0], pairs[:, 1])], label='image_h_w')
def check_annos(json_path):
    """Validate the 'annotations' entries of a COCO json file.

    Raises:
        ValueError: if a segmentation is empty/malformed, a bbox does not have
            4 values, a bbox has a negative top-left corner, or a bbox side is
            smaller than 1 pixel.  (The original `raise ("...")` raised a bare
            string, which is itself a TypeError in Python 3.)
    """
    with open(json_path, 'r') as f:
        jf = json.load(f)
    for anno in jf["annotations"]:
        seg = anno['segmentation']
        bbox = anno['bbox']
        if len(seg) != 1:  # must not be empty (pure-detection datasets excepted)
            print(anno)
            raise ValueError("segmentation field error")
        if len(bbox) != 4:
            print(anno)
            raise ValueError("bbox field length error")
        xmin, ymin, w, h = bbox
        if xmin < 0 or ymin < 0:  # negative top-left corner
            print(anno)
            raise ValueError("bbox field value error")
        if w < 1 or h < 1:  # width and height must be at least 1
            print(anno)
            raise ValueError("bbox field value error")
def analyse_num_each_class(json_path, show=True):
    """Count annotations per category_id; optionally draw a bar chart.

    Args:
        json_path: path to a COCO-style json file.
        show: when True, also render the counts with show_bar().

    Returns:
        dict: {category_id: annotation_count}.
    """
    # use a context manager: the original json.load(open(...)) leaked the handle
    with open(json_path, 'r') as f:
        jf = json.load(f)
    obs_dict = dict(Counter(ann['category_id'] for ann in jf['annotations']))
    print(obs_dict)
    if show:
        show_bar(obs_dict, title="ob_num_each_class")
    return obs_dict
def checkout_iterstrat_split(json_path, split_folds):
    """Verify each fold's train+test per-class instance counts equal the pre-split totals.

    Prints {fold_dir_name: number_of_mismatching_categories}.
    """
    total_obs_dict = analyse_num_each_class(json_path, False)
    fold_error_dict = {}
    for fold_name in os.listdir(split_folds):  # renamed: `dir` shadowed the builtin
        fold_path = os.path.join(split_folds, fold_name)
        train_obs_dict = analyse_num_each_class(os.path.join(fold_path, 'train.json'), False)
        test_obs_dict = analyse_num_each_class(os.path.join(fold_path, 'test.json'), False)
        error_num = 0
        for key, total in total_obs_dict.items():
            # a category absent from either split counts as an error — this is
            # what the original's broad `except Exception` around the lookup did
            if key not in train_obs_dict or key not in test_obs_dict:
                error_num += 1
            elif total != train_obs_dict[key] + test_obs_dict[key]:
                error_num += 1
        fold_error_dict[fold_name] = error_num
    print(fold_error_dict)
def show_bar(key_num_dic, title=None):
    """Draw a bar chart for a {key: count} mapping; ``title`` is the chart title."""
    # keys become the x-axis labels, their counts the bar heights
    array_x = list(key_num_dic.keys())
    array_y = list(key_num_dic.values())
    # pandas plotting is a thin wrapper over matplotlib
    frame = pd.DataFrame(array_y, columns=[title])
    axes = frame.plot(kind='bar', color="#55aacc")
    axes.set_xticklabels(array_x, rotation=0)
    plt.show()
def stastic_ann_per_image(json_path):
    """Plot a histogram of annotation counts per image; print images with >10 annotations."""
    from pycocotools.coco import COCO
    coco = COCO(json_path)
    # image id -> number of annotations on that image
    img_to_count = {}
    for im_id in coco.getImgIds():
        count = len(coco.getAnnIds(imgIds=im_id))
        if count > 10:
            print(coco.loadImgs(im_id))
        img_to_count[im_id] = count
    # annotation count -> number of images carrying that many annotations
    freq = {}
    for count in img_to_count.values():
        freq[count] = freq.get(count, 0) + 1
    show_bar(freq, title="ob_num_per_image")
def check_empty_coco(json_path):
    """Report images that carry no annotations (prints their count and ids)."""
    with open(json_path, 'r') as f:
        jf = json.load(f)
    # set membership is O(1); the original list scan made this O(images * annotations)
    valid_imgids = {ann['image_id'] for ann in jf['annotations']}
    empty_imgids = [img['id'] for img in jf['images'] if img['id'] not in valid_imgids]
    print('empty num is {} ,image ids {}'.format(len(empty_imgids), empty_imgids))
import os, logging, sys
from collections import OrderedDict as odict
import numpy as np
log = logging.getLogger(__name__)
from opticks.ana.bench import Bench
from mpl_toolkits.axes_grid1.axes_divider import make_axes_area_auto_adjustable
import matplotlib.pyplot as plt
from opticks.ana.plot import init_rcParams
init_rcParams(plt)
def barplot(labels, values):
    """Horizontal bar chart of ``values`` keyed by ``labels`` on a RdYlGn color ramp.

    NOTE(review): relies on module-level ``fig``/``ax`` created by the __main__
    block (kept for backward compatibility); returns (fig, ax).
    Values are assumed to lie in [0, 1] for the color mapping — TODO confirm.
    """
    iso = np.argsort(values)
    # np.int was removed in NumPy 1.24; also clip so a value of exactly 1.0
    # cannot index past the 100-entry colormap (original off-by-one).
    ivalues = np.clip((values * 100).astype(int), 0, 99)
    cmap = plt.get_cmap('RdYlGn')(np.linspace(0.15, 0.85, 100))
    color = cmap[ivalues]
    widths = values
    starts = 0
    ax.barh(labels[iso], widths[iso], left=starts, height=0.5, color=color[iso])
    xcenters = starts + widths - 0.1
    fmt_ = lambda _: "%10.3f" % _
    for y, (x, c) in enumerate(zip(xcenters, widths)):
        r, g, b, _ = color[y]
        text_color = 'white' if r * g * b < 0.5 else 'darkgrey'
        text_color = 'black'  # unconditionally overrides the heuristic above, as in the original
        ax.text(x, y, fmt_(c), ha='center', va='center', color=text_color)
    return fig, ax
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    # ratio rows to derive: name -> [numerator_tag, denominator_tag]
    ratios = odict()
    ratios["R0/1_TITAN_V"] = "R0_TITAN_V R1_TITAN_V".split()
    ratios["R0/1_TITAN_RTX"] = "R0_TITAN_RTX R1_TITAN_RTX".split()
    ratios["R1/0_TITAN_V"] = "R1_TITAN_V R0_TITAN_V".split()
    ratios["R1/0_TITAN_RTX"] = "R1_TITAN_RTX R0_TITAN_RTX".split()

    args = Bench.Args()
    args.ratios = ratios
    b = Bench(args)
    print(b)

    titles = odict()
    titles["20190526_143808"] = "JUNO360 raytrace with 1/2/4/8 NVIDIA Tesla GV100 GPUs "
    titles["20190526_202537"] = "JUNO360 raytrace with NVIDIA TITAN V and TITAN RTX GPUs"

    # dict views are not subscriptable in Python 3: materialize before indexing
    df = list(titles.keys())[1]
    rg = b.find(df)
    title = titles.get(df, "benchplot")
    xlabel = "RO:RTX OFF, R1:RTX ON Time(s) to raytrace 10240 x 5760 (59M) pixels "

    labels = rg.a.label
    values = rg.a.metric

    fig = plt.figure()
    ax = fig.add_subplot(111)
    #ax.invert_yaxis()
    plt.title(title)
    #ax.xaxis.set_visible(False)
    ax.set_xlim(0, values.max()*1.1 )
    ax.set_xlabel( xlabel )

    fig, ax = barplot(labels, values)
    make_axes_area_auto_adjustable(ax)

    plt.ion()
    plt.show()
    print("savefig %s " % rg.path)
    plt.savefig(rg.path)
from collections import OrderedDict as odict
import numpy as np
log = logging.getLogger(__name__)
from opticks.ana.bench import Bench
from mpl_toolkits.axes_grid1.axes_divider import make_axes_area_auto_adjustable
import matplotlib.pyplot as plt
from opticks.ana.plot import init_rcParams
init_rcParams(plt)
def barplot(labels, values):
"""
"""
iso = np.argsort( values )
ivalues = (values*100).astype(np.int)
cmap = plt.get_cmap('RdYlGn')( np.linspace(0.15, 0.85, 100))
color = cmap[ivalues]
widths = values
starts = 0
ax.barh(labels[iso], widths[iso], left=starts, height=0.5, color=color[iso])
xcenters = starts + widths - 0.1
fmt_ = lambda _:"%10.3f" % _
for y, (x, c) in enumerate(zip(xcenters, widths)):
r, g, b, _ = color[y]
text_color = 'white' if r * g * b < 0.5 else 'darkgrey'
text_color = 'black'
ax.text(x, y, fmt_(c), ha='center', va='center', color=text_color)
pass
return fig, ax
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
ratios = odict()
ratios["R0/1_TITAN_V"] = "R0_TITAN_V R1_TITAN_V".split()
ratios["R0/1_TITAN_RTX"] = "R0_TITAN_RTX R1_TITAN_RTX".split()
ratios["R1/0_TITAN_V"] = "R1_TITAN_V R0_TITAN_V".split()
ratios["R1/0_TITAN_RTX"] = "R1_TITAN_RTX R0_TITAN_RTX".split()
args = Bench.Args()
args.ratios = ratios
b = Bench(args)
print(b)
titles = odict()
titles["20190526_143808"] = "JUNO360 raytrace with 1/2/4/8 NVIDIA Tesla GV100 GPUs "
titles["20190526_202537"] = "JUNO360 raytrace with NVIDIA TITAN V and TITAN RTX GPUs"
df = titles.keys()[1]
rg = b.find(df)
title = titles.get(df, "benchplot")
xlabel = "RO:RTX OFF, R1:RTX ON Time(s) to raytrace 10240 x 5760 (59M) pixels "
labels = rg.a.label
values = rg.a.metric
fig = plt.figure()
ax = fig.add_subplot(111)
#ax.invert_yaxis()
plt.title(title)
#ax.xaxis.set_visible(False)
ax.set_xlim(0, values.max()*1.1 )
ax.set_xlabel( xlabel )
fig, ax = barplot(labels, values)
make_axes_area_auto_adjustable(ax)
plt.ion()
plt.show()
print("savefig %s " % rg.path)
plt.savefig(rg.path) | 0.370339 | 0.334345 |
import asynctest
import pytest
from decimal import Decimal
from market_values_api.services import MarketValuesService
class TestMarketValuesService(object):
    """Tests for MarketValuesService.get() with a mocked MarketValueRepository."""

    @pytest.fixture
    def company_list(self):
        return ['CBA', 'ANZ', 'REA']

    @pytest.fixture
    def market_values(self):
        return {'CBA': Decimal('123'), 'ANZ': Decimal('321'), 'REA': Decimal('789')}

    @pytest.fixture
    def repository_class(self):
        # patch the repository class so no real lookups are performed
        with asynctest.patch(
            'market_values_api.repositories.MarketValueRepository',
            autospec=True, scope=asynctest.LIMITED
        ) as repository_class:
            yield repository_class

    @pytest.fixture
    def repository(self, repository_class, market_values):
        def _side_effect(company):
            # `is None` is the idiomatic (PEP 8) None check, not `== None`
            if market_values.get(company) is None:
                raise Exception('Unexpected error')
            return (company, market_values[company])
        repository = repository_class.return_value
        repository.get.side_effect = _side_effect
        return repository

    @pytest.mark.usefixtures("repository")
    async def test_get_instantiate_one_market_value_repository(self, session, company_list, repository_class):
        await MarketValuesService(session).get(company_list)
        repository_class.assert_called_once_with(session)

    async def test_get_calls_market_value_repository_for_each_company(self, session, company_list, repository):
        await MarketValuesService(session).get(company_list)
        assert repository.get.call_count == 3

    @pytest.mark.usefixtures("repository")
    async def test_get_returns_market_values_as_a_dict(self, session, company_list, market_values):
        result = await MarketValuesService(session).get(company_list)
        assert result == market_values

    @pytest.mark.usefixtures("repository")
    async def test_get_reraises_exception_occurred_in_async_task(self, session, company_list):
        with pytest.raises(Exception) as err:
            await MarketValuesService(session).get(company_list + ['CompanyNotExist'])
        assert 'Unexpected error' in str(err)
import pytest
from decimal import Decimal
from market_values_api.services import MarketValuesService
class TestMarketValuesService(object):
@pytest.fixture
def company_list(self):
return ['CBA', 'ANZ', 'REA']
@pytest.fixture
def market_values(self):
return {'CBA': Decimal('123'), 'ANZ': Decimal('321'), 'REA': Decimal('789')}
@pytest.fixture
def repository_class(self):
with asynctest.patch(
'market_values_api.repositories.MarketValueRepository',
autospec=True, scope=asynctest.LIMITED
) as repository_class:
yield repository_class
@pytest.fixture
def repository(self, repository_class, market_values):
def _side_effect(company):
if market_values.get(company) == None:
raise Exception('Unexpected error')
return (company, market_values[company])
repository = repository_class.return_value
repository.get.side_effect = _side_effect
return repository
@pytest.mark.usefixtures("repository")
async def test_get_instantiate_one_market_value_repository(self, session, company_list, repository_class):
await MarketValuesService(session).get(company_list)
repository_class.assert_called_once_with(session)
async def test_get_calls_market_value_repository_for_each_company(self, session, company_list, repository):
await MarketValuesService(session).get(company_list)
assert repository.get.call_count == 3
@pytest.mark.usefixtures("repository")
async def test_get_returns_market_values_as_a_dict(self, session, company_list, market_values):
result = await MarketValuesService(session).get(company_list)
assert result == market_values
@pytest.mark.usefixtures("repository")
async def test_get_reraises_exception_occurred_in_async_task(self, session, company_list):
with pytest.raises(Exception) as err:
await MarketValuesService(session).get(company_list + ['CompanyNotExist'])
assert 'Unexpected error' in str(err) | 0.424651 | 0.320875 |
from .asset_types import detect_asset_type, render_asset_html_tags, list_asset_types
from webassets import Bundle, six
from webassets.filter import get_filter
__all__ = ('Package', 'PackageError')
auto_filters = {
"less": ("less", "css"),
"coffee": ("coffeescript", "js"),
"sass": ("sass", "css"),
"scss": ("scss", "css"),
"styl": ("stylus", "css")}
class TypedBundle(Bundle):
    """A webassets Bundle tagged with the asset type it holds (e.g. "js", "css")."""
    def __init__(self, asset_type, *args, **kwargs):
        super(TypedBundle, self).__init__(*args, **kwargs)
        self.asset_type = asset_type
        # make contents mutable so Package can insert/append items in place
        self.contents = list(self.contents)
class PackageError(Exception):
    """Raised when a Package is misused (e.g. used while not bound to an environment)."""
    pass
class Package(object):
    """A list of mixed-typed bundles and urls.

    Items appended to the package are grouped into one TypedBundle per
    detected asset type; "@name" strings are recorded as references to other
    packages in the bound environment.
    """
    def __init__(self, *items, **kwargs):
        self._env = kwargs.pop("env", None)
        self.output = kwargs.pop("output", None)
        self.typed_bundles = {}  # asset_type -> TypedBundle
        self.depends = []        # names of referenced packages ("@name" items)
        if items:
            self.append(*items)

    def append(self, *items):
        """Add items at the end of the package."""
        self._process_items(items)

    def preprend(self, *items):
        """Add items at the front of the package (misspelled name kept for API compat)."""
        self._process_items(items, True)

    def _process_items(self, items, prepend=False):
        """Sort raw items (strings, dicts, Bundles, lists, "@refs") into typed bundles."""
        for item in items:
            if isinstance(item, (list, tuple)):
                self._process_items(item)
                continue
            if isinstance(item, six.string_types) and item.startswith("@"):
                # "@name" references another package in the environment
                if prepend:
                    self.depends.insert(0, item[1:])
                else:
                    self.depends.append(item[1:])
                continue
            if isinstance(item, Bundle):
                self._auto_filter_bundle(item)
            elif isinstance(item, dict):
                item = self._create_bundle(item)
            elif not (item.startswith("http://") or item.startswith("https://")):
                item = self._auto_apply_filter(item)
            asset_type = detect_asset_type(item)
            typed_bundle = self.typed_bundles.get(asset_type)
            if not typed_bundle:
                typed_bundle = TypedBundle(asset_type, output=self._make_typed_bundle_output(asset_type))
                self.typed_bundles[asset_type] = typed_bundle
                if self._env:
                    self._webassets_env.add(typed_bundle)
            if prepend:
                typed_bundle.contents.insert(0, item)
            else:
                typed_bundle.contents.append(item)

    def _make_typed_bundle_output(self, asset_type):
        """Derive the per-type output filename from this package's output setting."""
        if not self.output:
            return None
        return "%s.%s" % (self.output, asset_type)

    def _yield_bundle_contents(self, data):
        """Yield bundle contents from the given dict.

        Each item yielded will be either a string representing a file path
        or a bundle."""
        if isinstance(data, list):
            contents = data
        else:
            contents = data.get('contents', [])
        if isinstance(contents, six.string_types):
            contents = contents,
        for content in contents:
            if isinstance(content, dict):
                content = self._create_bundle(content)
            yield content

    def _create_bundle(self, data):
        """Return a bundle initialised by the given dict."""
        kwargs = {}
        if isinstance(data, dict):
            kwargs.update(
                filters=data.get('filters', None),
                output=data.get('output', None),
                debug=data.get('debug', None),
                extra=data.get('extra', {}),
                config=data.get('config', {}),
                depends=data.get('depends', None))
        bundle = Bundle(*list(self._yield_bundle_contents(data)), **kwargs)
        return self._auto_filter_bundle(bundle)

    def _auto_filter_bundle(self, bundle):
        """Attach the auto compile filter to *bundle* (recursing into nested bundles)."""
        def filter_exists(name):
            for f in bundle.filters:
                if f.name == name:
                    return True
            return False

        # check whether all the bundle contents share the same file extension;
        # nested Bundles never count as a shared extension
        same_ext = None
        for item in bundle.contents:
            if isinstance(item, Bundle):
                # original called item.rsplit() before this check, which raised
                # AttributeError for nested bundles; recurse instead
                same_ext = False
                self._auto_filter_bundle(item)
                continue
            ext = item.rsplit(".", 1)[1]
            if same_ext and same_ext != ext:
                same_ext = False
            if same_ext != False:
                same_ext = ext
        if same_ext in auto_filters and bundle.output:
            filter_name = auto_filters[same_ext][0]
            if not filter_exists(filter_name):
                bundle.filters = list(bundle.filters) + [get_filter(filter_name)]
        else:
            # mixed extensions: wrap each compilable file individually
            bundle.contents = [
                item if isinstance(item, Bundle) else self._auto_apply_filter(item)
                for item in bundle.contents]
        return bundle

    def _auto_apply_filter(self, filename):
        """Wrap *filename* in a Bundle with the matching auto filter, if any."""
        if "." not in filename:
            return filename  # no extension, nothing to compile (original IndexError'd here)
        base, ext = filename.rsplit(".", 1)
        spec = auto_filters.get(ext)
        if not spec:
            return filename
        filter_name, out_ext = spec
        return Bundle(filename, filters=[filter_name], output="%s.%s" % (base, out_ext))

    @property
    def env(self):
        if not self._env:
            raise PackageError("Package is not bound to any environment")
        return self._env

    @env.setter
    def env(self, env):
        self._env = env
        if env:
            # six.itervalues works on py2 and py3 (dict.itervalues is py2-only)
            for bundle in six.itervalues(self.typed_bundles):
                env.env.add(bundle)

    @property
    def _webassets_env(self):
        return self._env.env if self._env else None

    @property
    def asset_types(self):
        return self.typed_bundles.keys()

    def _ref(self, name):
        """Resolve a "@name" dependency against the bound environment."""
        if not self._env:
            raise PackageError("Package includes references to other bundles but is not bound to an environment")
        return self._env[name]

    def urls_for_depends(self, asset_type, *args, **kwargs):
        """Urls of the given asset_type coming from referenced packages."""
        urls = []
        for ref in self.depends:
            urls.extend(self._ref(ref).urls_for(asset_type, *args, **kwargs))
        return urls

    def urls_for_self(self, asset_type, *args, **kwargs):
        """Urls of the given asset_type coming from this package's own bundles."""
        typed_bundle = self.typed_bundles.get(asset_type)
        if typed_bundle:
            return typed_bundle.urls(*args, **kwargs)
        return []

    def urls_for(self, asset_type, *args, **kwargs):
        """Returns urls needed to include all assets of asset_type
        """
        return self.urls_for_depends(asset_type, *args, **kwargs) + \
               self.urls_for_self(asset_type, *args, **kwargs)

    def html_tags_for(self, asset_type, *args, **kwargs):
        """Return html tags for urls of asset_type
        """
        html = []
        for ref in self.depends:
            html.append(self._ref(ref).html_tags_for(asset_type, *args, **kwargs))
        if asset_type in self.typed_bundles:
            html.append(render_asset_html_tags(asset_type, self.urls_for_self(asset_type, *args, **kwargs)))
        return "\n".join(html)

    def html_tags(self, *args, **kwargs):
        """Return all html tags for all asset_type
        """
        html = []
        for asset_type in list_asset_types():
            html.append(self.html_tags_for(asset_type.name, *args, **kwargs))
        return "\n".join(html)

    def __iter__(self):
        # py2's itervalues() does not exist on py3 dicts; six handles both
        return iter(six.itervalues(self.typed_bundles))

    def __str__(self):
        return self.html_tags()

    def __repr__(self):
        parts = []
        if self.depends:
            parts.append("depends=%s" % ",".join(self.depends))
        if self.typed_bundles:
            parts.extend(["%s=%s" % (k, repr(v)) for k, v in six.iteritems(self.typed_bundles)])
        return "<%s(%s)>" % (self.__class__.__name__, ", ".join(parts))
from webassets import Bundle, six
from webassets.filter import get_filter
__all__ = ('Package', 'PackageError')
auto_filters = {
"less": ("less", "css"),
"coffee": ("coffeescript", "js"),
"sass": ("sass", "css"),
"scss": ("scss", "css"),
"styl": ("stylus", "css")}
class TypedBundle(Bundle):
def __init__(self, asset_type, *args, **kwargs):
super(TypedBundle, self).__init__(*args, **kwargs)
self.asset_type = asset_type
self.contents = list(self.contents)
class PackageError(Exception):
pass
class Package(object):
"""A list of mixed-typed bundles and urls
"""
def __init__(self, *items, **kwargs):
self._env = kwargs.pop("env", None)
self.output = kwargs.pop("output", None)
self.typed_bundles = {}
self.depends = []
if items:
self.append(*items)
def append(self, *items):
self._process_items(items)
def preprend(self, *items):
self._process_items(items, True)
def _process_items(self, items, prepend=False):
for item in items:
if isinstance(item, (list, tuple)):
self._process_items(item)
continue
if isinstance(item, six.string_types) and item.startswith("@"):
if prepend:
self.depends.insert(0, item[1:])
else:
self.depends.append(item[1:])
continue
if isinstance(item, Bundle):
self._auto_filter_bundle(item)
elif isinstance(item, dict):
item = self._create_bundle(item)
elif not (item.startswith("http://") or item.startswith("https://")):
item = self._auto_apply_filter(item)
asset_type = detect_asset_type(item)
typed_bundle = self.typed_bundles.get(asset_type)
if not typed_bundle:
typed_bundle = TypedBundle(asset_type, output=self._make_typed_bundle_output(asset_type))
self.typed_bundles[asset_type] = typed_bundle
if self._env:
self._webassets_env.add(typed_bundle)
if prepend:
typed_bundle.contents.insert(0, item)
else:
typed_bundle.contents.append(item)
def _make_typed_bundle_output(self, asset_type):
if not self.output:
return None
return "%s.%s" % (self.output, asset_type)
def _yield_bundle_contents(self, data):
"""Yield bundle contents from the given dict.
Each item yielded will be either a string representing a file path
or a bundle."""
if isinstance(data, list):
contents = data
else:
contents = data.get('contents', [])
if isinstance(contents, six.string_types):
contents = contents,
for content in contents:
if isinstance(content, dict):
content = self._create_bundle(content)
yield content
def _create_bundle(self, data):
"""Return a bundle initialised by the given dict."""
kwargs = {}
filters = None
if isinstance(data, dict):
kwargs.update(
filters=data.get('filters', None),
output=data.get('output', None),
debug=data.get('debug', None),
extra=data.get('extra', {}),
config=data.get('config', {}),
depends=data.get('depends', None))
bundle = Bundle(*list(self._yield_bundle_contents(data)), **kwargs)
return self._auto_filter_bundle(bundle)
def _auto_filter_bundle(self, bundle):
def filter_exists(name):
for f in bundle.filters:
if f.name == name:
return True
return False
# checks if all the bundle content has the same file extension
same_ext = None
for item in bundle.contents:
ext = item.rsplit(".", 1)[1]
if same_ext and (isinstance(item, Bundle) or same_ext != ext):
same_ext = False
if same_ext != False:
same_ext = ext
if isinstance(item, Bundle):
self._auto_filter_bundle(item)
filters = []
if same_ext in auto_filters and bundle.output:
f = auto_filters[same_ext][0]
if not filter_exists(f):
bundle.filters = list(bundle.filters) + [get_filter(f)]
else:
contents = []
for item in bundle.contents:
if not isinstance(item, Bundle):
item = self._auto_apply_filter(item)
contents.append(item)
bundle.contents = contents
return bundle
def _auto_apply_filter(self, filename):
filters = []
ext = None
for filter_ext, spec in six.iteritems(auto_filters):
if filename.rsplit(".", 1)[1] == filter_ext:
filters.append(spec[0])
ext = spec[1]
break
if not ext:
return filename
return Bundle(filename, filters=filters, output="%s.%s" % (filename.rsplit(".", 1)[0], ext))
@property
def env(self):
if not self._env:
raise PackageError("Package is not bound to any environment")
return self._env
@env.setter
def env(self, env):
self._env = env
if env:
for bundle in self.typed_bundles.itervalues():
env.env.add(bundle)
@property
def _webassets_env(self):
return self._env.env if self._env else None
@property
def asset_types(self):
return self.typed_bundles.keys()
def _ref(self, name):
if not self._env:
raise PackageError("Package includes references to other bundles but is not bound to an environment")
return self._env[name]
def urls_for_depends(self, asset_type, *args, **kwargs):
urls = []
for ref in self.depends:
urls.extend(self._ref(ref).urls_for(asset_type, *args, **kwargs))
return urls
def urls_for_self(self, asset_type, *args, **kwargs):
typed_bundle = self.typed_bundles.get(asset_type)
if typed_bundle:
return typed_bundle.urls(*args, **kwargs)
return []
def urls_for(self, asset_type, *args, **kwargs):
"""Returns urls needed to include all assets of asset_type
"""
return self.urls_for_depends(asset_type, *args, **kwargs) + \
self.urls_for_self(asset_type, *args, **kwargs)
def html_tags_for(self, asset_type, *args, **kwargs):
"""Return html tags for urls of asset_type
"""
html = []
for ref in self.depends:
html.append(self._ref(ref).html_tags_for(asset_type, *args, **kwargs))
if asset_type in self.typed_bundles:
html.append(render_asset_html_tags(asset_type, self.urls_for_self(asset_type, *args, **kwargs)))
return "\n".join(html)
def html_tags(self, *args, **kwargs):
"""Return all html tags for all asset_type
"""
html = []
for asset_type in list_asset_types():
html.append(self.html_tags_for(asset_type.name, *args, **kwargs))
return "\n".join(html)
def __iter__(self):
return self.typed_bundles.itervalues()
def __str__(self):
return self.html_tags()
def __repr__(self):
parts = []
if self.depends:
parts.append("depends=%s" % ",".join(self.depends))
if self.typed_bundles:
parts.extend(["%s=%s" % (k, repr(v)) for k, v in self.typed_bundles.iteritems()])
return "<%s(%s)>" % (self.__class__.__name__, ", ".join(parts)) | 0.712132 | 0.167593 |
from token import DEDENT, INDENT, NAME, NEWLINE, STRING, ENDMARKER
from story5.parser import Parser
class Rule:
    """A grammar rule: a name plus its list of alternatives."""

    def __init__(self, name, alts):
        self.name = name
        self.alts = alts

    def __repr__(self):
        return f"Rule({self.name!r}, {self.alts})"

    def __eq__(self, other):
        if not isinstance(other, Rule):
            return NotImplemented
        return (self.name, self.alts) == (other.name, other.alts)
class Alt:
    """One alternative of a rule: a list of items plus an optional action string."""

    def __init__(self, items, action=None):
        self.items = items
        self.action = action

    def __repr__(self):
        if not self.action:
            return f"Alt({self.items!r})"
        return f"Alt({self.items!r}, {self.action!r})"

    def __str__(self):
        joined = " ".join(self.items)
        return f"{joined} {{ {self.action} }}" if self.action else joined

    def __eq__(self, other):
        if not isinstance(other, Alt):
            return NotImplemented
        return (self.items, self.action) == (other.items, other.action)
class GrammarParser(Parser):
    """Recursive-descent parser for the meta-grammar (rules, alternatives, actions).

    Each method tries to match one grammar construct; on failure it resets the
    tokenizer to the saved position and returns None.
    """

    def grammar(self):
        """grammar: rule+ ENDMARKER"""
        pos = self.mark()
        if rule := self.rule():
            rules = [rule]
            while rule := self.rule():
                rules.append(rule)
            if self.expect(ENDMARKER):
                return rules
        self.reset(pos)
        return None

    def rule(self):
        """rule: NAME ':' [alts] NEWLINE [indented_alts] -- at least one alt required"""
        pos = self.mark()
        if (name := self.expect(NAME)) and self.expect(":"):
            if alts := self.alts_newline():
                pass
            elif self.expect(NEWLINE):
                alts = []
            else:
                self.reset(pos)
                return None
            if alts1 := self.indented_alts():
                alts.extend(alts1)
            if alts:
                return Rule(name.string, alts)
        self.reset(pos)
        return None

    def indented_alts(self):
        """indented_alts: INDENT ('|' alts NEWLINE)* DEDENT"""
        pos = self.mark()
        if self.expect(INDENT):
            alts = []
            while alts1 := self.bar_alts_newline():
                alts.extend(alts1)
            if self.expect(DEDENT):
                return alts
        self.reset(pos)
        return None

    def bar_alts_newline(self):
        """bar_alts_newline: '|' alts NEWLINE"""
        pos = self.mark()
        if self.expect("|") and (alts := self.alts_newline()):
            return alts
        self.reset(pos)
        return None

    def alts_newline(self):
        """alts_newline: alts NEWLINE"""
        pos = self.mark()
        if (alts := self.alts()) and self.expect(NEWLINE):
            return alts
        self.reset(pos)
        return None

    def alts(self):
        """alts: alternative ('|' alternative)*"""
        pos = self.mark()
        if alt := self.alternative():
            alts = [alt]
            while alt := self.bar_alt():
                alts.append(alt)
            return alts
        self.reset(pos)
        return None

    def bar_alt(self):
        """bar_alt: '|' alternative"""
        pos = self.mark()
        if self.expect("|") and (alt := self.alternative()):
            return alt
        self.reset(pos)
        return None

    def alternative(self):
        """alternative: item+ ['{' action-tokens '}']"""
        items = []
        while item := self.item():
            items.append(item)
        if not items:
            return None
        # Look for {...}
        action = None
        pos = self.mark()
        if self.expect("{"):
            # Collect arbitrary tokens until "}" found, skipping matching {...} pairs.
            action_tokens = []
            level = 0
            while True:
                token = self.tokenizer.get_token().string
                if token == "{":
                    level += 1
                elif token == "}":
                    level -= 1
                    if level < 0:
                        break
                action_tokens.append(token)
            action = " ".join(action_tokens)
        return Alt(items, action)

    def item(self):
        """item: NAME | STRING"""
        if name := self.expect(NAME):
            return name.string
        if string := self.expect(STRING):
            return string.string
        return None
from token import DEDENT, INDENT, NAME, NEWLINE, STRING, ENDMARKER
from story5.parser import Parser
class Rule:
def __init__(self, name, alts):
self.name = name
self.alts = alts
def __repr__(self):
return f"Rule({self.name!r}, {self.alts})"
def __eq__(self, other):
if not isinstance(other, Rule):
return NotImplemented
return self.name == other.name and self.alts == other.alts
class Alt:
def __init__(self, items, action=None):
self.items = items
self.action = action
def __repr__(self):
if self.action:
return f"Alt({self.items!r}, {self.action!r})"
else:
return f"Alt({self.items!r})"
def __str__(self):
items = " ".join(self.items)
if self.action:
return f"{items} {{ {self.action} }}"
else:
return items
def __eq__(self, other):
if not isinstance(other, Alt):
return NotImplemented
return self.items == other.items and self.action == other.action
class GrammarParser(Parser):
def grammar(self):
pos = self.mark()
if rule := self.rule():
rules = [rule]
while rule := self.rule():
rules.append(rule)
if self.expect(ENDMARKER):
return rules
self.reset(pos)
return None
def rule(self):
pos = self.mark()
if (name := self.expect(NAME)) and self.expect(":"):
if alts := self.alts_newline():
pass
elif self.expect(NEWLINE):
alts = []
else:
self.reset(pos)
return None
if alts1 := self.indented_alts():
alts.extend(alts1)
if alts:
return Rule(name.string, alts)
self.reset(pos)
return None
def indented_alts(self):
pos = self.mark()
if self.expect(INDENT):
alts = []
while alts1 := self.bar_alts_newline():
alts.extend(alts1)
if self.expect(DEDENT):
return alts
self.reset(pos)
return None
def bar_alts_newline(self):
pos = self.mark()
if self.expect("|") and (alts := self.alts_newline()):
return alts
self.reset(pos)
return None
def alts_newline(self):
pos = self.mark()
if (alts := self.alts()) and self.expect(NEWLINE):
return alts
self.reset(pos)
return None
def alts(self):
pos = self.mark()
if alt := self.alternative():
alts = [alt]
while alt := self.bar_alt():
alts.append(alt)
return alts
self.reset(pos)
return None
def bar_alt(self):
pos = self.mark()
if self.expect("|") and (alt := self.alternative()):
return alt
self.reset(pos)
return None
def alternative(self):
items = []
while item := self.item():
items.append(item)
if not items:
return None
# Look for {...}
action = None
pos = self.mark()
if self.expect("{"):
# Collect arbitrary tokens until "}" found, skipping matching {...} pairs.
action_tokens = []
level = 0
while True:
token = self.tokenizer.get_token().string
if token == "{":
level += 1
elif token == "}":
level -= 1
if level < 0:
break
action_tokens.append(token)
action = " ".join(action_tokens)
return Alt(items, action)
def item(self):
if name := self.expect(NAME):
return name.string
if string := self.expect(STRING):
return string.string
return None | 0.659953 | 0.27594 |
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.github_utils import GithubSingleton
PR_LINKED_ISSUE_URL = reverse('flowie:pr_linked_issue-view')
class PRLinkedIssuePublicAPI(TestCase):
"""Tests for PR Linked Issue public API"""
def setUp(self) -> None:
self.client = APIClient()
self.g = GithubSingleton.get()
def test_view_linked_issues_hashtag(self):
"""Test for viewing linked issues with hashtag"""
repo_name = 'JuliaPlots/Plots.jl'
repo = self.g.get_repo(repo_name)
pr = repo.get_pull(2807)
issue_1 = repo.get_issue(2202)
issue_2 = repo.get_issue(2330)
payload = {
'body': pr.body,
'repo_name': repo_name
}
res = self.client.post(PR_LINKED_ISSUE_URL, payload)
self.assertEqual(
res.status_code, status.HTTP_200_OK
)
for linked_issue in res.data:
for attr in ['html_url', 'title', 'state']:
self.assertIn(linked_issue[attr], [getattr(
issue_1, attr
), getattr(issue_2, attr)])
def test_view_linked_issues_http(self):
"""Test for viewing linked issues with http"""
repo_name = 'MLH-Fellowship/react-jsonschema-form'
repo = self.g.get_repo(repo_name)
pr = repo.get_pull(39)
issue_1 = repo.get_issue(9)
payload = {
'body': pr.body,
'repo_name': repo_name
}
res = self.client.post(PR_LINKED_ISSUE_URL, payload)
self.assertEqual(
res.status_code, status.HTTP_200_OK
)
for linked_issue in res.data:
for attr in ['html_url', 'title', 'state']:
self.assertIn(linked_issue[attr], getattr(
issue_1, attr
))
def test_view_linked_issues_none(self):
"""Test for viewing linked issues when none exists"""
repo_name = 'JuliaPlots/Plots.jl'
repo = self.g.get_repo(repo_name)
pr = repo.get_pull(2858)
payload = {
'repo_name': repo_name,
'body': pr.body
}
res = self.client.post(PR_LINKED_ISSUE_URL, payload)
self.assertEqual(
res.status_code, status.HTTP_200_OK
)
self.assertEqual(
len(res.data), 0
)
def test_view_linked_issue_wrong_repo(self):
"""Test linked issue URL with repo that does not exist"""
res = self.client.post(PR_LINKED_ISSUE_URL,
{'repo_name': 'asijdiawj',
'body': 'asd'})
self.assertEqual(
res.status_code, status.HTTP_400_BAD_REQUEST
)
self.assertEqual(
res.data['message'], 'Bad request'
)
def test_view_linked_issue_no_body(self):
"""Test linked issue URL with no body passed"""
res = self.client.post(PR_LINKED_ISSUE_URL)
self.assertEqual(
res.status_code, status.HTTP_400_BAD_REQUEST
)
self.assertEqual(
res.data['message'], 'Bad request'
) | backend/flowie/tests/test_pr_api.py | from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.github_utils import GithubSingleton
PR_LINKED_ISSUE_URL = reverse('flowie:pr_linked_issue-view')
class PRLinkedIssuePublicAPI(TestCase):
"""Tests for PR Linked Issue public API"""
def setUp(self) -> None:
self.client = APIClient()
self.g = GithubSingleton.get()
def test_view_linked_issues_hashtag(self):
"""Test for viewing linked issues with hashtag"""
repo_name = 'JuliaPlots/Plots.jl'
repo = self.g.get_repo(repo_name)
pr = repo.get_pull(2807)
issue_1 = repo.get_issue(2202)
issue_2 = repo.get_issue(2330)
payload = {
'body': pr.body,
'repo_name': repo_name
}
res = self.client.post(PR_LINKED_ISSUE_URL, payload)
self.assertEqual(
res.status_code, status.HTTP_200_OK
)
for linked_issue in res.data:
for attr in ['html_url', 'title', 'state']:
self.assertIn(linked_issue[attr], [getattr(
issue_1, attr
), getattr(issue_2, attr)])
def test_view_linked_issues_http(self):
"""Test for viewing linked issues with http"""
repo_name = 'MLH-Fellowship/react-jsonschema-form'
repo = self.g.get_repo(repo_name)
pr = repo.get_pull(39)
issue_1 = repo.get_issue(9)
payload = {
'body': pr.body,
'repo_name': repo_name
}
res = self.client.post(PR_LINKED_ISSUE_URL, payload)
self.assertEqual(
res.status_code, status.HTTP_200_OK
)
for linked_issue in res.data:
for attr in ['html_url', 'title', 'state']:
self.assertIn(linked_issue[attr], getattr(
issue_1, attr
))
def test_view_linked_issues_none(self):
"""Test for viewing linked issues when none exists"""
repo_name = 'JuliaPlots/Plots.jl'
repo = self.g.get_repo(repo_name)
pr = repo.get_pull(2858)
payload = {
'repo_name': repo_name,
'body': pr.body
}
res = self.client.post(PR_LINKED_ISSUE_URL, payload)
self.assertEqual(
res.status_code, status.HTTP_200_OK
)
self.assertEqual(
len(res.data), 0
)
def test_view_linked_issue_wrong_repo(self):
"""Test linked issue URL with repo that does not exist"""
res = self.client.post(PR_LINKED_ISSUE_URL,
{'repo_name': 'asijdiawj',
'body': 'asd'})
self.assertEqual(
res.status_code, status.HTTP_400_BAD_REQUEST
)
self.assertEqual(
res.data['message'], 'Bad request'
)
def test_view_linked_issue_no_body(self):
"""Test linked issue URL with no body passed"""
res = self.client.post(PR_LINKED_ISSUE_URL)
self.assertEqual(
res.status_code, status.HTTP_400_BAD_REQUEST
)
self.assertEqual(
res.data['message'], 'Bad request'
) | 0.430626 | 0.228071 |
import torch
import torchvision
import torch.nn.functional as F
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch import optim
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm
# Set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Hyperparameters
sequence_length = 28
input_size = 28
hidden_size = 256
num_layers = 2
num_classes = 10
learning_rate = 0.005
batch_size = 64
num_epochs = 3
class LSTM(nn.Module):
'''Recurrent neural network with LSTM (many-to-one)
'''
def __init__(self, input_size, hidden_size, num_layers, num_classes):
super(LSTM, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
# Using the last rnn output with fc to obtain the final classificaiton result
self.fc = nn.Linear(hidden_size, num_classes)
def forward(self, x):
'''
'''
out, _ = self.lstm(x) # x=[64, 28, 28], out=[64, 28, 256]=(batch, seq_len, 1 * hidden_size)
# Decode the hidden state of the last time step
# only take the last hidden state and send it into fc
out = out[:, -1, :] # out = [64, 256]
out = self.fc(out)
return out
def check_accuracy(loader, model):
'''Check accuracy on training & test to see how good our model
'''
num_correct = 0
num_samples = 0
# Set model to eval
model.eval()
with torch.no_grad():
for x, y in loader:
x = x.to(device=device).squeeze(1)
y = y.to(device=device)
scores = model(x)
_, predictions = scores.max(1)
num_correct += (predictions == y).sum()
num_samples += predictions.size(0)
# Toggle model back to train
model.train()
return num_correct / num_samples
# Load Data
train_dataset = datasets.MNIST(root="mnist/MNIST", train=True,
transform=transforms.ToTensor(), download=True)
test_dataset = datasets.MNIST(root="mnist/MNIST", train=False,
transform=transforms.ToTensor(), download=True)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
# Initialize network (try out just using simple RNN, or GRU, and then compare with LSTM)
model = LSTM(input_size, hidden_size, num_layers, num_classes).to(device)
# model = BLSTM(input_size, hidden_size, num_layers, num_classes).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Train Network
for epoch in range(num_epochs):
for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
# (torch.Size([64, 1, 28, 28]), torch.Size([64]))
# Get data to cuda if possible
data = data.to(device=device).squeeze(1) # [64, 1, 28, 28] -> [64, 28, 28]
targets = targets.to(device=device)
# forward
scores = model(data)
loss = criterion(scores, targets)
# backward
optimizer.zero_grad()
loss.backward()
# gradient descent update step/adam step
optimizer.step()
print(f"Accuracy on training set: {check_accuracy(train_loader, model)*100:2f}")
print(f"Accuracy on test set: {check_accuracy(test_loader, model)*100:.2f}") | RNN/Embedding/Embedding_RNN.py | import torch
import torchvision
import torch.nn.functional as F
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch import optim
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm
# Set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Hyperparameters
sequence_length = 28
input_size = 28
hidden_size = 256
num_layers = 2
num_classes = 10
learning_rate = 0.005
batch_size = 64
num_epochs = 3
class LSTM(nn.Module):
'''Recurrent neural network with LSTM (many-to-one)
'''
def __init__(self, input_size, hidden_size, num_layers, num_classes):
super(LSTM, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
# Using the last rnn output with fc to obtain the final classificaiton result
self.fc = nn.Linear(hidden_size, num_classes)
def forward(self, x):
'''
'''
out, _ = self.lstm(x) # x=[64, 28, 28], out=[64, 28, 256]=(batch, seq_len, 1 * hidden_size)
# Decode the hidden state of the last time step
# only take the last hidden state and send it into fc
out = out[:, -1, :] # out = [64, 256]
out = self.fc(out)
return out
def check_accuracy(loader, model):
'''Check accuracy on training & test to see how good our model
'''
num_correct = 0
num_samples = 0
# Set model to eval
model.eval()
with torch.no_grad():
for x, y in loader:
x = x.to(device=device).squeeze(1)
y = y.to(device=device)
scores = model(x)
_, predictions = scores.max(1)
num_correct += (predictions == y).sum()
num_samples += predictions.size(0)
# Toggle model back to train
model.train()
return num_correct / num_samples
# Load Data
train_dataset = datasets.MNIST(root="mnist/MNIST", train=True,
transform=transforms.ToTensor(), download=True)
test_dataset = datasets.MNIST(root="mnist/MNIST", train=False,
transform=transforms.ToTensor(), download=True)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
# Initialize network (try out just using simple RNN, or GRU, and then compare with LSTM)
model = LSTM(input_size, hidden_size, num_layers, num_classes).to(device)
# model = BLSTM(input_size, hidden_size, num_layers, num_classes).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Train Network
for epoch in range(num_epochs):
for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
# (torch.Size([64, 1, 28, 28]), torch.Size([64]))
# Get data to cuda if possible
data = data.to(device=device).squeeze(1) # [64, 1, 28, 28] -> [64, 28, 28]
targets = targets.to(device=device)
# forward
scores = model(data)
loss = criterion(scores, targets)
# backward
optimizer.zero_grad()
loss.backward()
# gradient descent update step/adam step
optimizer.step()
print(f"Accuracy on training set: {check_accuracy(train_loader, model)*100:2f}")
print(f"Accuracy on test set: {check_accuracy(test_loader, model)*100:.2f}") | 0.943458 | 0.753603 |
import datetime
import logging
import os
import sys
import time
from collections import namedtuple
import click
from dateutil.parser import parse
import requests
from mutualfunds import DailyNAVPS
LOGFILE = 'runtime.log'
TIMEOUT = 5
OUTDIR = 'Reports'
def stringify_date(date):
"""Convert datetime object into human-readable string"""
return date.strftime('%Y-%m-%d')
def save_report(report):
"""Create the directory structure where output is saved"""
rdate = report.date
fname = 'mf-navps-report-{:%Y-%m-%d}.csv'.format(rdate)
fpath = '{},{:%Y,%m-%B}'.format(OUTDIR, rdate).split(',')
spath = os.path.join(*fpath)
if not os.path.exists(spath):
os.makedirs(spath, exist_ok=True)
report.to_csv(os.path.join(spath, fname))
def set_date_range(start, end):
"""Handle the date parameters properly"""
DateRange = namedtuple('DateRange', 'start end')
dates = None
if end:
end = parse(end)
start = parse(start) if start else end
dates = DateRange(start=start, end=end)
else:
if start:
start = parse(start)
end = datetime.datetime.now()
dates = DateRange(start=start, end=end)
return dates
@click.command()
@click.option('-s', '--start',
help='Starting date (not earlier than Dec. 20, 2004)')
@click.option('-e', '--end',
help='Ending date (up to most recent trading day)')
def run(start, end):
logging.basicConfig(format='%(asctime)s %(message)s',
filename=LOGFILE,
level=logging.INFO)
logging.info('[INFO] Started')
click.echo('Initializing')
dates = set_date_range(start, end)
if dates is None:
msg = 'No dates specified. Exited'
click.echo(msg)
logging.info('[INFO] {}'.format(msg))
sys.exit(1)
curr_date = dates.start
from_str = stringify_date(curr_date)
to_str = stringify_date(dates.end)
msg = 'Set range from {} to {}'.format(from_str, to_str)
click.echo(msg)
logging.info('[INFO] {}'.format(msg))
one_day = datetime.timedelta(1)
with requests.Session() as session:
logging.info('Session established.')
click.echo('Session started')
while curr_date <= dates.end:
if curr_date.weekday() <= 4: # weekdays only
click.echo('Processing report {}'.format(from_str),
nl=False)
try:
report = DailyNAVPS(
session=session,
date=curr_date
)
except Exception as e:
click.echo(' [Error: {}]'.format(e))
logging.error('[ERROR] {} at {}'.format(e, from_str))
else:
if not report.open:
click.echo(' [No data. Date skipped]')
logging.info(
'[INFO] Report {} skipped'.format(from_str)
)
else:
if not report.data:
click.echo(' [Unable to obtain data]')
logging.warning(
'[WARNING] {} is empty'.format(from_str)
)
else:
save_report(report)
click.echo(' [Saved output file]')
finally:
time.sleep(TIMEOUT)
curr_date += one_day
from_str = stringify_date(curr_date)
click.echo('Done')
logging.info('[INFO] Finished') | main.py | import datetime
import logging
import os
import sys
import time
from collections import namedtuple
import click
from dateutil.parser import parse
import requests
from mutualfunds import DailyNAVPS
LOGFILE = 'runtime.log'
TIMEOUT = 5
OUTDIR = 'Reports'
def stringify_date(date):
"""Convert datetime object into human-readable string"""
return date.strftime('%Y-%m-%d')
def save_report(report):
"""Create the directory structure where output is saved"""
rdate = report.date
fname = 'mf-navps-report-{:%Y-%m-%d}.csv'.format(rdate)
fpath = '{},{:%Y,%m-%B}'.format(OUTDIR, rdate).split(',')
spath = os.path.join(*fpath)
if not os.path.exists(spath):
os.makedirs(spath, exist_ok=True)
report.to_csv(os.path.join(spath, fname))
def set_date_range(start, end):
"""Handle the date parameters properly"""
DateRange = namedtuple('DateRange', 'start end')
dates = None
if end:
end = parse(end)
start = parse(start) if start else end
dates = DateRange(start=start, end=end)
else:
if start:
start = parse(start)
end = datetime.datetime.now()
dates = DateRange(start=start, end=end)
return dates
@click.command()
@click.option('-s', '--start',
help='Starting date (not earlier than Dec. 20, 2004)')
@click.option('-e', '--end',
help='Ending date (up to most recent trading day)')
def run(start, end):
logging.basicConfig(format='%(asctime)s %(message)s',
filename=LOGFILE,
level=logging.INFO)
logging.info('[INFO] Started')
click.echo('Initializing')
dates = set_date_range(start, end)
if dates is None:
msg = 'No dates specified. Exited'
click.echo(msg)
logging.info('[INFO] {}'.format(msg))
sys.exit(1)
curr_date = dates.start
from_str = stringify_date(curr_date)
to_str = stringify_date(dates.end)
msg = 'Set range from {} to {}'.format(from_str, to_str)
click.echo(msg)
logging.info('[INFO] {}'.format(msg))
one_day = datetime.timedelta(1)
with requests.Session() as session:
logging.info('Session established.')
click.echo('Session started')
while curr_date <= dates.end:
if curr_date.weekday() <= 4: # weekdays only
click.echo('Processing report {}'.format(from_str),
nl=False)
try:
report = DailyNAVPS(
session=session,
date=curr_date
)
except Exception as e:
click.echo(' [Error: {}]'.format(e))
logging.error('[ERROR] {} at {}'.format(e, from_str))
else:
if not report.open:
click.echo(' [No data. Date skipped]')
logging.info(
'[INFO] Report {} skipped'.format(from_str)
)
else:
if not report.data:
click.echo(' [Unable to obtain data]')
logging.warning(
'[WARNING] {} is empty'.format(from_str)
)
else:
save_report(report)
click.echo(' [Saved output file]')
finally:
time.sleep(TIMEOUT)
curr_date += one_day
from_str = stringify_date(curr_date)
click.echo('Done')
logging.info('[INFO] Finished') | 0.207536 | 0.100834 |
from vitruncate import GT
from numpy import *
import matplotlib
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.patches as patches
pyplot.rc('font', size=16) #set defaults so that the plots are readable
pyplot.rc('axes', titlesize=16)
pyplot.rc('axes', labelsize=16)
pyplot.rc('xtick', labelsize=16)
pyplot.rc('ytick', labelsize=16)
pyplot.rc('legend', fontsize=16)
pyplot.rc('figure', titlesize=16)
def plot_pdf():
gt_1d_pdf = lambda x, std, beta: 1/(std*sqrt(2*pi))*exp(-x**2/(2*std**2))*(x<=1)*(x>=-1) + (x>1)*1/(x**beta) + (x<-1)*1/((-x)**beta)
fig,ax = pyplot.subplots()
x = linspace(-3,3,num=5000,endpoint=True)
y_1 = gt_1d_pdf(x,std=1,beta=5)
y_2 = gt_1d_pdf(x,std=.5,beta=4)
ax.plot(x,y_1,color='c',label=r'$\tilde{\mathcal{N}}(0,1,\beta=5)$')
ax.plot(x,y_2,color='g',label=r'$\tilde{\mathcal{N}}(0,1/4,\beta=4)$')
ax.set_xlim([-3,3])
ax.set_xticks([-3,0,3])
ax.set_ylim([0,1])
ax.set_yticks([0,1])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.18), ncol=2, fancybox=True, shadow=True)
pyplot.savefig('paper/figs/pdf.png',dpi=250)
def plot_cub_ext():
fig = pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
X, Y = meshgrid([0,1], [0,1])
ax.plot_surface(X,Y,ones((2,2)), color='b')
ax.plot_surface(X,Y,zeros((2,2)), color='b')
ax.plot_surface(X,zeros((2,2)),Y, color='b')
ax.plot_surface(X,ones((2,2)),Y, color='b')
ax.plot_surface(ones((2,2)),X,Y, color='b')
ax.plot_surface(zeros((2,2)),X,Y, color='b')
ext = 'faces'
if 'lines' in ext:
ax.plot([-1,2],[0,0],[0,0],color='r')
ax.plot([-1,2],[1,1],[0,0],color='r')
ax.plot([-1,2],[0,0],[1,1],color='r')
ax.plot([-1,2],[1,1],[1,1],color='r')
ax.plot([0,0],[-1,2],[0,0],color='r')
ax.plot([1,1],[-1,2],[0,0],color='r')
ax.plot([0,0],[-1,2],[1,1],color='r')
ax.plot([1,1],[-1,2],[1,1],color='r')
ax.plot([0,0],[0,0],[-1,2],color='r')
ax.plot([1,1],[0,0],[-1,2],color='r')
ax.plot([0,0],[1,1],[-1,2],color='r')
ax.plot([1,1],[1,1],[-1,2],color='r')
if 'faces' in ext:
Xe, Ye = meshgrid([-1,2], [-2,2])
ax.plot_surface(Xe,Ye,ones((2,2)), color='b',alpha=.5)
#ax.plot_surface(Xe,Ye,zeros((2,2)), color='b',alpha=.5)
ax.plot_surface(Xe,zeros((2,2)),Ye, color='g',alpha=.5)
#ax.plot_surface(Xe,ones((2,2)),Ye, color='g',alpha=.5)
ax.plot_surface(ones((2,2)),Xe,Ye, color='r',alpha=.5)
#ax.plot_surface(zeros((2,2)),Xe,Ye, color='r',alpha=.5)
if 'points' in ext:
points = array([[0, 0, 0],[1, 0, 0 ],[1, 1, 0],[0, 1, 0],[0, 0, 1],[1, 0, 1 ],[1, 1, 1],[0, 1, 1]])
ax.scatter3D(points[:, 0], points[:, 1], points[:, 2])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.view_init(elev=20, azim=0)
pyplot.savefig('paper/figs/cube_ext.png',dpi=250)
def plot_square_ext():
fig,ax = pyplot.subplots(figsize=(5,5))
ax.add_patch(patches.Rectangle((-.5,-.5),1,1,linewidth=0,edgecolor='b',facecolor='c'))
ax.plot([-2,2],[-.5,-.5],color='g')
ax.plot([-2,2],[.5,.5],color='g')
ax.plot([-.5,-.5],[-2,2],color='g')
ax.plot([.5,.5],[-2,2],color='g')
ax.set_xlim([-1.5,1.5])
ax.set_ylim([-1.5,1.5])
ax.set_aspect(1)
for s in ['left','right','top','bottom']:
ax.spines[s].set_visible(False)
pyplot.axis('off')
adj = -.05
pyplot.text(-1+adj, -1+adj, '2')
pyplot.text(-1+adj, 1+adj, '2')
pyplot.text(1+adj, 1+adj, '2')
pyplot.text(1+adj, -1+adj, '2')
pyplot.text(0+adj, -1+adj, '1')
pyplot.text(0+adj, 1+adj, '1')
pyplot.text(-1+adj, 0+adj, '1')
pyplot.text(1+adj, 0+adj, '1')
pyplot.text(0+adj, 0+adj, '0')
pyplot.tight_layout()
pyplot.savefig('paper/figs/square_ext.png',dpi=250)
def heatmap():
# params
L = [-2,-4]
U = [4,5]
n_mesh = 100
pdelta = 1
xlim = [L[0]-pdelta,U[0]+pdelta]
ylim = [L[1]-pdelta,U[1]+pdelta]
# generate points
gt = GT(
n = 2**8,
d = 2,
mu = [1,2],
Sigma = [[5,4],[4,9]], #[[5,0],[0,9]],
L = L,
U = U,
init_type = 'Sobol',
seed = None,
n_block = None,
alpha=.1)
x = gt.update(steps=1000, epsilon=5e-3, eta=.9)
# evaluate meshgrid for pdf contour
mesh = zeros(((n_mesh)**2,3),dtype=float)
x_grid_tics = linspace(xlim[0],xlim[1],n_mesh)
y_grid_tics = linspace(ylim[0],ylim[1],n_mesh)
x_mesh,y_mesh = meshgrid(x_grid_tics,y_grid_tics)
mesh[:,0] = x_mesh.flatten()
mesh[:,1] = y_mesh.flatten()
mesh[:,2] = log2(gt._pdf(mesh[:,:2]))
z_mesh = mesh[:,2].reshape((n_mesh,n_mesh))
# plots
fig,ax = pyplot.subplots(figsize=(5,5))
# colors
clevel = linspace(mesh[:,2].min(),mesh[:,2].max(),100)
cmap = pyplot.get_cmap('GnBu') # https://matplotlib.org/tutorials/colors/colormaps.html
#cmap = matplotlib.colors.LinearSegmentedColormap.from_list("", [(.95,.95,.95),(0,0,1)])
# contours
ax.contourf(x_mesh,y_mesh,z_mesh,clevel,cmap=cmap,extend='both')
#ax.contour(x_mesh,y_mesh,z_mesh,levels=[-50,-30,-10,-1])
# scatter plot
ax.scatter(x[:,0],x[:,1],s=5,color='w')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticks([xlim[0],L[0],U[0],xlim[1]])
ax.set_yticks([ylim[0],L[1],U[1],ylim[1]])
ax.set_title(r'Density Log Contour with $\alpha$=.1')
# output
pyplot.savefig('paper/figs/heatmap.png',dpi=250)
if __name__ == '__main__':
#plot_pdf()
#plot_cub_ext()
#plot_square_ext()
heatmap() | paper/figs.py | from vitruncate import GT
from numpy import *
import matplotlib
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.patches as patches
pyplot.rc('font', size=16) #set defaults so that the plots are readable
pyplot.rc('axes', titlesize=16)
pyplot.rc('axes', labelsize=16)
pyplot.rc('xtick', labelsize=16)
pyplot.rc('ytick', labelsize=16)
pyplot.rc('legend', fontsize=16)
pyplot.rc('figure', titlesize=16)
def plot_pdf():
gt_1d_pdf = lambda x, std, beta: 1/(std*sqrt(2*pi))*exp(-x**2/(2*std**2))*(x<=1)*(x>=-1) + (x>1)*1/(x**beta) + (x<-1)*1/((-x)**beta)
fig,ax = pyplot.subplots()
x = linspace(-3,3,num=5000,endpoint=True)
y_1 = gt_1d_pdf(x,std=1,beta=5)
y_2 = gt_1d_pdf(x,std=.5,beta=4)
ax.plot(x,y_1,color='c',label=r'$\tilde{\mathcal{N}}(0,1,\beta=5)$')
ax.plot(x,y_2,color='g',label=r'$\tilde{\mathcal{N}}(0,1/4,\beta=4)$')
ax.set_xlim([-3,3])
ax.set_xticks([-3,0,3])
ax.set_ylim([0,1])
ax.set_yticks([0,1])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.18), ncol=2, fancybox=True, shadow=True)
pyplot.savefig('paper/figs/pdf.png',dpi=250)
def plot_cub_ext():
fig = pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
X, Y = meshgrid([0,1], [0,1])
ax.plot_surface(X,Y,ones((2,2)), color='b')
ax.plot_surface(X,Y,zeros((2,2)), color='b')
ax.plot_surface(X,zeros((2,2)),Y, color='b')
ax.plot_surface(X,ones((2,2)),Y, color='b')
ax.plot_surface(ones((2,2)),X,Y, color='b')
ax.plot_surface(zeros((2,2)),X,Y, color='b')
ext = 'faces'
if 'lines' in ext:
ax.plot([-1,2],[0,0],[0,0],color='r')
ax.plot([-1,2],[1,1],[0,0],color='r')
ax.plot([-1,2],[0,0],[1,1],color='r')
ax.plot([-1,2],[1,1],[1,1],color='r')
ax.plot([0,0],[-1,2],[0,0],color='r')
ax.plot([1,1],[-1,2],[0,0],color='r')
ax.plot([0,0],[-1,2],[1,1],color='r')
ax.plot([1,1],[-1,2],[1,1],color='r')
ax.plot([0,0],[0,0],[-1,2],color='r')
ax.plot([1,1],[0,0],[-1,2],color='r')
ax.plot([0,0],[1,1],[-1,2],color='r')
ax.plot([1,1],[1,1],[-1,2],color='r')
if 'faces' in ext:
Xe, Ye = meshgrid([-1,2], [-2,2])
ax.plot_surface(Xe,Ye,ones((2,2)), color='b',alpha=.5)
#ax.plot_surface(Xe,Ye,zeros((2,2)), color='b',alpha=.5)
ax.plot_surface(Xe,zeros((2,2)),Ye, color='g',alpha=.5)
#ax.plot_surface(Xe,ones((2,2)),Ye, color='g',alpha=.5)
ax.plot_surface(ones((2,2)),Xe,Ye, color='r',alpha=.5)
#ax.plot_surface(zeros((2,2)),Xe,Ye, color='r',alpha=.5)
if 'points' in ext:
points = array([[0, 0, 0],[1, 0, 0 ],[1, 1, 0],[0, 1, 0],[0, 0, 1],[1, 0, 1 ],[1, 1, 1],[0, 1, 1]])
ax.scatter3D(points[:, 0], points[:, 1], points[:, 2])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.view_init(elev=20, azim=0)
pyplot.savefig('paper/figs/cube_ext.png',dpi=250)
def plot_square_ext():
fig,ax = pyplot.subplots(figsize=(5,5))
ax.add_patch(patches.Rectangle((-.5,-.5),1,1,linewidth=0,edgecolor='b',facecolor='c'))
ax.plot([-2,2],[-.5,-.5],color='g')
ax.plot([-2,2],[.5,.5],color='g')
ax.plot([-.5,-.5],[-2,2],color='g')
ax.plot([.5,.5],[-2,2],color='g')
ax.set_xlim([-1.5,1.5])
ax.set_ylim([-1.5,1.5])
ax.set_aspect(1)
for s in ['left','right','top','bottom']:
ax.spines[s].set_visible(False)
pyplot.axis('off')
adj = -.05
pyplot.text(-1+adj, -1+adj, '2')
pyplot.text(-1+adj, 1+adj, '2')
pyplot.text(1+adj, 1+adj, '2')
pyplot.text(1+adj, -1+adj, '2')
pyplot.text(0+adj, -1+adj, '1')
pyplot.text(0+adj, 1+adj, '1')
pyplot.text(-1+adj, 0+adj, '1')
pyplot.text(1+adj, 0+adj, '1')
pyplot.text(0+adj, 0+adj, '0')
pyplot.tight_layout()
pyplot.savefig('paper/figs/square_ext.png',dpi=250)
def heatmap():
# params
L = [-2,-4]
U = [4,5]
n_mesh = 100
pdelta = 1
xlim = [L[0]-pdelta,U[0]+pdelta]
ylim = [L[1]-pdelta,U[1]+pdelta]
# generate points
gt = GT(
n = 2**8,
d = 2,
mu = [1,2],
Sigma = [[5,4],[4,9]], #[[5,0],[0,9]],
L = L,
U = U,
init_type = 'Sobol',
seed = None,
n_block = None,
alpha=.1)
x = gt.update(steps=1000, epsilon=5e-3, eta=.9)
# evaluate meshgrid for pdf contour
mesh = zeros(((n_mesh)**2,3),dtype=float)
x_grid_tics = linspace(xlim[0],xlim[1],n_mesh)
y_grid_tics = linspace(ylim[0],ylim[1],n_mesh)
x_mesh,y_mesh = meshgrid(x_grid_tics,y_grid_tics)
mesh[:,0] = x_mesh.flatten()
mesh[:,1] = y_mesh.flatten()
mesh[:,2] = log2(gt._pdf(mesh[:,:2]))
z_mesh = mesh[:,2].reshape((n_mesh,n_mesh))
# plots
fig,ax = pyplot.subplots(figsize=(5,5))
# colors
clevel = linspace(mesh[:,2].min(),mesh[:,2].max(),100)
cmap = pyplot.get_cmap('GnBu') # https://matplotlib.org/tutorials/colors/colormaps.html
#cmap = matplotlib.colors.LinearSegmentedColormap.from_list("", [(.95,.95,.95),(0,0,1)])
# contours
ax.contourf(x_mesh,y_mesh,z_mesh,clevel,cmap=cmap,extend='both')
#ax.contour(x_mesh,y_mesh,z_mesh,levels=[-50,-30,-10,-1])
# scatter plot
ax.scatter(x[:,0],x[:,1],s=5,color='w')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticks([xlim[0],L[0],U[0],xlim[1]])
ax.set_yticks([ylim[0],L[1],U[1],ylim[1]])
ax.set_title(r'Density Log Contour with $\alpha$=.1')
# output
pyplot.savefig('paper/figs/heatmap.png',dpi=250)
if __name__ == '__main__':
#plot_pdf()
#plot_cub_ext()
#plot_square_ext()
heatmap() | 0.39257 | 0.641085 |
import json
import logging
import pysnooper as psn
from akshareinterface import Init, Index, Stocks, Futures, FuturesForeign, Options
logging.basicConfig(level=logging.DEBUG,format='[%(asctime)s] %(filename)s [line:%(lineno)d] \
[%(levelname)s] %(message)s', datefmt='%Y-%m-%d(%a) %H:%M:%S')
def tool_info():
info = {
"0 指数" : {
"01": "stock_zh_index_spot",
"02": "stock_zh_index_daily",
"03": "stock_zh_kcb_spot - symbol",
"04": "stock_zh_kcb_daily - symbol, adjust"
},
"1 股票" : {
"11": "stock_summary",
"12": "stock_individual_info - symbol",
"13": "stock_zh_a_spot",
"14": "stock_zh_a_hist - symbol,period,start_date,end_date,adjust",
"15": "stock_zh_a_minute - symbol, start_date, end_date, period, adjust",
"16": "stock_zh_a_tick - symbol, trade_date",
"17": "stock_hk_spot_em"
},
"2 期货" : {
"21": "match_main_contract_spot - exchange",
"22": "futures_display_main_sina",
"23": "futures_main_sina - symbol, start_date, end_date",
"24": "futures_contract_detail - symbol",
"25": "get_futures_daily_exchange - start_date, end_date, market",
"26": "futures_zh_minute - symbol, period",
"27": "futures_zh_daily_sina - symbol",
"28": "futures_zh_spot - symbol, market, adjust"
},
"3 外盘" : {
"31": "futures_hq_subscribe_exchange_symbol",
"32": "futures_foreign_hist - symbol",
"33": "futures_foreign_detail - symbol",
"34": "futures_sgx_daily - trade_date, recent_day"
},
"4 期权" : {
"41": "option_finance_board - symbol, end_month",
"42": "option_finance_minute_sina - symbol",
"43": "option_current_em",
"44": "option_cffex_hs300_list_sina",
"45": "option_cffex_hs300_spot_sina - symbol",
"46": "option_cffex_hs300_daily_sina - symbol",
"47": "option_sse_list_sina - symbol, exchange",
"48": "option_sse_spot_price_sina - symbol",
"49": "option_sse_underlying_spot_price_sina - symbol",
"4a": "option_sse_greeks_sina - symbol",
"4b": "option_sse_minute_sina",
"4c": "option_sse_daily_sina - symbol"
}
}
return info
def main():
info = tool_info()
info = json.dumps(info, sort_keys=True, indent=4, ensure_ascii=False, separators=(', ', ': '))
opt = {
"01": Index().stock_zh_index_spot,
"02": Index().stock_zh_index_daily,
"03": Index().stock_zh_kcb_spot,
"04": Index().stock_zh_kcb_daily,
"11": Stocks().stock_summary,
"12": Stocks().stock_individual_info,
"13": Stocks().stock_zh_a_spot,
"14": Stocks().stock_zh_a_hist,
"15": Stocks().stock_zh_a_minute,
"16": Stocks().stock_zh_a_tick,
"17": Stocks().stock_hk_spot_em,
"21": Futures().match_main_contract_spot,
"22": Futures().futures_display_main_sina,
"23": Futures().futures_main_sina,
"24": Futures().futures_contract_detail,
"25": Futures().get_futures_daily_exchange,
"26": Futures().futures_zh_minute,
"27": Futures().futures_zh_daily_sina,
"28": Futures().futures_zh_spot,
"31": FuturesForeign().futures_hq_subscribe_exchange_symbol,
"32": FuturesForeign().futures_foreign_hist,
"33": FuturesForeign().futures_foreign_detail,
"34": FuturesForeign().futures_sgx_daily,
"41": Options().option_finance_board,
"42": Options().option_finance_minute_sina,
"43": Options().option_current_em,
"44": Options().option_cffex_hs300_list_sina,
"45": Options().option_cffex_hs300_spot_sina,
"46": Options().option_cffex_hs300_daily_sina,
"47": Options().option_sse_list_sina,
"48": Options().option_sse_spot_price_sina,
"49": Options().option_sse_underlying_spot_price_sina,
"4a": Options().option_sse_greeks_sina,
"4b": Options().option_sse_minute_sina,
"4c": Options().option_sse_daily_sina
}
while True:
print(info)
para = []
index = input("\n\nInput Number :")
symbol = input("input symbol :")
para.append(symbol)
period = input("input period :")
para.append(period)
adjust = input("input adjust :")
para.append(adjust)
start_date = input("input start :")
para.append(start_date)
end_date = input("input end :")
para.append(end_date)
trade_date = input("input date :")
para.append(trade_date)
print("\nList of inputs:", para)
print("\n Wating for get data ...")
try:
opt[index]()
except:
print("Access akshareinterface ERROR.")
next_step = input("\n===>> Enter to continue;\n<<=== Anykey to exit. \n")
if next_step != "":
break
if __name__ == '__main__':
Init()
main() | aksharetool.py | import json
import logging
import pysnooper as psn
from akshareinterface import Init, Index, Stocks, Futures, FuturesForeign, Options
logging.basicConfig(level=logging.DEBUG,format='[%(asctime)s] %(filename)s [line:%(lineno)d] \
[%(levelname)s] %(message)s', datefmt='%Y-%m-%d(%a) %H:%M:%S')
def tool_info():
info = {
"0 指数" : {
"01": "stock_zh_index_spot",
"02": "stock_zh_index_daily",
"03": "stock_zh_kcb_spot - symbol",
"04": "stock_zh_kcb_daily - symbol, adjust"
},
"1 股票" : {
"11": "stock_summary",
"12": "stock_individual_info - symbol",
"13": "stock_zh_a_spot",
"14": "stock_zh_a_hist - symbol,period,start_date,end_date,adjust",
"15": "stock_zh_a_minute - symbol, start_date, end_date, period, adjust",
"16": "stock_zh_a_tick - symbol, trade_date",
"17": "stock_hk_spot_em"
},
"2 期货" : {
"21": "match_main_contract_spot - exchange",
"22": "futures_display_main_sina",
"23": "futures_main_sina - symbol, start_date, end_date",
"24": "futures_contract_detail - symbol",
"25": "get_futures_daily_exchange - start_date, end_date, market",
"26": "futures_zh_minute - symbol, period",
"27": "futures_zh_daily_sina - symbol",
"28": "futures_zh_spot - symbol, market, adjust"
},
"3 外盘" : {
"31": "futures_hq_subscribe_exchange_symbol",
"32": "futures_foreign_hist - symbol",
"33": "futures_foreign_detail - symbol",
"34": "futures_sgx_daily - trade_date, recent_day"
},
"4 期权" : {
"41": "option_finance_board - symbol, end_month",
"42": "option_finance_minute_sina - symbol",
"43": "option_current_em",
"44": "option_cffex_hs300_list_sina",
"45": "option_cffex_hs300_spot_sina - symbol",
"46": "option_cffex_hs300_daily_sina - symbol",
"47": "option_sse_list_sina - symbol, exchange",
"48": "option_sse_spot_price_sina - symbol",
"49": "option_sse_underlying_spot_price_sina - symbol",
"4a": "option_sse_greeks_sina - symbol",
"4b": "option_sse_minute_sina",
"4c": "option_sse_daily_sina - symbol"
}
}
return info
def main():
    """Interactive menu loop.

    Shows the API catalogue from tool_info(), reads an option code plus a
    set of parameters, then invokes the matching akshareinterface method.
    Loops until the user types anything other than an empty string at the
    continue prompt.
    """
    info = tool_info()
    info = json.dumps(info, sort_keys=True, indent=4, ensure_ascii=False, separators=(', ', ': '))
    # Instantiate each interface wrapper once instead of once per menu entry
    # (the original created 36 throwaway objects).  Assumes the wrapper
    # constructors are stateless — TODO confirm against akshareinterface.
    index_api = Index()
    stocks_api = Stocks()
    futures_api = Futures()
    foreign_api = FuturesForeign()
    options_api = Options()
    opt = {
        "01": index_api.stock_zh_index_spot,
        "02": index_api.stock_zh_index_daily,
        "03": index_api.stock_zh_kcb_spot,
        "04": index_api.stock_zh_kcb_daily,
        "11": stocks_api.stock_summary,
        "12": stocks_api.stock_individual_info,
        "13": stocks_api.stock_zh_a_spot,
        "14": stocks_api.stock_zh_a_hist,
        "15": stocks_api.stock_zh_a_minute,
        "16": stocks_api.stock_zh_a_tick,
        "17": stocks_api.stock_hk_spot_em,
        "21": futures_api.match_main_contract_spot,
        "22": futures_api.futures_display_main_sina,
        "23": futures_api.futures_main_sina,
        "24": futures_api.futures_contract_detail,
        "25": futures_api.get_futures_daily_exchange,
        "26": futures_api.futures_zh_minute,
        "27": futures_api.futures_zh_daily_sina,
        "28": futures_api.futures_zh_spot,
        "31": foreign_api.futures_hq_subscribe_exchange_symbol,
        "32": foreign_api.futures_foreign_hist,
        "33": foreign_api.futures_foreign_detail,
        "34": foreign_api.futures_sgx_daily,
        "41": options_api.option_finance_board,
        "42": options_api.option_finance_minute_sina,
        "43": options_api.option_current_em,
        "44": options_api.option_cffex_hs300_list_sina,
        "45": options_api.option_cffex_hs300_spot_sina,
        "46": options_api.option_cffex_hs300_daily_sina,
        "47": options_api.option_sse_list_sina,
        "48": options_api.option_sse_spot_price_sina,
        "49": options_api.option_sse_underlying_spot_price_sina,
        "4a": options_api.option_sse_greeks_sina,
        "4b": options_api.option_sse_minute_sina,
        "4c": options_api.option_sse_daily_sina
    }
    while True:
        print(info)
        para = []
        index = input("\n\nInput Number :")
        symbol = input("input symbol :")
        para.append(symbol)
        period = input("input period :")
        para.append(period)
        adjust = input("input adjust :")
        para.append(adjust)
        start_date = input("input start :")
        para.append(start_date)
        end_date = input("input end :")
        para.append(end_date)
        trade_date = input("input date :")
        para.append(trade_date)
        print("\nList of inputs:", para)
        print("\n Wating for get data ...")
        # NOTE(review): `para` is collected but never forwarded to the call
        # below; presumably the interface methods read their own inputs —
        # confirm against akshareinterface.
        try:
            opt[index]()
        except KeyError:
            # an unknown option code previously fell into the generic handler
            print("Unknown option: %s" % index)
        except Exception as exc:
            # was a bare `except:`, which also swallowed KeyboardInterrupt
            # and hid the actual failure reason
            print("Access akshareinterface ERROR.", exc)
        next_step = input("\n===>> Enter to continue;\n<<=== Anykey to exit. \n")
        if next_step != "":
            break
if __name__ == '__main__':
Init()
main() | 0.241937 | 0.202502 |
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
from lexererr import *
def serializedATN():
    """Return the serialized ATN string for the MC lexer.

    Auto-generated by the ANTLR tool from MC.g4 — do not edit by hand.
    The escaped string below is an opaque encoding consumed by
    ATNDeserializer in MCLexer.
    """
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\n")
        buf.write("\61\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t")
        buf.write("\7\4\b\t\b\4\t\t\t\3\2\3\2\3\2\3\2\3\2\3\2\3\3\3\3\3\3")
        buf.write("\3\3\3\4\6\4\37\n\4\r\4\16\4 \3\5\3\5\3\6\6\6&\n\6\r\6")
        buf.write("\16\6\'\3\6\3\6\3\7\3\7\3\b\3\b\3\t\3\t\2\2\n\3\3\5\4")
        buf.write("\7\5\t\6\13\7\r\b\17\t\21\n\3\2\4\3\2c|\5\2\13\f\17\17")
        buf.write("\"\"\2\62\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2")
        buf.write("\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2")
        buf.write("\2\3\23\3\2\2\2\5\31\3\2\2\2\7\36\3\2\2\2\t\"\3\2\2\2")
        buf.write("\13%\3\2\2\2\r+\3\2\2\2\17-\3\2\2\2\21/\3\2\2\2\23\24")
        buf.write("\7h\2\2\24\25\7n\2\2\25\26\7q\2\2\26\27\7c\2\2\27\30\7")
        buf.write("v\2\2\30\4\3\2\2\2\31\32\7k\2\2\32\33\7p\2\2\33\34\7v")
        buf.write("\2\2\34\6\3\2\2\2\35\37\t\2\2\2\36\35\3\2\2\2\37 \3\2")
        buf.write("\2\2 \36\3\2\2\2 !\3\2\2\2!\b\3\2\2\2\"#\7.\2\2#\n\3\2")
        buf.write("\2\2$&\t\3\2\2%$\3\2\2\2&\'\3\2\2\2\'%\3\2\2\2\'(\3\2")
        buf.write("\2\2()\3\2\2\2)*\b\6\2\2*\f\3\2\2\2+,\13\2\2\2,\16\3\2")
        buf.write("\2\2-.\13\2\2\2.\20\3\2\2\2/\60\13\2\2\2\60\22\3\2\2\2")
        buf.write("\5\2 \'\3\b\2\2")
        return buf.getvalue()
class MCLexer(Lexer):
    """MC-language lexer generated by ANTLR 4.7.1 from MC.g4.

    Machine-generated: regenerate from the grammar rather than editing.
    """

    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    # Token type codes; indices line up with symbolicNames below.
    FLOATTYPE = 1
    INTTYPE = 2
    ID = 3
    COMMA = 4
    WS = 5
    ERROR_CHAR = 6
    UNCLOSE_STRING = 7
    ILLEGAL_ESCAPE = 8

    channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]

    modeNames = [ "DEFAULT_MODE" ]

    literalNames = [ "<INVALID>",
            "'float'", "'int'", "','" ]

    symbolicNames = [ "<INVALID>",
            "FLOATTYPE", "INTTYPE", "ID", "COMMA", "WS", "ERROR_CHAR", "UNCLOSE_STRING",
            "ILLEGAL_ESCAPE" ]

    ruleNames = [ "FLOATTYPE", "INTTYPE", "ID", "COMMA", "WS", "ERROR_CHAR",
                  "UNCLOSE_STRING", "ILLEGAL_ESCAPE" ]

    grammarFileName = "MC.g4"

    def __init__(self, input=None, output:TextIO = sys.stdout):
        """Build the lexer over ``input`` (an ANTLR InputStream, may be None);
        ANTLR diagnostics go to ``output`` (defaults to stdout)."""
        super().__init__(input, output)
        # runtime version must match the version of the generating tool
        self.checkVersion("4.7.1")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None
def emit(self):
tk = self.type
if tk == self.UNCLOSE_STRING:
result = super().emit();
raise UncloseString(result.text);
elif tk == self.ILLEGAL_ESCAPE:
result = super().emit();
raise IllegalEscape(result.text);
elif tk == self.ERROR_CHAR:
result = super().emit();
raise ErrorToken(result.text);
else:
return super().emit(); | tutorial/week6/problem2/src/main/mc/parser/.antlr/MCLexer.py | from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
from lexererr import *
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\n")
buf.write("\61\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t")
buf.write("\7\4\b\t\b\4\t\t\t\3\2\3\2\3\2\3\2\3\2\3\2\3\3\3\3\3\3")
buf.write("\3\3\3\4\6\4\37\n\4\r\4\16\4 \3\5\3\5\3\6\6\6&\n\6\r\6")
buf.write("\16\6\'\3\6\3\6\3\7\3\7\3\b\3\b\3\t\3\t\2\2\n\3\3\5\4")
buf.write("\7\5\t\6\13\7\r\b\17\t\21\n\3\2\4\3\2c|\5\2\13\f\17\17")
buf.write("\"\"\2\62\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2")
buf.write("\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2")
buf.write("\2\3\23\3\2\2\2\5\31\3\2\2\2\7\36\3\2\2\2\t\"\3\2\2\2")
buf.write("\13%\3\2\2\2\r+\3\2\2\2\17-\3\2\2\2\21/\3\2\2\2\23\24")
buf.write("\7h\2\2\24\25\7n\2\2\25\26\7q\2\2\26\27\7c\2\2\27\30\7")
buf.write("v\2\2\30\4\3\2\2\2\31\32\7k\2\2\32\33\7p\2\2\33\34\7v")
buf.write("\2\2\34\6\3\2\2\2\35\37\t\2\2\2\36\35\3\2\2\2\37 \3\2")
buf.write("\2\2 \36\3\2\2\2 !\3\2\2\2!\b\3\2\2\2\"#\7.\2\2#\n\3\2")
buf.write("\2\2$&\t\3\2\2%$\3\2\2\2&\'\3\2\2\2\'%\3\2\2\2\'(\3\2")
buf.write("\2\2()\3\2\2\2)*\b\6\2\2*\f\3\2\2\2+,\13\2\2\2,\16\3\2")
buf.write("\2\2-.\13\2\2\2.\20\3\2\2\2/\60\13\2\2\2\60\22\3\2\2\2")
buf.write("\5\2 \'\3\b\2\2")
return buf.getvalue()
class MCLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
FLOATTYPE = 1
INTTYPE = 2
ID = 3
COMMA = 4
WS = 5
ERROR_CHAR = 6
UNCLOSE_STRING = 7
ILLEGAL_ESCAPE = 8
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'float'", "'int'", "','" ]
symbolicNames = [ "<INVALID>",
"FLOATTYPE", "INTTYPE", "ID", "COMMA", "WS", "ERROR_CHAR", "UNCLOSE_STRING",
"ILLEGAL_ESCAPE" ]
ruleNames = [ "FLOATTYPE", "INTTYPE", "ID", "COMMA", "WS", "ERROR_CHAR",
"UNCLOSE_STRING", "ILLEGAL_ESCAPE" ]
grammarFileName = "MC.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.7.1")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
def emit(self):
tk = self.type
if tk == self.UNCLOSE_STRING:
result = super().emit();
raise UncloseString(result.text);
elif tk == self.ILLEGAL_ESCAPE:
result = super().emit();
raise IllegalEscape(result.text);
elif tk == self.ERROR_CHAR:
result = super().emit();
raise ErrorToken(result.text);
else:
return super().emit(); | 0.408159 | 0.338979 |
from django.views.generic import TemplateView
from decharges.decharge.mixins import CheckConfigurationMixin, FederationRequiredMixin
from decharges.decharge.models import (
TempsDeDecharge,
UtilisationCreditDeTempsSyndicalPonctuel,
UtilisationTempsDecharge,
)
from decharges.decharge.views.utils import calcul_repartition_temps
from decharges.user_manager.models import Syndicat
class SyndicatsARelancer(
CheckConfigurationMixin, FederationRequiredMixin, TemplateView
):
template_name = "decharge/syndicats_a_relancer.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
annee_en_cours = self.params.annee_en_cours
context["annee"] = annee_en_cours
temps_de_decharge_mutualise = TempsDeDecharge.objects.filter(
annee=annee_en_cours
)
utilisation_temps_decharge = UtilisationTempsDecharge.objects.filter(
annee=annee_en_cours,
)
utilisation_cts = UtilisationCreditDeTempsSyndicalPonctuel.objects.filter(
annee=annee_en_cours,
)
syndicats_depassant_leur_quota = []
for syndicat in Syndicat.objects.all():
(
cts_consommes,
temps_decharge_federation,
temps_donnes,
temps_donnes_total,
temps_recus_par_des_syndicats,
temps_recus_par_la_federation,
temps_restant,
temps_utilises,
temps_utilises_total,
) = calcul_repartition_temps(annee_en_cours, self.federation, syndicat)
if temps_restant < 0:
syndicats_depassant_leur_quota.append((syndicat, abs(temps_restant)))
context["syndicats_n_ayant_rien_rempli"] = (
Syndicat.objects.exclude(pk=self.federation.pk)
.exclude(temps_de_decharges_donnes__in=temps_de_decharge_mutualise)
.exclude(
utilisation_temps_de_decharges_par_annee__in=utilisation_temps_decharge
)
.exclude(utilisation_cts_ponctuels_par_annee__in=utilisation_cts)
.order_by("username")
)
context["syndicats_depassant_leur_quota"] = syndicats_depassant_leur_quota
return context | decharges/decharge/views/syndicats_a_relancer.py | from django.views.generic import TemplateView
from decharges.decharge.mixins import CheckConfigurationMixin, FederationRequiredMixin
from decharges.decharge.models import (
TempsDeDecharge,
UtilisationCreditDeTempsSyndicalPonctuel,
UtilisationTempsDecharge,
)
from decharges.decharge.views.utils import calcul_repartition_temps
from decharges.user_manager.models import Syndicat
class SyndicatsARelancer(
CheckConfigurationMixin, FederationRequiredMixin, TemplateView
):
template_name = "decharge/syndicats_a_relancer.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
annee_en_cours = self.params.annee_en_cours
context["annee"] = annee_en_cours
temps_de_decharge_mutualise = TempsDeDecharge.objects.filter(
annee=annee_en_cours
)
utilisation_temps_decharge = UtilisationTempsDecharge.objects.filter(
annee=annee_en_cours,
)
utilisation_cts = UtilisationCreditDeTempsSyndicalPonctuel.objects.filter(
annee=annee_en_cours,
)
syndicats_depassant_leur_quota = []
for syndicat in Syndicat.objects.all():
(
cts_consommes,
temps_decharge_federation,
temps_donnes,
temps_donnes_total,
temps_recus_par_des_syndicats,
temps_recus_par_la_federation,
temps_restant,
temps_utilises,
temps_utilises_total,
) = calcul_repartition_temps(annee_en_cours, self.federation, syndicat)
if temps_restant < 0:
syndicats_depassant_leur_quota.append((syndicat, abs(temps_restant)))
context["syndicats_n_ayant_rien_rempli"] = (
Syndicat.objects.exclude(pk=self.federation.pk)
.exclude(temps_de_decharges_donnes__in=temps_de_decharge_mutualise)
.exclude(
utilisation_temps_de_decharges_par_annee__in=utilisation_temps_decharge
)
.exclude(utilisation_cts_ponctuels_par_annee__in=utilisation_cts)
.order_by("username")
)
context["syndicats_depassant_leur_quota"] = syndicats_depassant_leur_quota
return context | 0.414899 | 0.209348 |
import unittest
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.comm import ServiceAccessPoint, ApplicationServiceElement, bind
from ..trapped_classes import TrappedServiceAccessPoint, \
TrappedApplicationServiceElement
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# globals
sap = None
ase = None
@bacpypes_debugging
class EchoAccessPoint(ServiceAccessPoint):
    """Service access point that echoes every indication back as a response."""

    def sap_indication(self, pdu):
        # bounce the PDU straight back to the requester
        if _debug: EchoAccessPoint._debug("sap_indication %r", pdu)
        self.sap_response(pdu)

    def sap_confirmation(self, pdu):
        # confirmations are absorbed; nothing further to do
        if _debug: EchoAccessPoint._debug("sap_confirmation %r", pdu)
        pass
class TrappedEchoAccessPoint(TrappedServiceAccessPoint, EchoAccessPoint):
    """EchoAccessPoint wrapped by the trap mixin so tests can inspect PDUs."""
    pass
@bacpypes_debugging
class EchoServiceElement(ApplicationServiceElement):
    """Application service element that echoes every indication as a response."""

    def indication(self, pdu):
        # bounce the PDU straight back
        if _debug: EchoServiceElement._debug("indication %r", pdu)
        self.response(pdu)

    def confirmation(self, pdu):
        # confirmations are absorbed; nothing further to do
        if _debug: EchoServiceElement._debug("confirmation %r", pdu)
        pass
class TrappedEchoServiceElement(TrappedApplicationServiceElement, EchoServiceElement):
    """EchoServiceElement wrapped by the trap mixin so tests can inspect PDUs."""
    pass
@bacpypes_debugging
def setup_module():
    """Module-level setup: sanity-check the trapped MROs, then build a
    trapped access point / service element pair and bind them together."""
    if _debug: setup_module._debug("setup_module")
    global sap, ase
    # verify the echo access point is trapped correctly
    assert TrappedEchoAccessPoint.__mro__ == (
        TrappedEchoAccessPoint,
        TrappedServiceAccessPoint,
        EchoAccessPoint,
        ServiceAccessPoint,
        object,
    )
    # create an access point
    sap = TrappedEchoAccessPoint()
    # verify the echo service element is trapped correctly
    assert TrappedEchoServiceElement.__mro__ == (
        TrappedEchoServiceElement,
        TrappedApplicationServiceElement,
        EchoServiceElement,
        ApplicationServiceElement,
        object,
    )
    # create a service element
    ase = TrappedEchoServiceElement()
    # bind them together
    bind(ase, sap)
@bacpypes_debugging
def teardown_module():
    """Module-level teardown: drop the access point and service element so
    they can be garbage collected between test modules."""
    # was setup_module._debug — a copy-paste bug that logged the teardown
    # message through the wrong function's logger
    if _debug: teardown_module._debug("teardown_module")
    global sap, ase
    # toss the objects into the garbage
    sap = None
    ase = None
@bacpypes_debugging
class TestApplicationService(unittest.TestCase):
    def test_sap_request(self):
        """A request issued at the SAP must reach the element's indication,
        and the element's echoed response must come back as a confirmation."""
        if _debug: TestApplicationService._debug("test_sap_request")
        global sap, ase
        # make a pdu object
        pdu = object()
        # service access point is going to request something
        sap.sap_request(pdu)
        # make sure the request was sent and received
        assert sap.sap_request_sent is pdu
        assert ase.indication_received is pdu
        # make sure the echo response was sent and received
        assert ase.response_sent is pdu
        assert sap.sap_confirmation_received is pdu
def test_ase_request(self):
if _debug: TestApplicationService._debug("test_ase_request")
global sap, ase
# make a pdu object
pdu = object()
# service element is going to request something
ase.request(pdu)
# make sure the request was sent and received
assert ase.request_sent is pdu
assert sap.sap_indication_received is pdu
# make sure the echo response was sent and received
assert sap.sap_response_sent is pdu
assert ase.confirmation_received is pdu | tests/test_utilities/test_service_access_point.py | import unittest
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.comm import ServiceAccessPoint, ApplicationServiceElement, bind
from ..trapped_classes import TrappedServiceAccessPoint, \
TrappedApplicationServiceElement
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# globals
sap = None
ase = None
@bacpypes_debugging
class EchoAccessPoint(ServiceAccessPoint):
def sap_indication(self, pdu):
if _debug: EchoAccessPoint._debug("sap_indication %r", pdu)
self.sap_response(pdu)
def sap_confirmation(self, pdu):
if _debug: EchoAccessPoint._debug("sap_confirmation %r", pdu)
pass
class TrappedEchoAccessPoint(TrappedServiceAccessPoint, EchoAccessPoint):
pass
@bacpypes_debugging
class EchoServiceElement(ApplicationServiceElement):
def indication(self, pdu):
if _debug: EchoServiceElement._debug("indication %r", pdu)
self.response(pdu)
def confirmation(self, pdu):
if _debug: EchoServiceElement._debug("confirmation %r", pdu)
pass
class TrappedEchoServiceElement(TrappedApplicationServiceElement, EchoServiceElement):
pass
@bacpypes_debugging
def setup_module():
if _debug: setup_module._debug("setup_module")
global sap, ase
# verify the echo access point is trapped correctly
assert TrappedEchoAccessPoint.__mro__ == (
TrappedEchoAccessPoint,
TrappedServiceAccessPoint,
EchoAccessPoint,
ServiceAccessPoint,
object,
)
# create an access point
sap = TrappedEchoAccessPoint()
# verify the echo service element is trapped correctly
assert TrappedEchoServiceElement.__mro__ == (
TrappedEchoServiceElement,
TrappedApplicationServiceElement,
EchoServiceElement,
ApplicationServiceElement,
object,
)
# create a service element
ase = TrappedEchoServiceElement()
# bind them together
bind(ase, sap)
@bacpypes_debugging
def teardown_module():
    """Module-level teardown: drop the access point and service element so
    they can be garbage collected between test modules."""
    # was setup_module._debug — a copy-paste bug that logged the teardown
    # message through the wrong function's logger
    if _debug: teardown_module._debug("teardown_module")
    global sap, ase
    # toss the objects into the garbage
    sap = None
    ase = None
@bacpypes_debugging
class TestApplicationService(unittest.TestCase):
def test_sap_request(self):
if _debug: TestApplicationService._debug("test_sap_request")
global sap, ase
# make a pdu object
pdu = object()
# service access point is going to request something
sap.sap_request(pdu)
# make sure the request was sent and received
assert sap.sap_request_sent is pdu
assert ase.indication_received is pdu
# make sure the echo response was sent and received
assert ase.response_sent is pdu
assert sap.sap_confirmation_received is pdu
def test_ase_request(self):
if _debug: TestApplicationService._debug("test_ase_request")
global sap, ase
# make a pdu object
pdu = object()
# service element is going to request something
ase.request(pdu)
# make sure the request was sent and received
assert ase.request_sent is pdu
assert sap.sap_indication_received is pdu
# make sure the echo response was sent and received
assert sap.sap_response_sent is pdu
assert ase.confirmation_received is pdu | 0.396886 | 0.239891 |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.utils.timezone
import tagging.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('categories', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Posts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(editable=False)),
('modified_date', models.DateTimeField()),
('created_by', models.CharField(blank=True, default=b'', max_length=50, null=True)),
('modified_by', models.CharField(blank=True, default=b'', max_length=50, null=True)),
('is_active', models.BooleanField(default=True)),
('title', models.CharField(max_length=255)),
('slug', models.SlugField(help_text=b"Used to build the entry's URL.", max_length=255, unique_for_date=b'publication_date')),
('status', models.CharField(choices=[(b'draft', b'draft'), (b'hidden', b'hidden'), (b'published', b'published')], db_index=True, default=b'draft', max_length=10)),
('publication_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)),
('comment_enabled', models.BooleanField(default=True)),
('tags', tagging.fields.TagField(blank=True, max_length=255, verbose_name=b'tags')),
('authors', models.ManyToManyField(blank=True, related_name='posts', to=settings.AUTH_USER_MODEL)),
('categories', models.ManyToManyField(blank=True, related_name='posts', to='categories.Category')),
],
options={
'get_latest_by': 'publication_date',
'ordering': ['-publication_date'],
'verbose_name_plural': 'posts',
'verbose_name': 'post',
'permissions': (('can_view_all', 'Can view all entries'), ('can_change_status', 'Can change status'), ('can_change_author', 'Can change author(s)')),
},
),
migrations.AlterIndexTogether(
name='posts',
index_together=set([('status', 'publication_date'), ('slug', 'publication_date')]),
),
] | posts/migrations/0001_initial.py | from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.utils.timezone
import tagging.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('categories', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Posts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(editable=False)),
('modified_date', models.DateTimeField()),
('created_by', models.CharField(blank=True, default=b'', max_length=50, null=True)),
('modified_by', models.CharField(blank=True, default=b'', max_length=50, null=True)),
('is_active', models.BooleanField(default=True)),
('title', models.CharField(max_length=255)),
('slug', models.SlugField(help_text=b"Used to build the entry's URL.", max_length=255, unique_for_date=b'publication_date')),
('status', models.CharField(choices=[(b'draft', b'draft'), (b'hidden', b'hidden'), (b'published', b'published')], db_index=True, default=b'draft', max_length=10)),
('publication_date', models.DateTimeField(db_index=True, default=django.utils.timezone.now)),
('comment_enabled', models.BooleanField(default=True)),
('tags', tagging.fields.TagField(blank=True, max_length=255, verbose_name=b'tags')),
('authors', models.ManyToManyField(blank=True, related_name='posts', to=settings.AUTH_USER_MODEL)),
('categories', models.ManyToManyField(blank=True, related_name='posts', to='categories.Category')),
],
options={
'get_latest_by': 'publication_date',
'ordering': ['-publication_date'],
'verbose_name_plural': 'posts',
'verbose_name': 'post',
'permissions': (('can_view_all', 'Can view all entries'), ('can_change_status', 'Can change status'), ('can_change_author', 'Can change author(s)')),
},
),
migrations.AlterIndexTogether(
name='posts',
index_together=set([('status', 'publication_date'), ('slug', 'publication_date')]),
),
] | 0.57069 | 0.141815 |
from __future__ import print_function
import sys
from .util import is_a_tty
class ColorPrinterMeta(type):
    """Metaclass that installs a ``print_<color>`` and ``string_<color>``
    convenience method for every key of the class's ``colors`` mapping."""

    def __init__(cls, *args, **kwargs):
        def _install(name):
            # bind ``name`` per iteration so each helper keeps its own color
            def _print(self, *values, **options):
                options["color"] = name
                return self.print(*values, **options)

            def _string(self, *values, **options):
                return self.string(name, *values, **options)

            setattr(cls, f"print_{name}", _print)
            setattr(cls, f"string_{name}", _string)

        for color_name in cls.colors:
            _install(color_name)
class ColorPrinter(metaclass=ColorPrinterMeta):
    """Prints things in color (or not).
    Default colors can be overridden by passing a dict of colors to
    the constructor.
    Use stand-alone or as a mixin::
    >>> printer = ColorPrinter()
    >>> printer.print('boring old message')
    boring old message
    >>> printer.string('none', 'boring old message')
    'boring old message\\x1b[0m'
    >>> printer.print_info('check this out')
    check this out
    >>> printer.print_error('whoopsie')
    whoopsie
    >>> printer.string('error', 'whoopsie')
    '\\x1b[91mwhoopsie\\x1b[0m'
    >>> MyClass = type('MyClass', (ColorPrinter,), {})
    >>> my_obj = MyClass(colors={'header': '\033[96m'})
    >>> my_obj.print_header('Header')
    Header
    >>> my_obj.string('header', 'Header')
    '\\x1b[96mHeader\\x1b[0m'
    Note: This uses the print function from Python 3.
    """
    # default color table: logical name -> ANSI escape prefix
    colors = {
        "header": "\033[95m",
        "info": "\033[94m",
        "success": "\033[92m",
        "warning": "\033[93m",
        "error": "\033[91m",
        "reset": "\033[0m",
        "none": "",
    }
    def __init__(self, colors=None):
        """Optionally merge ``colors`` (name -> ANSI escape) over the defaults.
        A copy is made first so instances never mutate the class-level table."""
        if colors is not None:
            self.colors = self.colors.copy()
            self.colors.update(colors)
    def print(self, *args, **kwargs):
        """Like built-in ``print()`` but colorizes strings.
        Pass ``color`` as a keyword arg to colorize ``*args`` before
        printing them. If no ``color`` is passed, *args will printed
        without color.
        """
        color = kwargs.pop("color", None)
        file = kwargs.get("file", sys.stdout)
        if color and is_a_tty(file):
            # Colorize with ``sep`` only and let print() handle ``end`` and
            # ``file``.  Previously the full kwargs dict was forwarded, so an
            # explicit ``end`` was appended by string() and then again by
            # print(), duplicating it in the output.
            string = self.string(color, *args, sep=kwargs.get("sep", " "))
            print(string, **kwargs)
        else:
            print(*args, **kwargs)
    def string(self, color, *args, **kwargs):
        """Returns a colorized string (joining ``args`` into one str).
        The arguments for this are similar to the built-in ``print()``.
        ``sep`` is a space by default, but ``end`` is an empty string.
        """
        color = self.colors[color]
        sep = kwargs.get("sep", " ")
        end = kwargs.get("end", "")
        string = sep.join(str(a) for a in args)
        # always terminate with the reset escape so colors don't leak
        string = f"{color}{string}{self.colors['reset']}"
        if end:
            string += end
        return string
color_printer = ColorPrinter() | src/local_settings/color_printer.py | from __future__ import print_function
import sys
from .util import is_a_tty
class ColorPrinterMeta(type):
    """Attach per-color helpers (``print_X`` / ``string_X``) to the class,
    one pair for each entry in its ``colors`` mapping."""

    def __init__(cls, *args, **kwargs):
        def _bind(selected):
            # return a (print, string) pair closed over ``selected``
            def _print(self, *values, **options):
                options["color"] = selected
                return self.print(*values, **options)

            def _string(self, *values, **options):
                return self.string(selected, *values, **options)

            return _print, _string

        for key in cls.colors:
            printer, stringer = _bind(key)
            setattr(cls, f"print_{key}", printer)
            setattr(cls, f"string_{key}", stringer)
class ColorPrinter(metaclass=ColorPrinterMeta):
"""Prints things in color (or not).
Default colors can be overridden by passing a dict of colors to
the constructor.
Use stand-alone or as a mixin::
>>> printer = ColorPrinter()
>>> printer.print('boring old message')
boring old message
>>> printer.string('none', 'boring old message')
'boring old message\\x1b[0m'
>>> printer.print_info('check this out')
check this out
>>> printer.print_error('whoopsie')
whoopsie
>>> printer.string('error', 'whoopsie')
'\\x1b[91mwhoopsie\\x1b[0m'
>>> MyClass = type('MyClass', (ColorPrinter,), {})
>>> my_obj = MyClass(colors={'header': '\033[96m'})
>>> my_obj.print_header('Header')
Header
>>> my_obj.string('header', 'Header')
'\\x1b[96mHeader\\x1b[0m'
Note: This uses the print function from Python 3.
"""
colors = {
"header": "\033[95m",
"info": "\033[94m",
"success": "\033[92m",
"warning": "\033[93m",
"error": "\033[91m",
"reset": "\033[0m",
"none": "",
}
def __init__(self, colors=None):
if colors is not None:
self.colors = self.colors.copy()
self.colors.update(colors)
def print(self, *args, **kwargs):
"""Like built-in ``print()`` but colorizes strings.
Pass ``color`` as a keyword arg to colorize ``*args`` before
printing them. If no ``color`` is passed, *args will printed
without color.
"""
color = kwargs.pop("color", None)
file = kwargs.get("file", sys.stdout)
if color and is_a_tty(file):
string = self.string(color, *args, **kwargs)
print(string, **kwargs)
else:
print(*args, **kwargs)
def string(self, color, *args, **kwargs):
"""Returns a colorized string (joining ``args`` into one str).
The arguments for this are similar to the built-in ``print()``.
``sep`` is a space by default, but ``end`` is an empty string.
"""
color = self.colors[color]
sep = kwargs.get("sep", " ")
end = kwargs.get("end", "")
string = sep.join(str(a) for a in args)
string = f"{color}{string}{self.colors['reset']}"
if end:
string += end
return string
color_printer = ColorPrinter() | 0.606382 | 0.117193 |
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import os
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def test():
    """End-to-end EIP check: create a VM on a vlan L3 network, attach an EIP
    via a VIP on the virtual router's public network, then ssh into the VM to
    verify outbound ping and HTTP download succeed through the EIP."""
    test_util.test_dsc('Create test vm with EIP and check.')
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict.add_vm(vm)
    l3_name = os.environ.get('l3VlanNetworkName1')
    vr1_l3_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    vrs = test_lib.lib_find_vr_by_l3_uuid(vr1_l3_uuid)
    temp_vm1 = None
    if not vrs:
        #create temp_vm1 for getting vlan1's vr for test pf_vm portforwarding
        temp_vm1 = test_stub.create_vlan_vm()
        test_obj_dict.add_vm(temp_vm1)
        vr1 = test_lib.lib_find_vr_by_vm(temp_vm1.vm)[0]
    else:
        vr1 = vrs[0]
    l3_name = os.environ.get('l3NoVlanNetworkName1')
    vr2_l3_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    vrs = test_lib.lib_find_vr_by_l3_uuid(vr2_l3_uuid)
    temp_vm2 = None
    if not vrs:
        #create temp_vm2 for getting novlan's vr for test pf_vm portforwarding
        temp_vm2 = test_stub.create_user_vlan_vm()
        test_obj_dict.add_vm(temp_vm2)
        vr2 = test_lib.lib_find_vr_by_vm(temp_vm2.vm)[0]
    else:
        vr2 = vrs[0]
    #we do not need temp_vm1 and temp_vm2, since we just use their VRs.
    if temp_vm1:
        temp_vm1.destroy()
        test_obj_dict.rm_vm(temp_vm1)
    if temp_vm2:
        temp_vm2.destroy()
        test_obj_dict.rm_vm(temp_vm2)
    # NOTE(review): vr1/vr2 are computed above but never used below — the
    # lookup appears to exist only to ensure the routers are up; confirm.
    vm_nic = vm.vm.vmNics[0]
    vm_nic_uuid = vm_nic.uuid
    pri_l3_uuid = vm_nic.l3NetworkUuid
    vr = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid)[0]
    vr_pub_nic = test_lib.lib_find_vr_pub_nic(vr)
    l3_uuid = vr_pub_nic.l3NetworkUuid
    # create a VIP on the VR's public L3 and bind an EIP to the VM's nic
    vip = test_stub.create_vip('create_eip_test', l3_uuid)
    test_obj_dict.add_vip(vip)
    eip = test_stub.create_eip('create eip test', vip_uuid=vip.get_vip().uuid, vnic_uuid=vm_nic_uuid, vm_obj=vm)
    vip.attach_eip(eip)
    vm_ip = vm_nic.ip
    user_name = 'root'
    user_password = 'password'
    vm.check()
    vip.check()
    # HACK: commands are interpolated into a shell string for os.system;
    # inputs come from the controlled test environment, but subprocess with
    # an argument list would be safer.
    cmd = "ping -c 4 172.20.0.1"
    rsp_ping = os.system("sshpass -p '%s' ssh %s@%s '%s'"%(user_password, user_name, vm_ip, cmd))
    #Try to download webpage from Jenkins server
    cmd = "curl http://192.168.200.100"
    rsp_curl = os.system("sshpass -p '%s' ssh %s@%s '%s'"%(user_password, user_name, vm_ip, cmd))
    if rsp_ping != 0:
        test_util.test_fail('Attach EIP but cannot ping from VM')
    if rsp_ping == 0 and rsp_curl != 0:
        test_util.test_fail('Attach EIP and can ping from the VM, but cannot download anything in VM')
    # success path: clean up the VM, then the EIP and VIP
    vm.destroy()
    test_obj_dict.rm_vm(vm)
    vip.check()
    eip.delete()
    vip.delete()
    test_obj_dict.rm_vip(vip)
    test_util.test_pass('Attached EIP and download webpage in VM Success')
#Will be called only if exception happens in test().
def error_cleanup():
global test_obj_dict
test_lib.lib_error_cleanup(test_obj_dict) | integrationtest/vm/virtualrouter/eip/test_check_download_on_eip_vm.py | import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import os
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def test():
test_util.test_dsc('Create test vm with EIP and check.')
vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
test_obj_dict.add_vm(vm)
l3_name = os.environ.get('l3VlanNetworkName1')
vr1_l3_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
vrs = test_lib.lib_find_vr_by_l3_uuid(vr1_l3_uuid)
temp_vm1 = None
if not vrs:
#create temp_vm1 for getting vlan1's vr for test pf_vm portforwarding
temp_vm1 = test_stub.create_vlan_vm()
test_obj_dict.add_vm(temp_vm1)
vr1 = test_lib.lib_find_vr_by_vm(temp_vm1.vm)[0]
else:
vr1 = vrs[0]
l3_name = os.environ.get('l3NoVlanNetworkName1')
vr2_l3_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
vrs = test_lib.lib_find_vr_by_l3_uuid(vr2_l3_uuid)
temp_vm2 = None
if not vrs:
#create temp_vm2 for getting novlan's vr for test pf_vm portforwarding
temp_vm2 = test_stub.create_user_vlan_vm()
test_obj_dict.add_vm(temp_vm2)
vr2 = test_lib.lib_find_vr_by_vm(temp_vm2.vm)[0]
else:
vr2 = vrs[0]
#we do not need temp_vm1 and temp_vm2, since we just use their VRs.
if temp_vm1:
temp_vm1.destroy()
test_obj_dict.rm_vm(temp_vm1)
if temp_vm2:
temp_vm2.destroy()
test_obj_dict.rm_vm(temp_vm2)
vm_nic = vm.vm.vmNics[0]
vm_nic_uuid = vm_nic.uuid
pri_l3_uuid = vm_nic.l3NetworkUuid
vr = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid)[0]
vr_pub_nic = test_lib.lib_find_vr_pub_nic(vr)
l3_uuid = vr_pub_nic.l3NetworkUuid
vip = test_stub.create_vip('create_eip_test', l3_uuid)
test_obj_dict.add_vip(vip)
eip = test_stub.create_eip('create eip test', vip_uuid=vip.get_vip().uuid, vnic_uuid=vm_nic_uuid, vm_obj=vm)
vip.attach_eip(eip)
vm_ip = vm_nic.ip
user_name = 'root'
user_password = 'password'
vm.check()
vip.check()
cmd = "ping -c 4 172.20.0.1"
rsp_ping = os.system("sshpass -p '%s' ssh %s@%s '%s'"%(user_password, user_name, vm_ip, cmd))
#Try to download webpage from Jenkins server
cmd = "curl http://192.168.200.100"
rsp_curl = os.system("sshpass -p '%s' ssh %s@%s '%s'"%(user_password, user_name, vm_ip, cmd))
if rsp_ping != 0:
test_util.test_fail('Attach EIP but cannot ping from VM')
if rsp_ping == 0 and rsp_curl != 0:
test_util.test_fail('Attach EIP and can ping from the VM, but cannot download anything in VM')
vm.destroy()
test_obj_dict.rm_vm(vm)
vip.check()
eip.delete()
vip.delete()
test_obj_dict.rm_vip(vip)
test_util.test_pass('Attached EIP and download webpage in VM Success')
#Will be called only if exception happens in test().
def error_cleanup():
global test_obj_dict
test_lib.lib_error_cleanup(test_obj_dict) | 0.078124 | 0.353261 |
import datetime as dt
import copy
from enum import Enum
from dataclasses import dataclass, field
import uuid
from typing import List, Optional, Dict, cast
class VectorClockItem:
    """Stores the timestamp from a specific provider.

    Items order by ``timestamp`` but may only be compared with items from
    the same ``provider_id``; mixing providers is a programming error and
    raises AssertionError.
    """

    provider_id: "str"
    timestamp: "dt.datetime"

    def __init__(self, provider_id: "str", timestamp: "dt.datetime"):
        self.provider_id = provider_id
        self.timestamp = timestamp

    def __repr__(self):  # pragma: no cover
        return f"VectorClockItem(provider_id='{self.provider_id}', timestamp={repr(self.timestamp)})"

    def _check_comparable(self, other: "VectorClockItem") -> None:
        # shared guard for the ordering operators (was duplicated in lt/gt)
        assert isinstance(other, VectorClockItem)
        assert (
            other.provider_id == self.provider_id
        ), f"Can't compare clocks from different providers ({self.provider_id}, {other.provider_id})"

    def __lt__(self, other: "VectorClockItem"):
        self._check_comparable(other)
        return self.timestamp < other.timestamp

    def __gt__(self, other: "VectorClockItem"):
        self._check_comparable(other)
        return self.timestamp > other.timestamp

    def __eq__(self, other: "object"):
        # Follow the data model: defer to the other operand instead of
        # asserting when compared with a non-VectorClockItem (previously
        # e.g. ``item == None`` raised AssertionError).
        if not isinstance(other, VectorClockItem):
            return NotImplemented
        assert (
            other.provider_id == self.provider_id
        ), f"Can't compare clocks from different providers ({self.provider_id}, {other.provider_id})"
        return self.timestamp == other.timestamp

    def __hash__(self):
        return hash((self.provider_id, self.timestamp))

    def is_empty(self) -> bool:
        """True when the timestamp is the 'never synced' sentinel (datetime.min, UTC)."""
        return self.timestamp == dt.datetime.min.replace(tzinfo=dt.timezone.utc)
class VectorClock:
    """Groups multiple VectorClockItems from different providers. This class represents the
    state of synchronization of a given provider at a specific instant.

    A provider that is not tracked yet is treated as having the minimum (UTC)
    timestamp: get_vector_clock_item() lazily creates such entries on first
    access.
    """

    # Maps provider_id -> that provider's VectorClockItem.
    _vector_clock_items_by_id: "Dict[str, VectorClockItem]"

    def __init__(self, *vector_clock_items: "VectorClockItem"):
        """
        Args:
            vector_clock_items (VectorClockItem): Initial items, at most one per provider.

        Raises:
            ValueError: If two items share the same provider_id.
        """
        self._vector_clock_items_by_id = {}
        for vector_item in vector_clock_items:
            if vector_item.provider_id in self._vector_clock_items_by_id:
                raise ValueError(f"Duplicate provider ids! {vector_item.provider_id}")
            self._vector_clock_items_by_id[vector_item.provider_id] = vector_item

    def __repr__(self):  # pragma: no cover
        return (
            "VectorClock("
            + ", ".join(list(repr(v) for v in self._vector_clock_items_by_id.values()))
            + ")"
        )

    def __iter__(self):
        """Iterates the VectorClockItems"""
        return iter(self._vector_clock_items_by_id.values())

    def __eq__(self, other: "object"):
        """Compares two VectorClocks. For them to be equal, their VectorClockItems must be equal.

        A provider missing on one side compares as the minimum timestamp.
        NOTE(review): get_vector_clock_item() inserts missing entries, so
        comparing two clocks materializes empty items in BOTH operands as a
        side effect.
        """
        assert isinstance(other, VectorClock)
        # Every item of self must match other's (possibly lazily created) item.
        for vector_item in self:
            other_vector_item = other.get_vector_clock_item(vector_item.provider_id)
            if vector_item.timestamp != other_vector_item.timestamp:
                return False
        # And vice versa, to catch providers tracked only by `other`.
        for other_vector_item in other:
            vector_item = self.get_vector_clock_item(other_vector_item.provider_id)
            if vector_item.timestamp != other_vector_item.timestamp:
                return False
        return True

    def __hash__(self):
        # Empty (minimum-timestamp) items are skipped so that clocks that
        # compare equal — where a missing provider equals an empty one —
        # also hash equal regardless of which providers they explicitly track.
        # NOTE(review): the tuple preserves dict insertion order, so two equal
        # clocks whose items were added in different orders could hash
        # differently; also items are mutable via update(), so the hash is
        # only stable while the clock is not updated — confirm callers rely
        # on neither.
        return hash(
            tuple(
                [
                    vector_clock_item
                    for vector_clock_item in self._vector_clock_items_by_id.values()
                    if not vector_clock_item.is_empty()
                ]
            )
        )

    @classmethod
    def create_empty(cls, provider_ids: "List[str]") -> "VectorClock":
        """Initializes a VectorClock with the minimum timestamp for the given provider identifiers.
        Args:
            provider_ids (List[str]): List of provider identifiers.
        """
        vector_clock = VectorClock()
        for provider_id in provider_ids:
            # Lookup has the side effect of creating the empty item.
            vector_clock.get_vector_clock_item(provider_id=provider_id)
        return vector_clock

    def get_vector_clock_item(self, provider_id: "str") -> "VectorClockItem":
        """Returns a VectorClockItem matching the given provider identifier. If none is found,
        a new one is created with timestamp equal to dt.datetime.min.

        Note: this is a mutating lookup — the lazily created item is stored
        on the clock.

        Args:
            provider_id (str): Provider identifier.
        Returns:
            VectorClockItem: The matching item.
        """
        vector_clock_item = self._vector_clock_items_by_id.get(provider_id)
        if vector_clock_item is None:
            vector_clock_item = VectorClockItem(
                provider_id=provider_id,
                timestamp=dt.datetime.min.replace(tzinfo=dt.timezone.utc),
            )
            self._vector_clock_items_by_id[provider_id] = vector_clock_item
        return vector_clock_item

    def update(self, vector_clock_item: "VectorClockItem"):
        """Updates the corresponding VectorClockItem with the new timestamp only if the new
        timestamp is greater than the current one.
        Args:
            vector_clock_item (VectorClockItem): The VectorClockItem that should be updated.
        """
        old_vector_clock_item = self.get_vector_clock_item(
            vector_clock_item.provider_id
        )
        # Timestamps only move forward; stale updates are ignored.
        if old_vector_clock_item < vector_clock_item:
            old_vector_clock_item.timestamp = vector_clock_item.timestamp
class Operation(Enum):
    """Represents an operation that can be performed to an item."""

    INSERT = "INSERT"  # item created
    UPDATE = "UPDATE"  # item modified
    DELETE = "DELETE"  # item removed
@dataclass(frozen=True)
class SerializationResult:
    """Stores the result of serializing an item

    Frozen (immutable) dataclass, so instances are hashable and safe to use
    inside ItemChange equality/hash computations.

    Attributes:
        item_id (str): The primary key of the item.
        entity_name (str): The type of entity represented by this item.
        serialized_item (str): The serialized item.
    """

    item_id: "str"
    entity_name: "str"
    # repr=False keeps potentially large serialized payloads out of debug output.
    serialized_item: "str" = field(repr=False)
class ItemChange:
    """Represents a change performed to an item.
    Attributes:
        id (uuid.UUID): The change's primary key
        date_created (dt.datetime): The date that the change was saved to the data store.
        is_applied (bool): Indicates whether this change has already been applied to the item.
        operation (Operation): The operation that was performed with this change.
        change_vector_clock_item (VectorClockItem): The VectorClockItem generated by the provider that created the change.
        insert_vector_clock_item (VectorClockItem): The VectorClockItem generated by the provider that first inserted the item referenced by this change.
        serialization_result (SerializationResult): The serialization information for the item referenced by this change.
        should_ignore (bool): Indicates whether this change should be ignored (will only be true if this change lost a conflict dispute).
        vector_clock (VectorClock): The synchronization clock at the time this change was created.
    """

    id: "uuid.UUID"
    date_created: "dt.datetime"
    operation: "Operation"
    serialization_result: "SerializationResult"
    change_vector_clock_item: "VectorClockItem"
    insert_vector_clock_item: "VectorClockItem"
    should_ignore: "bool"
    is_applied: "bool"
    vector_clock: "VectorClock"

    # Single source of truth for the fields that define logical identity,
    # shared by __eq__ and __hash__ so they can never drift apart.
    # date_created is deliberately excluded (it is provider-local, see
    # reset_status()).
    _IDENTITY_FIELDS = (
        "id",
        "operation",
        "change_vector_clock_item",
        "insert_vector_clock_item",
        "serialization_result",
        "should_ignore",
        "is_applied",
        "vector_clock",
    )

    def __init__(
        self,
        id: "uuid.UUID",
        date_created: "dt.datetime",
        operation: "Operation",
        change_vector_clock_item: "VectorClockItem",
        insert_vector_clock_item: "VectorClockItem",
        serialization_result: "SerializationResult",
        should_ignore: "bool",
        is_applied: "bool",
        vector_clock: "VectorClock",
    ):
        """
        Args:
            id (uuid.UUID): The change's primary key
            date_created (dt.datetime): The date that the change was saved to the data store.
            is_applied (bool): Indicates whether this change has already been applied to the item.
            operation (Operation): The operation that was performed with this change.
            change_vector_clock_item (VectorClockItem): The VectorClockItem generated by the provider that created the change.
            insert_vector_clock_item (VectorClockItem): The VectorClockItem generated by the provider that first inserted the item referenced by this change.
            serialization_result (SerializationResult): The serialization information for the item referenced by this change.
            should_ignore (bool): Indicates whether this change should be ignored (will only be true if this change lost a conflict dispute).
            vector_clock (VectorClock): The synchronization clock at the time this change was created.
        """
        self.id = id
        self.date_created = date_created
        self.operation = operation
        self.change_vector_clock_item = change_vector_clock_item
        self.insert_vector_clock_item = insert_vector_clock_item
        self.serialization_result = serialization_result
        self.should_ignore = should_ignore
        self.is_applied = is_applied
        self.vector_clock = vector_clock

    def __repr__(self):  # pragma: no cover
        return f"ItemChange(id='{self.id}', operation={self.operation}, serialization_result={self.serialization_result}, change_vector_clock_item={self.change_vector_clock_item}, should_ignore={self.should_ignore}, is_applied={self.is_applied})"

    def reset_status(self):
        """Resets all the fields that only make sense locally to a provider that applied the change.
        This method is called before the change is sent to a remote provider."""
        self.is_applied = False
        self.should_ignore = False
        self.date_created = None  # re-assigned by the receiving provider's data store

    def __eq__(self, other: "object"):
        assert isinstance(other, ItemChange)
        for param in self._IDENTITY_FIELDS:
            if getattr(self, param) != getattr(other, param):
                return False
        return True

    def __hash__(self):
        # BUG FIX: the previous implementation called hash() on a *generator
        # object* — hash((x for ...)) hashes the generator itself (identity
        # based), so two equal ItemChanges produced different hashes and
        # broke use in sets/dicts. Hash a tuple of the identity fields
        # instead, keeping __hash__ consistent with __eq__.
        return hash(tuple(getattr(self, param) for param in self._IDENTITY_FIELDS))
class ItemChangeBatch:
    """One page of changes exchanged during synchronization.

    Changes are synced in chunks rather than all at once; each batch carries
    its slice of changes plus a flag marking the final chunk of the session.

    Attributes:
        item_changes (List[ItemChange]): List of changes contained in the batch.
        is_last_batch (bool): Indicates whether this is the last batch of items to be processed.
    """

    item_changes: "List[ItemChange]"
    is_last_batch: "bool"

    def __init__(self, item_changes: "List[ItemChange]", is_last_batch: "bool"):
        """
        Args:
            item_changes (List[ItemChange]): Changes contained in the batch.
            is_last_batch (bool): True when no further batches will follow.
        """
        self.item_changes = item_changes
        self.is_last_batch = is_last_batch

    def __repr__(self):  # pragma: no cover
        return f"ItemChangeBatch(item_changes=[...{len(self.item_changes)} changes], is_last_batch={self.is_last_batch})"

    def get_vector_clock_after_done(
        self, initial_vector_clock: "VectorClock"
    ) -> "VectorClock":
        """Computes the VectorClock the provider will have once every change
        in this batch has been applied.

        Args:
            initial_vector_clock (VectorClock): The VectorClock before the changes are applied.
        """
        # Work on a deep copy so the caller's clock is left untouched.
        updated_clock = copy.deepcopy(initial_vector_clock)
        for change in self.item_changes:
            updated_clock.update(vector_clock_item=change.change_vector_clock_item)
        return updated_clock

    def reset_status(self):
        """Strips provider-local state from every change before the batch is sent to a remote provider."""
        for change in self.item_changes:
            change.reset_status()

    def __eq__(self, other: "object"):
        assert isinstance(other, ItemChangeBatch)
        if self.is_last_batch != other.is_last_batch:
            return False
        return self.item_changes == other.item_changes
class ItemVersion:
    """Represents the current version of an item: it links an item in the data store to the last change that was applied to it.

    Attributes:
        current_item_change (Optional[ItemChange]): Last change applied to the item; None only if the item is not in the data store yet.
        item_id (str): The primary key of the item being referenced by this version.
        date_created (dt.datetime): The date this item was first added to this data store.
        vector_clock (VectorClock): The VectorClock of the last change applied to this item (same as current_item_change.vector_clock).
    """

    current_item_change: "Optional[ItemChange]"
    item_id: "str"
    date_created: "dt.datetime"
    vector_clock: "VectorClock"

    def __init__(
        self,
        current_item_change: "Optional[ItemChange]",
        item_id: "str",
        date_created: "dt.datetime",
        vector_clock: "Optional[VectorClock]" = None,
    ):
        """
        Args:
            current_item_change (Optional[ItemChange]): Last change applied to the item, if any.
            item_id (str): Primary key of the referenced item.
            date_created (dt.datetime): When the item was first added to this data store.
            vector_clock (Optional[VectorClock]): Clock of the last applied change; falls back to current_item_change.vector_clock when omitted.
        """
        # At least one source for the clock must be supplied, and when both
        # are given they must agree.
        assert current_item_change is not None or vector_clock is not None
        if current_item_change is not None and vector_clock is not None:
            assert vector_clock == current_item_change.vector_clock
        self.current_item_change = current_item_change
        self.item_id = item_id
        self.date_created = date_created
        self.vector_clock = (
            vector_clock if vector_clock else current_item_change.vector_clock
        )

    def __repr__(self):  # pragma: no cover
        return f"ItemVersion(item_id='{self.item_id}', current_item_change_id='{self.current_item_change.id if self.current_item_change else None}')"

    def __eq__(self, other: "object"):
        assert isinstance(other, ItemVersion)
        if self.current_item_change != other.current_item_change:
            return False
        # Primary keys are compared through str() so differing key types
        # (e.g. int vs str) still match.
        return str(self.item_id) == str(other.item_id)

    def __hash__(self):
        return hash((self.current_item_change, self.item_id))
class ConflictStatus(Enum):
    """Represents the status of a conflict."""

    DEFERRED = "DEFERRED"  # not yet resolved
    RESOLVED = "RESOLVED"  # conflict settled
class ConflictType(Enum):
    """Represents the type of conflict that was detected.

    The LOCAL_*_REMOTE_* members name the pair of local vs remote operations
    that clashed; EXCEPTION_OCCURRED marks a change whose application raised
    an exception rather than a true operation clash.
    """

    LOCAL_UPDATE_REMOTE_UPDATE = "LOCAL_UPDATE_REMOTE_UPDATE"
    LOCAL_UPDATE_REMOTE_DELETE = "LOCAL_UPDATE_REMOTE_DELETE"
    LOCAL_UPDATE_REMOTE_INSERT = "LOCAL_UPDATE_REMOTE_INSERT"
    LOCAL_INSERT_REMOTE_UPDATE = "LOCAL_INSERT_REMOTE_UPDATE"
    LOCAL_DELETE_REMOTE_UPDATE = "LOCAL_DELETE_REMOTE_UPDATE"
    LOCAL_DELETE_REMOTE_DELETE = "LOCAL_DELETE_REMOTE_DELETE"
    EXCEPTION_OCCURRED = "EXCEPTION_OCCURRED"
class ConflictLog:
    """Represents the occurrence of a conflict during a synchronization session.
    Attributes:
        id (uuid.UUID): This instance's primary key.
        created_at (dt.datetime): The date that this conflict was detected.
        resolved_at (Optional[dt.datetime]): The date when the conflict was resolved or None if the conflict's status is ConflictStatus.DEFERRED.
        item_change_loser (ItemChange): The change that lost the conflict or, in the case of a conflict of type ConflictType.EXCEPTION_OCCURRED, the change that caused the exception to be raised.
        item_change_winner (Optional[ItemChange]): The change that won the conflict or None, in the case of a conflict of type ConflictType.EXCEPTION_OCCURRED.
        status (ConflictStatus): The status of the conflict.
        conflict_type (ConflictType): The type of conflict.
        description (Optional[str]): This field will contain the stack trace in case the type of conflict is ConflictType.EXCEPTION_OCCURRED, otherwise it will be null.
        query_ids (List[str], optional): A list with identifiers of queries that tried to sync this change
    """

    id: "uuid.UUID"
    created_at: "dt.datetime"
    resolved_at: "Optional[dt.datetime]"
    item_change_loser: "ItemChange"
    item_change_winner: "Optional[ItemChange]"
    query_ids: "List[str]"
    status: "ConflictStatus"
    conflict_type: "ConflictType"
    description: "Optional[str]"

    def __init__(
        self,
        id: "uuid.UUID",
        created_at: "dt.datetime",
        resolved_at: "Optional[dt.datetime]",
        item_change_loser: "ItemChange",
        item_change_winner: "Optional[ItemChange]",
        status: "ConflictStatus",
        conflict_type: "ConflictType",
        description: "Optional[str]",
        query_ids: "Optional[List[str]]" = None,
    ):
        """
        Args:
            id (uuid.UUID): This instance's primary key.
            created_at (dt.datetime): The date that this conflict was detected.
            resolved_at (Optional[dt.datetime]): The date when the conflict was resolved or None if the conflict's status is ConflictStatus.DEFERRED.
            item_change_loser (ItemChange): The change that lost the conflict or, in the case of a conflict of type ConflictType.EXCEPTION_OCCURRED, the change that caused the exception to be raised.
            item_change_winner (Optional[ItemChange]): The change that won the conflict or None, in the case of a conflict of type ConflictType.EXCEPTION_OCCURRED.
            status (ConflictStatus): The status of the conflict.
            conflict_type (ConflictType): The type of conflict.
            description (Optional[str]): This field will contain the stack trace in case the type of conflict is ConflictType.EXCEPTION_OCCURRED, otherwise it will be null.
            query_ids (Optional[List[str]]): Identifiers of queries that tried to sync this change; defaults to a new empty list.
        """
        self.id = id
        self.created_at = created_at
        self.resolved_at = resolved_at
        self.item_change_loser = item_change_loser
        self.item_change_winner = item_change_winner
        self.status = status
        self.conflict_type = conflict_type
        self.description = description
        # BUG FIX: the previous signature used `query_ids: List[str] = []`,
        # a mutable default shared across every instance created without an
        # explicit list — appending to one log's query_ids leaked into all
        # others. Use a None sentinel and allocate a fresh list per instance.
        self.query_ids = query_ids if query_ids is not None else []

    def __repr__(self):  # pragma: no cover
        return f"ConflictLog(id='{self.id}', item_change_loser_id='{self.item_change_loser.id}', item_change_winner_id='{self.item_change_winner.id if self.item_change_winner else None}', conflict_type={self.conflict_type}, status={self.status}, query_ids={self.query_ids})"

    def __eq__(self, other: "object"):
        assert isinstance(other, ConflictLog)
        for param in [
            "id",
            "created_at",
            "resolved_at",
            "item_change_loser",
            "item_change_winner",
            "status",
            "conflict_type",
            "description",
            "query_ids",
        ]:
            if not getattr(self, param) == getattr(other, param):
                return False
        return True
class SyncSessionStatus(Enum):
    """Lifecycle states of a SyncSession."""

    IN_PROGRESS = "IN_PROGRESS"
    FINISHED = "FINISHED"
    FAILED = "FAILED"
class SyncSession:
    """A single synchronization run between two providers.

    A session walks through three stages: processing previously deferred
    changes, pulling data from the source provider, and pushing it to the
    target provider.

    Attributes:
        id (uuid.UUID): This instance's primary key.
        started_at (dt.datetime): The date when this session started.
        ended_at (Optional[dt.datetime]): The date when this session ended; None only right after creation.
        status (SyncSessionStatus): The status of this session.
        source_provider_id (str): The source provider's identifier.
        target_provider_id (str): The target provider's identifier.
        item_changes (List[ItemChange]): The changes exchanged (sent or received) in this session.
        query_id (Optional[str]): The ID of the query that synced in this session.
    """

    id: "uuid.UUID"
    started_at: "dt.datetime"
    ended_at: "Optional[dt.datetime]"
    status: "SyncSessionStatus"
    source_provider_id: "str"
    target_provider_id: "str"
    item_changes: "List[ItemChange]"
    query_id: "Optional[str]"

    def __init__(
        self,
        id: "uuid.UUID",
        started_at: "dt.datetime",
        ended_at: "Optional[dt.datetime]",
        status: "SyncSessionStatus",
        source_provider_id: "str",
        target_provider_id: "str",
        item_changes: "List[ItemChange]",
        query_id: "Optional[str]" = None,
    ):
        """See the class docstring for the meaning of each argument."""
        self.id = id
        self.status = status
        self.started_at = started_at
        self.ended_at = ended_at
        self.source_provider_id = source_provider_id
        self.target_provider_id = target_provider_id
        self.item_changes = item_changes
        self.query_id = query_id

    def __repr__(self):  # pragma: no cover
        return f"SyncSession(id='{self.id}', started_at='{self.started_at}', ended_at='{self.ended_at}', status={self.status}, source_provider_id={self.source_provider_id}, target_provider_id={self.target_provider_id}, item_changes=[...{len(self.item_changes)} changes], query_id={self.query_id})"

    def __eq__(self, other: "object"):
        assert isinstance(other, SyncSession)
        compared = (
            "id",
            "started_at",
            "ended_at",
            "status",
            "source_provider_id",
            "target_provider_id",
            "item_changes",
            "query_id",
        )
        # all() short-circuits on the first differing field, just like the
        # explicit loop it replaces.
        return all(getattr(self, name) == getattr(other, name) for name in compared)
import copy
from enum import Enum
from dataclasses import dataclass, field
import uuid
from typing import List, Optional, Dict, cast
class VectorClockItem:
    """Stores the synchronization timestamp produced by a specific provider.

    Ordering (``<`` / ``>``) is only meaningful between items of the same
    provider and is asserted. Equality, however, must be safe for arbitrary
    pairs (hash containers and field-wise comparisons such as
    ``ItemChange.__eq__`` may legitimately compare items from different
    providers), so items from different providers are simply unequal.
    """

    # Identifier of the provider that produced this timestamp.
    provider_id: "str"
    # Timestamp of the provider's latest known change.
    timestamp: "dt.datetime"

    def __init__(self, provider_id: "str", timestamp: "dt.datetime"):
        self.provider_id = provider_id
        self.timestamp = timestamp

    def __repr__(self):  # pragma: no cover
        return f"VectorClockItem(provider_id='{self.provider_id}', timestamp={repr(self.timestamp)})"

    def __lt__(self, other: "VectorClockItem"):
        assert isinstance(other, VectorClockItem)
        assert (
            other.provider_id == self.provider_id
        ), f"Can't compare clocks from different providers ({self.provider_id}, {other.provider_id})"
        return self.timestamp < other.timestamp

    def __gt__(self, other: "VectorClockItem"):
        assert isinstance(other, VectorClockItem)
        assert (
            other.provider_id == self.provider_id
        ), f"Can't compare clocks from different providers ({self.provider_id}, {other.provider_id})"
        return self.timestamp > other.timestamp

    def __eq__(self, other: "object"):
        # BUG FIX: equality used to assert same type and same provider,
        # raising AssertionError instead of answering the question. That
        # violates the __eq__ contract and crashed comparisons between
        # changes originating from different providers. Unequal types defer
        # to the other operand; unequal providers are just "not equal".
        if not isinstance(other, VectorClockItem):
            return NotImplemented
        if other.provider_id != self.provider_id:
            return False
        return self.timestamp == other.timestamp

    def __hash__(self):
        return hash((self.provider_id, self.timestamp))

    def is_empty(self):
        """True while this item still holds the initial (minimum, UTC) timestamp."""
        return self.timestamp == dt.datetime.min.replace(tzinfo=dt.timezone.utc)
class VectorClock:
    """Groups multiple VectorClockItems from different providers. This class represents the
    state of synchronization of a given provider at a specific instant.

    A provider that is not tracked yet is treated as having the minimum (UTC)
    timestamp: get_vector_clock_item() lazily creates such entries on first
    access.
    """

    # Maps provider_id -> that provider's VectorClockItem.
    _vector_clock_items_by_id: "Dict[str, VectorClockItem]"

    def __init__(self, *vector_clock_items: "VectorClockItem"):
        """
        Args:
            vector_clock_items (VectorClockItem): Initial items, at most one per provider.

        Raises:
            ValueError: If two items share the same provider_id.
        """
        self._vector_clock_items_by_id = {}
        for vector_item in vector_clock_items:
            if vector_item.provider_id in self._vector_clock_items_by_id:
                raise ValueError(f"Duplicate provider ids! {vector_item.provider_id}")
            self._vector_clock_items_by_id[vector_item.provider_id] = vector_item

    def __repr__(self):  # pragma: no cover
        return (
            "VectorClock("
            + ", ".join(list(repr(v) for v in self._vector_clock_items_by_id.values()))
            + ")"
        )

    def __iter__(self):
        """Iterates the VectorClockItems"""
        return iter(self._vector_clock_items_by_id.values())

    def __eq__(self, other: "object"):
        """Compares two VectorClocks. For them to be equal, their VectorClockItems must be equal.

        A provider missing on one side compares as the minimum timestamp.
        NOTE(review): get_vector_clock_item() inserts missing entries, so
        comparing two clocks materializes empty items in BOTH operands as a
        side effect.
        """
        assert isinstance(other, VectorClock)
        # Every item of self must match other's (possibly lazily created) item.
        for vector_item in self:
            other_vector_item = other.get_vector_clock_item(vector_item.provider_id)
            if vector_item.timestamp != other_vector_item.timestamp:
                return False
        # And vice versa, to catch providers tracked only by `other`.
        for other_vector_item in other:
            vector_item = self.get_vector_clock_item(other_vector_item.provider_id)
            if vector_item.timestamp != other_vector_item.timestamp:
                return False
        return True

    def __hash__(self):
        # Empty (minimum-timestamp) items are skipped so that clocks that
        # compare equal — where a missing provider equals an empty one —
        # also hash equal regardless of which providers they explicitly track.
        # NOTE(review): the tuple preserves dict insertion order, so two equal
        # clocks whose items were added in different orders could hash
        # differently; also items are mutable via update(), so the hash is
        # only stable while the clock is not updated — confirm callers rely
        # on neither.
        return hash(
            tuple(
                [
                    vector_clock_item
                    for vector_clock_item in self._vector_clock_items_by_id.values()
                    if not vector_clock_item.is_empty()
                ]
            )
        )

    @classmethod
    def create_empty(cls, provider_ids: "List[str]") -> "VectorClock":
        """Initializes a VectorClock with the minimum timestamp for the given provider identifiers.
        Args:
            provider_ids (List[str]): List of provider identifiers.
        """
        vector_clock = VectorClock()
        for provider_id in provider_ids:
            # Lookup has the side effect of creating the empty item.
            vector_clock.get_vector_clock_item(provider_id=provider_id)
        return vector_clock

    def get_vector_clock_item(self, provider_id: "str") -> "VectorClockItem":
        """Returns a VectorClockItem matching the given provider identifier. If none is found,
        a new one is created with timestamp equal to dt.datetime.min.

        Note: this is a mutating lookup — the lazily created item is stored
        on the clock.

        Args:
            provider_id (str): Provider identifier.
        Returns:
            VectorClockItem: The matching item.
        """
        vector_clock_item = self._vector_clock_items_by_id.get(provider_id)
        if vector_clock_item is None:
            vector_clock_item = VectorClockItem(
                provider_id=provider_id,
                timestamp=dt.datetime.min.replace(tzinfo=dt.timezone.utc),
            )
            self._vector_clock_items_by_id[provider_id] = vector_clock_item
        return vector_clock_item

    def update(self, vector_clock_item: "VectorClockItem"):
        """Updates the corresponding VectorClockItem with the new timestamp only if the new
        timestamp is greater than the current one.
        Args:
            vector_clock_item (VectorClockItem): The VectorClockItem that should be updated.
        """
        old_vector_clock_item = self.get_vector_clock_item(
            vector_clock_item.provider_id
        )
        # Timestamps only move forward; stale updates are ignored.
        if old_vector_clock_item < vector_clock_item:
            old_vector_clock_item.timestamp = vector_clock_item.timestamp
class Operation(Enum):
    """Represents an operation that can be performed to an item."""

    INSERT = "INSERT"  # item created
    UPDATE = "UPDATE"  # item modified
    DELETE = "DELETE"  # item removed
@dataclass(frozen=True)
class SerializationResult:
    """Stores the result of serializing an item

    Frozen (immutable) dataclass, so instances are hashable and safe to use
    inside ItemChange equality/hash computations.

    Attributes:
        item_id (str): The primary key of the item.
        entity_name (str): The type of entity represented by this item.
        serialized_item (str): The serialized item.
    """

    item_id: "str"
    entity_name: "str"
    # repr=False keeps potentially large serialized payloads out of debug output.
    serialized_item: "str" = field(repr=False)
class ItemChange:
    """Represents a change performed to an item.
    Attributes:
        id (uuid.UUID): The change's primary key
        date_created (dt.datetime): The date that the change was saved to the data store.
        is_applied (bool): Indicates whether this change has already been applied to the item.
        operation (Operation): The operation that was performed with this change.
        change_vector_clock_item (VectorClockItem): The VectorClockItem generated by the provider that created the change.
        insert_vector_clock_item (VectorClockItem): The VectorClockItem generated by the provider that first inserted the item referenced by this change.
        serialization_result (SerializationResult): The serialization information for the item referenced by this change.
        should_ignore (bool): Indicates whether this change should be ignored (will only be true if this change lost a conflict dispute).
        vector_clock (VectorClock): The synchronization clock at the time this change was created.
    """

    id: "uuid.UUID"
    date_created: "dt.datetime"
    operation: "Operation"
    serialization_result: "SerializationResult"
    change_vector_clock_item: "VectorClockItem"
    insert_vector_clock_item: "VectorClockItem"
    should_ignore: "bool"
    is_applied: "bool"
    vector_clock: "VectorClock"

    # Single source of truth for the fields that define logical identity,
    # shared by __eq__ and __hash__ so they can never drift apart.
    # date_created is deliberately excluded (it is provider-local, see
    # reset_status()).
    _IDENTITY_FIELDS = (
        "id",
        "operation",
        "change_vector_clock_item",
        "insert_vector_clock_item",
        "serialization_result",
        "should_ignore",
        "is_applied",
        "vector_clock",
    )

    def __init__(
        self,
        id: "uuid.UUID",
        date_created: "dt.datetime",
        operation: "Operation",
        change_vector_clock_item: "VectorClockItem",
        insert_vector_clock_item: "VectorClockItem",
        serialization_result: "SerializationResult",
        should_ignore: "bool",
        is_applied: "bool",
        vector_clock: "VectorClock",
    ):
        """
        Args:
            id (uuid.UUID): The change's primary key
            date_created (dt.datetime): The date that the change was saved to the data store.
            is_applied (bool): Indicates whether this change has already been applied to the item.
            operation (Operation): The operation that was performed with this change.
            change_vector_clock_item (VectorClockItem): The VectorClockItem generated by the provider that created the change.
            insert_vector_clock_item (VectorClockItem): The VectorClockItem generated by the provider that first inserted the item referenced by this change.
            serialization_result (SerializationResult): The serialization information for the item referenced by this change.
            should_ignore (bool): Indicates whether this change should be ignored (will only be true if this change lost a conflict dispute).
            vector_clock (VectorClock): The synchronization clock at the time this change was created.
        """
        self.id = id
        self.date_created = date_created
        self.operation = operation
        self.change_vector_clock_item = change_vector_clock_item
        self.insert_vector_clock_item = insert_vector_clock_item
        self.serialization_result = serialization_result
        self.should_ignore = should_ignore
        self.is_applied = is_applied
        self.vector_clock = vector_clock

    def __repr__(self):  # pragma: no cover
        return f"ItemChange(id='{self.id}', operation={self.operation}, serialization_result={self.serialization_result}, change_vector_clock_item={self.change_vector_clock_item}, should_ignore={self.should_ignore}, is_applied={self.is_applied})"

    def reset_status(self):
        """Resets all the fields that only make sense locally to a provider that applied the change.
        This method is called before the change is sent to a remote provider."""
        self.is_applied = False
        self.should_ignore = False
        self.date_created = None  # re-assigned by the receiving provider's data store

    def __eq__(self, other: "object"):
        assert isinstance(other, ItemChange)
        for param in self._IDENTITY_FIELDS:
            if getattr(self, param) != getattr(other, param):
                return False
        return True

    def __hash__(self):
        # BUG FIX: the previous implementation called hash() on a *generator
        # object* — hash((x for ...)) hashes the generator itself (identity
        # based), so two equal ItemChanges produced different hashes and
        # broke use in sets/dicts. Hash a tuple of the identity fields
        # instead, keeping __hash__ consistent with __eq__.
        return hash(tuple(getattr(self, param) for param in self._IDENTITY_FIELDS))
class ItemChangeBatch:
    """One page of changes exchanged during synchronization.

    Changes are synced in chunks rather than all at once; each batch carries
    its slice of changes plus a flag marking the final chunk of the session.

    Attributes:
        item_changes (List[ItemChange]): List of changes contained in the batch.
        is_last_batch (bool): Indicates whether this is the last batch of items to be processed.
    """

    item_changes: "List[ItemChange]"
    is_last_batch: "bool"

    def __init__(self, item_changes: "List[ItemChange]", is_last_batch: "bool"):
        """
        Args:
            item_changes (List[ItemChange]): Changes contained in the batch.
            is_last_batch (bool): True when no further batches will follow.
        """
        self.item_changes = item_changes
        self.is_last_batch = is_last_batch

    def __repr__(self):  # pragma: no cover
        return f"ItemChangeBatch(item_changes=[...{len(self.item_changes)} changes], is_last_batch={self.is_last_batch})"

    def get_vector_clock_after_done(
        self, initial_vector_clock: "VectorClock"
    ) -> "VectorClock":
        """Computes the VectorClock the provider will have once every change
        in this batch has been applied.

        Args:
            initial_vector_clock (VectorClock): The VectorClock before the changes are applied.
        """
        # Work on a deep copy so the caller's clock is left untouched.
        updated_clock = copy.deepcopy(initial_vector_clock)
        for change in self.item_changes:
            updated_clock.update(vector_clock_item=change.change_vector_clock_item)
        return updated_clock

    def reset_status(self):
        """Strips provider-local state from every change before the batch is sent to a remote provider."""
        for change in self.item_changes:
            change.reset_status()

    def __eq__(self, other: "object"):
        assert isinstance(other, ItemChangeBatch)
        if self.is_last_batch != other.is_last_batch:
            return False
        return self.item_changes == other.item_changes
class ItemVersion:
    """Represents the current version of an item: it links an item in the data store to the last change that was applied to it.

    Attributes:
        current_item_change (Optional[ItemChange]): Last change applied to the item; None only if the item is not in the data store yet.
        item_id (str): The primary key of the item being referenced by this version.
        date_created (dt.datetime): The date this item was first added to this data store.
        vector_clock (VectorClock): The VectorClock of the last change applied to this item (same as current_item_change.vector_clock).
    """

    current_item_change: "Optional[ItemChange]"
    item_id: "str"
    date_created: "dt.datetime"
    vector_clock: "VectorClock"

    def __init__(
        self,
        current_item_change: "Optional[ItemChange]",
        item_id: "str",
        date_created: "dt.datetime",
        vector_clock: "Optional[VectorClock]" = None,
    ):
        """
        Args:
            current_item_change (Optional[ItemChange]): Last change applied to the item, if any.
            item_id (str): Primary key of the referenced item.
            date_created (dt.datetime): When the item was first added to this data store.
            vector_clock (Optional[VectorClock]): Clock of the last applied change; falls back to current_item_change.vector_clock when omitted.
        """
        # At least one source for the clock must be supplied, and when both
        # are given they must agree.
        assert current_item_change is not None or vector_clock is not None
        if current_item_change is not None and vector_clock is not None:
            assert vector_clock == current_item_change.vector_clock
        self.current_item_change = current_item_change
        self.item_id = item_id
        self.date_created = date_created
        self.vector_clock = (
            vector_clock if vector_clock else current_item_change.vector_clock
        )

    def __repr__(self):  # pragma: no cover
        return f"ItemVersion(item_id='{self.item_id}', current_item_change_id='{self.current_item_change.id if self.current_item_change else None}')"

    def __eq__(self, other: "object"):
        assert isinstance(other, ItemVersion)
        if self.current_item_change != other.current_item_change:
            return False
        # Primary keys are compared through str() so differing key types
        # (e.g. int vs str) still match.
        return str(self.item_id) == str(other.item_id)

    def __hash__(self):
        return hash((self.current_item_change, self.item_id))
class ConflictStatus(Enum):
    """Represents the status of a conflict."""
    DEFERRED = "DEFERRED"  # resolution was postponed to a later sync session
    RESOLVED = "RESOLVED"  # a winning change was selected and applied
class ConflictType(Enum):
    """Represents the type of conflict that was detected."""
    # Naming scheme: LOCAL_<op>_REMOTE_<op> describes the pair of operations
    # that collided on the same item.
    LOCAL_UPDATE_REMOTE_UPDATE = "LOCAL_UPDATE_REMOTE_UPDATE"
    LOCAL_UPDATE_REMOTE_DELETE = "LOCAL_UPDATE_REMOTE_DELETE"
    LOCAL_UPDATE_REMOTE_INSERT = "LOCAL_UPDATE_REMOTE_INSERT"
    LOCAL_INSERT_REMOTE_UPDATE = "LOCAL_INSERT_REMOTE_UPDATE"
    LOCAL_DELETE_REMOTE_UPDATE = "LOCAL_DELETE_REMOTE_UPDATE"
    LOCAL_DELETE_REMOTE_DELETE = "LOCAL_DELETE_REMOTE_DELETE"
    EXCEPTION_OCCURRED = "EXCEPTION_OCCURRED"  # applying a change raised an exception
class ConflictLog:
    """Represents the occurrence of a conflict during a synchronization session.

    Attributes:
        id (uuid.UUID): This instance's primary key.
        created_at (dt.datetime): The date that this conflict was detected.
        resolved_at (Optional[dt.datetime]): The date when the conflict was resolved or None if the conflict's status is ConflictStatus.DEFERRED.
        item_change_loser (ItemChange): The change that lost the conflict or, in the case of a conflict of type ConflictType.EXCEPTION_OCCURRED, the change that caused the exception to be raised.
        item_change_winner (Optional[ItemChange]): The change that won the conflict or None, in the case of a conflict of type ConflictType.EXCEPTION_OCCURRED.
        status (ConflictStatus): The status of the conflict.
        conflict_type (ConflictType): The type of conflict.
        description (Optional[str]): This field will contain the stack trace in case the type of conflict is ConflictType.EXCEPTION_OCCURRED, otherwise it will be null.
        query_ids (List[str], optional): A list with identifiers of queries that tried to sync this change
    """

    id: "uuid.UUID"
    created_at: "dt.datetime"
    resolved_at: "Optional[dt.datetime]"
    item_change_loser: "ItemChange"
    item_change_winner: "Optional[ItemChange]"
    query_ids: "List[str]"
    status: "ConflictStatus"
    conflict_type: "ConflictType"
    description: "Optional[str]"

    def __init__(
        self,
        id: "uuid.UUID",
        created_at: "dt.datetime",
        resolved_at: "Optional[dt.datetime]",
        item_change_loser: "ItemChange",
        item_change_winner: "Optional[ItemChange]",
        status: "ConflictStatus",
        conflict_type: "ConflictType",
        description: "Optional[str]",
        query_ids: "Optional[List[str]]" = None,
    ):
        """
        Args:
            id (uuid.UUID): This instance's primary key.
            created_at (dt.datetime): The date that this conflict was detected.
            resolved_at (Optional[dt.datetime]): The date when the conflict was resolved or None if the conflict's status is ConflictStatus.DEFERRED.
            item_change_loser (ItemChange): The change that lost the conflict or, in the case of a conflict of type ConflictType.EXCEPTION_OCCURRED, the change that caused the exception to be raised.
            item_change_winner (Optional[ItemChange]): The change that won the conflict or None, in the case of a conflict of type ConflictType.EXCEPTION_OCCURRED.
            status (ConflictStatus): The status of the conflict.
            conflict_type (ConflictType): The type of conflict.
            description (Optional[str]): This field will contain the stack trace in case the type of conflict is ConflictType.EXCEPTION_OCCURRED, otherwise it will be null.
            query_ids (Optional[List[str]]): A list with identifiers of queries that tried to sync this change; defaults to an empty list.
        """
        self.id = id
        self.created_at = created_at
        self.resolved_at = resolved_at
        self.item_change_loser = item_change_loser
        self.item_change_winner = item_change_winner
        self.status = status
        self.conflict_type = conflict_type
        self.description = description
        # Fix: the original signature used a mutable default (query_ids=[]),
        # which is created once and shared by every instance constructed
        # without the argument. Use a None sentinel and build a fresh list.
        self.query_ids = query_ids if query_ids is not None else []

    def __repr__(self):  # pragma: no cover
        return f"ConflictLog(id='{self.id}', item_change_loser_id='{self.item_change_loser.id}', item_change_winner_id='{self.item_change_winner.id if self.item_change_winner else None}', conflict_type={self.conflict_type}, status={self.status}, query_ids={self.query_ids})"

    def __eq__(self, other: "object"):
        # Field-by-field comparison over all persisted attributes.
        assert isinstance(other, ConflictLog)
        for param in [
            "id",
            "created_at",
            "resolved_at",
            "item_change_loser",
            "item_change_winner",
            "status",
            "conflict_type",
            "description",
            "query_ids",
        ]:
            if not getattr(self, param) == getattr(other, param):
                return False
        return True
class SyncSessionStatus(Enum):
    """Represents the lifecycle status of a synchronization session."""
    IN_PROGRESS = "IN_PROGRESS"  # session created, exchange still running
    FINISHED = "FINISHED"  # session completed successfully
    FAILED = "FAILED"  # session aborted with an error
class SyncSession:
    """Represents a synchronization session. A session consists of the following stages:
    - Processing deferred changes
    - Retrieving data from the source provider
    - Sending data to the target provider.

    Attributes:
        id (uuid.UUID): This instance's primary key.
        started_at (dt.datetime): The date when this session started.
        ended_at (Optional[dt.datetime]): The date when this session ended. It will only be None right after the session is created.
        status (SyncSessionStatus): The status of this session.
        source_provider_id (str): The source provider's identifier.
        target_provider_id (str): The target provider's identifier.
        item_changes (List[ItemChange]): The list of changes that were exchanged in this session (either sent or received).
        query_id (Optional[str]): The ID of the query that synced in this session.
    """

    id: "uuid.UUID"
    started_at: "dt.datetime"
    ended_at: "Optional[dt.datetime]"
    status: "SyncSessionStatus"
    source_provider_id: "str"
    target_provider_id: "str"
    item_changes: "List[ItemChange]"
    query_id: "Optional[str]"

    def __init__(
        self,
        id: "uuid.UUID",
        started_at: "dt.datetime",
        ended_at: "Optional[dt.datetime]",
        status: "SyncSessionStatus",
        source_provider_id: "str",
        target_provider_id: "str",
        item_changes: "List[ItemChange]",
        query_id: "Optional[str]" = None,
    ):
        """
        Args:
            id (uuid.UUID): This instance's primary key.
            started_at (dt.datetime): The date when this session started. (Doc fix: was wrongly documented as str.)
            ended_at (Optional[dt.datetime]): The date when this session ended. It will only be None right after the session is created.
            status (SyncSessionStatus): The status of this session.
            source_provider_id (str): The source provider's identifier.
            target_provider_id (str): The target provider's identifier.
            item_changes (List[ItemChange]): The list of changes that were exchanged in this session (either sent or received).
            query_id (Optional[str]): The ID of the query that synced in this session.
        """
        self.id = id
        self.started_at = started_at
        self.ended_at = ended_at
        self.status = status
        self.source_provider_id = source_provider_id
        self.target_provider_id = target_provider_id
        self.item_changes = item_changes
        self.query_id = query_id

    def __repr__(self):  # pragma: no cover
        return f"SyncSession(id='{self.id}', started_at='{self.started_at}', ended_at='{self.ended_at}', status={self.status}, source_provider_id={self.source_provider_id}, target_provider_id={self.target_provider_id}, item_changes=[...{len(self.item_changes)} changes], query_id={self.query_id})"

    def __eq__(self, other: "object"):
        # Field-by-field comparison over all persisted attributes.
        assert isinstance(other, SyncSession)
        for param in [
            "id",
            "started_at",
            "ended_at",
            "status",
            "source_provider_id",
            "target_provider_id",
            "item_changes",
            "query_id",
        ]:
            if not getattr(self, param) == getattr(other, param):
                return False
        return True
import pytest
from eth_abi import encode_single
from eth_tester import exceptions
from web3 import Web3
ZERO_ADDRESS = "0x" + "0" * 40
SECRET = encode_single("bytes32", b"123456ab")
HASHED_SECRET = Web3.solidityKeccak(["bytes32"], [SECRET])
MAX_FEE = 2 ** 64 - 1
WEEK_SECONDS = 60 * 60 * 24 * 7
def get_events_of_contract(contract, event_name, from_block=0):
    """Return all logs emitted by *contract* for *event_name* since *from_block*."""
    event = getattr(contract.events, event_name)
    logs = event.getLogs(fromBlock=from_block)
    return list(logs)
def get_single_event_of_contract(contract, event_name, from_block=0):
    """Return the unique *event_name* log of *contract*; assert exactly one exists."""
    matching = get_events_of_contract(contract, event_name, from_block)
    assert len(matching) == 1, f"No single event of type {event_name}"
    return matching[0]
@pytest.fixture()
def sender(accounts):
    # Second test account plays the swap initiator.
    return accounts[1]
@pytest.fixture()
def receiver(accounts):
    # Third test account plays the swap counterparty.
    return accounts[2]
@pytest.fixture()
def swap_tl_amount():
    # Default trustlines amount committed by the `commit_swap` fixture.
    return 100
@pytest.fixture()
def commit_swap(
    tl_swap_contract, tl_currency_network_contract, sender, receiver, swap_tl_amount
):
    # Commits a default swap: sender offers `swap_tl_amount` TL money against
    # an amount of 1 to a fixed ethereum address, expiring in one week.
    tl_swap_contract.functions.commit(
        receiver,
        tl_currency_network_contract.address,
        swap_tl_amount,
        "0xBf6CA0E4b2B5C788dB424383A95fd019d2EB717f",
        1,
        WEEK_SECONDS,
        HASHED_SECRET,
    ).transact({"from": sender})
# A successful commit emits a Commit event carrying all swap parameters.
def test_tl_swap_emit_initiated_event(
    tl_swap_contract, tl_currency_network_contract, accounts, web3
):
    sender = accounts[1]
    receiver = accounts[2]
    network = tl_currency_network_contract.address
    amount = 100
    tl_swap_contract.functions.commit(
        receiver, network, amount, receiver, 1, WEEK_SECONDS, HASHED_SECRET,
    ).transact({"from": sender})
    commit_initiated_event = get_single_event_of_contract(
        tl_swap_contract, "Commit", 0,
    )["args"]
    assert commit_initiated_event.hash == HASHED_SECRET
    assert commit_initiated_event.sender == sender
    assert commit_initiated_event.receiver == receiver
    assert commit_initiated_event.TLNetwork == network
    assert commit_initiated_event.TLMoneyAmount == amount
    assert (
        commit_initiated_event.expiryTime
        == WEEK_SECONDS + web3.eth.getBlock("latest").timestamp
    )
# Re-committing under the same hashed secret must be rejected.
def test_tl_swap_commit_entry_alredy_exists(
    tl_swap_contract, tl_currency_network_contract, sender, receiver
):
    tl_swap_contract.functions.commit(
        receiver,
        tl_currency_network_contract.address,
        100,
        receiver,
        1,
        WEEK_SECONDS,
        HASHED_SECRET,
    ).transact({"from": sender})
    with pytest.raises(exceptions.TransactionFailed) as excinfo:
        tl_swap_contract.functions.commit(
            receiver,
            tl_currency_network_contract.address,
            200,
            receiver,
            1,
            WEEK_SECONDS,
            HASHED_SECRET,
        ).transact({"from": sender})
    assert "Entry already exists" in str(excinfo.value)
# A zero TL amount must be rejected by the contract.
def test_tl_swap_commit_tl_money_required(
    tl_swap_contract, sender, receiver, tl_currency_network_contract
):
    with pytest.raises(exceptions.TransactionFailed) as excinfo:
        tl_swap_contract.functions.commit(
            receiver,
            tl_currency_network_contract.address,
            0,
            "0xBf6CA0E4b2B5C788dB424383A95fd019d2EB717f",
            1,
            WEEK_SECONDS,
            "f81b517a242b218999ec8eec0ea6e2ddbef2a367a14e93f4a32a39e260f686ad",
        ).transact({"from": sender})
    assert "TL total money amount is required" in str(excinfo.value)
# The zero address is not a valid ethereum counterparty address.
def test_tl_swap_commit_eth_address_required(
    tl_swap_contract, sender, receiver, tl_currency_network_contract
):
    with pytest.raises(exceptions.TransactionFailed) as excinfo:
        tl_swap_contract.functions.commit(
            receiver,
            tl_currency_network_contract.address,
            100,
            ZERO_ADDRESS,
            1,
            WEEK_SECONDS,
            "f81b517a242b218999ec8eec0ea6e2ddbef2a367a14e93f4a32a39e260f686ad",
        ).transact({"from": sender})
    assert "Ethereum address is required" in str(excinfo.value)
# A zero eth amount must be rejected by the contract.
def test_tl_swap_commit_eth_amount_required(
    tl_swap_contract, sender, receiver, tl_currency_network_contract
):
    with pytest.raises(exceptions.TransactionFailed) as excinfo:
        tl_swap_contract.functions.commit(
            receiver,
            tl_currency_network_contract.address,
            100,
            "0xa6C7310A1fc7A806Fd7c20B4b030501fCe2AC977",
            0,
            WEEK_SECONDS,
            "f81b517a242b218999ec8eec0ea6e2ddbef2a367a14e93f4a32a39e260f686ad",
        ).transact({"from": sender})
    assert "Eth total amount is required" in str(excinfo.value)
# Claiming with the correct secret triggers the TL currency network transfer.
def test_tl_swap_claim_tl_money(
    tl_swap_contract, tl_currency_network_contract, sender, receiver
):
    tl_swap_contract.functions.commit(
        receiver,
        tl_currency_network_contract.address,
        100,
        "0xBf6CA0E4b2B5C788dB424383A95fd019d2EB717f",
        1,
        WEEK_SECONDS,
        HASHED_SECRET,
    ).transact({"from": sender})
    extra_data = b""
    tl_swap_contract.functions.claim(
        [sender, receiver], MAX_FEE, extra_data, SECRET,
    ).transact({"from": receiver})
    currency_transfer_called = get_single_event_of_contract(
        tl_currency_network_contract, "Transfer", 0,
    )["args"]
    assert currency_transfer_called._from == sender
    assert currency_transfer_called._to == receiver
    assert currency_transfer_called._value == 100
    assert currency_transfer_called._extraData == extra_data
# An expired commitment (expiry time 0 here) can be removed, emitting ExpireCommitment.
def test_tl_swap_remove_commitment(
    tl_swap_contract, tl_currency_network_contract, sender, receiver
):
    tl_swap_contract.functions.commit(
        receiver,
        tl_currency_network_contract.address,
        100,
        "0xBf6CA0E4b2B5C788dB424383A95fd019d2EB717f",
        1,
        0,
        HASHED_SECRET,
    ).transact({"from": sender})
    commit_initiated_event = get_single_event_of_contract(
        tl_swap_contract, "Commit", 0,
    )["args"]
    assert commit_initiated_event.hash == HASHED_SECRET
    assert commit_initiated_event.TLMoneyAmount == 100
    tl_swap_contract.functions.removeCommitment(HASHED_SECRET).transact()
    expired_event = get_single_event_of_contract(
        tl_swap_contract, "ExpireCommitment", 0,
    )["args"]
    assert expired_event.hash == HASHED_SECRET
# After expiry and removal, claiming the commitment must fail.
@pytest.mark.usefixtures("commit_swap")
def test_claim_removed_commitment(tl_swap_contract, chain, sender, receiver, web3):
    expiry_time = web3.eth.getBlock("latest").timestamp + WEEK_SECONDS + 1
    chain.time_travel(expiry_time)
    tl_swap_contract.functions.removeCommitment(HASHED_SECRET).transact()
    with pytest.raises(exceptions.TransactionFailed):
        tl_swap_contract.functions.claim(
            [sender, receiver], MAX_FEE, b"", SECRET,
        ).transact()
def test_claim_your_own_commitment(
    accounts, tl_currency_network_contract, tl_swap_contract, sender, receiver
):
    """Claiming must fail when the payment path loops back to the sender.

    Fix: the final line of this test was corrupted by dataset-export residue
    fused onto the closing parenthesis; restored to valid Python.
    """
    network = tl_currency_network_contract.address
    amount = 100
    tl_swap_contract.functions.commit(
        receiver,
        network,
        amount,
        "0xBf6CA0E4b2B5C788dB424383A95fd019d2EB717f",
        1,
        WEEK_SECONDS,
        HASHED_SECRET,
    ).transact({"from": sender})
    with pytest.raises(exceptions.TransactionFailed):
        sender_friend = accounts[3]
        # Path starts and ends at the sender: the receiver would pay the sender.
        path = [sender, sender_friend, sender]
        tl_swap_contract.functions.claim(path, 0, b"", SECRET).transact(
            {"from": receiver}
        )
from eth_abi import encode_single
from eth_tester import exceptions
from web3 import Web3
ZERO_ADDRESS = "0x" + "0" * 40
SECRET = encode_single("bytes32", b"123456ab")
HASHED_SECRET = Web3.solidityKeccak(["bytes32"], [SECRET])
MAX_FEE = 2 ** 64 - 1
WEEK_SECONDS = 60 * 60 * 24 * 7
def get_events_of_contract(contract, event_name, from_block=0):
    """Collect every *event_name* log of *contract* from *from_block* on."""
    event_accessor = getattr(contract.events, event_name)
    return list(event_accessor.getLogs(fromBlock=from_block))
def get_single_event_of_contract(contract, event_name, from_block=0):
    """Fetch the one and only *event_name* log; assert there is exactly one."""
    found = get_events_of_contract(contract, event_name, from_block)
    assert len(found) == 1, f"No single event of type {event_name}"
    return found[0]
@pytest.fixture()
def sender(accounts):
    # Second test account plays the swap initiator.
    return accounts[1]
@pytest.fixture()
def receiver(accounts):
    # Third test account plays the swap counterparty.
    return accounts[2]
@pytest.fixture()
def swap_tl_amount():
    # Default trustlines amount committed by the `commit_swap` fixture.
    return 100
@pytest.fixture()
def commit_swap(
    tl_swap_contract, tl_currency_network_contract, sender, receiver, swap_tl_amount
):
    # Commits a default swap: sender offers `swap_tl_amount` TL money against
    # an amount of 1 to a fixed ethereum address, expiring in one week.
    tl_swap_contract.functions.commit(
        receiver,
        tl_currency_network_contract.address,
        swap_tl_amount,
        "0xBf6CA0E4b2B5C788dB424383A95fd019d2EB717f",
        1,
        WEEK_SECONDS,
        HASHED_SECRET,
    ).transact({"from": sender})
# A successful commit emits a Commit event carrying all swap parameters.
def test_tl_swap_emit_initiated_event(
    tl_swap_contract, tl_currency_network_contract, accounts, web3
):
    sender = accounts[1]
    receiver = accounts[2]
    network = tl_currency_network_contract.address
    amount = 100
    tl_swap_contract.functions.commit(
        receiver, network, amount, receiver, 1, WEEK_SECONDS, HASHED_SECRET,
    ).transact({"from": sender})
    commit_initiated_event = get_single_event_of_contract(
        tl_swap_contract, "Commit", 0,
    )["args"]
    assert commit_initiated_event.hash == HASHED_SECRET
    assert commit_initiated_event.sender == sender
    assert commit_initiated_event.receiver == receiver
    assert commit_initiated_event.TLNetwork == network
    assert commit_initiated_event.TLMoneyAmount == amount
    assert (
        commit_initiated_event.expiryTime
        == WEEK_SECONDS + web3.eth.getBlock("latest").timestamp
    )
# Re-committing under the same hashed secret must be rejected.
def test_tl_swap_commit_entry_alredy_exists(
    tl_swap_contract, tl_currency_network_contract, sender, receiver
):
    tl_swap_contract.functions.commit(
        receiver,
        tl_currency_network_contract.address,
        100,
        receiver,
        1,
        WEEK_SECONDS,
        HASHED_SECRET,
    ).transact({"from": sender})
    with pytest.raises(exceptions.TransactionFailed) as excinfo:
        tl_swap_contract.functions.commit(
            receiver,
            tl_currency_network_contract.address,
            200,
            receiver,
            1,
            WEEK_SECONDS,
            HASHED_SECRET,
        ).transact({"from": sender})
    assert "Entry already exists" in str(excinfo.value)
# A zero TL amount must be rejected by the contract.
def test_tl_swap_commit_tl_money_required(
    tl_swap_contract, sender, receiver, tl_currency_network_contract
):
    with pytest.raises(exceptions.TransactionFailed) as excinfo:
        tl_swap_contract.functions.commit(
            receiver,
            tl_currency_network_contract.address,
            0,
            "0xBf6CA0E4b2B5C788dB424383A95fd019d2EB717f",
            1,
            WEEK_SECONDS,
            "f81b517a242b218999ec8eec0ea6e2ddbef2a367a14e93f4a32a39e260f686ad",
        ).transact({"from": sender})
    assert "TL total money amount is required" in str(excinfo.value)
# The zero address is not a valid ethereum counterparty address.
def test_tl_swap_commit_eth_address_required(
    tl_swap_contract, sender, receiver, tl_currency_network_contract
):
    with pytest.raises(exceptions.TransactionFailed) as excinfo:
        tl_swap_contract.functions.commit(
            receiver,
            tl_currency_network_contract.address,
            100,
            ZERO_ADDRESS,
            1,
            WEEK_SECONDS,
            "f81b517a242b218999ec8eec0ea6e2ddbef2a367a14e93f4a32a39e260f686ad",
        ).transact({"from": sender})
    assert "Ethereum address is required" in str(excinfo.value)
# A zero eth amount must be rejected by the contract.
def test_tl_swap_commit_eth_amount_required(
    tl_swap_contract, sender, receiver, tl_currency_network_contract
):
    with pytest.raises(exceptions.TransactionFailed) as excinfo:
        tl_swap_contract.functions.commit(
            receiver,
            tl_currency_network_contract.address,
            100,
            "0xa6C7310A1fc7A806Fd7c20B4b030501fCe2AC977",
            0,
            WEEK_SECONDS,
            "f81b517a242b218999ec8eec0ea6e2ddbef2a367a14e93f4a32a39e260f686ad",
        ).transact({"from": sender})
    assert "Eth total amount is required" in str(excinfo.value)
# Claiming with the correct secret triggers the TL currency network transfer.
def test_tl_swap_claim_tl_money(
    tl_swap_contract, tl_currency_network_contract, sender, receiver
):
    tl_swap_contract.functions.commit(
        receiver,
        tl_currency_network_contract.address,
        100,
        "0xBf6CA0E4b2B5C788dB424383A95fd019d2EB717f",
        1,
        WEEK_SECONDS,
        HASHED_SECRET,
    ).transact({"from": sender})
    extra_data = b""
    tl_swap_contract.functions.claim(
        [sender, receiver], MAX_FEE, extra_data, SECRET,
    ).transact({"from": receiver})
    currency_transfer_called = get_single_event_of_contract(
        tl_currency_network_contract, "Transfer", 0,
    )["args"]
    assert currency_transfer_called._from == sender
    assert currency_transfer_called._to == receiver
    assert currency_transfer_called._value == 100
    assert currency_transfer_called._extraData == extra_data
# An expired commitment (expiry time 0 here) can be removed, emitting ExpireCommitment.
def test_tl_swap_remove_commitment(
    tl_swap_contract, tl_currency_network_contract, sender, receiver
):
    tl_swap_contract.functions.commit(
        receiver,
        tl_currency_network_contract.address,
        100,
        "0xBf6CA0E4b2B5C788dB424383A95fd019d2EB717f",
        1,
        0,
        HASHED_SECRET,
    ).transact({"from": sender})
    commit_initiated_event = get_single_event_of_contract(
        tl_swap_contract, "Commit", 0,
    )["args"]
    assert commit_initiated_event.hash == HASHED_SECRET
    assert commit_initiated_event.TLMoneyAmount == 100
    tl_swap_contract.functions.removeCommitment(HASHED_SECRET).transact()
    expired_event = get_single_event_of_contract(
        tl_swap_contract, "ExpireCommitment", 0,
    )["args"]
    assert expired_event.hash == HASHED_SECRET
# After expiry and removal, claiming the commitment must fail.
@pytest.mark.usefixtures("commit_swap")
def test_claim_removed_commitment(tl_swap_contract, chain, sender, receiver, web3):
    expiry_time = web3.eth.getBlock("latest").timestamp + WEEK_SECONDS + 1
    chain.time_travel(expiry_time)
    tl_swap_contract.functions.removeCommitment(HASHED_SECRET).transact()
    with pytest.raises(exceptions.TransactionFailed):
        tl_swap_contract.functions.claim(
            [sender, receiver], MAX_FEE, b"", SECRET,
        ).transact()
def test_claim_your_own_commitment(
    accounts, tl_currency_network_contract, tl_swap_contract, sender, receiver
):
    """Claiming must fail when the payment path loops back to the sender.

    Fix: the final line of this test was corrupted by dataset-export residue
    fused onto the closing parenthesis; restored to valid Python.
    """
    network = tl_currency_network_contract.address
    amount = 100
    tl_swap_contract.functions.commit(
        receiver,
        network,
        amount,
        "0xBf6CA0E4b2B5C788dB424383A95fd019d2EB717f",
        1,
        WEEK_SECONDS,
        HASHED_SECRET,
    ).transact({"from": sender})
    with pytest.raises(exceptions.TransactionFailed):
        sender_friend = accounts[3]
        # Path starts and ends at the sender: the receiver would pay the sender.
        path = [sender, sender_friend, sender]
        tl_swap_contract.functions.claim(path, 0, b"", SECRET).transact(
            {"from": receiver}
        )
import argparse
import sys
import logging
import time
import subprocess
import socket
import numpy
import dask
from distributed import Executor
# Command-line interface for the dask-distributed benchmark driver.
# Fix: every option previously had help="" which made --help output useless;
# the flags, types, and defaults are unchanged.
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
    "--tasks",
    type=int,
    default=1,
    help="Number of tasks submitted per benchmark round")
parser.add_argument(
    "--task-time-sec",
    type=float,
    default=1.0,
    help="Seconds each task sleeps to simulate work")
parser.add_argument(
    "--task-allocate-mb",
    type=float,
    default=0.0,
    help="Memory in MB each task allocates while it runs")
parser.add_argument(
    "--task-input-mb",
    type=float,
    default=0.0,
    help="Size in MB of the input payload shipped to each task")
parser.add_argument(
    "--task-output-mb",
    type=float,
    default=0.0,
    help="Size in MB of the payload each task returns")
parser.add_argument(
    "--dask-scheduler",
    metavar="HOST:PORT",
    help="Host and port of dask distributed scheduler")
parser.add_argument(
    "--jobs-range",
    type=int,
    nargs=3,
    default=None,
    help="start stop step worker counts to benchmark (passed to range())")
parser.add_argument(
    "--replicas",
    type=int,
    default=1,
    help="Number of times to repeat the benchmark per worker count")
parser.add_argument(
    "--scale-command",
    default="kubectl scale deployment daskd-worker --replicas=%d",
    help="Shell command template used to scale the worker deployment")
parser.add_argument(
    "--quiet",
    action="store_true",
    default=False,
    help="Suppress INFO logging")
parser.add_argument(
    "--verbose",
    action="store_true",
    default=False,
    help="Enable DEBUG logging")
parser.add_argument(
    "--out",
    help="Optional path of a file to also write result rows to")
def make_data(size_mb):
    """Return a random float64 array of roughly *size_mb* megabytes, or None for a falsy size."""
    if not size_mb:
        return None
    element_count = int(size_mb * 2**20 / 8)
    return numpy.random.rand(element_count)
def task(task_data, task_time, task_allocate_mb, task_output_mb):
    """Simulate one unit of work: hold memory, sleep, and report the host it ran on."""
    ballast = make_data(task_allocate_mb)  # held alive for the duration of the sleep
    time.sleep(task_time)
    return (socket.gethostname(), make_data(task_output_mb))
def go(client, args, cores, out_fds):
    # Run `args.replicas` benchmark rounds against `client` and append one
    # comma-separated "RESULT_ROW" line per round to every file in `out_fds`.
    for replica in range(args.replicas):
        # Build args.tasks delayed invocations of `task`, each shipping a
        # freshly generated input payload of args.task_input_mb megabytes.
        tasks = [
            dask.delayed(task)(
                make_data(args.task_input_mb),
                args.task_time_sec,
                args.task_allocate_mb,
                args.task_output_mb)
            for _ in range(args.tasks)
        ]
        start = time.time()
        results = client.compute(tasks, sync=True)
        print(results)
        # Wall-clock duration of this round, including scheduling overhead.
        length = time.time() - start
        assert len(results) == args.tasks
        # results items are (hostname, payload) tuples; count distinct hosts.
        logging.info("Hosts: %s" % len(set([x[0] for x in results])))
        for fd in out_fds:
            fd.write(", ".join([str(x) for x in [
                "RESULT_ROW",
                cores,
                replica,
                args.tasks,
                args.task_input_mb,
                args.task_time_sec,
                args.task_allocate_mb,
                args.task_output_mb,
                length
            ]]))
            fd.write("\n")
            fd.flush()
if __name__ == "__main__":
    # Entry point: parse flags, connect to the scheduler, optionally sweep a
    # range of worker counts, and run the benchmark at each size.
    # Fix: the final line was corrupted by dataset-export residue fused onto
    # it; restored to valid Python. Behavior is otherwise unchanged.
    args = parser.parse_args(sys.argv[1:])
    if not args.quiet:
        logging.basicConfig(level="INFO")
    if args.verbose:
        logging.basicConfig(level="DEBUG")
    out_fds = [sys.stdout]
    if args.out:
        out_fds.append(open(args.out, 'w'))
    client = None
    if args.dask_scheduler:
        client = Executor(args.dask_scheduler)
    else:
        # No scheduler given: start a local cluster.
        client = Executor()
    print(dir(client))
    logging.info(
        "Running with dask scheduler: %s [%s cores]" % (
            args.dask_scheduler,
            sum(client.ncores().values())))
    if args.jobs_range is not None:
        # Scale the worker deployment to each requested size and wait until
        # the cluster reports exactly that many cores before benchmarking.
        for i in range(*args.jobs_range):
            command = args.scale_command % i
            logging.info("Running: %s" % command)
            subprocess.check_call(command, shell=True)
            while True:
                cores = sum(client.ncores().values())
                logging.info(
                    "Cores: %d. Waiting for %d cores." % (cores, i))
                if cores == i:
                    break
                time.sleep(1)
            go(client, args, cores, out_fds)
    else:
        cores = sum(client.ncores().values())
        go(client, args, cores, out_fds)
import sys
import logging
import time
import subprocess
import socket
import numpy
import dask
from distributed import Executor
# Command-line interface for the dask-distributed benchmark driver.
# Fix: every option previously had help="" which made --help output useless;
# the flags, types, and defaults are unchanged.
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
    "--tasks",
    type=int,
    default=1,
    help="Number of tasks submitted per benchmark round")
parser.add_argument(
    "--task-time-sec",
    type=float,
    default=1.0,
    help="Seconds each task sleeps to simulate work")
parser.add_argument(
    "--task-allocate-mb",
    type=float,
    default=0.0,
    help="Memory in MB each task allocates while it runs")
parser.add_argument(
    "--task-input-mb",
    type=float,
    default=0.0,
    help="Size in MB of the input payload shipped to each task")
parser.add_argument(
    "--task-output-mb",
    type=float,
    default=0.0,
    help="Size in MB of the payload each task returns")
parser.add_argument(
    "--dask-scheduler",
    metavar="HOST:PORT",
    help="Host and port of dask distributed scheduler")
parser.add_argument(
    "--jobs-range",
    type=int,
    nargs=3,
    default=None,
    help="start stop step worker counts to benchmark (passed to range())")
parser.add_argument(
    "--replicas",
    type=int,
    default=1,
    help="Number of times to repeat the benchmark per worker count")
parser.add_argument(
    "--scale-command",
    default="kubectl scale deployment daskd-worker --replicas=%d",
    help="Shell command template used to scale the worker deployment")
parser.add_argument(
    "--quiet",
    action="store_true",
    default=False,
    help="Suppress INFO logging")
parser.add_argument(
    "--verbose",
    action="store_true",
    default=False,
    help="Enable DEBUG logging")
parser.add_argument(
    "--out",
    help="Optional path of a file to also write result rows to")
def make_data(size_mb):
    """Build a random float64 payload of about *size_mb* MB; falsy sizes yield None."""
    if not size_mb:
        return None
    n_doubles = int(size_mb * 2**20 / 8)
    return numpy.random.rand(n_doubles)
def task(task_data, task_time, task_allocate_mb, task_output_mb):
    """One benchmark work unit: allocate, sleep, then return (hostname, payload)."""
    held = make_data(task_allocate_mb)  # keeps the allocation alive while sleeping
    time.sleep(task_time)
    hostname = socket.gethostname()
    return (hostname, make_data(task_output_mb))
def go(client, args, cores, out_fds):
    # Run `args.replicas` benchmark rounds against `client` and append one
    # comma-separated "RESULT_ROW" line per round to every file in `out_fds`.
    for replica in range(args.replicas):
        # Build args.tasks delayed invocations of `task`, each shipping a
        # freshly generated input payload of args.task_input_mb megabytes.
        tasks = [
            dask.delayed(task)(
                make_data(args.task_input_mb),
                args.task_time_sec,
                args.task_allocate_mb,
                args.task_output_mb)
            for _ in range(args.tasks)
        ]
        start = time.time()
        results = client.compute(tasks, sync=True)
        print(results)
        # Wall-clock duration of this round, including scheduling overhead.
        length = time.time() - start
        assert len(results) == args.tasks
        # results items are (hostname, payload) tuples; count distinct hosts.
        logging.info("Hosts: %s" % len(set([x[0] for x in results])))
        for fd in out_fds:
            fd.write(", ".join([str(x) for x in [
                "RESULT_ROW",
                cores,
                replica,
                args.tasks,
                args.task_input_mb,
                args.task_time_sec,
                args.task_allocate_mb,
                args.task_output_mb,
                length
            ]]))
            fd.write("\n")
            fd.flush()
if __name__ == "__main__":
    # Entry point: parse flags, connect to the scheduler, optionally sweep a
    # range of worker counts, and run the benchmark at each size.
    # Fix: the final line was corrupted by dataset-export residue (quality
    # probability columns) fused onto it; restored to valid Python.
    args = parser.parse_args(sys.argv[1:])
    if not args.quiet:
        logging.basicConfig(level="INFO")
    if args.verbose:
        logging.basicConfig(level="DEBUG")
    out_fds = [sys.stdout]
    if args.out:
        out_fds.append(open(args.out, 'w'))
    client = None
    if args.dask_scheduler:
        client = Executor(args.dask_scheduler)
    else:
        # No scheduler given: start a local cluster.
        client = Executor()
    print(dir(client))
    logging.info(
        "Running with dask scheduler: %s [%s cores]" % (
            args.dask_scheduler,
            sum(client.ncores().values())))
    if args.jobs_range is not None:
        # Scale the worker deployment to each requested size and wait until
        # the cluster reports exactly that many cores before benchmarking.
        for i in range(*args.jobs_range):
            command = args.scale_command % i
            logging.info("Running: %s" % command)
            subprocess.check_call(command, shell=True)
            while True:
                cores = sum(client.ncores().values())
                logging.info(
                    "Cores: %d. Waiting for %d cores." % (cores, i))
                if cores == i:
                    break
                time.sleep(1)
            go(client, args, cores, out_fds)
    else:
        cores = sum(client.ncores().values())
        go(client, args, cores, out_fds)
from em import molecule
from em.dataset import metrics
from mpi4py import MPI
from mpi4py.futures import MPICommExecutor
from concurrent.futures import wait
import os
import argparse
import numpy as np
import pandas as pd
import copy
import json
from json import encoder
from skimage.measure import regionprops
import traceback
def convert(o):
    """json.dumps `default` hook: unwrap numpy scalar types to native Python values."""
    if not isinstance(o, np.generic):
        raise TypeError
    return o.item()
def overlapAndSplit(i, steps_range, sigma_range, resample_n, df, offset):
    """Generate 'good'-labelled segmentation samples for row *i* of *df* by
    splitting one ground-truth subunit along watershed boundaries.

    For each of *resample_n* samples: run watershed with random step/sigma,
    pick a ground-truth label that the watershed divides into 2-59 pieces,
    relabel those pieces as new subunits, and record overlap metrics.

    Returns a DataFrame with one row for the untouched ground truth plus one
    row per generated sample.  NOTE(review): the *offset* parameter is unused
    in this function - confirm against callers before removing.
    """
    gt_entry = df.iloc[i]
    output_df = pd.DataFrame(columns=['id','map_path','contourLevel','gt_path','gt_subunits','matched_subunits','step','sigma','voxels','voxels_assigned','euler_segments','iou', 'proportion', 'consistency', 'homogenity','label'])
    try:
        molecule_object = molecule.Molecule(gt_entry['map_path'], gt_entry['contourLevel'])
        gt_object = molecule.Molecule(gt_entry['tagged_path'], 0.001)
        molecule_density = molecule_object.getDataAtContour(1)
        gt_labels = gt_object.getDataAtContour(1)
        number_gt_segments = gt_entry['matched_subunits']
        # Remove noise
        molecule_density[gt_labels==0] = 0
        molecule_object.setData(molecule_density)
        # First append ground truth to result dataframe
        iou_gt = metrics.intersection_over_union(gt_object,gt_object)
        proportion_gt = metrics.proportion(gt_object, gt_object)
        homogenity_gt = metrics.homogenity(gt_object, gt_object)
        consistency_gt = metrics.consistency(gt_object, gt_object)
        output_df = output_df.append({'id':gt_entry['id'],'map_path':gt_entry['map_path'], 'contourLevel':gt_entry['contourLevel'], 'gt_path':gt_entry['tagged_path'], \
        'gt_subunits':gt_entry['subunits'], 'matched_subunits':gt_entry['matched_subunits'], 'step':0, 'sigma':0, 'voxels':gt_entry['voxels'],\
        'voxels_assigned':gt_entry['voxels_assigned'], 'euler_segments':gt_entry['euler_segments'], 'iou':iou_gt, 'proportion':proportion_gt, \
        'consistency':consistency_gt, 'homogenity':homogenity_gt,'label':'good'},ignore_index=True)
        # Compute diferent segmentation results with watershed
        # NOTE(review): this loop variable shadows the function parameter `i`.
        for i in range(resample_n):
            segmented_count = 1
            it = 1
            # Resample step/sigma until the watershed yields more than one segment.
            while(segmented_count==1):
                step = np.random.choice(range(steps_range[0],steps_range[1]+1),1, p=[0.4,0.4,0.2])
                sigma = np.random.uniform(sigma_range[0], sigma_range[1],1)
                print("iteration {} on molecule {} with {} steps and {} sigma".format(it,gt_entry['id'], step[0], sigma[0]))
                molecule_object.generateSegments(step[0], sigma[0])
                labels = molecule_object.labels.astype(np.int32)
                label_props = regionprops(labels)
                segmented_count = len(label_props)
                it+=1
            # Get gt labels and random choose one to split
            gt_label_props = regionprops(gt_labels.astype(np.int32))
            gt_label_list = [ l.label for l in gt_label_props ]
            label_can_be_splitted = False
            count = 0
            while(label_can_be_splitted==False):
                label_to_be_splitted = np.random.choice(gt_label_list)
                label_mask = (gt_labels == label_to_be_splitted)
                labels_found = np.unique(labels[label_mask])
                number_segments = len(labels_found)
                # Acceptable split: the watershed breaks this label into 2-59 pieces.
                if ((number_segments > 1) & (number_segments<60)):
                    print("label {} can be splitted in {} segments for molecule {} sample {} after {} iterations".format(label_to_be_splitted,number_segments,gt_entry['id'],i,it))
                    label_can_be_splitted = True
                # After roughly one pass over all gt labels, regenerate the watershed.
                if count > len(gt_label_list):
                    step = np.random.choice(range(steps_range[0],steps_range[1]+1),1, p=[0.4,0.4,0.2])
                    sigma = np.random.uniform(sigma_range[0], sigma_range[1],1)
                    print("Recomputing iteration {} on molecule {} with {} steps and {} sigma".format(it,gt_entry['id'], step[0], sigma[0]))
                    molecule_object.generateSegments(step[0], sigma[0])
                    labels = molecule_object.labels.astype(np.int32)
                    count = 0
                count += 1
            print("spliting label in {} segments with labels {}".format(number_segments, labels_found))
            np.random.shuffle(labels_found)
            rename_label_dict = {}
            count = len(gt_label_list)
            # The first (shuffled) piece keeps the original label; the others get fresh ids.
            for l in labels_found:
                if count==len(gt_label_list):
                    rename_label_dict[l]=label_to_be_splitted
                    count+=1
                else:
                    rename_label_dict[l] = count
                    count+=1
            print("Rename label dict {}".format(rename_label_dict))
            new_labels_object = copy.deepcopy(gt_object)
            new_labels = copy.deepcopy(gt_labels)
            # Split and assign
            for key in np.sort(list(rename_label_dict.keys())):
                mask = np.logical_and(labels==key, new_labels==label_to_be_splitted)
                print("Assigning label {} to {} voxels from gt".format(rename_label_dict[key],np.sum(mask)))
                new_labels[mask] = rename_label_dict[key]
            new_labels_object.setData(new_labels)
            segment_voxels_dict = {}
            segment_euler_dict = {}
            # Overlap metrics of the artificially split labelling vs the ground truth.
            iou = metrics.intersection_over_union(new_labels_object, gt_object)
            proportion = metrics.proportion(new_labels_object, gt_object)
            consistency = metrics.consistency(new_labels_object, gt_object)
            homogenity = metrics.homogenity(new_labels_object, gt_object)
            splitted_labels_props = regionprops(new_labels.astype(np.int32))
            for l in splitted_labels_props:
                segment_voxels_dict[l.label] = np.sum(new_labels == l.label)
                segment_euler_dict[l.label] = l.euler_number
            dict_to_append = {'id':gt_entry['id'], 'map_path':gt_entry['map_path'], 'contourLevel':gt_entry['contourLevel'], 'gt_path':gt_entry['tagged_path'], 'gt_subunits':gt_entry['subunits'], \
            'matched_subunits':len(splitted_labels_props),'step':int(round(step[0])), 'sigma':sigma[0], 'voxels':gt_entry['voxels'], 'voxels_assigned':json.dumps(segment_voxels_dict,default=convert), 'euler_segments':json.dumps(segment_euler_dict, default=convert),'iou':iou, 'proportion':proportion,\
            'consistency':consistency, 'homogenity':homogenity,'label':'good'}
            print(dict_to_append)
            output_df = output_df.append(dict_to_append, ignore_index=True)
    except Exception as e:
        print("Error computing good segmentation for {}: {}".format(gt_entry['id'],e))
        print(traceback.format_exc())
    return output_df
def applyWatershed(i, steps_range, sigma_range, resample_n, df):
    """Generate `resample_n` watershed segmentations of one map as 'bad' samples.

    For the df row at positional index `i`, the map density is masked by the
    ground-truth labels, segmented repeatedly with randomly drawn (step, sigma)
    parameters, and each result is scored against the ground truth.

    Args:
        i: positional index of the entry in `df`.
        steps_range: inclusive (lo, hi) integer range for the watershed step.
        sigma_range: (lo, hi) range for the Gaussian smoothing sigma.
        resample_n: number of random segmentations to generate.
        df: DataFrame with at least 'id', 'map_path', 'contourLevel',
            'tagged_path', 'subunits' and 'voxels' columns.

    Returns:
        pd.DataFrame: one row per generated sample, labelled 'bad'. On any
        failure the exception is printed (not raised) and the frame may be
        empty or partial.
    """
    gt_entry = df.iloc[i]
    output_df = pd.DataFrame(columns=['id','map_path','contourLevel','gt_path','gt_subunits','matched_subunits','step','sigma','voxels','voxels_assigned','euler_segments','iou', 'proportion', 'consistency', 'homogenity','label'])
    try:
        molecule_object = molecule.Molecule(gt_entry['map_path'], gt_entry['contourLevel'])
        gt_object = molecule.Molecule(gt_entry['tagged_path'], 0.001)
        molecule_density = molecule_object.getDataAtContour(1)
        gt_labels = gt_object.getDataAtContour(1)
        # Remove noise: zero out density everywhere the ground truth has no label.
        molecule_density[gt_labels==0] = 0
        molecule_object.setData(molecule_density)
        # Compute diferent segmentation results with watershed
        # NOTE(review): the loop variable shadows the function argument `i`;
        # harmless because gt_entry was already taken, but worth renaming.
        for i in range(resample_n):
            segmented_count = 61
            # Resample (step, sigma) until watershed yields between 2 and 60 segments.
            while((segmented_count>60) | (segmented_count==1)):
                step = np.random.choice(range(steps_range[0],steps_range[1]+1),1)
                sigma = np.random.uniform(sigma_range[0], sigma_range[1],1)
                molecule_object.generateSegments(step[0], sigma[0])
                labels = molecule_object.labels.astype(np.int32)
                label_props = regionprops(labels)
                segmented_count = len(label_props)
            new_labels_object = copy.deepcopy(gt_object)
            new_labels_object.setData(molecule_object.labels)
            segment_voxels_dict = {}
            segment_euler_dict = {}
            # Score the candidate segmentation against the ground truth.
            iou = metrics.intersection_over_union(new_labels_object, gt_object)
            proportion = metrics.proportion(new_labels_object, gt_object)
            consistency = metrics.consistency(new_labels_object, gt_object)
            homogenity = metrics.homogenity(new_labels_object, gt_object)
            # Per-segment voxel counts and Euler numbers, serialised to JSON below.
            for l in label_props:
                segment_voxels_dict[l.label] = np.sum(labels == l.label)
                segment_euler_dict[l.label] = l.euler_number
            dict_to_append = {'id':gt_entry['id'], 'map_path':gt_entry['map_path'], 'contourLevel':gt_entry['contourLevel'], 'gt_path':gt_entry['tagged_path'], 'gt_subunits':gt_entry['subunits'], \
            'matched_subunits':segmented_count,'step':int(round(step[0])), 'sigma':sigma[0], 'voxels':gt_entry['voxels'], 'voxels_assigned':json.dumps(segment_voxels_dict,default=convert), 'euler_segments':json.dumps(segment_euler_dict, default=convert),'iou':iou, 'proportion':proportion,\
            'consistency':consistency, 'homogenity':homogenity,'label':'bad'}
            print(dict_to_append)
            output_df = output_df.append(dict_to_append, ignore_index=True)
    except Exception as e:
        print("Error computing bad segmentation for {}: {}".format(gt_entry['id'],e))
        print(traceback.format_exc())
    return output_df
def generateBadSegmentation(df, steps_range, sigma_range, resample_n):
    """Distribute applyWatershed over MPI workers to build 'bad' samples.

    Args:
        df: ground-truth DataFrame; one task is submitted per index.
        steps_range: inclusive (lo, hi) step range, forwarded to applyWatershed.
        sigma_range: sigma range, forwarded to applyWatershed.
        resample_n: samples per map, forwarded to applyWatershed.

    Returns:
        pd.DataFrame concatenating every worker's result rows.
    """
    id_list = df.index.tolist()
    # Construct dataframe to store results
    output_df = pd.DataFrame(columns=['id', 'map_path', 'contourLevel', 'gt_path', 'gt_subunits',
                                      'matched_subunits', 'step', 'sigma', 'voxels', 'voxels_assigned',
                                      'euler_segments', 'iou', 'proportion', 'consistency',
                                      'homogenity', 'label'])
    print("Spawn processes...")
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    # Only the root rank gets a real executor; worker ranks see None.
    with MPICommExecutor(comm, root=0, worker_size=size) as executor:
        if executor is not None:
            # One task per map entry.
            futures = [executor.submit(applyWatershed, i, steps_range, sigma_range, resample_n, df)
                       for i in id_list]
            wait(futures)
            for future in futures:
                try:
                    res = future.result()
                    print("Received {}".format(res))
                    # DataFrame.append was removed in pandas 2.0; concat is the supported path.
                    output_df = pd.concat([output_df, res], ignore_index=True)
                except ValueError as error:
                    # Surface the actual error instead of swallowing it silently.
                    print("Error computing bad segments: {}".format(error))
    return output_df
def generateGoodSegmentation(df, steps_range, sigma_range, resample_n):
    """Distribute overlapAndSplit over MPI workers to build 'good' samples.

    Args:
        df: ground-truth DataFrame; one task is submitted per index.
        steps_range: inclusive (lo, hi) step range, forwarded to overlapAndSplit.
        sigma_range: sigma range, forwarded to overlapAndSplit.
        resample_n: samples per map, forwarded to overlapAndSplit.

    Returns:
        pd.DataFrame concatenating every worker's result rows.
    """
    id_list = df.index.tolist()
    # Construct dataframe to store results
    output_df = pd.DataFrame(columns=['id', 'map_path', 'contourLevel', 'gt_path', 'gt_subunits',
                                      'matched_subunits', 'step', 'sigma', 'voxels', 'voxels_assigned',
                                      'euler_segments', 'iou', 'proportion', 'consistency',
                                      'homogenity', 'label'])
    print("Spawn processes...")
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    # Only the root rank gets a real executor; worker ranks see None.
    with MPICommExecutor(comm, root=0, worker_size=size) as executor:
        if executor is not None:
            # One task per map entry; 4 is the offset argument of overlapAndSplit.
            futures = [executor.submit(overlapAndSplit, i, steps_range, sigma_range, resample_n, df, 4)
                       for i in id_list]
            wait(futures)
            for future in futures:
                try:
                    res = future.result()
                    print("Received {}".format(res))
                    # DataFrame.append was removed in pandas 2.0; concat is the supported path.
                    output_df = pd.concat([output_df, res], ignore_index=True)
                except ValueError as error:
                    # Fixed message: this path builds *good* segmentations.
                    print("Error computing good segments: {}".format(error))
    return output_df
def main():
    """Load the tagged dataset and generate 'good' segmentation samples."""
    tagged_df = pd.read_csv('dataset_exp_tagged.csv')
    # Keep only entries that have at least one matched subunit.
    ground_truth = tagged_df[tagged_df['matched_subunits'] > 0].reset_index()
    #segmented_df = generateBadSegmentation(ground_truth, [2,4], [1,3], 10)
    #segmented_df.to_csv('segmented_bad.csv', index = False)
    result = generateGoodSegmentation(ground_truth, [4, 6], [1, 4], 10)
    result.to_csv('segmented_good.csv', index=False)
if __name__ == '__main__':
main() | em/src/dataset/data_augmentation.py | from em import molecule
from em.dataset import metrics
from mpi4py import MPI
from mpi4py.futures import MPICommExecutor
from concurrent.futures import wait
import os
import argparse
import numpy as np
import pandas as pd
import copy
import json
from json import encoder
from skimage.measure import regionprops
import traceback
def convert(o):
    """json.dumps `default` hook: unwrap NumPy scalars to native Python values."""
    if not isinstance(o, np.generic):
        raise TypeError
    return o.item()
def overlapAndSplit(i, steps_range, sigma_range, resample_n, df, offset):
    """Create 'good' segmentation samples by splitting one ground-truth label.

    For the df row at positional index `i`: first records the ground truth
    scored against itself, then `resample_n` times runs watershed with random
    (step, sigma), picks a GT label that the watershed divides into 2-59
    pieces, and renames those pieces so exactly one GT label is split. Each
    modified labelling is scored against the original ground truth.

    Args:
        i: positional index of the entry in `df`.
        steps_range: inclusive (lo, hi) integer range for the watershed step.
            NOTE(review): np.random.choice is called with p=[0.4, 0.4, 0.2],
            which assumes the range spans exactly three values -- confirm
            against callers.
        sigma_range: (lo, hi) range for the Gaussian smoothing sigma.
        resample_n: number of split samples to generate.
        df: ground-truth DataFrame ('id', 'map_path', 'contourLevel',
            'tagged_path', 'subunits', 'voxels', 'matched_subunits', ...).
        offset: unused -- NOTE(review): dead parameter, kept for callers.

    Returns:
        pd.DataFrame labelled 'good': the ground-truth row plus one row per
        sample. Exceptions are printed, not raised.
    """
    gt_entry = df.iloc[i]
    output_df = pd.DataFrame(columns=['id','map_path','contourLevel','gt_path','gt_subunits','matched_subunits','step','sigma','voxels','voxels_assigned','euler_segments','iou', 'proportion', 'consistency', 'homogenity','label'])
    try:
        molecule_object = molecule.Molecule(gt_entry['map_path'], gt_entry['contourLevel'])
        gt_object = molecule.Molecule(gt_entry['tagged_path'], 0.001)
        molecule_density = molecule_object.getDataAtContour(1)
        gt_labels = gt_object.getDataAtContour(1)
        number_gt_segments = gt_entry['matched_subunits']
        # Remove noise: zero out density everywhere the ground truth has no label.
        molecule_density[gt_labels==0] = 0
        molecule_object.setData(molecule_density)
        # First append ground truth to result dataframe (scored against itself,
        # so these metrics are the best-case reference values).
        iou_gt = metrics.intersection_over_union(gt_object,gt_object)
        proportion_gt = metrics.proportion(gt_object, gt_object)
        homogenity_gt = metrics.homogenity(gt_object, gt_object)
        consistency_gt = metrics.consistency(gt_object, gt_object)
        output_df = output_df.append({'id':gt_entry['id'],'map_path':gt_entry['map_path'], 'contourLevel':gt_entry['contourLevel'], 'gt_path':gt_entry['tagged_path'], \
        'gt_subunits':gt_entry['subunits'], 'matched_subunits':gt_entry['matched_subunits'], 'step':0, 'sigma':0, 'voxels':gt_entry['voxels'],\
        'voxels_assigned':gt_entry['voxels_assigned'], 'euler_segments':gt_entry['euler_segments'], 'iou':iou_gt, 'proportion':proportion_gt, \
        'consistency':consistency_gt, 'homogenity':homogenity_gt,'label':'good'},ignore_index=True)
        # Compute diferent segmentation results with watershed
        for i in range(resample_n):
            segmented_count = 1
            it = 1
            # Resample (step, sigma) until watershed yields more than one segment.
            while(segmented_count==1):
                step = np.random.choice(range(steps_range[0],steps_range[1]+1),1, p=[0.4,0.4,0.2])
                sigma = np.random.uniform(sigma_range[0], sigma_range[1],1)
                print("iteration {} on molecule {} with {} steps and {} sigma".format(it,gt_entry['id'], step[0], sigma[0]))
                molecule_object.generateSegments(step[0], sigma[0])
                labels = molecule_object.labels.astype(np.int32)
                label_props = regionprops(labels)
                segmented_count = len(label_props)
                it+=1
            # Get gt labels and random choose one to split
            gt_label_props = regionprops(gt_labels.astype(np.int32))
            gt_label_list = [ l.label for l in gt_label_props ]
            label_can_be_splitted = False
            count = 0
            # Keep drawing GT labels until one is covered by 2-59 watershed segments.
            while(label_can_be_splitted==False):
                label_to_be_splitted = np.random.choice(gt_label_list)
                label_mask = (gt_labels == label_to_be_splitted)
                labels_found = np.unique(labels[label_mask])
                number_segments = len(labels_found)
                if ((number_segments > 1) & (number_segments<60)):
                    print("label {} can be splitted in {} segments for molecule {} sample {} after {} iterations".format(label_to_be_splitted,number_segments,gt_entry['id'],i,it))
                    label_can_be_splitted = True
                # If every GT label has been tried without success, rerun the
                # watershed with fresh parameters and start over.
                if count > len(gt_label_list):
                    step = np.random.choice(range(steps_range[0],steps_range[1]+1),1, p=[0.4,0.4,0.2])
                    sigma = np.random.uniform(sigma_range[0], sigma_range[1],1)
                    print("Recomputing iteration {} on molecule {} with {} steps and {} sigma".format(it,gt_entry['id'], step[0], sigma[0]))
                    molecule_object.generateSegments(step[0], sigma[0])
                    labels = molecule_object.labels.astype(np.int32)
                    count = 0
                count += 1
            print("spliting label in {} segments with labels {}".format(number_segments, labels_found))
            np.random.shuffle(labels_found)
            # Map watershed segment ids to new GT label ids: the first keeps the
            # original label value, the rest get fresh ids above the GT range.
            rename_label_dict = {}
            count = len(gt_label_list)
            for l in labels_found:
                if count==len(gt_label_list):
                    rename_label_dict[l]=label_to_be_splitted
                    count+=1
                else:
                    rename_label_dict[l] = count
                    count+=1
            print("Rename label dict {}".format(rename_label_dict))
            new_labels_object = copy.deepcopy(gt_object)
            new_labels = copy.deepcopy(gt_labels)
            # Split and assign
            for key in np.sort(list(rename_label_dict.keys())):
                mask = np.logical_and(labels==key, new_labels==label_to_be_splitted)
                print("Assigning label {} to {} voxels from gt".format(rename_label_dict[key],np.sum(mask)))
                new_labels[mask] = rename_label_dict[key]
            new_labels_object.setData(new_labels)
            segment_voxels_dict = {}
            segment_euler_dict = {}
            # Score the split labelling against the original ground truth.
            iou = metrics.intersection_over_union(new_labels_object, gt_object)
            proportion = metrics.proportion(new_labels_object, gt_object)
            consistency = metrics.consistency(new_labels_object, gt_object)
            homogenity = metrics.homogenity(new_labels_object, gt_object)
            splitted_labels_props = regionprops(new_labels.astype(np.int32))
            for l in splitted_labels_props:
                segment_voxels_dict[l.label] = np.sum(new_labels == l.label)
                segment_euler_dict[l.label] = l.euler_number
            dict_to_append = {'id':gt_entry['id'], 'map_path':gt_entry['map_path'], 'contourLevel':gt_entry['contourLevel'], 'gt_path':gt_entry['tagged_path'], 'gt_subunits':gt_entry['subunits'], \
            'matched_subunits':len(splitted_labels_props),'step':int(round(step[0])), 'sigma':sigma[0], 'voxels':gt_entry['voxels'], 'voxels_assigned':json.dumps(segment_voxels_dict,default=convert), 'euler_segments':json.dumps(segment_euler_dict, default=convert),'iou':iou, 'proportion':proportion,\
            'consistency':consistency, 'homogenity':homogenity,'label':'good'}
            print(dict_to_append)
            output_df = output_df.append(dict_to_append, ignore_index=True)
    except Exception as e:
        print("Error computing good segmentation for {}: {}".format(gt_entry['id'],e))
        print(traceback.format_exc())
    return output_df
def applyWatershed(i, steps_range, sigma_range, resample_n, df):
    """Generate `resample_n` watershed segmentations of one map as 'bad' samples.

    For the df row at positional index `i`, the map density is masked by the
    ground-truth labels, segmented repeatedly with randomly drawn (step, sigma)
    parameters, and each result is scored against the ground truth.

    Args:
        i: positional index of the entry in `df`.
        steps_range: inclusive (lo, hi) integer range for the watershed step.
        sigma_range: (lo, hi) range for the Gaussian smoothing sigma.
        resample_n: number of random segmentations to generate.
        df: DataFrame with at least 'id', 'map_path', 'contourLevel',
            'tagged_path', 'subunits' and 'voxels' columns.

    Returns:
        pd.DataFrame: one row per generated sample, labelled 'bad'. On any
        failure the exception is printed (not raised) and the frame may be
        empty or partial.
    """
    gt_entry = df.iloc[i]
    output_df = pd.DataFrame(columns=['id','map_path','contourLevel','gt_path','gt_subunits','matched_subunits','step','sigma','voxels','voxels_assigned','euler_segments','iou', 'proportion', 'consistency', 'homogenity','label'])
    try:
        molecule_object = molecule.Molecule(gt_entry['map_path'], gt_entry['contourLevel'])
        gt_object = molecule.Molecule(gt_entry['tagged_path'], 0.001)
        molecule_density = molecule_object.getDataAtContour(1)
        gt_labels = gt_object.getDataAtContour(1)
        # Remove noise: zero out density everywhere the ground truth has no label.
        molecule_density[gt_labels==0] = 0
        molecule_object.setData(molecule_density)
        # Compute diferent segmentation results with watershed
        # NOTE(review): the loop variable shadows the function argument `i`;
        # harmless because gt_entry was already taken, but worth renaming.
        for i in range(resample_n):
            segmented_count = 61
            # Resample (step, sigma) until watershed yields between 2 and 60 segments.
            while((segmented_count>60) | (segmented_count==1)):
                step = np.random.choice(range(steps_range[0],steps_range[1]+1),1)
                sigma = np.random.uniform(sigma_range[0], sigma_range[1],1)
                molecule_object.generateSegments(step[0], sigma[0])
                labels = molecule_object.labels.astype(np.int32)
                label_props = regionprops(labels)
                segmented_count = len(label_props)
            new_labels_object = copy.deepcopy(gt_object)
            new_labels_object.setData(molecule_object.labels)
            segment_voxels_dict = {}
            segment_euler_dict = {}
            # Score the candidate segmentation against the ground truth.
            iou = metrics.intersection_over_union(new_labels_object, gt_object)
            proportion = metrics.proportion(new_labels_object, gt_object)
            consistency = metrics.consistency(new_labels_object, gt_object)
            homogenity = metrics.homogenity(new_labels_object, gt_object)
            # Per-segment voxel counts and Euler numbers, serialised to JSON below.
            for l in label_props:
                segment_voxels_dict[l.label] = np.sum(labels == l.label)
                segment_euler_dict[l.label] = l.euler_number
            dict_to_append = {'id':gt_entry['id'], 'map_path':gt_entry['map_path'], 'contourLevel':gt_entry['contourLevel'], 'gt_path':gt_entry['tagged_path'], 'gt_subunits':gt_entry['subunits'], \
            'matched_subunits':segmented_count,'step':int(round(step[0])), 'sigma':sigma[0], 'voxels':gt_entry['voxels'], 'voxels_assigned':json.dumps(segment_voxels_dict,default=convert), 'euler_segments':json.dumps(segment_euler_dict, default=convert),'iou':iou, 'proportion':proportion,\
            'consistency':consistency, 'homogenity':homogenity,'label':'bad'}
            print(dict_to_append)
            output_df = output_df.append(dict_to_append, ignore_index=True)
    except Exception as e:
        print("Error computing bad segmentation for {}: {}".format(gt_entry['id'],e))
        print(traceback.format_exc())
    return output_df
def generateBadSegmentation(df, steps_range, sigma_range, resample_n):
    """Distribute applyWatershed over MPI workers to build 'bad' samples.

    Args:
        df: ground-truth DataFrame; one task is submitted per index.
        steps_range: inclusive (lo, hi) step range, forwarded to applyWatershed.
        sigma_range: sigma range, forwarded to applyWatershed.
        resample_n: samples per map, forwarded to applyWatershed.

    Returns:
        pd.DataFrame concatenating every worker's result rows.
    """
    id_list = df.index.tolist()
    # Construct dataframe to store results
    output_df = pd.DataFrame(columns=['id', 'map_path', 'contourLevel', 'gt_path', 'gt_subunits',
                                      'matched_subunits', 'step', 'sigma', 'voxels', 'voxels_assigned',
                                      'euler_segments', 'iou', 'proportion', 'consistency',
                                      'homogenity', 'label'])
    print("Spawn processes...")
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    # Only the root rank gets a real executor; worker ranks see None.
    with MPICommExecutor(comm, root=0, worker_size=size) as executor:
        if executor is not None:
            # One task per map entry.
            futures = [executor.submit(applyWatershed, i, steps_range, sigma_range, resample_n, df)
                       for i in id_list]
            wait(futures)
            for future in futures:
                try:
                    res = future.result()
                    print("Received {}".format(res))
                    # DataFrame.append was removed in pandas 2.0; concat is the supported path.
                    output_df = pd.concat([output_df, res], ignore_index=True)
                except ValueError as error:
                    # Surface the actual error instead of swallowing it silently.
                    print("Error computing bad segments: {}".format(error))
    return output_df
def generateGoodSegmentation(df, steps_range, sigma_range, resample_n):
    """Distribute overlapAndSplit over MPI workers to build 'good' samples.

    Args:
        df: ground-truth DataFrame; one task is submitted per index.
        steps_range: inclusive (lo, hi) step range, forwarded to overlapAndSplit.
        sigma_range: sigma range, forwarded to overlapAndSplit.
        resample_n: samples per map, forwarded to overlapAndSplit.

    Returns:
        pd.DataFrame concatenating every worker's result rows.
    """
    id_list = df.index.tolist()
    # Construct dataframe to store results
    output_df = pd.DataFrame(columns=['id', 'map_path', 'contourLevel', 'gt_path', 'gt_subunits',
                                      'matched_subunits', 'step', 'sigma', 'voxels', 'voxels_assigned',
                                      'euler_segments', 'iou', 'proportion', 'consistency',
                                      'homogenity', 'label'])
    print("Spawn processes...")
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    # Only the root rank gets a real executor; worker ranks see None.
    with MPICommExecutor(comm, root=0, worker_size=size) as executor:
        if executor is not None:
            # One task per map entry; 4 is the offset argument of overlapAndSplit.
            futures = [executor.submit(overlapAndSplit, i, steps_range, sigma_range, resample_n, df, 4)
                       for i in id_list]
            wait(futures)
            for future in futures:
                try:
                    res = future.result()
                    print("Received {}".format(res))
                    # DataFrame.append was removed in pandas 2.0; concat is the supported path.
                    output_df = pd.concat([output_df, res], ignore_index=True)
                except ValueError as error:
                    # Fixed message: this path builds *good* segmentations.
                    print("Error computing good segments: {}".format(error))
    return output_df
def main():
    """Load the tagged dataset and generate 'good' segmentation samples."""
    tagged_df = pd.read_csv('dataset_exp_tagged.csv')
    # Keep only entries that have at least one matched subunit.
    ground_truth = tagged_df[tagged_df['matched_subunits'] > 0].reset_index()
    #segmented_df = generateBadSegmentation(ground_truth, [2,4], [1,3], 10)
    #segmented_df.to_csv('segmented_bad.csv', index = False)
    result = generateGoodSegmentation(ground_truth, [4, 6], [1, 4], 10)
    result.to_csv('segmented_good.csv', index=False)
if __name__ == '__main__':
main() | 0.351089 | 0.297291 |
from django.contrib.auth import authenticate
from django import forms
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.urls import reverse
from django.utils.safestring import mark_safe
from .models import Profile, Device
from phonenumber_field.formfields import PhoneNumberField
from .verification import Verificator
User = get_user_model()
class UserRegistrationForm(forms.ModelForm):
    """Sign-up form collecting the basic account fields plus a repeated
    password for confirmation."""
    password = forms.CharField(label='Password', widget=forms.PasswordInput)
    password2 = forms.CharField(label='Repeat password', widget=forms.PasswordInput)

    class Meta:
        model = User
        fields = ('username', 'phone', 'email')

    def clean_password2(self):
        """Reject the form when the two password entries differ."""
        data = self.cleaned_data
        if data['password'] != data['password2']:
            raise forms.ValidationError('Passwords don\'t match.')
        return data['password2']
class UserEditForm(forms.ModelForm):
    """Account-edit form exposing the basic fields of the custom User model."""
    class Meta:
        model = User
        fields = ('username', 'first_name', 'last_name', 'email', 'phone')
class ProfileEditForm(forms.ModelForm):
    """Edit the extended profile data (birth date and avatar photo)."""
    class Meta:
        model = Profile
        fields = ('date_of_birth', 'photo')
        widgets = {
            # Native HTML5 date picker instead of Django's default text input.
            'date_of_birth': forms.DateInput(attrs={'type': 'date', 'class': ''}),
            # 'photo':forms.ClearableFileInput(attrs={'class':''})
        }
class LoginForm(forms.Form):
    """Login by username, email or phone; clean() validates credentials,
    email verification and account state."""
    username = forms.CharField(max_length=255, required=True,
                               help_text='You can use username, email or phone number',
                               widget=forms.TextInput(
                                   attrs={'class': ''}))
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': ''}), required=True)

    def __init__(self, *args, **kwargs):
        # Keep the authenticated user so views can reuse it after validation.
        self.cached_user = None
        super(LoginForm, self).__init__(*args, **kwargs)

    def clean(self, **kwargs):
        """Authenticate and enforce verification/active-state rules.

        Raises:
            forms.ValidationError: codes 'invalid', 'unverified' or 'suspended'.
        """
        super().clean()
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        user = authenticate(username=username, password=password)
        self.cached_user = user
        if not user:
            raise forms.ValidationError("Sorry, that login was invalid. Please try again.", code='invalid')
        if not user.profile.email_verified:
            # mark_safe so the verification link renders as HTML, consistent
            # with TokenVerificationForm; previously the anchor was escaped.
            raise forms.ValidationError(
                mark_safe('Please verify your email - <a href="{}"> Request a verification Link here.</a> '.format(
                    reverse('auth:verification_request', kwargs={'pk': user.pk}))), code='unverified')
        if not user.is_active and user.profile.email_verified:
            raise forms.ValidationError("Your account is locked out - please contact support.", code="suspended")
        return self.cleaned_data

    def login(self, request):
        """Return the authenticated user for this form's credentials (or None)."""
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        user = authenticate(username=username, password=password)
        return user
class PhoneVerificationForm(forms.ModelForm):
    """Asks for a phone number; valid only if a registered user owns it."""
    phone = PhoneNumberField()

    class Meta:
        model = Profile
        fields = ('phone',)

    def clean_phone(self):
        """Ensure the submitted number belongs to an existing user."""
        data = super().clean()
        number = data.get("phone")
        try:
            User.objects.get(phone=number)
        except User.DoesNotExist as err:
            # self.add_error('phone', err)
            raise ValidationError(err)
        return number
class TokenVerificationForm(forms.Form):
    """Form for the 4-digit SMS verification code.

    Validation happens in is_valid() against the Verificator service rather
    than in clean(), because the check needs the request session.
    """
    token = forms.CharField(
        required=True,
        widget=forms.NumberInput(
            # dirty native javascript hook
            attrs={'pattern': '/^-?\d+\.?\d*$/', 'onKeyPress': 'if(this.value.length==4) return false;'}
        )
    )
    def __init__(self, request, *args, **kwargs):
        # The request is kept so the verificator can read the session state.
        super(TokenVerificationForm, self).__init__(*args, **kwargs)
        self.request = request
    def is_valid(self):
        # Writes straight into self._errors so the messages surface like
        # normal field errors even though validation happens outside clean().
        try:
            verificator = Verificator(self.request)
            if verificator.fetch_user_phone_from_session():
                if verificator.check_token():
                    return True
                else:
                    self._errors = {'token': ['Invalid token']}
            else:
                self._errors = {'token': [
                    mark_safe('Expired request, <a href="{}">Request a new Token</a>'.format(reverse('auth:verify_phone')))]}
        except KeyError:
            # Session keys missing: the verification flow was never started
            # (or expired) -- ask the user to request a fresh code.
            self._errors = {'token': ['Invalid request, please request another code']}
        return super(TokenVerificationForm, self).is_valid()
class TrustedDeviceForm(forms.ModelForm):
class Meta:
model = Device
fields = ['trusted']
widgets = {
'trusted': forms.HiddenInput()
} | accounts/forms.py | from django.contrib.auth import authenticate
from django import forms
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.urls import reverse
from django.utils.safestring import mark_safe
from .models import Profile, Device
from phonenumber_field.formfields import PhoneNumberField
from .verification import Verificator
User = get_user_model()
class UserRegistrationForm(forms.ModelForm):
    """Sign-up form collecting the basic account fields plus a repeated
    password for confirmation."""
    password = forms.CharField(label='Password', widget=forms.PasswordInput)
    password2 = forms.CharField(label='Repeat password', widget=forms.PasswordInput)

    class Meta:
        model = User
        fields = ('username', 'phone', 'email')

    def clean_password2(self):
        """Reject the form when the two password entries differ."""
        data = self.cleaned_data
        if data['password'] != data['password2']:
            raise forms.ValidationError('Passwords don\'t match.')
        return data['password2']
class UserEditForm(forms.ModelForm):
    """Account-edit form exposing the basic fields of the custom User model."""
    class Meta:
        model = User
        fields = ('username', 'first_name', 'last_name', 'email', 'phone')
class ProfileEditForm(forms.ModelForm):
    """Edit the extended profile data (birth date and avatar photo)."""
    class Meta:
        model = Profile
        fields = ('date_of_birth', 'photo')
        widgets = {
            # Native HTML5 date picker instead of Django's default text input.
            'date_of_birth': forms.DateInput(attrs={'type': 'date', 'class': ''}),
            # 'photo':forms.ClearableFileInput(attrs={'class':''})
        }
class LoginForm(forms.Form):
    """Login by username, email or phone; clean() validates credentials,
    email verification and account state."""
    username = forms.CharField(max_length=255, required=True,
                               help_text='You can use username, email or phone number',
                               widget=forms.TextInput(
                                   attrs={'class': ''}))
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': ''}), required=True)

    def __init__(self, *args, **kwargs):
        # Keep the authenticated user so views can reuse it after validation.
        self.cached_user = None
        super(LoginForm, self).__init__(*args, **kwargs)

    def clean(self, **kwargs):
        """Authenticate and enforce verification/active-state rules.

        Raises:
            forms.ValidationError: codes 'invalid', 'unverified' or 'suspended'.
        """
        super().clean()
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        user = authenticate(username=username, password=password)
        self.cached_user = user
        if not user:
            raise forms.ValidationError("Sorry, that login was invalid. Please try again.", code='invalid')
        if not user.profile.email_verified:
            # mark_safe so the verification link renders as HTML, consistent
            # with TokenVerificationForm; previously the anchor was escaped.
            raise forms.ValidationError(
                mark_safe('Please verify your email - <a href="{}"> Request a verification Link here.</a> '.format(
                    reverse('auth:verification_request', kwargs={'pk': user.pk}))), code='unverified')
        if not user.is_active and user.profile.email_verified:
            raise forms.ValidationError("Your account is locked out - please contact support.", code="suspended")
        return self.cleaned_data

    def login(self, request):
        """Return the authenticated user for this form's credentials (or None)."""
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        user = authenticate(username=username, password=password)
        return user
class PhoneVerificationForm(forms.ModelForm):
    """Asks for a phone number; valid only if a registered user owns it."""
    phone = PhoneNumberField()

    class Meta:
        model = Profile
        fields = ('phone',)

    def clean_phone(self):
        """Ensure the submitted number belongs to an existing user."""
        data = super().clean()
        number = data.get("phone")
        try:
            User.objects.get(phone=number)
        except User.DoesNotExist as err:
            # self.add_error('phone', err)
            raise ValidationError(err)
        return number
class TokenVerificationForm(forms.Form):
    """Form for the 4-digit SMS verification code.

    Validation happens in is_valid() against the Verificator service rather
    than in clean(), because the check needs the request session.
    """
    token = forms.CharField(
        required=True,
        widget=forms.NumberInput(
            # dirty native javascript hook
            attrs={'pattern': '/^-?\d+\.?\d*$/', 'onKeyPress': 'if(this.value.length==4) return false;'}
        )
    )
    def __init__(self, request, *args, **kwargs):
        # The request is kept so the verificator can read the session state.
        super(TokenVerificationForm, self).__init__(*args, **kwargs)
        self.request = request
    def is_valid(self):
        # Writes straight into self._errors so the messages surface like
        # normal field errors even though validation happens outside clean().
        try:
            verificator = Verificator(self.request)
            if verificator.fetch_user_phone_from_session():
                if verificator.check_token():
                    return True
                else:
                    self._errors = {'token': ['Invalid token']}
            else:
                self._errors = {'token': [
                    mark_safe('Expired request, <a href="{}">Request a new Token</a>'.format(reverse('auth:verify_phone')))]}
        except KeyError:
            # Session keys missing: the verification flow was never started
            # (or expired) -- ask the user to request a fresh code.
            self._errors = {'token': ['Invalid request, please request another code']}
        return super(TokenVerificationForm, self).is_valid()
class TrustedDeviceForm(forms.ModelForm):
class Meta:
model = Device
fields = ['trusted']
widgets = {
'trusted': forms.HiddenInput()
} | 0.53048 | 0.090333 |
import os
from urllib.request import urlopen
import requests
import tarfile
import pandas as pd
from typing import List
from tqdm import tqdm
from microsim.column_names import ColumnNames
class Optimise:
"""
Functions to optimise the memory use of pandas dataframes.
From https://medium.com/bigdatarepublic/advanced-pandas-optimize-speed-and-memory-a654b53be6c2
"""
@staticmethod
def optimize(df: pd.DataFrame, datetime_features: List[str] = []):
return Optimise._optimize_floats(Optimise._optimize_ints(Optimise._optimize_objects(df, datetime_features)))
@staticmethod
def _optimize_floats(df: pd.DataFrame) -> pd.DataFrame:
floats = df.select_dtypes(include=['float64']).columns.tolist()
df[floats] = df[floats].apply(pd.to_numeric, downcast='float')
return df
@staticmethod
def _optimize_ints(df: pd.DataFrame) -> pd.DataFrame:
ints = df.select_dtypes(include=['int64']).columns.tolist()
df[ints] = df[ints].apply(pd.to_numeric, downcast='integer')
return df
@staticmethod
def _optimize_objects(df: pd.DataFrame, datetime_features: List[str]) -> pd.DataFrame:
for col in df.select_dtypes(include=['object']):
if col not in datetime_features:
num_unique_values = len(df[col].unique())
num_total_values = len(df[col])
if float(num_unique_values) / num_total_values < 0.5:
df[col] = df[col].astype('category')
else:
df[col] = pd.to_datetime(df[col])
return df
def check_durations_sum_to_1(individuals, activities):
    """Assert that each individual's activity durations sum to 1.0.

    Prints and raises with the offending rows otherwise.
    """
    # Start from a plain list of zeros; the first addition with a pandas
    # Series broadcasts elementwise and yields a Series accumulator.
    total = [0.0] * len(individuals)
    for act in activities:
        total = total + individuals.loc[:, f"{act}{ColumnNames.ACTIVITY_DURATION}"]
    # Round to 5 decimals to tolerate float accumulation error.
    if not (total.apply(lambda v: round(v, 5)) == 1.0).all():
        print("Some activity durations don't sum to 1", flush=True)
        print(total[total != 1.0], flush=True)
        raise Exception("Some activity durations don't sum to 1")
# data fetching functions
def download_data(url : str):
    """Download the archive at `url` into the current directory.

    Streams the response in 1 KiB chunks while showing a tqdm progress bar.

    Args:
        url (str): URL of an archive file, e.g.
            "https://ramp0storage.blob.core.windows.net/rampdata/devon_data.tar.gz".

    Returns:
        str: local filename the archive was saved to (last path segment of
        the URL). If the status code is not 200, nothing is written but the
        filename is still returned -- callers should check the file exists.
    """
    response = requests.get(url, stream=True)
    # specify target_path as name of tarfile downloaded by splitting url
    # and retrieving last item
    target_path = os.path.join(url.split('/')[-1])
    # Create a progress bar
    # NOTE(review): this opens the URL a second time just to read
    # Content-Length; -1 (unknown size) disables the bar total.
    file_size = int(urlopen(url).info().get('Content-Length', -1))
    pbar = tqdm(total=file_size, initial=0, unit='B', unit_scale=True, desc=url.split('/')[-1])
    if response.status_code == 200:
        with open(target_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
                    # NOTE(review): assumes every chunk is exactly 1024 bytes;
                    # the final chunk may be smaller, slightly overcounting.
                    pbar.update(1024)
    pbar.close()
    return target_path
def unpack_data(archive : str):
    """Unpack a tar archive into the current working directory.

    Args:
        archive (str): path to the tar archive (any compression
            tarfile auto-detects).
    """
    # Context manager guarantees the archive handle is closed even if
    # extraction fails (the original leaked the open tarfile).
    with tarfile.open(archive) as tar_file:
        tar_file.extractall(".")
def data_setup(url : str = "https://ramp0storage.blob.core.windows.net/rampdata/devon_data.tar.gz"):
"""A wrapper function for downloading and unpacking Azure stored devon_data
Args:
archive (str): A string directory path to archive file using
url (str, optional): A url to an archive file. Defaults to "https://ramp0storage.blob.core.windows.net/rampdata/devon_data.tar.gz".
"""
archive_file = download_data(url = url)
unpack_data(archive = archive_file) | microsim/utilities.py | import os
from urllib.request import urlopen
import requests
import tarfile
import pandas as pd
from typing import List
from tqdm import tqdm
from microsim.column_names import ColumnNames
class Optimise:
"""
Functions to optimise the memory use of pandas dataframes.
From https://medium.com/bigdatarepublic/advanced-pandas-optimize-speed-and-memory-a654b53be6c2
"""
@staticmethod
def optimize(df: pd.DataFrame, datetime_features: List[str] = []):
return Optimise._optimize_floats(Optimise._optimize_ints(Optimise._optimize_objects(df, datetime_features)))
@staticmethod
def _optimize_floats(df: pd.DataFrame) -> pd.DataFrame:
floats = df.select_dtypes(include=['float64']).columns.tolist()
df[floats] = df[floats].apply(pd.to_numeric, downcast='float')
return df
@staticmethod
def _optimize_ints(df: pd.DataFrame) -> pd.DataFrame:
ints = df.select_dtypes(include=['int64']).columns.tolist()
df[ints] = df[ints].apply(pd.to_numeric, downcast='integer')
return df
@staticmethod
def _optimize_objects(df: pd.DataFrame, datetime_features: List[str]) -> pd.DataFrame:
for col in df.select_dtypes(include=['object']):
if col not in datetime_features:
num_unique_values = len(df[col].unique())
num_total_values = len(df[col])
if float(num_unique_values) / num_total_values < 0.5:
df[col] = df[col].astype('category')
else:
df[col] = pd.to_datetime(df[col])
return df
def check_durations_sum_to_1(individuals, activities):
    """Assert that each individual's activity durations sum to 1.0.

    Prints and raises with the offending rows otherwise.
    """
    # Start from a plain list of zeros; the first addition with a pandas
    # Series broadcasts elementwise and yields a Series accumulator.
    total = [0.0] * len(individuals)
    for act in activities:
        total = total + individuals.loc[:, f"{act}{ColumnNames.ACTIVITY_DURATION}"]
    # Round to 5 decimals to tolerate float accumulation error.
    if not (total.apply(lambda v: round(v, 5)) == 1.0).all():
        print("Some activity durations don't sum to 1", flush=True)
        print(total[total != 1.0], flush=True)
        raise Exception("Some activity durations don't sum to 1")
# data fetching functions
def download_data(url : str):
    """Download the archive at `url` into the current directory.

    Streams the response in 1 KiB chunks while showing a tqdm progress bar.

    Args:
        url (str): URL of an archive file, e.g.
            "https://ramp0storage.blob.core.windows.net/rampdata/devon_data.tar.gz".

    Returns:
        str: local filename the archive was saved to (last path segment of
        the URL). If the status code is not 200, nothing is written but the
        filename is still returned -- callers should check the file exists.
    """
    response = requests.get(url, stream=True)
    # specify target_path as name of tarfile downloaded by splitting url
    # and retrieving last item
    target_path = os.path.join(url.split('/')[-1])
    # Create a progress bar
    # NOTE(review): this opens the URL a second time just to read
    # Content-Length; -1 (unknown size) disables the bar total.
    file_size = int(urlopen(url).info().get('Content-Length', -1))
    pbar = tqdm(total=file_size, initial=0, unit='B', unit_scale=True, desc=url.split('/')[-1])
    if response.status_code == 200:
        with open(target_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
                    # NOTE(review): assumes every chunk is exactly 1024 bytes;
                    # the final chunk may be smaller, slightly overcounting.
                    pbar.update(1024)
    pbar.close()
    return target_path
def unpack_data(archive : str):
    """Unpack a tar archive into the current working directory.

    Args:
        archive (str): path to the tar archive (any compression
            tarfile auto-detects).
    """
    # Context manager guarantees the archive handle is closed even if
    # extraction fails (the original leaked the open tarfile).
    with tarfile.open(archive) as tar_file:
        tar_file.extractall(".")
def data_setup(url : str = "https://ramp0storage.blob.core.windows.net/rampdata/devon_data.tar.gz"):
"""A wrapper function for downloading and unpacking Azure stored devon_data
Args:
archive (str): A string directory path to archive file using
url (str, optional): A url to an archive file. Defaults to "https://ramp0storage.blob.core.windows.net/rampdata/devon_data.tar.gz".
"""
archive_file = download_data(url = url)
unpack_data(archive = archive_file) | 0.710628 | 0.371593 |
import numpy as np
class WeightInitializer:
    """Base class providing fan-in/fan-out computation for weight initializers."""

    def compute_fans(self, shape):
        """Return (fan_in, fan_out) for a weight tensor of the given shape.

        Adapted from keras (https://github.com/fchollet/keras/blob/master/keras/initializers.py),
        copyright fchollet (keras-team), 2017, MIT licence.

        Kernel shape: ('NF': total filters, 'CF': filter channels,
        'HF': filter height, 'WF': filter width).
        NOTE(review): the receptive field size is taken from shape[:2] rather
        than keras' shape[2:] -- confirm this matches the kernel layout used
        by the layers in this project.
        """
        # Promote a 1-D shape (n,) to (n, 1) so the indexing below is uniform.
        if len(shape) == 1:
            shape = (shape[0], 1)
        receptive_field_size = np.prod(shape[:2])
        fan_out = shape[0] * receptive_field_size
        fan_in = shape[1] * receptive_field_size
        return fan_in, fan_out
class HeNormal(WeightInitializer):
    """
    **He Normal (HeNormal)**

    Gaussian-distribution variant of He initialization, which accounts for
    rectifier nonlinearities by scaling the standard deviation with
    sqrt(2 / fan_in).

    References:
    [1] Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification
        * [Kaiming He, 2015] https://arxiv.org/abs/1502.01852
        * [PDF] https://arxiv.org/pdf/1502.01852.pdf

    [2] Initialization Of Deep Networks Case of Rectifiers
        * [DeepGrid Article - Jefkine Kafunah] https://goo.gl/TBNw5t
    """

    def weights(self, shape, random_seed):
        """Sample weights of `shape` from N(0, sqrt(2/fan_in)) with the given seed."""
        fan_in, _ = self.compute_fans(shape)
        std = np.sqrt(2. / fan_in)
        np.random.seed(random_seed)
        return np.random.normal(loc=0.0, scale=std, size=shape)

    @property
    def init_name(self):
        """Identifier string: the concrete class name."""
        return self.__class__.__name__
class HeUniform(WeightInitializer):
    """
    **He Uniform (HeUniform)**

    HeUniform is a robust initialization method that particularly considers the
    rectifier nonlinearities. He uniform is an implementation based on Uniform
    distribution

    References:
    [1] Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification
        * [Kaiming He, 2015] https://arxiv.org/abs/1502.01852
        * [PDF] https://arxiv.org/pdf/1502.01852.pdf

    [2] Initialization Of Deep Networks Case of Rectifiers
        * [DeepGrid Article - Jefkine Kafunah] https://goo.gl/TBNw5t
    """
    def weights(self, shape, random_seed):
        # Draw from U(-limit, limit) with limit = sqrt(6 / fan_in).
        fan_in, fan_out = self.compute_fans(shape)
        scale = np.sqrt(6. / fan_in)
        np.random.seed(random_seed)
        return np.random.uniform(low = -scale, high = scale, size = shape)
    @property
    def init_name(self):
        # Identifier string: the concrete class name.
        return self.__class__.__name__
class GlorotNormal(WeightInitializer):
    """
    **Glorot Normal (GlorotNormal)** -- a.k.a. Xavier initialization.
    Zero-mean Gaussian draws with std sqrt(2 / (fan_in + fan_out)), keeping
    gradient variance roughly constant across layers.
    References:
        [1] Understanding the difficulty of training deep feedforward neural networks
            * [Xavier Glorot, 2010] http://proceedings.mlr.press/v9/glorot10a.html
            * [PDF] http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf
        [2] Initialization Of Deep Feedfoward Networks
            * [DeepGrid Article - Jefkine Kafunah] https://goo.gl/E2XrGe
    """
    def weights(self, shape, random_seed):
        fan_in, fan_out = self.compute_fans(shape)
        np.random.seed(random_seed)
        std_dev = np.sqrt(2. / (fan_in + fan_out))
        return np.random.normal(loc=0.0, scale=std_dev, size=shape)
    @property
    def init_name(self):
        return type(self).__name__
class GlorotUniform(WeightInitializer):
    """
    **Glorot Uniform (GlorotUniform)** -- a.k.a. Xavier initialization.
    Uniform draws in [-b, b] with b = sqrt(6 / (fan_in + fan_out)), keeping
    gradient variance roughly constant across layers.
    References:
        [1] Understanding the difficulty of training deep feedforward neural networks
            * [Xavier Glorot, 2010] http://proceedings.mlr.press/v9/glorot10a.html
            * [PDF] http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf
        [2] Initialization Of Deep Feedfoward Networks
            * [DeepGrid Article - Jefkine Kafunah] https://goo.gl/E2XrGe
    """
    def weights(self, shape, random_seed):
        fan_in, fan_out = self.compute_fans(shape)
        np.random.seed(random_seed)
        bound = np.sqrt(6. / (fan_in + fan_out))
        return np.random.uniform(low=-bound, high=bound, size=shape)
    @property
    def init_name(self):
        return type(self).__name__
class LeCunUniform(WeightInitializer):
    """
    **LeCun Uniform (LeCunUniform)**
    Uniform draws in [-b, b] with b = sqrt(3 / fan_in), so sigmoid units are
    primarily activated in their linear region.
    References:
        [1] Efficient Backprop
            * [LeCun, 1998][PDF] http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
    """
    def weights(self, shape, random_seed):
        fan_in, _fan_out = self.compute_fans(shape)
        np.random.seed(random_seed)
        bound = np.sqrt(3. / fan_in)
        return np.random.uniform(low=-bound, high=bound, size=shape)
    @property
    def init_name(self):
        return type(self).__name__
class LeCunNormal(WeightInitializer):
    """
    **LeCun Normal (LeCunNormal)**
    Weights should be randomly chosen but in such a way that the sigmoid is
    primarily activated in its linear region. LeCun normal is an implementation
    based on Gaussian distribution
    References:
        [1] Efficient Backprop
            * [LeCun, 1998][PDF] http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
    """
    def weights(self, shape, random_seed):
        """Draw weights from N(0, 1/fan_in) with a reproducible seed."""
        fan_in, fan_out = self.compute_fans(shape)
        scale = np.sqrt(1. / fan_in)
        np.random.seed(random_seed)
        # BUG FIX: the mean was previously -scale; LeCun normal is a
        # zero-centred Gaussian with std sqrt(1 / fan_in).
        return np.random.normal(loc = 0.0, scale = scale, size = shape)
    @property
    def init_name(self):
        return self.__class__.__name__
class RandomUniform(WeightInitializer):
    """
    **Random Uniform (RandomUniform)**
    Uniform draws in [-b, b] with b = sqrt(1 / (fan_in + fan_out)).
    """
    def weights(self, shape, random_seed):
        fan_in, fan_out = self.compute_fans(shape)
        np.random.seed(random_seed)
        bound = np.sqrt(1. / (fan_in + fan_out))
        return np.random.uniform(low=-bound, high=bound, size=shape)
    @property
    def init_name(self):
        return type(self).__name__
class RandomNormal(WeightInitializer):
    """
    **Random Normal (RandomNormal)**
    Random normal, an implementation of weight initialization based on Gaussian
    distribution
    """
    def weights(self, shape, random_seed):
        # Zero-mean Gaussian with std sqrt(1 / (fan_in + fan_out)).
        fan_in, fan_out = self.compute_fans(shape)
        scale = np.sqrt(1. / (fan_in + fan_out))
        np.random.seed(random_seed)
        return np.random.normal(loc = 0.0, scale = scale, size = shape)
    @property
    def init_name(self):
        return self.__class__.__name__
class Zero(WeightInitializer):
    """
    **Zero (Zero)**
    Deterministic initializer: every weight is zero.  The random_seed argument
    is accepted for interface symmetry but unused.
    """
    def weights(self, shape, random_seed):
        return np.zeros(shape)
    @property
    def init_name(self):
        return type(self).__name__
class One(WeightInitializer):
    """
    **One (One)**
    Deterministic initializer: every weight is one.  The random_seed argument
    is accepted for interface symmetry but unused.
    """
    def weights(self, shape, random_seed):
        return np.ones(shape)
    @property
    def init_name(self):
        return type(self).__name__
class Identity(WeightInitializer):
    """
    **Identity (Identity)**
    Identity is an implementation of weight initialization that returns an
    identity matrix of size shape
    """
    def weights(self, shape, random_seed):
        # NOTE(review): assumes len(shape) >= 2 -- a 1-D shape raises
        # IndexError here.  random_seed is accepted but unused.
        return np.eye(shape[0], shape[1], dtype = np.float32)
    @property
    def init_name(self):
        return self.__class__.__name__
class InitializeWeights:
_methods = {
'ones' : One,
'zeros' : Zero,
'identity' : Identity,
'he_normal' : HeNormal,
'he_uniform' : HeUniform,
'lecun_normal' : LeCunNormal,
'lecun_uniform' : LeCunUniform,
'random_normal' : RandomNormal,
'glorot_normal' : GlorotNormal,
'random_uniform' : RandomUniform,
'glorot_uniform' : GlorotUniform
}
def __init__(self, name):
if name not in self._methods.keys():
raise Exception('Weight initialization method must be either one of the following: {}.'.format(', '.join(self._methods.keys())))
self.init_method = self._methods[name]()
@property
def name(self):
return self.init_method.init_name
def initialize_weights(self, shape, random_seed = None):
return self.init_method.weights(shape, random_seed) | ztlearn/initializers.py |
import numpy as np
class WeightInitializer:
    """Base class for weight initializers; provides fan-in/fan-out computation."""
    def compute_fans(self, shape):
        """
        Return (fan_in, fan_out) for a weight tensor of the given shape.

        func: compute_fans adapted from keras: https://github.com/fchollet/keras/blob/master/keras/initializers.py
        copyright held by fchollet(keras-team), 2017 as part of Keras project
        licence: MIT
        """
        # kernel shape: ('NF': Total Filters, 'CF': Filter Channels, 'HF': Filter Height 'WF': Filter Width)
        # NOTE(review): receptive_field_size below multiplies shape[:2]
        # (NF * CF), not the spatial dims HF * WF named in the comment above;
        # confirm this is the intended convention before relying on the exact
        # fan values -- it disagrees with the shape legend given here.
        shape = (shape[0], 1) if len(shape) == 1 else shape  # 1-D tensors treated as (n, 1)
        receptive_field_size = np.prod(shape[:2])
        fan_out = shape[0] * receptive_field_size # NF *receptive_field_size
        fan_in  = shape[1] * receptive_field_size # CF *receptive_field_size
        return fan_in, fan_out
class HeNormal(WeightInitializer):
    """
    **He Normal (HeNormal)**
    Zero-mean Gaussian initialization with std sqrt(2 / fan_in), designed for
    rectifier (ReLU) nonlinearities.
    References:
        [1] Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification
            * [Kaiming He, 2015] https://arxiv.org/abs/1502.01852
            * [PDF] https://arxiv.org/pdf/1502.01852.pdf
        [2] Initialization Of Deep Networks Case of Rectifiers
            * [DeepGrid Article - Jefkine Kafunah] https://goo.gl/TBNw5t
    """
    def weights(self, shape, random_seed):
        # Seed first so draws are reproducible for a given random_seed.
        fan_in, _fan_out = self.compute_fans(shape)
        np.random.seed(random_seed)
        std_dev = np.sqrt(2. / fan_in)
        return np.random.normal(loc=0.0, scale=std_dev, size=shape)
    @property
    def init_name(self):
        return type(self).__name__
class HeUniform(WeightInitializer):
    """
    **He Uniform (HeUniform)**
    HeUniform is a robust initialization method that particularly considers the
    rectifier nonlinearities. He uniform is an implementation based on Uniform
    distribution
    References:
        [1] Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification
            * [Kaiming He, 2015] https://arxiv.org/abs/1502.01852
            * [PDF] https://arxiv.org/pdf/1502.01852.pdf
        [2] Initialization Of Deep Networks Case of Rectifiers
            * [DeepGrid Article - Jefkine Kafunah] https://goo.gl/TBNw5t
    """
    def weights(self, shape, random_seed):
        # Uniform bound sqrt(6 / fan_in); seeding makes draws reproducible.
        fan_in, fan_out = self.compute_fans(shape)
        scale = np.sqrt(6. / fan_in)
        np.random.seed(random_seed)
        return np.random.uniform(low = -scale, high = scale, size = shape)
    @property
    def init_name(self):
        # Human-readable name used by InitializeWeights.name.
        return self.__class__.__name__
class GlorotNormal(WeightInitializer):
    """
    **Glorot Normal (GlorotNormal)** -- a.k.a. Xavier initialization.
    Zero-mean Gaussian draws with std sqrt(2 / (fan_in + fan_out)), keeping
    gradient variance roughly constant across layers.
    References:
        [1] Understanding the difficulty of training deep feedforward neural networks
            * [Xavier Glorot, 2010] http://proceedings.mlr.press/v9/glorot10a.html
            * [PDF] http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf
        [2] Initialization Of Deep Feedfoward Networks
            * [DeepGrid Article - Jefkine Kafunah] https://goo.gl/E2XrGe
    """
    def weights(self, shape, random_seed):
        fan_in, fan_out = self.compute_fans(shape)
        np.random.seed(random_seed)
        std_dev = np.sqrt(2. / (fan_in + fan_out))
        return np.random.normal(loc=0.0, scale=std_dev, size=shape)
    @property
    def init_name(self):
        return type(self).__name__
class GlorotUniform(WeightInitializer):
    """
    **Glorot Uniform (GlorotUniform)** -- a.k.a. Xavier initialization.
    Uniform draws in [-b, b] with b = sqrt(6 / (fan_in + fan_out)), keeping
    gradient variance roughly constant across layers.
    References:
        [1] Understanding the difficulty of training deep feedforward neural networks
            * [Xavier Glorot, 2010] http://proceedings.mlr.press/v9/glorot10a.html
            * [PDF] http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf
        [2] Initialization Of Deep Feedfoward Networks
            * [DeepGrid Article - Jefkine Kafunah] https://goo.gl/E2XrGe
    """
    def weights(self, shape, random_seed):
        fan_in, fan_out = self.compute_fans(shape)
        np.random.seed(random_seed)
        bound = np.sqrt(6. / (fan_in + fan_out))
        return np.random.uniform(low=-bound, high=bound, size=shape)
    @property
    def init_name(self):
        return type(self).__name__
class LeCunUniform(WeightInitializer):
    """
    **LeCun Uniform (LeCunUniform)**
    Uniform draws in [-b, b] with b = sqrt(3 / fan_in), so sigmoid units are
    primarily activated in their linear region.
    References:
        [1] Efficient Backprop
            * [LeCun, 1998][PDF] http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
    """
    def weights(self, shape, random_seed):
        fan_in, _fan_out = self.compute_fans(shape)
        np.random.seed(random_seed)
        bound = np.sqrt(3. / fan_in)
        return np.random.uniform(low=-bound, high=bound, size=shape)
    @property
    def init_name(self):
        return type(self).__name__
class LeCunNormal(WeightInitializer):
    """
    **LeCun Normal (LeCunNormal)**
    Weights should be randomly chosen but in such a way that the sigmoid is
    primarily activated in its linear region. LeCun normal is an implementation
    based on Gaussian distribution
    References:
        [1] Efficient Backprop
            * [LeCun, 1998][PDF] http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
    """
    def weights(self, shape, random_seed):
        """Draw weights from N(0, 1/fan_in) with a reproducible seed."""
        fan_in, fan_out = self.compute_fans(shape)
        scale = np.sqrt(1. / fan_in)
        np.random.seed(random_seed)
        # BUG FIX: the mean was previously -scale; LeCun normal is a
        # zero-centred Gaussian with std sqrt(1 / fan_in).
        return np.random.normal(loc = 0.0, scale = scale, size = shape)
    @property
    def init_name(self):
        return self.__class__.__name__
class RandomUniform(WeightInitializer):
    """
    **Random Uniform (RandomUniform)**
    Uniform draws in [-b, b] with b = sqrt(1 / (fan_in + fan_out)).
    """
    def weights(self, shape, random_seed):
        fan_in, fan_out = self.compute_fans(shape)
        np.random.seed(random_seed)
        bound = np.sqrt(1. / (fan_in + fan_out))
        return np.random.uniform(low=-bound, high=bound, size=shape)
    @property
    def init_name(self):
        return type(self).__name__
class RandomNormal(WeightInitializer):
    """
    **Random Normal (RandomNormal)**
    Random normal, an implementation of weight initialization based on Gaussian
    distribution
    """
    def weights(self, shape, random_seed):
        # Zero-mean Gaussian with std sqrt(1 / (fan_in + fan_out)).
        fan_in, fan_out = self.compute_fans(shape)
        scale = np.sqrt(1. / (fan_in + fan_out))
        np.random.seed(random_seed)
        return np.random.normal(loc = 0.0, scale = scale, size = shape)
    @property
    def init_name(self):
        return self.__class__.__name__
class Zero(WeightInitializer):
    """
    **Zero (Zero)**
    Deterministic initializer: every weight is zero.  The random_seed argument
    is accepted for interface symmetry but unused.
    """
    def weights(self, shape, random_seed):
        return np.zeros(shape)
    @property
    def init_name(self):
        return type(self).__name__
class One(WeightInitializer):
    """
    **One (One)**
    Deterministic initializer: every weight is one.  The random_seed argument
    is accepted for interface symmetry but unused.
    """
    def weights(self, shape, random_seed):
        return np.ones(shape)
    @property
    def init_name(self):
        return type(self).__name__
class Identity(WeightInitializer):
    """
    **Identity (Identity)**
    Identity is an implementation of weight initialization that returns an
    identity matrix of size shape
    """
    def weights(self, shape, random_seed):
        # NOTE(review): assumes len(shape) >= 2 -- a 1-D shape raises
        # IndexError here.  random_seed is accepted but unused.
        return np.eye(shape[0], shape[1], dtype = np.float32)
    @property
    def init_name(self):
        return self.__class__.__name__
class InitializeWeights:
_methods = {
'ones' : One,
'zeros' : Zero,
'identity' : Identity,
'he_normal' : HeNormal,
'he_uniform' : HeUniform,
'lecun_normal' : LeCunNormal,
'lecun_uniform' : LeCunUniform,
'random_normal' : RandomNormal,
'glorot_normal' : GlorotNormal,
'random_uniform' : RandomUniform,
'glorot_uniform' : GlorotUniform
}
def __init__(self, name):
if name not in self._methods.keys():
raise Exception('Weight initialization method must be either one of the following: {}.'.format(', '.join(self._methods.keys())))
self.init_method = self._methods[name]()
@property
def name(self):
return self.init_method.init_name
def initialize_weights(self, shape, random_seed = None):
return self.init_method.weights(shape, random_seed) | 0.955371 | 0.701541 |
import os
import sys
import csv
import random
import pandas as pd
import numpy as np
# Absolute directory of this module (Python 2: decode the byte path to unicode).
_thisDir = os.path.dirname(os.path.abspath(__file__)).decode(sys.getfilesystemencoding())
_parentDir = os.path.abspath(os.path.join(_thisDir, os.pardir))
# Output directory for result files, i.e. <repo>/data/
dataDir = _parentDir + '/data/'
def get_trivia():
    """Load the trivia list and return it as a shuffled list of record dicts."""
    frame = pd.read_csv(
        _thisDir + "/Kanga_TriviaList1.csv",
        usecols=['QuestionNum', 'Question', 'AnswerUse'])
    frame.columns = ['QuestionNum', 'Question', 'Answer']
    records = frame.to_dict('records')
    random.shuffle(records)
    return records
def get_trivia_questions():
    """Return the array of all trivia question strings, in file order."""
    frame = pd.read_csv(
        _thisDir + "/Kanga_TriviaList1.csv",
        usecols=['QuestionNum', 'Question', 'AnswerUse'])
    frame.columns = ['QuestionNum', 'Question', 'Answer']
    return frame['Question'].values
def get_trivia_as_dicts():
    """Return (question -> answer, question -> question number) dictionaries."""
    frame = pd.read_csv(
        _thisDir + "/Kanga_TriviaList1.csv",
        usecols=['QuestionNum', 'Question', 'AnswerUse'])
    frame.columns = ['QuestionNum', 'Question', 'Answer']
    question_keys = frame['Question'].values
    answers = dict(zip(question_keys, frame['Answer'].values))
    numbers = dict(zip(question_keys, frame['QuestionNum'].values))
    return answers, numbers
def get_practice_trivia():
    """Load the first three practice questions as a shuffled list of dicts."""
    frame = pd.read_csv(_thisDir + "/Kanga_PracticeQs.csv")
    frame.columns = ['QuestionNum', 'Question', 'Answer']
    practice = frame[0:3].to_dict('records')
    random.shuffle(practice)
    return practice
def get_jitter():
    """Return 400 random inter-trial jitter durations, each in [0.5, 2] seconds."""
    # range() works on both Python 2 and 3; xrange was Python-2 only.
    return [random.uniform(0.5, 2) for _ in range(400)]
def get_wait():
    """Return 800 shuffled wait durations: 200 each of 4, 8, 12 and 16 seconds."""
    wait = np.repeat(np.array([4, 8, 12, 16]), 200)
    random.shuffle(wait)
    return wait
def get_questions_used_in_task(subjectId):
    """Return questions already shown to this subject, or None when no results file exists."""
    datafile = os.path.join(dataDir, 'Kanga', subjectId, subjectId + '_results.csv')
    if os.path.exists(datafile):
        df = pd.read_csv(datafile)
        return df['Question'].values
    # print() with a single argument behaves identically on Python 2 and 3;
    # the bare print statement used previously was Python-2 only.
    print("File not found")
    return None
def get_unused_questions(subjectId):
    """Return up to 10 shuffled questions the subject has not yet seen.

    Robustness fixes: get_questions_used_in_task may return None (no results
    file yet), which previously raised TypeError on the `in` test; and fewer
    than 10 unused questions previously caused an IndexError by indexing past
    the end of the question list.
    """
    all_questions = get_trivia_questions()
    old_questions = get_questions_used_in_task(subjectId)
    # Set membership is O(1) per lookup (old_questions may be a numpy array).
    seen = set(old_questions) if old_questions is not None else set()
    random.shuffle(all_questions)
    unused_questions = []
    for q in all_questions:
        if q not in seen:
            unused_questions.append(q)
            if len(unused_questions) == 10:
                break
    random.shuffle(unused_questions)
    return unused_questions
def get_ratingtask_expVariables(subjectId):
    """Build rating-task trials: one 'RateQuestion' and one 'RateAnswer' trial
    per unused question, each carrying the shared rating-scale configuration.

    Previously the two trial dicts were built with two identical copy-pasted
    blocks; this constructs them in a single loop.
    """
    trivia_dict, trivia_qnum_dict = get_trivia_as_dicts()
    unused_q = get_unused_questions(subjectId)
    # Rating-scale settings common to every trial.
    rating_scale = {
        'rs_min': 0,
        'rs_max': 100,
        'rs_tickIncrement': 25,
        'rs_increment': 1,
        'rs_labelNames': ["0", "25", "50", "75", "100"],
    }
    expVariables = []
    for q in unused_q:
        for trial_type in ('RateQuestion', 'RateAnswer'):
            trial = {
                'TrialType': trial_type,
                'Question': q,
                'Answer': trivia_dict[q],
                'QuestionNum': trivia_qnum_dict[q],
            }
            trial.update(rating_scale)
            expVariables.append(trial)
    return expVariables
# Questionnaire index -> name; each name matches a CSV file shipped next to this module.
questionnaire_dict={1:'FiveDimensionalCuriosityScale',2:'BigFiveInventory',3:'EpistemicCuriosityScale',4:'NeedForClosure',5:'NeedForCognition',6:'PerceptualCuriosity',7:'ArnettInventoryOfSensationSeeking'}
def get_questionnaire(n):
    """Load questionnaire *n* and return it as [{question: [option, ...]}, ...]."""
    qName = questionnaire_dict[n]
    info = pd.read_csv(_thisDir + '/' + qName + '.csv')
    questions = info['Question']
    info = info.set_index('Question')
    new_info = []
    for q in questions:
        # Keep only string cells; NaN padding from ragged rows is dropped.
        options = [cell for cell in info.loc[q].values if type(cell) == str]
        new_info.append({q: options})
    return new_info
def get_questionnaire_instructions(n):
    """Return the instruction text for questionnaire *n*."""
    qName = questionnaire_dict[n]
    table = pd.read_csv(_thisDir + '/QuestionnaireInstructions.csv')
    matches = table.loc[table['Questionnaire'] == qName, 'Instructions']
    return matches.values[0]
def get_demographicq():
    """Load the demographic questions as [{question: [option, ...]}, ...]."""
    info = pd.read_csv(_thisDir + '/DemographicQuestions.csv')
    questions = info['Question']
    info = info.set_index('Question')
    new_info = []
    for q in questions:
        # Keep only string cells; NaN padding from ragged rows is dropped.
        options = [cell for cell in info.loc[q].values if type(cell) == str]
        new_info.append({q: options})
    return new_info
"""
Checks request.args has assignmentId, hitId, turkSubmitTo, workerId, live - all but live is passed by MTurk
live refers to whether HIT is live or in sandbox
"""
def contains_necessary_args(args):
if 'workerId' in args and 'assignmentId' in args and 'hitId' in args and 'turkSubmitTo' in args and 'live' in args:
return True
else:
return False
"""
Retrieve necessary args: assignmentId, hitId, turkSubmitTo, workerId, live
"""
def get_necessary_args(args):
workerId = args.get('workerId')
assignmentId = args.get('assignmentId')
hitId = args.get('hitId')
turkSubmitTo = args.get('turkSubmitTo')
live = args.get('live') == "True"
return [workerId, assignmentId, hitId, turkSubmitTo, live] | kangacuriositytask/utils.py | import os
import sys
import csv
import random
import pandas as pd
import numpy as np
# Absolute directory of this module (Python 2: decode the byte path to unicode).
_thisDir = os.path.dirname(os.path.abspath(__file__)).decode(sys.getfilesystemencoding())
_parentDir = os.path.abspath(os.path.join(_thisDir, os.pardir))
# Output directory for result files, i.e. <repo>/data/
dataDir = _parentDir + '/data/'
def get_trivia():
    """Load the trivia list and return it as a shuffled list of record dicts."""
    frame = pd.read_csv(
        _thisDir + "/Kanga_TriviaList1.csv",
        usecols=['QuestionNum', 'Question', 'AnswerUse'])
    frame.columns = ['QuestionNum', 'Question', 'Answer']
    records = frame.to_dict('records')
    random.shuffle(records)
    return records
def get_trivia_questions():
    """Return the array of all trivia question strings, in file order."""
    frame = pd.read_csv(
        _thisDir + "/Kanga_TriviaList1.csv",
        usecols=['QuestionNum', 'Question', 'AnswerUse'])
    frame.columns = ['QuestionNum', 'Question', 'Answer']
    return frame['Question'].values
def get_trivia_as_dicts():
    """Return (question -> answer, question -> question number) dictionaries."""
    frame = pd.read_csv(
        _thisDir + "/Kanga_TriviaList1.csv",
        usecols=['QuestionNum', 'Question', 'AnswerUse'])
    frame.columns = ['QuestionNum', 'Question', 'Answer']
    question_keys = frame['Question'].values
    answers = dict(zip(question_keys, frame['Answer'].values))
    numbers = dict(zip(question_keys, frame['QuestionNum'].values))
    return answers, numbers
def get_practice_trivia():
    """Load the first three practice questions as a shuffled list of dicts."""
    frame = pd.read_csv(_thisDir + "/Kanga_PracticeQs.csv")
    frame.columns = ['QuestionNum', 'Question', 'Answer']
    practice = frame[0:3].to_dict('records')
    random.shuffle(practice)
    return practice
def get_jitter():
    """Return 400 random inter-trial jitter durations, each in [0.5, 2] seconds."""
    # range() works on both Python 2 and 3; xrange was Python-2 only.
    return [random.uniform(0.5, 2) for _ in range(400)]
def get_wait():
    """Return 800 shuffled wait durations: 200 each of 4, 8, 12 and 16 seconds."""
    wait = np.repeat(np.array([4, 8, 12, 16]), 200)
    random.shuffle(wait)
    return wait
def get_questions_used_in_task(subjectId):
    """Return questions already shown to this subject, or None when no results file exists."""
    datafile = os.path.join(dataDir, 'Kanga', subjectId, subjectId + '_results.csv')
    if os.path.exists(datafile):
        df = pd.read_csv(datafile)
        return df['Question'].values
    # print() with a single argument behaves identically on Python 2 and 3;
    # the bare print statement used previously was Python-2 only.
    print("File not found")
    return None
def get_unused_questions(subjectId):
    """Return up to 10 shuffled questions the subject has not yet seen.

    Robustness fixes: get_questions_used_in_task may return None (no results
    file yet), which previously raised TypeError on the `in` test; and fewer
    than 10 unused questions previously caused an IndexError by indexing past
    the end of the question list.
    """
    all_questions = get_trivia_questions()
    old_questions = get_questions_used_in_task(subjectId)
    # Set membership is O(1) per lookup (old_questions may be a numpy array).
    seen = set(old_questions) if old_questions is not None else set()
    random.shuffle(all_questions)
    unused_questions = []
    for q in all_questions:
        if q not in seen:
            unused_questions.append(q)
            if len(unused_questions) == 10:
                break
    random.shuffle(unused_questions)
    return unused_questions
def get_ratingtask_expVariables(subjectId):
    """Build rating-task trials: one 'RateQuestion' and one 'RateAnswer' trial
    per unused question, each carrying the shared rating-scale configuration.

    Previously the two trial dicts were built with two identical copy-pasted
    blocks; this constructs them in a single loop.
    """
    trivia_dict, trivia_qnum_dict = get_trivia_as_dicts()
    unused_q = get_unused_questions(subjectId)
    # Rating-scale settings common to every trial.
    rating_scale = {
        'rs_min': 0,
        'rs_max': 100,
        'rs_tickIncrement': 25,
        'rs_increment': 1,
        'rs_labelNames': ["0", "25", "50", "75", "100"],
    }
    expVariables = []
    for q in unused_q:
        for trial_type in ('RateQuestion', 'RateAnswer'):
            trial = {
                'TrialType': trial_type,
                'Question': q,
                'Answer': trivia_dict[q],
                'QuestionNum': trivia_qnum_dict[q],
            }
            trial.update(rating_scale)
            expVariables.append(trial)
    return expVariables
# Questionnaire index -> name; each name matches a CSV file shipped next to this module.
questionnaire_dict={1:'FiveDimensionalCuriosityScale',2:'BigFiveInventory',3:'EpistemicCuriosityScale',4:'NeedForClosure',5:'NeedForCognition',6:'PerceptualCuriosity',7:'ArnettInventoryOfSensationSeeking'}
def get_questionnaire(n):
    """Load questionnaire *n* and return it as [{question: [option, ...]}, ...]."""
    qName = questionnaire_dict[n]
    info = pd.read_csv(_thisDir + '/' + qName + '.csv')
    questions = info['Question']
    info = info.set_index('Question')
    new_info = []
    for q in questions:
        # Keep only string cells; NaN padding from ragged rows is dropped.
        options = [cell for cell in info.loc[q].values if type(cell) == str]
        new_info.append({q: options})
    return new_info
def get_questionnaire_instructions(n):
    """Return the instruction text for questionnaire *n*."""
    qName = questionnaire_dict[n]
    table = pd.read_csv(_thisDir + '/QuestionnaireInstructions.csv')
    matches = table.loc[table['Questionnaire'] == qName, 'Instructions']
    return matches.values[0]
def get_demographicq():
    """Load the demographic questions as [{question: [option, ...]}, ...]."""
    info = pd.read_csv(_thisDir + '/DemographicQuestions.csv')
    questions = info['Question']
    info = info.set_index('Question')
    new_info = []
    for q in questions:
        # Keep only string cells; NaN padding from ragged rows is dropped.
        options = [cell for cell in info.loc[q].values if type(cell) == str]
        new_info.append({q: options})
    return new_info
"""
Checks request.args has assignmentId, hitId, turkSubmitTo, workerId, live - all but live is passed by MTurk
live refers to whether HIT is live or in sandbox
"""
def contains_necessary_args(args):
if 'workerId' in args and 'assignmentId' in args and 'hitId' in args and 'turkSubmitTo' in args and 'live' in args:
return True
else:
return False
"""
Retrieve necessary args: assignmentId, hitId, turkSubmitTo, workerId, live
"""
def get_necessary_args(args):
workerId = args.get('workerId')
assignmentId = args.get('assignmentId')
hitId = args.get('hitId')
turkSubmitTo = args.get('turkSubmitTo')
live = args.get('live') == "True"
return [workerId, assignmentId, hitId, turkSubmitTo, live] | 0.081918 | 0.169543 |
"""Service functions to handle athena connections."""
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from ontask import models
from ontask.connection.services.crud import (
ConnectionTableAdmin, ConnectionTableSelect,
)
from ontask.core import OperationsColumn
class AthenaConnectionTableAdmin(ConnectionTableAdmin):
    """Table to render the Athena admin items."""
    @staticmethod
    def render_enabled(record):
        """Render the boolean to allow changes.

        :param record: Row dict with at least 'id' and 'enabled' keys.
        :return: HTML fragment containing the enable/disable toggle.
        """
        return render_to_string(
            'connection/includes/partial_enable.html',
            {
                'id': record['id'],
                'enabled': record['enabled'],
                'toggle_url': reverse(
                    'dataops:athenaconn_toggle',
                    kwargs={'pk': record['id']})})
    class Meta(ConnectionTableAdmin.Meta):
        """Define model, fields, sequence and attributes."""
        model = models.AthenaConnection
class AthenaConnectionTableSelect(ConnectionTableSelect):
    """Class to render the table of Athena connections."""
    class Meta(ConnectionTableSelect.Meta):
        """Define models, fields, sequence and attributes."""
        # Only the model differs from the generic select table.
        model = models.AthenaConnection
def create_athena_connection_admintable() -> AthenaConnectionTableAdmin:
    """Create the table structure with the SQL connections for Admin.

    :return: Athena Connection Table Admin object.
    """
    def _row_context(record):
        """Per-row URLs for the admin operations column."""
        return {
            'id': record['id'],
            'edit_url': reverse(
                'dataops:athenaconn_edit', kwargs={'pk': record['id']}),
            'view_url': reverse(
                'dataops:athenaconn_view', kwargs={'pk': record['id']}),
            'clone_url': reverse(
                'dataops:athenaconn_clone', kwargs={'pk': record['id']}),
            'delete_url': reverse(
                'dataops:athenaconn_delete', kwargs={'pk': record['id']})}

    op_column = OperationsColumn(
        verbose_name='',
        template_file='connection/includes/partial_adminop.html',
        template_context=_row_context)
    connections = models.AthenaConnection.objects.values(
        'id', 'name', 'description_text', 'enabled')
    return AthenaConnectionTableAdmin(
        connections,
        orderable=False,
        extra_columns=[('operations', op_column)])
def create_athena_connection_runtable() -> AthenaConnectionTableSelect:
    """Create the table structure with the SQL connections for Running.

    Only connections with enabled=True are listed.
    :return: SQL Connection Table Run object.
    """
    # Per-row 'run' and 'view' links rendered in the operations column.
    operation_column = OperationsColumn(
        verbose_name=_('Operations'),
        template_file='connection/includes/partial_select.html',
        template_context=lambda record: {
            'id': record['id'],
            'run_url': reverse(
                'dataops:athenaupload_start',
                kwargs={'pk': record['id']}),
            'view_url': reverse(
                'dataops:athenaconn_view',
                kwargs={'pk': record['id']})})
    return AthenaConnectionTableSelect(
        models.AthenaConnection.objects.filter(enabled=True).values(
            'id',
            'name',
            'description_text'),
        orderable=False,
        extra_columns=[('operations', operation_column)]) | ontask/connection/services/athena.py |
"""Service functions to handle athena connections."""
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from ontask import models
from ontask.connection.services.crud import (
ConnectionTableAdmin, ConnectionTableSelect,
)
from ontask.core import OperationsColumn
class AthenaConnectionTableAdmin(ConnectionTableAdmin):
    """Table to render the Athena admin items."""
    @staticmethod
    def render_enabled(record):
        """Render the boolean to allow changes.

        :param record: Row dict with at least 'id' and 'enabled' keys.
        :return: HTML fragment containing the enable/disable toggle.
        """
        return render_to_string(
            'connection/includes/partial_enable.html',
            {
                'id': record['id'],
                'enabled': record['enabled'],
                'toggle_url': reverse(
                    'dataops:athenaconn_toggle',
                    kwargs={'pk': record['id']})})
    class Meta(ConnectionTableAdmin.Meta):
        """Define model, fields, sequence and attributes."""
        model = models.AthenaConnection
class AthenaConnectionTableSelect(ConnectionTableSelect):
    """Class to render the table of Athena connections."""
    class Meta(ConnectionTableSelect.Meta):
        """Define models, fields, sequence and attributes."""
        # Only the model differs from the generic select table.
        model = models.AthenaConnection
def create_athena_connection_admintable() -> AthenaConnectionTableAdmin:
    """Create the table structure with the SQL connections for Admin.

    :return: Athena Connection Table Admin object.
    """
    def _row_context(record):
        """Per-row URLs for the admin operations column."""
        return {
            'id': record['id'],
            'edit_url': reverse(
                'dataops:athenaconn_edit', kwargs={'pk': record['id']}),
            'view_url': reverse(
                'dataops:athenaconn_view', kwargs={'pk': record['id']}),
            'clone_url': reverse(
                'dataops:athenaconn_clone', kwargs={'pk': record['id']}),
            'delete_url': reverse(
                'dataops:athenaconn_delete', kwargs={'pk': record['id']})}

    op_column = OperationsColumn(
        verbose_name='',
        template_file='connection/includes/partial_adminop.html',
        template_context=_row_context)
    connections = models.AthenaConnection.objects.values(
        'id', 'name', 'description_text', 'enabled')
    return AthenaConnectionTableAdmin(
        connections,
        orderable=False,
        extra_columns=[('operations', op_column)])
def create_athena_connection_runtable() -> AthenaConnectionTableSelect:
    """Create the table structure with the SQL connections for Running.

    Only connections with enabled=True are listed.
    :return: SQL Connection Table Run object.
    """
    # Per-row 'run' and 'view' links rendered in the operations column.
    operation_column = OperationsColumn(
        verbose_name=_('Operations'),
        template_file='connection/includes/partial_select.html',
        template_context=lambda record: {
            'id': record['id'],
            'run_url': reverse(
                'dataops:athenaupload_start',
                kwargs={'pk': record['id']}),
            'view_url': reverse(
                'dataops:athenaconn_view',
                kwargs={'pk': record['id']})})
    return AthenaConnectionTableSelect(
        models.AthenaConnection.objects.filter(enabled=True).values(
            'id',
            'name',
            'description_text'),
        orderable=False,
        extra_columns=[('operations', operation_column)]) | 0.806434 | 0.227748 |
import math
import cv2
import numpy as np
from tqdm import tqdm
from .base import Eval
class SRN(Eval):
    '''Tests SRN

    Multi-scale FLOPs accounting for the SRN detector's test protocol.
    '''
    def __init__(self, dataset, model, ms=False, max_downsample=1):
        super(SRN, self).__init__(dataset, model, ms, max_downsample)
    def multi_scale_testing(self):
        """Return the average per-image FLOPs under SRN's multi-scale testing.

        Small images (< 2100x2100 pixels) accumulate FLOPs for the original
        scale, a 0.5x shrink, successive 1.5x enlargements while they fit the
        pixel budget, and a final largest-fitting ratio.  Oversized images use
        the largest-fitting ratio plus 0.75x and 0.5x shrinks.
        Relies on Eval._calc_flops(img, scale, flip) -- presumably the FLOP
        count of one forward pass at that scale (TODO confirm in base class).
        """
        largest_input = 2100 * 2100
        flops_total = int(0)
        pbar = tqdm(self.dataset, desc='Multi-scale testing {} on {}'.format(self.model.name, self.dataset.name))
        for img in pbar:
            flops_det01, flops_det2, flops_det3, flops_det4 = [int(0)] * 4
            img_h, img_w, img_c = img.shape
            if img_h * img_w < largest_input:
                # det0 and det1 (0-original, 1-flip)
                flops_det01 = self._calc_flops(img, 1, flip=False)
                # det2 (shrink 0.5)
                flops_det2 = self._calc_flops(img, 0.5, flip=False)
                # det3 (enlarge): how many 1.5x steps still fit the pixel budget
                # (area grows by 1.5^2 = 2.25 per step, hence the log base).
                enlarge_time = int(math.floor(math.log(largest_input / img_w / img_h, 2.25)))
                for t in range(enlarge_time):
                    resize_scale = math.pow(1.5, t+1)
                    flops_det3 += self._calc_flops(img, resize_scale, flip=False)
                # det4 (final ratio): scale so the image exactly fills the budget
                final_ratio = math.sqrt(largest_input / img_h / img_w)
                flops_det4 = self._calc_flops(img, final_ratio, flip=False)
            else:
                largest_ratio = math.sqrt(largest_input / img_w / img_h)
                # det0 and det1 (0-largest, 1-largest's flip)
                flops_det01 = self._calc_flops(img, largest_ratio, flip=False)
                # det2 (shrink 0.75)
                flops_det2 = self._calc_flops(img, 0.75, flip=False)
                # det3 (shrink 0.5)
                flops_det3 = self._calc_flops(img, 0.5, flip=False)
                # det4 (no det4)
            flops_total += flops_det01 + flops_det2 + flops_det3 + flops_det4
        # NOTE(review): under Python 2 this is integer division -- confirm the
        # intended precision of the returned average.
        flops_avg = flops_total / len(self.dataset)
        return flops_avg | eval_core/srn.py | import math
import cv2
import numpy as np
from tqdm import tqdm
from .base import Eval
class SRN(Eval):
    '''Tests SRN

    Multi-scale FLOPs accounting for the SRN detector's test protocol.
    '''
    def __init__(self, dataset, model, ms=False, max_downsample=1):
        super(SRN, self).__init__(dataset, model, ms, max_downsample)
    def multi_scale_testing(self):
        """Return the average per-image FLOPs under SRN's multi-scale testing.

        Small images (< 2100x2100 pixels) accumulate FLOPs for the original
        scale, a 0.5x shrink, successive 1.5x enlargements while they fit the
        pixel budget, and a final largest-fitting ratio.  Oversized images use
        the largest-fitting ratio plus 0.75x and 0.5x shrinks.
        Relies on Eval._calc_flops(img, scale, flip) -- presumably the FLOP
        count of one forward pass at that scale (TODO confirm in base class).
        """
        largest_input = 2100 * 2100
        flops_total = int(0)
        pbar = tqdm(self.dataset, desc='Multi-scale testing {} on {}'.format(self.model.name, self.dataset.name))
        for img in pbar:
            flops_det01, flops_det2, flops_det3, flops_det4 = [int(0)] * 4
            img_h, img_w, img_c = img.shape
            if img_h * img_w < largest_input:
                # det0 and det1 (0-original, 1-flip)
                flops_det01 = self._calc_flops(img, 1, flip=False)
                # det2 (shrink 0.5)
                flops_det2 = self._calc_flops(img, 0.5, flip=False)
                # det3 (enlarge): how many 1.5x steps still fit the pixel budget
                # (area grows by 1.5^2 = 2.25 per step, hence the log base).
                enlarge_time = int(math.floor(math.log(largest_input / img_w / img_h, 2.25)))
                for t in range(enlarge_time):
                    resize_scale = math.pow(1.5, t+1)
                    flops_det3 += self._calc_flops(img, resize_scale, flip=False)
                # det4 (final ratio): scale so the image exactly fills the budget
                final_ratio = math.sqrt(largest_input / img_h / img_w)
                flops_det4 = self._calc_flops(img, final_ratio, flip=False)
            else:
                largest_ratio = math.sqrt(largest_input / img_w / img_h)
                # det0 and det1 (0-largest, 1-largest's flip)
                flops_det01 = self._calc_flops(img, largest_ratio, flip=False)
                # det2 (shrink 0.75)
                flops_det2 = self._calc_flops(img, 0.75, flip=False)
                # det3 (shrink 0.5)
                flops_det3 = self._calc_flops(img, 0.5, flip=False)
                # det4 (no det4)
            flops_total += flops_det01 + flops_det2 + flops_det3 + flops_det4
        # NOTE(review): under Python 2 this is integer division -- confirm the
        # intended precision of the returned average.
        flops_avg = flops_total / len(self.dataset)
        return flops_avg | 0.409575 | 0.160661 |
import dateutil.tz
from datetime import datetime, date, timedelta
import pytz
from django.conf import settings
from django.db.models.functions import TruncDay
from django.utils.lru_cache import lru_cache
from manabi.apps.flashcards.models import CardHistory
# Number of trailing weeks summarized by ReviewResults.days_reviewed_by_week.
_WEEKS_TO_REPORT = 9
def _start_of_today(user_timezone):
    """
    Return "now" snapped to today's start-of-day hour in the user's timezone.

    Day boundaries fall at ``settings.START_OF_DAY`` o'clock rather than
    midnight; before that hour, "today" is still the previous calendar day.

    NOTE(review): only the hour is replaced -- the minutes/seconds of "now"
    are kept, so this is not an exact boundary instant.  Confirm callers
    only compare at day granularity.
    """
    now = datetime.now(user_timezone)
    if now.hour < settings.START_OF_DAY:
        now -= timedelta(days=1)
    return now.replace(hour=settings.START_OF_DAY)
def _start_of_this_week(user_timezone):
    """
    Return the start of the current week, where weeks begin on Sunday.
    """
    today = _start_of_today(user_timezone)
    # isoweekday(): Monday=1 .. Sunday=7, so Sunday is already the week start.
    if today.isoweekday() == 7:
        return today
    return today - timedelta(days=today.isoweekday())
class ReviewResults:
    '''
    Aggregated statistics for a user's most recent review session.

    Day boundaries are shifted to ``settings.START_OF_DAY`` (an hour of the
    day) rather than midnight, so late-night reviews count toward the
    previous day.
    '''

    def __init__(self, user, user_timezone, review_began_at):
        # user: the reviewing user; user_timezone: tzinfo of their locale;
        # review_began_at: datetime the session under report started.
        self.user = user
        self.user_timezone = user_timezone
        self.review_began_at = review_began_at
        self._card_history = CardHistory.objects.of_user(user)

    @property
    def cards_reviewed(self):
        '''
        Non-unique card reviews (cards seen for the first time excluded).
        '''
        return (self._card_history
            .filter(reviewed_at__gte=self.review_began_at)
            .exclude(was_new=True)
            .count()
        )

    @property
    def cards_learned(self):
        '''
        Cards seen for the first time during this session.
        '''
        return (self._card_history
            .filter(
                reviewed_at__gte=self.review_began_at,
                was_new=True,
            )
            .count()
        )

    @property
    def cards_learned_or_reviewed_this_week(self):
        return (self._card_history
            .filter(reviewed_at__gte=_start_of_this_week(self.user_timezone))
            .count()
        )

    @property
    def current_daily_streak(self):
        # Walk backwards from today until the first day with no reviews.
        start_of_today = _start_of_today(self.user_timezone)
        day_to_check = start_of_today.date()
        streak = 0
        while True:
            if day_to_check not in self._days_reviewed():
                return streak
            day_to_check -= timedelta(days=1)
            streak += 1

    # NOTE(review): lru_cache on an instance method keeps every instance
    # alive for the cache's lifetime; fine only if instances are
    # short-lived (e.g. one per request) -- confirm.
    @lru_cache(maxsize=None)
    def _days_reviewed(self):
        '''
        Returns a set of date objects.
        '''
        # NOTE(review): tzoffset()'s second argument is an offset in
        # seconds (or a timedelta), but START_OF_DAY is compared against
        # .hour elsewhere, so it is presumably an hour value -- verify
        # this offset is intended.
        return set(dt.date() for dt in
            self._card_history
            .annotate(reviewed_day=
                TruncDay('reviewed_at', tzinfo=dateutil.tz.tzoffset(
                    self.user_timezone, settings.START_OF_DAY)))
            .values('reviewed_day')
            .distinct()
            .order_by('reviewed_day')
            .values_list('reviewed_day', flat=True)
        )

    @property
    def was_review_first_of_today(self):
        '''
        Was this review session the first of today?
        '''
        return not self._card_history.filter(
            reviewed_at__gte=_start_of_today(self.user_timezone),
            reviewed_at__lt=self.review_began_at,
        ).exists()

    @property
    def days_reviewed_by_week(self):
        '''
        Per-week review-day counts for the last _WEEKS_TO_REPORT weeks,
        ordered oldest week first.
        '''
        week_sunday = _start_of_this_week(self.user_timezone).date()
        review_days = self._days_reviewed()
        weeks = []
        for _ in range(_WEEKS_TO_REPORT):
            days_reviewed = sum(
                1 for day in [
                    week_sunday + timedelta(days=day_offset)
                    for day_offset in range(7)
                ]
                if day in review_days
            )
            # Prepend so the oldest week ends up first.
            weeks.insert(0, {
                'week': '{}/{}'.format(week_sunday.month, week_sunday.day),
                'days_reviewed': days_reviewed,
            })
            week_sunday -= timedelta(weeks=1)
        return weeks
from datetime import datetime, date, timedelta
import pytz
from django.conf import settings
from django.db.models.functions import TruncDay
from django.utils.lru_cache import lru_cache
from manabi.apps.flashcards.models import CardHistory
# Number of trailing weeks summarized by ReviewResults.days_reviewed_by_week.
_WEEKS_TO_REPORT = 9
def _start_of_today(user_timezone):
    """
    Return "now" snapped to today's start-of-day hour in the user's timezone.

    Day boundaries fall at ``settings.START_OF_DAY`` o'clock rather than
    midnight; before that hour, "today" is still the previous calendar day.

    NOTE(review): only the hour is replaced -- minutes/seconds of "now" are
    kept, so this is not an exact boundary instant.  Confirm callers only
    compare at day granularity.
    """
    now = datetime.now(user_timezone)
    if now.hour < settings.START_OF_DAY:
        now -= timedelta(days=1)
    return now.replace(hour=settings.START_OF_DAY)
def _start_of_this_week(user_timezone):
    """
    Return the start of the current week, where weeks begin on Sunday.
    """
    today = _start_of_today(user_timezone)
    # isoweekday(): Monday=1 .. Sunday=7, so Sunday is already the week start.
    if today.isoweekday() == 7:
        return today
    return today - timedelta(days=today.isoweekday())
class ReviewResults:
    '''
    Aggregated statistics for a user's most recent review session.

    Day boundaries are shifted to ``settings.START_OF_DAY`` (an hour of the
    day) rather than midnight, so late-night reviews count toward the
    previous day.
    '''

    def __init__(self, user, user_timezone, review_began_at):
        # user: the reviewing user; user_timezone: tzinfo of their locale;
        # review_began_at: datetime the session under report started.
        self.user = user
        self.user_timezone = user_timezone
        self.review_began_at = review_began_at
        self._card_history = CardHistory.objects.of_user(user)

    @property
    def cards_reviewed(self):
        '''
        Non-unique card reviews (cards seen for the first time excluded).
        '''
        return (self._card_history
            .filter(reviewed_at__gte=self.review_began_at)
            .exclude(was_new=True)
            .count()
        )

    @property
    def cards_learned(self):
        '''
        Cards seen for the first time during this session.
        '''
        return (self._card_history
            .filter(
                reviewed_at__gte=self.review_began_at,
                was_new=True,
            )
            .count()
        )

    @property
    def cards_learned_or_reviewed_this_week(self):
        return (self._card_history
            .filter(reviewed_at__gte=_start_of_this_week(self.user_timezone))
            .count()
        )

    @property
    def current_daily_streak(self):
        # Walk backwards from today until the first day with no reviews.
        start_of_today = _start_of_today(self.user_timezone)
        day_to_check = start_of_today.date()
        streak = 0
        while True:
            if day_to_check not in self._days_reviewed():
                return streak
            day_to_check -= timedelta(days=1)
            streak += 1

    # NOTE(review): lru_cache on an instance method keeps every instance
    # alive for the cache's lifetime; fine only if instances are
    # short-lived (e.g. one per request) -- confirm.
    @lru_cache(maxsize=None)
    def _days_reviewed(self):
        '''
        Returns a set of date objects.
        '''
        # NOTE(review): tzoffset()'s second argument is an offset in
        # seconds (or a timedelta), but START_OF_DAY is compared against
        # .hour elsewhere, so it is presumably an hour value -- verify
        # this offset is intended.
        return set(dt.date() for dt in
            self._card_history
            .annotate(reviewed_day=
                TruncDay('reviewed_at', tzinfo=dateutil.tz.tzoffset(
                    self.user_timezone, settings.START_OF_DAY)))
            .values('reviewed_day')
            .distinct()
            .order_by('reviewed_day')
            .values_list('reviewed_day', flat=True)
        )

    @property
    def was_review_first_of_today(self):
        '''
        Was this review session the first of today?
        '''
        return not self._card_history.filter(
            reviewed_at__gte=_start_of_today(self.user_timezone),
            reviewed_at__lt=self.review_began_at,
        ).exists()

    @property
    def days_reviewed_by_week(self):
        '''
        Per-week review-day counts for the last _WEEKS_TO_REPORT weeks,
        ordered oldest week first.
        '''
        week_sunday = _start_of_this_week(self.user_timezone).date()
        review_days = self._days_reviewed()
        weeks = []
        for _ in range(_WEEKS_TO_REPORT):
            days_reviewed = sum(
                1 for day in [
                    week_sunday + timedelta(days=day_offset)
                    for day_offset in range(7)
                ]
                if day in review_days
            )
            # Prepend so the oldest week ends up first.
            weeks.insert(0, {
                'week': '{}/{}'.format(week_sunday.month, week_sunday.day),
                'days_reviewed': days_reviewed,
            })
            week_sunday -= timedelta(weeks=1)
        return weeks
import json
import warnings
import requests
from .exceptions import (
InvalidSearchParamException,
InvalidVersionException,
InvalidUseFirstNameAliasException,
InvalidAddressPurposeException,
NPyIException,
)
# Root endpoint of the NPPES NPI Registry API.
BASE_URI = "https://npiregistry.cms.hhs.gov/api/"
# Query parameters accepted by the API; see
# https://npiregistry.cms.hhs.gov/registry/help-api for descriptions.
VALID_SEARCH_PARAMS = (
    "number",
    "enumeration_type",
    "taxonomy_description",
    "first_name",
    "use_first_name_alias",
    "last_name",
    "organization_name",
    "address_purpose",
    "city",
    "state",
    "postal_code",
    "country_code",
)
# API versions this wrapper supports (1.0 and 2.0 are deprecated upstream).
VALID_VERSIONS = ("1.0", "2.0", "2.1")
# Allowed address_purpose values (compared after upper-casing).
VALID_ADDRESS_PURPOSE_VALUES = ("LOCATION", "MAILING", "PRIMARY", "SECONDARY")
def search(search_params, version="2.1", limit=None, skip=None):
    """
    Main wrapper function around the NPPES API.

    :param search_params: Search criteria to the NPPES API. See VALID_SEARCH_PARAMS
        for list of valid search params and
        https://npiregistry.cms.hhs.gov/registry/help-api for parameter descriptions.
    :type search_params: dict
    :param version: NPPES API version to use, defaults to '2.1'.
    :type version: str/int, optional
    :param limit: Limit results returned from API, defaults to None. If no value is
        passed, 10 results are returned by default.
    :type limit: int, optional
    :param skip: Bypass first N results from the response, defaults to None.
    :type skip: int, optional
    :return: API response as a dictionary, containing a "results_count"
        and "results" key.
    :rtype: dict
    """
    version = _clean_version(version)
    _validate_version(version)
    _validate_search_params(search_params)
    # Work on a copy so the caller's dict is not mutated with cleaned
    # values and the extra version/limit/skip keys.
    params = dict(search_params)
    if "use_first_name_alias" in params:
        use_first_name_alias = _clean_use_first_name_alias(
            params["use_first_name_alias"])
        _validate_use_first_name_alias(use_first_name_alias)
        params["use_first_name_alias"] = use_first_name_alias
    if "address_purpose" in params:
        address_purpose = _clear_address_purpose(params["address_purpose"])
        _validate_address_purpose(address_purpose)
        params["address_purpose"] = address_purpose
    params["version"] = version
    if limit is not None:
        params["limit"] = limit
    if skip is not None:
        params["skip"] = skip
    response = requests.get(BASE_URI, params=params).json()
    _validate_response(response)
    return response
def _clean_version(version):
"""
Convert version to string and append decimal if appropriate and missing
:param version: NPPES API version
:type version: int/str
:return: The cleaned version
:rtype: str
"""
version = str(version)
if version in ("1", "2"):
version += ".0"
return version
def _validate_version(version):
    """
    Validate that ``version`` is a supported NPPES API version.

    Emits a DeprecationWarning for versions scheduled for removal upstream.

    :param version: NPPES API version
    :type version: str
    :raises InvalidVersionException: if API version is invalid
    """
    if version not in VALID_VERSIONS:
        raise InvalidVersionException(
            "{} is not a supported version. Supported versions are: {}".format(
                version, ", ".join(VALID_VERSIONS)
            )
        )
    deprecation_notices = {
        "1.0": "Version 1.0 of the NPPES API will be deprecated on 2019-06-01",
        "2.0": "Version 2.0 of the NPPES API will be deprecated on 2019-09-01",
    }
    if version in deprecation_notices:
        warnings.warn(deprecation_notices[version], DeprecationWarning)
def _clean_use_first_name_alias(use_first_name_alias):
"""
Converts "True"/"true"/"False"/"false" from a string to a bool
:param use_first_name_alias: The use_first_name_alias API param value
:type use_first_name_alias: bool/str
:return: The cleaned use_first_name_alias value
:rtype: bool/str
"""
if isinstance(use_first_name_alias, str) and use_first_name_alias.lower() in (
"true",
"false",
):
return json.loads(use_first_name_alias.lower())
else:
return use_first_name_alias
def _validate_use_first_name_alias(use_first_name_alias):
    """
    Validate that ``use_first_name_alias`` is a boolean.

    :param use_first_name_alias: The use_first_name_alias API param value
    :type use_first_name_alias: bool
    :raises InvalidUseFirstNameAliasException: if use_first_name_alias is not bool
    """
    if isinstance(use_first_name_alias, bool):
        return
    raise InvalidUseFirstNameAliasException(
        "{} is not a valid value for the use_first_name_alias param. "
        "use_first_name_alias must be a bool".format(use_first_name_alias)
    )
def _clear_address_purpose(address_purpose):
"""
Converts address_purpose to all uppercase
:param address_purpose: The address_purpose API param value
:type address_purpose: str
:return: Cleaned address_purpose value
:rtype: str
"""
return address_purpose.upper()
def _validate_address_purpose(address_purpose):
    """
    Validate that ``address_purpose`` is one of the allowed values.

    :param address_purpose: The address_purpose API param value
    :type address_purpose: str
    :raises InvalidAddressPurposeException: if address_purpose is not a valid value
    """
    if address_purpose in VALID_ADDRESS_PURPOSE_VALUES:
        return
    raise InvalidAddressPurposeException(
        "{} is not a valid value for the address_purpose param. "
        "Valid values are: {}".format(
            address_purpose, ", ".join(VALID_ADDRESS_PURPOSE_VALUES)
        )
    )
def _validate_search_params(search_params):
    """
    Validate that every key in ``search_params`` is an accepted parameter.

    Raises on the first invalid key encountered, in dict iteration order.

    :param search_params: Mapping of API param to value
    :type search_params: dict
    :raises InvalidSearchParamException: if any keys are invalid values
    """
    invalid = next(
        (param for param in search_params if param not in VALID_SEARCH_PARAMS),
        None,
    )
    if invalid is not None:
        raise InvalidSearchParamException(
            "{} is not a valid parameter. Valid search_params are: {}".format(
                invalid, ", ".join(VALID_SEARCH_PARAMS)
            )
        )
def _validate_response(response):
    """
    Validate the API response to ensure there are no errors.

    :param response: The API response
    :type response: dict
    :raises NPyIException: if the response returns an error
    """
    if "Errors" in response:
        # The API returns a list of error dicts; surface the first one.
        first_error = response["Errors"][0]["description"]
        raise NPyIException(first_error)
import warnings
import requests
from .exceptions import (
InvalidSearchParamException,
InvalidVersionException,
InvalidUseFirstNameAliasException,
InvalidAddressPurposeException,
NPyIException,
)
# Root endpoint of the NPPES NPI Registry API.
BASE_URI = "https://npiregistry.cms.hhs.gov/api/"
# Query parameters accepted by the API; see
# https://npiregistry.cms.hhs.gov/registry/help-api for descriptions.
VALID_SEARCH_PARAMS = (
    "number",
    "enumeration_type",
    "taxonomy_description",
    "first_name",
    "use_first_name_alias",
    "last_name",
    "organization_name",
    "address_purpose",
    "city",
    "state",
    "postal_code",
    "country_code",
)
# API versions this wrapper supports (1.0 and 2.0 are deprecated upstream).
VALID_VERSIONS = ("1.0", "2.0", "2.1")
# Allowed address_purpose values (compared after upper-casing).
VALID_ADDRESS_PURPOSE_VALUES = ("LOCATION", "MAILING", "PRIMARY", "SECONDARY")
def search(search_params, version="2.1", limit=None, skip=None):
    """
    Main wrapper function around the NPPES API.

    :param search_params: Search criteria to the NPPES API. See VALID_SEARCH_PARAMS
        for list of valid search params and
        https://npiregistry.cms.hhs.gov/registry/help-api for parameter descriptions.
    :type search_params: dict
    :param version: NPPES API version to use, defaults to '2.1'.
    :type version: str/int, optional
    :param limit: Limit results returned from API, defaults to None. If no value is
        passed, 10 results are returned by default.
    :type limit: int, optional
    :param skip: Bypass first N results from the response, defaults to None.
    :type skip: int, optional
    :return: API response as a dictionary, containing a "results_count"
        and "results" key.
    :rtype: dict
    """
    version = _clean_version(version)
    _validate_version(version)
    _validate_search_params(search_params)
    # Work on a copy so the caller's dict is not mutated with cleaned
    # values and the extra version/limit/skip keys.
    params = dict(search_params)
    if "use_first_name_alias" in params:
        use_first_name_alias = _clean_use_first_name_alias(
            params["use_first_name_alias"])
        _validate_use_first_name_alias(use_first_name_alias)
        params["use_first_name_alias"] = use_first_name_alias
    if "address_purpose" in params:
        address_purpose = _clear_address_purpose(params["address_purpose"])
        _validate_address_purpose(address_purpose)
        params["address_purpose"] = address_purpose
    params["version"] = version
    if limit is not None:
        params["limit"] = limit
    if skip is not None:
        params["skip"] = skip
    response = requests.get(BASE_URI, params=params).json()
    _validate_response(response)
    return response
def _clean_version(version):
"""
Convert version to string and append decimal if appropriate and missing
:param version: NPPES API version
:type version: int/str
:return: The cleaned version
:rtype: str
"""
version = str(version)
if version in ("1", "2"):
version += ".0"
return version
def _validate_version(version):
    """
    Validate that ``version`` is a supported NPPES API version.

    Emits a DeprecationWarning for versions scheduled for removal upstream.

    :param version: NPPES API version
    :type version: str
    :raises InvalidVersionException: if API version is invalid
    """
    if version not in VALID_VERSIONS:
        raise InvalidVersionException(
            "{} is not a supported version. Supported versions are: {}".format(
                version, ", ".join(VALID_VERSIONS)
            )
        )
    deprecation_notices = {
        "1.0": "Version 1.0 of the NPPES API will be deprecated on 2019-06-01",
        "2.0": "Version 2.0 of the NPPES API will be deprecated on 2019-09-01",
    }
    if version in deprecation_notices:
        warnings.warn(deprecation_notices[version], DeprecationWarning)
def _clean_use_first_name_alias(use_first_name_alias):
"""
Converts "True"/"true"/"False"/"false" from a string to a bool
:param use_first_name_alias: The use_first_name_alias API param value
:type use_first_name_alias: bool/str
:return: The cleaned use_first_name_alias value
:rtype: bool/str
"""
if isinstance(use_first_name_alias, str) and use_first_name_alias.lower() in (
"true",
"false",
):
return json.loads(use_first_name_alias.lower())
else:
return use_first_name_alias
def _validate_use_first_name_alias(use_first_name_alias):
    """
    Validate that ``use_first_name_alias`` is a boolean.

    :param use_first_name_alias: The use_first_name_alias API param value
    :type use_first_name_alias: bool
    :raises InvalidUseFirstNameAliasException: if use_first_name_alias is not bool
    """
    if isinstance(use_first_name_alias, bool):
        return
    raise InvalidUseFirstNameAliasException(
        "{} is not a valid value for the use_first_name_alias param. "
        "use_first_name_alias must be a bool".format(use_first_name_alias)
    )
def _clear_address_purpose(address_purpose):
"""
Converts address_purpose to all uppercase
:param address_purpose: The address_purpose API param value
:type address_purpose: str
:return: Cleaned address_purpose value
:rtype: str
"""
return address_purpose.upper()
def _validate_address_purpose(address_purpose):
    """
    Validate that ``address_purpose`` is one of the allowed values.

    :param address_purpose: The address_purpose API param value
    :type address_purpose: str
    :raises InvalidAddressPurposeException: if address_purpose is not a valid value
    """
    if address_purpose in VALID_ADDRESS_PURPOSE_VALUES:
        return
    raise InvalidAddressPurposeException(
        "{} is not a valid value for the address_purpose param. "
        "Valid values are: {}".format(
            address_purpose, ", ".join(VALID_ADDRESS_PURPOSE_VALUES)
        )
    )
def _validate_search_params(search_params):
    """
    Validate that every key in ``search_params`` is an accepted parameter.

    Raises on the first invalid key encountered, in dict iteration order.

    :param search_params: Mapping of API param to value
    :type search_params: dict
    :raises InvalidSearchParamException: if any keys are invalid values
    """
    invalid = next(
        (param for param in search_params if param not in VALID_SEARCH_PARAMS),
        None,
    )
    if invalid is not None:
        raise InvalidSearchParamException(
            "{} is not a valid parameter. Valid search_params are: {}".format(
                invalid, ", ".join(VALID_SEARCH_PARAMS)
            )
        )
def _validate_response(response):
    """
    Validate the API response to ensure there are no errors.

    :param response: The API response
    :type response: dict
    :raises NPyIException: if the response returns an error
    """
    if "Errors" in response:
        # The API returns a list of error dicts; surface the first one.
        first_error = response["Errors"][0]["description"]
        raise NPyIException(first_error)
from dataclasses import dataclass
from datetime import datetime
from typing import Union
from json import dumps
from aiohttp import FormData
from ..errors.message import SendMessageFailed
from ..errors.channel import *
@dataclass
class Guild:
    """Guild class.

    Args:
        client (Client): Krema client.
        data (dict): Sent packet from websocket.

    Attributes are same with https://discord.com/developers/docs/resources/guild#guild-object-guild-structure
    """

    def __init__(self, client, data: dict) -> None:
        from ..utils import convert_iso
        from .user import Member

        def snowflake(key):
            # Snowflake IDs arrive as strings; convert to int when present.
            raw = data.get(key)
            return int(raw) if raw is not None else None

        def timestamp(key):
            # ISO-8601 timestamps become datetime objects when present.
            raw = data.get(key)
            return convert_iso(raw) if raw is not None else None

        def channels_from(key):
            raw = data.get(key)
            return [Channel(client, item) for item in raw] if raw is not None else None

        self.client = client
        self.id: int = int(data.get("id"))
        self.name: str = data.get("name")
        self.icon: Union[str, None] = data.get("icon")
        self.icon_hash: Union[str, None] = data.get("icon_hash")
        self.splash: Union[str, None] = data.get("splash")
        self.discovery_splash: Union[str, None] = data.get("discovery_splash")
        self.owner: Union[bool, None] = data.get("owner")
        self.owner_id: Union[int, None] = snowflake("owner_id")
        self.permissions: Union[str, None] = data.get("permissions")
        self.region: Union[str, None] = data.get("region")
        self.afk_channel_id: Union[int, None] = snowflake("afk_channel_id")
        self.afk_timeout: int = data.get("afk_timeout")
        self.widget_enabled: Union[bool, None] = data.get("widget_enabled")
        self.widget_channel_id: Union[int, None] = snowflake("widget_channel_id")
        self.verification_level: int = data.get("verification_level")
        self.default_message_notifications: int = data.get("default_message_notifications")
        self.explicit_content_filter: int = data.get("explicit_content_filter")
        self.roles: list = data.get("roles")
        self.emojis: list = data.get("emojis")
        self.features: list = data.get("features")
        self.mfa_level: int = data.get("mfa_level")
        self.application_id: Union[int, None] = snowflake("application_id")
        self.system_channel_id: Union[int, None] = snowflake("system_channel_id")
        self.system_channel_flags: Union[int, None] = data.get("system_channel_flags")
        self.rules_channel_id: Union[int, None] = snowflake("rules_channel_id")
        self.joined_at: Union[datetime, None] = timestamp("joined_at")
        self.large: Union[bool, None] = data.get("large")
        self.unavailable: Union[bool, None] = data.get("unavailable")
        self.member_count: Union[int, None] = data.get("member_count")
        self.voice_states: Union[list, None] = data.get("voice_states")
        raw_members = data.get("members")
        self.members: Union[list, None] = [
            Member(client, item) for item in raw_members
        ] if raw_members is not None else None
        self.channels: Union[list, None] = channels_from("channels")
        self.threads: Union[list, None] = channels_from("threads")
        self.presences: Union[list, None] = data.get("presences")
        self.max_presences: Union[int, None] = data.get("max_presences")
        self.max_members: Union[int, None] = data.get("max_members")
        self.vanity_url_code: Union[str, None] = data.get("vanity_url_code")
        self.description: Union[str, None] = data.get("description")
        self.banner: Union[str, None] = data.get("banner")
        self.premium_tier: int = data.get("premium_tier")
        self.premium_subscription_count: Union[int, None] = data.get("premium_subscription_count")
        self.preferred_locale: str = data.get("preferred_locale")
        self.public_updates_channel_id: Union[int, None] = snowflake("public_updates_channel_id")
        self.max_video_channel_users: Union[int, None] = data.get("max_video_channel_users")
        self.approximate_member_count: Union[int, None] = data.get("approximate_member_count")
        self.approximate_presence_count: Union[int, None] = data.get("approximate_presence_count")
        self.welcome_screen: Union[dict, None] = data.get("welcome_screen")
        self.nsfw_level: int = data.get("nsfw_level")
        self.stage_instances: Union[list, None] = data.get("stage_instances")
@dataclass
class Channel:
    """Channel class.

    Args:
        client (Client): Krema client.
        data (dict): Sent packet from websocket.

    Attributes:
        client (Client): Krema client.
        id (int): Channel ID.
        type (int): Channel type.
        guild_id (int, None): Guild ID.
        position (int, None): Channel position.
        permission_overwrites (list, None): List of permission_overwrite.
        name (str, None): Channel name.
        topic (str, None): Channel topic.
        nsfw (bool, None): Channel nsfw boolean.
        last_message_id (int, None): Last message ID in the channel.
        bitrate (int, None): Channel bitrate (voice).
        user_limit (int, None): "The user limit of the voice channel" (voice).
        rate_limit_per_user (int, None): Channel slowmode second.
        recipients (list, None): List of user object.
        icon (str, None): Icon hash for DM channel.
        owner_id (int, None): DM channel owner id.
        application_id (int, None): "Application id of the group DM creator if it is bot-created".
        parent_id (int, None): Parent category ID for guild channels; owning text channel ID for threads.
        last_pin_timestamp (datetime, None): When the last pinned message was pinned.
        rtc_region (str, None): Voice region.
        video_quality_mode (int, None): Camera video quality mode of the voice channel.
        message_count (int, None): Approximate message count of a thread (stops counting at 50).
        member_count (int, None): Approximate user count of a thread (stops counting at 50).
        thread_metadata (dict, None): Thread-specific fields not needed by other channels.
        member (ThreadMember, None): Thread member object for the current user, if joined.
        default_auto_archive_duration (int, None): Default auto-archive duration (minutes) for new threads.
    """

    def __init__(self, client, data: dict) -> None:
        from .user import User, ThreadMember
        from ..utils import convert_iso

        self.client = client
        self.id: int = int(data.get("id"))
        self.type: int = data.get("type")
        # Snowflake IDs arrive as strings; convert to int when present.
        self.guild_id: Union[int, None] = int(
            data.get("guild_id")) if data.get("guild_id") is not None else None
        self.position: Union[int, None] = data.get("position")
        self.permission_overwrites: Union[list, None] = data.get(
            "permission_overwrites")
        self.name: Union[str, None] = data.get("name")
        self.topic: Union[str, None] = data.get("topic")
        self.nsfw: Union[bool, None] = data.get("nsfw")
        self.last_message_id: Union[int, None] = int(
            data.get("last_message_id")) if data.get("last_message_id") is not None else None
        self.bitrate: Union[int, None] = data.get("bitrate")
        self.user_limit: Union[int, None] = data.get("user_limit")
        self.rate_limit_per_user: Union[int, None] = data.get("rate_limit_per_user")
        self.recipients: Union[list, None] = [User(self.client, i) for i in data.get(
            "recipients")] if data.get("recipients") is not None else None
        self.icon: Union[str, None] = data.get("icon")
        self.owner_id: Union[int, None] = int(
            data.get("owner_id")) if data.get("owner_id") is not None else None
        self.application_id: Union[int, None] = int(
            data.get("application_id")) if data.get("application_id") is not None else None
        self.parent_id: Union[int, None] = int(
            data.get("parent_id")) if data.get("parent_id") is not None else None
        self.last_pin_timestamp: Union[datetime, None] = convert_iso(
            data.get("last_pin_timestamp")) if data.get("last_pin_timestamp") is not None else None
        self.rtc_region: Union[str, None] = data.get("rtc_region")
        self.video_quality_mode: Union[int, None] = data.get("video_quality_mode")
        self.message_count: Union[int, None] = data.get("message_count")
        self.member_count: Union[int, None] = data.get("member_count")
        self.thread_metadata: Union[dict, None] = data.get("thread_metadata")
        self.member: Union[ThreadMember, None] = ThreadMember(
            data.get("member")) if data.get("member") is not None else None
        self.default_auto_archive_duration: Union[int, None] = data.get(
            "default_auto_archive_duration")

    async def fetch_messages(self, limit: int = 10):
        """Fetch messages from channel.

        Args:
            limit (int): Maximum message limit (default is 10).

        Returns:
            list: List of message object.

        Raises:
            FetchChannelMessagesFailed: Fetching the messages from channel is failed.
        """
        from .message import Message

        atom, result = await self.client.http.request("GET", f"/channels/{self.id}/messages?limit={limit}")
        if atom == 0:
            return [Message(self.client, i) for i in result]
        else:
            raise FetchChannelMessagesFailed(result)

    async def purge(self, limit: int = 2):
        """Bulk-delete messages from channel.

        Args:
            limit (int): Maximum message limit (default is 2).

        Returns:
            list: List of the messages submitted for bulk deletion.

        Raises:
            BulkDeleteMessagesFailed: Channel purge is failed.
        """
        messages = await self.fetch_messages(limit)
        atom, result = await self.client.http.request("POST", f"/channels/{self.id}/messages/bulk-delete", json={
            "messages": [i.id for i in messages]
        })
        if atom == 0:
            return messages
        else:
            raise BulkDeleteMessagesFailed(result)

    async def send(self, file: dict = None, **kwargs):
        """Send message to the text-channel.

        Args:
            file (dict): For send a message / embed attachment with file, use `krema.utils.file_builder` for make it easier.
            **kwargs: https://discord.com/developers/docs/resources/channel#create-message-jsonform-params

        Returns:
            Message: Sent message object.

        Raises:
            SendMessageFailed: Sending the message is failed.
        """
        from .message import Message

        params, payload = {}, FormData()
        if file is not None:
            # Attachments require multipart/form-data with the JSON body
            # carried in the payload_json field.
            payload.add_field(name='payload_json', value=dumps(
                kwargs), content_type="application/json")
            payload.add_field(**file)
            params["data"] = payload
        else:
            params["json"] = kwargs
        atom, result = await self.client.http.request("POST", f"/channels/{self.id}/messages", **params)
        if atom == 0:
            return Message(self.client, result)
        else:
            raise SendMessageFailed(result)

    async def edit(self, **kwargs):
        """Edit channel with API params.

        Args:
            **kwargs: https://discord.com/developers/docs/resources/channel#modify-channel

        Returns:
            Channel: New channel.

        Raises:
            EditChannelFailed: Editing the channel is failed.
        """
        atom, result = await self.client.http.request("PATCH", f"/channels/{self.id}", json=kwargs)
        if atom == 0:
            return Channel(self.client, result)
        else:
            raise EditChannelFailed(result)

    async def delete(self):
        """Delete channel.

        Returns:
            Channel: Deleted channel.

        Raises:
            DeleteChannelFailed: Deleting the channel is failed.
        """
        atom, result = await self.client.http.request("DELETE", f"/channels/{self.id}")
        if atom == 0:
            return Channel(self.client, result)
        else:
            raise DeleteChannelFailed(result)

    async def fetch_message(self, message_id: int):
        """Fetch a message from channel by ID.

        Args:
            message_id: Message ID.

        Returns:
            Message: Found message.

        Raises:
            FetchChannelMessageFailed: Fetching the message is failed.
        """
        from .message import Message

        atom, result = await self.client.http.request("GET", f"/channels/{self.id}/messages/{message_id}")
        if atom == 0:
            return Message(self.client, result)
        else:
            raise FetchChannelMessageFailed(result)
from datetime import datetime
from typing import Union
from json import dumps
from aiohttp import FormData
from ..errors.message import SendMessageFailed
from ..errors.channel import *
@dataclass
class Guild:
    """Guild class.

    Args:
        client (Client): Krema client.
        data (dict): Sent packet from websocket.

    Attributes are same with https://discord.com/developers/docs/resources/guild#guild-object-guild-structure
    """

    def __init__(self, client, data: dict) -> None:
        from ..utils import convert_iso
        from .user import Member

        def snowflake(key):
            # Snowflake IDs arrive as strings; convert to int when present.
            raw = data.get(key)
            return int(raw) if raw is not None else None

        def timestamp(key):
            # ISO-8601 timestamps become datetime objects when present.
            raw = data.get(key)
            return convert_iso(raw) if raw is not None else None

        def channels_from(key):
            raw = data.get(key)
            return [Channel(client, item) for item in raw] if raw is not None else None

        self.client = client
        self.id: int = int(data.get("id"))
        self.name: str = data.get("name")
        self.icon: Union[str, None] = data.get("icon")
        self.icon_hash: Union[str, None] = data.get("icon_hash")
        self.splash: Union[str, None] = data.get("splash")
        self.discovery_splash: Union[str, None] = data.get("discovery_splash")
        self.owner: Union[bool, None] = data.get("owner")
        self.owner_id: Union[int, None] = snowflake("owner_id")
        self.permissions: Union[str, None] = data.get("permissions")
        self.region: Union[str, None] = data.get("region")
        self.afk_channel_id: Union[int, None] = snowflake("afk_channel_id")
        self.afk_timeout: int = data.get("afk_timeout")
        self.widget_enabled: Union[bool, None] = data.get("widget_enabled")
        self.widget_channel_id: Union[int, None] = snowflake("widget_channel_id")
        self.verification_level: int = data.get("verification_level")
        self.default_message_notifications: int = data.get("default_message_notifications")
        self.explicit_content_filter: int = data.get("explicit_content_filter")
        self.roles: list = data.get("roles")
        self.emojis: list = data.get("emojis")
        self.features: list = data.get("features")
        self.mfa_level: int = data.get("mfa_level")
        self.application_id: Union[int, None] = snowflake("application_id")
        self.system_channel_id: Union[int, None] = snowflake("system_channel_id")
        self.system_channel_flags: Union[int, None] = data.get("system_channel_flags")
        self.rules_channel_id: Union[int, None] = snowflake("rules_channel_id")
        self.joined_at: Union[datetime, None] = timestamp("joined_at")
        self.large: Union[bool, None] = data.get("large")
        self.unavailable: Union[bool, None] = data.get("unavailable")
        self.member_count: Union[int, None] = data.get("member_count")
        self.voice_states: Union[list, None] = data.get("voice_states")
        raw_members = data.get("members")
        self.members: Union[list, None] = [
            Member(client, item) for item in raw_members
        ] if raw_members is not None else None
        self.channels: Union[list, None] = channels_from("channels")
        self.threads: Union[list, None] = channels_from("threads")
        self.presences: Union[list, None] = data.get("presences")
        self.max_presences: Union[int, None] = data.get("max_presences")
        self.max_members: Union[int, None] = data.get("max_members")
        self.vanity_url_code: Union[str, None] = data.get("vanity_url_code")
        self.description: Union[str, None] = data.get("description")
        self.banner: Union[str, None] = data.get("banner")
        self.premium_tier: int = data.get("premium_tier")
        self.premium_subscription_count: Union[int, None] = data.get("premium_subscription_count")
        self.preferred_locale: str = data.get("preferred_locale")
        self.public_updates_channel_id: Union[int, None] = snowflake("public_updates_channel_id")
        self.max_video_channel_users: Union[int, None] = data.get("max_video_channel_users")
        self.approximate_member_count: Union[int, None] = data.get("approximate_member_count")
        self.approximate_presence_count: Union[int, None] = data.get("approximate_presence_count")
        self.welcome_screen: Union[dict, None] = data.get("welcome_screen")
        self.nsfw_level: int = data.get("nsfw_level")
        self.stage_instances: Union[list, None] = data.get("stage_instances")
@dataclass
class Channel:
"""Channel class.
Args:
client (Client): Krema client.
data (dict): Sent packet from websocket.
Attributes:
client (Client): Krema client.
id (int): Channel ID.
type (int): Channel type.
guild_id (int, None): Guild ID.
position (int, None): Channel position.
permission_overwrites (list, None): List of permission_overwrite.
name (str, None): Channel name.
topic (str, None): Channel topic.
nsfw (bool, None): Channel nsfw boolean.
last_message_id (int, None): Last message ID in the channel.
bitrate (int, None): Channel bitrate (voice).
user_limit (int, None): "The user limit of the voice channel" (voice).
rate_limit_per_user (int, None): Channel slowmode second.
recipients (list, None): List of user object.
icon (str, None): Icon hash for DM channel.
owner_id (int, None): DM channel owner id.
application_id (int, None): "Application id of the group DM creator if it is bot-created".
parent_id (int, None): "for guild channels: id of the parent category for a channel (each parent category can contain up to 50 channels), for threads: id of the text channel this thread was created".
last_pin_timestamp (datetime, None): When the last pinned message was pinned.
rtc_region (str, None): Voice region.
video_quality_mode (int, None): "the camera video quality mode of the voice channel, 1 when not present".
message_count (int, None): "an approximate count of messages in a thread, stops counting at 50".
member_count (int, None): "an approximate count of users in a thread, stops counting at 50".
thread_metadata (dict, None): "Thread-specific fields not needed by other channels".
member (ThreadMember, None): "Thread member object for the current user, if they have joined the thread, only included on certain API endpoints".
default_auto_archive_duration (int, None): "Default duration for newly created threads, in minutes, to automatically archive the thread after recent activity, can be set to: 60, 1440, 4320, 10080".
"""
def __init__(self, client, data: dict) -> None:
from .user import User, ThreadMember
from ..utils import convert_iso
self.client = client
self.id: int = int(data.get("id"))
self.type: int = data.get("type")
self.guild_id: Union[int, None] = int(
data.get("guild_id")) if data.get("guild_id") is not None else None
self.position: Union[int, None] = data.get("position")
self.permission_overwrites: Union[list, None] = data.get(
"permission_overwrites")
self.name: Union[str, None] = data.get("name")
self.topic: Union[str, None] = data.get("topic")
self.nsfw: Union[bool, None] = data.get("nsfw")
self.last_message_id: Union[int, None] = int(
data.get("last_message_id")) if data.get("last_message_id") is not None else None
self.bitrate: Union[int, None] = data.get("bitrate")
self.user_limit: Union[int, None] = data.get("user_limit")
self.rate_limit_per_user: Union[int,
None] = data.get("rate_limit_per_user")
self.recipients: Union[list, None] = [User(self.client, i) for i in data.get(
"recipients")] if data.get("recipients") is not None else None
self.icon: Union[str, None] = data.get("icon")
self.owner_id: Union[int, None] = int(
data.get("owner_id")) if data.get("owner_id") is not None else None
self.application_id: Union[int, None] = int(
data.get("application_id")) if data.get("application_id") is not None else None
self.parent_id: Union[int, None] = int(
data.get("parent_id")) if data.get("parent_id") is not None else None
self.last_pin_timestamp: Union[datetime, None] = convert_iso(
data.get("last_pin_timestamp")) if data.get("last_pin_timestamp") is not None else None
self.rtc_region: Union[str, None] = data.get("rtc_region")
self.video_quality_mode: Union[int,
None] = data.get("video_quality_mode")
self.message_count: Union[int, None] = data.get("message_count")
self.member_count: Union[int, None] = data.get("member_count")
self.thread_metadata: Union[dict, None] = data.get("thread_metadata")
self.member: Union[ThreadMember, None] = ThreadMember(
data.get("member")) if data.get("member") is not None else None
self.default_auto_archive_duration: Union[int, None] = data.get(
"default_auto_archive_duration")
pass
async def fetch_messages(self, limit: int = 10):
"""Fetch messages from channel.
Args:
limit (int): Maximum message limit (default is 10).
Returns:
list: List of message object.
Raises:
FetchChannelMessagesFailed: Fetching the messages from channel is failed.
"""
from .message import Message
atom, result = await self.client.http.request("GET", f"/channels/{self.id}/messages?limit={limit}")
if atom == 0:
return [Message(self.client, i) for i in result]
else:
raise FetchChannelMessagesFailed(result)
async def purge(self, limit: int = 2):
"""Bulk-delete messages from channel.
Args:
limit (int): Maximum message limit (default is 2).
Returns:
list: List of purged? messages.
Raises:
BulkDeleteMessagesFailed: Channel purge is failed.
"""
messages = await self.fetch_messages(limit)
atom, result = await self.client.http.request("POST", f"/channels/{self.id}/messages/bulk-delete", json={
"messages": [i.id for i in messages]
})
if atom == 0:
return messages
else:
raise BulkDeleteMessagesFailed(result)
async def send(self, file: dict = None, **kwargs):
"""Send message to the text-channel.
Args:
file (dict): For send a message / embed attachment with file, use `krema.utils.file_builder` for make it easier.
**kwargs: https://discord.com/developers/docs/resources/channel#create-message-jsonform-params
Returns:
Message: Sent message object.
Raises:
SendMessageFailed: Sending the message is failed.
"""
from .message import Message
params, payload = {}, FormData()
if file is not None:
payload.add_field(name='payload_json', value=dumps(
kwargs), content_type="application/json")
payload.add_field(**file)
params["data"] = payload
else:
params["json"] = kwargs
atom, result = await self.client.http.request("POST", f"/channels/{self.id}/messages", **params)
if atom == 0:
return Message(self.client, result)
else:
raise SendMessageFailed(result)
async def edit(self, **kwargs):
"""Edit channel with API params.
Args:
**kwargs: https://discord.com/developers/docs/resources/channel#modify-channel
Returns:
Channel: New channel.
Raises:
EditChannelFailed: Editing the channel is failed.
"""
atom, result = await self.client.http.request("PATCH", f"/channels/{self.id}", json=kwargs)
if atom == 0:
return Channel(self.client, result)
else:
raise EditChannelFailed(result)
async def delete(self):
"""Delete channel.
Returns:
Channel: Deleted channel.
Raises:
DeleteChannelFailed: Editing the channel is failed.
"""
atom, result = await self.client.http.request("DELETE", f"/channels/{self.id}")
if atom == 0:
return Channel(self.client, result)
else:
raise DeleteChannelFailed(result)
async def fetch_message(self, message_id: int):
"""Fetch a message from channel by ID.
Args:
message_id: Message ID.
Returns:
Message: Found message.
Raises:
FetchChannelMessageFailed: Fetching the message is failed.
"""
from .message import Message
atom, result = await self.client.http.request("GET", f"/channels/{self.id}/messages/{message_id}")
if atom == 0:
return Message(self.client, result)
else:
raise FetchChannelMessageFailed(result) | 0.890634 | 0.194081 |
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
import json
class ApiTests(APITestCase):
fixtures = ['test.json']
def test_root_view_contains_link_to_documentation(self):
url = reverse('root')
response = self.client.get(url)
self.assertContains(response, '/docs/', status_code=status.HTTP_200_OK)
def test_swagger_documentation_is_visible_to_public(self):
url = reverse('swagger-ui')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_listing_all_events(self):
url = reverse('event-list')
response = self.client.get(url, format='json')
self.assertContains(response, 'OPEN_SOURCED',
status_code=status.HTTP_200_OK)
self.assertContains(response, 'CLOSE_SOURCED')
self.assertContains(response, 'Matrix: Recompilations')
def __get_remaining_seats(self, event_id):
url = reverse('event-detail', kwargs={'pk': event_id})
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
json_response = json.loads(response.content)
remaining_seats = set(
map(int, json_response['remaining_seats'].split(', ')))
return remaining_seats
def test_all_seats_are_available(self):
remaining_seats = self.__get_remaining_seats(1)
for i in range(1, 12):
self.assertIn(i, remaining_seats)
def test_unreserved_seat_is_available(self):
remaining_seats = self.__get_remaining_seats(3)
self.assertIn(1, remaining_seats)
def test_reserved_and_paid_seat_is_not_available(self):
remaining_seats = self.__get_remaining_seats(3)
self.assertNotIn(5, remaining_seats)
def test_reserved_but_expired_seat_is_available(self):
remaining_seats = self.__get_remaining_seats(3)
self.assertIn(9, remaining_seats) | ticketapi/api/tests.py | from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
import json
class ApiTests(APITestCase):
fixtures = ['test.json']
def test_root_view_contains_link_to_documentation(self):
url = reverse('root')
response = self.client.get(url)
self.assertContains(response, '/docs/', status_code=status.HTTP_200_OK)
def test_swagger_documentation_is_visible_to_public(self):
url = reverse('swagger-ui')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_listing_all_events(self):
url = reverse('event-list')
response = self.client.get(url, format='json')
self.assertContains(response, 'OPEN_SOURCED',
status_code=status.HTTP_200_OK)
self.assertContains(response, 'CLOSE_SOURCED')
self.assertContains(response, 'Matrix: Recompilations')
def __get_remaining_seats(self, event_id):
url = reverse('event-detail', kwargs={'pk': event_id})
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
json_response = json.loads(response.content)
remaining_seats = set(
map(int, json_response['remaining_seats'].split(', ')))
return remaining_seats
def test_all_seats_are_available(self):
remaining_seats = self.__get_remaining_seats(1)
for i in range(1, 12):
self.assertIn(i, remaining_seats)
def test_unreserved_seat_is_available(self):
remaining_seats = self.__get_remaining_seats(3)
self.assertIn(1, remaining_seats)
def test_reserved_and_paid_seat_is_not_available(self):
remaining_seats = self.__get_remaining_seats(3)
self.assertNotIn(5, remaining_seats)
def test_reserved_but_expired_seat_is_available(self):
remaining_seats = self.__get_remaining_seats(3)
self.assertIn(9, remaining_seats) | 0.494385 | 0.228253 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append('./gen-py')
from FacePi.ttypes import PersonEntry
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from tensorflow.python.platform import gfile
import numpy as np
import sys
from .DetectAndAlign import DetectAndAlign as detect_and_align
from .IdData import getIdData as id_data
from scipy import misc
import re
import cv2
import time
import pickle
sys.path.append('../../')
import config
class StartTensor():
def __init__(self):
pass
@classmethod
def find_matching_id(self, id_dataset, embedding):
threshold = 1.1
min_dist = 10.0
matching_id = None
for id_data_in_for in id_dataset:
dist = self.get_embedding_distance(id_data_in_for.embedding, embedding)
if dist < threshold and dist < min_dist:
min_dist = dist
matching_id = id_data_in_for.name
return matching_id, min_dist
@staticmethod
def get_embedding_distance(emb1, emb2):
dist = np.sqrt(np.sum(np.square(np.subtract(emb1, emb2))))
return dist
@classmethod
def save_model(self):
model_exp = config.tensor_model_path
meta_file, ckpt_file = self.get_model_filenames()
saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file))
saver.save(self.sess, os.path.join(model_exp, ckpt_file))
@staticmethod
def get_model_filenames():
model_dir = config.tensor_model_path
files = os.listdir(model_dir)
meta_files = [s for s in files if s.endswith('.meta')]
if len(meta_files) == 0:
raise ValueError('No meta file found in the model directory (%s)' % model_dir)
elif len(meta_files) > 1:
raise ValueError('There should not be more than one meta file in the model directory (%s)' % model_dir)
meta_file = meta_files[0]
meta_files = [s for s in files if '.ckpt' in s]
max_step = -1
for f in files:
step_str = re.match(r'(^model-[\w\- ]+.ckpt-(\d+))', f)
if step_str is not None and len(step_str.groups()) >= 2:
step = int(step_str.groups()[1])
if step > max_step:
max_step = step
ckpt_file = step_str.groups()[0]
return meta_file, ckpt_file
@staticmethod
def save_id_dataset(obj):
with open(config.tensor_id_file, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
@staticmethod
def load_id_dataset():
try:
with open(config.tensor_id_file, 'rb') as f:
return pickle.load(f)
except FileNotFoundError:
raise Exception
@classmethod
def train(self, sess):
self.sess = sess
try:
from subprocess import call
call("find ./ids/ -name '.DS_Store' -type f -delete")
except:
pass
pnet, rnet, onet = detect_and_align.create_mtcnn(self.sess, None)
# self.load_model()
# id_dataset = self.load_id_dataset()
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
id_dataset = id_data().get_id_data(config.tensor_id_file, pnet, rnet, onet, sess, embeddings, images_placeholder, phase_train_placeholder)
self.save_model()
self.save_id_dataset(id_dataset)
return id_dataset
@classmethod
def loadWithoutTrainFromFrame(self, sess, frame):
self.sess = sess
personList = []
print("in tensor 158")
pnet, rnet, onet = detect_and_align.create_mtcnn(self.sess, None)
# self.load_model()
try:
id_dataset = self.load_id_dataset()
except:
id_dataset = self.train(sess)
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
face_patches, padded_bounding_boxes, landmarks = detect_and_align.align_image(frame, pnet, rnet, onet)
print(face_patches)
if len(face_patches) > 0:
face_patches = np.stack(face_patches)
feed_dict = {images_placeholder: face_patches, phase_train_placeholder: False}
embs = self.sess.run(embeddings, feed_dict=feed_dict)
print('Matches in frame:')
for i in range(len(embs)):
bb = padded_bounding_boxes[i]
matching_id, dist = self.find_matching_id(id_dataset, embs[i, :])
if matching_id:
person = PersonEntry()
person.person = matching_id
person.chance = dist
person.image = pickle.dumps(obj=frame, protocol=None, fix_imports=False)
person.algoritm = 'TensorFlow'
personList.append(person)
print('Hi %s! Distance: %1.4f' % (matching_id, dist))
else:
matching_id = 'Unkown'
print('Unkown! Couldn\'t find match.')
return personList
if __name__ == '__main__':
try:
from subprocess import call
call("find ./ids/ -name '.DS_Store' -type f -delete")
except:
pass | FacePi/src/FaceRecognition/TensorFlow/startTensor.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append('./gen-py')
from FacePi.ttypes import PersonEntry
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from tensorflow.python.platform import gfile
import numpy as np
import sys
from .DetectAndAlign import DetectAndAlign as detect_and_align
from .IdData import getIdData as id_data
from scipy import misc
import re
import cv2
import time
import pickle
sys.path.append('../../')
import config
class StartTensor():
def __init__(self):
pass
@classmethod
def find_matching_id(self, id_dataset, embedding):
threshold = 1.1
min_dist = 10.0
matching_id = None
for id_data_in_for in id_dataset:
dist = self.get_embedding_distance(id_data_in_for.embedding, embedding)
if dist < threshold and dist < min_dist:
min_dist = dist
matching_id = id_data_in_for.name
return matching_id, min_dist
@staticmethod
def get_embedding_distance(emb1, emb2):
dist = np.sqrt(np.sum(np.square(np.subtract(emb1, emb2))))
return dist
@classmethod
def save_model(self):
model_exp = config.tensor_model_path
meta_file, ckpt_file = self.get_model_filenames()
saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file))
saver.save(self.sess, os.path.join(model_exp, ckpt_file))
@staticmethod
def get_model_filenames():
model_dir = config.tensor_model_path
files = os.listdir(model_dir)
meta_files = [s for s in files if s.endswith('.meta')]
if len(meta_files) == 0:
raise ValueError('No meta file found in the model directory (%s)' % model_dir)
elif len(meta_files) > 1:
raise ValueError('There should not be more than one meta file in the model directory (%s)' % model_dir)
meta_file = meta_files[0]
meta_files = [s for s in files if '.ckpt' in s]
max_step = -1
for f in files:
step_str = re.match(r'(^model-[\w\- ]+.ckpt-(\d+))', f)
if step_str is not None and len(step_str.groups()) >= 2:
step = int(step_str.groups()[1])
if step > max_step:
max_step = step
ckpt_file = step_str.groups()[0]
return meta_file, ckpt_file
@staticmethod
def save_id_dataset(obj):
with open(config.tensor_id_file, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
@staticmethod
def load_id_dataset():
try:
with open(config.tensor_id_file, 'rb') as f:
return pickle.load(f)
except FileNotFoundError:
raise Exception
@classmethod
def train(self, sess):
self.sess = sess
try:
from subprocess import call
call("find ./ids/ -name '.DS_Store' -type f -delete")
except:
pass
pnet, rnet, onet = detect_and_align.create_mtcnn(self.sess, None)
# self.load_model()
# id_dataset = self.load_id_dataset()
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
id_dataset = id_data().get_id_data(config.tensor_id_file, pnet, rnet, onet, sess, embeddings, images_placeholder, phase_train_placeholder)
self.save_model()
self.save_id_dataset(id_dataset)
return id_dataset
@classmethod
def loadWithoutTrainFromFrame(self, sess, frame):
self.sess = sess
personList = []
print("in tensor 158")
pnet, rnet, onet = detect_and_align.create_mtcnn(self.sess, None)
# self.load_model()
try:
id_dataset = self.load_id_dataset()
except:
id_dataset = self.train(sess)
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
face_patches, padded_bounding_boxes, landmarks = detect_and_align.align_image(frame, pnet, rnet, onet)
print(face_patches)
if len(face_patches) > 0:
face_patches = np.stack(face_patches)
feed_dict = {images_placeholder: face_patches, phase_train_placeholder: False}
embs = self.sess.run(embeddings, feed_dict=feed_dict)
print('Matches in frame:')
for i in range(len(embs)):
bb = padded_bounding_boxes[i]
matching_id, dist = self.find_matching_id(id_dataset, embs[i, :])
if matching_id:
person = PersonEntry()
person.person = matching_id
person.chance = dist
person.image = pickle.dumps(obj=frame, protocol=None, fix_imports=False)
person.algoritm = 'TensorFlow'
personList.append(person)
print('Hi %s! Distance: %1.4f' % (matching_id, dist))
else:
matching_id = 'Unkown'
print('Unkown! Couldn\'t find match.')
return personList
if __name__ == '__main__':
try:
from subprocess import call
call("find ./ids/ -name '.DS_Store' -type f -delete")
except:
pass | 0.356895 | 0.058239 |
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
#load dataset
dataset = open('../data/input.txt','r').read()
len_of_dataset = len(dataset)
print('length of dataset:',len_of_dataset)
vocab = set(dataset)
len_of_vocab = len(vocab)
print('length of vocab:',len_of_vocab)
char_to_idx = {char:idx for idx,char in enumerate(vocab)}
print('char_to_idx:',char_to_idx)
idx_to_char = {idx:char for idx,char in enumerate(vocab)}
print('idx_to_char:',idx_to_char)
#hyperparameter initialization
lr = 1e-1
time_steps = 25
start_ptr = 0
mean = 0.
std = 0.01
epoches = 10000
Wi,Wf,Wz,Wo,Wout = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),\
np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),\
np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),\
np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),\
np.random.normal(mean,std,(len_of_vocab,len_of_vocab))
Ri,Rf,Rz,Ro = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),\
np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),\
np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),\
np.random.normal(mean,std,(len_of_vocab,len_of_vocab))
Pi,Pf,Po = np.random.normal(mean,std,(len_of_vocab,1)),\
np.random.normal(mean,std,(len_of_vocab,1)),\
np.random.normal(mean,std,(len_of_vocab,1))
bi,bo,bf,bz,bout = np.zeros((len_of_vocab,1)),\
np.zeros((len_of_vocab,1)),\
np.zeros((len_of_vocab,1)),\
np.zeros((len_of_vocab,1)),\
np.zeros((len_of_vocab,1))
mWi,mWf,mWz,mWo,mWout = np.zeros_like(Wi),np.zeros_like(Wf),np.zeros_like(Wz),np.zeros_like(Wo),np.zeros_like(Wout)
mRi,mRf,mRz,mRo = np.zeros_like(Ri),np.zeros_like(Rf),np.zeros_like(Rz),np.zeros_like(Ro)
mPi,mPf,mPo = np.zeros_like(Pi),np.zeros_like(Pf),np.zeros_like(Po)
mbi,mbo,mbf,mbz,mbout = np.zeros_like(bi),np.zeros_like(bo),np.zeros_like(bf),np.zeros_like(bz),np.zeros_like(bout)
#utility functions
def sigmoid(x):
return 1/(1+np.exp(-x))
def softmax(x):
return np.exp(x)/np.sum(np.exp(x))
def sample(h_prev,c_prev,num_char):
hs = np.copy(h_prev)
cs = np.copy(c_prev)
x = np.zeros((len_of_vocab,1))
x[np.random.randint(0,len_of_vocab),0] = 1
idxs = []
for t in range(num_char):
I = np.dot(Wi,x) + np.dot(Ri,hs) + Pi*cs + bi
i_gate = sigmoid(I)
F = np.dot(Wf,x) + np.dot(Rf,hs) + Pf*cs + bf
f_gate = sigmoid(F)
Z = np.dot(Wz,x) + np.dot(Rz,hs) + bz
z = np.tanh(Z)
cs = i_gate*z + f_gate*cs
O = np.dot(Wo,x) + np.dot(Ro,hs) + Po*cs +bo
o_gate = sigmoid(O)
hs = o_gate * np.tanh(cs)
out = np.dot(Wout,hs) + bout
p = softmax(out)
idx = np.random.choice(len_of_vocab,1,p=p.ravel())[0]
x = np.zeros((len_of_vocab,1))
x[idx,0] = 1
idxs.append(idx)
print(''.join(idx_to_char[c] for c in idxs))
#forward_backward_pass
def forward_backward_pass(input,output,h_prev,c_prev):
hs={}
cs={}
i_gate={}
f_gate={}
o_gate={}
z ={}
hs[-1] = np.copy(h_prev)
cs[-1] = np.copy(c_prev)
p = {}
loss = 0
for t in range(time_steps):
x = np.zeros((len_of_vocab,1))
x[input[t],0] = 1
I = np.dot(Wi,x) + np.dot(Ri,hs[t-1]) + Pi*cs[t-1] + bi
i_gate[t] = sigmoid(I)
F = np.dot(Wf,x) + np.dot(Rf,hs[t-1]) + Pf*cs[t-1] + bf
f_gate[t] = sigmoid(F)
Z = np.dot(Wz,x) + np.dot(Rz,hs[t-1]) + bz
z[t] = np.tanh(Z)
cs[t] = i_gate[t]*z[t] + f_gate[t]*cs[t-1]
O = np.dot(Wo,x) + np.dot(Ro,hs[t-1]) + Po*cs[t] +bo
o_gate[t] = sigmoid(O)
hs[t] = o_gate[t] * np.tanh(cs[t])
out = np.dot(Wout,hs[t]) + bout
p[t] = softmax(out)
loss += -np.log(p[t][output[t],0])
dWi,dWf,dWz,dWo,dWout = np.zeros_like(Wi),np.zeros_like(Wf),np.zeros_like(Wz),np.zeros_like(Wo),np.zeros_like(Wout)
dRi,dRf,dRz,dRo = np.zeros_like(Ri),np.zeros_like(Rf),np.zeros_like(Rz),np.zeros_like(Ro)
dPi,dPo,dPf = np.zeros_like(Pi),np.zeros_like(Po),np.zeros_like(Pf)
dbi,dbo,dbf,dbz,dbout = np.zeros_like(bi),np.zeros_like(bo),np.zeros_like(bf),np.zeros_like(bz),np.zeros_like(bout)
#Backward pass
dht_z = np.zeros((len_of_vocab,1))
dht_f = np.zeros((len_of_vocab,1))
dht_o = np.zeros((len_of_vocab,1))
dht_i = np.zeros((len_of_vocab,1))
dct_cs = np.zeros((len_of_vocab,1))
dct_f = np.zeros((len_of_vocab,1))
dct_o = np.zeros((len_of_vocab,1))
dct_i = np.zeros((len_of_vocab,1))
for t in reversed(range(time_steps)):
x = np.zeros((len_of_vocab,1))
x[input[t],0] = 1
dout = np.copy(p[t])
dout[output[t],0] -= 1
dWout += np.dot(dout,hs[t].T)
dht = np.dot(Wout.T,dout) + dht_z + dht_f + dht_o + dht_i
dbout += dout
dog = np.tanh(cs[t])*dht
dog_ = o_gate[t]*(1-o_gate[t])*dog
dWo += np.dot(dog_,x.T)
dRo += np.dot(dog_,hs[t-1].T)
dht_o = np.dot(Ro.T,dog_)
dPo += cs[t]*dog_
dct_o = Po * dog_
dbo += dog_
dct = (1-np.tanh(cs[t])*np.tanh(cs[t]))*o_gate[t]*dht + dct_cs + dct_f + dct_o + dct_i
dig = z[t] * dct
dz = i_gate[t] * dct
dfg = cs[t-1] * dct
dct_cs = f_gate[t] * dct
dz_ = (1-z[t]*z[t])*dz
dWz += np.dot(dz_,x.T)
dRz += np.dot(dz_,hs[t-1].T)
dht_z = np.dot(Rz.T,dz_)
dbz += dz_
dfg_ = f_gate[t]*(1-f_gate[t])*dfg
dWf += np.dot(dfg_,x.T)
dRf += np.dot(dfg_,hs[t-1].T)
dht_f = np.dot(Rf.T,dfg_)
dPf += cs[t-1] * dfg_
dct_f = Pf * dfg_
dbf += dfg_
dig_ = i_gate[t]*(1-i_gate[t])*dig
dWi += np.dot(dig_,x.T)
dRi += np.dot(dig_,hs[t-1].T)
dht_i = np.dot(Ri.T,dig_)
dPi += cs[t-1]*dig_
dct_i = Pi * dig_
dbi += dig_
for dparam in [dWi,dWf,dWz,dWo,dWout,dRi,dRf,dRz,dRo,dPi,dPo,dPf,dbi,dbo,dbf,dbz,dbout]:
np.clip(dparam,-1,1,out=dparam)
return loss,dWi,dWf,dWz,dWo,dWout,dRi,dRf,dRz,dRo,dPi,dPo,dPf,dbi,dbo,dbf,dbz,dbout,hs[time_steps-1],cs[time_steps-1]
x=[]
y=[]
n = 0
smooth_loss = -np.log(1/len_of_vocab)*time_steps
h_prev,c_prev = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1))
while n<=10000:
if start_ptr+time_steps > len_of_dataset:
start_ptr = 0
h_prev = np.zeros((len_of_vocab,1))
else:
input = [char_to_idx[c] for c in dataset[start_ptr:start_ptr+time_steps]]
output = [char_to_idx[c] for c in dataset[start_ptr+1:start_ptr+time_steps+1]]
loss,dWi,dWf,dWz,dWo,dWout,dRi,dRf,dRz,dRo,dPi,dPo,dPf,dbi,dbo,dbf,dbz,dbout,h_prev,c_prev=forward_backward_pass\
(input=input,output=output,h_prev=h_prev,c_prev=c_prev)
smooth_loss = (0.999*smooth_loss)+(0.001*loss)
x.append(n)
y.append(smooth_loss)
if n%epoches==0:
print('--------------------------------------------')
print('iter:',n)
print('smooth_loss:',smooth_loss)
sample(h_prev=h_prev,c_prev=c_prev,num_char=300)
print('--------------------------------------------')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.plot(x,y,color='r')
plt.pause(1e-9)
for params,dparam,mparam in zip([Wi,Wf,Wz,Wo,Wout,Ri,Rf,Rz,Ro,Pi,Po,Pf,bi,bo,bf,bz,bout],\
[dWi,dWf,dWz,dWo,dWout,dRi,dRf,dRz,dRo,dPi,dPo,dPf,dbi,dbo,dbf,dbz,dbout],\
[mWi,mWf,mWz,mWo,mWout,mRi,mRf,mRz,mRo,mPi,mPo,mPf,mbi,mbo,mbf,mbz,mbout]):
mparam += dparam*dparam
params += -lr*dparam/np.sqrt(mparam+1e-8)
n+=1
start_ptr += time_steps
plt.savefig('../Performance/lstm_with_peephole_connection.png') | code/lstm_with_peephole_connections.py | import numpy as np
import matplotlib.pyplot as plt
plt.ion()
#load dataset
dataset = open('../data/input.txt','r').read()
len_of_dataset = len(dataset)
print('length of dataset:',len_of_dataset)
vocab = set(dataset)
len_of_vocab = len(vocab)
print('length of vocab:',len_of_vocab)
char_to_idx = {char:idx for idx,char in enumerate(vocab)}
print('char_to_idx:',char_to_idx)
idx_to_char = {idx:char for idx,char in enumerate(vocab)}
print('idx_to_char:',idx_to_char)
#hyperparameter initialization
lr = 1e-1
time_steps = 25
start_ptr = 0
mean = 0.
std = 0.01
epoches = 10000
Wi,Wf,Wz,Wo,Wout = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),\
np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),\
np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),\
np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),\
np.random.normal(mean,std,(len_of_vocab,len_of_vocab))
Ri,Rf,Rz,Ro = np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),\
np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),\
np.random.normal(mean,std,(len_of_vocab,len_of_vocab)),\
np.random.normal(mean,std,(len_of_vocab,len_of_vocab))
Pi,Pf,Po = np.random.normal(mean,std,(len_of_vocab,1)),\
np.random.normal(mean,std,(len_of_vocab,1)),\
np.random.normal(mean,std,(len_of_vocab,1))
bi,bo,bf,bz,bout = np.zeros((len_of_vocab,1)),\
np.zeros((len_of_vocab,1)),\
np.zeros((len_of_vocab,1)),\
np.zeros((len_of_vocab,1)),\
np.zeros((len_of_vocab,1))
mWi,mWf,mWz,mWo,mWout = np.zeros_like(Wi),np.zeros_like(Wf),np.zeros_like(Wz),np.zeros_like(Wo),np.zeros_like(Wout)
mRi,mRf,mRz,mRo = np.zeros_like(Ri),np.zeros_like(Rf),np.zeros_like(Rz),np.zeros_like(Ro)
mPi,mPf,mPo = np.zeros_like(Pi),np.zeros_like(Pf),np.zeros_like(Po)
mbi,mbo,mbf,mbz,mbout = np.zeros_like(bi),np.zeros_like(bo),np.zeros_like(bf),np.zeros_like(bz),np.zeros_like(bout)
#utility functions
def sigmoid(x):
return 1/(1+np.exp(-x))
def softmax(x):
return np.exp(x)/np.sum(np.exp(x))
def sample(h_prev,c_prev,num_char):
hs = np.copy(h_prev)
cs = np.copy(c_prev)
x = np.zeros((len_of_vocab,1))
x[np.random.randint(0,len_of_vocab),0] = 1
idxs = []
for t in range(num_char):
I = np.dot(Wi,x) + np.dot(Ri,hs) + Pi*cs + bi
i_gate = sigmoid(I)
F = np.dot(Wf,x) + np.dot(Rf,hs) + Pf*cs + bf
f_gate = sigmoid(F)
Z = np.dot(Wz,x) + np.dot(Rz,hs) + bz
z = np.tanh(Z)
cs = i_gate*z + f_gate*cs
O = np.dot(Wo,x) + np.dot(Ro,hs) + Po*cs +bo
o_gate = sigmoid(O)
hs = o_gate * np.tanh(cs)
out = np.dot(Wout,hs) + bout
p = softmax(out)
idx = np.random.choice(len_of_vocab,1,p=p.ravel())[0]
x = np.zeros((len_of_vocab,1))
x[idx,0] = 1
idxs.append(idx)
print(''.join(idx_to_char[c] for c in idxs))
#forward_backward_pass
def forward_backward_pass(input,output,h_prev,c_prev):
hs={}
cs={}
i_gate={}
f_gate={}
o_gate={}
z ={}
hs[-1] = np.copy(h_prev)
cs[-1] = np.copy(c_prev)
p = {}
loss = 0
for t in range(time_steps):
x = np.zeros((len_of_vocab,1))
x[input[t],0] = 1
I = np.dot(Wi,x) + np.dot(Ri,hs[t-1]) + Pi*cs[t-1] + bi
i_gate[t] = sigmoid(I)
F = np.dot(Wf,x) + np.dot(Rf,hs[t-1]) + Pf*cs[t-1] + bf
f_gate[t] = sigmoid(F)
Z = np.dot(Wz,x) + np.dot(Rz,hs[t-1]) + bz
z[t] = np.tanh(Z)
cs[t] = i_gate[t]*z[t] + f_gate[t]*cs[t-1]
O = np.dot(Wo,x) + np.dot(Ro,hs[t-1]) + Po*cs[t] +bo
o_gate[t] = sigmoid(O)
hs[t] = o_gate[t] * np.tanh(cs[t])
out = np.dot(Wout,hs[t]) + bout
p[t] = softmax(out)
loss += -np.log(p[t][output[t],0])
dWi,dWf,dWz,dWo,dWout = np.zeros_like(Wi),np.zeros_like(Wf),np.zeros_like(Wz),np.zeros_like(Wo),np.zeros_like(Wout)
dRi,dRf,dRz,dRo = np.zeros_like(Ri),np.zeros_like(Rf),np.zeros_like(Rz),np.zeros_like(Ro)
dPi,dPo,dPf = np.zeros_like(Pi),np.zeros_like(Po),np.zeros_like(Pf)
dbi,dbo,dbf,dbz,dbout = np.zeros_like(bi),np.zeros_like(bo),np.zeros_like(bf),np.zeros_like(bz),np.zeros_like(bout)
#Backward pass
dht_z = np.zeros((len_of_vocab,1))
dht_f = np.zeros((len_of_vocab,1))
dht_o = np.zeros((len_of_vocab,1))
dht_i = np.zeros((len_of_vocab,1))
dct_cs = np.zeros((len_of_vocab,1))
dct_f = np.zeros((len_of_vocab,1))
dct_o = np.zeros((len_of_vocab,1))
dct_i = np.zeros((len_of_vocab,1))
for t in reversed(range(time_steps)):
x = np.zeros((len_of_vocab,1))
x[input[t],0] = 1
dout = np.copy(p[t])
dout[output[t],0] -= 1
dWout += np.dot(dout,hs[t].T)
dht = np.dot(Wout.T,dout) + dht_z + dht_f + dht_o + dht_i
dbout += dout
dog = np.tanh(cs[t])*dht
dog_ = o_gate[t]*(1-o_gate[t])*dog
dWo += np.dot(dog_,x.T)
dRo += np.dot(dog_,hs[t-1].T)
dht_o = np.dot(Ro.T,dog_)
dPo += cs[t]*dog_
dct_o = Po * dog_
dbo += dog_
dct = (1-np.tanh(cs[t])*np.tanh(cs[t]))*o_gate[t]*dht + dct_cs + dct_f + dct_o + dct_i
dig = z[t] * dct
dz = i_gate[t] * dct
dfg = cs[t-1] * dct
dct_cs = f_gate[t] * dct
dz_ = (1-z[t]*z[t])*dz
dWz += np.dot(dz_,x.T)
dRz += np.dot(dz_,hs[t-1].T)
dht_z = np.dot(Rz.T,dz_)
dbz += dz_
dfg_ = f_gate[t]*(1-f_gate[t])*dfg
dWf += np.dot(dfg_,x.T)
dRf += np.dot(dfg_,hs[t-1].T)
dht_f = np.dot(Rf.T,dfg_)
dPf += cs[t-1] * dfg_
dct_f = Pf * dfg_
dbf += dfg_
dig_ = i_gate[t]*(1-i_gate[t])*dig
dWi += np.dot(dig_,x.T)
dRi += np.dot(dig_,hs[t-1].T)
dht_i = np.dot(Ri.T,dig_)
dPi += cs[t-1]*dig_
dct_i = Pi * dig_
dbi += dig_
for dparam in [dWi,dWf,dWz,dWo,dWout,dRi,dRf,dRz,dRo,dPi,dPo,dPf,dbi,dbo,dbf,dbz,dbout]:
np.clip(dparam,-1,1,out=dparam)
return loss,dWi,dWf,dWz,dWo,dWout,dRi,dRf,dRz,dRo,dPi,dPo,dPf,dbi,dbo,dbf,dbz,dbout,hs[time_steps-1],cs[time_steps-1]
x=[]
y=[]
n = 0
smooth_loss = -np.log(1/len_of_vocab)*time_steps
h_prev,c_prev = np.zeros((len_of_vocab,1)),np.zeros((len_of_vocab,1))
while n<=10000:
if start_ptr+time_steps > len_of_dataset:
start_ptr = 0
h_prev = np.zeros((len_of_vocab,1))
else:
input = [char_to_idx[c] for c in dataset[start_ptr:start_ptr+time_steps]]
output = [char_to_idx[c] for c in dataset[start_ptr+1:start_ptr+time_steps+1]]
loss,dWi,dWf,dWz,dWo,dWout,dRi,dRf,dRz,dRo,dPi,dPo,dPf,dbi,dbo,dbf,dbz,dbout,h_prev,c_prev=forward_backward_pass\
(input=input,output=output,h_prev=h_prev,c_prev=c_prev)
smooth_loss = (0.999*smooth_loss)+(0.001*loss)
x.append(n)
y.append(smooth_loss)
if n%epoches==0:
print('--------------------------------------------')
print('iter:',n)
print('smooth_loss:',smooth_loss)
sample(h_prev=h_prev,c_prev=c_prev,num_char=300)
print('--------------------------------------------')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.plot(x,y,color='r')
plt.pause(1e-9)
for params,dparam,mparam in zip([Wi,Wf,Wz,Wo,Wout,Ri,Rf,Rz,Ro,Pi,Po,Pf,bi,bo,bf,bz,bout],\
[dWi,dWf,dWz,dWo,dWout,dRi,dRf,dRz,dRo,dPi,dPo,dPf,dbi,dbo,dbf,dbz,dbout],\
[mWi,mWf,mWz,mWo,mWout,mRi,mRf,mRz,mRo,mPi,mPo,mPf,mbi,mbo,mbf,mbz,mbout]):
mparam += dparam*dparam
params += -lr*dparam/np.sqrt(mparam+1e-8)
n+=1
start_ptr += time_steps
plt.savefig('../Performance/lstm_with_peephole_connection.png') | 0.180431 | 0.326728 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'LedgerTagArgs',
'StreamKinesisConfigurationArgs',
'StreamTagArgs',
]
@pulumi.input_type
class LedgerTagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
value: pulumi.Input[str]):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class StreamKinesisConfigurationArgs:
def __init__(__self__, *,
aggregation_enabled: Optional[pulumi.Input[bool]] = None,
stream_arn: Optional[pulumi.Input[str]] = None):
if aggregation_enabled is not None:
pulumi.set(__self__, "aggregation_enabled", aggregation_enabled)
if stream_arn is not None:
pulumi.set(__self__, "stream_arn", stream_arn)
@property
@pulumi.getter(name="aggregationEnabled")
def aggregation_enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "aggregation_enabled")
@aggregation_enabled.setter
def aggregation_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "aggregation_enabled", value)
@property
@pulumi.getter(name="streamArn")
def stream_arn(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "stream_arn")
@stream_arn.setter
def stream_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stream_arn", value)
@pulumi.input_type
class StreamTagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
value: pulumi.Input[str]):
"""
A key-value pair to associate with a resource.
:param pulumi.Input[str] key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
:param pulumi.Input[str] value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value) | sdk/python/pulumi_aws_native/qldb/_inputs.py |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'LedgerTagArgs',
'StreamKinesisConfigurationArgs',
'StreamTagArgs',
]
@pulumi.input_type
class LedgerTagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
value: pulumi.Input[str]):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class StreamKinesisConfigurationArgs:
def __init__(__self__, *,
aggregation_enabled: Optional[pulumi.Input[bool]] = None,
stream_arn: Optional[pulumi.Input[str]] = None):
if aggregation_enabled is not None:
pulumi.set(__self__, "aggregation_enabled", aggregation_enabled)
if stream_arn is not None:
pulumi.set(__self__, "stream_arn", stream_arn)
@property
@pulumi.getter(name="aggregationEnabled")
def aggregation_enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "aggregation_enabled")
@aggregation_enabled.setter
def aggregation_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "aggregation_enabled", value)
@property
@pulumi.getter(name="streamArn")
def stream_arn(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "stream_arn")
@stream_arn.setter
def stream_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stream_arn", value)
@pulumi.input_type
class StreamTagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
value: pulumi.Input[str]):
"""
A key-value pair to associate with a resource.
:param pulumi.Input[str] key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
:param pulumi.Input[str] value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value) | 0.870638 | 0.083255 |
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from xgboost import XGBRFClassifier
from lightgbm import LGBMClassifier
from sklearn.metrics import mean_squared_error, r2_score, accuracy_score, classification_report
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense,Dropout
def label2onehot(arr):
new_arr = np.zeros((arr.shape[0],5))
for i in range(arr.shape[0]):
new_arr[i][int(arr[i])] = 1
return new_arr
def onehot2label(arr):
new_arr = np.zeros(arr.shape[0])
for i in range(arr.shape[0]):
new_arr[i] = np.argmax(arr[i])
return new_arr
x_train = pd.read_csv('final_data/x-train-s.csv')
y_train = pd.read_csv('final_data/y-train-s.csv')
x_eval = pd.read_csv('final_data/x-eval-s.csv')
y_eval = pd.read_csv('final_data/y-eval-s.csv')
x_train = x_train.drop(columns=['player-name', 'Unnamed: 0'])
x_eval = x_eval.drop(columns=['player-name', 'Unnamed: 0'])
y_train = (np.array(y_train['binned-points'])-1)
y_eval = (np.array(y_eval['binned-points'])-1)
y_train_onehot = label2onehot(y_train)
y_eval_onehot = label2onehot(y_eval)
models = {
#'linear regression': LinearRegression,
#'RBF SVM': SVR,
#'linear SVM': lambda: SVR(kernel='linear'),
#'poly SVM': lambda: SVR(kernel='poly'),
#'Decision Tree': lambda: DecisionTreeClassifier(max_depth=10),
#'ensemble-random-forest': RandomForestClassifier,
#'ensemble-xgb': XGBRFClassifier,
#'ensemble-lightgbm': LGBMClassifier,
}
print('x:', x_train.shape)
print('y:', y_train.shape)
print()
for name,model_func in models.items():
print(name)
model = model_func()
model.fit(x_train,y_train)
y_pred_train = model.predict(x_train)
y_pred_eval = model.predict(x_eval)
print('accuracy_score(train): ', accuracy_score(y_train, y_pred_train))
print('accuracy_score(eval): ', accuracy_score(y_eval, y_pred_eval))
print('classification_report(train): ', classification_report(y_train, y_pred_train))
print('classification_report(eval): ', classification_report(y_eval, y_pred_eval))
print()
#neural_network
def create_baseline():
# create model
model = Sequential()
model.add(Dense(200, input_dim=151, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(5, activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
model = create_baseline()
model.fit(x_train,y_train_onehot,epochs=100,batch_size=128)
y_pred_train = onehot2label(model.predict(x_train))
y_pred_eval = onehot2label(model.predict(x_eval))
print(y_pred_train[:10])
print(y_train[:10])
print('accuracy_score(train): ', accuracy_score(y_train, y_pred_train))
print('accuracy_score(eval): ', accuracy_score(y_eval, y_pred_eval))
print('classification_report(train):\n', classification_report(y_train, y_pred_train))
print('classification_report(eval):\n', classification_report(y_eval, y_pred_eval))
print() | baseline_model_classifier.py | import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from xgboost import XGBRFClassifier
from lightgbm import LGBMClassifier
from sklearn.metrics import mean_squared_error, r2_score, accuracy_score, classification_report
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense,Dropout
def label2onehot(arr):
new_arr = np.zeros((arr.shape[0],5))
for i in range(arr.shape[0]):
new_arr[i][int(arr[i])] = 1
return new_arr
def onehot2label(arr):
new_arr = np.zeros(arr.shape[0])
for i in range(arr.shape[0]):
new_arr[i] = np.argmax(arr[i])
return new_arr
x_train = pd.read_csv('final_data/x-train-s.csv')
y_train = pd.read_csv('final_data/y-train-s.csv')
x_eval = pd.read_csv('final_data/x-eval-s.csv')
y_eval = pd.read_csv('final_data/y-eval-s.csv')
x_train = x_train.drop(columns=['player-name', 'Unnamed: 0'])
x_eval = x_eval.drop(columns=['player-name', 'Unnamed: 0'])
y_train = (np.array(y_train['binned-points'])-1)
y_eval = (np.array(y_eval['binned-points'])-1)
y_train_onehot = label2onehot(y_train)
y_eval_onehot = label2onehot(y_eval)
models = {
#'linear regression': LinearRegression,
#'RBF SVM': SVR,
#'linear SVM': lambda: SVR(kernel='linear'),
#'poly SVM': lambda: SVR(kernel='poly'),
#'Decision Tree': lambda: DecisionTreeClassifier(max_depth=10),
#'ensemble-random-forest': RandomForestClassifier,
#'ensemble-xgb': XGBRFClassifier,
#'ensemble-lightgbm': LGBMClassifier,
}
print('x:', x_train.shape)
print('y:', y_train.shape)
print()
for name,model_func in models.items():
print(name)
model = model_func()
model.fit(x_train,y_train)
y_pred_train = model.predict(x_train)
y_pred_eval = model.predict(x_eval)
print('accuracy_score(train): ', accuracy_score(y_train, y_pred_train))
print('accuracy_score(eval): ', accuracy_score(y_eval, y_pred_eval))
print('classification_report(train): ', classification_report(y_train, y_pred_train))
print('classification_report(eval): ', classification_report(y_eval, y_pred_eval))
print()
#neural_network
def create_baseline():
# create model
model = Sequential()
model.add(Dense(200, input_dim=151, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(5, activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
model = create_baseline()
model.fit(x_train,y_train_onehot,epochs=100,batch_size=128)
y_pred_train = onehot2label(model.predict(x_train))
y_pred_eval = onehot2label(model.predict(x_eval))
print(y_pred_train[:10])
print(y_train[:10])
print('accuracy_score(train): ', accuracy_score(y_train, y_pred_train))
print('accuracy_score(eval): ', accuracy_score(y_eval, y_pred_eval))
print('classification_report(train):\n', classification_report(y_train, y_pred_train))
print('classification_report(eval):\n', classification_report(y_eval, y_pred_eval))
print() | 0.621885 | 0.28768 |
import requests
from eth_utils import to_hex
from raiden.constants import DEFAULT_HTTP_REQUEST_TIMEOUT
from raiden.exceptions import ServiceRequestFailed
from raiden.utils import typing
from raiden_contracts.utils.proofs import sign_one_to_n_iou
def get_pfs_info(url: str) -> typing.Optional[typing.Dict]:
try:
return requests.get(f'{url}/api/v1/info', timeout=DEFAULT_HTTP_REQUEST_TIMEOUT).json()
except requests.exceptions.RequestException:
return None
def get_pfs_iou(
url: str,
token_network_address: typing.Union[typing.TokenNetworkAddress, typing.TokenNetworkID],
**kwargs,
) -> typing.Optional[typing.Dict]:
try:
return requests.get(
f'{url}/api/v1/{token_network_address}/payment/iou',
data=kwargs,
timeout=DEFAULT_HTTP_REQUEST_TIMEOUT,
).json().get('last_iou')
except (requests.exceptions.RequestException, ValueError) as e:
raise ServiceRequestFailed(str(e))
def make_iou(
config: typing.Dict[str, typing.Any],
our_address: typing.Address,
privkey: bytes,
block_number: typing.BlockNumber,
) -> typing.Dict:
expiration = block_number + config['pathfinding_iou_timeout']
iou = dict(
sender=our_address,
receiver=config['pathfinding_eth_address'],
amount=config['pathfinding_max_fee'],
)
iou.update(
expiration_block=expiration,
signature=sign_one_to_n_iou(privatekey=to_hex(privkey), expiration=expiration, **iou),
)
return iou
def update_iou(
iou: typing.Dict[str, typing.Any],
privkey: bytes,
added_amount: typing.TokenAmount = 0,
expiration_block: typing.Optional[typing.BlockNumber] = None,
) -> typing.Dict[str, typing.Any]:
iou['amount'] += added_amount
if expiration_block:
iou['expiration_block'] = expiration_block
iou['signature'] = sign_one_to_n_iou(
privatekey=to_hex(privkey),
expiration=iou['expiration_block'],
sender=iou['sender'],
receiver=iou['receiver'],
amount=iou['amount'],
)
return iou
def create_current_iou(
config: typing.Dict[str, typing.Any],
token_network_address: typing.Union[typing.TokenNetworkAddress, typing.TokenNetworkID],
our_address: typing.Address,
privkey: bytes,
block_number: typing.BlockNumber,
) -> typing.Dict[str, typing.Any]:
url = config['pathfinding_service_address']
latest_iou = get_pfs_iou(url, token_network_address)
if latest_iou is None:
return make_iou(
config=config,
our_address=our_address,
privkey=privkey,
block_number=block_number,
)
else:
added_amount = config['pathfinding_max_fee']
return update_iou(iou=latest_iou, privkey=privkey, added_amount=added_amount)
def query_paths(
service_config: typing.Dict[str, typing.Any],
our_address: typing.Address,
privkey: bytes,
current_block_number: typing.BlockNumber,
token_network_address: typing.Union[typing.TokenNetworkAddress, typing.TokenNetworkID],
route_from: typing.InitiatorAddress,
route_to: typing.TargetAddress,
value: typing.TokenAmount,
) -> typing.List[typing.Dict[str, typing.Any]]:
max_paths = service_config['pathfinding_max_paths']
url = service_config['pathfinding_service_address']
payload = {'from': route_from, 'to': route_to, 'value': value, 'max_paths': max_paths}
payload.update(create_current_iou(
config=service_config,
token_network_address=token_network_address,
our_address=our_address,
privkey=privkey,
block_number=current_block_number,
))
try:
response = requests.post(
f'{url}/api/v1/{token_network_address}/paths',
data=payload,
timeout=DEFAULT_HTTP_REQUEST_TIMEOUT,
)
except requests.RequestException:
raise ServiceRequestFailed(
'Could not connect to Pathfinding Service',
dict(parameters=payload, exc_info=True),
)
if response.status_code != 200:
info = {'error_code': response.status_code}
try:
error = response.json().get('errors')
if error is not None:
info['pfs_error'] = error
except ValueError: # invalid json
pass
raise ServiceRequestFailed('Pathfinding service returned error code', info)
try:
return response.json()['result']
except KeyError:
raise ServiceRequestFailed(
"Answer from pathfinding service not understood ('result' field missing)",
dict(response=response.json()),
)
except ValueError:
raise ServiceRequestFailed(
'Pathfinding service returned invalid json',
dict(response_text=response.text, exc_info=True),
) | raiden/network/pathfinding.py | import requests
from eth_utils import to_hex
from raiden.constants import DEFAULT_HTTP_REQUEST_TIMEOUT
from raiden.exceptions import ServiceRequestFailed
from raiden.utils import typing
from raiden_contracts.utils.proofs import sign_one_to_n_iou
def get_pfs_info(url: str) -> typing.Optional[typing.Dict]:
try:
return requests.get(f'{url}/api/v1/info', timeout=DEFAULT_HTTP_REQUEST_TIMEOUT).json()
except requests.exceptions.RequestException:
return None
def get_pfs_iou(
url: str,
token_network_address: typing.Union[typing.TokenNetworkAddress, typing.TokenNetworkID],
**kwargs,
) -> typing.Optional[typing.Dict]:
try:
return requests.get(
f'{url}/api/v1/{token_network_address}/payment/iou',
data=kwargs,
timeout=DEFAULT_HTTP_REQUEST_TIMEOUT,
).json().get('last_iou')
except (requests.exceptions.RequestException, ValueError) as e:
raise ServiceRequestFailed(str(e))
def make_iou(
config: typing.Dict[str, typing.Any],
our_address: typing.Address,
privkey: bytes,
block_number: typing.BlockNumber,
) -> typing.Dict:
expiration = block_number + config['pathfinding_iou_timeout']
iou = dict(
sender=our_address,
receiver=config['pathfinding_eth_address'],
amount=config['pathfinding_max_fee'],
)
iou.update(
expiration_block=expiration,
signature=sign_one_to_n_iou(privatekey=to_hex(privkey), expiration=expiration, **iou),
)
return iou
def update_iou(
iou: typing.Dict[str, typing.Any],
privkey: bytes,
added_amount: typing.TokenAmount = 0,
expiration_block: typing.Optional[typing.BlockNumber] = None,
) -> typing.Dict[str, typing.Any]:
iou['amount'] += added_amount
if expiration_block:
iou['expiration_block'] = expiration_block
iou['signature'] = sign_one_to_n_iou(
privatekey=to_hex(privkey),
expiration=iou['expiration_block'],
sender=iou['sender'],
receiver=iou['receiver'],
amount=iou['amount'],
)
return iou
def create_current_iou(
config: typing.Dict[str, typing.Any],
token_network_address: typing.Union[typing.TokenNetworkAddress, typing.TokenNetworkID],
our_address: typing.Address,
privkey: bytes,
block_number: typing.BlockNumber,
) -> typing.Dict[str, typing.Any]:
url = config['pathfinding_service_address']
latest_iou = get_pfs_iou(url, token_network_address)
if latest_iou is None:
return make_iou(
config=config,
our_address=our_address,
privkey=privkey,
block_number=block_number,
)
else:
added_amount = config['pathfinding_max_fee']
return update_iou(iou=latest_iou, privkey=privkey, added_amount=added_amount)
def query_paths(
service_config: typing.Dict[str, typing.Any],
our_address: typing.Address,
privkey: bytes,
current_block_number: typing.BlockNumber,
token_network_address: typing.Union[typing.TokenNetworkAddress, typing.TokenNetworkID],
route_from: typing.InitiatorAddress,
route_to: typing.TargetAddress,
value: typing.TokenAmount,
) -> typing.List[typing.Dict[str, typing.Any]]:
max_paths = service_config['pathfinding_max_paths']
url = service_config['pathfinding_service_address']
payload = {'from': route_from, 'to': route_to, 'value': value, 'max_paths': max_paths}
payload.update(create_current_iou(
config=service_config,
token_network_address=token_network_address,
our_address=our_address,
privkey=privkey,
block_number=current_block_number,
))
try:
response = requests.post(
f'{url}/api/v1/{token_network_address}/paths',
data=payload,
timeout=DEFAULT_HTTP_REQUEST_TIMEOUT,
)
except requests.RequestException:
raise ServiceRequestFailed(
'Could not connect to Pathfinding Service',
dict(parameters=payload, exc_info=True),
)
if response.status_code != 200:
info = {'error_code': response.status_code}
try:
error = response.json().get('errors')
if error is not None:
info['pfs_error'] = error
except ValueError: # invalid json
pass
raise ServiceRequestFailed('Pathfinding service returned error code', info)
try:
return response.json()['result']
except KeyError:
raise ServiceRequestFailed(
"Answer from pathfinding service not understood ('result' field missing)",
dict(response=response.json()),
)
except ValueError:
raise ServiceRequestFailed(
'Pathfinding service returned invalid json',
dict(response_text=response.text, exc_info=True),
) | 0.601125 | 0.10316 |
import xml.etree.ElementTree as ET
parsers = {}
def getParser(langPath):
global parsers
if langPath not in parsers:
parsers[langPath] = Parser(langPath)
return parsers[langPath]
class Parser:
def __init__(self, langPath):
#Initialize variables
self.syllables = set()
self.memoDict = dict(); #memoization dictionary
#Load syllable list
tree = ET.parse(langPath)
cRoot = tree.find(".//Graphemes")
for child in cRoot:
self.syllables.add(child.text.lower())
def parseWord(self, word):
"""Parse a word into the longest matching graphemes"""
# Start by adding the absolute base case that an empty string has no graphemes
self.memoDict[""] = []
# Call the helper function
resultGraphemes = self._parseWord(word.lower())
#Reset the memoization dictionary so we don't hog all the memory with things we may never see again
self.memoDict.clear()
return resultGraphemes
def _parseWord(self, word):
"""Recursive helper function to parse a word into the longest matching graphemes"""
# Base case
if word in self.memoDict:
return self.memoDict[word]
prefix = word
suffix = ""
while len(prefix) > 0:
if prefix in self.syllables: # first part is a valid grapheme
suffixSyllables = self._parseWord(suffix)
if suffixSyllables is not None: # second part is made of valid graphemes
# add it to the grapheme dictionary so we don't solve it again
self.memoDict[word] = [prefix] + suffixSyllables
return self.memoDict[word]
#If the prefix wasn't a grapheme, or the suffix wasn't parseable into graphemes, shift a character from the prefix to the suffix, and try again
suffix = prefix[-1] + suffix
prefix = prefix[:-1]
# We ran out of characters in the prefix trying to find a matching grapheme, so therefore it can't be parsed.
return None | SyllableParser/parser.py | import xml.etree.ElementTree as ET
parsers = {}
def getParser(langPath):
global parsers
if langPath not in parsers:
parsers[langPath] = Parser(langPath)
return parsers[langPath]
class Parser:
def __init__(self, langPath):
#Initialize variables
self.syllables = set()
self.memoDict = dict(); #memoization dictionary
#Load syllable list
tree = ET.parse(langPath)
cRoot = tree.find(".//Graphemes")
for child in cRoot:
self.syllables.add(child.text.lower())
def parseWord(self, word):
"""Parse a word into the longest matching graphemes"""
# Start by adding the absolute base case that an empty string has no graphemes
self.memoDict[""] = []
# Call the helper function
resultGraphemes = self._parseWord(word.lower())
#Reset the memoization dictionary so we don't hog all the memory with things we may never see again
self.memoDict.clear()
return resultGraphemes
def _parseWord(self, word):
"""Recursive helper function to parse a word into the longest matching graphemes"""
# Base case
if word in self.memoDict:
return self.memoDict[word]
prefix = word
suffix = ""
while len(prefix) > 0:
if prefix in self.syllables: # first part is a valid grapheme
suffixSyllables = self._parseWord(suffix)
if suffixSyllables is not None: # second part is made of valid graphemes
# add it to the grapheme dictionary so we don't solve it again
self.memoDict[word] = [prefix] + suffixSyllables
return self.memoDict[word]
#If the prefix wasn't a grapheme, or the suffix wasn't parseable into graphemes, shift a character from the prefix to the suffix, and try again
suffix = prefix[-1] + suffix
prefix = prefix[:-1]
# We ran out of characters in the prefix trying to find a matching grapheme, so therefore it can't be parsed.
return None | 0.450843 | 0.16872 |
import csv
import datetime
import FileUtil
import json
import os
import sys
from com.ziclix.python.sql import zxJDBC
from org.slf4j import LoggerFactory
from wherehows.common import Constant
class OracleExtract:
table_dict = {}
table_output_list = []
field_output_list = []
sample_output_list = []
ignored_owner_regex = 'ANONYMOUS|PUBLIC|SYS|SYSTEM|DBSNMP|MDSYS|CTXSYS|XDB|TSMSYS|ORACLE.*|APEX.*|TEST?*|GG_.*|\$'
def __init__(self):
self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
def get_table_info(self, excluded_owner_list, table_name):
"""
get table, column info from Oracle all_tables
here Owner, Schema, Database have same meaning: a collection of tables
:param excluded_owner_list: schema blacklist
:param table_name: get specific table name, not used in common case
:return:
"""
owner_exclusion_filter = ''
table_name_filter = ''
if excluded_owner_list and len(excluded_owner_list) > 0:
owner_exclusion_filter = " AND NOT REGEXP_LIKE(t.OWNER, '%s') " % '|'.join(excluded_owner_list)
self.logger.info("Get Oracle metadata with extra excluded schema: %s" % excluded_owner_list)
if table_name and len(table_name) > 0:
if table_name.find('.') > 0:
table_name_filter = " AND OWNER='%s' AND TABLE_NAME='%s' " % table_name.split('.')
else:
table_name_filter = " AND TABLE_NAME='%s' " % table_name
self.logger.info("Get Oracle metadata with extra filter: %s" % table_name_filter)
curs_meta = self.conn_db.cursor()
column_info_sql = """
select
t.OWNER, t.TABLE_NAME, t.PARTITIONED,
c.COLUMN_ID, c.COLUMN_NAME, c.DATA_TYPE, c.NULLABLE,
c.DATA_LENGTH, c.DATA_PRECISION, c.DATA_SCALE,
c.CHAR_LENGTH, c.CHARACTER_SET_NAME,
c.DATA_DEFAULT, m.COMMENTS
from ALL_TABLES t
join ALL_TAB_COLUMNS c
on t.OWNER = c.OWNER
and t.TABLE_NAME = c.TABLE_NAME
left join ALL_COL_COMMENTS m
on c.OWNER = m.OWNER
and c.TABLE_NAME = m.TABLE_NAME
and c.COLUMN_NAME = m.COLUMN_NAME
where NOT REGEXP_LIKE(t.OWNER, '%s')
%s /* extra excluded schema/owner */
%s /* table filter */
order by 1,2,4
""" % (self.ignored_owner_regex, owner_exclusion_filter, table_name_filter)
self.logger.debug(column_info_sql)
curs_meta.execute(column_info_sql)
rows = curs_meta.fetchall()
self.logger.info("Fetched %d records of Oracle metadata" % len(rows))
curs_meta.close()
prev_table_key = ''
for row in rows:
current_table_key = "%s.%s" % (row[0], row[1]) # OWNER.TABLE_NAME
if current_table_key != prev_table_key:
self.table_dict[current_table_key] = {"partitioned": row[2]}
prev_table_key = current_table_key
self.logger.info("Fetched %d tables: %s" % (len(self.table_dict), self.table_dict))
return rows
def get_extra_table_info(self):
"""
Index, Partition, Size info
:return: index,partition,constraint
"""
index_info_sql = """
select
i.TABLE_OWNER, i.TABLE_NAME, i.INDEX_NAME, i.INDEX_TYPE, i.UNIQUENESS,
t.CONSTRAINT_NAME,
--LISTAGG(c.COLUMN_NAME,',')
-- WITHIN GROUP (ORDER BY c.COLUMN_POSITION) as INDEX_COLUMNS,
RTRIM(XMLAGG(xmlelement(s,c.COLUMN_NAME,',').extract('//text()')
ORDER BY c.COLUMN_POSITION),',') INDEX_COLUMNS,
COUNT(1) NUM_COLUMNS
from ALL_INDEXES i
join ALL_IND_COLUMNS c
on i.OWNER = c.INDEX_OWNER
and i.INDEX_NAME = c.INDEX_NAME
and i.TABLE_OWNER = c.TABLE_OWNER
and i.TABLE_NAME = c.TABLE_NAME
left join (select coalesce(INDEX_OWNER,OWNER) OWNER, INDEX_NAME, CONSTRAINT_NAME
from ALL_CONSTRAINTS t
where INDEX_NAME IS NOT NULL) t
on i.OWNER = t.OWNER
and i.INDEX_NAME = t.INDEX_NAME
where NOT REGEXP_LIKE(i.TABLE_OWNER, '%s')
group by i.TABLE_OWNER, i.TABLE_NAME, i.INDEX_NAME,
i.INDEX_TYPE, i.UNIQUENESS, t.CONSTRAINT_NAME
order by 1,2,3
""" % self.ignored_owner_regex
partition_col_sql = """
select
OWNER TABLE_OWNER, NAME TABLE_NAME,
RTRIM(XMLAGG(xmlelement(s,c.COLUMN_NAME,',').extract('//text()')
ORDER BY c.COLUMN_POSITION),',') PARTITION_COLUMNS,
COUNT(1) NUM_COLUMNS
from ALL_PART_KEY_COLUMNS c
where c.OBJECT_TYPE = 'TABLE' and NOT REGEXP_LIKE(c.OWNER, '%s')
group by c.OWNER, c.NAME
order by 1,2
""" % self.ignored_owner_regex
curs_meta = self.conn_db.cursor()
# get index and partition info one by one
curs_meta.execute(partition_col_sql)
rows = curs_meta.fetchall()
for row in rows:
table_name_key = "%s.%s" % (row[0], row[1])
if table_name_key not in self.table_dict:
continue
self.table_dict[table_name_key]['partition_columns'] = row[2]
self.logger.info("Found %d record for partition info" % len(rows))
curs_meta.execute(index_info_sql)
rows = curs_meta.fetchall()
curs_meta.close()
for row in rows:
table_name_key = "%s.%s" % (row[0], row[1])
if table_name_key not in self.table_dict:
continue
if "indexes" not in self.table_dict[table_name_key]:
self.table_dict[table_name_key]["indexes"] = []
self.table_dict[table_name_key]["indexes"].append(
{
"name": row[2],
"type": row[3],
"is_unique": 'Y' if row[4] == 'UNIQUE' else 'N',
"constraint_name": row[5],
"index_columns": row[6],
"num_of_columns": row[7]
}
)
self.logger.info("Found %d record for index info" % len(rows))
def format_table_metadata(self, rows):
"""
add table info with columns from rows into table schema
:param rows: input. each row is a table column
:param schema: {schema : _, type : _, tables : ['name' : _, ... 'original_name' : _] }
:return:
"""
schema_dict = {"fields": []}
table_record = {}
table_idx = 0
field_idx = 0
for row in rows:
table_name_key = "%s.%s" % (row[0], row[1])
table_urn = "oracle:///%s/%s" % (row[0], row[1])
if 'urn' not in table_record or table_urn != table_record['urn']:
# This is a new table. Let's push the previous table record into output_list
if 'urn' in table_record:
schema_dict["num_fields"] = field_idx
table_record["columns"] = json.dumps(schema_dict)
self.table_output_list.append(table_record)
properties = {
"indexes": self.table_dict[table_name_key].get("indexes"),
"partition_column": self.table_dict[table_name_key].get("partition_column")
}
table_record = {
"name": row[1],
"columns": None,
"schema_type": "JSON",
"properties": json.dumps(properties),
"urn": table_urn,
"source": "Oracle",
"location_prefix": row[0],
"parent_name": row[0],
"storage_type": "Table",
"dataset_type": "oracle",
"is_partitioned": 'Y' if self.table_dict[table_name_key]["partitioned"] == 'YES' else 'N'
}
schema_dict = {"fields": []}
table_idx += 1
field_idx = 0
field_record = {
"sort_id": self.num_to_int(row[3]),
"name": row[4],
"data_type": row[5],
"nullable": row[6],
"size": self.num_to_int(row[7]),
"precision": self.num_to_int(row[8]),
"scale": self.num_to_int(row[9]),
"default_value": self.trim_newline(row[12]),
"doc": self.trim_newline(row[13])
}
schema_dict['fields'].append(field_record)
field_record['dataset_urn'] = table_urn
self.field_output_list.append(field_record)
field_idx += 1
# finish all remaining rows
schema_dict["num_fields"] = field_idx
table_record["columns"] = json.dumps(schema_dict)
self.table_output_list.append(table_record)
self.logger.info("%d Table records generated" % table_idx)
def get_sample_data(self, table_fullname, num_rows):
    """Fetch up to num_rows sample rows from a table.

    Appends {'dataset_urn', 'sample_data'} to self.sample_output_list;
    logs and returns silently when the table is empty or the query fails.
    """
    table_urn = "oracle:///%s" % (table_fullname.replace('.', '/'))
    columns = []
    sample_data = []
    sql = 'SELECT * FROM %s WHERE ROWNUM <= %d' % (table_fullname, num_rows)
    cursor = self.conn_db.cursor()
    try:
        cursor.execute(sql)
        rows = cursor.fetchall()
        if len(rows) == 0:
            self.logger.error("dataset {} is empty".format(table_fullname))
            return
        # retrieve column names
        columns = [i[0] for i in cursor.description]
        # retrieve data; encode each field to a clean unicode value
        for row in rows:
            row_data = []
            for value in row:
                if value is None:
                    row_data.append('')
                else:
                    # Python 2 / Jython builtin; drops undecodable bytes.
                    row_data.append(unicode(value, errors='ignore'))
            sample_data.append(row_data)
    except Exception as ex:
        self.logger.error("Error fetch sample for {}: {}".format(table_fullname, str(ex)))
        return
    finally:
        # BUG FIX: close the cursor on every exit path; the old code leaked
        # it when the query failed or returned no rows.
        cursor.close()
    data_with_column = map(lambda x: dict(zip(columns, x)), sample_data)
    self.sample_output_list.append({'dataset_urn': table_urn, 'sample_data': data_with_column})
def num_to_int(self, num):
    """Coerce num to int; return None when it is not convertible."""
    try:
        value = int(num)
    except (ValueError, TypeError):
        return None
    return value
def trim_newline(self, line):
    """Flatten CR/LF to spaces, strip, and ASCII-encode; empty/None -> None."""
    if not line:
        return None
    flattened = line.replace('\n', ' ').replace('\r', ' ')
    return flattened.strip().encode('ascii', 'ignore')
def write_csv(self, csv_filename, csv_columns, data_list):
    """Dump a list of dict records to csv_filename, ^Z (0x1A) delimited.

    Note: opened in 'wb' with an 0644 octal literal - this method targets
    Jython/Python 2 (0644 is a syntax error under Python 3).
    """
    csvfile = open(csv_filename, 'wb')
    os.chmod(csv_filename, 0644)
    # Non-printable quote/escape characters effectively disable quoting;
    # field values are assumed not to contain the \x1A delimiter.
    writer = csv.DictWriter(csvfile, fieldnames=csv_columns, delimiter='\x1A', lineterminator='\n',
                            quoting=csv.QUOTE_NONE, quotechar='\1', escapechar='\0')
    writer.writeheader()
    for data in data_list:
        writer.writerow(data)
    csvfile.close()
def run(self, exclude_database_list, table_name, table_file, field_file, sample_file, sample=False):
    """
    The entrance of the class: extract schema and, optionally, sample data.
    Notice the databases need to be ordered so that databases with more info
    (DWH_STG) are scanned first.
    :param exclude_database_list: list of excluded databases/owners/schemas
    :param table_name: specific table name to query
    :param table_file: table output csv file path
    :param field_file: table fields output csv file path
    :param sample_file: sample data output csv file path
    :param sample: do sample or not
    :return:
    """
    begin = datetime.datetime.now().strftime("%H:%M:%S")
    # collect table info, enrich it with index/partition data, then format
    rows = self.get_table_info(exclude_database_list, table_name)
    self.get_extra_table_info()
    self.format_table_metadata(rows)
    mid = datetime.datetime.now().strftime("%H:%M:%S")
    self.logger.info("Collecting table info [%s -> %s]" % (str(begin), str(mid)))
    # write table (dataset) records
    csv_columns = ['name', 'columns', 'schema_type', 'properties', 'urn', 'source', 'location_prefix',
                   'parent_name', 'storage_type', 'dataset_type', 'is_partitioned']
    self.write_csv(table_file, csv_columns, self.table_output_list)
    # write field (column) records
    csv_columns = ['dataset_urn', 'sort_id', 'name', 'data_type', 'nullable',
                   'size', 'precision', 'scale', 'default_value', 'doc']
    self.write_csv(field_file, csv_columns, self.field_output_list)
    if sample:
        # collect sample data (top 10 rows per table)
        for table in self.table_dict.keys():
            self.get_sample_data(table, 10)
        end = datetime.datetime.now().strftime("%H:%M:%S")
        self.logger.info("Collecting sample data [%s -> %s]" % (str(mid), str(end)))
        csv_columns = ['dataset_urn', 'sample_data']
        self.write_csv(sample_file, csv_columns, self.sample_output_list)
if __name__ == "__main__":
    # NOTE(review): sys.argv[1] is indexed with string keys below, so the
    # WhereHows Jython launcher passes a dict-like job config, not a plain
    # string - confirm against the launcher.
    args = sys.argv[1]
    # connection settings
    username = args[Constant.ORA_DB_USERNAME_KEY]
    password = args[Constant.ORA_DB_PASSWORD_KEY]
    JDBC_DRIVER = args[Constant.ORA_DB_DRIVER_KEY]
    JDBC_URL = args[Constant.ORA_DB_URL_KEY]
    e = OracleExtract()
    e.conn_db = zxJDBC.connect(JDBC_URL, username, password, JDBC_DRIVER)
    exclude_databases = filter(bool, args[Constant.ORA_EXCLUDE_DATABASES_KEY].split(','))
    collect_sample = False
    if Constant.ORA_LOAD_SAMPLE in args:
        collect_sample = FileUtil.parse_bool(args[Constant.ORA_LOAD_SAMPLE], False)
    temp_dir = FileUtil.etl_temp_dir(args, "ORACLE")
    table_output_file = os.path.join(temp_dir, args[Constant.ORA_SCHEMA_OUTPUT_KEY])
    field_output_file = os.path.join(temp_dir, args[Constant.ORA_FIELD_OUTPUT_KEY])
    sample_output_file = os.path.join(temp_dir, args[Constant.ORA_SAMPLE_OUTPUT_KEY])
    try:
        # Normalize session settings so timestamps are comparable across runs.
        e.conn_db.cursor().execute("ALTER SESSION SET TIME_ZONE = 'US/Pacific'")
        e.conn_db.cursor().execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'")
        # Tag the session so DBAs can identify this job.
        e.conn_db.cursor().execute("CALL dbms_application_info.set_module('%s','%d')" %
                                   ('WhereHows (Jython)', os.getpid()))
        e.conn_db.commit()
        e.run(exclude_databases,
              None,
              table_output_file,
              field_output_file,
              sample_output_file,
              sample=collect_sample)
    finally:
        e.conn_db.cursor().close()
e.conn_db.close() | wherehows-etl/src/main/resources/jython/OracleExtract.py |
import csv
import datetime
import FileUtil
import json
import os
import sys
from com.ziclix.python.sql import zxJDBC
from org.slf4j import LoggerFactory
from wherehows.common import Constant
class OracleExtract:
    """Extract table/column/index metadata (and optional sample rows) from an
    Oracle database and accumulate them as WhereHows dataset/field records.

    NOTE(review): these class-level containers are shared by all instances;
    the script only ever creates one OracleExtract, so this is benign here.
    """
    table_dict = {}          # "OWNER.TABLE" -> {partitioned, indexes, partition_columns}
    table_output_list = []   # one dataset record per table
    field_output_list = []   # one record per column
    sample_output_list = []  # {'dataset_urn', 'sample_data'} records
    # System/test schemas that are never extracted
    ignored_owner_regex = 'ANONYMOUS|PUBLIC|SYS|SYSTEM|DBSNMP|MDSYS|CTXSYS|XDB|TSMSYS|ORACLE.*|APEX.*|TEST?*|GG_.*|\$'

    def __init__(self):
        # slf4j logger (provided by the Jython/WhereHows runtime)
        self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)

    def get_table_info(self, excluded_owner_list, table_name):
        """
        get table, column info from Oracle all_tables
        here Owner, Schema, Database have same meaning: a collection of tables
        :param excluded_owner_list: schema blacklist
        :param table_name: get specific table name, not used in common case
        :return: metadata rows ordered by owner, table, column id
        """
        owner_exclusion_filter = ''
        table_name_filter = ''
        if excluded_owner_list and len(excluded_owner_list) > 0:
            owner_exclusion_filter = " AND NOT REGEXP_LIKE(t.OWNER, '%s') " % '|'.join(excluded_owner_list)
            self.logger.info("Get Oracle metadata with extra excluded schema: %s" % excluded_owner_list)
        if table_name and len(table_name) > 0:
            if table_name.find('.') > 0:
                table_name_filter = " AND OWNER='%s' AND TABLE_NAME='%s' " % table_name.split('.')
            else:
                table_name_filter = " AND TABLE_NAME='%s' " % table_name
            self.logger.info("Get Oracle metadata with extra filter: %s" % table_name_filter)
        curs_meta = self.conn_db.cursor()
        column_info_sql = """
        select
          t.OWNER, t.TABLE_NAME, t.PARTITIONED,
          c.COLUMN_ID, c.COLUMN_NAME, c.DATA_TYPE, c.NULLABLE,
          c.DATA_LENGTH, c.DATA_PRECISION, c.DATA_SCALE,
          c.CHAR_LENGTH, c.CHARACTER_SET_NAME,
          c.DATA_DEFAULT, m.COMMENTS
        from ALL_TABLES t
        join ALL_TAB_COLUMNS c
          on t.OWNER = c.OWNER
          and t.TABLE_NAME = c.TABLE_NAME
        left join ALL_COL_COMMENTS m
          on c.OWNER = m.OWNER
          and c.TABLE_NAME = m.TABLE_NAME
          and c.COLUMN_NAME = m.COLUMN_NAME
        where NOT REGEXP_LIKE(t.OWNER, '%s')
          %s /* extra excluded schema/owner */
          %s /* table filter */
        order by 1,2,4
        """ % (self.ignored_owner_regex, owner_exclusion_filter, table_name_filter)
        self.logger.debug(column_info_sql)
        curs_meta.execute(column_info_sql)
        rows = curs_meta.fetchall()
        self.logger.info("Fetched %d records of Oracle metadata" % len(rows))
        curs_meta.close()
        # Remember every distinct table and its PARTITIONED flag.
        prev_table_key = ''
        for row in rows:
            current_table_key = "%s.%s" % (row[0], row[1])  # OWNER.TABLE_NAME
            if current_table_key != prev_table_key:
                self.table_dict[current_table_key] = {"partitioned": row[2]}
                prev_table_key = current_table_key
        self.logger.info("Fetched %d tables: %s" % (len(self.table_dict), self.table_dict))
        return rows

    def get_extra_table_info(self):
        """
        Index, Partition, Size info
        :return: None; enriches self.table_dict with 'indexes' and
                 'partition_columns' entries
        """
        index_info_sql = """
        select
          i.TABLE_OWNER, i.TABLE_NAME, i.INDEX_NAME, i.INDEX_TYPE, i.UNIQUENESS,
          t.CONSTRAINT_NAME,
          --LISTAGG(c.COLUMN_NAME,',')
          --  WITHIN GROUP (ORDER BY c.COLUMN_POSITION) as INDEX_COLUMNS,
          RTRIM(XMLAGG(xmlelement(s,c.COLUMN_NAME,',').extract('//text()')
            ORDER BY c.COLUMN_POSITION),',') INDEX_COLUMNS,
          COUNT(1) NUM_COLUMNS
        from ALL_INDEXES i
        join ALL_IND_COLUMNS c
          on i.OWNER = c.INDEX_OWNER
          and i.INDEX_NAME = c.INDEX_NAME
          and i.TABLE_OWNER = c.TABLE_OWNER
          and i.TABLE_NAME = c.TABLE_NAME
        left join (select coalesce(INDEX_OWNER,OWNER) OWNER, INDEX_NAME, CONSTRAINT_NAME
            from ALL_CONSTRAINTS t
            where INDEX_NAME IS NOT NULL) t
          on i.OWNER = t.OWNER
          and i.INDEX_NAME = t.INDEX_NAME
        where NOT REGEXP_LIKE(i.TABLE_OWNER, '%s')
        group by i.TABLE_OWNER, i.TABLE_NAME, i.INDEX_NAME,
          i.INDEX_TYPE, i.UNIQUENESS, t.CONSTRAINT_NAME
        order by 1,2,3
        """ % self.ignored_owner_regex
        partition_col_sql = """
        select
          OWNER TABLE_OWNER, NAME TABLE_NAME,
          RTRIM(XMLAGG(xmlelement(s,c.COLUMN_NAME,',').extract('//text()')
            ORDER BY c.COLUMN_POSITION),',') PARTITION_COLUMNS,
          COUNT(1) NUM_COLUMNS
        from ALL_PART_KEY_COLUMNS c
        where c.OBJECT_TYPE = 'TABLE' and NOT REGEXP_LIKE(c.OWNER, '%s')
        group by c.OWNER, c.NAME
        order by 1,2
        """ % self.ignored_owner_regex
        curs_meta = self.conn_db.cursor()
        # get partition info first, then index info
        curs_meta.execute(partition_col_sql)
        rows = curs_meta.fetchall()
        for row in rows:
            table_name_key = "%s.%s" % (row[0], row[1])
            if table_name_key not in self.table_dict:
                continue
            self.table_dict[table_name_key]['partition_columns'] = row[2]
        self.logger.info("Found %d record for partition info" % len(rows))
        curs_meta.execute(index_info_sql)
        rows = curs_meta.fetchall()
        curs_meta.close()
        for row in rows:
            table_name_key = "%s.%s" % (row[0], row[1])
            if table_name_key not in self.table_dict:
                continue
            if "indexes" not in self.table_dict[table_name_key]:
                self.table_dict[table_name_key]["indexes"] = []
            self.table_dict[table_name_key]["indexes"].append(
                {
                    "name": row[2],
                    "type": row[3],
                    "is_unique": 'Y' if row[4] == 'UNIQUE' else 'N',
                    "constraint_name": row[5],
                    "index_columns": row[6],
                    "num_of_columns": row[7]
                }
            )
        self.logger.info("Found %d record for index info" % len(rows))

    def format_table_metadata(self, rows):
        """Build dataset and field records from column-level metadata rows.

        :param rows: output of get_table_info(), ordered by owner, table, column id
        :return: None; appends to self.table_output_list / self.field_output_list
        """
        schema_dict = {"fields": []}
        table_record = {}
        table_idx = 0
        field_idx = 0
        for row in rows:
            table_name_key = "%s.%s" % (row[0], row[1])
            table_urn = "oracle:///%s/%s" % (row[0], row[1])
            if 'urn' not in table_record or table_urn != table_record['urn']:
                # This is a new table. Let's push the previous table record into output_list
                if 'urn' in table_record:
                    schema_dict["num_fields"] = field_idx
                    table_record["columns"] = json.dumps(schema_dict)
                    self.table_output_list.append(table_record)
                properties = {
                    "indexes": self.table_dict[table_name_key].get("indexes"),
                    # BUG FIX: get_extra_table_info() stores this under
                    # 'partition_columns' (plural); the old lookup key
                    # 'partition_column' always returned None.
                    "partition_column": self.table_dict[table_name_key].get("partition_columns")
                }
                table_record = {
                    "name": row[1],
                    "columns": None,
                    "schema_type": "JSON",
                    "properties": json.dumps(properties),
                    "urn": table_urn,
                    "source": "Oracle",
                    "location_prefix": row[0],
                    "parent_name": row[0],
                    "storage_type": "Table",
                    "dataset_type": "oracle",
                    "is_partitioned": 'Y' if self.table_dict[table_name_key]["partitioned"] == 'YES' else 'N'
                }
                schema_dict = {"fields": []}
                table_idx += 1
                field_idx = 0
            field_record = {
                "sort_id": self.num_to_int(row[3]),
                "name": row[4],
                "data_type": row[5],
                "nullable": row[6],
                "size": self.num_to_int(row[7]),
                "precision": self.num_to_int(row[8]),
                "scale": self.num_to_int(row[9]),
                "default_value": self.trim_newline(row[12]),
                "doc": self.trim_newline(row[13])
            }
            schema_dict['fields'].append(field_record)
            field_record['dataset_urn'] = table_urn
            self.field_output_list.append(field_record)
            field_idx += 1
        # finish the last table after the loop ends
        schema_dict["num_fields"] = field_idx
        table_record["columns"] = json.dumps(schema_dict)
        self.table_output_list.append(table_record)
        self.logger.info("%d Table records generated" % table_idx)

    def get_sample_data(self, table_fullname, num_rows):
        """Fetch up to num_rows sample rows from a table into
        self.sample_output_list; logs and returns silently on failure."""
        table_urn = "oracle:///%s" % (table_fullname.replace('.', '/'))
        columns = []
        sample_data = []
        sql = 'SELECT * FROM %s WHERE ROWNUM <= %d' % (table_fullname, num_rows)
        cursor = self.conn_db.cursor()
        try:
            cursor.execute(sql)
            rows = cursor.fetchall()
            if len(rows) == 0:
                self.logger.error("dataset {} is empty".format(table_fullname))
                return
            # retrieve column names
            columns = [i[0] for i in cursor.description]
            # retrieve data; encode each field to a clean unicode value
            for row in rows:
                row_data = []
                for value in row:
                    if value is None:
                        row_data.append('')
                    else:
                        # Python 2 / Jython builtin; drops undecodable bytes.
                        row_data.append(unicode(value, errors='ignore'))
                sample_data.append(row_data)
        except Exception as ex:
            self.logger.error("Error fetch sample for {}: {}".format(table_fullname, str(ex)))
            return
        finally:
            # BUG FIX: close the cursor on every exit path; the old code
            # leaked it when the query failed or returned no rows.
            cursor.close()
        data_with_column = map(lambda x: dict(zip(columns, x)), sample_data)
        self.sample_output_list.append({'dataset_urn': table_urn, 'sample_data': data_with_column})

    def num_to_int(self, num):
        """Coerce num to int; return None when it is not convertible."""
        try:
            return int(num)
        except (ValueError, TypeError):
            return None

    def trim_newline(self, line):
        """Flatten CR/LF to spaces, strip, and ASCII-encode; empty/None -> None."""
        return line.replace('\n', ' ').replace('\r', ' ').strip().encode('ascii', 'ignore') if line else None

    def write_csv(self, csv_filename, csv_columns, data_list):
        """Dump a list of dict records to csv_filename, ^Z (0x1A) delimited."""
        csvfile = open(csv_filename, 'wb')
        # MODERNIZED: 0o644 instead of the Python-2-only literal 0644
        # (identical value; 0o syntax is valid on Python 2.6+/Jython too).
        os.chmod(csv_filename, 0o644)
        # Non-printable quote/escape characters effectively disable quoting.
        writer = csv.DictWriter(csvfile, fieldnames=csv_columns, delimiter='\x1A', lineterminator='\n',
                                quoting=csv.QUOTE_NONE, quotechar='\1', escapechar='\0')
        writer.writeheader()
        for data in data_list:
            writer.writerow(data)
        csvfile.close()

    def run(self, exclude_database_list, table_name, table_file, field_file, sample_file, sample=False):
        """
        The entrance of the class: extract schema and, optionally, sample data.
        Notice the databases need to be ordered so that databases with more
        info (DWH_STG) are scanned first.
        :param exclude_database_list: list of excluded databases/owners/schemas
        :param table_name: specific table name to query
        :param table_file: table output csv file path
        :param field_file: table fields output csv file path
        :param sample_file: sample data output csv file path
        :param sample: do sample or not
        :return:
        """
        begin = datetime.datetime.now().strftime("%H:%M:%S")
        # collect table info, enrich it, then format into output records
        rows = self.get_table_info(exclude_database_list, table_name)
        self.get_extra_table_info()
        self.format_table_metadata(rows)
        mid = datetime.datetime.now().strftime("%H:%M:%S")
        self.logger.info("Collecting table info [%s -> %s]" % (str(begin), str(mid)))
        csv_columns = ['name', 'columns', 'schema_type', 'properties', 'urn', 'source', 'location_prefix',
                       'parent_name', 'storage_type', 'dataset_type', 'is_partitioned']
        self.write_csv(table_file, csv_columns, self.table_output_list)
        csv_columns = ['dataset_urn', 'sort_id', 'name', 'data_type', 'nullable',
                       'size', 'precision', 'scale', 'default_value', 'doc']
        self.write_csv(field_file, csv_columns, self.field_output_list)
        if sample:
            # collect sample data (top 10 rows per table)
            for table in self.table_dict.keys():
                self.get_sample_data(table, 10)
            end = datetime.datetime.now().strftime("%H:%M:%S")
            self.logger.info("Collecting sample data [%s -> %s]" % (str(mid), str(end)))
            csv_columns = ['dataset_urn', 'sample_data']
            self.write_csv(sample_file, csv_columns, self.sample_output_list)
if __name__ == "__main__":
    # NOTE(review): sys.argv[1] is indexed with string keys below, so the
    # WhereHows Jython launcher passes a dict-like job config, not a plain
    # string - confirm against the launcher.
    args = sys.argv[1]
    # connection settings
    username = args[Constant.ORA_DB_USERNAME_KEY]
    password = args[Constant.ORA_DB_PASSWORD_KEY]
    JDBC_DRIVER = args[Constant.ORA_DB_DRIVER_KEY]
    JDBC_URL = args[Constant.ORA_DB_URL_KEY]
    e = OracleExtract()
    e.conn_db = zxJDBC.connect(JDBC_URL, username, password, JDBC_DRIVER)
    exclude_databases = filter(bool, args[Constant.ORA_EXCLUDE_DATABASES_KEY].split(','))
    collect_sample = False
    if Constant.ORA_LOAD_SAMPLE in args:
        collect_sample = FileUtil.parse_bool(args[Constant.ORA_LOAD_SAMPLE], False)
    temp_dir = FileUtil.etl_temp_dir(args, "ORACLE")
    table_output_file = os.path.join(temp_dir, args[Constant.ORA_SCHEMA_OUTPUT_KEY])
    field_output_file = os.path.join(temp_dir, args[Constant.ORA_FIELD_OUTPUT_KEY])
    sample_output_file = os.path.join(temp_dir, args[Constant.ORA_SAMPLE_OUTPUT_KEY])
    try:
        # Normalize session settings so timestamps are comparable across runs.
        e.conn_db.cursor().execute("ALTER SESSION SET TIME_ZONE = 'US/Pacific'")
        e.conn_db.cursor().execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'")
        # Tag the session so DBAs can identify this job.
        e.conn_db.cursor().execute("CALL dbms_application_info.set_module('%s','%d')" %
                                   ('WhereHows (Jython)', os.getpid()))
        e.conn_db.commit()
        e.run(exclude_databases,
              None,
              table_output_file,
              field_output_file,
              sample_output_file,
              sample=collect_sample)
    finally:
        e.conn_db.cursor().close()
e.conn_db.close() | 0.301362 | 0.103341 |
import bisect
import os
import sys
EXPORT_RDR = True
try:
import rosbag
except ImportError, e:
print 'Failed to import rosbag, not exporting radar'
EXPORT_RDR = False
sys.path.append('../process')
from GPSReader import GPSReader
class FrameFinder:
    """ Creates a mapping from cloud to frames.

    For each GPS-timestamped camera frame, finds the closest preceding .ldr
    point-cloud file and (when radar export is enabled) the closest preceding
    .rdr radar segment. Python 2 code (long, iteritems).
    """
    # NOTE(review): class-level mutable attributes are shared across
    # instances; only one FrameFinder is created per run, so this is benign.
    frame_to_cloud_map = {}
    map_file_path = ""
    map_file_name = ""

    def __init__(self, gps_file, frames_folder, radar_bag_file, write_to_file):
        # Normalize the folder path and derive the output map name from it.
        if frames_folder[-1] == '/':
            frames_folder = frames_folder[0:-1]
        basename = frames_folder.replace('_frames', '')
        self.map_file_name = basename + ".map"
        reader = GPSReader(gps_file)
        # Camera time should already be sorted because frame # always increases
        camera_times = [utc_from_gps(data['week'], data['seconds'])
                        for data in reader.getData()]
        # Timestamps are encoded in the .ldr file names.
        ldr_times = \
            [long(os.path.splitext(os.path.basename(ldr_file))[0])
             for ldr_file in os.listdir(frames_folder)
             if ldr_file.endswith('.ldr')]
        # Need to sort the ldr_times because listdir returns undefined ordering
        ldr_times.sort()
        if EXPORT_RDR:
            rdr_times = unpack_bag(basename, radar_bag_file)
        for frame_number, camera_time in enumerate(camera_times):
            # Find the closest time in ldr times; bisect returns the insertion
            # point, so index-1 is the latest time <= camera_time and 0 means
            # "no earlier data exists".
            nearest_index_ldr = bisect.bisect(ldr_times, camera_time)
            nearest_index_rdr = -1
            if EXPORT_RDR:
                nearest_index_rdr = bisect.bisect(rdr_times, camera_time)
            if nearest_index_ldr >= 1 and (not EXPORT_RDR or nearest_index_rdr >= 1):
                lidr_file = str(ldr_times[nearest_index_ldr - 1]) + '.ldr'
                if EXPORT_RDR:
                    radar_seq = str(rdr_times[nearest_index_rdr - 1]) + '.rdr'
                # Frames are indexed by 1, not 0
                real_frame = frame_number + 1
                if EXPORT_RDR:
                    self.frame_to_cloud_map[real_frame] = (lidr_file, radar_seq)
                else:
                    self.frame_to_cloud_map[real_frame] = lidr_file
                #print real_frame, (lidr_file, radar_seq)
        if write_to_file:
            self.__write_frame_map()

    def get_map(self):
        """ Returns a mapping from camera frame to ldr file """
        return self.frame_to_cloud_map

    def __write_frame_map(self):
        """ Writes the camera frame to ldr file mapping to a file """
        out_file = open(self.map_file_name, 'w')
        for frame, data in self.get_map().iteritems():
            # NOTE(review): this first assignment is dead - both branches
            # below overwrite `line`.
            line = str(frame) + ' ' + str(data[0])
            if EXPORT_RDR:
                # data is a (ldr_file, rdr_file) tuple
                line = str(frame) + ' ' + str(data[0]) + ' ' + str(data[1])
            else:
                # data is just the ldr file name
                line = str(frame) + ' ' + data
            line += '\n'
            out_file.write(line)
        out_file.close()
def utc_from_gps(gps_week, seconds, leap_seconds=16):
    """ Converts from gps week time to UTC time. UTC time starts from JAN 1,
    1970 and GPS time starts from JAN 6, 1980.
    http://leapsecond.com/java/gpsclock.htm
    """
    SECS_IN_WEEK = 604800
    SECS_GPS_TO_UTC = 315964800
    total_secs = gps_week * SECS_IN_WEEK + seconds + SECS_GPS_TO_UTC - leap_seconds
    # Result is in microseconds, as a Python 2 long.
    return long(total_secs * 1000000)
def unpack_bag(basename, radar_bag_file):
    """ Unpacks the bag and writes individual segments to files.
    The output folder is the basename + _rdr.
    Each file name is the time of the starting segment.
    Returns the sorted list of segment start times (microseconds).
    """
    radar_bag = rosbag.Bag(radar_bag_file)
    times = []       # start time of every segment written
    cur_file = None  # file handle of the segment currently being written
    rdr_dir = basename + '_rdr/'
    if not os.path.exists(rdr_dir):
        os.mkdir(rdr_dir)
    for topic, msg, t in radar_bag.read_messages(topics=['/object_list', '/target_status']):
        # obj_id 61 marks the start of a new radar segment
        if msg.obj_id == 61:
            if cur_file != None:
                cur_file.close()
            # ns -> us, minus a fixed 66 ms offset
            # NOTE(review): the 66000 us offset looks like a hardware latency
            # correction - confirm against the radar setup.
            time = msg.header.stamp.to_nsec()/1000 - 66000
            times.append(time)
            cur_file = open(rdr_dir + str(time) + '.rdr', 'w')
        if cur_file != None:
            # obj_id 0 and 62 messages are skipped
            if msg.obj_id == 0 or msg.obj_id == 62:
                continue
            line = None
            if topic == '/object_list':
                # NOTE(review): 'isMeasurd' is the (misspelled) field name of
                # the upstream message definition.
                if msg.isMeasurd == True:
                    # 'O' record: measured object
                    fmt = 'O {id} {dist} {lat_dist} {rel_spd} {dyn_prop} {rcs} {w} {l}'
                    line = fmt.format(
                        id=msg.obj_id,
                        dist=msg.dist,
                        lat_dist=msg.lat_dist,
                        rel_spd=msg.relative_spd,
                        dyn_prop=msg.dyn_prop,
                        rcs=msg.rcs,
                        w=msg.width,
                        l=msg.length)
            else:
                if msg.status > 0:
                    # 'T' record: tracked target with extended attributes
                    fmt = 'T {id} {dist} {lat_dist} {rel_spd} {dyn_prop} {traj} {w} {l} {obst_probab} {exist_probab} {rel_acc} {type} {lost_reason}'
                    line = fmt.format(
                        id=msg.obj_id,
                        dist=msg.dist,
                        lat_dist=msg.lat_dist,
                        rel_spd=msg.relative_spd,
                        dyn_prop=msg.dyn_prop,
                        traj=msg.traj,
                        w=msg.width,
                        l=msg.length,
                        obst_probab=msg.obst_probab,
                        exist_probab=msg.exist_probab,
                        rel_acc=msg.relative_acc,
                        type=msg.type,
                        lost_reason=msg.lost_reason
                    )
            if line != None:
                cur_file.write(line + '\n')
    times.sort()
    return times
def main():
    """ Prints out times
    """
    # Expect exactly three positional arguments (plus the script name).
    if len(sys.argv) != 4:
        # Python 2 print statement; this script is not Python 3 compatible.
        print """
    Usage: ./FrameFinder.py <gps_output_file> <ldr_folder_directory> <radar_bag_file>
    """
        sys.exit()
    gps_file = sys.argv[1]
    frames_folder = sys.argv[2]
    radar_folder = sys.argv[3]
    # Construction does all the work and writes the .map file.
    FrameFinder(gps_file, frames_folder, radar_folder, write_to_file=True)
if __name__ == '__main__':
main() | lidar/FrameFinder.py |
import bisect
import os
import sys
EXPORT_RDR = True
try:
import rosbag
except ImportError, e:
print 'Failed to import rosbag, not exporting radar'
EXPORT_RDR = False
sys.path.append('../process')
from GPSReader import GPSReader
class FrameFinder:
    """ Creates a mapping from cloud to frames.

    For each GPS-timestamped camera frame, finds the closest preceding .ldr
    point-cloud file and (when radar export is enabled) the closest preceding
    .rdr radar segment. Python 2 code (long, iteritems).
    """
    # NOTE(review): class-level mutable attributes are shared across
    # instances; only one FrameFinder is created per run, so this is benign.
    frame_to_cloud_map = {}
    map_file_path = ""
    map_file_name = ""

    def __init__(self, gps_file, frames_folder, radar_bag_file, write_to_file):
        # Normalize the folder path and derive the output map name from it.
        if frames_folder[-1] == '/':
            frames_folder = frames_folder[0:-1]
        basename = frames_folder.replace('_frames', '')
        self.map_file_name = basename + ".map"
        reader = GPSReader(gps_file)
        # Camera time should already be sorted because frame # always increases
        camera_times = [utc_from_gps(data['week'], data['seconds'])
                        for data in reader.getData()]
        # Timestamps are encoded in the .ldr file names.
        ldr_times = \
            [long(os.path.splitext(os.path.basename(ldr_file))[0])
             for ldr_file in os.listdir(frames_folder)
             if ldr_file.endswith('.ldr')]
        # Need to sort the ldr_times because listdir returns undefined ordering
        ldr_times.sort()
        if EXPORT_RDR:
            rdr_times = unpack_bag(basename, radar_bag_file)
        for frame_number, camera_time in enumerate(camera_times):
            # Find the closest time in ldr times; bisect returns the insertion
            # point, so index-1 is the latest time <= camera_time and 0 means
            # "no earlier data exists".
            nearest_index_ldr = bisect.bisect(ldr_times, camera_time)
            nearest_index_rdr = -1
            if EXPORT_RDR:
                nearest_index_rdr = bisect.bisect(rdr_times, camera_time)
            if nearest_index_ldr >= 1 and (not EXPORT_RDR or nearest_index_rdr >= 1):
                lidr_file = str(ldr_times[nearest_index_ldr - 1]) + '.ldr'
                if EXPORT_RDR:
                    radar_seq = str(rdr_times[nearest_index_rdr - 1]) + '.rdr'
                # Frames are indexed by 1, not 0
                real_frame = frame_number + 1
                if EXPORT_RDR:
                    self.frame_to_cloud_map[real_frame] = (lidr_file, radar_seq)
                else:
                    self.frame_to_cloud_map[real_frame] = lidr_file
                #print real_frame, (lidr_file, radar_seq)
        if write_to_file:
            self.__write_frame_map()

    def get_map(self):
        """ Returns a mapping from camera frame to ldr file """
        return self.frame_to_cloud_map

    def __write_frame_map(self):
        """ Writes the camera frame to ldr file mapping to a file """
        out_file = open(self.map_file_name, 'w')
        for frame, data in self.get_map().iteritems():
            # NOTE(review): this first assignment is dead - both branches
            # below overwrite `line`.
            line = str(frame) + ' ' + str(data[0])
            if EXPORT_RDR:
                # data is a (ldr_file, rdr_file) tuple
                line = str(frame) + ' ' + str(data[0]) + ' ' + str(data[1])
            else:
                # data is just the ldr file name
                line = str(frame) + ' ' + data
            line += '\n'
            out_file.write(line)
        out_file.close()
def utc_from_gps(gps_week, seconds, leap_seconds=16):
    """ Converts from gps week time to UTC time. UTC time starts from JAN 1,
    1970 and GPS time starts from JAN 6, 1980.
    http://leapsecond.com/java/gpsclock.htm
    """
    SECS_IN_WEEK = 604800
    SECS_GPS_TO_UTC = 315964800
    total_secs = gps_week * SECS_IN_WEEK + seconds + SECS_GPS_TO_UTC - leap_seconds
    # Result is in microseconds, as a Python 2 long.
    return long(total_secs * 1000000)
def unpack_bag(basename, radar_bag_file):
    """ Unpacks the bag and writes individual segments to files.
    The output folder is the basename + _rdr.
    Each file name is the time of the starting segment.
    Returns the sorted list of segment start times (microseconds).
    """
    radar_bag = rosbag.Bag(radar_bag_file)
    times = []       # start time of every segment written
    cur_file = None  # file handle of the segment currently being written
    rdr_dir = basename + '_rdr/'
    if not os.path.exists(rdr_dir):
        os.mkdir(rdr_dir)
    for topic, msg, t in radar_bag.read_messages(topics=['/object_list', '/target_status']):
        # obj_id 61 marks the start of a new radar segment
        if msg.obj_id == 61:
            if cur_file != None:
                cur_file.close()
            # ns -> us, minus a fixed 66 ms offset
            # NOTE(review): the 66000 us offset looks like a hardware latency
            # correction - confirm against the radar setup.
            time = msg.header.stamp.to_nsec()/1000 - 66000
            times.append(time)
            cur_file = open(rdr_dir + str(time) + '.rdr', 'w')
        if cur_file != None:
            # obj_id 0 and 62 messages are skipped
            if msg.obj_id == 0 or msg.obj_id == 62:
                continue
            line = None
            if topic == '/object_list':
                # NOTE(review): 'isMeasurd' is the (misspelled) field name of
                # the upstream message definition.
                if msg.isMeasurd == True:
                    # 'O' record: measured object
                    fmt = 'O {id} {dist} {lat_dist} {rel_spd} {dyn_prop} {rcs} {w} {l}'
                    line = fmt.format(
                        id=msg.obj_id,
                        dist=msg.dist,
                        lat_dist=msg.lat_dist,
                        rel_spd=msg.relative_spd,
                        dyn_prop=msg.dyn_prop,
                        rcs=msg.rcs,
                        w=msg.width,
                        l=msg.length)
            else:
                if msg.status > 0:
                    # 'T' record: tracked target with extended attributes
                    fmt = 'T {id} {dist} {lat_dist} {rel_spd} {dyn_prop} {traj} {w} {l} {obst_probab} {exist_probab} {rel_acc} {type} {lost_reason}'
                    line = fmt.format(
                        id=msg.obj_id,
                        dist=msg.dist,
                        lat_dist=msg.lat_dist,
                        rel_spd=msg.relative_spd,
                        dyn_prop=msg.dyn_prop,
                        traj=msg.traj,
                        w=msg.width,
                        l=msg.length,
                        obst_probab=msg.obst_probab,
                        exist_probab=msg.exist_probab,
                        rel_acc=msg.relative_acc,
                        type=msg.type,
                        lost_reason=msg.lost_reason
                    )
            if line != None:
                cur_file.write(line + '\n')
    times.sort()
    return times
def main():
    """ Prints out times
    """
    # Expect exactly three positional arguments (plus the script name).
    if len(sys.argv) != 4:
        # Python 2 print statement; this script is not Python 3 compatible.
        print """
    Usage: ./FrameFinder.py <gps_output_file> <ldr_folder_directory> <radar_bag_file>
    """
        sys.exit()
    gps_file = sys.argv[1]
    frames_folder = sys.argv[2]
    radar_folder = sys.argv[3]
    # Construction does all the work and writes the .map file.
    FrameFinder(gps_file, frames_folder, radar_folder, write_to_file=True)
if __name__ == '__main__':
main() | 0.26218 | 0.22288 |
from dolfin import *
from dolfin_adjoint import *
import numpy as np
from scipy import io
import ufl
set_log_level(LogLevel.ERROR)
from preprocessing import Preprocessing
from ipopt_solver import IPOPTSolver, IPOPTProblem
import Hs_regularization as Hs_reg
try:
from pyadjoint import ipopt # noqa: F401
except ImportError:
print("""This example depends on IPOPT and Python ipopt bindings. \
When compiling IPOPT, make sure to link against HSL, as it \
is a necessity for practical problems.""")
raise
# turn off redundant output in parallel
parameters["std_out_all_processes"] = False

mu = Constant(1.0)                    # viscosity
alphaunderbar = 2.5 * mu / (100**2)   # parameter for \alpha
alphabar = 2.5 * mu / (0.01**2)       # parameter for \alpha
# NOTE(review): alphaunderbar and q appear unused in this file - confirm
# before removing.
q = Constant(0.01)  # q value that controls difficulty/discrete-valuedness of solution
def alpha(rho):
    """Inverse permeability as a function of rho, equation (40)"""
    # Quartic blend on -1 < rho <= 1, linear below, zero above.
    blend = alphabar * (-1.0/16 * rho**4 + 3.0/8 * rho**2 - 0.5 * rho + 3.0/16)
    inner_branch = conditional(gt(rho, -1.0), blend, -1.0 * alphabar * rho)
    return conditional(gt(rho, 1.0), 0.0, inner_branch)
N = 40             # mesh resolution (cells per unit height)
delta = 1.5        # The aspect ratio of the domain, 1 high and \delta wide
V = 1.0/3 * delta  # want the fluid to occupy 1/3 of the domain
mesh = Mesh(RectangleMesh(MPI.comm_world, Point(0.0, 0.0), Point(delta, 1.0), int(delta*N), N))
controls_file = File('../Output/final_controls_' + str(N) +'.pvd')
# test if alpha does the correct thing
#P_h = FiniteElement("CG", mesh.ufl_cell(), 1)
#P = FunctionSpace(mesh, P_h)
#c = interpolate(Expression("-4+8*x[0]", degree=1), P)
#testfile = File('./Output/c.pvd')
#v = TestFunction(P)
#vh = assemble(alpha(c)*v*dx)
#c.vector()[:] = vh[:]
#testfile << c
A = FunctionSpace(mesh, "CG", 1)  # control function space
U_h = VectorElement("CG", mesh.ufl_cell(), 2)   # velocity element
P_h = FiniteElement("CG", mesh.ufl_cell(), 1)   # pressure element
W = FunctionSpace(mesh, U_h*P_h)  # mixed Taylor-Hood function space
B = FunctionSpace(mesh, "DG", 0)  # piecewise-constant space for raw controls
b = Function(B)
k = len(b.vector()[:])  # number of DG0 dofs; the control vector has k/2 entries
b.vector()[:] = range(k)
#file = File("./Output/b_ved.pvd")
#file << b
# Define the boundary condition on velocity
class InflowOutflow(UserExpression):
    """Velocity boundary profile: two parabolic openings of width 1/6 centred
    at y = 1/4 and y = 3/4 on the left (x=0) and right (x=delta) walls; the
    velocity is zero elsewhere."""

    def eval(self, values, x):
        values[0] = 0.0
        values[1] = 0.0
        opening = 1.0/6.0  # opening width
        gbar = 1.0         # peak velocity
        if x[0] == 0.0 or x[0] == delta:
            for centre in (1.0/4, 3.0/4):
                offset = x[1] - centre
                if -opening/2 < offset < opening/2:
                    values[0] = gbar*(1 - (2*offset/opening)**2)

    def value_shape(self):
        return (2,)
def forward(rho):
    """Solve the forward problem for a given fluid distribution rho(x).

    Returns the mixed solution w = (u, p) in the Taylor-Hood space W.
    """
    w = Function(W)
    (u, p) = TrialFunctions(W)
    (v, q) = TestFunctions(W)
    # Weak form: alpha(rho) u.v + grad(u):grad(v) + grad(p).v + div(u) q
    F = (alpha(rho) * inner(u, v) * dx + inner(grad(u), grad(v)) * dx +
         inner(grad(p), v) * dx + inner(div(u), q) * dx)
    # Prescribed inflow/outflow velocity on the whole boundary.
    bc = DirichletBC(W.sub(0), InflowOutflow(degree=2), "on_boundary")
    solve(lhs(F) == rhs(F), w, bcs=bc)
    return w
def save_control(x0, controls_file, index=-1, J=None):
    """Write the control vector x0 to the given .pvd file and, when index is
    not -1, also dump it as a MATLAB .mat snapshot."""
    rho = preprocessing.dof_to_control(x0)
    rho.rename("density", "density")
    print('objective function value J', J(rho))
    controls_file << rho
    if index != -1:
        filename = '../Output/matlab_controls_' + str(N) + '_' + str(index + 1) + '.mat'
        io.savemat(filename, mdict={'data': x0})
if __name__ == "__main__":
    # Initial control: uniform value satisfying the volume constraint.
    x0 = (2.*V/delta -1)*np.ones(int(k/2))
    # preprocessing class which contains dof_to_control-mapping
    weighting = 1.  # consider L2-mass-matrix + weighting * Hs-matrix
    sigma = 7./16   # Sobolev exponent for the H^sigma regularization
    preprocessing = Preprocessing(N, B)
    inner_product_matrix = Hs_reg.AssembleHs(N,delta,sigma).get_matrix(weighting)
    rho = preprocessing.dof_to_control(x0)
    # get reduced objective function: rho --> j(rho)
    set_working_tape(Tape())
    w = forward(rho)
    (u, p) = split(w)
    controls = File("../Output/control_iterations_guess" + str(N) +".pvd")
    allctrls = File("../Output/allcontrols_" + str(N) + ".pvd")
    rho_viz = Function(A, name="ControlVisualisation")
    def eval_cb(j, rho):
        # Record every optimizer iterate for visualization.
        rho_viz.assign(rho)
        controls << rho_viz
        allctrls << rho_viz
    # objective function (dissipated power)
    J = assemble(0.5 * inner(alpha(rho) * u, u) * dx + 0.5 * mu * inner(grad(u), grad(u)) * dx)
    # penalty term in objective function (violation of -1 <= rho <= 1)
    J2 = assemble(ufl.Max(rho - 1.0, 0.0)**2 *dx + ufl.Max(-rho - 1.0, 0.0)**2 *dx)
    m = Control(rho)
    Jhat = [ReducedFunctional(J, m, eval_cb_post=eval_cb), ReducedFunctional(J2, m)]
    # constraints
    v = 1.0 /V * assemble((0.5 * (rho + 1)) * dx) - 1.0  # volume constraint
    s = assemble( 1.0/delta*(rho*rho -1.0) *dx)  # spherical constraint
    constraints = [ReducedFunctional(v,m), ReducedFunctional(s,m)]
    bounds = [[0.0, 0.0],[-1.0, 0.0]]  # [[lower bound vc, upper bound vc],[lower bound sc, upper bound sc]]
    # scaling
    scaling_Jhat = [1.0, 0.0]  # objective for optimization: scaling_Jhat[0]*Jhat[0]+scaling_Jhat[1]*Jhat[1]
    scaling_constraints = [1.0, 1.0]  # scaling of constraints for Ipopt
    reg = 10.0  # regularization parameter
    # initial (unpenalized) problem
    problem = IPOPTProblem(Jhat, scaling_Jhat, constraints, scaling_constraints, bounds,
                           preprocessing, inner_product_matrix, reg)
    ipopt = IPOPTSolver(problem)
    #ipopt.test_objective(len(x0))
    #ipopt.test_constraints(len(x0), 1, option=1)
    x0 = ipopt.solve(x0)
    save_control(x0, controls_file, 0, J = Jhat[0])
    # different weights for H_sigma matrix
    weight = [0.01, 0.01, 0.01]
    # different penalization parameters
    eta = [40, 200, 1000]
    # bounds for the constraints
    bounds = [[0.0, 0.0], [0.0, 0.0]]
    # Continuation loop: increase the box-constraint penalization eta while
    # re-projecting the iterate onto the sphere each round.
    for j in range(len(eta)):
        # update inner product
        weighting = weight[j]  # consider L2-mass-matrix + weighting * Hs-matrix
        inner_product_matrix = Hs_reg.AssembleHs(N,delta,sigma).get_matrix(weighting)
        scaling_Jhat = [1.0, eta[j]]
        # move x0 onto sphere
        x0 = preprocessing.move_onto_sphere(x0, V, delta)
        # solve optimization problem
        problem = IPOPTProblem(Jhat, scaling_Jhat, constraints, scaling_constraints, bounds, preprocessing,
                               inner_product_matrix, reg)
        ipopt = IPOPTSolver(problem)
        x0 = ipopt.solve(x0)
        save_control(x0, controls_file, j+1, J = Jhat[0])
save_control(x0, controls_file, j+1, J = Jhat[0]) | topopt/topopt.py | from dolfin import *
from dolfin_adjoint import *
import numpy as np
from scipy import io
import ufl
set_log_level(LogLevel.ERROR)
from preprocessing import Preprocessing
from ipopt_solver import IPOPTSolver, IPOPTProblem
import Hs_regularization as Hs_reg
try:
from pyadjoint import ipopt # noqa: F401
except ImportError:
print("""This example depends on IPOPT and Python ipopt bindings. \
When compiling IPOPT, make sure to link against HSL, as it \
is a necessity for practical problems.""")
raise
# turn off redundant output in parallel
parameters["std_out_all_processes"] = False

mu = Constant(1.0)                    # viscosity
alphaunderbar = 2.5 * mu / (100**2)   # parameter for \alpha
alphabar = 2.5 * mu / (0.01**2)       # parameter for \alpha
# NOTE(review): alphaunderbar and q appear unused in this file - confirm
# before removing.
q = Constant(0.01)  # q value that controls difficulty/discrete-valuedness of solution
def alpha(rho):
    """Inverse permeability as a function of rho, equation (40).

    Piecewise: 0 for rho > 1, a smooth quartic blend on (-1, 1], and the
    linear ramp -alphabar*rho for rho <= -1.
    """
    quartic_blend = alphabar * (-1.0/16*rho**4 + 3.0/8*rho**2 - 0.5*rho + 3.0/16)
    linear_ramp = -1.0 * alphabar * rho
    return conditional(gt(rho, 1.0), 0.0,
                       conditional(gt(rho, -1.0), quartic_blend, linear_ramp))
# Discretization and geometry parameters.
N = 40
delta = 1.5 # The aspect ratio of the domain, 1 high and \delta wide
V = 1.0/3 * delta # want the fluid to occupy 1/3 of the domain
mesh = Mesh(RectangleMesh(MPI.comm_world, Point(0.0, 0.0), Point(delta, 1.0), int(delta*N), N))
controls_file = File('../Output/final_controls_' + str(N) +'.pvd')
# test if alpha does the correct thing
#P_h = FiniteElement("CG", mesh.ufl_cell(), 1)
#P = FunctionSpace(mesh, P_h)
#c = interpolate(Expression("-4+8*x[0]", degree=1), P)
#testfile = File('./Output/c.pvd')
#v = TestFunction(P)
#vh = assemble(alpha(c)*v*dx)
#c.vector()[:] = vh[:]
#testfile << c
# Function spaces: A for the control, W for the mixed state (velocity, pressure).
A = FunctionSpace(mesh, "CG", 1) # control function space
U_h = VectorElement("CG", mesh.ufl_cell(), 2)
P_h = FiniteElement("CG", mesh.ufl_cell(), 1)
W = FunctionSpace(mesh, U_h*P_h) # mixed Taylor-Hood function space
B = FunctionSpace(mesh, "DG", 0)
# b enumerates the DG0 dofs; k is the total DG0 dof count, used for sizing x0.
b = Function(B)
k = len(b.vector()[:])
b.vector()[:] = range(k)
#file = File("./Output/b_ved.pvd")
#file << b
class InflowOutflow(UserExpression):
    """Velocity boundary profile: two parabolic jets of width 1/6 centred at
    y = 1/4 and y = 3/4 on the vertical boundaries (x = 0 and x = delta);
    zero velocity everywhere else."""

    def eval(self, values, x):
        values[0] = 0.0
        values[1] = 0.0
        jet_width = 1.0/6.0
        gbar = 1.0
        if not (x[0] == 0.0 or x[0] == delta):
            return
        for centre in (1.0/4, 3.0/4):
            offset = x[1] - centre
            if abs(offset) < jet_width/2:
                values[0] = gbar*(1 - (2*offset/jet_width)**2)

    def value_shape(self):
        return (2,)
def forward(rho):
    """Solve the forward problem for a given fluid distribution rho(x).

    Builds the mixed velocity/pressure variational form with the alpha(rho)
    damping term and solves it on W, with the InflowOutflow profile imposed
    on the velocity over the whole boundary.
    """
    w = Function(W)
    (u, p) = TrialFunctions(W)
    (v, q) = TestFunctions(W)
    F = (alpha(rho) * inner(u, v) * dx + inner(grad(u), grad(v)) * dx +
         inner(grad(p), v) * dx + inner(div(u), q) * dx)
    bc = DirichletBC(W.sub(0), InflowOutflow(degree=2), "on_boundary")
    solve(lhs(F) == rhs(F), w, bcs=bc)
    return w
def save_control(x0, controls_file, index=-1, J=None):
    """Write the control to the pvd stream and, for index >= 0, dump the raw
    dof vector to a MATLAB .mat file as well."""
    density = preprocessing.dof_to_control(x0)
    density.rename("density", "density")
    print('objective function value J', J(density))
    controls_file << density
    if index != -1:
        matlab_name = '../Output/matlab_controls_' + str(N) + '_' + str(index + 1) + '.mat'
        io.savemat(matlab_name, mdict={'data': x0})
if __name__ == "__main__":
    # Initial guess: constant control realizing the prescribed fluid volume V.
    x0 = (2.*V/delta -1)*np.ones(int(k/2))
    # preprocessing class which contains dof_to_control-mapping
    weighting = 1. # consider L2-mass-matrix + weighting * Hs-matrix
    sigma = 7./16
    preprocessing = Preprocessing(N, B)
    inner_product_matrix = Hs_reg.AssembleHs(N,delta,sigma).get_matrix(weighting)
    rho = preprocessing.dof_to_control(x0)
    # get reduced objective function: rho --> j(rho)
    set_working_tape(Tape())
    w = forward(rho)
    (u, p) = split(w)
    controls = File("../Output/control_iterations_guess" + str(N) +".pvd")
    allctrls = File("../Output/allcontrols_" + str(N) + ".pvd")
    rho_viz = Function(A, name="ControlVisualisation")
    def eval_cb(j, rho):
        # Post-evaluation callback: dump the current control for visualization.
        rho_viz.assign(rho)
        controls << rho_viz
        allctrls << rho_viz
    # objective function
    J = assemble(0.5 * inner(alpha(rho) * u, u) * dx + 0.5 * mu * inner(grad(u), grad(u)) * dx)
    # penalty term in objective function (penalizes |rho| > 1)
    J2 = assemble(ufl.Max(rho - 1.0, 0.0)**2 *dx + ufl.Max(-rho - 1.0, 0.0)**2 *dx)
    m = Control(rho)
    Jhat = [ReducedFunctional(J, m, eval_cb_post=eval_cb), ReducedFunctional(J2, m)]
    # constraints
    v = 1.0 /V * assemble((0.5 * (rho + 1)) * dx) - 1.0 # volume constraint
    s = assemble( 1.0/delta*(rho*rho -1.0) *dx) # spherical constraint
    constraints = [ReducedFunctional(v,m), ReducedFunctional(s,m)]
    bounds = [[0.0, 0.0],[-1.0, 0.0]] # [[lower bound vc, upper bound vc],[lower bound sc, upper bound sc]]
    # scaling
    scaling_Jhat = [1.0, 0.0] # objective for optimization: scaling_Jhat[0]*Jhat[0]+scaling_Jhat[1]*Jhat[1]
    scaling_constraints = [1.0, 1.0] # scaling of constraints for Ipopt
    reg = 10.0 # regularization parameter
    # problem
    problem = IPOPTProblem(Jhat, scaling_Jhat, constraints, scaling_constraints, bounds,
                           preprocessing, inner_product_matrix, reg)
    ipopt = IPOPTSolver(problem)
    #ipopt.test_objective(len(x0))
    #ipopt.test_constraints(len(x0), 1, option=1)
    x0 = ipopt.solve(x0)
    save_control(x0, controls_file, 0, J = Jhat[0])
    # different weights for H_sigma matrix
    weight = [0.01, 0.01, 0.01]
    # different penalization parameters
    eta = [40, 200, 1000]
    # bounds for the constraints
    bounds = [[0.0, 0.0], [0.0, 0.0]]
    for j in range(len(eta)):
        # update inner product
        weighting = weight[j] # consider L2-mass-matrix + weighting * Hs-matrix
        inner_product_matrix = Hs_reg.AssembleHs(N,delta,sigma).get_matrix(weighting)
        scaling_Jhat = [1.0, eta[j]]
        # move x0 onto sphere
        x0 = preprocessing.move_onto_sphere(x0, V, delta)
        # solve optimization problem
        problem = IPOPTProblem(Jhat, scaling_Jhat, constraints, scaling_constraints, bounds, preprocessing,
                               inner_product_matrix, reg)
        ipopt = IPOPTSolver(problem)
        x0 = ipopt.solve(x0)
        save_control(x0, controls_file, j+1, J = Jhat[0])
save_control(x0, controls_file, j+1, J = Jhat[0]) | 0.504883 | 0.422981 |
from __future__ import (print_function, division, absolute_import, unicode_literals)
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from ..models import Badge, FUNCTION_WITH_RELATED_MODEL, UserBadge
from ..badge_registry import check_function_registry
from .utils_for_tests import DummyModelForTest
def dummy_check_function(instance, badge):
    """Stub check function for tests: always awards, regardless of input."""
    return True
def check_function_more_than_related_model(instance, badge):
    """Return (user, awarded): awarded iff the user owns more related
    DummyModelForTest rows than the badge's numeric threshold."""
    owner = instance.user
    threshold = int(badge.condition_parameters)
    awarded = owner.dummymodelfortest_set.count() > threshold
    return owner, awarded
class BadgeWithFunctionConditionTest(TestCase):
    """Tests for badges whose condition is a registered check function."""
    def setUp(self):
        self.user = get_user_model().objects.create(username='user1')
        self.FUNCTION_NAME = 'check_function_more_than_related_model'
        self.INVALID_FUNCTION_NAME = 'invalid_check_function'
        check_function_registry.register_function(self.FUNCTION_NAME,
                                                  check_function_more_than_related_model)
        # Badge awarded once the user owns more than 3 DummyModelForTest rows.
        self.badge_more_three = Badge.objects.create(name="badge_more_three", name_is_visible=True,
                                                     content_type=ContentType.objects.get_for_model(DummyModelForTest),
                                                     condition_is_visible=True, is_active=True,
                                                     condition_type=FUNCTION_WITH_RELATED_MODEL,
                                                     condition_function_name='check_function_more_than_related_model',
                                                     condition_parameters="3")
    def test_simple_condition_greater_than(self):
        # No badge before any related objects exist.
        self.assertEqual(0, UserBadge.objects.all().count())
        dummy1 = DummyModelForTest(name="Dummy 1", user=self.user)
        dummy1.save()
        dummy2 = DummyModelForTest(name="Dummy 2", user=self.user)
        dummy2.save()
        # Two related rows: still at or below the threshold of 3.
        self.assertEqual(2, DummyModelForTest.objects.count())
        self.assertEqual(0, UserBadge.objects.all().count())
        dummy3 = DummyModelForTest(name="<NAME>", user=self.user)
        dummy3.save()
        dummy4 = DummyModelForTest(name="<NAME>", user=self.user)
        dummy4.save()
self.assertEqual(1, UserBadge.objects.all().count()) | badgificator/tests/test_functions_condition.py | from __future__ import (print_function, division, absolute_import, unicode_literals)
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from ..models import Badge, FUNCTION_WITH_RELATED_MODEL, UserBadge
from ..badge_registry import check_function_registry
from .utils_for_tests import DummyModelForTest
def dummy_check_function(instance, badge):
    """Stub check function for tests: always awards, regardless of input."""
    return True
def check_function_more_than_related_model(instance, badge):
    """Return (user, awarded): awarded iff the user owns more related
    DummyModelForTest rows than the badge's numeric threshold."""
    owner = instance.user
    threshold = int(badge.condition_parameters)
    awarded = owner.dummymodelfortest_set.count() > threshold
    return owner, awarded
class BadgeWithFunctionConditionTest(TestCase):
    """Tests for badges whose condition is a registered check function."""
    def setUp(self):
        self.user = get_user_model().objects.create(username='user1')
        self.FUNCTION_NAME = 'check_function_more_than_related_model'
        self.INVALID_FUNCTION_NAME = 'invalid_check_function'
        check_function_registry.register_function(self.FUNCTION_NAME,
                                                  check_function_more_than_related_model)
        # Badge awarded once the user owns more than 3 DummyModelForTest rows.
        self.badge_more_three = Badge.objects.create(name="badge_more_three", name_is_visible=True,
                                                     content_type=ContentType.objects.get_for_model(DummyModelForTest),
                                                     condition_is_visible=True, is_active=True,
                                                     condition_type=FUNCTION_WITH_RELATED_MODEL,
                                                     condition_function_name='check_function_more_than_related_model',
                                                     condition_parameters="3")
    def test_simple_condition_greater_than(self):
        # No badge before any related objects exist.
        self.assertEqual(0, UserBadge.objects.all().count())
        dummy1 = DummyModelForTest(name="Dummy 1", user=self.user)
        dummy1.save()
        dummy2 = DummyModelForTest(name="Dummy 2", user=self.user)
        dummy2.save()
        # Two related rows: still at or below the threshold of 3.
        self.assertEqual(2, DummyModelForTest.objects.count())
        self.assertEqual(0, UserBadge.objects.all().count())
        dummy3 = DummyModelForTest(name="<NAME>", user=self.user)
        dummy3.save()
        dummy4 = DummyModelForTest(name="<NAME>", user=self.user)
        dummy4.save()
self.assertEqual(1, UserBadge.objects.all().count()) | 0.382949 | 0.18543 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import codecs
import shutil
import zipfile
import requests
__all__ = [
"wget", "unzip", "rm", "mkdir", "rmdir", "mv"
]
_CURRENT_FILE = os.path.dirname(__file__)
def wget(url, save_path=None, rename=None):
    """Download *url* to disk with a console progress bar.

    Parameters
    ----------
    url : str
        Remote file URL; the basename after the last '/' is the default name.
    save_path : str, optional
        Target directory (defaults to the current working directory).
    rename : str, optional
        Target file name (defaults to the remote file name).

    Returns
    -------
    str
        Absolute path of the downloaded file.

    Raises
    ------
    requests.HTTPError
        If the server responds with a non-2xx status code.
    """
    file_name = url[url.rfind("/")+1:]
    if not save_path:
        save_path = os.getcwd()
    if not rename:
        rename = file_name
    save_path = os.path.abspath(os.path.join(save_path, rename))
    print("[wget] downloading from {}".format(url))
    start = time.time()
    size = 0
    chunk_size = 10240
    with requests.get(url, stream=True) as response:
        # Bug fix: fail loudly on HTTP errors instead of silently returning a
        # path to a file that was never written (old code only checked 200).
        response.raise_for_status()
        # "content-length" may be absent (e.g. chunked transfer encoding);
        # guard against KeyError and the resulting ZeroDivisionError below.
        content_size = int(response.headers.get("content-length", 0))
        if content_size:
            print("[wget] file size: %.2f MB" %(content_size / 1024 / 1024))
        # Binary mode: plain open() suffices, codecs.open is unnecessary.
        with open(save_path, "wb") as f:
            for data in response.iter_content(chunk_size=chunk_size):
                f.write(data)
                size += len(data)
                if content_size:
                    print("\r"+"[wget] %s%.2f%%"
                          %(">"*int(size*50/content_size), float(size/content_size*100)), end="")
    end = time.time()
    print("\n"+"[wget] complete! cost: %.2fs."%(end-start))
    print("[wget] save at: %s" %save_path)
    return save_path
def unzip(file_path, save_path=None):
    """Extract a zip archive.

    Parameters
    ----------
    file_path : str
        Path to the .zip archive.
    save_path : str, optional
        Extraction directory; defaults to the directory containing the archive.

    Returns
    -------
    str
        The directory the archive was extracted into.
    """
    if not save_path:
        # os.path.dirname replaces the old manual '/'-splitting: identical on
        # POSIX, and also correct with Windows path separators.
        save_path = os.path.dirname(os.path.abspath(file_path))
    with zipfile.ZipFile(file_path) as zf:
        zf.extractall(save_path)
    print("[unzip] file path: {}, save at {}".format(file_path, save_path))
    return save_path
def rm(file_path):
    """Delete a single file, printing the absolute path that was removed."""
    target = os.path.abspath(file_path)
    os.remove(target)
    print("[remove] file path {}".format(target))
def mkdir(file_path):
    """Create a directory (with intermediates); return its absolute path."""
    target = os.path.abspath(file_path)
    os.makedirs(target)
    print("[mkdir] create directory {}".format(target))
    return target
def rmdir(file_path):
    """Recursively delete a directory tree."""
    target = os.path.abspath(file_path)
    shutil.rmtree(target)
    print("[rmdir] remove directory {}".format(target))
def mv(from_file_path, to_file_path):
    """Rename/move a file or directory (same-filesystem move via os.rename)."""
    from_file_path = os.path.abspath(from_file_path)
    to_file_path = os.path.abspath(to_file_path)
    os.rename(from_file_path, to_file_path)
    print("[move] move file from {} to {}".format(from_file_path, to_file_path))
return | PyCLUE/utils/utils/file_utils.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import codecs
import shutil
import zipfile
import requests
__all__ = [
"wget", "unzip", "rm", "mkdir", "rmdir", "mv"
]
_CURRENT_FILE = os.path.dirname(__file__)
def wget(url, save_path=None, rename=None):
    """Download *url* to disk with a console progress bar.

    Parameters
    ----------
    url : str
        Remote file URL; the basename after the last '/' is the default name.
    save_path : str, optional
        Target directory (defaults to the current working directory).
    rename : str, optional
        Target file name (defaults to the remote file name).

    Returns
    -------
    str
        Absolute path of the downloaded file.

    Raises
    ------
    requests.HTTPError
        If the server responds with a non-2xx status code.
    """
    file_name = url[url.rfind("/")+1:]
    if not save_path:
        save_path = os.getcwd()
    if not rename:
        rename = file_name
    save_path = os.path.abspath(os.path.join(save_path, rename))
    print("[wget] downloading from {}".format(url))
    start = time.time()
    size = 0
    chunk_size = 10240
    with requests.get(url, stream=True) as response:
        # Bug fix: fail loudly on HTTP errors instead of silently returning a
        # path to a file that was never written (old code only checked 200).
        response.raise_for_status()
        # "content-length" may be absent (e.g. chunked transfer encoding);
        # guard against KeyError and the resulting ZeroDivisionError below.
        content_size = int(response.headers.get("content-length", 0))
        if content_size:
            print("[wget] file size: %.2f MB" %(content_size / 1024 / 1024))
        # Binary mode: plain open() suffices, codecs.open is unnecessary.
        with open(save_path, "wb") as f:
            for data in response.iter_content(chunk_size=chunk_size):
                f.write(data)
                size += len(data)
                if content_size:
                    print("\r"+"[wget] %s%.2f%%"
                          %(">"*int(size*50/content_size), float(size/content_size*100)), end="")
    end = time.time()
    print("\n"+"[wget] complete! cost: %.2fs."%(end-start))
    print("[wget] save at: %s" %save_path)
    return save_path
def unzip(file_path, save_path=None):
    """Extract a zip archive.

    Parameters
    ----------
    file_path : str
        Path to the .zip archive.
    save_path : str, optional
        Extraction directory; defaults to the directory containing the archive.

    Returns
    -------
    str
        The directory the archive was extracted into.
    """
    if not save_path:
        # os.path.dirname replaces the old manual '/'-splitting: identical on
        # POSIX, and also correct with Windows path separators.
        save_path = os.path.dirname(os.path.abspath(file_path))
    with zipfile.ZipFile(file_path) as zf:
        zf.extractall(save_path)
    print("[unzip] file path: {}, save at {}".format(file_path, save_path))
    return save_path
def rm(file_path):
    """Delete a single file, printing the absolute path that was removed."""
    target = os.path.abspath(file_path)
    os.remove(target)
    print("[remove] file path {}".format(target))
def mkdir(file_path):
    """Create a directory (with intermediates); return its absolute path."""
    target = os.path.abspath(file_path)
    os.makedirs(target)
    print("[mkdir] create directory {}".format(target))
    return target
def rmdir(file_path):
    """Recursively delete a directory tree."""
    target = os.path.abspath(file_path)
    shutil.rmtree(target)
    print("[rmdir] remove directory {}".format(target))
def mv(from_file_path, to_file_path):
    """Rename/move a file or directory (same-filesystem move via os.rename)."""
    from_file_path = os.path.abspath(from_file_path)
    to_file_path = os.path.abspath(to_file_path)
    os.rename(from_file_path, to_file_path)
    print("[move] move file from {} to {}".format(from_file_path, to_file_path))
return | 0.338077 | 0.069479 |
import sys
from pathlib import Path
from collections import Counter
from django.core.management.base import BaseCommand, CommandError
from django.db.utils import DataError, OperationalError, IntegrityError
from django_lr_loader.models import LightroomCatalog, LightroomImageFileInfo, ImageToFileInfo
from django_fs_searcher.models import FileInfo
class Command(BaseCommand):
    help = """Matches images found by 'get_lightroom_catalog' to images (if possible) in the file lists collected by get_file_system in django-filesystem-searcher, another Django application. Potential and positive image matches between Lightroom catalogs and file system data are stored in the database for later analysis."""
    # CLI option values; populated in handle() from the parsed options.
    lr_hostname = None
    fs_hostname = None
    fs_volume = None
    lrcat_hostname = None
    lrcat_volume = None
    verbose = False
def add_arguments(self, parser):
    """Register the command-line options for this management command."""
    optional_filters = (
        ("--lr_hostname", "Limit matches to Lightroom hostname."),
        ("--fs_hostname", "Limit matches to file system hostname."),
        ("--fs_volume", "Limit matches to file system volume."),
    )
    for flag, description in optional_filters:
        parser.add_argument(flag, help=description, default=None)
    parser.add_argument("--lrcat_hostname", help="Hostname where working Lightroom catalog is located.")
    parser.add_argument("--lrcat_volume", help="Volume where working Lightroom catalog is located.")
def certainty(self, hashes=None, matches=None):
    """Score candidate matches and promote Lightroom image statuses.

    ``hashes`` holds the dropbox hash of every candidate file (one entry per
    match), so a match's certainty is the percentage of candidates sharing
    its content hash.

    Side effects: saves every match; may update the related
    LightroomImageFileInfo.status to FILE_EXISTS_IN_PICTURES (match on the
    working-catalog host/volume) or FILE_EXISTS_ON_BACKUP (elsewhere).
    """
    # Bug fix: mutable default arguments ([]) replaced with None sentinels.
    hashes = [] if hashes is None else hashes
    matches = [] if matches is None else matches
    distinct_hash_counts = Counter(hashes)
    if self.verbose:
        print("\tMatches", len(matches), "Hashes", len(hashes), "Distinct Hashes", len(distinct_hash_counts.keys()))
    for match in matches:
        match.certainty = int(100.0 * distinct_hash_counts[match.file_info.dropbox_hash] / len(hashes))
        match.save()
        if (
            match.certainty > 50 and 'Pictures' in match.file_info.full_path and
            match.lightroom_image_file_info.status != LightroomImageFileInfo.Status.FILE_EXISTS_IN_PICTURES
        ):
            if (
                self.lrcat_hostname and self.lrcat_hostname == match.file_info.hostname and
                self.lrcat_volume and self.lrcat_volume == match.file_info.volume
            ):
                match.lightroom_image_file_info.status = LightroomImageFileInfo.Status.FILE_EXISTS_IN_PICTURES
                match.lightroom_image_file_info.save()
            elif match.lightroom_image_file_info.status == LightroomImageFileInfo.Status.UNKNOWN:
                match.lightroom_image_file_info.status = LightroomImageFileInfo.Status.FILE_EXISTS_ON_BACKUP
                match.lightroom_image_file_info.save()
        if self.verbose:
            print(f"\tMatch id {match.lightroom_image_file_info.id}::{match.file_info.id}", file=sys.stderr)
            print(f"\tfile_name {match.lightroom_image_file_info.print_path}::{match.file_info.full_path}", file=sys.stderr)
            # Bug fix: sys.stderr was passed positionally (and so printed as
            # text) instead of being the file= destination.
            print(f"\tCertainty {match.certainty}", f"Status {match.lightroom_image_file_info.status}", file=sys.stderr)
def handle(self, *args, **options):
    """Match Lightroom catalog images against collected file-system records.

    Applies the optional hostname/volume filters, then for each Lightroom
    image gathers file-system candidates with the same file name whose full
    path ends with the image's normalized path, and delegates scoring and
    status updates to certainty().
    """
    self.lr_hostname = options['lr_hostname']
    self.fs_hostname = options['fs_hostname']
    self.fs_volume = options['fs_volume']
    self.lrcat_hostname = options['lrcat_hostname']
    self.lrcat_volume = options['lrcat_volume']
    self.verbose = True if options['verbosity'] else False
    if self.verbose:
        print('lr_hostname', self.lr_hostname, file=sys.stderr)
        print('fs_hostname', self.fs_hostname, file=sys.stderr)
        print('fs_volume', self.fs_volume, file=sys.stderr)
        print('lrcat_hostname', self.lrcat_hostname, file=sys.stderr)
        print('lrcat_volume', self.lrcat_volume, file=sys.stderr)
    if self.lr_hostname:
        lr_queryset = LightroomImageFileInfo.objects.filter(hostname=self.lr_hostname)
    else:
        lr_queryset = LightroomImageFileInfo.objects.all()
    fs_queryset = FileInfo.objects
    if not self.fs_hostname and not self.fs_volume:
        fs_queryset = fs_queryset.all()
    if self.fs_hostname:
        fs_queryset = fs_queryset.filter(hostname=self.fs_hostname)
    if self.fs_volume:
        fs_queryset = fs_queryset.filter(volume=self.fs_volume)
    for lightroom_image_file_info in lr_queryset:
        # Normalize the catalog path: strip drive letter, leading slash and
        # a leading 'Pictures/' so it can be suffix-matched against paths.
        path = lightroom_image_file_info.print_path
        if path[1] == ':':
            # Remove assumed Windows Drive Letter
            path = path[2:]
        if path[0] == '/':
            path = path[1:]
        if path.startswith('Pictures/'):
            path = path[9:]
        if self.verbose:
            # NOTE(review): sys.stderr is passed positionally here, so it is
            # printed as text; likely meant print(path, file=sys.stderr).
            print(path, sys.stderr)
        hashes = []
        matches = []
        for file_info in fs_queryset.filter(file_name=lightroom_image_file_info.file_original_name):
            if file_info.full_path.endswith(path):
                hashes.append(file_info.dropbox_hash)
                matches.append(
                    ImageToFileInfo(
                        lightroom_catalog=lightroom_image_file_info.lightroom_catalog,
                        lightroom_image_file_info=lightroom_image_file_info,
                        file_info=file_info,
                        certainty=0
                    )
                )
self.certainty(hashes=hashes, matches=matches) | django_lr_loader/management/commands/match_images_to_file_system.py | import sys
from pathlib import Path
from collections import Counter
from django.core.management.base import BaseCommand, CommandError
from django.db.utils import DataError, OperationalError, IntegrityError
from django_lr_loader.models import LightroomCatalog, LightroomImageFileInfo, ImageToFileInfo
from django_fs_searcher.models import FileInfo
class Command(BaseCommand):
    help = """Matches images found by 'get_lightroom_catalog' to images (if possible) in the file lists collected by get_file_system in django-filesystem-searcher, another Django application. Potential and positive image matches between Lightroom catalogs and file system data are stored in the database for later analysis."""
    # CLI option values; populated in handle() from the parsed options.
    lr_hostname = None
    fs_hostname = None
    fs_volume = None
    lrcat_hostname = None
    lrcat_volume = None
    verbose = False
def add_arguments(self, parser):
    """Register the command-line options for this management command."""
    optional_filters = (
        ("--lr_hostname", "Limit matches to Lightroom hostname."),
        ("--fs_hostname", "Limit matches to file system hostname."),
        ("--fs_volume", "Limit matches to file system volume."),
    )
    for flag, description in optional_filters:
        parser.add_argument(flag, help=description, default=None)
    parser.add_argument("--lrcat_hostname", help="Hostname where working Lightroom catalog is located.")
    parser.add_argument("--lrcat_volume", help="Volume where working Lightroom catalog is located.")
def certainty(self, hashes=None, matches=None):
    """Score candidate matches and promote Lightroom image statuses.

    ``hashes`` holds the dropbox hash of every candidate file (one entry per
    match), so a match's certainty is the percentage of candidates sharing
    its content hash.

    Side effects: saves every match; may update the related
    LightroomImageFileInfo.status to FILE_EXISTS_IN_PICTURES (match on the
    working-catalog host/volume) or FILE_EXISTS_ON_BACKUP (elsewhere).
    """
    # Bug fix: mutable default arguments ([]) replaced with None sentinels.
    hashes = [] if hashes is None else hashes
    matches = [] if matches is None else matches
    distinct_hash_counts = Counter(hashes)
    if self.verbose:
        print("\tMatches", len(matches), "Hashes", len(hashes), "Distinct Hashes", len(distinct_hash_counts.keys()))
    for match in matches:
        match.certainty = int(100.0 * distinct_hash_counts[match.file_info.dropbox_hash] / len(hashes))
        match.save()
        if (
            match.certainty > 50 and 'Pictures' in match.file_info.full_path and
            match.lightroom_image_file_info.status != LightroomImageFileInfo.Status.FILE_EXISTS_IN_PICTURES
        ):
            if (
                self.lrcat_hostname and self.lrcat_hostname == match.file_info.hostname and
                self.lrcat_volume and self.lrcat_volume == match.file_info.volume
            ):
                match.lightroom_image_file_info.status = LightroomImageFileInfo.Status.FILE_EXISTS_IN_PICTURES
                match.lightroom_image_file_info.save()
            elif match.lightroom_image_file_info.status == LightroomImageFileInfo.Status.UNKNOWN:
                match.lightroom_image_file_info.status = LightroomImageFileInfo.Status.FILE_EXISTS_ON_BACKUP
                match.lightroom_image_file_info.save()
        if self.verbose:
            print(f"\tMatch id {match.lightroom_image_file_info.id}::{match.file_info.id}", file=sys.stderr)
            print(f"\tfile_name {match.lightroom_image_file_info.print_path}::{match.file_info.full_path}", file=sys.stderr)
            # Bug fix: sys.stderr was passed positionally (and so printed as
            # text) instead of being the file= destination.
            print(f"\tCertainty {match.certainty}", f"Status {match.lightroom_image_file_info.status}", file=sys.stderr)
def handle(self, *args, **options):
    """Match Lightroom catalog images against collected file-system records.

    Applies the optional hostname/volume filters, then for each Lightroom
    image gathers file-system candidates with the same file name whose full
    path ends with the image's normalized path, and delegates scoring and
    status updates to certainty().
    """
    self.lr_hostname = options['lr_hostname']
    self.fs_hostname = options['fs_hostname']
    self.fs_volume = options['fs_volume']
    self.lrcat_hostname = options['lrcat_hostname']
    self.lrcat_volume = options['lrcat_volume']
    self.verbose = True if options['verbosity'] else False
    if self.verbose:
        print('lr_hostname', self.lr_hostname, file=sys.stderr)
        print('fs_hostname', self.fs_hostname, file=sys.stderr)
        print('fs_volume', self.fs_volume, file=sys.stderr)
        print('lrcat_hostname', self.lrcat_hostname, file=sys.stderr)
        print('lrcat_volume', self.lrcat_volume, file=sys.stderr)
    if self.lr_hostname:
        lr_queryset = LightroomImageFileInfo.objects.filter(hostname=self.lr_hostname)
    else:
        lr_queryset = LightroomImageFileInfo.objects.all()
    fs_queryset = FileInfo.objects
    if not self.fs_hostname and not self.fs_volume:
        fs_queryset = fs_queryset.all()
    if self.fs_hostname:
        fs_queryset = fs_queryset.filter(hostname=self.fs_hostname)
    if self.fs_volume:
        fs_queryset = fs_queryset.filter(volume=self.fs_volume)
    for lightroom_image_file_info in lr_queryset:
        # Normalize the catalog path: strip drive letter, leading slash and
        # a leading 'Pictures/' so it can be suffix-matched against paths.
        path = lightroom_image_file_info.print_path
        if path[1] == ':':
            # Remove assumed Windows Drive Letter
            path = path[2:]
        if path[0] == '/':
            path = path[1:]
        if path.startswith('Pictures/'):
            path = path[9:]
        if self.verbose:
            # NOTE(review): sys.stderr is passed positionally here, so it is
            # printed as text; likely meant print(path, file=sys.stderr).
            print(path, sys.stderr)
        hashes = []
        matches = []
        for file_info in fs_queryset.filter(file_name=lightroom_image_file_info.file_original_name):
            if file_info.full_path.endswith(path):
                hashes.append(file_info.dropbox_hash)
                matches.append(
                    ImageToFileInfo(
                        lightroom_catalog=lightroom_image_file_info.lightroom_catalog,
                        lightroom_image_file_info=lightroom_image_file_info,
                        file_info=file_info,
                        certainty=0
                    )
                )
self.certainty(hashes=hashes, matches=matches) | 0.207455 | 0.157169 |
import random
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import models
from django.forms.models import model_to_dict
from model_utils.models import TimeFramedModel, TimeStampedModel
from s3direct.fields import S3DirectField
# pylint: disable=cyclic-import
from will_of_the_prophets.validators import RollEmbargoValidator
SQUARE_VALIDATORS = [validators.MinValueValidator(1), validators.MaxValueValidator(100)]
class SpecialSquareType(models.Model):
    """A special square type."""
    name = models.TextField()
    description = models.TextField()
    # Image uploaded directly to S3 under the "special_square" destination.
    image = S3DirectField(dest="special_square")
    auto_move = models.IntegerField(
        default=0, help_text="Automatically move the runabout by this many places"
    )

    def __str__(self):
        return self.name
class SpecialSquare(TimeFramedModel):
    """A special square."""
    # Board position, constrained to 1-100 by SQUARE_VALIDATORS.
    square = models.PositiveIntegerField(validators=SQUARE_VALIDATORS)
    type = models.ForeignKey(
        SpecialSquareType, on_delete=models.PROTECT, related_name="squares"
    )

    def __str__(self):
        return "{type} at {square}".format(square=self.square, type=str(self.type))
class Butthole(TimeFramedModel):
    """A butthole."""
    # Both endpoints are board positions constrained to 1-100.
    start_square = models.PositiveIntegerField(validators=SQUARE_VALIDATORS)
    end_square = models.PositiveIntegerField(validators=SQUARE_VALIDATORS)

    def clean(self):
        # Reject degenerate buttholes whose endpoints coincide.
        if self.start_square == self.end_square:
            raise ValidationError(
                "A butthole cannot start and end in the " "same square."
            )
        return super().clean()

    def __str__(self):
        return "{start_square} to {end_square}".format(**model_to_dict(self))
def default_roll_number():
    """Return a uniformly random roll of a standard six-sided die."""
    lowest, highest = 1, 6
    return random.randint(lowest, highest)
class Roll(TimeStampedModel):
    """A roll of the 'dice'."""
    # Defaults to a fresh 1-6 roll at creation time.
    number = models.PositiveIntegerField(default=default_roll_number)
    # Time gate validated by RollEmbargoValidator.
    embargo = models.DateTimeField(validators=[RollEmbargoValidator()])

    def __str__(self):
return "{number} on {embargo}".format(**model_to_dict(self)) | will_of_the_prophets/models.py |
import random
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import models
from django.forms.models import model_to_dict
from model_utils.models import TimeFramedModel, TimeStampedModel
from s3direct.fields import S3DirectField
# pylint: disable=cyclic-import
from will_of_the_prophets.validators import RollEmbargoValidator
SQUARE_VALIDATORS = [validators.MinValueValidator(1), validators.MaxValueValidator(100)]
class SpecialSquareType(models.Model):
    """A special square type."""
    name = models.TextField()
    description = models.TextField()
    # Image uploaded directly to S3 under the "special_square" destination.
    image = S3DirectField(dest="special_square")
    auto_move = models.IntegerField(
        default=0, help_text="Automatically move the runabout by this many places"
    )

    def __str__(self):
        return self.name
class SpecialSquare(TimeFramedModel):
    """A special square."""
    # Board position, constrained to 1-100 by SQUARE_VALIDATORS.
    square = models.PositiveIntegerField(validators=SQUARE_VALIDATORS)
    type = models.ForeignKey(
        SpecialSquareType, on_delete=models.PROTECT, related_name="squares"
    )

    def __str__(self):
        return "{type} at {square}".format(square=self.square, type=str(self.type))
class Butthole(TimeFramedModel):
    """A butthole."""
    # Both endpoints are board positions constrained to 1-100.
    start_square = models.PositiveIntegerField(validators=SQUARE_VALIDATORS)
    end_square = models.PositiveIntegerField(validators=SQUARE_VALIDATORS)

    def clean(self):
        # Reject degenerate buttholes whose endpoints coincide.
        if self.start_square == self.end_square:
            raise ValidationError(
                "A butthole cannot start and end in the " "same square."
            )
        return super().clean()

    def __str__(self):
        return "{start_square} to {end_square}".format(**model_to_dict(self))
def default_roll_number():
    """Return a uniformly random roll of a standard six-sided die."""
    lowest, highest = 1, 6
    return random.randint(lowest, highest)
class Roll(TimeStampedModel):
    """A roll of the 'dice'."""
    # Defaults to a fresh 1-6 roll at creation time.
    number = models.PositiveIntegerField(default=default_roll_number)
    # Time gate validated by RollEmbargoValidator.
    embargo = models.DateTimeField(validators=[RollEmbargoValidator()])

    def __str__(self):
return "{number} on {embargo}".format(**model_to_dict(self)) | 0.747432 | 0.197406 |
from __future__ import absolute_import, division, print_function
import logging as log
from traceback import format_exc
from pprint import pformat
from . import FormatProvider, providers
__all__ = ['DictFormatProvider']
class DictFormatProvider(FormatProvider):
    """
    Python dictionary format provider.

    Imports and exports the configuration as the source text of a Python
    dictionary mapping each category name to a ``{key: value}`` dictionary.

    .. warning:: This provider uses :py:func:`eval` function.
       Use with care.
    """

    @classmethod
    def do_import(cls, cfmg, string):
        """
        Python dictionary parser implementation.

        Evaluates ``string`` as a dictionary and pushes every recognized
        ``category -> {key: value}`` entry into ``cfmg.set``. Unknown
        categories and keys are logged and skipped. When ``cfmg._safe`` is
        set, all errors are logged instead of raised.

        See :meth:`FormatProvider.do_import`.
        """
        keys = cfmg._keys
        categories = cfmg._categories

        # Evaluate string
        # SECURITY: eval() executes arbitrary Python; only feed it trusted
        # input (already flagged in the class-level warning).
        try:
            as_dict = eval(string)
        except Exception as e:
            if not cfmg._safe:
                raise e
            log.error(format_exc())
            return

        # Check datatype. isinstance() (rather than an exact type() match)
        # also accepts dict subclasses such as OrderedDict.
        if not isinstance(as_dict, dict):
            msg = 'Cannot evaluate string as dictionary.'
            if not cfmg._safe:
                raise SyntaxError(msg)
            log.error(msg)
            return

        # Iterate categories
        for category, options in as_dict.items():

            # Check datatype
            if not isinstance(options, dict):
                if not cfmg._safe:
                    raise SyntaxError(
                        'Malformed category "{}".'.format(category)
                    )
                log.error(
                    'Ignoring malformed category "{}".'.format(category)
                )
                continue

            # Consider only the categories included in the specification
            if category not in categories:
                log.error(
                    'Ignoring unknown category "{}".'.format(category)
                )
                continue

            # Iterate options
            for key, value in options.items():

                # Consider only known keys
                if key not in keys:
                    log.error('Ignoring unknown key "{}".'.format(key))
                    continue

                # Check if key belongs to the category we are in
                if keys[key].category != category:
                    msg = (
                        'Key "{}" should belong to category "{}", '
                        'found in "{}" instead.'.format(
                            key, keys[key].category, category
                        )
                    )
                    if not cfmg._safe:
                        raise SyntaxError(msg)
                    log.error(msg)
                    continue

                # Everything ok, try to set the value of the option
                try:
                    cfmg.set(key, value)
                except Exception as e:
                    if not cfmg._safe:
                        raise e
                    log.error(format_exc())

    @classmethod
    def do_export(cls, cfmg):
        """
        Python dictionary writer implementation.

        Renders the whole configuration as a pretty-printed dictionary of
        ``{category: {key: repr(value)}}``.

        See :meth:`FormatProvider.do_export`.
        """
        categories = cfmg._categories

        # Create dictionary (dead "output = None" pre-assignment removed)
        # FIXME: Add support for comments?
        as_dict = {
            cat: {
                opt.key: opt.repr(opt._value) for opt in categories[cat]
            } for cat in categories
        }
        return pformat(as_dict)
providers['dict'] = DictFormatProvider | lib/confspec/providers/dict.py | from __future__ import absolute_import, division, print_function
import logging as log
from traceback import format_exc
from pprint import pformat
from . import FormatProvider, providers
__all__ = ['DictFormatProvider']
class DictFormatProvider(FormatProvider):
    """
    Python dictionary format provider.

    Imports and exports the configuration as the source text of a Python
    dictionary mapping each category name to a ``{key: value}`` dictionary.

    .. warning:: This provider uses :py:func:`eval` function.
       Use with care.
    """

    @classmethod
    def do_import(cls, cfmg, string):
        """
        Python dictionary parser implementation.

        Evaluates ``string`` as a dictionary and pushes every recognized
        ``category -> {key: value}`` entry into ``cfmg.set``. Unknown
        categories and keys are logged and skipped. When ``cfmg._safe`` is
        set, all errors are logged instead of raised.

        See :meth:`FormatProvider.do_import`.
        """
        keys = cfmg._keys
        categories = cfmg._categories

        # Evaluate string
        # SECURITY: eval() executes arbitrary Python; only feed it trusted
        # input (already flagged in the class-level warning).
        try:
            as_dict = eval(string)
        except Exception as e:
            if not cfmg._safe:
                raise e
            log.error(format_exc())
            return

        # Check datatype. isinstance() (rather than an exact type() match)
        # also accepts dict subclasses such as OrderedDict.
        if not isinstance(as_dict, dict):
            msg = 'Cannot evaluate string as dictionary.'
            if not cfmg._safe:
                raise SyntaxError(msg)
            log.error(msg)
            return

        # Iterate categories
        for category, options in as_dict.items():

            # Check datatype
            if not isinstance(options, dict):
                if not cfmg._safe:
                    raise SyntaxError(
                        'Malformed category "{}".'.format(category)
                    )
                log.error(
                    'Ignoring malformed category "{}".'.format(category)
                )
                continue

            # Consider only the categories included in the specification
            if category not in categories:
                log.error(
                    'Ignoring unknown category "{}".'.format(category)
                )
                continue

            # Iterate options
            for key, value in options.items():

                # Consider only known keys
                if key not in keys:
                    log.error('Ignoring unknown key "{}".'.format(key))
                    continue

                # Check if key belongs to the category we are in
                if keys[key].category != category:
                    msg = (
                        'Key "{}" should belong to category "{}", '
                        'found in "{}" instead.'.format(
                            key, keys[key].category, category
                        )
                    )
                    if not cfmg._safe:
                        raise SyntaxError(msg)
                    log.error(msg)
                    continue

                # Everything ok, try to set the value of the option
                try:
                    cfmg.set(key, value)
                except Exception as e:
                    if not cfmg._safe:
                        raise e
                    log.error(format_exc())

    @classmethod
    def do_export(cls, cfmg):
        """
        Python dictionary writer implementation.

        Renders the whole configuration as a pretty-printed dictionary of
        ``{category: {key: repr(value)}}``.

        See :meth:`FormatProvider.do_export`.
        """
        categories = cfmg._categories

        # Create dictionary (dead "output = None" pre-assignment removed)
        # FIXME: Add support for comments?
        as_dict = {
            cat: {
                opt.key: opt.repr(opt._value) for opt in categories[cat]
            } for cat in categories
        }
        return pformat(as_dict)
providers['dict'] = DictFormatProvider | 0.668231 | 0.092237 |
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: widen the monetary DecimalFields on
    Asset and Transaction to max_digits=50 / decimal_places=25."""

    dependencies = [
        ("polaris", "0002_auto_20191125_1829"),
    ]

    operations = [
        # Asset deposit/withdrawal limits and fees.
        migrations.AlterField(
            model_name="asset",
            name="deposit_fee_fixed",
            field=models.DecimalField(
                blank=True, decimal_places=25, default=1.0, max_digits=50
            ),
        ),
        migrations.AlterField(
            model_name="asset",
            name="deposit_fee_percent",
            field=models.DecimalField(
                blank=True, decimal_places=25, default=0.01, max_digits=50
            ),
        ),
        migrations.AlterField(
            model_name="asset",
            name="deposit_max_amount",
            field=models.DecimalField(
                blank=True, decimal_places=25, default=10000.0, max_digits=50
            ),
        ),
        migrations.AlterField(
            model_name="asset",
            name="deposit_min_amount",
            field=models.DecimalField(
                blank=True, decimal_places=25, default=10.0, max_digits=50
            ),
        ),
        migrations.AlterField(
            model_name="asset",
            name="withdrawal_fee_fixed",
            field=models.DecimalField(
                blank=True, decimal_places=25, default=1.0, max_digits=50
            ),
        ),
        migrations.AlterField(
            model_name="asset",
            name="withdrawal_fee_percent",
            field=models.DecimalField(
                blank=True, decimal_places=25, default=0.01, max_digits=50
            ),
        ),
        migrations.AlterField(
            model_name="asset",
            name="withdrawal_max_amount",
            field=models.DecimalField(
                blank=True, decimal_places=25, default=10000.0, max_digits=50
            ),
        ),
        migrations.AlterField(
            model_name="asset",
            name="withdrawal_min_amount",
            field=models.DecimalField(
                blank=True, decimal_places=25, default=10.0, max_digits=50
            ),
        ),
        # Transaction amounts (nullable).
        migrations.AlterField(
            model_name="transaction",
            name="amount_fee",
            field=models.DecimalField(
                blank=True, decimal_places=25, max_digits=50, null=True
            ),
        ),
        migrations.AlterField(
            model_name="transaction",
            name="amount_in",
            field=models.DecimalField(
                blank=True, decimal_places=25, max_digits=50, null=True
            ),
        ),
        migrations.AlterField(
            model_name="transaction",
            name="amount_out",
            field=models.DecimalField(
                blank=True, decimal_places=25, max_digits=50, null=True
            ),
        ),
] | polaris/polaris/migrations/0003_auto_20191211_1512.py |
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("polaris", "0002_auto_20191125_1829"),
]
operations = [
migrations.AlterField(
model_name="asset",
name="deposit_fee_fixed",
field=models.DecimalField(
blank=True, decimal_places=25, default=1.0, max_digits=50
),
),
migrations.AlterField(
model_name="asset",
name="deposit_fee_percent",
field=models.DecimalField(
blank=True, decimal_places=25, default=0.01, max_digits=50
),
),
migrations.AlterField(
model_name="asset",
name="deposit_max_amount",
field=models.DecimalField(
blank=True, decimal_places=25, default=10000.0, max_digits=50
),
),
migrations.AlterField(
model_name="asset",
name="deposit_min_amount",
field=models.DecimalField(
blank=True, decimal_places=25, default=10.0, max_digits=50
),
),
migrations.AlterField(
model_name="asset",
name="withdrawal_fee_fixed",
field=models.DecimalField(
blank=True, decimal_places=25, default=1.0, max_digits=50
),
),
migrations.AlterField(
model_name="asset",
name="withdrawal_fee_percent",
field=models.DecimalField(
blank=True, decimal_places=25, default=0.01, max_digits=50
),
),
migrations.AlterField(
model_name="asset",
name="withdrawal_max_amount",
field=models.DecimalField(
blank=True, decimal_places=25, default=10000.0, max_digits=50
),
),
migrations.AlterField(
model_name="asset",
name="withdrawal_min_amount",
field=models.DecimalField(
blank=True, decimal_places=25, default=10.0, max_digits=50
),
),
migrations.AlterField(
model_name="transaction",
name="amount_fee",
field=models.DecimalField(
blank=True, decimal_places=25, max_digits=50, null=True
),
),
migrations.AlterField(
model_name="transaction",
name="amount_in",
field=models.DecimalField(
blank=True, decimal_places=25, max_digits=50, null=True
),
),
migrations.AlterField(
model_name="transaction",
name="amount_out",
field=models.DecimalField(
blank=True, decimal_places=25, max_digits=50, null=True
),
),
] | 0.626924 | 0.195095 |
import ast
import os
import math
import profiler as profiler
class KNN:
    """K-nearest-neighbours classifier over stylometric author profiles.

    Profiles are flat dicts of numeric style features (conjunction index,
    average sentence length, ...) plus an 'author' label, one Python-literal
    profile per line in results/authorProfiles.txt.
    """

    def __init__(self, kValue):
        # Number of nearest neighbours consulted per classification.
        self.K = kValue

    def classify(self, newProfile):
        """Classify newProfile against the stored profiles and print
        '<predicted>,<actual>' (the actual label comes from the profile)."""
        neighbours = []  # (distance, author) pairs
        # 'with' guarantees the profile file is closed (the original
        # version leaked the handle).
        with open('results/authorProfiles.txt') as existingProfiles:
            for line in existingProfiles:
                existingProfile = ast.literal_eval(line.strip())
                distance = self.getEuclidianDistance(newProfile, existingProfile)
                # Keep every neighbour: the original dict keyed by distance
                # silently dropped profiles tied at the same distance.
                neighbours.append((distance, existingProfile['author']))
        neighbours.sort(key=lambda pair: pair[0])
        # Slicing (rather than indexing K times) also survives having
        # fewer stored profiles than K.
        kNeighbours = [author for _, author in neighbours[:self.K]]
        print(self.getClass(kNeighbours) + ',' + newProfile['author'])

    def getEuclidianDistance(self, newProfile, existingProfile):
        """Euclidean distance between two profiles over the six features
        actually used for classification.

        NOTE(review): the original also computed variance/article-index
        differences (the variance one against the wrong key, 'avgLength')
        but never folded them into the distance; those dead computations
        are dropped here.  Confirm whether they were meant to be included.
        """
        features = ('conjIndex', 'avgLength', 'avgWordLength',
                    'commaIndex', 'ppIndex', 'prepIndex')
        return math.sqrt(sum(
            (newProfile[f] - existingProfile[f]) ** 2 for f in features))

    def getClass(self, kNeighbours):
        """Majority vote between the two known authors; ties go to 'Dikens'
        (matching the original behaviour)."""
        joyce = kNeighbours.count('Joyce')
        dikens = kNeighbours.count('Dikens')
        return 'Joyce' if joyce > dikens else 'Dikens'
# Classify every .txt file in the Dickens and Joyce test sets with a
# 5-nearest-neighbour vote; classify() prints '<predicted>,<actual>'.
knn = KNN(5)
p = profiler.Profiler()

directory = 'testData/Dikens'
for root, dirs, files in os.walk(directory):
    for myfile in files:
        if myfile.endswith(".txt"):
            fullPath = os.path.join(root, myfile)
            # Profiles round-trip through str/literal_eval to plain dicts.
            newProfile = ast.literal_eval(str(p.getProfile(fullPath, 'Dikens')))
            knn.classify(newProfile)

directory = 'testData/Joyce'
for root, dirs, files in os.walk(directory):
    for myfile in files:
        if myfile.endswith(".txt"):
            fullPath = os.path.join(root, myfile)
            newProfile = ast.literal_eval(str(p.getProfile(fullPath, 'Joyce')))
knn.classify(newProfile) | src/kNN.py | import ast
import os
import math
import profiler as profiler
class KNN:
def __init__(self, kValue):
self.K=kValue
def classify(self, newProfile):
existingProfiles=open('results/authorProfiles.txt')
allDistances=dict()
for lines in existingProfiles:
lines=lines.strip()
existingProfile=ast.literal_eval(str(lines))
distance=self.getEuclidianDistance(newProfile, existingProfile)
allDistances[distance]=existingProfile['author']
keys=list(allDistances.keys())
keys.sort()
kNeighbours=[]
for i in range(self.K):
kNeighbours.append(allDistances[keys[i]])
#print(kNeighbours)
print(self.getClass(kNeighbours)+','+newProfile['author'])
def getEuclidianDistance(self,newProfile, existingProfile):
avgConjDiff=pow((newProfile['conjIndex']-existingProfile['conjIndex']),2)
avgLengthDiff=pow((newProfile['avgLength']-existingProfile['avgLength']),2)
avgWordLengthDiff=pow((newProfile['avgWordLength']-existingProfile['avgWordLength']),2)
varianceDiff=pow((newProfile['variance']-existingProfile['avgLength']),2)
avgArticleDiff=pow((newProfile['articleIndex']-existingProfile['articleIndex']),2)
prepIndexDiff=pow((newProfile['prepIndex']-existingProfile['prepIndex']),2)
ppIndexDiff=pow((newProfile['ppIndex']-existingProfile['ppIndex']),2)
commaIndexDiff=pow((newProfile['commaIndex']-existingProfile['commaIndex']),2)
distance=math.sqrt(avgLengthDiff+avgWordLengthDiff+avgConjDiff+commaIndexDiff+ppIndexDiff+prepIndexDiff)
return distance
def getClass(self, kNeighbours):
authorDict=dict()
authorDict['Joyce']=0
authorDict['Dikens']=0
for author in kNeighbours:
authorDict[author]+=1
if authorDict['Joyce']>authorDict['Dikens']:
return 'Joyce'
else:
return 'Dikens'
knn=KNN(5)
p=profiler.Profiler()
directory='testData/Dikens'
for root, dirs, files in os.walk(directory):
for myfile in files:
if myfile.endswith(".txt"):
fullPath=os.path.join(root,myfile)
newProfile=ast.literal_eval(str(p.getProfile(fullPath,'Dikens')))
knn.classify(newProfile)
directory='testData/Joyce'
for root, dirs, files in os.walk(directory):
for myfile in files:
if myfile.endswith(".txt"):
fullPath=os.path.join(root,myfile)
newProfile=ast.literal_eval(str(p.getProfile(fullPath,'Joyce')))
knn.classify(newProfile) | 0.06469 | 0.092401 |
import gzip
import logging
from finntk.utils import ResourceMan, urlretrieve
from finntk.vendor.conceptnet5.uri import concept_uri
from shutil import copyfileobj
import os
from .base import MultilingualVectorSpace, RefType
from .utils import get, get_tmpfile, load_word2vec_format, load
logger = logging.getLogger(__name__)
class NumberbatchWordVecs(ResourceMan):
    """Manages download, conversion and lazy loading of the multilingual
    ConceptNet Numberbatch word vectors as a finntk resource."""

    RESOURCE_NAME = "numberbatch-multilingual"
    URL = (
        "https://conceptnet.s3.amazonaws.com/downloads/2017/numberbatch"
        "/numberbatch-17.06.txt.gz"
    )

    def __init__(self):
        super().__init__()
        self._resources["vecs"] = "numberbatch.multi.binvec"
        self._vecs = None  # lazily-loaded vector model cache

    def _bootstrap(self, _res):
        """Download the gzipped vectors, convert to the binary resource
        format, and clean up both temporary files even on failure."""
        logger.info("Downloading word vectors")
        gzipped_glove_tmp_fn = urlretrieve(self.URL)
        try:
            glove_tmp_fn = get_tmpfile("glove.txt")
            try:
                # Context managers close both handles promptly; the
                # original leaked the gzip and output file objects.
                with gzip.open(gzipped_glove_tmp_fn) as src, \
                        open(glove_tmp_fn, "wb") as dst:
                    copyfileobj(src, dst)
                logger.info("Converting word vectors")
                fi = load_word2vec_format(glove_tmp_fn)
                fi.save(self._get_res_path("vecs"))
            finally:
                try:
                    os.remove(glove_tmp_fn)
                except OSError:
                    pass  # best-effort cleanup
        finally:
            try:
                os.remove(gzipped_glove_tmp_fn)
            except OSError:
                pass  # best-effort cleanup

    def get_vecs(self):
        """Load the vectors once (memory-mapped read-only) and return them."""
        if self._vecs is None:
            vec_path = self.get_res("vecs")
            logger.info("Loading word vectors")
            self._vecs = load(vec_path, mmap="r")
            logger.info("Loaded word vectors")
        return self._vecs
vecs = NumberbatchWordVecs()
def mk_concept_vec(lang, text, *more):
    """Look up the vector for the ConceptNet concept URI built from *lang*
    and *text* (plus optional extra URI components) in the shared
    multilingual vector store."""
    return get(vecs.get_vecs(), concept_uri(lang, text, *more))
class NumberbatchMultiSpace(MultilingualVectorSpace):
    """Multilingual vector space backed by ConceptNet Numberbatch.

    Looked up by lemma; vectors are 300-dimensional.
    """
    takes = RefType.LEMMA
    dim = 300

    def get_vec(self, lang: str, ref: str):
        # Delegates to the module-level Numberbatch vector store.
        return mk_concept_vec(lang, ref)
multispace = NumberbatchMultiSpace() | finntk/emb/numberbatch.py | import gzip
import logging
from finntk.utils import ResourceMan, urlretrieve
from finntk.vendor.conceptnet5.uri import concept_uri
from shutil import copyfileobj
import os
from .base import MultilingualVectorSpace, RefType
from .utils import get, get_tmpfile, load_word2vec_format, load
logger = logging.getLogger(__name__)
class NumberbatchWordVecs(ResourceMan):
RESOURCE_NAME = "numberbatch-multilingual"
URL = (
"https://conceptnet.s3.amazonaws.com/downloads/2017/numberbatch"
"/numberbatch-17.06.txt.gz"
)
def __init__(self):
super().__init__()
self._resources["vecs"] = "numberbatch.multi.binvec"
self._vecs = None
def _bootstrap(self, _res):
logger.info("Downloading word vectors")
gzipped_glove_tmp_fn = urlretrieve(self.URL)
try:
glove_tmp_fn = get_tmpfile("glove.txt")
try:
copyfileobj(gzip.open(gzipped_glove_tmp_fn), open(glove_tmp_fn, "wb"))
logger.info("Converting word vectors")
fi = load_word2vec_format(glove_tmp_fn)
fi.save(self._get_res_path("vecs"))
finally:
try:
os.remove(glove_tmp_fn)
except OSError:
pass
finally:
try:
os.remove(gzipped_glove_tmp_fn)
except OSError:
pass
def get_vecs(self):
if self._vecs is None:
vec_path = self.get_res("vecs")
logger.info("Loading word vectors")
self._vecs = load(vec_path, mmap="r")
logger.info("Loaded word vectors")
return self._vecs
vecs = NumberbatchWordVecs()
def mk_concept_vec(lang, text, *more):
return get(vecs.get_vecs(), concept_uri(lang, text, *more))
class NumberbatchMultiSpace(MultilingualVectorSpace):
takes = RefType.LEMMA
dim = 300
def get_vec(self, lang: str, ref: str):
return mk_concept_vec(lang, ref)
multispace = NumberbatchMultiSpace() | 0.386185 | 0.077169 |
import gtk
import gobject
# Column indices into the gtk.ListStore model built in main().
COLUMN_NUMBER = 0
COLUMN_STRING = 1
COLUMN_BOOL = 2

# Demo rows: [number, label, toggled].  None rows render the toggle
# column in its unset state.
data = [
    [ 1, 'first row', True ],
    [ 2, 'second row', True ],
    [ 3, 'third row', True ],
    [ 4, 'fourth row', True ],
    [ 5, 'fifth row', True ],
    [ 6, 'sixth row', True ],
    [ 7, 'seventh row', None ],
    [ 8, 'eigth row', None],
    [ 9, 'ninth row', None ],
    [ 10, 'tenth row', None ],
    [ 1, 'first row', False ],
    [ 2, 'second row', False ],
    [ 3, 'third row', False],
    [ 4, 'fourth row', False ],
    [ 5, 'fifth row', False ]
]
def list_selections(param):
    # Debug helper: dump each selected row handed over by selected_foreach.
    print param
def selection_cb(selection):
    # Selection callback: print the selection object, then dump every
    # selected row via list_selections.
    print selection
    selection.selected_foreach(list_selections)
def main():
    """Build and run the ListStore demo window: a sortable tree view with a
    read-only number column, an editable text column and a toggle column."""
    # Top-level window wired to quit the GTK main loop on destroy.
    win = gtk.Window()
    win.set_title("Main Window")
    win.connect("destroy", lambda win: gtk.main_quit())
    vbox = gtk.VBox()
    win.add(vbox)
    vbox.show()
    label = gtk.Label("This is a label")
    vbox.pack_start(label, expand = False)
    label.show()
    # Scrolled container for the tree view; vertical scrollbar only.
    sw = gtk.ScrolledWindow(None, None)
    sw.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
    sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
    vbox.pack_start(sw, expand = True)
    # Model: one row per entry of the module-level `data` table.
    ls = gtk.ListStore(gobject.TYPE_UINT, gobject.TYPE_STRING, gobject.TYPE_BOOLEAN)
    for item in data:
        iter = ls.append()
        ls.set(iter, COLUMN_NUMBER, item[0],
            COLUMN_STRING, item[1], COLUMN_BOOL, item[2])
    tv = gtk.TreeView(ls)
    tv.set_rules_hint(True)
    sw.add(tv)
    tv.show()
    # Sortable, read-only number column.
    renderer = gtk.CellRendererText()
    col = gtk.TreeViewColumn('Number', renderer, text=COLUMN_NUMBER)
    col.set_sort_column_id(COLUMN_NUMBER)
    tv.append_column(col)
    # Editable string column; edits are written back by col_edited.
    renderer1 = gtk.CellRendererText()
    renderer1.set_property('editable', True)
    renderer1.connect('edited', col_edited, ls)
    col = gtk.TreeViewColumn('String', renderer1, text=COLUMN_STRING)
    col.set_sort_column_id(COLUMN_STRING)
    tv.append_column(col)
    # Clickable toggle column; clicks are flipped by col_toggled.
    renderer2 = gtk.CellRendererToggle()
    renderer2.set_property('activatable', True)
    renderer2.connect('toggled', col_toggled, ls)
    col = gtk.TreeViewColumn('Boolean', renderer2)
    col.add_attribute(renderer2, 'active', 2)
    col.set_sort_column_id(COLUMN_BOOL)
    tv.append_column(col)
    win.set_default_size (200,300)
    sw.show()
    win.show_all()
    gtk.main()
def col_edited(cell, path, new_text, ls):
    """'edited' signal handler for the text renderer: commit the edited
    text into column 1 of the affected row."""
    row = ls[path]
    row[1] = new_text
def col_toggled(cell, path, ls):
    """'toggled' signal handler for the toggle renderer: invert the
    boolean in column 2 of the affected row."""
    current = ls[path][2]
    ls[path][2] = not current
if __name__ == '__main__':
main() | test/samples/gtk/gtkliststore.py | import gtk
import gobject
COLUMN_NUMBER = 0
COLUMN_STRING = 1
COLUMN_BOOL = 2
data = [
[ 1, 'first row', True ],
[ 2, 'second row', True ],
[ 3, 'third row', True ],
[ 4, 'fourth row', True ],
[ 5, 'fifth row', True ],
[ 6, 'sixth row', True ],
[ 7, 'seventh row', None ],
[ 8, 'eigth row', None],
[ 9, 'ninth row', None ],
[ 10, 'tenth row', None ],
[ 1, 'first row', False ],
[ 2, 'second row', False ],
[ 3, 'third row', False],
[ 4, 'fourth row', False ],
[ 5, 'fifth row', False ]
]
def list_selections(param):
print param
def selection_cb(selection):
print selection
selection.selected_foreach(list_selections)
def main():
win = gtk.Window()
win.set_title("Main Window")
win.connect("destroy", lambda win: gtk.main_quit())
vbox = gtk.VBox()
win.add(vbox)
vbox.show()
label = gtk.Label("This is a label")
vbox.pack_start(label, expand = False)
label.show()
sw = gtk.ScrolledWindow(None, None)
sw.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
vbox.pack_start(sw, expand = True)
ls = gtk.ListStore(gobject.TYPE_UINT, gobject.TYPE_STRING, gobject.TYPE_BOOLEAN)
for item in data:
iter = ls.append()
ls.set(iter, COLUMN_NUMBER, item[0],
COLUMN_STRING, item[1], COLUMN_BOOL, item[2])
tv = gtk.TreeView(ls)
tv.set_rules_hint(True)
sw.add(tv)
tv.show()
renderer = gtk.CellRendererText()
col = gtk.TreeViewColumn('Number', renderer, text=COLUMN_NUMBER)
col.set_sort_column_id(COLUMN_NUMBER)
tv.append_column(col)
renderer1 = gtk.CellRendererText()
renderer1.set_property('editable', True)
renderer1.connect('edited', col_edited, ls)
col = gtk.TreeViewColumn('String', renderer1, text=COLUMN_STRING)
col.set_sort_column_id(COLUMN_STRING)
tv.append_column(col)
renderer2 = gtk.CellRendererToggle()
renderer2.set_property('activatable', True)
renderer2.connect('toggled', col_toggled, ls)
col = gtk.TreeViewColumn('Boolean', renderer2)
col.add_attribute(renderer2, 'active', 2)
col.set_sort_column_id(COLUMN_BOOL)
tv.append_column(col)
win.set_default_size (200,300)
sw.show()
win.show_all()
gtk.main()
def col_edited(cell, path, new_text, ls):
ls[path][1] = new_text
def col_toggled(cell, path, ls):
ls[path][2] = not ls[path][2]
if __name__ == '__main__':
main() | 0.219087 | 0.218836 |
from typing import Dict, Optional, Tuple
import torch
from torch import Tensor
from torch.distributions import Distribution
from torch.optim import RMSprop
from memory import Memory
from policy import Policy
class Agent():
    def __init__(self, observation_size: int, action_size: int, goal_size: int, hidden_size: int, learning_rate: float, weight_decay: float):
        """Build the goal/return-conditioned policy network and its RMSprop optimiser."""
        self.policy = Policy(observation_size, action_size, goal_size, hidden_size)
        # alpha is RMSprop's smoothing constant (library default 0.99);
        # 0.7 here is presumably tuned - TODO confirm against the paper/config.
        self.optimiser = RMSprop(self.policy.parameters(), lr=learning_rate, weight_decay=weight_decay, alpha=0.7)
    # Sets training/evaluation mode
    def set_mode(self, training: bool):
        """Switch the policy network between train (True) and eval (False) mode."""
        self.policy.train(training)
    # Constructs an initial command for a new episode Dict[str, T x B x C]
    def get_initial_command(self, goal: Tensor, memory: Memory, mode: str) -> Dict[str, Tensor]:
        """Build the episode-start command dict.

        Desired return/horizon are seeded from the best episode in memory;
        in 'imitation' mode the reward/desired_return entries are omitted.
        """
        desired_return, desired_horizon = memory.get_max_return_horizon()
        # Tensors are shaped 1 x 1 x 1 (time x batch x channel per the class comment).
        command = {'goal': goal, 'prev_action': torch.zeros(1, 1, dtype=torch.int64), 'desired_horizon': torch.tensor([[[desired_horizon]]], dtype=torch.float32)}
        if mode != 'imitation':
            command['reward'], command['desired_return'] = torch.zeros(1, 1, 1), torch.tensor([[[desired_return]]], dtype=torch.float32)
        return command
    # Observes the current state and produces a policy and updated internal/hidden state
    def observe(self, observations: Tensor, commands: Dict[str, Tensor], hidden: Optional[Tuple[Tensor, Tensor]]=None) -> Tuple[Distribution, Tuple[Tensor, Tensor]]:
        """Run the policy on the observations plus command context; returns
        the action distribution and the new hidden state."""
        return self.policy(observations, commands, hidden)
    # Updates command (inplace)
    def update_command(self, observation: Tensor, goal: Tensor, action: Tensor, reward: float, terminal: bool, command: Dict[str, Tensor], mode: str) -> None:
        """Advance the command dict after one environment step.

        Mutates `command` in place and returns nothing (the original
        annotation claimed a Tensor return).
        NOTE(review): `observation` and `terminal` are currently unused - confirm intent.
        """
        command['goal'], command['prev_action'], command['desired_horizon'] = goal, action, command['desired_horizon'] - 1 # Update goal, previous action, and subtract one timestep from desired horizon
        if mode != 'imitation':
            command['reward'], command['desired_return'] = torch.tensor([[[reward]]], dtype=torch.float32), command['desired_return'] - reward # Update reward and subtract reward from desired return
    # Trains on past observations using supervised learning
    def train(self, memory: Memory, batch_size: int, seq_len: int, mode: str) -> float:
        """One supervised update on a sampled batch; returns the loss value."""
        observations, commands, actions = memory.sample(batch_size, seq_len, mode)
        policy, _ = self.policy(observations, commands, None)
        self.optimiser.zero_grad(set_to_none=True)
        # Maximise log-likelihood of the stored actions under the predicted policy.
        loss = -policy.log_prob(actions).mean()
        loss.backward()
        self.optimiser.step()
return loss.item() | agent.py | from typing import Dict, Optional, Tuple
import torch
from torch import Tensor
from torch.distributions import Distribution
from torch.optim import RMSprop
from memory import Memory
from policy import Policy
class Agent():
def __init__(self, observation_size: int, action_size: int, goal_size: int, hidden_size: int, learning_rate: float, weight_decay: float):
self.policy = Policy(observation_size, action_size, goal_size, hidden_size)
self.optimiser = RMSprop(self.policy.parameters(), lr=learning_rate, weight_decay=weight_decay, alpha=0.7)
# Sets training/evaluation mode
def set_mode(self, training: bool):
self.policy.train(training)
# Constructs an initial command for a new episode Dict[str, T x B x C]
def get_initial_command(self, goal: Tensor, memory: Memory, mode: str) -> Dict[str, Tensor]:
desired_return, desired_horizon = memory.get_max_return_horizon()
command = {'goal': goal, 'prev_action': torch.zeros(1, 1, dtype=torch.int64), 'desired_horizon': torch.tensor([[[desired_horizon]]], dtype=torch.float32)}
if mode != 'imitation':
command['reward'], command['desired_return'] = torch.zeros(1, 1, 1), torch.tensor([[[desired_return]]], dtype=torch.float32)
return command
# Observes the current state and produces a policy and updated internal/hidden state
def observe(self, observations: Tensor, commands: Dict[str, Tensor], hidden: Optional[Tuple[Tensor, Tensor]]=None) -> Tuple[Distribution, Tuple[Tensor, Tensor]]:
return self.policy(observations, commands, hidden)
# Updates command (inplace)
def update_command(self, observation: Tensor, goal: Tensor, action: Tensor, reward: float, terminal: bool, command: Tensor, mode: str) -> Tensor:
command['goal'], command['prev_action'], command['desired_horizon'] = goal, action, command['desired_horizon'] - 1 # Update goal, previous action, and subtract one timestep from desired horizon
if mode != 'imitation':
command['reward'], command['desired_return'] = torch.tensor([[[reward]]], dtype=torch.float32), command['desired_return'] - reward # Update reward and subtract reward from desired return
# Trains on past observations using supervised learning
def train(self, memory: Memory, batch_size: int, seq_len: int, mode: str) -> float:
observations, commands, actions = memory.sample(batch_size, seq_len, mode)
policy, _ = self.policy(observations, commands, None)
self.optimiser.zero_grad(set_to_none=True)
loss = -policy.log_prob(actions).mean()
loss.backward()
self.optimiser.step()
return loss.item() | 0.956462 | 0.621024 |
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\28")
buf.write("\u0163\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\3\3\3\3\3\3\4\3")
buf.write("\4\3\5\3\5\3\6\3\6\3\6\3\6\3\6\3\7\3\7\3\7\3\7\3\7\3\7")
buf.write("\3\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3")
buf.write("\t\3\t\3\n\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3\13\3")
buf.write("\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\r\3\r\3\16")
buf.write("\3\16\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\21")
buf.write("\3\21\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\24")
buf.write("\3\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25\3\26")
buf.write("\3\26\3\26\3\26\3\26\3\27\3\27\3\27\3\30\3\30\3\30\3\30")
buf.write("\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\31\3\31\3\32\3\32")
buf.write("\3\32\3\32\3\32\3\32\3\33\3\33\3\34\3\34\3\35\3\35\3\36")
buf.write("\3\36\3\37\3\37\3 \3 \3!\3!\3\"\3\"\3#\3#\3$\3$\3%\3%")
buf.write("\3&\3&\3\'\3\'\3(\3(\3)\3)\3)\3*\3*\3*\3+\3+\3+\3,\3,")
buf.write("\3-\3-\3-\3.\3.\3/\3/\3/\3/\3/\3\60\3\60\3\60\3\60\3\61")
buf.write("\3\61\3\61\3\62\3\62\3\62\3\62\3\63\3\63\3\63\3\63\7\63")
buf.write("\u0133\n\63\f\63\16\63\u0136\13\63\3\64\6\64\u0139\n\64")
buf.write("\r\64\16\64\u013a\3\64\3\64\6\64\u013f\n\64\r\64\16\64")
buf.write("\u0140\3\65\6\65\u0144\n\65\r\65\16\65\u0145\3\66\6\66")
buf.write("\u0149\n\66\r\66\16\66\u014a\3\66\3\66\3\67\3\67\3\67")
buf.write("\5\67\u0152\n\67\3\67\7\67\u0155\n\67\f\67\16\67\u0158")
buf.write("\13\67\3\67\3\67\3\67\3\67\38\38\39\39\3:\3:\3\u0156\2")
buf.write(";\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31")
buf.write("\16\33\17\35\20\37\21!\22#\23%\24\'\25)\26+\27-\30/\31")
buf.write("\61\32\63\33\65\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O")
buf.write(")Q*S+U,W-Y.[/]\60_\61a\62c\63e\64g\65i\66k\67m8o\2q\2")
buf.write("s\2\3\2\5\5\2\13\f\17\17\"\"\3\2\62;\4\2C\\c|\2\u0168")
buf.write("\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13")
buf.write("\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3")
buf.write("\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2")
buf.write("\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2")
buf.write("%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2")
buf.write("\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67")
buf.write("\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2")
buf.write("A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2")
buf.write("\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2")
buf.write("\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2")
buf.write("\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3")
buf.write("\2\2\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\3u\3\2\2\2\5~")
buf.write("\3\2\2\2\7\u0081\3\2\2\2\t\u0083\3\2\2\2\13\u0085\3\2")
buf.write("\2\2\r\u008a\3\2\2\2\17\u0091\3\2\2\2\21\u0099\3\2\2\2")
buf.write("\23\u009f\3\2\2\2\25\u00a4\3\2\2\2\27\u00a9\3\2\2\2\31")
buf.write("\u00b4\3\2\2\2\33\u00b6\3\2\2\2\35\u00b8\3\2\2\2\37\u00bf")
buf.write("\3\2\2\2!\u00c1\3\2\2\2#\u00c3\3\2\2\2%\u00c5\3\2\2\2")
buf.write("\'\u00cc\3\2\2\2)\u00d2\3\2\2\2+\u00d7\3\2\2\2-\u00dc")
buf.write("\3\2\2\2/\u00df\3\2\2\2\61\u00e6\3\2\2\2\63\u00ec\3\2")
buf.write("\2\2\65\u00f2\3\2\2\2\67\u00f4\3\2\2\29\u00f6\3\2\2\2")
buf.write(";\u00f8\3\2\2\2=\u00fa\3\2\2\2?\u00fc\3\2\2\2A\u00fe\3")
buf.write("\2\2\2C\u0100\3\2\2\2E\u0102\3\2\2\2G\u0104\3\2\2\2I\u0106")
buf.write("\3\2\2\2K\u0108\3\2\2\2M\u010a\3\2\2\2O\u010c\3\2\2\2")
buf.write("Q\u010e\3\2\2\2S\u0111\3\2\2\2U\u0114\3\2\2\2W\u0117\3")
buf.write("\2\2\2Y\u0119\3\2\2\2[\u011c\3\2\2\2]\u011e\3\2\2\2_\u0123")
buf.write("\3\2\2\2a\u0127\3\2\2\2c\u012a\3\2\2\2e\u012e\3\2\2\2")
buf.write("g\u0138\3\2\2\2i\u0143\3\2\2\2k\u0148\3\2\2\2m\u0151\3")
buf.write("\2\2\2o\u015d\3\2\2\2q\u015f\3\2\2\2s\u0161\3\2\2\2uv")
buf.write("\7T\2\2vw\7G\2\2wx\7I\2\2xy\7K\2\2yz\7U\2\2z{\7V\2\2{")
buf.write("|\7G\2\2|}\7T\2\2}\4\3\2\2\2~\177\7C\2\2\177\u0080\7U")
buf.write("\2\2\u0080\6\3\2\2\2\u0081\u0082\7=\2\2\u0082\b\3\2\2")
buf.write("\2\u0083\u0084\7?\2\2\u0084\n\3\2\2\2\u0085\u0086\7N\2")
buf.write("\2\u0086\u0087\7Q\2\2\u0087\u0088\7C\2\2\u0088\u0089\7")
buf.write("F\2\2\u0089\f\3\2\2\2\u008a\u008b\7U\2\2\u008b\u008c\7")
buf.write("E\2\2\u008c\u008d\7J\2\2\u008d\u008e\7G\2\2\u008e\u008f")
buf.write("\7O\2\2\u008f\u0090\7C\2\2\u0090\16\3\2\2\2\u0091\u0092")
buf.write("\7X\2\2\u0092\u0093\7G\2\2\u0093\u0094\7T\2\2\u0094\u0095")
buf.write("\7U\2\2\u0095\u0096\7K\2\2\u0096\u0097\7Q\2\2\u0097\u0098")
buf.write("\7P\2\2\u0098\20\3\2\2\2\u0099\u009a\7V\2\2\u009a\u009b")
buf.write("\7C\2\2\u009b\u009c\7D\2\2\u009c\u009d\7N\2\2\u009d\u009e")
buf.write("\7G\2\2\u009e\22\3\2\2\2\u009f\u00a0\7H\2\2\u00a0\u00a1")
buf.write("\7T\2\2\u00a1\u00a2\7Q\2\2\u00a2\u00a3\7O\2\2\u00a3\24")
buf.write("\3\2\2\2\u00a4\u00a5\7Y\2\2\u00a5\u00a6\7K\2\2\u00a6\u00a7")
buf.write("\7V\2\2\u00a7\u00a8\7J\2\2\u00a8\26\3\2\2\2\u00a9\u00aa")
buf.write("\7R\2\2\u00aa\u00ab\7T\2\2\u00ab\u00ac\7Q\2\2\u00ac\u00ad")
buf.write("\7L\2\2\u00ad\u00ae\7G\2\2\u00ae\u00af\7E\2\2\u00af\u00b0")
buf.write("\7V\2\2\u00b0\u00b1\7K\2\2\u00b1\u00b2\7Q\2\2\u00b2\u00b3")
buf.write("\7P\2\2\u00b3\30\3\2\2\2\u00b4\u00b5\7*\2\2\u00b5\32\3")
buf.write("\2\2\2\u00b6\u00b7\7+\2\2\u00b7\34\3\2\2\2\u00b8\u00b9")
buf.write("\7P\2\2\u00b9\u00ba\7Q\2\2\u00ba\u00bb\7G\2\2\u00bb\u00bc")
buf.write("\7O\2\2\u00bc\u00bd\7K\2\2\u00bd\u00be\7V\2\2\u00be\36")
buf.write("\3\2\2\2\u00bf\u00c0\7.\2\2\u00c0 \3\2\2\2\u00c1\u00c2")
buf.write("\7\60\2\2\u00c2\"\3\2\2\2\u00c3\u00c4\7<\2\2\u00c4$\3")
buf.write("\2\2\2\u00c5\u00c6\7G\2\2\u00c6\u00c7\7Z\2\2\u00c7\u00c8")
buf.write("\7R\2\2\u00c8\u00c9\7C\2\2\u00c9\u00ca\7P\2\2\u00ca\u00cb")
buf.write("\7F\2\2\u00cb&\3\2\2\2\u00cc\u00cd\7U\2\2\u00cd\u00ce")
buf.write("\7V\2\2\u00ce\u00cf\7Q\2\2\u00cf\u00d0\7T\2\2\u00d0\u00d1")
buf.write("\7G\2\2\u00d1(\3\2\2\2\u00d2\u00d3\7K\2\2\u00d3\u00d4")
buf.write("\7P\2\2\u00d4\u00d5\7V\2\2\u00d5\u00d6\7Q\2\2\u00d6*\3")
buf.write("\2\2\2\u00d7\u00d8\7L\2\2\u00d8\u00d9\7Q\2\2\u00d9\u00da")
buf.write("\7K\2\2\u00da\u00db\7P\2\2\u00db,\3\2\2\2\u00dc\u00dd")
buf.write("\7D\2\2\u00dd\u00de\7[\2\2\u00de.\3\2\2\2\u00df\u00e0")
buf.write("\7H\2\2\u00e0\u00e1\7K\2\2\u00e1\u00e2\7N\2\2\u00e2\u00e3")
buf.write("\7V\2\2\u00e3\u00e4\7G\2\2\u00e4\u00e5\7T\2\2\u00e5\60")
buf.write("\3\2\2\2\u00e6\u00e7\7Q\2\2\u00e7\u00e8\7T\2\2\u00e8\u00e9")
buf.write("\7F\2\2\u00e9\u00ea\7G\2\2\u00ea\u00eb\7T\2\2\u00eb\62")
buf.write("\3\2\2\2\u00ec\u00ed\7I\2\2\u00ed\u00ee\7T\2\2\u00ee\u00ef")
buf.write("\7Q\2\2\u00ef\u00f0\7W\2\2\u00f0\u00f1\7R\2\2\u00f1\64")
buf.write("\3\2\2\2\u00f2\u00f3\7)\2\2\u00f3\66\3\2\2\2\u00f4\u00f5")
buf.write("\7&\2\2\u00f58\3\2\2\2\u00f6\u00f7\7}\2\2\u00f7:\3\2\2")
buf.write("\2\u00f8\u00f9\7\177\2\2\u00f9<\3\2\2\2\u00fa\u00fb\7")
buf.write("B\2\2\u00fb>\3\2\2\2\u00fc\u00fd\7\'\2\2\u00fd@\3\2\2")
buf.write("\2\u00fe\u00ff\7A\2\2\u00ffB\3\2\2\2\u0100\u0101\7~\2")
buf.write("\2\u0101D\3\2\2\2\u0102\u0103\7(\2\2\u0103F\3\2\2\2\u0104")
buf.write("\u0105\7,\2\2\u0105H\3\2\2\2\u0106\u0107\7\61\2\2\u0107")
buf.write("J\3\2\2\2\u0108\u0109\7-\2\2\u0109L\3\2\2\2\u010a\u010b")
buf.write("\7/\2\2\u010bN\3\2\2\2\u010c\u010d\7`\2\2\u010dP\3\2\2")
buf.write("\2\u010e\u010f\7#\2\2\u010f\u0110\7?\2\2\u0110R\3\2\2")
buf.write("\2\u0111\u0112\7?\2\2\u0112\u0113\7?\2\2\u0113T\3\2\2")
buf.write("\2\u0114\u0115\7>\2\2\u0115\u0116\7?\2\2\u0116V\3\2\2")
buf.write("\2\u0117\u0118\7>\2\2\u0118X\3\2\2\2\u0119\u011a\7@\2")
buf.write("\2\u011a\u011b\7?\2\2\u011bZ\3\2\2\2\u011c\u011d\7@\2")
buf.write("\2\u011d\\\3\2\2\2\u011e\u011f\7N\2\2\u011f\u0120\7K\2")
buf.write("\2\u0120\u0121\7M\2\2\u0121\u0122\7G\2\2\u0122^\3\2\2")
buf.write("\2\u0123\u0124\7C\2\2\u0124\u0125\7P\2\2\u0125\u0126\7")
buf.write("F\2\2\u0126`\3\2\2\2\u0127\u0128\7Q\2\2\u0128\u0129\7")
buf.write("T\2\2\u0129b\3\2\2\2\u012a\u012b\7b\2\2\u012b\u012c\7")
buf.write("b\2\2\u012c\u012d\7b\2\2\u012dd\3\2\2\2\u012e\u0134\5")
buf.write("s:\2\u012f\u0133\5s:\2\u0130\u0133\5q9\2\u0131\u0133\5")
buf.write("o8\2\u0132\u012f\3\2\2\2\u0132\u0130\3\2\2\2\u0132\u0131")
buf.write("\3\2\2\2\u0133\u0136\3\2\2\2\u0134\u0132\3\2\2\2\u0134")
buf.write("\u0135\3\2\2\2\u0135f\3\2\2\2\u0136\u0134\3\2\2\2\u0137")
buf.write("\u0139\5q9\2\u0138\u0137\3\2\2\2\u0139\u013a\3\2\2\2\u013a")
buf.write("\u0138\3\2\2\2\u013a\u013b\3\2\2\2\u013b\u013c\3\2\2\2")
buf.write("\u013c\u013e\7\60\2\2\u013d\u013f\5q9\2\u013e\u013d\3")
buf.write("\2\2\2\u013f\u0140\3\2\2\2\u0140\u013e\3\2\2\2\u0140\u0141")
buf.write("\3\2\2\2\u0141h\3\2\2\2\u0142\u0144\5q9\2\u0143\u0142")
buf.write("\3\2\2\2\u0144\u0145\3\2\2\2\u0145\u0143\3\2\2\2\u0145")
buf.write("\u0146\3\2\2\2\u0146j\3\2\2\2\u0147\u0149\t\2\2\2\u0148")
buf.write("\u0147\3\2\2\2\u0149\u014a\3\2\2\2\u014a\u0148\3\2\2\2")
buf.write("\u014a\u014b\3\2\2\2\u014b\u014c\3\2\2\2\u014c\u014d\b")
buf.write("\66\2\2\u014dl\3\2\2\2\u014e\u014f\7/\2\2\u014f\u0152")
buf.write("\7/\2\2\u0150\u0152\7%\2\2\u0151\u014e\3\2\2\2\u0151\u0150")
buf.write("\3\2\2\2\u0152\u0156\3\2\2\2\u0153\u0155\13\2\2\2\u0154")
buf.write("\u0153\3\2\2\2\u0155\u0158\3\2\2\2\u0156\u0157\3\2\2\2")
buf.write("\u0156\u0154\3\2\2\2\u0157\u0159\3\2\2\2\u0158\u0156\3")
buf.write("\2\2\2\u0159\u015a\7\f\2\2\u015a\u015b\3\2\2\2\u015b\u015c")
buf.write("\b\67\3\2\u015cn\3\2\2\2\u015d\u015e\7a\2\2\u015ep\3\2")
buf.write("\2\2\u015f\u0160\t\3\2\2\u0160r\3\2\2\2\u0161\u0162\t")
buf.write("\4\2\2\u0162t\3\2\2\2\13\2\u0132\u0134\u013a\u0140\u0145")
buf.write("\u014a\u0151\u0156\4\2\3\2\2\4\2")
return buf.getvalue()
class sdplLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
T__6 = 7
T__7 = 8
T__8 = 9
T__9 = 10
T__10 = 11
T__11 = 12
T__12 = 13
T__13 = 14
T__14 = 15
T__15 = 16
T__16 = 17
T__17 = 18
T__18 = 19
T__19 = 20
T__20 = 21
T__21 = 22
T__22 = 23
T__23 = 24
T__24 = 25
T__25 = 26
T__26 = 27
T__27 = 28
T__28 = 29
T__29 = 30
T__30 = 31
T__31 = 32
T__32 = 33
T__33 = 34
AO_MULTIPLY = 35
AO_DIVIDE = 36
AO_PLUS = 37
AO_MINUS = 38
AO_POWER = 39
CO_NE = 40
CO_EQ = 41
CO_LE = 42
CO_LT = 43
CO_GE = 44
CO_GT = 45
CO_LIKE = 46
AND = 47
OR = 48
QUOTE_DELIM = 49
ID = 50
DECIMAL = 51
INTEGER = 52
WS = 53
SL_COMMENT = 54
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'REGISTER'", "'AS'", "';'", "'='", "'LOAD'", "'SCHEMA'", "'VERSION'",
"'TABLE'", "'FROM'", "'WITH'", "'PROJECTION'", "'('", "')'",
"'NOEMIT'", "','", "'.'", "':'", "'EXPAND'", "'STORE'", "'INTO'",
"'JOIN'", "'BY'", "'FILTER'", "'ORDER'", "'GROUP'", "'''", "'$'",
"'{'", "'}'", "'@'", "'%'", "'?'", "'|'", "'&'", "'*'", "'/'",
"'+'", "'-'", "'^'", "'!='", "'=='", "'<='", "'<'", "'>='",
"'>'", "'LIKE'", "'AND'", "'OR'", "'```'" ]
symbolicNames = [ "<INVALID>",
"AO_MULTIPLY", "AO_DIVIDE", "AO_PLUS", "AO_MINUS", "AO_POWER",
"CO_NE", "CO_EQ", "CO_LE", "CO_LT", "CO_GE", "CO_GT", "CO_LIKE",
"AND", "OR", "QUOTE_DELIM", "ID", "DECIMAL", "INTEGER", "WS",
"SL_COMMENT" ]
ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
"T__7", "T__8", "T__9", "T__10", "T__11", "T__12", "T__13",
"T__14", "T__15", "T__16", "T__17", "T__18", "T__19",
"T__20", "T__21", "T__22", "T__23", "T__24", "T__25",
"T__26", "T__27", "T__28", "T__29", "T__30", "T__31",
"T__32", "T__33", "AO_MULTIPLY", "AO_DIVIDE", "AO_PLUS",
"AO_MINUS", "AO_POWER", "CO_NE", "CO_EQ", "CO_LE", "CO_LT",
"CO_GE", "CO_GT", "CO_LIKE", "AND", "OR", "QUOTE_DELIM",
"ID", "DECIMAL", "INTEGER", "WS", "SL_COMMENT", "UNDERSCORE",
"NUMBER", "LETTER" ]
grammarFileName = "sdpl.g4"
    def __init__(self, input=None, output:TextIO = sys.stdout):
        # ANTLR-generated lexer constructor: pins the expected runtime
        # version (4.7) and installs the ATN-based simulator.  Do not edit
        # by hand - regenerate from sdpl.g4 instead.
        super().__init__(input, output)
        self.checkVersion("4.7")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None
CHANNEL_WHITESPACE = 1
CHANNEL_COMMENTS = 2 | grammar/sdplLexer.py | from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\28")
buf.write("\u0163\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\3\3\3\3\3\3\4\3")
buf.write("\4\3\5\3\5\3\6\3\6\3\6\3\6\3\6\3\7\3\7\3\7\3\7\3\7\3\7")
buf.write("\3\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3")
buf.write("\t\3\t\3\n\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3\13\3")
buf.write("\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\r\3\r\3\16")
buf.write("\3\16\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\21")
buf.write("\3\21\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\24")
buf.write("\3\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25\3\26")
buf.write("\3\26\3\26\3\26\3\26\3\27\3\27\3\27\3\30\3\30\3\30\3\30")
buf.write("\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\31\3\31\3\32\3\32")
buf.write("\3\32\3\32\3\32\3\32\3\33\3\33\3\34\3\34\3\35\3\35\3\36")
buf.write("\3\36\3\37\3\37\3 \3 \3!\3!\3\"\3\"\3#\3#\3$\3$\3%\3%")
buf.write("\3&\3&\3\'\3\'\3(\3(\3)\3)\3)\3*\3*\3*\3+\3+\3+\3,\3,")
buf.write("\3-\3-\3-\3.\3.\3/\3/\3/\3/\3/\3\60\3\60\3\60\3\60\3\61")
buf.write("\3\61\3\61\3\62\3\62\3\62\3\62\3\63\3\63\3\63\3\63\7\63")
buf.write("\u0133\n\63\f\63\16\63\u0136\13\63\3\64\6\64\u0139\n\64")
buf.write("\r\64\16\64\u013a\3\64\3\64\6\64\u013f\n\64\r\64\16\64")
buf.write("\u0140\3\65\6\65\u0144\n\65\r\65\16\65\u0145\3\66\6\66")
buf.write("\u0149\n\66\r\66\16\66\u014a\3\66\3\66\3\67\3\67\3\67")
buf.write("\5\67\u0152\n\67\3\67\7\67\u0155\n\67\f\67\16\67\u0158")
buf.write("\13\67\3\67\3\67\3\67\3\67\38\38\39\39\3:\3:\3\u0156\2")
buf.write(";\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31")
buf.write("\16\33\17\35\20\37\21!\22#\23%\24\'\25)\26+\27-\30/\31")
buf.write("\61\32\63\33\65\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O")
buf.write(")Q*S+U,W-Y.[/]\60_\61a\62c\63e\64g\65i\66k\67m8o\2q\2")
buf.write("s\2\3\2\5\5\2\13\f\17\17\"\"\3\2\62;\4\2C\\c|\2\u0168")
buf.write("\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13")
buf.write("\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3")
buf.write("\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2")
buf.write("\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2")
buf.write("%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2")
buf.write("\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67")
buf.write("\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2")
buf.write("A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2")
buf.write("\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2")
buf.write("\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2")
buf.write("\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3")
buf.write("\2\2\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\3u\3\2\2\2\5~")
buf.write("\3\2\2\2\7\u0081\3\2\2\2\t\u0083\3\2\2\2\13\u0085\3\2")
buf.write("\2\2\r\u008a\3\2\2\2\17\u0091\3\2\2\2\21\u0099\3\2\2\2")
buf.write("\23\u009f\3\2\2\2\25\u00a4\3\2\2\2\27\u00a9\3\2\2\2\31")
buf.write("\u00b4\3\2\2\2\33\u00b6\3\2\2\2\35\u00b8\3\2\2\2\37\u00bf")
buf.write("\3\2\2\2!\u00c1\3\2\2\2#\u00c3\3\2\2\2%\u00c5\3\2\2\2")
buf.write("\'\u00cc\3\2\2\2)\u00d2\3\2\2\2+\u00d7\3\2\2\2-\u00dc")
buf.write("\3\2\2\2/\u00df\3\2\2\2\61\u00e6\3\2\2\2\63\u00ec\3\2")
buf.write("\2\2\65\u00f2\3\2\2\2\67\u00f4\3\2\2\29\u00f6\3\2\2\2")
buf.write(";\u00f8\3\2\2\2=\u00fa\3\2\2\2?\u00fc\3\2\2\2A\u00fe\3")
buf.write("\2\2\2C\u0100\3\2\2\2E\u0102\3\2\2\2G\u0104\3\2\2\2I\u0106")
buf.write("\3\2\2\2K\u0108\3\2\2\2M\u010a\3\2\2\2O\u010c\3\2\2\2")
buf.write("Q\u010e\3\2\2\2S\u0111\3\2\2\2U\u0114\3\2\2\2W\u0117\3")
buf.write("\2\2\2Y\u0119\3\2\2\2[\u011c\3\2\2\2]\u011e\3\2\2\2_\u0123")
buf.write("\3\2\2\2a\u0127\3\2\2\2c\u012a\3\2\2\2e\u012e\3\2\2\2")
buf.write("g\u0138\3\2\2\2i\u0143\3\2\2\2k\u0148\3\2\2\2m\u0151\3")
buf.write("\2\2\2o\u015d\3\2\2\2q\u015f\3\2\2\2s\u0161\3\2\2\2uv")
buf.write("\7T\2\2vw\7G\2\2wx\7I\2\2xy\7K\2\2yz\7U\2\2z{\7V\2\2{")
buf.write("|\7G\2\2|}\7T\2\2}\4\3\2\2\2~\177\7C\2\2\177\u0080\7U")
buf.write("\2\2\u0080\6\3\2\2\2\u0081\u0082\7=\2\2\u0082\b\3\2\2")
buf.write("\2\u0083\u0084\7?\2\2\u0084\n\3\2\2\2\u0085\u0086\7N\2")
buf.write("\2\u0086\u0087\7Q\2\2\u0087\u0088\7C\2\2\u0088\u0089\7")
buf.write("F\2\2\u0089\f\3\2\2\2\u008a\u008b\7U\2\2\u008b\u008c\7")
buf.write("E\2\2\u008c\u008d\7J\2\2\u008d\u008e\7G\2\2\u008e\u008f")
buf.write("\7O\2\2\u008f\u0090\7C\2\2\u0090\16\3\2\2\2\u0091\u0092")
buf.write("\7X\2\2\u0092\u0093\7G\2\2\u0093\u0094\7T\2\2\u0094\u0095")
buf.write("\7U\2\2\u0095\u0096\7K\2\2\u0096\u0097\7Q\2\2\u0097\u0098")
buf.write("\7P\2\2\u0098\20\3\2\2\2\u0099\u009a\7V\2\2\u009a\u009b")
buf.write("\7C\2\2\u009b\u009c\7D\2\2\u009c\u009d\7N\2\2\u009d\u009e")
buf.write("\7G\2\2\u009e\22\3\2\2\2\u009f\u00a0\7H\2\2\u00a0\u00a1")
buf.write("\7T\2\2\u00a1\u00a2\7Q\2\2\u00a2\u00a3\7O\2\2\u00a3\24")
buf.write("\3\2\2\2\u00a4\u00a5\7Y\2\2\u00a5\u00a6\7K\2\2\u00a6\u00a7")
buf.write("\7V\2\2\u00a7\u00a8\7J\2\2\u00a8\26\3\2\2\2\u00a9\u00aa")
buf.write("\7R\2\2\u00aa\u00ab\7T\2\2\u00ab\u00ac\7Q\2\2\u00ac\u00ad")
buf.write("\7L\2\2\u00ad\u00ae\7G\2\2\u00ae\u00af\7E\2\2\u00af\u00b0")
buf.write("\7V\2\2\u00b0\u00b1\7K\2\2\u00b1\u00b2\7Q\2\2\u00b2\u00b3")
buf.write("\7P\2\2\u00b3\30\3\2\2\2\u00b4\u00b5\7*\2\2\u00b5\32\3")
buf.write("\2\2\2\u00b6\u00b7\7+\2\2\u00b7\34\3\2\2\2\u00b8\u00b9")
buf.write("\7P\2\2\u00b9\u00ba\7Q\2\2\u00ba\u00bb\7G\2\2\u00bb\u00bc")
buf.write("\7O\2\2\u00bc\u00bd\7K\2\2\u00bd\u00be\7V\2\2\u00be\36")
buf.write("\3\2\2\2\u00bf\u00c0\7.\2\2\u00c0 \3\2\2\2\u00c1\u00c2")
buf.write("\7\60\2\2\u00c2\"\3\2\2\2\u00c3\u00c4\7<\2\2\u00c4$\3")
buf.write("\2\2\2\u00c5\u00c6\7G\2\2\u00c6\u00c7\7Z\2\2\u00c7\u00c8")
buf.write("\7R\2\2\u00c8\u00c9\7C\2\2\u00c9\u00ca\7P\2\2\u00ca\u00cb")
buf.write("\7F\2\2\u00cb&\3\2\2\2\u00cc\u00cd\7U\2\2\u00cd\u00ce")
buf.write("\7V\2\2\u00ce\u00cf\7Q\2\2\u00cf\u00d0\7T\2\2\u00d0\u00d1")
buf.write("\7G\2\2\u00d1(\3\2\2\2\u00d2\u00d3\7K\2\2\u00d3\u00d4")
buf.write("\7P\2\2\u00d4\u00d5\7V\2\2\u00d5\u00d6\7Q\2\2\u00d6*\3")
buf.write("\2\2\2\u00d7\u00d8\7L\2\2\u00d8\u00d9\7Q\2\2\u00d9\u00da")
buf.write("\7K\2\2\u00da\u00db\7P\2\2\u00db,\3\2\2\2\u00dc\u00dd")
buf.write("\7D\2\2\u00dd\u00de\7[\2\2\u00de.\3\2\2\2\u00df\u00e0")
buf.write("\7H\2\2\u00e0\u00e1\7K\2\2\u00e1\u00e2\7N\2\2\u00e2\u00e3")
buf.write("\7V\2\2\u00e3\u00e4\7G\2\2\u00e4\u00e5\7T\2\2\u00e5\60")
buf.write("\3\2\2\2\u00e6\u00e7\7Q\2\2\u00e7\u00e8\7T\2\2\u00e8\u00e9")
buf.write("\7F\2\2\u00e9\u00ea\7G\2\2\u00ea\u00eb\7T\2\2\u00eb\62")
buf.write("\3\2\2\2\u00ec\u00ed\7I\2\2\u00ed\u00ee\7T\2\2\u00ee\u00ef")
buf.write("\7Q\2\2\u00ef\u00f0\7W\2\2\u00f0\u00f1\7R\2\2\u00f1\64")
buf.write("\3\2\2\2\u00f2\u00f3\7)\2\2\u00f3\66\3\2\2\2\u00f4\u00f5")
buf.write("\7&\2\2\u00f58\3\2\2\2\u00f6\u00f7\7}\2\2\u00f7:\3\2\2")
buf.write("\2\u00f8\u00f9\7\177\2\2\u00f9<\3\2\2\2\u00fa\u00fb\7")
buf.write("B\2\2\u00fb>\3\2\2\2\u00fc\u00fd\7\'\2\2\u00fd@\3\2\2")
buf.write("\2\u00fe\u00ff\7A\2\2\u00ffB\3\2\2\2\u0100\u0101\7~\2")
buf.write("\2\u0101D\3\2\2\2\u0102\u0103\7(\2\2\u0103F\3\2\2\2\u0104")
buf.write("\u0105\7,\2\2\u0105H\3\2\2\2\u0106\u0107\7\61\2\2\u0107")
buf.write("J\3\2\2\2\u0108\u0109\7-\2\2\u0109L\3\2\2\2\u010a\u010b")
buf.write("\7/\2\2\u010bN\3\2\2\2\u010c\u010d\7`\2\2\u010dP\3\2\2")
buf.write("\2\u010e\u010f\7#\2\2\u010f\u0110\7?\2\2\u0110R\3\2\2")
buf.write("\2\u0111\u0112\7?\2\2\u0112\u0113\7?\2\2\u0113T\3\2\2")
buf.write("\2\u0114\u0115\7>\2\2\u0115\u0116\7?\2\2\u0116V\3\2\2")
buf.write("\2\u0117\u0118\7>\2\2\u0118X\3\2\2\2\u0119\u011a\7@\2")
buf.write("\2\u011a\u011b\7?\2\2\u011bZ\3\2\2\2\u011c\u011d\7@\2")
buf.write("\2\u011d\\\3\2\2\2\u011e\u011f\7N\2\2\u011f\u0120\7K\2")
buf.write("\2\u0120\u0121\7M\2\2\u0121\u0122\7G\2\2\u0122^\3\2\2")
buf.write("\2\u0123\u0124\7C\2\2\u0124\u0125\7P\2\2\u0125\u0126\7")
buf.write("F\2\2\u0126`\3\2\2\2\u0127\u0128\7Q\2\2\u0128\u0129\7")
buf.write("T\2\2\u0129b\3\2\2\2\u012a\u012b\7b\2\2\u012b\u012c\7")
buf.write("b\2\2\u012c\u012d\7b\2\2\u012dd\3\2\2\2\u012e\u0134\5")
buf.write("s:\2\u012f\u0133\5s:\2\u0130\u0133\5q9\2\u0131\u0133\5")
buf.write("o8\2\u0132\u012f\3\2\2\2\u0132\u0130\3\2\2\2\u0132\u0131")
buf.write("\3\2\2\2\u0133\u0136\3\2\2\2\u0134\u0132\3\2\2\2\u0134")
buf.write("\u0135\3\2\2\2\u0135f\3\2\2\2\u0136\u0134\3\2\2\2\u0137")
buf.write("\u0139\5q9\2\u0138\u0137\3\2\2\2\u0139\u013a\3\2\2\2\u013a")
buf.write("\u0138\3\2\2\2\u013a\u013b\3\2\2\2\u013b\u013c\3\2\2\2")
buf.write("\u013c\u013e\7\60\2\2\u013d\u013f\5q9\2\u013e\u013d\3")
buf.write("\2\2\2\u013f\u0140\3\2\2\2\u0140\u013e\3\2\2\2\u0140\u0141")
buf.write("\3\2\2\2\u0141h\3\2\2\2\u0142\u0144\5q9\2\u0143\u0142")
buf.write("\3\2\2\2\u0144\u0145\3\2\2\2\u0145\u0143\3\2\2\2\u0145")
buf.write("\u0146\3\2\2\2\u0146j\3\2\2\2\u0147\u0149\t\2\2\2\u0148")
buf.write("\u0147\3\2\2\2\u0149\u014a\3\2\2\2\u014a\u0148\3\2\2\2")
buf.write("\u014a\u014b\3\2\2\2\u014b\u014c\3\2\2\2\u014c\u014d\b")
buf.write("\66\2\2\u014dl\3\2\2\2\u014e\u014f\7/\2\2\u014f\u0152")
buf.write("\7/\2\2\u0150\u0152\7%\2\2\u0151\u014e\3\2\2\2\u0151\u0150")
buf.write("\3\2\2\2\u0152\u0156\3\2\2\2\u0153\u0155\13\2\2\2\u0154")
buf.write("\u0153\3\2\2\2\u0155\u0158\3\2\2\2\u0156\u0157\3\2\2\2")
buf.write("\u0156\u0154\3\2\2\2\u0157\u0159\3\2\2\2\u0158\u0156\3")
buf.write("\2\2\2\u0159\u015a\7\f\2\2\u015a\u015b\3\2\2\2\u015b\u015c")
buf.write("\b\67\3\2\u015cn\3\2\2\2\u015d\u015e\7a\2\2\u015ep\3\2")
buf.write("\2\2\u015f\u0160\t\3\2\2\u0160r\3\2\2\2\u0161\u0162\t")
buf.write("\4\2\2\u0162t\3\2\2\2\13\2\u0132\u0134\u013a\u0140\u0145")
buf.write("\u014a\u0151\u0156\4\2\3\2\2\4\2")
return buf.getvalue()
class sdplLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
T__6 = 7
T__7 = 8
T__8 = 9
T__9 = 10
T__10 = 11
T__11 = 12
T__12 = 13
T__13 = 14
T__14 = 15
T__15 = 16
T__16 = 17
T__17 = 18
T__18 = 19
T__19 = 20
T__20 = 21
T__21 = 22
T__22 = 23
T__23 = 24
T__24 = 25
T__25 = 26
T__26 = 27
T__27 = 28
T__28 = 29
T__29 = 30
T__30 = 31
T__31 = 32
T__32 = 33
T__33 = 34
AO_MULTIPLY = 35
AO_DIVIDE = 36
AO_PLUS = 37
AO_MINUS = 38
AO_POWER = 39
CO_NE = 40
CO_EQ = 41
CO_LE = 42
CO_LT = 43
CO_GE = 44
CO_GT = 45
CO_LIKE = 46
AND = 47
OR = 48
QUOTE_DELIM = 49
ID = 50
DECIMAL = 51
INTEGER = 52
WS = 53
SL_COMMENT = 54
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'REGISTER'", "'AS'", "';'", "'='", "'LOAD'", "'SCHEMA'", "'VERSION'",
"'TABLE'", "'FROM'", "'WITH'", "'PROJECTION'", "'('", "')'",
"'NOEMIT'", "','", "'.'", "':'", "'EXPAND'", "'STORE'", "'INTO'",
"'JOIN'", "'BY'", "'FILTER'", "'ORDER'", "'GROUP'", "'''", "'$'",
"'{'", "'}'", "'@'", "'%'", "'?'", "'|'", "'&'", "'*'", "'/'",
"'+'", "'-'", "'^'", "'!='", "'=='", "'<='", "'<'", "'>='",
"'>'", "'LIKE'", "'AND'", "'OR'", "'```'" ]
symbolicNames = [ "<INVALID>",
"AO_MULTIPLY", "AO_DIVIDE", "AO_PLUS", "AO_MINUS", "AO_POWER",
"CO_NE", "CO_EQ", "CO_LE", "CO_LT", "CO_GE", "CO_GT", "CO_LIKE",
"AND", "OR", "QUOTE_DELIM", "ID", "DECIMAL", "INTEGER", "WS",
"SL_COMMENT" ]
ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
"T__7", "T__8", "T__9", "T__10", "T__11", "T__12", "T__13",
"T__14", "T__15", "T__16", "T__17", "T__18", "T__19",
"T__20", "T__21", "T__22", "T__23", "T__24", "T__25",
"T__26", "T__27", "T__28", "T__29", "T__30", "T__31",
"T__32", "T__33", "AO_MULTIPLY", "AO_DIVIDE", "AO_PLUS",
"AO_MINUS", "AO_POWER", "CO_NE", "CO_EQ", "CO_LE", "CO_LT",
"CO_GE", "CO_GT", "CO_LIKE", "AND", "OR", "QUOTE_DELIM",
"ID", "DECIMAL", "INTEGER", "WS", "SL_COMMENT", "UNDERSCORE",
"NUMBER", "LETTER" ]
grammarFileName = "sdpl.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.7")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
CHANNEL_WHITESPACE = 1
CHANNEL_COMMENTS = 2 | 0.284874 | 0.332256 |
import validators
from itertools import chain
from typing import Set, Union
import urlfinderlib.helpers as helpers
import urlfinderlib.tokenizer as tokenizer
from urlfinderlib.url import URLList
class TextUrlFinder:
def __init__(self, blob: Union[bytes, str]):
if isinstance(blob, str):
blob = blob.encode("utf-8", errors="ignore")
self.blob = blob
def find_urls(self, strict: bool = True, domain_as_url: bool = False) -> Set[str]:
tok = tokenizer.UTF8Tokenizer(self.blob)
token_iter = chain(
tok.get_line_tokens(),
tok.get_tokens_between_angle_brackets(strict=strict),
tok.get_tokens_between_backticks(),
tok.get_tokens_between_brackets(strict=strict),
tok.get_tokens_between_curly_brackets(strict=strict),
tok.get_tokens_between_double_quotes(),
tok.get_tokens_between_parentheses(strict=strict),
tok.get_tokens_between_single_quotes(),
tok.get_sentences(),
)
split_token_iter = tok.get_split_tokens_after_replace(["<", ">", "`", "[", "]", "{", "}", '"', "'", "(", ")"])
if domain_as_url:
tokens = set()
for token in token_iter:
if "." in token and "/" in token:
tokens.add(token)
continue
if validators.domain(token):
tokens.add(token)
for token in split_token_iter:
if "." in token and "/" in token:
tokens.add(token)
continue
if validators.domain(token):
tokens.add(token)
else:
tokens = {t for t in token_iter if "." in t and "/" in t}
tokens |= {t for t in split_token_iter if "." in t and "/" in t}
valid_urls = URLList()
for token in tokens:
# It is common for text files like email plaintext bodies to encode URLs in the form of:
# http://domain.com<http://actualdomain.com>
# where the text at the beginning is what will be displayed, and the text inside the <> is the
# actual URL you will be taken to if you click on it. In these cases, we don't want that entire string
# to be considered as a valid URL, but would rather have each of them as separate URLs.
if "<" in token and token.endswith(">"):
continue
valid_urls.append(helpers.fix_possible_url(token, domain_as_url=domain_as_url))
return set(valid_urls) | urlfinderlib/finders/text.py | import validators
from itertools import chain
from typing import Set, Union
import urlfinderlib.helpers as helpers
import urlfinderlib.tokenizer as tokenizer
from urlfinderlib.url import URLList
class TextUrlFinder:
def __init__(self, blob: Union[bytes, str]):
if isinstance(blob, str):
blob = blob.encode("utf-8", errors="ignore")
self.blob = blob
def find_urls(self, strict: bool = True, domain_as_url: bool = False) -> Set[str]:
tok = tokenizer.UTF8Tokenizer(self.blob)
token_iter = chain(
tok.get_line_tokens(),
tok.get_tokens_between_angle_brackets(strict=strict),
tok.get_tokens_between_backticks(),
tok.get_tokens_between_brackets(strict=strict),
tok.get_tokens_between_curly_brackets(strict=strict),
tok.get_tokens_between_double_quotes(),
tok.get_tokens_between_parentheses(strict=strict),
tok.get_tokens_between_single_quotes(),
tok.get_sentences(),
)
split_token_iter = tok.get_split_tokens_after_replace(["<", ">", "`", "[", "]", "{", "}", '"', "'", "(", ")"])
if domain_as_url:
tokens = set()
for token in token_iter:
if "." in token and "/" in token:
tokens.add(token)
continue
if validators.domain(token):
tokens.add(token)
for token in split_token_iter:
if "." in token and "/" in token:
tokens.add(token)
continue
if validators.domain(token):
tokens.add(token)
else:
tokens = {t for t in token_iter if "." in t and "/" in t}
tokens |= {t for t in split_token_iter if "." in t and "/" in t}
valid_urls = URLList()
for token in tokens:
# It is common for text files like email plaintext bodies to encode URLs in the form of:
# http://domain.com<http://actualdomain.com>
# where the text at the beginning is what will be displayed, and the text inside the <> is the
# actual URL you will be taken to if you click on it. In these cases, we don't want that entire string
# to be considered as a valid URL, but would rather have each of them as separate URLs.
if "<" in token and token.endswith(">"):
continue
valid_urls.append(helpers.fix_possible_url(token, domain_as_url=domain_as_url))
return set(valid_urls) | 0.63624 | 0.195729 |
from sleekxmpp.plugins.base import base_plugin
from rhobot.components.configuration import BotConfiguration
from rhobot.components.storage import StoragePayload
from foursquare_bot.components.configuration_enums import CLIENT_SECRET_KEY, IDENTIFIER_KEY
from foursquare_bot.components.utilities import get_foursquare_venue_from_url, foursquare_to_storage
import logging
import foursquare
from rdflib.namespace import RDFS, DCTERMS
logger = logging.getLogger(__name__)
class FoursquareLookup(base_plugin):
name = 'foursquare_lookup'
description = 'Foursquare Lookup'
dependencies = {'rho_bot_storage_client', 'rho_bot_rdf_publish', 'rho_bot_representation_manager', }
def plugin_init(self):
self.xmpp.add_event_handler(BotConfiguration.CONFIGURATION_RECEIVED_EVENT, self._configuration_updated)
self._foursquare_client = None
def post_init(self):
self._configuration = self.xmpp['rho_bot_configuration']
self._storage_client = self.xmpp['rho_bot_storage_client']
self._rdf_publish = self.xmpp['rho_bot_rdf_publish']
self._scheduler = self.xmpp['rho_bot_scheduler']
self._representation_manager = self.xmpp['rho_bot_representation_manager']
def _configuration_updated(self, event):
"""
Check to see if the properties for the foursquare service are available, updated, and then create the client
library to use in this bot.
:return:
"""
configuration = self._configuration.get_configuration()
client_secret = configuration.get(CLIENT_SECRET_KEY, None)
identifier = configuration.get(IDENTIFIER_KEY, None)
if client_secret is None or identifier is None:
self._foursquare_client = None
else:
if self._foursquare_client:
oauth = self._foursquare_client.oauth
if oauth.client_id == identifier and oauth.client_secret == client_secret:
return
self._foursquare_client = foursquare.Foursquare(client_id=identifier,
client_secret=client_secret)
def lookup_foursquare_content(self, node_uri, foursquare_identifier=None):
"""
Looks up the foursquare details of a venue.
:param node_uri: the uri of the node to look up.
:param foursquare_identifier: the identifier of the foursquare data. If this is not provided, the node will be
fetched and the first seeAlso property from the node will be used as this parameter.
:return:
"""
def update_venue_details(venue):
# No point in continuing this exercise if certain requirements are not resolved.
if not venue:
raise RuntimeError('Venue identifier is not defined')
if not self._foursquare_client:
raise RuntimeError('Foursquare client is not defined')
# Finished checking requirements, fetch the details and update.
logger.debug('Looking up venue: %s' % venue)
venue_details = self._foursquare_client.venues(venue)
# Translate the venue details into a rdf storage payload for sending to update.
if 'venue' in venue_details:
storage_payload = StoragePayload()
foursquare_to_storage(venue_details['venue'], storage_payload)
storage_payload.about = node_uri
storage_payload.add_reference(DCTERMS.creator, self._representation_manager.representation_uri)
return self._storage_client.update_node(storage_payload)
# Attempt to look up the venue id from the details in the node.
if foursquare_identifier is None:
search_payload = StoragePayload()
search_payload.about = node_uri
promise = self._storage_client.get_node(search_payload).then(self._handle_get_node)
else:
promise = self._scheduler.promise()
venue_identifier = get_foursquare_venue_from_url(foursquare_identifier)
promise.resolved(venue_identifier)
promise = promise.then(update_venue_details)
return promise
def _handle_get_node(self, result):
venue = None
for see_also in result.properties.get(str(RDFS.seeAlso), []):
venue = get_foursquare_venue_from_url(see_also)
if venue:
break
return venue
def schedule_lookup(self, node_uri, foursquare_identifier=None, create=False):
"""
Schedule a lookup on the node to be executed later.
:param node_uri: uri to look up.
:return:
"""
promise = self._scheduler.defer(self.lookup_foursquare_content, node_uri, foursquare_identifier)
if create:
return promise.then(self._publish_create)
else:
return promise.then(self._publish_update)
def _publish_update(self, result):
"""
Publish the update information to the channel.
:param result: result collection
:return:
"""
self._rdf_publish.publish_all_results(result, created=False)
def _publish_create(self, result):
"""
Publish the update information to the channel.
:param result: result collection
:return:
"""
self._rdf_publish.publish_all_results(result, created=True)
def search_foursquare(self, near, query=None):
"""
Search foursquare.
:param near: near a location
:param query: query to search for.
:return: list of id, name dictionaries.
"""
parameters = dict(near=near, limit=10)
if query:
parameters['query'] = query
venue_results = self._foursquare_client.venues.search(parameters)
logger.debug('venue_results: %s' % venue_results['venues'])
return venue_results['venues']
foursquare_lookup = FoursquareLookup | foursquare_bot/components/foursquare_lookup.py | from sleekxmpp.plugins.base import base_plugin
from rhobot.components.configuration import BotConfiguration
from rhobot.components.storage import StoragePayload
from foursquare_bot.components.configuration_enums import CLIENT_SECRET_KEY, IDENTIFIER_KEY
from foursquare_bot.components.utilities import get_foursquare_venue_from_url, foursquare_to_storage
import logging
import foursquare
from rdflib.namespace import RDFS, DCTERMS
logger = logging.getLogger(__name__)
class FoursquareLookup(base_plugin):
name = 'foursquare_lookup'
description = 'Foursquare Lookup'
dependencies = {'rho_bot_storage_client', 'rho_bot_rdf_publish', 'rho_bot_representation_manager', }
def plugin_init(self):
self.xmpp.add_event_handler(BotConfiguration.CONFIGURATION_RECEIVED_EVENT, self._configuration_updated)
self._foursquare_client = None
def post_init(self):
self._configuration = self.xmpp['rho_bot_configuration']
self._storage_client = self.xmpp['rho_bot_storage_client']
self._rdf_publish = self.xmpp['rho_bot_rdf_publish']
self._scheduler = self.xmpp['rho_bot_scheduler']
self._representation_manager = self.xmpp['rho_bot_representation_manager']
def _configuration_updated(self, event):
"""
Check to see if the properties for the foursquare service are available, updated, and then create the client
library to use in this bot.
:return:
"""
configuration = self._configuration.get_configuration()
client_secret = configuration.get(CLIENT_SECRET_KEY, None)
identifier = configuration.get(IDENTIFIER_KEY, None)
if client_secret is None or identifier is None:
self._foursquare_client = None
else:
if self._foursquare_client:
oauth = self._foursquare_client.oauth
if oauth.client_id == identifier and oauth.client_secret == client_secret:
return
self._foursquare_client = foursquare.Foursquare(client_id=identifier,
client_secret=client_secret)
def lookup_foursquare_content(self, node_uri, foursquare_identifier=None):
"""
Looks up the foursquare details of a venue.
:param node_uri: the uri of the node to look up.
:param foursquare_identifier: the identifier of the foursquare data. If this is not provided, the node will be
fetched and the first seeAlso property from the node will be used as this parameter.
:return:
"""
def update_venue_details(venue):
# No point in continuing this exercise if certain requirements are not resolved.
if not venue:
raise RuntimeError('Venue identifier is not defined')
if not self._foursquare_client:
raise RuntimeError('Foursquare client is not defined')
# Finished checking requirements, fetch the details and update.
logger.debug('Looking up venue: %s' % venue)
venue_details = self._foursquare_client.venues(venue)
# Translate the venue details into a rdf storage payload for sending to update.
if 'venue' in venue_details:
storage_payload = StoragePayload()
foursquare_to_storage(venue_details['venue'], storage_payload)
storage_payload.about = node_uri
storage_payload.add_reference(DCTERMS.creator, self._representation_manager.representation_uri)
return self._storage_client.update_node(storage_payload)
# Attempt to look up the venue id from the details in the node.
if foursquare_identifier is None:
search_payload = StoragePayload()
search_payload.about = node_uri
promise = self._storage_client.get_node(search_payload).then(self._handle_get_node)
else:
promise = self._scheduler.promise()
venue_identifier = get_foursquare_venue_from_url(foursquare_identifier)
promise.resolved(venue_identifier)
promise = promise.then(update_venue_details)
return promise
def _handle_get_node(self, result):
venue = None
for see_also in result.properties.get(str(RDFS.seeAlso), []):
venue = get_foursquare_venue_from_url(see_also)
if venue:
break
return venue
def schedule_lookup(self, node_uri, foursquare_identifier=None, create=False):
"""
Schedule a lookup on the node to be executed later.
:param node_uri: uri to look up.
:return:
"""
promise = self._scheduler.defer(self.lookup_foursquare_content, node_uri, foursquare_identifier)
if create:
return promise.then(self._publish_create)
else:
return promise.then(self._publish_update)
def _publish_update(self, result):
"""
Publish the update information to the channel.
:param result: result collection
:return:
"""
self._rdf_publish.publish_all_results(result, created=False)
def _publish_create(self, result):
"""
Publish the update information to the channel.
:param result: result collection
:return:
"""
self._rdf_publish.publish_all_results(result, created=True)
def search_foursquare(self, near, query=None):
"""
Search foursquare.
:param near: near a location
:param query: query to search for.
:return: list of id, name dictionaries.
"""
parameters = dict(near=near, limit=10)
if query:
parameters['query'] = query
venue_results = self._foursquare_client.venues.search(parameters)
logger.debug('venue_results: %s' % venue_results['venues'])
return venue_results['venues']
foursquare_lookup = FoursquareLookup | 0.610221 | 0.106226 |
from __future__ import annotations
import logging
import uuid
from copy import deepcopy
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Dict,
Iterator,
List,
Optional,
Type,
Union,
)
from algoliasearch.search_client import SearchClient
if TYPE_CHECKING:
from types import TracebackType
from algoliasearch.search_index_async import SearchIndexAsync
AlgoliaIndexType = Union["SearchIndexAsync", "MockAlgoliaIndex"]
"""Type annotation alias supporting the return types of the `AlgoliaIndex` and
`MockAlgoliaIndex` context managers.
"""
class BaseAlgoliaIndex:
"""Base class for an Algolia index client.
Parameters
----------
key : str
The Algolia API key.
app_id : str
The Algolia application ID.
name : str
Name of the Algolia index.
"""
def __init__(self, *, key: str, app_id: str, name: str):
self._key = key
self._app_id = app_id
self._index_name = name
self._logger = logging.getLogger(__name__)
@property
def name(self) -> str:
"""The index's name."""
return self._index_name
@property
def app_id(self) -> str:
"""The Algolia application ID."""
return self._app_id
class AlgoliaIndex(BaseAlgoliaIndex):
"""An Algolia index client.
This client wraps both the ``algoliasearch`` package's ``SearchClient``
and ``index`` classes.
Parameters
----------
key : str
The Algolia API key.
appid : str
The Algolia application ID.
name : str
Name of the Algolia index.
"""
async def __aenter__(self) -> SearchIndexAsync:
self._logger.debug("Opening algolia client")
self.algolia_client = SearchClient.create(self.app_id, self._key)
self._logger.debug("Initializing algolia index")
self.index = self.algolia_client.init_index(self.name)
return self.index
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc: Optional[Exception],
tb: Optional[TracebackType],
) -> None:
self._logger.debug("Closing algolia client")
await self.algolia_client.close_async()
self._logger.debug("Finished closing algolia client")
class MockAlgoliaIndex(BaseAlgoliaIndex):
"""A mock Algolia index client.
Use this client as a drop-in replaceemnt to `AlgoliaIndex` in situations
where you do not want to make real network requests to Algolia, such as in
testing or in dry-run applications.
Parameters
----------
key : str
The Algolia API key.
appid : str
The Algolia application ID.
index : str
Name of the Algolia index.
"""
async def __aenter__(self) -> "MockAlgoliaIndex":
self._logger.debug("Creating mock Algolia index")
self._saved_objects: List[Dict] = []
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc: Optional[Exception],
tb: Optional[TracebackType],
) -> None:
self._logger.debug("Closing MockAlgoliaIndex")
async def save_objects_async(
self,
objects: Union[List[Dict], Iterator[Dict]],
request_options: Optional[Dict[str, Any]] = None,
) -> MockMultiResponse:
"""Mock implementation of save_objects_async."""
for obj in objects:
self._saved_objects.append(deepcopy(obj))
return MockMultiResponse()
async def browse_objects_async(
self, search_settings: Dict[str, Any]
) -> AsyncIterator[Dict[str, Any]]:
self._logger.debug("Got search settings %s", search_settings)
# FIXME need to flesh out this mock:
# - provide a way to seed data
# - use attributesToRetrieve to inform what attributes are sent back
for _ in range(5):
yield {}
async def delete_objects_async(self, objectids: List[str]) -> List[str]:
return objectids
class MockMultiResponse:
"""Mock of an algolia resonse."""
def escape_facet_value(value: str) -> str:
"""Escape and quote a facet value for an Algolia search."""
value = value.replace('"', r"\"").replace("'", r"\'")
value = f'"{value}"'
return value
def generate_index_epoch() -> str:
"""Generate a new value for index_epoch key (a hexadecimal string
representation of a UUID4 unique identifier.
"""
return str(uuid.uuid4()) | astropylibrarian/algolia/client.py | from __future__ import annotations
import logging
import uuid
from copy import deepcopy
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Dict,
Iterator,
List,
Optional,
Type,
Union,
)
from algoliasearch.search_client import SearchClient
if TYPE_CHECKING:
from types import TracebackType
from algoliasearch.search_index_async import SearchIndexAsync
AlgoliaIndexType = Union["SearchIndexAsync", "MockAlgoliaIndex"]
"""Type annotation alias supporting the return types of the `AlgoliaIndex` and
`MockAlgoliaIndex` context managers.
"""
class BaseAlgoliaIndex:
"""Base class for an Algolia index client.
Parameters
----------
key : str
The Algolia API key.
app_id : str
The Algolia application ID.
name : str
Name of the Algolia index.
"""
def __init__(self, *, key: str, app_id: str, name: str):
self._key = key
self._app_id = app_id
self._index_name = name
self._logger = logging.getLogger(__name__)
@property
def name(self) -> str:
"""The index's name."""
return self._index_name
@property
def app_id(self) -> str:
"""The Algolia application ID."""
return self._app_id
class AlgoliaIndex(BaseAlgoliaIndex):
    """An Algolia index client.
    This client wraps both the ``algoliasearch`` package's ``SearchClient``
    and ``index`` classes.  Use it as an async context manager; the
    ``async with`` block yields the underlying ``SearchIndexAsync``.
    Parameters
    ----------
    key : str
        The Algolia API key.
    app_id : str
        The Algolia application ID.
    name : str
        Name of the Algolia index.
    """
    async def __aenter__(self) -> SearchIndexAsync:
        """Create the Algolia search client and return the initialized index."""
        self._logger.debug("Opening algolia client")
        self.algolia_client = SearchClient.create(self.app_id, self._key)
        self._logger.debug("Initializing algolia index")
        # The index handle is what callers interact with inside the context.
        self.index = self.algolia_client.init_index(self.name)
        return self.index
    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc: Optional[Exception],
        tb: Optional[TracebackType],
    ) -> None:
        """Close the Algolia client's network session on context exit."""
        self._logger.debug("Closing algolia client")
        await self.algolia_client.close_async()
        self._logger.debug("Finished closing algolia client")
class MockAlgoliaIndex(BaseAlgoliaIndex):
    """A mock Algolia index client.
    Use this client as a drop-in replacement for `AlgoliaIndex` in situations
    where you do not want to make real network requests to Algolia, such as in
    testing or in dry-run applications.
    Parameters
    ----------
    key : str
        The Algolia API key.
    app_id : str
        The Algolia application ID.
    name : str
        Name of the Algolia index.
    """
    async def __aenter__(self) -> "MockAlgoliaIndex":
        """Enter the context; objects "saved" while inside it accumulate in
        ``self._saved_objects`` for later inspection by tests.
        """
        self._logger.debug("Creating mock Algolia index")
        self._saved_objects: List[Dict] = []
        return self
    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc: Optional[Exception],
        tb: Optional[TracebackType],
    ) -> None:
        """Exit the context; the mock has no network session to close."""
        self._logger.debug("Closing MockAlgoliaIndex")
    async def save_objects_async(
        self,
        objects: Union[List[Dict], Iterator[Dict]],
        request_options: Optional[Dict[str, Any]] = None,
    ) -> MockMultiResponse:
        """Mock implementation of save_objects_async.
        Each object is deep-copied so later mutation by the caller cannot
        change the saved record.  ``request_options`` is accepted only for
        signature compatibility and is ignored.
        """
        for obj in objects:
            self._saved_objects.append(deepcopy(obj))
        return MockMultiResponse()
    async def browse_objects_async(
        self, search_settings: Dict[str, Any]
    ) -> AsyncIterator[Dict[str, Any]]:
        """Mock browse; currently yields five empty dicts regardless of input."""
        self._logger.debug("Got search settings %s", search_settings)
        # FIXME need to flesh out this mock:
        # - provide a way to seed data
        # - use attributesToRetrieve to inform what attributes are sent back
        for _ in range(5):
            yield {}
    async def delete_objects_async(self, objectids: List[str]) -> List[str]:
        """Mock delete; echoes the requested object IDs back as "deleted"."""
        return objectids
class MockMultiResponse:
    """Mock of an Algolia response (returned by `MockAlgoliaIndex.save_objects_async`)."""
def escape_facet_value(value: str) -> str:
    """Escape and quote a facet value for an Algolia search."""
    # Backslash-escape any embedded double/single quotes, then wrap the
    # whole value in double quotes as Algolia's facet filter syntax expects.
    escaped = value.replace('"', '\\"').replace("'", "\\'")
    return f'"{escaped}"'
def generate_index_epoch() -> str:
"""Generate a new value for index_epoch key (a hexadecimal string
representation of a UUID4 unique identifier.
"""
return str(uuid.uuid4()) | 0.913845 | 0.204898 |
# Commented out IPython magic to ensure Python compatibility.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
data = pd.read_csv("london_merged.csv")
data
"""Metadata:
- "timestamp" - timestamp field for grouping the data
- "cnt" - the count of a new bike shares
- "t1" - real temperature in C
- "t2" - temperature in C "feels like"
- "hum" - humidity in percentage
- "windspeed" - wind speed in km/h
- "weathercode" - category of the weather
- "isholiday" - boolean field - 1 holiday / 0 non holiday
- "isweekend" - boolean field - 1 if the day is weekend
"season" - category field meteorological seasons: 0-spring ; 1-summer; 2-fall; 3-winter.
- "weather_code" category description:
1 = Clear ; mostly clear but have some values with haze/fog/patches of fog/ fog in vicinity 2 = scattered clouds / few clouds 3 = Broken clouds 4 = Cloudy 7 = Rain/ light Rain shower/ Light rain 10 = rain with thunderstorm 26 = snowfall 94 = Freezing Fog
"""
data.info()
data['weather_code'].value_counts()
#one hot encoding required
data['is_weekend'].value_counts()
data['is_holiday'].value_counts()
data.isna().sum()
"""## Preprocessing"""
def data_preparation(df):
    """Return a copy of *df* with the ``timestamp`` column replaced by
    numeric ``month``, ``day`` and ``hour`` feature columns.

    The input frame is left unmodified.
    """
    df = df.copy()
    # Parse the raw timestamp strings once, then derive calendar features
    # via the vectorized .dt accessor -- same values as the original
    # per-row .apply(lambda x: x.month) calls, but a single C-level pass.
    df['timestamp'] = pd.to_datetime(df['timestamp'])
    df['month'] = df['timestamp'].dt.month
    df['day'] = df['timestamp'].dt.day
    df['hour'] = df['timestamp'].dt.hour
    # The raw timestamp is no longer needed once the features are extracted.
    df = df.drop("timestamp", axis=1)
    return df
X = data_preparation(data)
X
"""# Using PyCaret"""
!pip install pycaret
import pycaret.regression as pyr
pyr.setup(
data = X,
target = 'cnt',
train_size = 0.7,
normalize = True
)
pyr.compare_models()
best_model = pyr.create_model('lightgbm')
pyr.evaluate_model(best_model)
X
unseen_data = X.drop("cnt", axis=1)
unseen_data
result = pyr.predict_model(best_model, data=unseen_data)
print(result)
result.to_csv("prediction.csv")
"""# Using Sklearn's Pipeline"""
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
def pipeline_data(df):
    """Split *df* into train/test features and targets.

    Extracts ``month``/``day``/``hour`` from the ``timestamp`` column,
    drops the raw timestamp, and returns ``X_train, X_test, y_train,
    y_test`` from a 70/30 shuffled split (fixed random_state so the split
    is reproducible).  The input frame is left unmodified.
    """
    df = df.copy()
    # Vectorized datetime feature extraction (same values as the original
    # per-row .apply calls, but a single pass over the column).
    df['timestamp'] = pd.to_datetime(df['timestamp'])
    df['month'] = df['timestamp'].dt.month
    df['day'] = df['timestamp'].dt.day
    df['hour'] = df['timestamp'].dt.hour
    df = df.drop("timestamp", axis=1)
    # Features are everything except the target count column.
    X = df.drop("cnt", axis=1)
    y = df['cnt']
    return train_test_split(X, y, train_size=0.7, shuffle=True, random_state=1)
X_train, X_test, y_train, y_test = pipeline_data(data)
X_train
y_train
"""## Building the Pipeline"""
nominal_transformer = Pipeline(steps=[
("onehot", OneHotEncoder(sparse=False))
])
preprocessor = ColumnTransformer(transformers=[
("nominal", nominal_transformer, ['weather_code'])
], remainder = 'passthrough')
model = Pipeline(steps=[
('preprocessor', preprocessor),
('regressor', RandomForestRegressor())
])
estimator = model.fit(X_train, y_train)
"""## Evaluation
# R-Square
"""
y_true = np.array(y_test)
print(y_true)
y_pred = estimator.predict(X_test)
print(y_pred)
print("Model R^2 Score: {:.4f}".format(r2_score(y_true, y_pred)))
"""## RMSE"""
print(np.mean((y_test - y_pred) ** 2))
rmse = np.sqrt(np.mean((y_test - y_pred) ** 2))
print("RMSE is:", rmse) | London Bike Share Usage Prediction/london_bike_sharing_usage_prediction.py | # Commented out IPython magic to ensure Python compatibility.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
data = pd.read_csv("london_merged.csv")
data
"""Metadata:
- "timestamp" - timestamp field for grouping the data
- "cnt" - the count of a new bike shares
- "t1" - real temperature in C
- "t2" - temperature in C "feels like"
- "hum" - humidity in percentage
- "windspeed" - wind speed in km/h
- "weathercode" - category of the weather
- "isholiday" - boolean field - 1 holiday / 0 non holiday
- "isweekend" - boolean field - 1 if the day is weekend
"season" - category field meteorological seasons: 0-spring ; 1-summer; 2-fall; 3-winter.
- "weather_code" category description:
1 = Clear ; mostly clear but have some values with haze/fog/patches of fog/ fog in vicinity 2 = scattered clouds / few clouds 3 = Broken clouds 4 = Cloudy 7 = Rain/ light Rain shower/ Light rain 10 = rain with thunderstorm 26 = snowfall 94 = Freezing Fog
"""
data.info()
data['weather_code'].value_counts()
#one hot encoding required
data['is_weekend'].value_counts()
data['is_holiday'].value_counts()
data.isna().sum()
"""## Preprocessing"""
def data_preparation(df):
    """Return a copy of *df* with the ``timestamp`` column replaced by
    numeric ``month``, ``day`` and ``hour`` feature columns.

    The input frame is left unmodified.
    """
    df = df.copy()
    # Parse the raw timestamp strings once, then derive calendar features
    # via the vectorized .dt accessor -- same values as the original
    # per-row .apply(lambda x: x.month) calls, but a single C-level pass.
    df['timestamp'] = pd.to_datetime(df['timestamp'])
    df['month'] = df['timestamp'].dt.month
    df['day'] = df['timestamp'].dt.day
    df['hour'] = df['timestamp'].dt.hour
    # The raw timestamp is no longer needed once the features are extracted.
    df = df.drop("timestamp", axis=1)
    return df
X = data_preparation(data)
X
"""# Using PyCaret"""
!pip install pycaret
import pycaret.regression as pyr
pyr.setup(
data = X,
target = 'cnt',
train_size = 0.7,
normalize = True
)
pyr.compare_models()
best_model = pyr.create_model('lightgbm')
pyr.evaluate_model(best_model)
X
unseen_data = X.drop("cnt", axis=1)
unseen_data
result = pyr.predict_model(best_model, data=unseen_data)
print(result)
result.to_csv("prediction.csv")
"""# Using Sklearn's Pipeline"""
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
def pipeline_data(df):
df = df.copy()
#Handling the timestamp column. Extract month, day, and hour from it.
df['timestamp'] = pd.to_datetime(df['timestamp']) #datetime object
df['month'] = df['timestamp'].apply(lambda x: x.month) #extract month from datetime object
df['day'] = df['timestamp'].apply(lambda x: x.day) #extract day from the datetime object
df['hour'] = df['timestamp'].apply(lambda x: x.hour) #extract hour from the datetime object
#Now we can drop the actual timestamp column
df = df.drop("timestamp", axis=1)
#X and y
X = df.drop("cnt", axis=1)
y = df['cnt']
#split
X_train, X_test, y_train, y_test = train_test_split(X,y, train_size=0.7, shuffle=True, random_state=1)
return X_train, X_test, y_train, y_test
X_train, X_test, y_train, y_test = pipeline_data(data)
X_train
y_train
"""## Building the Pipeline"""
nominal_transformer = Pipeline(steps=[
("onehot", OneHotEncoder(sparse=False))
])
preprocessor = ColumnTransformer(transformers=[
("nominal", nominal_transformer, ['weather_code'])
], remainder = 'passthrough')
model = Pipeline(steps=[
('preprocessor', preprocessor),
('regressor', RandomForestRegressor())
])
estimator = model.fit(X_train, y_train)
"""## Evaluation
# R-Square
"""
y_true = np.array(y_test)
print(y_true)
y_pred = estimator.predict(X_test)
print(y_pred)
print("Model R^2 Score: {:.4f}".format(r2_score(y_true, y_pred)))
"""## RMSE"""
print(np.mean((y_test - y_pred) ** 2))
rmse = np.sqrt(np.mean((y_test - y_pred) ** 2))
print("RMSE is:", rmse) | 0.639961 | 0.611121 |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
import argparse
import csv
import os
import sys
import pickle
import numpy as np
import pandas as pd
from os.path import join
from collections import defaultdict
# matplotlib
titlesize = 33
xsize = 30
ysize = 30
ticksize = 25
legendsize = 25
er_alpha = 0.25
ENVS = [
'Ant-v2',
'HalfCheetah-v2',
'Hopper-v2',
'InvertedPendulum-v2',
'Reacher-v2',
'Swimmer-v2',
'Walker2d-v2',
]
def smoothed(x, w):
    """Smooth x by averaging over sliding windows of w, assuming sufficient length.
    """
    # Too short to smooth: return the input unchanged (and un-wrapped).
    if len(x) <= w:
        return x
    # Window [max(0, i-w), i) gives an expanding mean for the first w-1
    # points and a fixed trailing w-wide mean for the rest -- exactly the
    # two loops of the original collapsed into one comprehension.
    out = [np.mean(x[max(0, i - w):i]) for i in range(1, len(x) + 1)]
    assert len(x) == len(out), "lengths: {}, {}".format(len(x), len(out))
    return np.array(out)
def _get_jagged_mean_std(data, w=-1):
"""Mean and std vectors for a 'jagged' set of data.
Thus, just doing an `np.array()` on a list of lists won't usually give the
desired result. I have to compute means/stdevs explicitly here.
Parameters
----------
data: list
List of lists containing things of which we want to take means/stdevs.
"""
mean_vec = []
std_vec = []
maxlen = -1
for item in data:
maxlen = max(maxlen, len(item))
for idx in range(maxlen):
vals = []
for item in data:
if idx < len(item):
vals.append(item[idx])
mean_vec.append(np.mean(vals))
std_vec.append(np.std(vals))
if w != -1:
mean_vec = smoothed(mean_vec, w=w)
std_vec = smoothed(std_vec, w=w)
return np.array(mean_vec), np.array(std_vec)
def _get_stuff_from_monitor(mon):
"""Get stuff from `monitor` log files.
Monitor files are named `0.envidx.monitor.csv` and have one line for each
episode that finished in that CPU 'core', with the reward, length (number
of steps) and the time (in seconds). The lengths are not cumulative, but
time is cumulative.
"""
scores = []
steps = []
times = []
with open(mon, 'r') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for csv_row in csv_reader:
# First two lines don't contain interesting stuff.
if line_count == 0 or line_count == 1:
line_count += 1
continue
scores.append(float(csv_row[0]))
steps.append(int(csv_row[1]))
times.append(float(csv_row[2]))
line_count += 1
print("finished: {}".format(mon))
return scores, steps, times
def plot_mujoco(args, env_to_directory):
    """Plot from monitor files.

    Builds one row of subplots per MuJoCo env (exploration returns on the
    left, evaluation returns on the right), with one curve per logging
    directory, and saves the figure to ``args.title``.

    NOTE(review): assumes every directory listed in `env_to_directory`
    contains a /tmp/<dir>/progress.csv with 'rollout/return_history' and
    'eval/return_history' columns -- confirm against the logger that
    produced the files.
    """
    nrows, ncols = len(env_to_directory), 2
    fig, ax = plt.subplots(nrows, ncols, squeeze=False, sharey='row',
                           figsize=(13*ncols,6*nrows))
    # One subplot row per environment, iterated in sorted order so the
    # figure layout is deterministic.
    for idx,env in enumerate(sorted(env_to_directory.keys())):
        print('plotting: ', env)
        progress = []
        for directory in env_to_directory[env]:
            progfile = join('/tmp', directory, 'progress.csv')
            df = pd.read_csv(progfile, delimiter=',')
            expl_data = df['rollout/return_history'].tolist()
            eval_data = df['eval/return_history'].tolist()
            # Label each curve with its source directory.
            expl_l = '{}'.format(directory)
            eval_l = '{}'.format(directory)
            ax[idx,0].plot(expl_data, label=expl_l)
            ax[idx,1].plot(eval_data, label=eval_l)
        ax[idx,0].set_title('{}: Exploration'.format(env), fontsize=titlesize)
        ax[idx,1].set_title('{}: Evaluation'.format(env), fontsize=titlesize)
    # Apply shared axis labels / tick sizing / legends to every subplot.
    for row in range(nrows):
        for col in range(ncols):
            # https://github.com/BerkeleyAutomation/baselines-fork/issues/5
            ax[row,col].set_xlabel("Evaluation Points", fontsize=ysize)
            ax[row,col].set_ylabel("Avg 100 Episode Return", fontsize=ysize)
            ax[row,col].tick_params(axis='x', labelsize=ticksize)
            ax[row,col].tick_params(axis='y', labelsize=ticksize)
            leg = ax[row,col].legend(loc="best", ncol=1, prop={'size':legendsize})
            # Thicken legend lines for readability at this figure size.
            for legobj in leg.legendHandles:
                legobj.set_linewidth(5.0)
    figname = args.title
    plt.tight_layout()
    plt.savefig(figname)
    print("Just saved: {}\n".format(figname))
if __name__ == "__main__":
pp = argparse.ArgumentParser()
pp.add_argument('--title', type=str, default='mujoco.png')
args = pp.parse_args()
# All OpenAI monitor files.
openai_files = sorted(
[x for x in os.listdir('/tmp') if 'openai-2019-06-' in x]
)
# Go through and get all the monitors for each game.
env_to_directory = defaultdict(list)
for openai_f in openai_files:
logdir = join('/tmp', openai_f, 'log.txt')
with open(logdir) as fh:
_ = fh.readline()
line = fh.readline().strip()
line_sp = line.split()
mujoco_env = line_sp[-1]
assert mujoco_env in ENVS, "{}, {}, {}".format(mujoco_env, line, openai_f)
env_to_directory[mujoco_env].append(openai_f)
print('env to directory:')
for env in env_to_directory:
print(env_to_directory[env], ': ', env)
plot_mujoco(args, env_to_directory) | scripts/mujoco_results.py | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
import argparse
import csv
import os
import sys
import pickle
import numpy as np
import pandas as pd
from os.path import join
from collections import defaultdict
# matplotlib
titlesize = 33
xsize = 30
ysize = 30
ticksize = 25
legendsize = 25
er_alpha = 0.25
ENVS = [
'Ant-v2',
'HalfCheetah-v2',
'Hopper-v2',
'InvertedPendulum-v2',
'Reacher-v2',
'Swimmer-v2',
'Walker2d-v2',
]
def smoothed(x, w):
"""Smooth x by averaging over sliding windows of w, assuming sufficient length.
"""
if len(x) <= w:
return x
smooth = []
for i in range(1, w):
smooth.append( np.mean(x[0:i]) )
for i in range(w, len(x)+1):
smooth.append( np.mean(x[i-w:i]) )
assert len(x) == len(smooth), "lengths: {}, {}".format(len(x), len(smooth))
return np.array(smooth)
def _get_jagged_mean_std(data, w=-1):
"""Mean and std vectors for a 'jagged' set of data.
Thus, just doing an `np.array()` on a list of lists won't usually give the
desired result. I have to compute means/stdevs explicitly here.
Parameters
----------
data: list
List of lists containing things of which we want to take means/stdevs.
"""
mean_vec = []
std_vec = []
maxlen = -1
for item in data:
maxlen = max(maxlen, len(item))
for idx in range(maxlen):
vals = []
for item in data:
if idx < len(item):
vals.append(item[idx])
mean_vec.append(np.mean(vals))
std_vec.append(np.std(vals))
if w != -1:
mean_vec = smoothed(mean_vec, w=w)
std_vec = smoothed(std_vec, w=w)
return np.array(mean_vec), np.array(std_vec)
def _get_stuff_from_monitor(mon):
"""Get stuff from `monitor` log files.
Monitor files are named `0.envidx.monitor.csv` and have one line for each
episode that finished in that CPU 'core', with the reward, length (number
of steps) and the time (in seconds). The lengths are not cumulative, but
time is cumulative.
"""
scores = []
steps = []
times = []
with open(mon, 'r') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for csv_row in csv_reader:
# First two lines don't contain interesting stuff.
if line_count == 0 or line_count == 1:
line_count += 1
continue
scores.append(float(csv_row[0]))
steps.append(int(csv_row[1]))
times.append(float(csv_row[2]))
line_count += 1
print("finished: {}".format(mon))
return scores, steps, times
def plot_mujoco(args, env_to_directory):
"""Plot from monitor files.
"""
nrows, ncols = len(env_to_directory), 2
fig, ax = plt.subplots(nrows, ncols, squeeze=False, sharey='row',
figsize=(13*ncols,6*nrows))
for idx,env in enumerate(sorted(env_to_directory.keys())):
print('plotting: ', env)
progress = []
for directory in env_to_directory[env]:
progfile = join('/tmp', directory, 'progress.csv')
df = pd.read_csv(progfile, delimiter=',')
expl_data = df['rollout/return_history'].tolist()
eval_data = df['eval/return_history'].tolist()
expl_l = '{}'.format(directory)
eval_l = '{}'.format(directory)
ax[idx,0].plot(expl_data, label=expl_l)
ax[idx,1].plot(eval_data, label=eval_l)
ax[idx,0].set_title('{}: Exploration'.format(env), fontsize=titlesize)
ax[idx,1].set_title('{}: Evaluation'.format(env), fontsize=titlesize)
for row in range(nrows):
for col in range(ncols):
# https://github.com/BerkeleyAutomation/baselines-fork/issues/5
ax[row,col].set_xlabel("Evaluation Points", fontsize=ysize)
ax[row,col].set_ylabel("Avg 100 Episode Return", fontsize=ysize)
ax[row,col].tick_params(axis='x', labelsize=ticksize)
ax[row,col].tick_params(axis='y', labelsize=ticksize)
leg = ax[row,col].legend(loc="best", ncol=1, prop={'size':legendsize})
for legobj in leg.legendHandles:
legobj.set_linewidth(5.0)
figname = args.title
plt.tight_layout()
plt.savefig(figname)
print("Just saved: {}\n".format(figname))
if __name__ == "__main__":
pp = argparse.ArgumentParser()
pp.add_argument('--title', type=str, default='mujoco.png')
args = pp.parse_args()
# All OpenAI monitor files.
openai_files = sorted(
[x for x in os.listdir('/tmp') if 'openai-2019-06-' in x]
)
# Go through and get all the monitors for each game.
env_to_directory = defaultdict(list)
for openai_f in openai_files:
logdir = join('/tmp', openai_f, 'log.txt')
with open(logdir) as fh:
_ = fh.readline()
line = fh.readline().strip()
line_sp = line.split()
mujoco_env = line_sp[-1]
assert mujoco_env in ENVS, "{}, {}, {}".format(mujoco_env, line, openai_f)
env_to_directory[mujoco_env].append(openai_f)
print('env to directory:')
for env in env_to_directory:
print(env_to_directory[env], ': ', env)
plot_mujoco(args, env_to_directory) | 0.547706 | 0.408985 |
import cv2
import numpy as np
from ..adapters import Adapter
from ..config import StringField
from ..representation import BackgroundMattingPrediction
class ImageBackgroundMattingAdapter(Adapter):
__provider__ = 'background_matting_with_pha_and_fgr'
def process(self, raw, identifiers, frame_meta):
    """Convert raw network outputs into BackgroundMattingPrediction objects,
    one per identifier, each holding the resized alpha and foreground images.
    """
    # Resolve the configured output names against the actual network
    # outputs once per adapter lifetime.
    if not self.output_verified:
        self.select_output_blob(raw)
    result = []
    # NOTE(review): `[] * len(identifiers)` always evaluates to [] --
    # multiplying an EMPTY list is a no-op -- so this fallback does not
    # provide one meta per identifier, and `frame_meta[i]` below would
    # raise IndexError whenever frame_meta is None/empty. Probably meant
    # `[{}] * len(identifiers)`; confirm against sibling adapters.
    frame_meta = frame_meta or [] * len(identifiers)
    raw_outputs = self._extract_predictions(raw, frame_meta)
    pha = raw_outputs[self.pha]  # batch of alpha mattes
    fgr = raw_outputs[self.fgr]  # batch of foregrounds
    batch_size = len(identifiers)
    for i in range(batch_size):
        # Keyed by the originally-configured output names so downstream
        # metrics can look them up regardless of any renaming done in
        # select_output_blob.
        output = {
            self._orig_pha: self.to_image(pha[i], frame_meta[i]),
            self._orig_fgr: self.to_image(fgr[i], frame_meta[i])
        }
        result.append(
            BackgroundMattingPrediction(identifiers[i], output)
        )
    return result
@staticmethod
def to_image(tensor, meta):
    """Resize a prediction tensor back to the original image size.

    Assumes *tensor* is channel-first (the (1, 2, 0) transpose converts
    CHW -> HWC for OpenCV) and that *meta* carries 'original_width' /
    'original_height' from preprocessing.
    """
    out = cv2.resize(
        np.transpose(tensor, (1, 2, 0)),
        (meta['original_width'], meta['original_height'])
    )
    if len(out.shape) == 3 and out.shape[-1] == 3:
        # Reverse channel order for 3-channel (color) outputs.
        out = out[:, :, ::-1]
    return out
@classmethod
def parameters(cls):
    """Describe configurable adapter parameters, extending the base set
    with the names of the alpha and foreground output layers.
    """
    parameters = super().parameters()
    parameters.update({
        'alpha_out': StringField(
            description="Name of output layer with alpha.",
            optional=True
        ),
        'foreground_out': StringField(
            description="Name of output layer with foreground.",
            optional=True
        ),
    })
    return parameters
def select_output_blob(self, outputs):
    """Resolve the configured output names against the actual network
    outputs.  Runs once; `process` checks `output_verified` to avoid
    re-resolving on every batch.
    """
    self.pha = self.check_output_name(self.pha, outputs)
    self.fgr = self.check_output_name(self.fgr, outputs)
    self.output_verified = True
def configure(self):
self.pha = self.get_value_from_config('alpha_out')
self._orig_pha = self.get_value_from_config('alpha_out')
self.fgr = self.get_value_from_config('foreground_out')
self._orig_fgr = self.get_value_from_config('foreground_out')
self.output_verified = False | tools/accuracy_checker/openvino/tools/accuracy_checker/adapters/background_matting.py | import cv2
import numpy as np
from ..adapters import Adapter
from ..config import StringField
from ..representation import BackgroundMattingPrediction
class ImageBackgroundMattingAdapter(Adapter):
__provider__ = 'background_matting_with_pha_and_fgr'
def process(self, raw, identifiers, frame_meta):
if not self.output_verified:
self.select_output_blob(raw)
result = []
frame_meta = frame_meta or [] * len(identifiers)
raw_outputs = self._extract_predictions(raw, frame_meta)
pha = raw_outputs[self.pha]
fgr = raw_outputs[self.fgr]
batch_size = len(identifiers)
for i in range(batch_size):
output = {
self._orig_pha: self.to_image(pha[i], frame_meta[i]),
self._orig_fgr: self.to_image(fgr[i], frame_meta[i])
}
result.append(
BackgroundMattingPrediction(identifiers[i], output)
)
return result
@staticmethod
def to_image(tensor, meta):
out = cv2.resize(
np.transpose(tensor, (1, 2, 0)),
(meta['original_width'], meta['original_height'])
)
if len(out.shape) == 3 and out.shape[-1] == 3:
out = out[:, :, ::-1]
return out
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'alpha_out': StringField(
description="Name of output layer with alpha.",
optional=True
),
'foreground_out': StringField(
description="Name of output layer with foreground.",
optional=True
),
})
return parameters
def select_output_blob(self, outputs):
self.pha = self.check_output_name(self.pha, outputs)
self.fgr = self.check_output_name(self.fgr, outputs)
self.output_verified = True
def configure(self):
self.pha = self.get_value_from_config('alpha_out')
self._orig_pha = self.get_value_from_config('alpha_out')
self.fgr = self.get_value_from_config('foreground_out')
self._orig_fgr = self.get_value_from_config('foreground_out')
self.output_verified = False | 0.550849 | 0.233368 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('climatemodels', '0094_auto_20190404_0854'),
]
operations = [
migrations.AddField(
model_name='biomes',
name='compute_soil_carbon',
field=models.TextField(blank=True, default='', null=True, verbose_name='How do you compute soil organic carbon during land use (do you mix the previous PFT SOC into agricultural SOC)?'),
),
migrations.AddField(
model_name='biomes',
name='harvest_npp_crops',
field=models.TextField(blank=True, default='', null=True, verbose_name='Do you harvest NPP of crops? Do you including grazing? How does harvested NPP decay?'),
),
migrations.AddField(
model_name='biomes',
name='npp_litter_output',
field=models.TextField(blank=True, default='', null=True, verbose_name='Does non-harvested crop NPP go to litter in your output?'),
),
migrations.AddField(
model_name='biomes',
name='seperate_soil_carbon',
field=models.TextField(blank=True, default='', null=True, verbose_name='Do you separate soil organic carbon in pasture from natural grass?'),
),
migrations.AddField(
model_name='biomes',
name='simulate_bioenergy',
field=models.TextField(blank=True, default='', null=True, verbose_name='How do you simulate bioenergy? I.e. What PFT do you simulate on bioenergy land?'),
),
migrations.AddField(
model_name='biomes',
name='simulate_pasture',
field=models.TextField(blank=True, default='', null=True, verbose_name='How do you simulate pasture (which PFT)?'),
),
migrations.AddField(
model_name='biomes',
name='transition_cropland',
field=models.TextField(blank=True, default='', null=True, verbose_name='How do you simulate the transition from cropland to bioenergy?'),
),
migrations.AddField(
model_name='biomes',
name='treat_biofuel_npp',
field=models.TextField(blank=True, default='', null=True, verbose_name='How do you to treat biofuel NPP and biofuel harvest?'),
),
] | isi_mip/climatemodels/migrations/0095_auto_20190408_1053.py | from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('climatemodels', '0094_auto_20190404_0854'),
]
operations = [
migrations.AddField(
model_name='biomes',
name='compute_soil_carbon',
field=models.TextField(blank=True, default='', null=True, verbose_name='How do you compute soil organic carbon during land use (do you mix the previous PFT SOC into agricultural SOC)?'),
),
migrations.AddField(
model_name='biomes',
name='harvest_npp_crops',
field=models.TextField(blank=True, default='', null=True, verbose_name='Do you harvest NPP of crops? Do you including grazing? How does harvested NPP decay?'),
),
migrations.AddField(
model_name='biomes',
name='npp_litter_output',
field=models.TextField(blank=True, default='', null=True, verbose_name='Does non-harvested crop NPP go to litter in your output?'),
),
migrations.AddField(
model_name='biomes',
name='seperate_soil_carbon',
field=models.TextField(blank=True, default='', null=True, verbose_name='Do you separate soil organic carbon in pasture from natural grass?'),
),
migrations.AddField(
model_name='biomes',
name='simulate_bioenergy',
field=models.TextField(blank=True, default='', null=True, verbose_name='How do you simulate bioenergy? I.e. What PFT do you simulate on bioenergy land?'),
),
migrations.AddField(
model_name='biomes',
name='simulate_pasture',
field=models.TextField(blank=True, default='', null=True, verbose_name='How do you simulate pasture (which PFT)?'),
),
migrations.AddField(
model_name='biomes',
name='transition_cropland',
field=models.TextField(blank=True, default='', null=True, verbose_name='How do you simulate the transition from cropland to bioenergy?'),
),
migrations.AddField(
model_name='biomes',
name='treat_biofuel_npp',
field=models.TextField(blank=True, default='', null=True, verbose_name='How do you to treat biofuel NPP and biofuel harvest?'),
),
] | 0.705075 | 0.211173 |
import nltk
import sys
import os
import string
import math
FILE_MATCHES = 1
SENTENCE_MATCHES = 1
def main():
    """Command-line entry point: rank corpus documents and sentences
    against a single interactive query (usage: python questions.py corpus).
    """
    # Check command-line arguments
    if len(sys.argv) != 2:
        sys.exit("Usage: python questions.py corpus")
    # Calculate IDF values across files
    files = load_files(sys.argv[1])
    file_words = {
        filename: tokenize(files[filename])
        for filename in files
    }
    file_idfs = compute_idfs(file_words)
    # Prompt user for query
    query = set(tokenize(input("Query: ")))
    # Determine top file matches according to TF-IDF
    filenames = top_files(query, file_words, file_idfs, n=FILE_MATCHES)
    # Extract sentences from top files
    sentences = dict()
    for filename in filenames:
        for passage in files[filename].split("\n"):
            for sentence in nltk.sent_tokenize(passage):
                tokens = tokenize(sentence)
                if tokens:  # skip sentences that tokenize to nothing
                    sentences[sentence] = tokens
    # Compute IDF values across sentences (each sentence is a "document")
    idfs = compute_idfs(sentences)
    # Determine top sentence matches
    matches = top_sentences(query, sentences, idfs, n=SENTENCE_MATCHES)
    for match in matches:
        print(match)
def load_files(directory):
    """
    Given a directory name, return a dictionary mapping the filename of each
    `.txt` file inside that directory to the file's contents as a string.
    """
    contents = {}
    for filename in os.listdir(directory):
        # The contract above promises `.txt` files only; the original read
        # every directory entry, which crashes on subdirectories and pulls
        # in stray non-corpus files.
        if not filename.endswith(".txt"):
            continue
        with open(os.path.join(directory, filename), encoding="utf-8") as fh:
            contents[filename] = fh.read()
    return contents
def tokenize(document):
    """
    Given a document (represented as a string), return a list of all of the
    words in that document, in order.
    Process document by converting all words to lowercase, and removing any
    punctuation or English stopwords.
    """
    tokenized = nltk.tokenize.word_tokenize(document.lower())
    # The original re-fetched the stopword list and scanned it linearly for
    # EVERY token, making tokenization quadratic in document length.  Fetch
    # it once and use O(1) set membership instead.  The punctuation check
    # deliberately keeps the original substring semantics of
    # `x in string.punctuation`.
    stop_words = set(nltk.corpus.stopwords.words("english"))
    return [x for x in tokenized
            if x not in string.punctuation and x not in stop_words]
def compute_idfs(documents):
    """
    Given a dictionary of `documents` that maps names of documents to a list
    of words, return a dictionary that maps words to their IDF values.
    Any word that appears in at least one of the documents should be in the
    resulting dictionary.  IDF(word) = ln(N / number of docs containing word).
    """
    total_docs = len(documents)
    # Count, for each word, how many documents contain it.  Deduplicating
    # each document with set() makes this a single O(total words) pass;
    # the original concatenated every word list with sum(..., []) (quadratic)
    # and then scanned every document list for every unique word.
    doc_counts = {}
    for words in documents.values():
        for word in set(words):
            doc_counts[word] = doc_counts.get(word, 0) + 1
    return {word: math.log(total_docs / count)
            for word, count in doc_counts.items()}
def top_files(query, files, idfs, n):
    """
    Given a `query` (a set of words), `files` (a dictionary mapping names of
    files to a list of their words), and `idfs` (a dictionary mapping words
    to their IDF values), return a list of the filenames of the `n` top
    files that match the query, ranked according to tf-idf.
    """
    scores = {}
    for filename, words in files.items():
        # One counting pass per file; the original called `word in words`
        # and `words.count(word)` per query word, each a full list scan.
        counts = {}
        for word in words:
            counts[word] = counts.get(word, 0) + 1
        file_score = sum(counts[word] * idfs[word]
                         for word in query if word in counts)
        # Files with no query words at all are excluded from the ranking.
        if file_score != 0:
            scores[filename] = file_score
    # Stable sort by descending tf-idf score preserves insertion order on ties,
    # matching the original ranking.
    ranked = sorted(scores, key=scores.get, reverse=True)
    return ranked[:n]
def top_sentences(query, sentences, idfs, n):
    """
    Given a `query` (a set of words), `sentences` (a dictionary mapping
    sentences to a list of their words), and `idfs` (a dictionary mapping words
    to their IDF values), return a list of the `n` top sentences that match
    the query, ranked according to idf. If there are ties, preference should
    be given to sentences that have a higher query term density.
    """
    ranking = {}
    for sentence, words in sentences.items():
        # "Matching word measure": sum of IDF values of query words present.
        idf_score = sum(idfs[word] for word in query if word in words)
        if idf_score == 0:
            continue
        # Query term density: proportion of sentence words that are query words.
        density = sum(words.count(word) for word in query) / len(words)
        ranking[sentence] = (idf_score, density)
    # Sorting by the (idf, density) tuple ranks by idf first, density second.
    ordered = sorted(ranking, key=ranking.get, reverse=True)
    return ordered[:n]
if __name__ == "__main__":
main() | Intro To Artificial Intelligence/Projects/week6/questions/questions.py | import nltk
import sys
import os
import string
import math
FILE_MATCHES = 1
SENTENCE_MATCHES = 1
def main():
# Check command-line arguments
if len(sys.argv) != 2:
sys.exit("Usage: python questions.py corpus")
# Calculate IDF values across files
files = load_files(sys.argv[1])
file_words = {
filename: tokenize(files[filename])
for filename in files
}
file_idfs = compute_idfs(file_words)
# Prompt user for query
query = set(tokenize(input("Query: ")))
# Determine top file matches according to TF-IDF
filenames = top_files(query, file_words, file_idfs, n=FILE_MATCHES)
# Extract sentences from top files
sentences = dict()
for filename in filenames:
for passage in files[filename].split("\n"):
for sentence in nltk.sent_tokenize(passage):
tokens = tokenize(sentence)
if tokens:
sentences[sentence] = tokens
# Compute IDF values across sentences
idfs = compute_idfs(sentences)
# Determine top sentence matches
matches = top_sentences(query, sentences, idfs, n=SENTENCE_MATCHES)
for match in matches:
print(match)
def load_files(directory):
"""
Given a directory name, return a dictionary mapping the filename of each
`.txt` file inside that directory to the file's contents as a string.
"""
dictionary = {}
for file in os.listdir(directory):
with open(os.path.join(directory, file), encoding="utf-8") as ofile:
dictionary[file] = ofile.read()
return dictionary
def tokenize(document):
    """
    Given a document (represented as a string), return a list of all of the
    words in that document, in order.

    Process document by converting all words to lowercase, and removing any
    punctuation tokens or English stopwords.
    """
    # Build the filter sets once per call: set membership is O(1), and the
    # previous version re-read nltk.corpus.stopwords.words("english") for
    # every single token (O(tokens * stopwords)). Using a set of punctuation
    # characters also makes the punctuation test an exact-token match rather
    # than an accidental substring test against string.punctuation.
    stopwords = set(nltk.corpus.stopwords.words("english"))
    punctuation = set(string.punctuation)
    tokenized = nltk.tokenize.word_tokenize(document.lower())
    return [x for x in tokenized if x not in punctuation and x not in stopwords]
def compute_idfs(documents):
    """
    Given a dictionary of `documents` that maps names of documents to a list
    of words, return a dictionary that maps words to their IDF values.

    Any word that appears in at least one of the documents is in the
    resulting dictionary; IDF = ln(total documents / documents containing
    the word).
    """
    doc_count = len(documents)
    # Document frequency in a single pass over the corpus. The previous
    # version built one giant list with sum(values(), []) (quadratic) and
    # then re-scanned every document once per unique word.
    doc_freq = {}
    for words in documents.values():
        for word in set(words):
            doc_freq[word] = doc_freq.get(word, 0) + 1
    return {word: math.log(doc_count / freq) for word, freq in doc_freq.items()}
def top_files(query, files, idfs, n):
    """
    Given a `query` (a set of words), `files` (a dictionary mapping names of
    files to a list of their words), and `idfs` (a dictionary mapping words
    to their IDF values), return a list of the filenames of the `n` top
    files that match the query, ranked according to tf-idf.
    """
    ranking = {}
    for name, words in files.items():
        # tf-idf for a file: sum over query words of term-frequency * idf.
        tfidf = sum(words.count(word) * idfs[word] for word in query if word in words)
        if tfidf != 0:
            ranking[name] = tfidf
    best_first = sorted(ranking, key=ranking.get, reverse=True)
    return best_first[:n]
def top_sentences(query, sentences, idfs, n):
    """
    Given a `query` (a set of words), `sentences` (a dictionary mapping
    sentences to a list of their words), and `idfs` (a dictionary mapping
    words to their IDF values), return a list of the `n` top sentences that
    match the query, ranked according to idf. Ties are broken in favor of
    sentences with a higher query term density.
    """
    ranked = []
    for sentence, words in sentences.items():
        idf_score = sum(idfs[word] for word in query if word in words)
        if idf_score != 0:
            # Query term density: fraction of the sentence's words (counting
            # repeats) that are query words.
            density = sum(words.count(word) for word in query) / len(words)
            ranked.append((idf_score, density, sentence))
    # Stable sort preserves insertion order among exact ties, matching the
    # original dict-items-based implementation.
    ranked.sort(key=lambda entry: (entry[0], entry[1]), reverse=True)
    return [sentence for _, _, sentence in ranked[:n]]
if __name__ == "__main__":
main() | 0.501221 | 0.432303 |
from __future__ import division
from collections import namedtuple
import math
Vector3 = namedtuple('Vector3', ['x', 'y', 'z'])
Vector2 = namedtuple('Vector2', ['x', 'y'])
def crossProduct(a, b):
    """ Return the cross product of a and b, scaled to unit length.
    Returns the zero vector when the product's magnitude is negligible.
    """
    cx = a.y * b.z - a.z * b.y
    cy = a.z * b.x - a.x * b.z
    cz = a.x * b.y - a.y * b.x
    length = math.sqrt(cx * cx + cy * cy + cz * cz)
    if length < 1e-8:
        return Vector3(0, 0, 0)
    return Vector3(cx / length, cy / length, cz / length)
# end def
def normalizeV3(v):
    """ Return v scaled to unit length, or the zero vector when v is
    (numerically) zero.
    """
    length = math.sqrt(v.x ** 2 + v.y ** 2 + v.z ** 2)
    if length < 1e-8:
        return Vector3(0, 0, 0)
    return Vector3(v.x / length, v.y / length, v.z / length)
# end def
def normalizeV2(v):
    """ Return the 2D vector v scaled to unit length as a Vector2, or
    Vector2(0, 0) when v is (numerically) zero.

    Fixes two defects: the zero case previously returned a 3-component
    Vector3 (callers unpacking two components would fail), and the non-zero
    case read ``v.y`` after tuple-unpacking, raising AttributeError for
    plain tuple inputs.
    """
    x, y = v
    mag = math.sqrt(x**2 + y**2)
    if mag < 1e-8:
        return Vector2(0, 0)
    else:
        return Vector2(x/mag, y/mag)
# end def
def normalToPlane(v1, v2, v3):
    """ Calculate the unit normal to the plane defined by vertices
    v1, v2, and v3 (winding order determines the normal's sign).
    """
    # Edge vectors sharing vertex v2; their cross product is normal to the
    # plane, and crossProduct already scales the result to unit length.
    def subVector(a, b): return Vector3(a.x - b.x, a.y - b.y, a.z - b.z)
    a = subVector(v3, v2)
    b = subVector(v1, v2)
    return crossProduct(a, b)
# end def
def applyMatrix3(m, v):
    """ Multiply the 3x3 matrix m (a flat 9-element, row-major sequence)
    by vector v and return the resulting Vector3.
    """
    return Vector3(
        m[0] * v.x + m[1] * v.y + m[2] * v.z,
        m[3] * v.x + m[4] * v.y + m[5] * v.z,
        m[6] * v.x + m[7] * v.y + m[8] * v.z,
    )
# end def
def applyMatrix4(m, v):
    """ Apply the affine transform in the flat, row-major matrix m (rows of
    four: three linear coefficients plus a translation term at m[3], m[7],
    m[11]) to v, returning a Vector3.
    """
    return Vector3(
        m[0] * v.x + m[1] * v.y + m[2] * v.z + m[3],
        m[4] * v.x + m[5] * v.y + m[6] * v.z + m[7],
        m[8] * v.x + m[9] * v.y + m[10] * v.z + m[11],
    )
# end def
# Functional "setters": Vector3 is an immutable namedtuple, so each helper
# returns a new Vector3 with a single component replaced.
def v3SetX(v, x):
    return Vector3(x, v.y, v.z)
def v3SetY(v, y):
    return Vector3(v.x, y, v.z)
def v3SetZ(v, z):
    return Vector3(v.x, v.y, z)
def addVectors(v1, v2):
    """ return v1 + v2 (component-wise)
    """
    return Vector3(v1.x+v2.x, v1.y+v2.y, v1.z+v2.z)
def subVectors(v1, v2):
    """ return v1 - v2 (component-wise)
    """
    return Vector3(v1.x-v2.x, v1.y-v2.y, v1.z-v2.z)
def multiplyScalar(v, s):
    """ return v*s (each component scaled by s)
    """
    return Vector3(v.x*s, v.y*s, v.z*s)
def v2DistanceAndAngle(a, b):
    """ Return (distance, angle) from 2D point a to 2D point b, with the
    angle in radians from atan2.
    """
    delta_x = b[0] - a[0]
    delta_y = b[1] - a[1]
    distance = math.sqrt(delta_x * delta_x + delta_y * delta_y)
    return distance, math.atan2(delta_y, delta_x)
def v2dot(a, b):
    """ Return the 2D dot product of a and b. """
    return a[0] * b[0] + a[1] * b[1]
def v2AngleBetween(a, b):
    """ Return the angle in radians between 2D vectors a and b.

    The previous version discarded the dot product, unpacked ``a`` twice
    (``xa, xa = a`` and ``xb, yb = a``), and referenced undefined names
    (``ya``, ``num``), so it always raised NameError. Both inputs are
    normalized first, so the angle is acos of their dot product; the dot
    product is clamped to [-1, 1] to guard against floating-point drift.
    """
    a = normalizeV2(a)
    b = normalizeV2(b)
    d = max(-1.0, min(1.0, v2dot(a, b)))
    return math.acos(d)
# end def
# end def | cadnano/extras/math/vector.py | from __future__ import division
from collections import namedtuple
import math
Vector3 = namedtuple('Vector3', ['x', 'y', 'z'])
Vector2 = namedtuple('Vector2', ['x', 'y'])
def crossProduct(a, b):
""" return normalized cross product
"""
x = a.y*b.z - a.z*b.y
y = -(a.x*b.z - a.z*b.x)
z = a.x*b.y - a.y*b.x
mag = math.sqrt(x**2 + y**2 + z**2)
if mag < 1e-8:
return Vector3(0, 0, 0)
else:
return Vector3(x/mag, y/mag, z/mag)
# end def
def normalizeV3(v):
mag = math.sqrt(v.x**2 + v.y**2 + v.z**2)
if mag < 1e-8:
return Vector3(0, 0, 0)
else:
return Vector3(v.x/mag, v.y/mag, v.z/mag)
# end def
def normalizeV2(v):
    """ Return the 2D vector v scaled to unit length as a Vector2, or
    Vector2(0, 0) when v is (numerically) zero.

    Fixes two defects: the zero case previously returned a 3-component
    Vector3 (callers unpacking two components would fail), and the non-zero
    case read ``v.y`` after tuple-unpacking, raising AttributeError for
    plain tuple inputs.
    """
    x, y = v
    mag = math.sqrt(x**2 + y**2)
    if mag < 1e-8:
        return Vector2(0, 0)
    else:
        return Vector2(x/mag, y/mag)
# end def
def normalToPlane(v1, v2, v3):
""" Calculate unit normal to the normal to
the plane defined by vertices v1, v2, and v3
"""
def subVector(a, b): return Vector3(a.x - b.x, a.y - b.y, a.z - b.z)
a = subVector(v3, v2)
b = subVector(v1, v2)
return crossProduct(a, b)
# end def
def applyMatrix3(m, v):
x = m[0] * v.x + m[1] * v.y + m[2] * v.z
y = m[3] * v.x + m[4] * v.y + m[5] * v.z
z = m[6] * v.x + m[7] * v.y + m[8] * v.z
return Vector3(x, y, z)
# end def
def applyMatrix4(m, v):
x = m[0] * v.x + m[1] * v.y + m[2] * v.z + m[3]
y = m[4] * v.x + m[5] * v.y + m[6] * v.z + m[7]
z = m[8] * v.x + m[9] * v.y + m[10] * v.z + m[11]
return Vector3(x, y, z)
# end def
def v3SetX(v, x):
return Vector3(x, v.y, v.z)
def v3SetY(v, y):
return Vector3(v.x, y, v.z)
def v3SetZ(v, z):
return Vector3(v.x, v.y, z)
def addVectors(v1, v2):
return Vector3(v1.x+v2.x, v1.y+v2.y, v1.z+v2.z)
def subVectors(v1, v2):
""" return v1 - v2
"""
return Vector3(v1.x-v2.x, v1.y-v2.y, v1.z-v2.z)
def multiplyScalar(v, s):
""" return v1*s
"""
return Vector3(v.x*s, v.y*s, v.z*s)
def v2DistanceAndAngle(a, b):
dx = b[0] - a[0]
dy = b[1] - a[1]
dist = math.sqrt(dx*dx + dy*dy)
angle = math.atan2(dy, dx)
return dist, angle
def v2dot(a, b):
return a[0]*b[0]+a[1]*b[1]
def v2AngleBetween(a, b):
    """ Return the angle in radians between 2D vectors a and b.

    The previous version discarded the dot product, unpacked ``a`` twice
    (``xa, xa = a`` and ``xb, yb = a``), and referenced undefined names
    (``ya``, ``num``), so it always raised NameError. Both inputs are
    normalized first, so the angle is acos of their dot product; the dot
    product is clamped to [-1, 1] to guard against floating-point drift.
    """
    a = normalizeV2(a)
    b = normalizeV2(b)
    d = max(-1.0, min(1.0, v2dot(a, b)))
    return math.acos(d)
# end def
# end def | 0.72027 | 0.746486 |
import h5py, yaml, re, cPickle, shutil
from control4.misc.console_utils import mkdirp,yes_or_no
from collections import OrderedDict
import os.path as osp
class ScriptRun(object):
    """One configured run of an experiment script.

    Wraps a dict of script parameters (``info``) plus identifying metadata
    and renders the full shell command line for the run via get_cmd().
    """
    def __init__(self, info, script_name, run_idx, out_root):
        # Copy so that the pop() calls below do not mutate the caller's dict.
        info = info.copy()
        self.info = info
        self.script_name = script_name
        self.run_idx = run_idx
        self.out_root = out_root
        # The output root must resolve to an absolute path (env vars allowed).
        assert osp.expandvars(out_root).startswith("/")
        self.test_name = self._expand_vars(info.pop("test_name","unnamed"))
        self.cfg_name = self._expand_vars(info.pop("cfg_name","unnamed"))
        self.command = info.pop("command").strip()
        self.add_extra_args = info.pop("add_extra_args",1)
        # Default output file name encodes the script name and run index.
        outfile_basename = info.pop("outfile",self.script_name+"_RUN%.2i.h5"%(self.run_idx))
        self.outfile =osp.join(self.out_root,outfile_basename)
    def _expand_vars(self, s):
        """Expand the single supported placeholder ``$script_name``; any
        other ``$``-prefixed value is rejected."""
        if s.startswith("$"):
            if s=="$script_name":
                return self.script_name
            else:
                raise RuntimeError("unrecognized variable %s"%s)
        else:
            return s
    def get_cmd(self, pipe_to_logfile="off"):
        """Render the shell command line for this run.

        pipe_to_logfile: "stdout" pipes stdout to <outfile>.log, "all" pipes
        both streams ("&>"), "off" adds no redirection. NOTE(review): any
        other value leaves pipe_str unbound and raises NameError below.
        """
        li = [self.command]
        li.extend(["--%s=%s"%(par,val) for (par,val) in self.info.items()])
        if self.add_extra_args:
            li.extend(["--seed=%i"%self.run_idx, "--outfile=%s"%self.outfile, "--metadata=cfg_name=%s,test_name=%s,script_name=%s"%(self.cfg_name,self.test_name,self.script_name)])
        if pipe_to_logfile == "stdout":
            pipe_str = ">"
        elif pipe_to_logfile == "all":
            pipe_str = "&>"
        elif pipe_to_logfile == "off":
            # pipe_str = ""
            pass
        if pipe_to_logfile != "off": li.append("%s %s.log\n"%(pipe_str,self.outfile))
        return " ".join(li)
def assert_script_runs_different(srs):
    """Verify that no two ScriptRun objects in ``srs`` would execute with
    identical parameters (info dict, run index, and command); raises
    AssertionError naming one offending pair.

    NOTE(review): runs sharing the same script_name overwrite each other in
    this dict, so only one entry per script name is compared -- confirm
    callers guarantee unique script names.
    """
    scriptname2info = {sr.script_name:(sr.info,sr.run_idx,sr.command) for sr in srs}
    badkeypair = None
    valhash2key = {}
    for (k,v) in scriptname2info.items():
        # Pickle bytes serve as a hashable fingerprint of the (dict, int,
        # str) triple, which is not hashable directly.
        valhash = cPickle.dumps(v)
        if valhash in valhash2key:
            badkeypair = (k,valhash2key[valhash])
        valhash2key[valhash] = k
    if badkeypair is not None:
        raise AssertionError(
            "Two scripts are being run with the exact same parameters: %s and %s"%badkeypair)
def prepare_dir_for_experiment(out_root,allow_continue=False):
    """Ensure ``out_root`` exists and is ready for a new experiment.

    If the directory already exists and allow_continue is False, prompt the
    user to delete it; raises IOError if they refuse.
    """
    if osp.exists(out_root) and not allow_continue:
        yn = yes_or_no("%s exists. delete?"%out_root)
        if yn: shutil.rmtree(out_root)
        else: raise IOError
    mkdirp(out_root)
def ordered_load(stream):
    """Parse YAML from ``stream`` preserving mapping key order (and
    supporting the ``!include`` tag) via CustomYamlLoader."""
    return yaml.load(stream, CustomYamlLoader)
class CustomYamlLoader(yaml.Loader):
    """
    Ordered load: http://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts/21048064#21048064
    Include: http://stackoverflow.com/questions/528281/how-can-i-include-an-yaml-file-inside-another
    """
    def __init__(self, stream):
        # Remember the directory of the stream's file so !include paths can
        # be resolved relative to the including document.
        self._root = osp.split(stream.name)[0]
        yaml.Loader.__init__(self,stream)
    def include(self, node):
        """Handle the ``!include <path>`` tag: load another YAML file,
        resolved relative to the current document's directory (with the
        plain loader, so nested mappings are ordinary dicts)."""
        filename = osp.join(self._root, self.construct_scalar(node))
        with open(filename, 'r') as f:
            return yaml.load(f, yaml.Loader)
CustomYamlLoader.add_constructor('!include', CustomYamlLoader.include)
def load_node(loader,node):
    """Construct YAML mappings as OrderedDict so key order is preserved."""
    loader.flatten_mapping(node)
    return OrderedDict(loader.construct_pairs(node))
CustomYamlLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, load_node)
def last_if_list(l):
    """Return the final element of a sized sequence, or ``l`` itself when it
    is a scalar (has no __len__)."""
    return l[-1] if hasattr(l, "__len__") else l
def extract_scalar_stats(fname, testinfo):
    """Evaluate each expression in testinfo["stats"] against the HDF5 file
    ``fname`` (exposed to the expression as ``hdf``), reducing list-like
    results to their last element.

    NOTE(review): uses eval() on config-supplied expressions -- only safe
    for trusted test configs, never for untrusted input.
    """
    hdf = h5py.File(fname,"r")
    return [last_if_list(eval(expr, dict(), dict(hdf=hdf))) for expr in testinfo["stats"].values()]
def extract_series_stats(fname, exprs):
    """Evaluate each expression in ``exprs`` against the HDF5 file ``fname``
    (exposed as ``hdf``), returning the raw results.

    NOTE(review): uses eval() on config-supplied expressions -- only safe
    for trusted test configs, never for untrusted input.
    """
    hdf = h5py.File(fname,"r")
    return [eval(expr, dict(), dict(hdf=hdf)) for expr in exprs]
def list_tests(testinfos):
    """Print an indexed listing of test names (Python 2 print statement)."""
    for (i,test) in enumerate(testinfos):
        print "%4i %s"%(i,test["name"])
def increase_suffix(fname):
    """Return ``fname`` with its trailing ``-<number>`` suffix incremented,
    or with ``-1`` appended when no such suffix exists."""
    suffix_pat = "-([0-9]+)$"
    found = re.findall(suffix_pat, fname)
    if not found:
        return fname + "-1"
    # The $-anchored pattern can match at most once.
    assert len(found) == 1
    next_value = int(found[0]) + 1
    return re.sub(suffix_pat, "-%d" % next_value, fname)
def test_increase_suffix():
    """Smoke-test increase_suffix: append, nested dashes, and increment."""
    assert increase_suffix("/x/y/z") == "/x/y/z-1"
    assert increase_suffix("/x/y-1/z") == "/x/y-1/z-1"
    assert increase_suffix("/x/y-1/z-1") == "/x/y-1/z-2"
def get_next_suffixed_dir(dir): #pylint: disable=W0622
    """Return ``dir`` if unused, otherwise ``dir-<k>`` where k is one past
    the highest existing numeric suffix found among ``dir-0`` .. ``dir-19``.

    NOTE(review): only suffixes 0..19 are probed (Python 2 ``xrange``);
    beyond that an existing directory name may be returned.
    """
    n = 0
    for i in xrange(20):
        if osp.exists(dir+"-"+str(i)):
            n=i+1
    if n==0 and not osp.exists(dir):
        return dir
    else:
        return dir + "-"+str(n)
def experiment_h5name(testname, i_run):
    """Return the canonical HDF5 file name for run ``i_run`` of ``testname``."""
    return "%s_RUN%i.h5" % (testname, i_run)
if __name__ == "__main__":
test_increase_suffix() | control4/bench/bench.py | import h5py, yaml, re, cPickle, shutil
from control4.misc.console_utils import mkdirp,yes_or_no
from collections import OrderedDict
import os.path as osp
class ScriptRun(object):
def __init__(self, info, script_name, run_idx, out_root):
info = info.copy()
self.info = info
self.script_name = script_name
self.run_idx = run_idx
self.out_root = out_root
assert osp.expandvars(out_root).startswith("/")
self.test_name = self._expand_vars(info.pop("test_name","unnamed"))
self.cfg_name = self._expand_vars(info.pop("cfg_name","unnamed"))
self.command = info.pop("command").strip()
self.add_extra_args = info.pop("add_extra_args",1)
outfile_basename = info.pop("outfile",self.script_name+"_RUN%.2i.h5"%(self.run_idx))
self.outfile =osp.join(self.out_root,outfile_basename)
def _expand_vars(self, s):
if s.startswith("$"):
if s=="$script_name":
return self.script_name
else:
raise RuntimeError("unrecognized variable %s"%s)
else:
return s
def get_cmd(self, pipe_to_logfile="off"):
li = [self.command]
li.extend(["--%s=%s"%(par,val) for (par,val) in self.info.items()])
if self.add_extra_args:
li.extend(["--seed=%i"%self.run_idx, "--outfile=%s"%self.outfile, "--metadata=cfg_name=%s,test_name=%s,script_name=%s"%(self.cfg_name,self.test_name,self.script_name)])
if pipe_to_logfile == "stdout":
pipe_str = ">"
elif pipe_to_logfile == "all":
pipe_str = "&>"
elif pipe_to_logfile == "off":
# pipe_str = ""
pass
if pipe_to_logfile != "off": li.append("%s %s.log\n"%(pipe_str,self.outfile))
return " ".join(li)
def assert_script_runs_different(srs):
scriptname2info = {sr.script_name:(sr.info,sr.run_idx,sr.command) for sr in srs}
badkeypair = None
valhash2key = {}
for (k,v) in scriptname2info.items():
valhash = cPickle.dumps(v)
if valhash in valhash2key:
badkeypair = (k,valhash2key[valhash])
valhash2key[valhash] = k
if badkeypair is not None:
raise AssertionError(
"Two scripts are being run with the exact same parameters: %s and %s"%badkeypair)
def prepare_dir_for_experiment(out_root,allow_continue=False):
if osp.exists(out_root) and not allow_continue:
yn = yes_or_no("%s exists. delete?"%out_root)
if yn: shutil.rmtree(out_root)
else: raise IOError
mkdirp(out_root)
def ordered_load(stream):
return yaml.load(stream, CustomYamlLoader)
class CustomYamlLoader(yaml.Loader):
"""
Ordered load: http://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts/21048064#21048064
Include: http://stackoverflow.com/questions/528281/how-can-i-include-an-yaml-file-inside-another
"""
def __init__(self, stream):
self._root = osp.split(stream.name)[0]
yaml.Loader.__init__(self,stream)
def include(self, node):
filename = osp.join(self._root, self.construct_scalar(node))
with open(filename, 'r') as f:
return yaml.load(f, yaml.Loader)
CustomYamlLoader.add_constructor('!include', CustomYamlLoader.include)
def load_node(loader,node):
loader.flatten_mapping(node)
return OrderedDict(loader.construct_pairs(node))
CustomYamlLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, load_node)
def last_if_list(l):
if hasattr(l, "__len__"):
return l[-1]
else:
return l
def extract_scalar_stats(fname, testinfo):
hdf = h5py.File(fname,"r")
return [last_if_list(eval(expr, dict(), dict(hdf=hdf))) for expr in testinfo["stats"].values()]
def extract_series_stats(fname, exprs):
hdf = h5py.File(fname,"r")
return [eval(expr, dict(), dict(hdf=hdf)) for expr in exprs]
def list_tests(testinfos):
for (i,test) in enumerate(testinfos):
print "%4i %s"%(i,test["name"])
def increase_suffix(fname):
pat = "-([0-9]+)$"
current_suffix_singleton = re.findall(pat, fname)
if current_suffix_singleton:
assert len(current_suffix_singleton) == 1
current_suffix = current_suffix_singleton[0]
i = int(current_suffix)
return re.sub(pat, "-"+str(i+1), fname)
else:
return fname + "-1"
def test_increase_suffix():
assert increase_suffix("/x/y/z") == "/x/y/z-1"
assert increase_suffix("/x/y-1/z") == "/x/y-1/z-1"
assert increase_suffix("/x/y-1/z-1") == "/x/y-1/z-2"
def get_next_suffixed_dir(dir): #pylint: disable=W0622
n = 0
for i in xrange(20):
if osp.exists(dir+"-"+str(i)):
n=i+1
if n==0 and not osp.exists(dir):
return dir
else:
return dir + "-"+str(n)
def experiment_h5name(testname, i_run):
return testname+"_RUN%i"%i_run+".h5"
if __name__ == "__main__":
test_increase_suffix() | 0.199854 | 0.140395 |
import torch
import torch.nn as nn
import numpy as np
import copy
from typing import Any, ClassVar, Dict, List, Optional, Sequence, Type, Union
from d3rlpy.models.encoders import EncoderFactory, Encoder, VectorEncoderWithAction, _create_activation, VectorEncoder
class CustomVectorEncoder(VectorEncoder):
    """Vector encoder whose input's trailing ``mask_size`` features carry
    previously chosen action ids plus the current step index.

    Those trailing ids are embedded (32-dim each) and concatenated onto the
    MLP features; when ``with_q`` is set, a final linear layer produces
    per-action Q-values with invalid actions zeroed out.
    """
    def __init__(
        self,
        config,
        action_size,
        mask_size,
        with_q,
        observation_shape: Sequence[int],
        hidden_units: Optional[Sequence[int]] = None,
        use_batch_norm: bool = False,
        dropout_rate: Optional[float] = None,
        use_dense: bool = False,
        activation: nn.Module = nn.ReLU(),
    ):
        super().__init__(observation_shape, hidden_units, use_batch_norm, dropout_rate, use_dense, activation)
        self.action_size = action_size
        self.mask_size = mask_size
        self.with_q = with_q
        # Embedding dimension for each trailing action-id feature.
        self.emb_size = 32
        self.emb_layer = nn.Embedding(action_size, self.emb_size)
        # Q-head over MLP features + all action embeddings (used when with_q).
        self.fc2 = nn.Linear(self._feature_size + self.emb_size * mask_size, action_size)
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # config['location_mask'] is indexed by (cur_step % 9) // 3 in
        # forward() -- presumably one action-validity row per slot layer;
        # TODO confirm against the config schema.
        location_mask = config['location_mask']
        self.special_items = config['special_items']
        self.location_mask = torch.tensor(location_mask, device=self.device)
    def get_feature_size(self) -> int:
        # With the Q-head the encoder's output is one value per action;
        # otherwise it is the MLP features plus all action embeddings.
        if not self.with_q:
            return self._feature_size + self.emb_size * self.mask_size
        else:
            return self.action_size
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Encode a batch of observations; the last ``mask_size`` columns of
        ``x`` are action ids, except the final column (current step)."""
        batch_size = x.shape[0]
        # mask
        prev_actions = x[:, -self.mask_size:-1].to(torch.long)
        cur_step = x[:, -1].to(torch.long)
        # Mask layer cycles every 9 steps, changing every 3 steps.
        x_mask_layer = cur_step % 9 // 3
        mask = self.location_mask[x_mask_layer]
        # Forbid re-selecting any previously chosen action.
        for i in range(self.mask_size-1):
            mask[range(batch_size), prev_actions[:, i]] = 0
        h = self._fc_encode(x)
        if self._use_batch_norm:
            h = self._bns[-1](h)
        if self._dropout_rate is not None:
            h = self._dropouts[-1](h)
        # Embed all trailing ids (previous actions AND the step column) and
        # flatten to (batch, mask_size * emb_size).
        prev_action_emb = nn.Flatten()(self.emb_layer(x[:, -self.mask_size:].to(torch.long)))
        h = torch.cat([h, prev_action_emb], dim=-1)
        if self.with_q:
            h = self.fc2(h)
            action_mask = mask < 0.01
            # Invalid actions get Q-value 0 (the -2**15 variant is disabled).
            # h[action_mask] = -2 ** 15
            h[action_mask] = 0
            # Once any special item was already picked, zero out all
            # special-item Q-values for that sample.
            for i in range(batch_size):
                if len(np.intersect1d(prev_actions[i].cpu().numpy(), self.special_items)) > 0:
                    h[i][self.special_items] = 0
            # h[i][self.special_items] = -2 ** 15
        return h
class CustomVectorEncoderFactory(EncoderFactory):
    """d3rlpy EncoderFactory that builds CustomVectorEncoder instances for
    observation encoders and plain VectorEncoderWithAction for
    encoder-with-action requests."""
    TYPE: ClassVar[str] = "vector"
    _hidden_units: Sequence[int]
    _activation: str
    _use_batch_norm: bool
    _dropout_rate: Optional[float]
    _use_dense: bool
    def __init__(
        self,
        config,
        action_size,
        mask_size,
        with_q=False,
        hidden_units: Optional[Sequence[int]] = None,
        activation: str = "relu",
        use_batch_norm: bool = False,
        dropout_rate: Optional[float] = None,
        use_dense: bool = False,
    ):
        self.config = config
        self.action_size = action_size
        self.mask_size = mask_size
        self.with_q = with_q
        # Default to a single hidden layer of 256 units.
        if hidden_units is None:
            self._hidden_units = [256]
        else:
            self._hidden_units = hidden_units
        self._activation = activation
        self._use_batch_norm = use_batch_norm
        self._dropout_rate = dropout_rate
        self._use_dense = use_dense
    def create(self, observation_shape: Sequence[int]) -> CustomVectorEncoder:
        """Build a CustomVectorEncoder for a flat (1-D) observation space."""
        assert len(observation_shape) == 1
        return CustomVectorEncoder(
            config=self.config,
            action_size=self.action_size,
            mask_size=self.mask_size,
            with_q=self.with_q,
            observation_shape=observation_shape,
            hidden_units=self._hidden_units,
            use_batch_norm=self._use_batch_norm,
            dropout_rate=self._dropout_rate,
            use_dense=self._use_dense,
            activation=_create_activation(self._activation),
        )
    def create_with_action(
        self,
        observation_shape: Sequence[int],
        action_size: int,
        discrete_action: bool = False,
    ) -> VectorEncoderWithAction:
        """Build a standard observation+action encoder (the custom masking
        logic is only applied to the observation-only encoder above)."""
        assert len(observation_shape) == 1
        return VectorEncoderWithAction(
            observation_shape=observation_shape,
            action_size=action_size,
            hidden_units=self._hidden_units,
            use_batch_norm=self._use_batch_norm,
            dropout_rate=self._dropout_rate,
            use_dense=self._use_dense,
            discrete_action=discrete_action,
            activation=_create_activation(self._activation),
        )
    def get_params(self, deep: bool = False) -> Dict[str, Any]:
        """Return serializable factory parameters.

        NOTE(review): config/action_size/mask_size/with_q are NOT included,
        so reconstructing the factory from these params alone cannot
        reproduce it -- confirm against d3rlpy's serialization expectations.
        """
        if deep:
            hidden_units = copy.deepcopy(self._hidden_units)
        else:
            hidden_units = self._hidden_units
        params = {
            "hidden_units": hidden_units,
            "activation": self._activation,
            "use_batch_norm": self._use_batch_norm,
            "dropout_rate": self._dropout_rate,
            "use_dense": self._use_dense,
        }
        return params
import torch.nn as nn
import numpy as np
import copy
from typing import Any, ClassVar, Dict, List, Optional, Sequence, Type, Union
from d3rlpy.models.encoders import EncoderFactory, Encoder, VectorEncoderWithAction, _create_activation, VectorEncoder
class CustomVectorEncoder(VectorEncoder):
def __init__(
self,
config,
action_size,
mask_size,
with_q,
observation_shape: Sequence[int],
hidden_units: Optional[Sequence[int]] = None,
use_batch_norm: bool = False,
dropout_rate: Optional[float] = None,
use_dense: bool = False,
activation: nn.Module = nn.ReLU(),
):
super().__init__(observation_shape, hidden_units, use_batch_norm, dropout_rate, use_dense, activation)
self.action_size = action_size
self.mask_size = mask_size
self.with_q = with_q
self.emb_size = 32
self.emb_layer = nn.Embedding(action_size, self.emb_size)
self.fc2 = nn.Linear(self._feature_size + self.emb_size * mask_size, action_size)
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
location_mask = config['location_mask']
self.special_items = config['special_items']
self.location_mask = torch.tensor(location_mask, device=self.device)
def get_feature_size(self) -> int:
if not self.with_q:
return self._feature_size + self.emb_size * self.mask_size
else:
return self.action_size
def forward(self, x: torch.Tensor) -> torch.Tensor:
batch_size = x.shape[0]
# mask
prev_actions = x[:, -self.mask_size:-1].to(torch.long)
cur_step = x[:, -1].to(torch.long)
x_mask_layer = cur_step % 9 // 3
mask = self.location_mask[x_mask_layer]
for i in range(self.mask_size-1):
mask[range(batch_size), prev_actions[:, i]] = 0
h = self._fc_encode(x)
if self._use_batch_norm:
h = self._bns[-1](h)
if self._dropout_rate is not None:
h = self._dropouts[-1](h)
prev_action_emb = nn.Flatten()(self.emb_layer(x[:, -self.mask_size:].to(torch.long)))
h = torch.cat([h, prev_action_emb], dim=-1)
if self.with_q:
h = self.fc2(h)
action_mask = mask < 0.01
# h[action_mask] = -2 ** 15
h[action_mask] = 0
for i in range(batch_size):
if len(np.intersect1d(prev_actions[i].cpu().numpy(), self.special_items)) > 0:
h[i][self.special_items] = 0
# h[i][self.special_items] = -2 ** 15
return h
class CustomVectorEncoderFactory(EncoderFactory):
TYPE: ClassVar[str] = "vector"
_hidden_units: Sequence[int]
_activation: str
_use_batch_norm: bool
_dropout_rate: Optional[float]
_use_dense: bool
def __init__(
self,
config,
action_size,
mask_size,
with_q=False,
hidden_units: Optional[Sequence[int]] = None,
activation: str = "relu",
use_batch_norm: bool = False,
dropout_rate: Optional[float] = None,
use_dense: bool = False,
):
self.config = config
self.action_size = action_size
self.mask_size = mask_size
self.with_q = with_q
if hidden_units is None:
self._hidden_units = [256]
else:
self._hidden_units = hidden_units
self._activation = activation
self._use_batch_norm = use_batch_norm
self._dropout_rate = dropout_rate
self._use_dense = use_dense
def create(self, observation_shape: Sequence[int]) -> CustomVectorEncoder:
assert len(observation_shape) == 1
return CustomVectorEncoder(
config=self.config,
action_size=self.action_size,
mask_size=self.mask_size,
with_q=self.with_q,
observation_shape=observation_shape,
hidden_units=self._hidden_units,
use_batch_norm=self._use_batch_norm,
dropout_rate=self._dropout_rate,
use_dense=self._use_dense,
activation=_create_activation(self._activation),
)
def create_with_action(
self,
observation_shape: Sequence[int],
action_size: int,
discrete_action: bool = False,
) -> VectorEncoderWithAction:
assert len(observation_shape) == 1
return VectorEncoderWithAction(
observation_shape=observation_shape,
action_size=action_size,
hidden_units=self._hidden_units,
use_batch_norm=self._use_batch_norm,
dropout_rate=self._dropout_rate,
use_dense=self._use_dense,
discrete_action=discrete_action,
activation=_create_activation(self._activation),
)
def get_params(self, deep: bool = False) -> Dict[str, Any]:
if deep:
hidden_units = copy.deepcopy(self._hidden_units)
else:
hidden_units = self._hidden_units
params = {
"hidden_units": hidden_units,
"activation": self._activation,
"use_batch_norm": self._use_batch_norm,
"dropout_rate": self._dropout_rate,
"use_dense": self._use_dense,
}
return params | 0.95156 | 0.460532 |
from timo.decorators import timer
from timo.utils import colored_print
from timo.utils import equals
from timo.utils import get_command_black_list
from typing import List
from typing import NoReturn
import platform
import subprocess
import shlex
class CommandRunner(object):
    """Perform tests."""
    def _convert_byte_to_string(self, byte_string: bytes) -> str:
        """
        Decodes Byte format strings and returns them as regular strings.

        Parameters:
            byte_string(bytes): String in Byte format.

        Returns:
            str: String decoded to utf-8 or cp949.

            On Windows the bytes are decoded as 'CP949'; on every other
            platform (Linux, Mac OS, ...) they are decoded as 'utf-8'.
        """
        # Bug fix: the previous version fell off the end (implicitly
        # returning None) on platforms other than Windows/Linux/Darwin;
        # now utf-8 is the default for all non-Windows systems.
        if platform.system() == 'Windows':
            return byte_string.decode('CP949')
        return byte_string.decode('utf-8')
    @timer
    def run(self, command: str) -> None:
        """
        Execute the command.

        Parameters:
            command(str): Command to execute.
        """
        colored_print(f'Run: {command}', 'yellow')  # echo the command
        # Refuse to run blacklisted (unsupported) commands.
        if command in get_command_black_list():
            colored_print('Out: Not supports', 'red')
            return
        # shell=False with shlex.split avoids shell injection; stderr is
        # merged into stdout so a single stream is captured.
        popen = subprocess.Popen(args=shlex.split(command), shell=False, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
        stdout, _ = popen.communicate()
        colorname = 'green'
        result = self._convert_byte_to_string(stdout)
        # Show the literal 'None' in red when the command produced no output.
        if result.replace('\n', '').replace('\r', '') == '':
            result = 'None'
            colorname = 'red'
        colored_print('Out:', colorname, end=' ')
        colored_print(f'{result}', 'white')  # print the command's output
    def run_all(self, command_list: List) -> None:
        """
        Executes all commands in the command list.

        Parameters:
            command_list(List): List of commands to be executed.
        """
        if equals(len(command_list), 0):
            colored_print('No command found.', 'orange')
        else:
            for command in command_list:
                self.run(command)
if __name__ == "__main__":
c = CommandRunner()
c.run_all(['python -V', 'pip -V', 'git --version', 'dir', 'pwd']) | timo/test_manager/command_runner.py | from timo.decorators import timer
from timo.utils import colored_print
from timo.utils import equals
from timo.utils import get_command_black_list
from typing import List
from typing import NoReturn
import platform
import subprocess
import shlex
class CommandRunner(object):
"""Perform tests."""
def _convert_byte_to_string(self, byte_string: str) -> str:
"""
Decodes Byte format strings and returns them as regular strings.
Parameters:
byte_string(b_str): String in Byte format.
Returns:
str: String decoded to utf-8 or cp949.
If the execution environment is'Windows', it is converted to'cp949'.
If the execution environment is'Linux' or 'Mac OS', it is converted to'utf-8'.
"""
_os = platform.system()
if _os == 'Windows':
return byte_string.decode('CP949')
if _os == 'Linux' or _os == 'Darwin':
return byte_string.decode('utf-8')
@timer
def run(self, command: str) -> NoReturn:
"""
Execute the command.
Parameters:
command(str): Command to execute.
"""
colored_print(f'Run: {command}', 'yellow') # 커맨드 출력
if command in get_command_black_list(): # 입력받은 커맨드가 실행하면 안되는 커맨드인지 체크
colored_print('Out: Not supports', 'red')
return # 실행하면 안되는 커맨드라면 실행하지 않고 함수 종료
popen = subprocess.Popen(args=shlex.split(command), shell=False, stderr=subprocess.STDOUT, stdout=subprocess.PIPE) # 커맨드를 실행함
stdout, _ = popen.communicate() # stderr 메세지는 무시한다
colorname = 'green'
result = self._convert_byte_to_string(stdout)
if result.replace('\n', '').replace('\r', '') == '': # 출력값이 아무것도 없는 경우에는 'None'으로 변경한다
result = 'None'
colorname = 'red'
colored_print('Out:', colorname, end=' ')
colored_print(f'{result}', 'white') # 커맨드 실행결과 출력
def run_all(self, command_list: List):
"""
Executes all commands in the command list.
Parameters:
command_list(str): List of commands to be executed.
"""
if equals(len(command_list), 0):
colored_print('No command found.', 'orange')
else:
for command in command_list:
self.run(command)
if __name__ == "__main__":
c = CommandRunner()
c.run_all(['python -V', 'pip -V', 'git --version', 'dir', 'pwd']) | 0.806777 | 0.2194 |
from viperid import app
import unittest
class ViperidTestCase(unittest.TestCase):
    """Integration tests for the viperid Flask app: POST a small Vyper
    contract to each compile endpoint and pin the exact JSON response.

    NOTE(review): the expected ABI/IR/bytecode fixtures are tied to one
    specific Vyper compiler version -- a compiler upgrade will break them.
    """
    # Minimal Vyper contract used by every test below.
    contract_1 = {
        'code': 'def foo(x: num) -> num:\n return x * 2'
    }
    def setUp(self):
        app.testing = True
    def test_compile_to_abi(self):
        """POST /abi/ returns the contract's ABI as JSON."""
        result = {'result': [{'constant': False, 'inputs': [{'name': 'x', 'type': 'int128'}], 'name': 'foo(int128)', 'outputs': [{'name': 'out', 'type': 'int128'}], 'payable': False, 'type': 'function'}]}
        with app.test_client() as c:
            rv = c.post('/abi/', json=self.contract_1)
            assert rv.status_code == 200
            assert rv.is_json
            assert rv.get_json() == result
    def test_compile_to_ir(self):
        """POST /ir/ returns the contract's LLL intermediate representation."""
        result = {'result': '[seq,\n [return,\n 0,\n [lll,\n [seq,\n [mstore, 28, [calldataload, 0]],\n [mstore, 32, 1461501637330902918203684832716283019655932542976],\n [mstore, 64, 340282366920938463463374607431768211455],\n [mstore, 96, -340282366920938463463374607431768211455],\n [mstore, 128, 3402823669209384634633746074317682114550000000000],\n [mstore, 160, -3402823669209384634633746074317682114550000000000],\n # Line 1\n [if,\n [eq, [mload, 0], 3650092561 <foo>],\n [seq,\n [calldatacopy, 320, 4, 32],\n [assert, [iszero, callvalue]],\n /* checking num input */ [clamp, [mload, 96], [calldataload, 4], [mload, 64]],\n # Line 2\n [mstore, 0, [mul, [mload, 320 <x>], 2]],\n [return, 0, 32],\n # Line 1\n stop]]],\n 0]]]'}
        with app.test_client() as c:
            rv = c.post('/ir/', json=self.contract_1)
            assert rv.status_code == 200
            assert rv.is_json
            assert rv.get_json() == result
    def test_compile_to_bytecode(self):
        """POST /bytecode/ returns the contract's EVM bytecode as hex."""
        result = {'result': '0x6100d956600035601c52740100000000000000000000000000000000000000006020526fffffffffffffffffffffffffffffffff6040527fffffffffffffffffffffffffffffffff000000000000000000000000000000016060527402540be3fffffffffffffffffffffffffdabf41c006080527ffffffffffffffffffffffffdabf41c00000000000000000000000002540be40060a05263d98ffe1160005114156100d457602060046101403734151558576060516004358060405190135857809190125857506002610140510260005260206000f3005b5b6100046100d9036100046000396100046100d9036000f3'}
        with app.test_client() as c:
            rv = c.post('/bytecode/', json=self.contract_1)
            assert rv.status_code == 200
            assert rv.is_json
            assert rv.get_json() == result
if __name__ == '__main__':
unittest.main() | backend/tests/tests_viperid.py | from viperid import app
import unittest
class ViperidTestCase(unittest.TestCase):
contract_1 = {
'code': 'def foo(x: num) -> num:\n return x * 2'
}
def setUp(self):
app.testing = True
def test_compile_to_abi(self):
result = {'result': [{'constant': False, 'inputs': [{'name': 'x', 'type': 'int128'}], 'name': 'foo(int128)', 'outputs': [{'name': 'out', 'type': 'int128'}], 'payable': False, 'type': 'function'}]}
with app.test_client() as c:
rv = c.post('/abi/', json=self.contract_1)
assert rv.status_code == 200
assert rv.is_json
assert rv.get_json() == result
def test_compile_to_ir(self):
result = {'result': '[seq,\n [return,\n 0,\n [lll,\n [seq,\n [mstore, 28, [calldataload, 0]],\n [mstore, 32, 1461501637330902918203684832716283019655932542976],\n [mstore, 64, 340282366920938463463374607431768211455],\n [mstore, 96, -340282366920938463463374607431768211455],\n [mstore, 128, 3402823669209384634633746074317682114550000000000],\n [mstore, 160, -3402823669209384634633746074317682114550000000000],\n # Line 1\n [if,\n [eq, [mload, 0], 3650092561 <foo>],\n [seq,\n [calldatacopy, 320, 4, 32],\n [assert, [iszero, callvalue]],\n /* checking num input */ [clamp, [mload, 96], [calldataload, 4], [mload, 64]],\n # Line 2\n [mstore, 0, [mul, [mload, 320 <x>], 2]],\n [return, 0, 32],\n # Line 1\n stop]]],\n 0]]]'}
with app.test_client() as c:
rv = c.post('/ir/', json=self.contract_1)
assert rv.status_code == 200
assert rv.is_json
assert rv.get_json() == result
def test_compile_to_bytecode(self):
result = {'result': '0x6100d956600035601c52740100000000000000000000000000000000000000006020526fffffffffffffffffffffffffffffffff6040527fffffffffffffffffffffffffffffffff000000000000000000000000000000016060527402540be3fffffffffffffffffffffffffdabf41c006080527ffffffffffffffffffffffffdabf41c00000000000000000000000002540be40060a05263d98ffe1160005114156100d457602060046101403734151558576060516004358060405190135857809190125857506002610140510260005260206000f3005b5b6100046100d9036100046000396100046100d9036000f3'}
with app.test_client() as c:
rv = c.post('/bytecode/', json=self.contract_1)
assert rv.status_code == 200
assert rv.is_json
assert rv.get_json() == result
if __name__ == '__main__':
unittest.main() | 0.445288 | 0.485478 |
import random
from tools import binExtend, crc
class networkLayer(object):
"""
networkLayer:ip package ipv4
"""
def __init__(self):
super(networkLayer, self).__init__()
self.outEncodeData = []
# self.outDecodeData = []
self.pacSize = 146*8 + 32*5
self.ipHeaderDict = {}
self.ipHeader = []
def netEncode(self, inputData):
version = 4
IHL = 5
diffServices = '10100100'#3 1 1 1 2
totalLength = self.pacSize
identification = 0
blank = '0'
DF = '1'
MF = '0'
fragOffset = 0*8
liveTime = 127
protocol = 6
oriIP = [
random.randint(1, 255),
random.randint(1, 255),
random.randint(1, 255),
random.randint(1, 255)
]
toIP = [
random.randint(1, 255),
random.randint(1, 255),
random.randint(1, 255),
random.randint(1, 255)
]
options = None
for li in inputData:
ipHeader = ''
versionT = binExtend(version, 4)
IHLT = binExtend(IHL, 4)
totalLengthT = binExtend(totalLength, 16)
identificationT = binExtend(identification, 16)
fragOffsetT = binExtend(fragOffset, 13)
liveTimeT = binExtend(liveTime, 8)
protocolT = binExtend(protocol, 8)
checkSumT = crc(li, 16)
oriIPT = ''
toIPT = ''
for o in oriIP:
oriIPT+=binExtend(o, 8)
for t in toIP:
toIPT+=binExtend(t, 8)
self.ipHeaderDict = {
'version':versionT,
'IHL':IHLT,
'diffServices':diffServices,
'totalLength':totalLengthT,
'identification':identificationT,
'blank':blank,
'DF':DF,
'MF':MF,
'fragOffset':fragOffsetT,
'liveTime':liveTimeT,
'protocol':protocolT,
'checkSum':checkSumT,
'oriIP':oriIPT,
'toIP':toIPT,
}
print("加入IP头信息:")
print(self.ipHeaderDict)
for ipH in self.ipHeaderDict:
ipHeader += self.ipHeaderDict[ipH]
self.ipHeader.append(ipHeader)
outEncodeData = ipHeader + li
self.outEncodeData.append(outEncodeData)
return self.outEncodeData
def netDecode(self, inputData):
j = 0
outDecodeDataL = []
for i in range(len(inputData)):
lenOfIpH = 160
outDecodeData = inputData[i][lenOfIpH:]
outDecodeDataL.append(outDecodeData)
return outDecodeDataL
if __name__ == '__main__':
i = ['01000110100011000110000111101100000000000000000000000000000000010000000000000000000000000000001001010000000100100000000000101000101001101110101000000000000000000110100001101000011010000110100001101000']
app = networkLayer()
a=app.netEncode(i)
print(a)
b= app.netDecode(a)
print(b == i) | chatroom/layers/networkLayer.py | import random
from tools import binExtend, crc
class networkLayer(object):
"""
networkLayer:ip package ipv4
"""
def __init__(self):
super(networkLayer, self).__init__()
self.outEncodeData = []
# self.outDecodeData = []
self.pacSize = 146*8 + 32*5
self.ipHeaderDict = {}
self.ipHeader = []
def netEncode(self, inputData):
version = 4
IHL = 5
diffServices = '10100100'#3 1 1 1 2
totalLength = self.pacSize
identification = 0
blank = '0'
DF = '1'
MF = '0'
fragOffset = 0*8
liveTime = 127
protocol = 6
oriIP = [
random.randint(1, 255),
random.randint(1, 255),
random.randint(1, 255),
random.randint(1, 255)
]
toIP = [
random.randint(1, 255),
random.randint(1, 255),
random.randint(1, 255),
random.randint(1, 255)
]
options = None
for li in inputData:
ipHeader = ''
versionT = binExtend(version, 4)
IHLT = binExtend(IHL, 4)
totalLengthT = binExtend(totalLength, 16)
identificationT = binExtend(identification, 16)
fragOffsetT = binExtend(fragOffset, 13)
liveTimeT = binExtend(liveTime, 8)
protocolT = binExtend(protocol, 8)
checkSumT = crc(li, 16)
oriIPT = ''
toIPT = ''
for o in oriIP:
oriIPT+=binExtend(o, 8)
for t in toIP:
toIPT+=binExtend(t, 8)
self.ipHeaderDict = {
'version':versionT,
'IHL':IHLT,
'diffServices':diffServices,
'totalLength':totalLengthT,
'identification':identificationT,
'blank':blank,
'DF':DF,
'MF':MF,
'fragOffset':fragOffsetT,
'liveTime':liveTimeT,
'protocol':protocolT,
'checkSum':checkSumT,
'oriIP':oriIPT,
'toIP':toIPT,
}
print("加入IP头信息:")
print(self.ipHeaderDict)
for ipH in self.ipHeaderDict:
ipHeader += self.ipHeaderDict[ipH]
self.ipHeader.append(ipHeader)
outEncodeData = ipHeader + li
self.outEncodeData.append(outEncodeData)
return self.outEncodeData
def netDecode(self, inputData):
j = 0
outDecodeDataL = []
for i in range(len(inputData)):
lenOfIpH = 160
outDecodeData = inputData[i][lenOfIpH:]
outDecodeDataL.append(outDecodeData)
return outDecodeDataL
if __name__ == '__main__':
i = ['01000110100011000110000111101100000000000000000000000000000000010000000000000000000000000000001001010000000100100000000000101000101001101110101000000000000000000110100001101000011010000110100001101000']
app = networkLayer()
a=app.netEncode(i)
print(a)
b= app.netDecode(a)
print(b == i) | 0.149904 | 0.258718 |
import numpy as np
import epipack as epk
import numpy as np
import networkx as nx
def make_equal_length(arr_list):
maxlen = max([len(a) for a in arr_list])
new_arr_list = []
for a in arr_list:
dL = maxlen - len(a)
if dL > 0:
newa = np.concatenate((a, np.ones(dL)*a[-1]))
else:
newa = a
new_arr_list.append(newa)
return new_arr_list
def simulation_code(kw):
S, E, I, R, X = list("SEIRX")
Sa, Ea, Ia, Ra, Xa = [letter+"a" for letter in "SEIRX"]
Za = "Za"
Ya = "Ya"
a = kw['a']
q = kw['q']
k0 = kw['k0']
I0 = kw['I0_prob']
alpha = kw['alpha']
R0 = kw['R0']
rho = kw['rho']
kappa = rho * q/(1-q)
N = kw['N']
p = kw['k0'] / (N-1)
G = nx.fast_gnp_random_graph(N, p)
edges = [ (u,v,1.) for u, v in G.edges() ]
#tmaxs = [40,40,40,1e300]
#Rscale = [1.0,0.4,1.0,0.4]
#tmaxs = [1e300]
#Rscale = [1.0]
tmaxs = kw['phases'][kw['phase']]['tmaxs']
Rscale = kw['phases'][kw['phase']]['Rscale']
delete_edges_instead_of_scaling_R = kw['delete_edges_instead_of_scaling_R']
_I0 = int(N*I0)
_I0a = int(a*_I0)
_I0 -= _I0a
_S0 = N - _I0 - _I0a
_S0a = int(_S0*a)
_S0 -= _S0a
node_statuses = None
timebin_ts = []
timebin_results = []
last_t = 0
if delete_edges_instead_of_scaling_R:
ndx = np.random.permutation(len(edges))
scrambled_edges = [ edges[i] for i in ndx ]
for iphase, (this_tmax, this_Rscale) in enumerate(zip(tmaxs, Rscale)):
if delete_edges_instead_of_scaling_R:
these_edges = scrambled_edges[:int(this_Rscale*len(edges))]
this_Rscale = 1
else:
these_edges = edges
model = epk.StochasticEpiModel([S,E,I,R,X,Sa,Ea,Ia,Ra,Xa,Ya,Za],N,edge_weight_tuples=these_edges)\
.set_node_transition_processes([
("Ea", alpha, "Ia"),
("Ia", rho,"Ra"),
("Ia", kappa, "Xa"),
("E", alpha, "I"),
("I", rho,"R"),
("I", kappa, "X"),
])\
.set_link_transmission_processes([
("Ia", "Sa", R0*rho/k0*this_Rscale, "Ia", "Ea"),
("Ia", "S", R0*rho/k0*this_Rscale, "Ia", "E"),
("I", "Sa", R0*rho/k0*this_Rscale, "I", "Ea"),
("I", "S", R0*rho/k0*this_Rscale, "I", "E"),
])\
.set_conditional_link_transmission_processes({
( "Ia", "->", "Xa" ) : [
("Xa", "Ia", "->", "Xa", "Ya"),
("Xa", "Ea", "->", "Xa", "Za"),
]
})
if node_statuses is None:
model.set_random_initial_conditions({
"Sa": _S0a,
"Ia": _I0a,
"S": _S0,
"I": _I0,
})
else:
model.set_node_statuses(node_statuses)
this_t, this_result = model.simulate(this_tmax+last_t,sampling_dt=1,t0=last_t)
if iphase < len(tmaxs)-1:
this_t = this_t[:-1]
this_result = { C: arr[:-1] for C, arr in this_result.items() }
last_t += this_tmax
node_statuses = model.node_status
timebin_ts.append(this_t)
timebin_results.append(this_result)
if len(tmaxs) > 1:
t = np.concatenate(timebin_ts)
this_result = { C: np.concatenate([res[C] for res in timebin_results]) for C in model.compartments }
else:
t = timebin_ts[0]
this_result = timebin_results[0]
return this_result
if __name__ == "__main__":
import matplotlib.pyplot as pl
import qsuite_config as cf
from pprint import pprint
kw = {}
for p in cf.external_parameters + cf.internal_parameters:
if p[0] is not None:
kw[p[0]] = p[1][0]
for p in cf.standard_parameters:
if p[0] is not None:
kw[p[0]] = p[1]
kw['phase'] = 'periodic lockdown'
kw['N'] = 20000
print("using config:")
pprint(kw)
print()
result = simulation_code(kw)
from epipack.plottools import plot
t = np.arange(len(result['S']))
plot(t, result)
pl.show() | analysis_collection/tracing_sim/results_toy_model/simulation.py | import numpy as np
import epipack as epk
import numpy as np
import networkx as nx
def make_equal_length(arr_list):
maxlen = max([len(a) for a in arr_list])
new_arr_list = []
for a in arr_list:
dL = maxlen - len(a)
if dL > 0:
newa = np.concatenate((a, np.ones(dL)*a[-1]))
else:
newa = a
new_arr_list.append(newa)
return new_arr_list
def simulation_code(kw):
S, E, I, R, X = list("SEIRX")
Sa, Ea, Ia, Ra, Xa = [letter+"a" for letter in "SEIRX"]
Za = "Za"
Ya = "Ya"
a = kw['a']
q = kw['q']
k0 = kw['k0']
I0 = kw['I0_prob']
alpha = kw['alpha']
R0 = kw['R0']
rho = kw['rho']
kappa = rho * q/(1-q)
N = kw['N']
p = kw['k0'] / (N-1)
G = nx.fast_gnp_random_graph(N, p)
edges = [ (u,v,1.) for u, v in G.edges() ]
#tmaxs = [40,40,40,1e300]
#Rscale = [1.0,0.4,1.0,0.4]
#tmaxs = [1e300]
#Rscale = [1.0]
tmaxs = kw['phases'][kw['phase']]['tmaxs']
Rscale = kw['phases'][kw['phase']]['Rscale']
delete_edges_instead_of_scaling_R = kw['delete_edges_instead_of_scaling_R']
_I0 = int(N*I0)
_I0a = int(a*_I0)
_I0 -= _I0a
_S0 = N - _I0 - _I0a
_S0a = int(_S0*a)
_S0 -= _S0a
node_statuses = None
timebin_ts = []
timebin_results = []
last_t = 0
if delete_edges_instead_of_scaling_R:
ndx = np.random.permutation(len(edges))
scrambled_edges = [ edges[i] for i in ndx ]
for iphase, (this_tmax, this_Rscale) in enumerate(zip(tmaxs, Rscale)):
if delete_edges_instead_of_scaling_R:
these_edges = scrambled_edges[:int(this_Rscale*len(edges))]
this_Rscale = 1
else:
these_edges = edges
model = epk.StochasticEpiModel([S,E,I,R,X,Sa,Ea,Ia,Ra,Xa,Ya,Za],N,edge_weight_tuples=these_edges)\
.set_node_transition_processes([
("Ea", alpha, "Ia"),
("Ia", rho,"Ra"),
("Ia", kappa, "Xa"),
("E", alpha, "I"),
("I", rho,"R"),
("I", kappa, "X"),
])\
.set_link_transmission_processes([
("Ia", "Sa", R0*rho/k0*this_Rscale, "Ia", "Ea"),
("Ia", "S", R0*rho/k0*this_Rscale, "Ia", "E"),
("I", "Sa", R0*rho/k0*this_Rscale, "I", "Ea"),
("I", "S", R0*rho/k0*this_Rscale, "I", "E"),
])\
.set_conditional_link_transmission_processes({
( "Ia", "->", "Xa" ) : [
("Xa", "Ia", "->", "Xa", "Ya"),
("Xa", "Ea", "->", "Xa", "Za"),
]
})
if node_statuses is None:
model.set_random_initial_conditions({
"Sa": _S0a,
"Ia": _I0a,
"S": _S0,
"I": _I0,
})
else:
model.set_node_statuses(node_statuses)
this_t, this_result = model.simulate(this_tmax+last_t,sampling_dt=1,t0=last_t)
if iphase < len(tmaxs)-1:
this_t = this_t[:-1]
this_result = { C: arr[:-1] for C, arr in this_result.items() }
last_t += this_tmax
node_statuses = model.node_status
timebin_ts.append(this_t)
timebin_results.append(this_result)
if len(tmaxs) > 1:
t = np.concatenate(timebin_ts)
this_result = { C: np.concatenate([res[C] for res in timebin_results]) for C in model.compartments }
else:
t = timebin_ts[0]
this_result = timebin_results[0]
return this_result
if __name__ == "__main__":
import matplotlib.pyplot as pl
import qsuite_config as cf
from pprint import pprint
kw = {}
for p in cf.external_parameters + cf.internal_parameters:
if p[0] is not None:
kw[p[0]] = p[1][0]
for p in cf.standard_parameters:
if p[0] is not None:
kw[p[0]] = p[1]
kw['phase'] = 'periodic lockdown'
kw['N'] = 20000
print("using config:")
pprint(kw)
print()
result = simulation_code(kw)
from epipack.plottools import plot
t = np.arange(len(result['S']))
plot(t, result)
pl.show() | 0.181372 | 0.266229 |
import numpy as np
from torch import nn
from torch.nn import functional as F
class CELEBAgenerator(nn.Module):
def __init__(self, args):
super(CELEBAgenerator, self).__init__()
self._name = 'celebaG'
self.shape = (64, 64, 3)
self.dim = args.dim
preprocess = nn.Sequential(
nn.Linear(self.dim, 2* 4 * 4 * 4 * self.dim),
nn.BatchNorm1d(2 * 4 * 4 * 4 * self.dim),
nn.ReLU(True),
)
block1 = nn.Sequential(
nn.ConvTranspose2d(8 * self.dim, 4 * self.dim, 2, stride=2),
nn.BatchNorm2d(4 * self.dim),
nn.ReLU(True),
)
block2 = nn.Sequential(
nn.ConvTranspose2d(4 * self.dim, 2 * self.dim, 2, stride=2),
nn.BatchNorm2d(2 * self.dim),
nn.ReLU(True),
)
block3 = nn.Sequential(
nn.ConvTranspose2d(2 * self.dim, self.dim, 2, stride=2),
nn.BatchNorm2d(self.dim),
nn.ReLU(True),
)
deconv_out = nn.ConvTranspose2d(self.dim, 3, 2, stride=2)
self.preprocess = preprocess
self.block1 = block1
self.block2 = block2
self.block3 = block3
self.deconv_out = deconv_out
self.tanh = nn.Tanh()
def forward(self, input):
output = self.preprocess(input)
output = output.view(-1, 4 * 2 * self.dim, 4, 4)
output = self.block1(output)
output = self.block2(output)
output = self.block3(output)
output = self.deconv_out(output)
output = self.tanh(output)
output = output.view(-1, 3, 64, 64)
return output
class CIFARgenerator(nn.Module):
def __init__(self, args):
super(CIFARgenerator, self).__init__()
self._name = 'cifarG'
self.shape = (32, 32, 3)
self.dim = args.dim
preprocess = nn.Sequential(
nn.Linear(self.dim, 4 * 4 * 4 * self.dim),
nn.BatchNorm1d(4 * 4 * 4 * self.dim),
nn.ReLU(True),
)
block1 = nn.Sequential(
nn.ConvTranspose2d(4 * self.dim, 2 * self.dim, 2, stride=2),
nn.BatchNorm2d(2 * self.dim),
nn.ReLU(True),
)
block2 = nn.Sequential(
nn.ConvTranspose2d(2 * self.dim, self.dim, 2, stride=2),
nn.BatchNorm2d(self.dim),
nn.ReLU(True),
)
deconv_out = nn.ConvTranspose2d(self.dim, 3, 2, stride=2)
self.preprocess = preprocess
self.block1 = block1
self.block2 = block2
self.deconv_out = deconv_out
self.tanh = nn.Tanh()
def forward(self, input):
output = self.preprocess(input)
output = output.view(-1, 4 * self.dim, 4, 4)
output = self.block1(output)
output = self.block2(output)
output = self.deconv_out(output)
output = self.tanh(output)
return output.view(-1, 3*32*32)
class MNISTgenerator(nn.Module):
def __init__(self, args):
super(MNISTgenerator, self).__init__()
self._name = 'mnistG'
self.dim = args.dim
self.in_shape = int(np.sqrt(args.dim))
self.shape = (self.in_shape, self.in_shape, 1)
preprocess = nn.Sequential(
nn.Linear(self.dim, 4*4*4*self.dim),
nn.ReLU(True),
)
block1 = nn.Sequential(
nn.ConvTranspose2d(4*self.dim, 2*self.dim, 5),
nn.ReLU(True),
)
block2 = nn.Sequential(
nn.ConvTranspose2d(2*self.dim, self.dim, 5),
nn.ReLU(True),
)
deconv_out = nn.ConvTranspose2d(self.dim, 1, 8, stride=2)
self.block1 = block1
self.block2 = block2
self.deconv_out = deconv_out
self.preprocess = preprocess
self.sigmoid = nn.Sigmoid()
def forward(self, input):
output = self.preprocess(input)
output = output.view(-1, 4*self.dim, 4, 4)
output = self.block1(output)
output = output[:, :, :7, :7]
output = self.block2(output)
output = self.deconv_out(output)
output = self.sigmoid(output)
return output.view(-1, 784) | generators.py | import numpy as np
from torch import nn
from torch.nn import functional as F
class CELEBAgenerator(nn.Module):
def __init__(self, args):
super(CELEBAgenerator, self).__init__()
self._name = 'celebaG'
self.shape = (64, 64, 3)
self.dim = args.dim
preprocess = nn.Sequential(
nn.Linear(self.dim, 2* 4 * 4 * 4 * self.dim),
nn.BatchNorm1d(2 * 4 * 4 * 4 * self.dim),
nn.ReLU(True),
)
block1 = nn.Sequential(
nn.ConvTranspose2d(8 * self.dim, 4 * self.dim, 2, stride=2),
nn.BatchNorm2d(4 * self.dim),
nn.ReLU(True),
)
block2 = nn.Sequential(
nn.ConvTranspose2d(4 * self.dim, 2 * self.dim, 2, stride=2),
nn.BatchNorm2d(2 * self.dim),
nn.ReLU(True),
)
block3 = nn.Sequential(
nn.ConvTranspose2d(2 * self.dim, self.dim, 2, stride=2),
nn.BatchNorm2d(self.dim),
nn.ReLU(True),
)
deconv_out = nn.ConvTranspose2d(self.dim, 3, 2, stride=2)
self.preprocess = preprocess
self.block1 = block1
self.block2 = block2
self.block3 = block3
self.deconv_out = deconv_out
self.tanh = nn.Tanh()
def forward(self, input):
output = self.preprocess(input)
output = output.view(-1, 4 * 2 * self.dim, 4, 4)
output = self.block1(output)
output = self.block2(output)
output = self.block3(output)
output = self.deconv_out(output)
output = self.tanh(output)
output = output.view(-1, 3, 64, 64)
return output
class CIFARgenerator(nn.Module):
def __init__(self, args):
super(CIFARgenerator, self).__init__()
self._name = 'cifarG'
self.shape = (32, 32, 3)
self.dim = args.dim
preprocess = nn.Sequential(
nn.Linear(self.dim, 4 * 4 * 4 * self.dim),
nn.BatchNorm1d(4 * 4 * 4 * self.dim),
nn.ReLU(True),
)
block1 = nn.Sequential(
nn.ConvTranspose2d(4 * self.dim, 2 * self.dim, 2, stride=2),
nn.BatchNorm2d(2 * self.dim),
nn.ReLU(True),
)
block2 = nn.Sequential(
nn.ConvTranspose2d(2 * self.dim, self.dim, 2, stride=2),
nn.BatchNorm2d(self.dim),
nn.ReLU(True),
)
deconv_out = nn.ConvTranspose2d(self.dim, 3, 2, stride=2)
self.preprocess = preprocess
self.block1 = block1
self.block2 = block2
self.deconv_out = deconv_out
self.tanh = nn.Tanh()
def forward(self, input):
output = self.preprocess(input)
output = output.view(-1, 4 * self.dim, 4, 4)
output = self.block1(output)
output = self.block2(output)
output = self.deconv_out(output)
output = self.tanh(output)
return output.view(-1, 3*32*32)
class MNISTgenerator(nn.Module):
def __init__(self, args):
super(MNISTgenerator, self).__init__()
self._name = 'mnistG'
self.dim = args.dim
self.in_shape = int(np.sqrt(args.dim))
self.shape = (self.in_shape, self.in_shape, 1)
preprocess = nn.Sequential(
nn.Linear(self.dim, 4*4*4*self.dim),
nn.ReLU(True),
)
block1 = nn.Sequential(
nn.ConvTranspose2d(4*self.dim, 2*self.dim, 5),
nn.ReLU(True),
)
block2 = nn.Sequential(
nn.ConvTranspose2d(2*self.dim, self.dim, 5),
nn.ReLU(True),
)
deconv_out = nn.ConvTranspose2d(self.dim, 1, 8, stride=2)
self.block1 = block1
self.block2 = block2
self.deconv_out = deconv_out
self.preprocess = preprocess
self.sigmoid = nn.Sigmoid()
def forward(self, input):
output = self.preprocess(input)
output = output.view(-1, 4*self.dim, 4, 4)
output = self.block1(output)
output = output[:, :, :7, :7]
output = self.block2(output)
output = self.deconv_out(output)
output = self.sigmoid(output)
return output.view(-1, 784) | 0.957368 | 0.42483 |
import sqlalchemy
import sqlalchemy.orm
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Student(Base): # type: ignore
__tablename__ = 'student'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
name = sqlalchemy.Column(sqlalchemy.String(250), nullable=False)
email = sqlalchemy.Column(sqlalchemy.String(250), nullable=False)
def __repr__(self):
return ('<Student(id={0.id!r},'
' name={0.name!r},'
' email={0.email!r})>').format(
self)
class Assignment(Base): # type: ignore
__tablename__ = 'assignment'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
name = sqlalchemy.Column(sqlalchemy.String(250), nullable=False)
full_credit = sqlalchemy.Column(sqlalchemy.Float, nullable=False)
def __repr__(self):
return ('<Assignment(id={0.id!r},'
' name={0.name!r},'
' full_credit={0.full_credit!r})>')\
.format(self)
class Grade(Base): # type: ignore
__tablename__ = 'grade'
student_id = sqlalchemy.Column(sqlalchemy.Integer,
sqlalchemy.ForeignKey('student.id'),
primary_key=True)
student = sqlalchemy.orm.relationship('Student',
back_populates='grades')
assignment_id = sqlalchemy.Column(sqlalchemy.Integer,
sqlalchemy.ForeignKey('assignment.id'),
primary_key=True)
assignment = sqlalchemy.orm.relationship('Assignment',
back_populates='grades')
grade = sqlalchemy.Column(sqlalchemy.Float)
notes = sqlalchemy.Column(sqlalchemy.String(500))
def __repr__(self):
return ('<Grade(student_id={0.student_id!r},'
' assignment_id={0.assignment_id!r},'
' grade={0.grade!r}),'
' notes={0.notes!r}>')\
.format(self)
Student.grades = sqlalchemy.orm.relationship('Grade',
order_by=Grade.assignment_id,
back_populates='student')
Assignment.grades = sqlalchemy.orm.relationship('Grade',
order_by=Grade.student_id,
back_populates='assignment')
Session = sqlalchemy.orm.sessionmaker() | src/grader_toolkit/gradebook.py | import sqlalchemy
import sqlalchemy.orm
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Student(Base): # type: ignore
__tablename__ = 'student'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
name = sqlalchemy.Column(sqlalchemy.String(250), nullable=False)
email = sqlalchemy.Column(sqlalchemy.String(250), nullable=False)
def __repr__(self):
return ('<Student(id={0.id!r},'
' name={0.name!r},'
' email={0.email!r})>').format(
self)
class Assignment(Base): # type: ignore
__tablename__ = 'assignment'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
name = sqlalchemy.Column(sqlalchemy.String(250), nullable=False)
full_credit = sqlalchemy.Column(sqlalchemy.Float, nullable=False)
def __repr__(self):
return ('<Assignment(id={0.id!r},'
' name={0.name!r},'
' full_credit={0.full_credit!r})>')\
.format(self)
class Grade(Base): # type: ignore
__tablename__ = 'grade'
student_id = sqlalchemy.Column(sqlalchemy.Integer,
sqlalchemy.ForeignKey('student.id'),
primary_key=True)
student = sqlalchemy.orm.relationship('Student',
back_populates='grades')
assignment_id = sqlalchemy.Column(sqlalchemy.Integer,
sqlalchemy.ForeignKey('assignment.id'),
primary_key=True)
assignment = sqlalchemy.orm.relationship('Assignment',
back_populates='grades')
grade = sqlalchemy.Column(sqlalchemy.Float)
notes = sqlalchemy.Column(sqlalchemy.String(500))
def __repr__(self):
return ('<Grade(student_id={0.student_id!r},'
' assignment_id={0.assignment_id!r},'
' grade={0.grade!r}),'
' notes={0.notes!r}>')\
.format(self)
Student.grades = sqlalchemy.orm.relationship('Grade',
order_by=Grade.assignment_id,
back_populates='student')
Assignment.grades = sqlalchemy.orm.relationship('Grade',
order_by=Grade.student_id,
back_populates='assignment')
Session = sqlalchemy.orm.sessionmaker() | 0.376738 | 0.112967 |
from .tool.func import *
def main_func_setting_main(db_set):
with get_db_connect() as conn:
curs = conn.cursor()
if admin_check() != 1:
return re_error('/ban')
setting_list = {
0 : ['name', 'Wiki'],
2 : ['frontpage', 'FrontPage'],
4 : ['upload', '2'],
5 : ['skin', ''],
7 : ['reg', ''],
8 : ['ip_view', ''],
9 : ['back_up', '0'],
10 : ['port', '3000'],
11 : ['key', load_random_key()],
12 : ['update', 'stable'],
15 : ['encode', 'sha3'],
16 : ['host', '0.0.0.0'],
19 : ['slow_edit', '0'],
20 : ['requires_approval', ''],
21 : ['backup_where', ''],
22 : ['domain', flask.request.host],
23 : ['ua_get', ''],
24 : ['enable_comment', ''],
25 : ['enable_challenge', ''],
26 : ['edit_bottom_compulsion', ''],
27 : ['http_select', 'http'],
28 : ['title_max_length', ''],
29 : ['title_topic_max_length', '']
}
if flask.request.method == 'POST':
for i in setting_list:
curs.execute(db_change("update other set data = ? where name = ?"), [
flask.request.form.get(setting_list[i][0], setting_list[i][1]),
setting_list[i][0]
])
conn.commit()
admin_check(None, 'edit_set (main)')
return redirect('/setting/main')
else:
d_list = {}
for i in setting_list:
curs.execute(db_change('select data from other where name = ?'), [setting_list[i][0]])
db_data = curs.fetchall()
if not db_data:
curs.execute(db_change('insert into other (name, data) values (?, ?)'), [setting_list[i][0], setting_list[i][1]])
d_list[i] = db_data[0][0] if db_data else setting_list[i][1]
else:
conn.commit()
encode_select = ''
encode_select_data = ['sha256', 'sha3']
for encode_select_one in encode_select_data:
if encode_select_one == d_list[15]:
encode_select = '<option value="' + encode_select_one + '">' + encode_select_one + '</option>' + encode_select
else:
encode_select += '<option value="' + encode_select_one + '">' + encode_select_one + '</option>'
tls_select = ''
tls_select_data = ['http', 'https']
for tls_select_one in tls_select_data:
if tls_select_one == d_list[27]:
tls_select = '<option value="' + tls_select_one + '">' + tls_select_one + '</option>' + tls_select
else:
tls_select += '<option value="' + tls_select_one + '">' + tls_select_one + '</option>'
check_box_div = ['', '', '', '', '', '', '', '']
for i in range(0, len(check_box_div)):
if i == 0:
acl_num = 7
elif i == 1:
acl_num = 8
elif i == 3:
acl_num = 20
elif i == 4:
acl_num = 23
elif i == 5:
acl_num = 24
elif i == 6:
acl_num = 25
elif i == 7:
acl_num = 26
if d_list[acl_num]:
check_box_div[i] = 'checked="checked"'
branch_div = ''
branch_list = ['stable', 'dev', 'beta']
for i in branch_list:
if d_list[12] == i:
branch_div = '<option value="' + i + '">' + i + '</option>' + branch_div
else:
branch_div += '<option value="' + i + '">' + i + '</option>'
sqlite_only = 'style="display:none;"' if db_set != 'sqlite' else ''
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('main_setting'), wiki_set(), wiki_custom(), wiki_css([0, 0])],
data = '''
<form method="post" id="main_set_data">
<h2>1. ''' + load_lang('basic_set') + '''</h2>
<span>''' + load_lang('wiki_name') + '''</span>
<hr class="main_hr">
<input name="name" value="''' + html.escape(d_list[0]) + '''">
<hr class="main_hr">
<span><a href="/setting/main/logo">(''' + load_lang('wiki_logo') + ''')</a></span>
<hr class="main_hr">
<span>''' + load_lang('main_page') + '''</span>
<hr class="main_hr">
<input name="frontpage" value="''' + html.escape(d_list[2]) + '''">
<hr class="main_hr">
<span>''' + load_lang('tls_method') + '''</span>
<hr class="main_hr">
<select name="http_select">''' + tls_select + '''</select>
<hr class="main_hr">
<span>''' + load_lang('domain') + '''</span> (EX : 2du.pythonanywhere.com)
<hr class="main_hr">
<input name="domain" value="''' + html.escape(d_list[22]) + '''">
<hr class="main_hr">
<span>''' + load_lang('wiki_host') + '''</span>
<hr class="main_hr">
<input name="host" value="''' + html.escape(d_list[16]) + '''">
<hr class="main_hr">
<span>''' + load_lang('wiki_port') + '''</span>
<hr class="main_hr">
<input name="port" value="''' + html.escape(d_list[10]) + '''">
<hr class="main_hr">
<span>''' + load_lang('wiki_secret_key') + '''</span>
<hr class="main_hr">
<input type="password" name="key" value="''' + html.escape(d_list[11]) + '''">
<hr class="main_hr">
<span>''' + load_lang('encryption_method') + '''</span>
<hr class="main_hr">
<select name="encode">''' + encode_select + '''</select>
<h3>1.1. ''' + load_lang('communication_set') + '''</h3>
<input type="checkbox" name="enable_comment" ''' + check_box_div[5] + '''> ''' + load_lang('enable_comment_function') + ''' (''' + load_lang('not_working') + ''')
<hr class="main_hr">
<input type="checkbox" name="enable_challenge" ''' + check_box_div[6] + '''> ''' + load_lang('enable_challenge_function') + ''' (''' + load_lang('not_working') + ''')
<hr class="main_hr">
<h2>2. ''' + load_lang('design_set') + '''</h2>
<span>''' + load_lang('wiki_skin') + '''</span>
<hr class="main_hr">
<select name="skin">''' + load_skin(d_list[5] if d_list[5] != '' else 'tenshi') + '''</select>
<h2>3. ''' + load_lang('login_set') + '''</h2>
<input type="checkbox" name="reg" ''' + check_box_div[0] + '''> ''' + load_lang('no_register') + '''
<hr class="main_hr">
<input type="checkbox" name="ip_view" ''' + check_box_div[1] + '''> ''' + load_lang('hide_ip') + '''
<hr class="main_hr">
<input type="checkbox" name="requires_approval" ''' + check_box_div[3] + '''> ''' + load_lang('requires_approval') + '''
<hr class="main_hr">
<input type="checkbox" name="ua_get" ''' + check_box_div[4] + '''> ''' + load_lang('ua_get_off') + '''
<h2>4. ''' + load_lang('server_set') + '''</h2>
<span>''' + load_lang('max_file_size') + ''' (MB)</span>
<hr class="main_hr">
<input name="upload" value="''' + html.escape(d_list[4]) + '''">
<hr class="main_hr">
<span>''' + load_lang('update_branch') + '''</span>
<hr class="main_hr">
<select name="update">''' + branch_div + '''</select>
<span ''' + sqlite_only + '''>
<h3>4.1. ''' + load_lang('sqlite_only') + '''</h3>
<span>
''' + load_lang('backup_interval') + ' (' + load_lang('hour') + ') (' + load_lang('off') + ' : 0) ' + \
'(' + load_lang('restart_required') + ''')</span>
<hr class="main_hr">
<input name="back_up" value="''' + html.escape(d_list[9]) + '''">
<hr class="main_hr">
<span>
''' + load_lang('backup_where') + ' (' + load_lang('empty') + ' : ' + load_lang('default') + ') ' + \
'(' + load_lang('restart_required') + ''') (''' + load_lang('example') + ''' : ./data/backup.db)
</span>
<hr class="main_hr">
<input name="backup_where" value="''' + html.escape(d_list[21]) + '''">
<hr class="main_hr">
</span>
<h2>5. ''' + load_lang('edit_set') + '''</h2>
<span><a href="/setting/acl">(''' + load_lang('main_acl_setting') + ''')</a></span>
<hr class="main_hr">
<span>''' + load_lang('slow_edit') + ' (' + load_lang('second') + ') (' + load_lang('off') + ''' : 0)</span>
<hr class="main_hr">
<input name="slow_edit" value="''' + html.escape(d_list[19]) + '''">
<hr class="main_hr">
<input type="checkbox" name="edit_bottom_compulsion" ''' + check_box_div[7] + '''> ''' + load_lang('edit_bottom_compulsion') + ''' (''' + load_lang('beta') + ''')
<hr class="main_hr">
<span>''' + load_lang('title_max_length') + ''' (''' + load_lang('beta') + ''')</span>
<hr class="main_hr">
<input name="title_max_length" value="''' + html.escape(d_list[28]) + '''">
<hr class="main_hr">
<span>''' + load_lang('title_topic_max_length') + ''' (''' + load_lang('not_working') + ''')</span>
<hr class="main_hr">
<input name="title_topic_max_length" value="''' + html.escape(d_list[29]) + '''">
<hr class="main_hr">
<hr class="main_hr">
<button id="save" type="submit">''' + load_lang('save') + '''</button>
</form>
<script>simple_render('main_set_data');</script>
''',
menu = [['setting', load_lang('return')]]
)) | route/main_func_setting_main.py | from .tool.func import *
def main_func_setting_main(db_set):
    """
    Render and process the wiki's main settings page (/setting/main).

    GET: read every known setting from the "other" table (inserting a default
    row for any missing setting) and render the settings form.
    POST: write every submitted setting back, record an audit entry, and
    redirect back to the settings page.

    :param db_set: database backend name; the backup section of the form is
        hidden unless it equals 'sqlite'
    """
    with get_db_connect() as conn:
        curs = conn.cursor()
        # Admins only; everyone else is bounced to the ban/error page.
        if admin_check() != 1:
            return re_error('/ban')
        # index -> [form/DB field name, default value]
        # (indices are sparse; they double as positions in d_list below)
        setting_list = {
            0 : ['name', 'Wiki'],
            2 : ['frontpage', 'FrontPage'],
            4 : ['upload', '2'],
            5 : ['skin', ''],
            7 : ['reg', ''],
            8 : ['ip_view', ''],
            9 : ['back_up', '0'],
            10 : ['port', '3000'],
            11 : ['key', load_random_key()],
            12 : ['update', 'stable'],
            15 : ['encode', 'sha3'],
            16 : ['host', '0.0.0.0'],
            19 : ['slow_edit', '0'],
            20 : ['requires_approval', ''],
            21 : ['backup_where', ''],
            22 : ['domain', flask.request.host],
            23 : ['ua_get', ''],
            24 : ['enable_comment', ''],
            25 : ['enable_challenge', ''],
            26 : ['edit_bottom_compulsion', ''],
            27 : ['http_select', 'http'],
            28 : ['title_max_length', ''],
            29 : ['title_topic_max_length', '']
        }
        if flask.request.method == 'POST':
            # Save every setting; absent form fields fall back to the default.
            for i in setting_list:
                curs.execute(db_change("update other set data = ? where name = ?"), [
                    flask.request.form.get(setting_list[i][0], setting_list[i][1]),
                    setting_list[i][0]
                ])
            conn.commit()
            admin_check(None, 'edit_set (main)')  # audit-log the change
            return redirect('/setting/main')
        else:
            # Load current values, seeding missing rows with their defaults.
            d_list = {}
            for i in setting_list:
                curs.execute(db_change('select data from other where name = ?'), [setting_list[i][0]])
                db_data = curs.fetchall()
                if not db_data:
                    curs.execute(db_change('insert into other (name, data) values (?, ?)'), [setting_list[i][0], setting_list[i][1]])
                d_list[i] = db_data[0][0] if db_data else setting_list[i][1]
            else:
                # for-else: commit any default inserts once the loop finishes
                conn.commit()
            # <select> options for the password-hash method (current value listed first)
            encode_select = ''
            encode_select_data = ['sha256', 'sha3']
            for encode_select_one in encode_select_data:
                if encode_select_one == d_list[15]:
                    encode_select = '<option value="' + encode_select_one + '">' + encode_select_one + '</option>' + encode_select
                else:
                    encode_select += '<option value="' + encode_select_one + '">' + encode_select_one + '</option>'
            # <select> options for http/https (current value listed first)
            tls_select = ''
            tls_select_data = ['http', 'https']
            for tls_select_one in tls_select_data:
                if tls_select_one == d_list[27]:
                    tls_select = '<option value="' + tls_select_one + '">' + tls_select_one + '</option>' + tls_select
                else:
                    tls_select += '<option value="' + tls_select_one + '">' + tls_select_one + '</option>'
            # checked="checked" markers for the checkbox settings.
            # NOTE(review): there is no branch for i == 2, so acl_num keeps the
            # value from i == 1 (8) on that pass; check_box_div[2] is unused in
            # the template below, so this is currently harmless — confirm.
            check_box_div = ['', '', '', '', '', '', '', '']
            for i in range(0, len(check_box_div)):
                if i == 0:
                    acl_num = 7
                elif i == 1:
                    acl_num = 8
                elif i == 3:
                    acl_num = 20
                elif i == 4:
                    acl_num = 23
                elif i == 5:
                    acl_num = 24
                elif i == 6:
                    acl_num = 25
                elif i == 7:
                    acl_num = 26
                if d_list[acl_num]:
                    check_box_div[i] = 'checked="checked"'
            # <select> options for the update branch (current value listed first)
            branch_div = ''
            branch_list = ['stable', 'dev', 'beta']
            for i in branch_list:
                if d_list[12] == i:
                    branch_div = '<option value="' + i + '">' + i + '</option>' + branch_div
                else:
                    branch_div += '<option value="' + i + '">' + i + '</option>'
            # Hide the backup section unless running on sqlite
            sqlite_only = 'style="display:none;"' if db_set != 'sqlite' else ''
            return easy_minify(flask.render_template(skin_check(),
                imp = [load_lang('main_setting'), wiki_set(), wiki_custom(), wiki_css([0, 0])],
                data = '''
<form method="post" id="main_set_data">
<h2>1. ''' + load_lang('basic_set') + '''</h2>
<span>''' + load_lang('wiki_name') + '''</span>
<hr class="main_hr">
<input name="name" value="''' + html.escape(d_list[0]) + '''">
<hr class="main_hr">
<span><a href="/setting/main/logo">(''' + load_lang('wiki_logo') + ''')</a></span>
<hr class="main_hr">
<span>''' + load_lang('main_page') + '''</span>
<hr class="main_hr">
<input name="frontpage" value="''' + html.escape(d_list[2]) + '''">
<hr class="main_hr">
<span>''' + load_lang('tls_method') + '''</span>
<hr class="main_hr">
<select name="http_select">''' + tls_select + '''</select>
<hr class="main_hr">
<span>''' + load_lang('domain') + '''</span> (EX : 2du.pythonanywhere.com)
<hr class="main_hr">
<input name="domain" value="''' + html.escape(d_list[22]) + '''">
<hr class="main_hr">
<span>''' + load_lang('wiki_host') + '''</span>
<hr class="main_hr">
<input name="host" value="''' + html.escape(d_list[16]) + '''">
<hr class="main_hr">
<span>''' + load_lang('wiki_port') + '''</span>
<hr class="main_hr">
<input name="port" value="''' + html.escape(d_list[10]) + '''">
<hr class="main_hr">
<span>''' + load_lang('wiki_secret_key') + '''</span>
<hr class="main_hr">
<input type="password" name="key" value="''' + html.escape(d_list[11]) + '''">
<hr class="main_hr">
<span>''' + load_lang('encryption_method') + '''</span>
<hr class="main_hr">
<select name="encode">''' + encode_select + '''</select>
<h3>1.1. ''' + load_lang('communication_set') + '''</h3>
<input type="checkbox" name="enable_comment" ''' + check_box_div[5] + '''> ''' + load_lang('enable_comment_function') + ''' (''' + load_lang('not_working') + ''')
<hr class="main_hr">
<input type="checkbox" name="enable_challenge" ''' + check_box_div[6] + '''> ''' + load_lang('enable_challenge_function') + ''' (''' + load_lang('not_working') + ''')
<hr class="main_hr">
<h2>2. ''' + load_lang('design_set') + '''</h2>
<span>''' + load_lang('wiki_skin') + '''</span>
<hr class="main_hr">
<select name="skin">''' + load_skin(d_list[5] if d_list[5] != '' else 'tenshi') + '''</select>
<h2>3. ''' + load_lang('login_set') + '''</h2>
<input type="checkbox" name="reg" ''' + check_box_div[0] + '''> ''' + load_lang('no_register') + '''
<hr class="main_hr">
<input type="checkbox" name="ip_view" ''' + check_box_div[1] + '''> ''' + load_lang('hide_ip') + '''
<hr class="main_hr">
<input type="checkbox" name="requires_approval" ''' + check_box_div[3] + '''> ''' + load_lang('requires_approval') + '''
<hr class="main_hr">
<input type="checkbox" name="ua_get" ''' + check_box_div[4] + '''> ''' + load_lang('ua_get_off') + '''
<h2>4. ''' + load_lang('server_set') + '''</h2>
<span>''' + load_lang('max_file_size') + ''' (MB)</span>
<hr class="main_hr">
<input name="upload" value="''' + html.escape(d_list[4]) + '''">
<hr class="main_hr">
<span>''' + load_lang('update_branch') + '''</span>
<hr class="main_hr">
<select name="update">''' + branch_div + '''</select>
<span ''' + sqlite_only + '''>
<h3>4.1. ''' + load_lang('sqlite_only') + '''</h3>
<span>
''' + load_lang('backup_interval') + ' (' + load_lang('hour') + ') (' + load_lang('off') + ' : 0) ' + \
'(' + load_lang('restart_required') + ''')</span>
<hr class="main_hr">
<input name="back_up" value="''' + html.escape(d_list[9]) + '''">
<hr class="main_hr">
<span>
''' + load_lang('backup_where') + ' (' + load_lang('empty') + ' : ' + load_lang('default') + ') ' + \
'(' + load_lang('restart_required') + ''') (''' + load_lang('example') + ''' : ./data/backup.db)
</span>
<hr class="main_hr">
<input name="backup_where" value="''' + html.escape(d_list[21]) + '''">
<hr class="main_hr">
</span>
<h2>5. ''' + load_lang('edit_set') + '''</h2>
<span><a href="/setting/acl">(''' + load_lang('main_acl_setting') + ''')</a></span>
<hr class="main_hr">
<span>''' + load_lang('slow_edit') + ' (' + load_lang('second') + ') (' + load_lang('off') + ''' : 0)</span>
<hr class="main_hr">
<input name="slow_edit" value="''' + html.escape(d_list[19]) + '''">
<hr class="main_hr">
<input type="checkbox" name="edit_bottom_compulsion" ''' + check_box_div[7] + '''> ''' + load_lang('edit_bottom_compulsion') + ''' (''' + load_lang('beta') + ''')
<hr class="main_hr">
<span>''' + load_lang('title_max_length') + ''' (''' + load_lang('beta') + ''')</span>
<hr class="main_hr">
<input name="title_max_length" value="''' + html.escape(d_list[28]) + '''">
<hr class="main_hr">
<span>''' + load_lang('title_topic_max_length') + ''' (''' + load_lang('not_working') + ''')</span>
<hr class="main_hr">
<input name="title_topic_max_length" value="''' + html.escape(d_list[29]) + '''">
<hr class="main_hr">
<hr class="main_hr">
<button id="save" type="submit">''' + load_lang('save') + '''</button>
</form>
<script>simple_render('main_set_data');</script>
''',
                menu = [['setting', load_lang('return')]]
)) | 0.248899 | 0.20834 |
from typing import Union, List, Tuple, Dict, Any
from psycopg2 import sql, extensions
from general_utils.postgres_utils import LocalhostCursor
from general_utils.type_helpers import validate_is_int
class Field(sql.Identifier):
    """
    A hashable psycopg2 ``sql.Identifier`` representing a single field, with an
    optional human-readable display name.

    Hashing on the unquoted name lets Fields live in sets and dict keys as if
    they were plain strings, while subclassing Identifier keeps the proper
    quoting/escaping behaviour when composed into queries.
    """

    def __init__(self, name: str, display_name: Union[str, None] = None):
        """
        Build a field from its raw name and an optional display name.

        :param name: the raw (unquoted) field name; stored as ``_wrapped``
        :param display_name: optional display name; defaults to ``name``
        """
        super().__init__(name)
        self._display_name = name if display_name is None else display_name

    def __hash__(self):
        """Hash on the wrapped (unquoted) name so Fields behave like strings."""
        return hash(self._wrapped)

    @property
    def name(self) -> str:
        """The raw, unquoted field name."""
        return self._wrapped

    @property
    def display_name(self) -> str:
        """The display name of the field."""
        return self._display_name

    def clone_with_new_display_name(self, display_name: str) -> 'Field':
        """Return a new Field with the same name but a different display name."""
        return Field(self.name, display_name)
class Schema(sql.Identifier):
    """
    Composable wrapper for a schema name, usable directly in psycopg2 ``sql``
    queries. Currently identical to ``sql.Identifier``; the constructor
    argument is kept in the ``_wrapped`` property.
    """
class Table(sql.Identifier):
    """
    Composable wrapper for a table name, usable directly in psycopg2 ``sql``
    queries. Currently identical to ``sql.Identifier``; the constructor
    argument is kept in the ``_wrapped`` property.

    The "table" may in fact be a view or materialized view — it queries just
    like a table, so for our purposes it is one. Extra conveniences (e.g.
    locating a raw table's metadata table) can hang off this class.
    """
class SchemaTable(sql.Composed):
    """
    Composable pairing of a schema and a table so they can be used together in
    psycopg2 ``sql`` queries.

    Subclasses ``sql.Composed`` because it is two identifiers joined by
    ``sql.SQL('.')``; this abstracts the nuance that Postgres requires
    ``"schema"."table"`` (quoting ``"schema.table"`` as one identifier does
    not work) and gives callers a container from which to retrieve the schema
    or the table. This class can also yield the metadata SchemaTable where
    appropriate.
    """

    def __init__(self, schema: Union[str, Schema], table: Union[str, Table]):
        """
        :param schema: the schema, as a str or Schema
        :param table: the table, as a str or Table
        :raises TypeError: if either argument has the wrong type
        """
        if isinstance(schema, Schema):
            self._schema = schema
        elif isinstance(schema, str):
            self._schema = Schema(schema)
        else:
            # Bug fix: this message previously reported type(table) even
            # though it is the *schema* argument that failed validation.
            raise TypeError("schema must be a str or Schema, not a {}".format(type(schema)))
        if isinstance(table, Table):
            self._table = table
        elif isinstance(table, str):
            self._table = Table(table)
        else:
            raise TypeError("table must be a str or Table, not a {}".format(type(table)))
        # Store it as a composed of the schema identifier, the table
        # identifier, and a period in between
        super().__init__([self._schema, sql.SQL("."), self._table])

    @property
    def string(self) -> str:
        """An unwrapped "schema.table" string, for ease of printing."""
        return self._schema.string + "." + self._table.string

    @property
    def schema(self) -> Schema:
        """A clone of the schema (so the stored one cannot be mutated)."""
        return Schema(self._schema.string)

    @property
    def table(self) -> Table:
        """A clone of the table (so the stored one cannot be mutated)."""
        return Table(self._table.string)
class SQLTypeStruct(sql.Composable):
    """
    Composable wrapper for a raw Postgres type string (e.g. "TEXT").

    Unlike ``Field``/``sql.Identifier``, the wrapped text is rendered without
    quotes, since SQL type names cannot be quoted. Intended for dynamic
    CREATE TABLE statements; construct instances via the ``SQLType``
    container's class attributes/methods rather than directly.
    """

    def as_string(self, context=None):
        """
        Render the wrapped type string verbatim (no quoting). Safe because
        instances should only come from the ``SQLType`` container.

        :param context: unused — nothing needs to be quoted or adapted
        :return: the exact string passed to the constructor
        """
        return self._wrapped
class SQLType(object):
    """
    Enum-like container exposing ready-made ``SQLTypeStruct`` constants.

    Python classes cannot easily return enum-instances of themselves without
    bulky classmethods, so this companion class simply exposes the structs as
    class attributes (plus one parameterized factory).
    """
    TEXT = SQLTypeStruct("TEXT")
    TEXT_PRIMARY_KEY = SQLTypeStruct("TEXT PRIMARY KEY")
    DATE = SQLTypeStruct("DATE")
    TIMESTAMP = SQLTypeStruct("TIMESTAMP")
    JSONB = SQLTypeStruct("JSONB")
    JSONB_DEFAULT_EMPTY_ARRAY = SQLTypeStruct("JSONB DEFAULT '[]'::json")
    JSONB_DEFAULT_EMPTY_OBJ = SQLTypeStruct("JSONB DEFAULT '{}'::json")
    BOOLEAN = SQLTypeStruct("BOOLEAN")
    BOOLEAN_DEFAULT_TRUE = SQLTypeStruct("BOOLEAN DEFAULT TRUE")
    BOOLEAN_DEFAULT_FALSE = SQLTypeStruct("BOOLEAN DEFAULT FALSE")
    INTEGER = SQLTypeStruct("INTEGER")
    INTEGER_DEFAULT_ZERO = SQLTypeStruct("INTEGER DEFAULT 0")
    DOUBLE_PRECISION = SQLTypeStruct("DOUBLE PRECISION")
    NUMERIC = SQLTypeStruct("NUMERIC")

    @staticmethod
    def NUMERIC_WITH_PRECISION_SCALE(precision: int, scale: int) -> SQLTypeStruct:
        """
        Build a NUMERIC(precision, scale) type struct.

        :param precision: total number of significant digits (required)
        :param scale: digits to the right of the decimal point (required)
        :raises ValueError: if either argument is None — use ``SQLType.NUMERIC``
            for an unconstrained numeric instead
        :return: the parameterized NUMERIC type struct
        """
        if precision is None or scale is None:
            # Fix: raise ValueError (a subclass of Exception, so existing
            # handlers still work) with an accurate message — the old text
            # claimed "or neither" was allowed, which this factory never was.
            raise ValueError("NUMERIC_WITH_PRECISION_SCALE requires both precision and scale; "
                             "use SQLType.NUMERIC for an unconstrained NUMERIC")
        return SQLTypeStruct("NUMERIC ({}, {})".format(precision, scale))
def get_column_names(schema_table: SchemaTable, cursor: extensions.cursor) -> List[str]:
    """
    Return a table's column names, in ordinal order, by querying the
    information schema.

    :param schema_table: the SchemaTable whose columns are wanted
    :param cursor: a cursor on which to run the query
    :return: list of column names in ordinal order
    """
    query = ("SELECT column_name FROM information_schema.columns "
             "WHERE table_schema = %s AND table_name = %s ORDER BY ordinal_position")
    cursor.execute(query, (schema_table.schema.string, schema_table.table.string))
    return [row[0] for row in cursor.fetchall()]
def execute_values_insert_query(schema_table: SchemaTable) -> sql.Composable:
    """
    Build a generic INSERT query for use with psycopg2's execute_values helper
    (i.e. with a single ``%s`` placeholder following VALUES).

    :param schema_table: the SchemaTable to insert into
    :return: a Composable wrapping the insert query
    """
    template = sql.SQL("""
    INSERT INTO {} VALUES %s
    """)
    return template.format(schema_table)
def get_row_count(schema_table: SchemaTable, cursor: extensions.cursor) -> int:
    """
    Run SELECT COUNT(*) against the object and return the row count as an int.

    :param schema_table: the SchemaTable whose rows we count
    :param cursor: a cursor on which to run the query
    :return: number of rows in the schema table object
    """
    count_query = sql.SQL("""
    SELECT COUNT(*) FROM {}
    """).format(schema_table)
    cursor.execute(count_query)
    # COUNT(*) comes back as the first element of a one-row tuple
    count = cursor.fetchone()[0]
    validate_is_int(count)
    return count
def fetch_all_records(schema_table: SchemaTable, cursor: extensions.cursor) -> List:
    """
    Run SELECT * against the object and return every record in memory.
    Recommended only for small objects!

    :param schema_table: the SchemaTable to read in full
    :param cursor: a cursor on which to run the query
    :return: a list of tuple records holding the whole table
    """
    select_all = sql.SQL("""
    SELECT * FROM {}
    """).format(schema_table)
    cursor.execute(select_all)
    return cursor.fetchall()
def get_column_count(schema_table: SchemaTable, cursor: extensions.cursor) -> int:
    """
    Query the information schema to find how many columns the object has.

    Works equally well if the schema_table actually refers to a view, but it
    won't work with a materialized view since they aren't part of the SQL
    standard (so they aren't in the information schema).

    :param schema_table: the SchemaTable object whose columns we count
    :param cursor: a cursor for where to execute this query
    :return: the number of columns in the schema table object
    """
    schema_name = schema_table.schema.string
    table_name = schema_table.table.string
    cursor.execute(sql.SQL("""
    SELECT COUNT(*) FROM information_schema.columns
    WHERE table_schema = %s AND table_name = %s
    """), (schema_name, table_name))
    count = cursor.fetchone()[0]  # grab the first element of the tuple that is returned
    validate_is_int(count)
    return count
def get_list_field_type_tuples(schema_table: SchemaTable, cursor: extensions.cursor) -> List[Tuple[str, str]]:
    """
    Return (column_name, data_type) string tuples for a table, in ordinal
    order, read from the information schema.

    The type is whatever the information schema's data_type column reports;
    the precision/scale of numeric types is not currently included. Works for
    views too, but not materialized views (they are not in the SQL standard,
    so the information schema does not list them).

    :param schema_table: the schema table (or view) to describe
    :param cursor: the cursor for where to execute this query
    :return: (field name, sql type) string tuples in ordinal order
    """
    params = (schema_table.schema.string, schema_table.table.string)
    cursor.execute(sql.SQL("""
    SELECT column_name, data_type FROM information_schema.columns
    WHERE table_schema = %s AND table_name = %s ORDER BY ordinal_position
    """), params)
    # TODO add precision and scale to a parenthetical for numeric types
    # TODO make this return List[Tuple[Field, SQLType]] instead of List[str, str]
    return cursor.fetchall()
def create_table_from_field_sql_type_tuples(schema_table: SchemaTable,
                                            list_field_type_tuples: List[Tuple[Field, SQLTypeStruct]]) -> sql.Composable:
    """
    Build a CREATE TABLE statement from a schema table and a list of
    (Field, SQLTypeStruct) tuples.

    :param schema_table: the schema table the statement will create
    :param list_field_type_tuples: (field, sql type) tuples in ordinal order
    :return: a Composable CREATE TABLE statement
    """
    # One "field<tab><tab>type" fragment per column
    list_field_type_sql = [sql.SQL("{field}\t\t{type}").format(field=x[0], type=x[1])
                           for x in list_field_type_tuples]
    return sql.SQL("CREATE TABLE {schema_table} (\n\t{field_types_joined}\n);"
                   "").format(schema_table=schema_table,
                              field_types_joined=sql.SQL(",\n\t").join(list_field_type_sql))
def upsert_sql_from_field_value_dict(schema_table: SchemaTable, field_value_dict: Dict[Field, Any],
                                     conflict_field_list: List[Field]) -> sql.Composable:
    """
    Build an "upsert": INSERT the dict's values into the table, and if that
    fails because a row matching conflict_field_list already exists, DO UPDATE
    that row instead.

    Any string value has its leading/trailing whitespace stripped.

    :param schema_table: the schema table to upsert into
    :param field_value_dict: mapping of Field to the value to upsert
    :param conflict_field_list: fields for the ON CONFLICT clause — often a
        single primary-key field, but multiple fields for composite keys
    :raises Exception: if field_value_dict is empty
    :return: a Composable upsert statement
    """
    # Fail fast before doing any work (the original checked after building
    # the value and EXCLUDED lists).
    if len(field_value_dict) == 0:
        raise Exception("Cannot do upsert with an empty field_value_dict!")
    # Field and value order stay aligned because the dict is not modified
    # between these two reads.
    field_list = list(field_value_dict.keys())
    val_list = []
    for x in field_value_dict.values():
        # Strip whitespace if the value looks like a string (has lstrip/rstrip)
        if hasattr(x, 'rstrip') and hasattr(x, 'lstrip'):
            x = x.rstrip().lstrip()
        val_list.append(sql.Literal(x))  # sql.Literal for safe insertion
    # (A stray no-op `field_value_dict.values()` statement was removed here.)
    # EXCLUDED is the postgres name of the record that couldn't be inserted
    # due to the conflict
    exc_fields = [sql.SQL("EXCLUDED.{}").format(x) for x in field_list]
    # Two templates, due to an annoying Postgres 9 / 10 discrepancy:
    # the parenthesis-free SET works on 9 for all cases but fails on 10 with a
    # single field (10 wants the keyword ROW before the parentheses, since
    # ROW(1) != (1), and ROW is not supported in 9). Dropping the parentheses
    # when there is exactly one field works on both 9 and 10.
    if len(field_list) == 1:
        upsert_sql_template = sql.SQL("INSERT INTO {schema_table} ({joined_fields}) VALUES ({joined_vals}) \n"
                                      "ON CONFLICT ({joined_conf_fields}) \n"
                                      " DO UPDATE SET {joined_fields} = {joined_exc_fields};")
    else:
        upsert_sql_template = sql.SQL("INSERT INTO {schema_table} ({joined_fields}) VALUES ({joined_vals}) \n"
                                      "ON CONFLICT ({joined_conf_fields}) \n"
                                      " DO UPDATE SET ({joined_fields}) = ({joined_exc_fields});")
    return upsert_sql_template.format(
        schema_table=schema_table,
        joined_fields=sql.SQL(", ").join(field_list),
        joined_vals=sql.SQL(", ").join(val_list),
        joined_conf_fields=sql.SQL(", ").join(conflict_field_list),
        joined_exc_fields=sql.SQL(", ").join(exc_fields)
    )
class TableError(Exception):
    """Raised when a table argument does not match expectations."""
    pass
if __name__ == '__main__':
with LocalhostCursor(dict_cursor=True) as cur:
pass | general_utils/sql_utils.py | from typing import Union, List, Tuple, Dict, Any
from psycopg2 import sql, extensions
from general_utils.postgres_utils import LocalhostCursor
from general_utils.type_helpers import validate_is_int
class Field(sql.Identifier):
    """
    A hashable psycopg2 ``sql.Identifier`` representing a single field, with an
    optional human-readable display name.

    Hashing on the unquoted name lets Fields live in sets and dict keys as if
    they were plain strings, while subclassing Identifier keeps the proper
    quoting/escaping behaviour when composed into queries.
    """

    def __init__(self, name: str, display_name: Union[str, None] = None):
        """
        Build a field from its raw name and an optional display name.

        :param name: the raw (unquoted) field name; stored as ``_wrapped``
        :param display_name: optional display name; defaults to ``name``
        """
        super().__init__(name)
        self._display_name = name if display_name is None else display_name

    def __hash__(self):
        """Hash on the wrapped (unquoted) name so Fields behave like strings."""
        return hash(self._wrapped)

    @property
    def name(self) -> str:
        """The raw, unquoted field name."""
        return self._wrapped

    @property
    def display_name(self) -> str:
        """The display name of the field."""
        return self._display_name

    def clone_with_new_display_name(self, display_name: str) -> 'Field':
        """Return a new Field with the same name but a different display name."""
        return Field(self.name, display_name)
class Schema(sql.Identifier):
    """
    Composable wrapper for a schema name, usable directly in psycopg2 ``sql``
    queries. Currently identical to ``sql.Identifier``; the constructor
    argument is kept in the ``_wrapped`` property.
    """
class Table(sql.Identifier):
    """
    Composable wrapper for a table name, usable directly in psycopg2 ``sql``
    queries. Currently identical to ``sql.Identifier``; the constructor
    argument is kept in the ``_wrapped`` property.

    The "table" may in fact be a view or materialized view — it queries just
    like a table, so for our purposes it is one. Extra conveniences (e.g.
    locating a raw table's metadata table) can hang off this class.
    """
class SchemaTable(sql.Composed):
    """
    Composable pairing of a schema and a table so they can be used together in
    psycopg2 ``sql`` queries.

    Subclasses ``sql.Composed`` because it is two identifiers joined by
    ``sql.SQL('.')``; this abstracts the nuance that Postgres requires
    ``"schema"."table"`` (quoting ``"schema.table"`` as one identifier does
    not work) and gives callers a container from which to retrieve the schema
    or the table. This class can also yield the metadata SchemaTable where
    appropriate.
    """

    def __init__(self, schema: Union[str, Schema], table: Union[str, Table]):
        """
        :param schema: the schema, as a str or Schema
        :param table: the table, as a str or Table
        :raises TypeError: if either argument has the wrong type
        """
        if isinstance(schema, Schema):
            self._schema = schema
        elif isinstance(schema, str):
            self._schema = Schema(schema)
        else:
            # Bug fix: this message previously reported type(table) even
            # though it is the *schema* argument that failed validation.
            raise TypeError("schema must be a str or Schema, not a {}".format(type(schema)))
        if isinstance(table, Table):
            self._table = table
        elif isinstance(table, str):
            self._table = Table(table)
        else:
            raise TypeError("table must be a str or Table, not a {}".format(type(table)))
        # Store it as a composed of the schema identifier, the table
        # identifier, and a period in between
        super().__init__([self._schema, sql.SQL("."), self._table])

    @property
    def string(self) -> str:
        """An unwrapped "schema.table" string, for ease of printing."""
        return self._schema.string + "." + self._table.string

    @property
    def schema(self) -> Schema:
        """A clone of the schema (so the stored one cannot be mutated)."""
        return Schema(self._schema.string)

    @property
    def table(self) -> Table:
        """A clone of the table (so the stored one cannot be mutated)."""
        return Table(self._table.string)
class SQLTypeStruct(sql.Composable):
    """
    Composable wrapper for a raw Postgres type string (e.g. "TEXT").

    Unlike ``Field``/``sql.Identifier``, the wrapped text is rendered without
    quotes, since SQL type names cannot be quoted. Intended for dynamic
    CREATE TABLE statements; construct instances via the ``SQLType``
    container's class attributes/methods rather than directly.
    """

    def as_string(self, context=None):
        """
        Render the wrapped type string verbatim (no quoting). Safe because
        instances should only come from the ``SQLType`` container.

        :param context: unused — nothing needs to be quoted or adapted
        :return: the exact string passed to the constructor
        """
        return self._wrapped
class SQLType(object):
    """
    Enum-like container exposing ready-made ``SQLTypeStruct`` constants.

    Python classes cannot easily return enum-instances of themselves without
    bulky classmethods, so this companion class simply exposes the structs as
    class attributes (plus one parameterized factory).
    """
    TEXT = SQLTypeStruct("TEXT")
    TEXT_PRIMARY_KEY = SQLTypeStruct("TEXT PRIMARY KEY")
    DATE = SQLTypeStruct("DATE")
    TIMESTAMP = SQLTypeStruct("TIMESTAMP")
    JSONB = SQLTypeStruct("JSONB")
    JSONB_DEFAULT_EMPTY_ARRAY = SQLTypeStruct("JSONB DEFAULT '[]'::json")
    JSONB_DEFAULT_EMPTY_OBJ = SQLTypeStruct("JSONB DEFAULT '{}'::json")
    BOOLEAN = SQLTypeStruct("BOOLEAN")
    BOOLEAN_DEFAULT_TRUE = SQLTypeStruct("BOOLEAN DEFAULT TRUE")
    BOOLEAN_DEFAULT_FALSE = SQLTypeStruct("BOOLEAN DEFAULT FALSE")
    INTEGER = SQLTypeStruct("INTEGER")
    INTEGER_DEFAULT_ZERO = SQLTypeStruct("INTEGER DEFAULT 0")
    DOUBLE_PRECISION = SQLTypeStruct("DOUBLE PRECISION")
    NUMERIC = SQLTypeStruct("NUMERIC")

    @staticmethod
    def NUMERIC_WITH_PRECISION_SCALE(precision: int, scale: int) -> SQLTypeStruct:
        """
        Build a NUMERIC(precision, scale) type struct.

        :param precision: total number of significant digits (required)
        :param scale: digits to the right of the decimal point (required)
        :raises ValueError: if either argument is None — use ``SQLType.NUMERIC``
            for an unconstrained numeric instead
        :return: the parameterized NUMERIC type struct
        """
        if precision is None or scale is None:
            # Fix: raise ValueError (a subclass of Exception, so existing
            # handlers still work) with an accurate message — the old text
            # claimed "or neither" was allowed, which this factory never was.
            raise ValueError("NUMERIC_WITH_PRECISION_SCALE requires both precision and scale; "
                             "use SQLType.NUMERIC for an unconstrained NUMERIC")
        return SQLTypeStruct("NUMERIC ({}, {})".format(precision, scale))
def get_column_names(schema_table: SchemaTable, cursor: extensions.cursor) -> List[str]:
    """
    Return a table's column names, in ordinal order, by querying the
    information schema.

    :param schema_table: the SchemaTable whose columns are wanted
    :param cursor: a cursor on which to run the query
    :return: list of column names in ordinal order
    """
    query = ("SELECT column_name FROM information_schema.columns "
             "WHERE table_schema = %s AND table_name = %s ORDER BY ordinal_position")
    cursor.execute(query, (schema_table.schema.string, schema_table.table.string))
    return [row[0] for row in cursor.fetchall()]
def execute_values_insert_query(schema_table: SchemaTable) -> sql.Composable:
    """
    Build a generic INSERT query for use with psycopg2's execute_values helper
    (i.e. with a single ``%s`` placeholder following VALUES).

    :param schema_table: the SchemaTable to insert into
    :return: a Composable wrapping the insert query
    """
    template = sql.SQL("""
    INSERT INTO {} VALUES %s
    """)
    return template.format(schema_table)
def get_row_count(schema_table: SchemaTable, cursor: extensions.cursor) -> int:
    """
    Run SELECT COUNT(*) against the object and return the row count as an int.

    :param schema_table: the SchemaTable whose rows we count
    :param cursor: a cursor on which to run the query
    :return: number of rows in the schema table object
    """
    count_query = sql.SQL("""
    SELECT COUNT(*) FROM {}
    """).format(schema_table)
    cursor.execute(count_query)
    # COUNT(*) comes back as the first element of a one-row tuple
    count = cursor.fetchone()[0]
    validate_is_int(count)
    return count
def fetch_all_records(schema_table: SchemaTable, cursor: extensions.cursor) -> List:
    """
    Run SELECT * against the object and return every record in memory.
    Recommended only for small objects!

    :param schema_table: the SchemaTable to read in full
    :param cursor: a cursor on which to run the query
    :return: a list of tuple records holding the whole table
    """
    select_all = sql.SQL("""
    SELECT * FROM {}
    """).format(schema_table)
    cursor.execute(select_all)
    return cursor.fetchall()
def get_column_count(schema_table: SchemaTable, cursor: extensions.cursor) -> int:
    """
    Query the information schema to find how many columns the given SchemaTable has.
    Works equally well when the schema_table refers to a view, but not for a
    materialized view, since those are not part of the SQL standard and therefore
    absent from the information schema.
    :param schema_table: the SchemaTable object whose columns we want to count
    :param cursor: a cursor for where to execute this query
    :return: the number of columns in the schema table object
    """
    target_schema = schema_table.schema.string
    target_table = schema_table.table.string
    column_count_query = sql.SQL("""
        SELECT COUNT(*) FROM information_schema.columns
        WHERE table_schema = %s AND table_name = %s
    """)
    cursor.execute(column_count_query, (target_schema, target_table))
    # single-row, single-column result
    column_count = cursor.fetchone()[0]
    validate_is_int(column_count)
    return column_count
def get_list_field_type_tuples(schema_table: SchemaTable, cursor: extensions.cursor) -> List[Tuple[str, str]]:
    """
    Return a list of (column name, data type) string tuples for the given
    SchemaTable, in ordinal (column) order, by querying the information schema.
    The type is simply the raw ``data_type`` value; precision and scale for
    numeric types are not currently included.
    Works for views as well, but not for materialized views (they are not part
    of the SQL standard, so they do not appear in the information schema).
    :param schema_table: the schema table to inspect (views allowed, materialized views not)
    :param cursor: the cursor for where to execute this query
    :return: list of (field name, sql type) string tuples in ordinal order
    """
    target_schema = schema_table.schema.string
    target_table = schema_table.table.string
    columns_query = sql.SQL("""
        SELECT column_name, data_type FROM information_schema.columns
        WHERE table_schema = %s AND table_name = %s ORDER BY ordinal_position
    """)
    cursor.execute(columns_query, (target_schema, target_table))
    # TODO add precision and scale to a parenthetical for numeric types
    # TODO make this return List[Tuple[Field, SQLType] instead of List[str, str]
    return cursor.fetchall()
def create_table_from_field_sql_type_tuples(schema_table: SchemaTable,
list_field_type_tuples: List[Tuple[Field, SQLType]]) -> sql.Composable:
"""
Takes a schema table and a list of (Field, SQLType) tuples and returns a create table SQL Composable.
:param schema_table: the schema table to use to make the table
:param list_field_type_tuples: a list of tuples, each containing the field name and the sql type in ordinal order
"""
list_field_type_sql = [sql.SQL("{field}\t\t{type}").format(field=x[0], type=x[1])
for x in list_field_type_tuples]
return sql.SQL("CREATE TABLE {schema_table} (\n\t{field_types_joined}\n);"
"").format(schema_table=schema_table,
field_types_joined=sql.SQL(",\n\t").join(list_field_type_sql))
def upsert_sql_from_field_value_dict(schema_table: SchemaTable, field_value_dict: Dict[Field, Any],
                                     conflict_field_list: List[Field]) -> sql.Composable:
    """
    Takes a schema table, a dict mapping Fields to values, and a list of fields to check for conflicts on and creates
    an "upsert": we try to insert the dict values associated with each field key into the table,
    and if that fails due to a conflict on the conflict field list, an update is performed instead.
    Any string value is stripped of leading and trailing whitespace first.
    :param schema_table: the schema table to upsert into
    :param field_value_dict: the dict mapping a Field to a value representing the data we want to upsert
    :param conflict_field_list: a list of fields to use for the on conflict column – note that this is often a single
    field serving as the primary key but multiple fields for composite keys are inserted
    :return: a Composable wrapping the upsert statement
    :raises ValueError: if field_value_dict is empty
    """
    # Validate up front, before doing any other work
    if not field_value_dict:
        raise ValueError("Cannot do upsert with an empty field_value_dict!")
    # Grab the fields and values (order is preserved because the dict is not modified in between)
    field_list = list(field_value_dict.keys())
    # Strip whitespace with rstrip and lstrip if a string (i.e. something that has lstrip and rstrip)
    val_list = []
    for value in field_value_dict.values():
        if hasattr(value, 'rstrip') and hasattr(value, 'lstrip'):
            value = value.rstrip().lstrip()
        val_list.append(value)
    val_list = [sql.Literal(v) for v in val_list]  # Convert the values into sql.Literal for insertion
    # EXCLUDED is the postgres name of the records that couldn't be inserted due to the conflict
    exc_fields = [sql.SQL("EXCLUDED.{}").format(f) for f in field_list]
    # The necessity of these two templates is due to an annoying discrepancy between Postgres 9 and Postgres 10:
    # The SQL without the parentheses works for Postgres 9 for all cases, but fails in 10 if you have only one field
    # because 10 wants you to insert the keyword ROW before the parentheses since ROW(1) ≠ (1).
    # Inserting this keyword would fix the parentheses version in 10 but the ROW keyword is not supported in 9.
    # So the solution is to remove the parentheses if the field list is length 1 and keep them if the field list is
    # longer than 1 (which works on both 9 and 10!)
    if len(field_list) == 1:
        upsert_sql_template = sql.SQL("INSERT INTO {schema_table} ({joined_fields}) VALUES ({joined_vals}) \n"
                                      "ON CONFLICT ({joined_conf_fields}) \n"
                                      "    DO UPDATE SET {joined_fields} = {joined_exc_fields};")
    else:  # This is the only one necessary in 9 but breaks in 10 if you have only one field
        upsert_sql_template = sql.SQL("INSERT INTO {schema_table} ({joined_fields}) VALUES ({joined_vals}) \n"
                                      "ON CONFLICT ({joined_conf_fields}) \n"
                                      "    DO UPDATE SET ({joined_fields}) = ({joined_exc_fields});")
    return upsert_sql_template.format(
        schema_table=schema_table,
        joined_fields=sql.SQL(", ").join(field_list),
        joined_vals=sql.SQL(", ").join(val_list),
        joined_conf_fields=sql.SQL(", ").join(conflict_field_list),
        joined_exc_fields=sql.SQL(", ").join(exc_fields)
    )
class TableError(Exception):
    """Raised when a table supplied as an argument is not as expected."""
if __name__ == '__main__':
with LocalhostCursor(dict_cursor=True) as cur:
pass | 0.910613 | 0.468912 |
import re
import sys
if sys.version < '3':
    # Python 2: byte strings must be decoded to unicode before parsing
    def u(x):
        return x.decode('utf-8')
else:
    # Python 3: alias `unicode` so isinstance checks below keep working
    unicode = str
    def u(x):
        # strings are already unicode on Python 3 — pass through unchanged
        return x
# Matches section start `interfaces {`
rx_section = re.compile(r'^([\w\-]+) \{$', re.UNICODE)
# Matches named section `ethernet eth0 {`
rx_named_section = re.compile(
    r'^([\w\-]+) ([\w\-\"\./@:=\+]+) \{$', re.UNICODE
)
# Matches simple key-value pair `duplex auto` (the value may be double-quoted)
rx_value = re.compile(r'^([\w\-]+) "?([^"]+)?"?$', re.UNICODE)
# Matches single value (flag) `disable`
rx_flag = re.compile(r'^([\w\-]+)$', re.UNICODE)
# Matches comments of the form `/* ... */` on a single line
rx_comment = re.compile(r'^(\/\*).*(\*\/)', re.UNICODE)
class ParserException(Exception):
    """Raised when a configuration line or the whole config cannot be parsed."""
def update_tree(config, path, val, val_type=None):
    """Merge *val* into the nested *config* dict at the node addressed by *path*.

    *path* is a list of single-entry dicts mapping a key name to its node type
    ('section' / 'named_section'); *val_type* selects the merge strategy
    ('flag', 'value', 'named_section', or 'section').  Mutates *config* in place
    and returns it.
    """
    # Walk down to the node addressed by path, creating empty dicts on the way.
    t = config
    for item in path:
        if list(item.keys())[0] not in t:
            try:
                t[list(item.keys())[0]] = {}
            except TypeError:
                # t is a leaf (string) here — the 'value' branch below repairs it
                break
        t = t.get(list(item.keys())[0])
    if val_type == 'flag':
        # flags are stored as {name: name}
        t.update(val)
    elif val_type == 'value':
        if t and isinstance(t, dict):
            if list(t.keys())[0] == list(val.keys())[0]:
                # same key seen again: collect all values as keys of an inner dict
                try:
                    t.update(
                        {
                            list(t.keys())[0]: dict(
                                [
                                    (k, {})
                                    for k in list(t.values())
                                    + list(val.values())
                                ]
                            )
                        }
                    )
                except TypeError:
                    # existing value is a plain string — promote it to a dict first
                    if isinstance(t[list(t.keys())[0]], unicode):
                        t[list(t.keys())[0]] = {t[list(t.keys())[0]]: {}}
                    t[list(t.keys())[0]].update({list(val.values())[0]: {}})
            elif list(val.keys())[0] == list(path[-1].keys())[0]:
                # key matches the enclosing section name: store the value as a leaf key
                t.update({list(val.values())[0]: {}})
            elif list(val.keys())[0] in list(t.keys()):
                # key already present with a different value: keep both as leaf keys
                try:
                    t.update(
                        {
                            list(val.keys())[0]: {
                                t[list(val.keys())[0]]: {},
                                list(val.values())[0]: {},
                            }
                        }
                    )
                except TypeError:
                    t[list(val.keys())[0]].update({list(val.values())[0]: {}})
            else:
                t.update(val)
        else:
            if isinstance(t, str):
                # The walk above stopped at a string leaf: rebuild the parent node
                # as a dict so the new key/value can be attached beside it.
                prev_keys = list(map(lambda x: list(x.keys())[0], path))[:-1]
                prev_section_key = prev_keys[-1]
                if len(prev_keys) == 1:
                    config[prev_section_key] = {config[prev_section_key]: {}}
                    t = config[prev_section_key]
                else:
                    t = config
                    for k in prev_keys[:-1]:
                        t = t[k]
                    t[prev_section_key] = {t[prev_section_key]: {}}
                    t = t[prev_section_key]
                # NOTE(review): `item` is the last loop variable from the walk above
                t.update({list(item.keys())[0]: val})
            else:
                t.update(val)
    elif val_type == 'named_section':
        # named sections are created by the path walk itself; nothing to merge
        pass
    elif val_type == 'section':
        t = val
    return config
def parse_node(config, line, line_num, path=None):
    """Parse a single config line and merge it into *config*.

    *path* tracks the current section nesting as a list of {name: node_type}
    dicts and is mutated in place; the (config, path) pair is returned so the
    caller can thread it through successive lines.  Raises ParserException on
    a line matching none of the known patterns.
    """
    if not path:
        path = []
    line = line.strip()
    if not line:
        # blank lines carry no information
        return config, path
    if rx_section.match(line):
        # e.g. `interfaces {` — open an unnamed section
        val_type = 'section'
        section = rx_section.match(line).groups()[0]
        path.append({section: val_type})
        if path:
            update_tree(config, path, {section: {}}, val_type=val_type)
    elif rx_named_section.match(line):
        # e.g. `ethernet eth0 {` — open a named section
        val_type = 'named_section'
        section, name = rx_named_section.match(line).groups()
        # only push the section key if it is not already the innermost one
        if section not in [list(p.keys())[0] for p in path]:
            path.append({section: val_type})
        elif section != [list(p.keys())[0] for p in path][-1]:
            path.append({section: val_type})
        path.append({name: val_type})
        update_tree(config, path, {section: {name: {}}}, val_type=val_type)
    elif rx_value.match(line):
        # e.g. `duplex auto` — key/value pair in the current section
        key, value = rx_value.match(line).groups()
        update_tree(config, path, {key: value}, val_type='value')
    elif rx_flag.match(line):
        # e.g. `disable` — bare flag
        flag = rx_flag.match(line).group()
        update_tree(config, path, {flag: flag}, val_type='flag')
    elif rx_comment.match(line):
        # `/* ... */` comments are ignored
        pass
    elif line == '}' and path:
        # closing brace: pop the current level; a named section closes two
        # path entries (section name + instance name) at once
        path_types = [list(p.values())[0] for p in path]
        path.pop()
        if len(path_types) > 1 and path_types[-2:] == [
            'section',
            'named_section',
        ]:
            path.pop()
        elif len(path_types) > 1 and path_types[-2:] == [
            'named_section',
            'named_section',
        ]:
            path.pop()
    else:
        raise ParserException(
            'Parse error at {line_num}: {line}'.format(
                line_num=line_num, line=line
            )
        )
    return config, path
def parse_conf(s):
    """Parse a full Vyatta/VyOS configuration string into a nested dict.

    Feeds the config to parse_node() line by line, threading the section
    path through.  Raises ParserException for an empty config (and, via
    parse_node, for unparseable lines).
    """
    if s:
        s = u(s).split('\n')
        c = {}
        headers = []  # current section path, mutated by parse_node
        for n, line in enumerate(s, start=1):
            c, headers = parse_node(c, line, n, headers)
        return c
raise ParserException('Empty config passed') | vyattaconfparser/parser.py | import re
import sys
if sys.version < '3':
def u(x):
return x.decode('utf-8')
else:
unicode = str
def u(x):
return x
# Matches section start `interfaces {`
rx_section = re.compile(r'^([\w\-]+) \{$', re.UNICODE)
# Matches named section `ethernet eth0 {`
rx_named_section = re.compile(
r'^([\w\-]+) ([\w\-\"\./@:=\+]+) \{$', re.UNICODE
)
# Matches simple key-value pair `duplex auto`
rx_value = re.compile(r'^([\w\-]+) "?([^"]+)?"?$', re.UNICODE)
# Matches single value (flag) `disable`
rx_flag = re.compile(r'^([\w\-]+)$', re.UNICODE)
# Matches comments
rx_comment = re.compile(r'^(\/\*).*(\*\/)', re.UNICODE)
class ParserException(Exception):
pass
def update_tree(config, path, val, val_type=None):
t = config
for item in path:
if list(item.keys())[0] not in t:
try:
t[list(item.keys())[0]] = {}
except TypeError:
break
t = t.get(list(item.keys())[0])
if val_type == 'flag':
t.update(val)
elif val_type == 'value':
if t and isinstance(t, dict):
if list(t.keys())[0] == list(val.keys())[0]:
try:
t.update(
{
list(t.keys())[0]: dict(
[
(k, {})
for k in list(t.values())
+ list(val.values())
]
)
}
)
except TypeError:
if isinstance(t[list(t.keys())[0]], unicode):
t[list(t.keys())[0]] = {t[list(t.keys())[0]]: {}}
t[list(t.keys())[0]].update({list(val.values())[0]: {}})
elif list(val.keys())[0] == list(path[-1].keys())[0]:
t.update({list(val.values())[0]: {}})
elif list(val.keys())[0] in list(t.keys()):
try:
t.update(
{
list(val.keys())[0]: {
t[list(val.keys())[0]]: {},
list(val.values())[0]: {},
}
}
)
except TypeError:
t[list(val.keys())[0]].update({list(val.values())[0]: {}})
else:
t.update(val)
else:
if isinstance(t, str):
prev_keys = list(map(lambda x: list(x.keys())[0], path))[:-1]
prev_section_key = prev_keys[-1]
if len(prev_keys) == 1:
config[prev_section_key] = {config[prev_section_key]: {}}
t = config[prev_section_key]
else:
t = config
for k in prev_keys[:-1]:
t = t[k]
t[prev_section_key] = {t[prev_section_key]: {}}
t = t[prev_section_key]
t.update({list(item.keys())[0]: val})
else:
t.update(val)
elif val_type == 'named_section':
pass
elif val_type == 'section':
t = val
return config
def parse_node(config, line, line_num, path=None):
if not path:
path = []
line = line.strip()
if not line:
return config, path
if rx_section.match(line):
val_type = 'section'
section = rx_section.match(line).groups()[0]
path.append({section: val_type})
if path:
update_tree(config, path, {section: {}}, val_type=val_type)
elif rx_named_section.match(line):
val_type = 'named_section'
section, name = rx_named_section.match(line).groups()
if section not in [list(p.keys())[0] for p in path]:
path.append({section: val_type})
elif section != [list(p.keys())[0] for p in path][-1]:
path.append({section: val_type})
path.append({name: val_type})
update_tree(config, path, {section: {name: {}}}, val_type=val_type)
elif rx_value.match(line):
key, value = rx_value.match(line).groups()
update_tree(config, path, {key: value}, val_type='value')
elif rx_flag.match(line):
flag = rx_flag.match(line).group()
update_tree(config, path, {flag: flag}, val_type='flag')
elif rx_comment.match(line):
pass
elif line == '}' and path:
path_types = [list(p.values())[0] for p in path]
path.pop()
if len(path_types) > 1 and path_types[-2:] == [
'section',
'named_section',
]:
path.pop()
elif len(path_types) > 1 and path_types[-2:] == [
'named_section',
'named_section',
]:
path.pop()
else:
raise ParserException(
'Parse error at {line_num}: {line}'.format(
line_num=line_num, line=line
)
)
return config, path
def parse_conf(s):
if s:
s = u(s).split('\n')
c = {}
headers = []
for n, line in enumerate(s, start=1):
c, headers = parse_node(c, line, n, headers)
return c
raise ParserException('Empty config passed') | 0.250088 | 0.402275 |
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.vision.models import vgg16
from ppcd.models.layers import CAM, SAM
class Vgg16Base(nn.Layer):
    # Vgg16 feature extraction backbone
    def __init__(self, in_channels=3):
        """Wrap paddle's pretrained VGG16 conv stack as a multi-scale feature extractor.

        Args:
            in_channels (int): number of input channels; if not 3, the stem conv
                is replaced so non-RGB inputs can be consumed (losing its
                pretrained weights).
        """
        super(Vgg16Base, self).__init__()
        # First sublayer of paddle's vgg16 holds the convolutional feature stack
        features = vgg16(pretrained=True).sublayers()[0].sublayers()
        if in_channels != 3:
            features[0] = nn.Conv2D(in_channels, 64, kernel_size=[3, 3], padding=1, data_format='NCHW')
        self.features = nn.LayerList(features)
        # eval mode: affects dropout/batch-norm behaviour of the backbone
        self.features.eval()
    def forward(self, x):
        """Return the intermediate feature maps at five fixed layer indices."""
        results = []
        for idx, layer in enumerate(self.features):
            x = layer(x)
            if idx in {3, 8, 15, 22, 29}:
                # indices presumed to mark the VGG16 stage boundaries — TODO confirm
                results.append(x)
        return results
class CPBD(nn.Sequential):
    # Conv -> PReLU -> BatchNorm -> Dropout building block (3x3 conv, stride 1)
    def __init__(self, in_channels, out_channels):
        super(CPBD, self).__init__(
            nn.Conv2D(in_channels, out_channels, kernel_size=3, stride=1, padding=1),
            nn.PReLU(),
            nn.BatchNorm(out_channels),
            nn.Dropout(p=0.6),
        )
class DSIFN(nn.Layer):
    """
    The DSIFN implementation based on PaddlePaddle.
    The original article refers to
    <NAME> , et al. "A deeply supervised image fusion network for change detection in high resolution bi-temporal remote sensing images"
    (https://www.sciencedirect.com/science/article/abs/pii/S0924271620301532).
    Args:
        in_channels (int, optional): The channel number of input image. default:3.
        num_classes (int, optional): The unique number of target classes. default:2.
    """
    def __init__(self, in_channels=3, num_classes=2):
        super().__init__()
        # Shared VGG16 backbone applied to both temporal images
        self.backbone = Vgg16Base(in_channels=in_channels)
        # One spatial-attention module per decoding branch
        self.sa1 = SAM()
        self.sa2 = SAM()
        self.sa3 = SAM()
        self.sa4 = SAM()
        self.sa5 = SAM()
        # branch1 — deepest scale (fused layer-29 features, 512+512 channels)
        self.ca1 = CAM(in_channels=1024, ratio=8)
        self.bn_ca1 = nn.BatchNorm(1024)
        self.o1_conv1 = CPBD(1024, 512)
        self.o1_conv2 = CPBD(512, 512)
        self.bn_sa1 = nn.BatchNorm(512)
        self.o1_conv3 = nn.Conv2D(512, num_classes, 1)
        self.trans_conv1 = nn.Conv2DTranspose(512, 512, kernel_size=2, stride=2)
        # branch 2 — upsampled branch-1 output + both layer-22 features
        self.ca2 = CAM(in_channels=1536, ratio=8)
        self.bn_ca2 = nn.BatchNorm(1536)
        self.o2_conv1 = CPBD(1536, 512)
        self.o2_conv2 = CPBD(512, 256)
        self.o2_conv3 = CPBD(256, 256)
        self.bn_sa2 = nn.BatchNorm(256)
        self.o2_conv4 = nn.Conv2D(256, num_classes, 1)
        self.trans_conv2 = nn.Conv2DTranspose(256, 256, kernel_size=2, stride=2)
        # branch 3 — upsampled branch-2 output + both layer-15 features
        self.ca3 = CAM(in_channels=768, ratio=8)
        self.o3_conv1 = CPBD(768, 256)
        self.o3_conv2 = CPBD(256, 128)
        self.o3_conv3 = CPBD(128, 128)
        self.bn_sa3 = nn.BatchNorm(128)
        self.o3_conv4 = nn.Conv2D(128, num_classes, 1)
        self.trans_conv3 = nn.Conv2DTranspose(128, 128, kernel_size=2, stride=2)
        # branch 4 — upsampled branch-3 output + both layer-8 features
        self.ca4 = CAM(in_channels=384, ratio=8)
        self.o4_conv1 = CPBD(384, 128)
        self.o4_conv2 = CPBD(128, 64)
        self.o4_conv3 = CPBD(64, 64)
        self.bn_sa4 = nn.BatchNorm(64)
        self.o4_conv4 = nn.Conv2D(64, num_classes, 1)
        self.trans_conv4 = nn.Conv2DTranspose(64, 64, kernel_size=2, stride=2)
        # branch 5 — finest scale (upsampled branch-4 output + both layer-3 features)
        self.ca5 = CAM(in_channels=192, ratio=8)
        self.o5_conv1 = CPBD(192, 64)
        self.o5_conv2 = CPBD(64, 32)
        self.o5_conv3 = CPBD(32, 16)
        self.bn_sa5 = nn.BatchNorm(16)
        self.o5_conv4 = nn.Conv2D(16, num_classes, 1)
    def forward(self, images):
        # Extract five feature maps per temporal image with the shared backbone
        t1_f_l3, t1_f_l8, t1_f_l15, t1_f_l22, t1_f_l29 = self.backbone(images[0])
        t2_f_l3, t2_f_l8, t2_f_l15, t2_f_l22, t2_f_l29 = self.backbone(images[1])
        # branch 1: channel attention -> convs -> spatial attention -> side output
        x = paddle.concat([t1_f_l29, t2_f_l29], axis=1)
        x = self.ca1(x) * x
        x = self.o1_conv1(x)
        x = self.o1_conv2(x)
        x = self.sa1(x) * x
        x = self.bn_sa1(x)
        branch_1_out = self.o1_conv3(x)
        # upsample and fuse with the next-shallower backbone features
        x = self.trans_conv1(x)
        x = paddle.concat([x, t1_f_l22, t2_f_l22], axis=1)
        x = self.ca2(x) * x
        x = self.o2_conv1(x)
        x = self.o2_conv2(x)
        x = self.o2_conv3(x)
        x = self.sa2(x) * x
        x = self.bn_sa2(x)
        branch_2_out = self.o2_conv4(x)
        x = self.trans_conv2(x)
        x = paddle.concat([x, t1_f_l15, t2_f_l15], axis=1)
        x = self.ca3(x) * x
        x = self.o3_conv1(x)
        x = self.o3_conv2(x)
        x = self.o3_conv3(x)
        x = self.sa3(x) * x
        x = self.bn_sa3(x)
        branch_3_out = self.o3_conv4(x)
        x = self.trans_conv3(x)
        x = paddle.concat([x, t1_f_l8, t2_f_l8], axis=1)
        x = self.ca4(x) * x
        x = self.o4_conv1(x)
        x = self.o4_conv2(x)
        x = self.o4_conv3(x)
        x = self.sa4(x) * x
        x = self.bn_sa4(x)
        branch_4_out =self.o4_conv4(x)
        x = self.trans_conv4(x)
        x = paddle.concat([x, t1_f_l3, t2_f_l3], axis=1)
        x = self.ca5(x) * x
        x = self.o5_conv1(x)
        x = self.o5_conv2(x)
        x = self.o5_conv3(x)
        x = self.sa5(x) * x
        x = self.bn_sa5(x)
        # deep supervision: finest-scale output first, deepest last
        branch_5_out = self.o5_conv4(x)
return [branch_5_out, branch_4_out, branch_3_out, branch_2_out, branch_1_out] | ppcd/models/dsifn.py | import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.vision.models import vgg16
from ppcd.models.layers import CAM, SAM
class Vgg16Base(nn.Layer):
# Vgg16 feature extraction backbone
def __init__(self, in_channels=3):
super(Vgg16Base, self).__init__()
features = vgg16(pretrained=True).sublayers()[0].sublayers()
if in_channels != 3:
features[0] = nn.Conv2D(in_channels, 64, kernel_size=[3, 3], padding=1, data_format='NCHW')
self.features = nn.LayerList(features)
self.features.eval()
def forward(self, x):
results = []
for idx, layer in enumerate(self.features):
x = layer(x)
if idx in {3, 8, 15, 22, 29}:
results.append(x)
return results
class CPBD(nn.Sequential):
def __init__(self, in_channels, out_channels):
super(CPBD, self).__init__(
nn.Conv2D(in_channels, out_channels, kernel_size=3, stride=1, padding=1),
nn.PReLU(),
nn.BatchNorm(out_channels),
nn.Dropout(p=0.6),
)
class DSIFN(nn.Layer):
"""
The DSIFN implementation based on PaddlePaddle.
The original article refers to
<NAME> , et al. "A deeply supervised image fusion network for change detection in high resolution bi-temporal remote sensing images"
(https://www.sciencedirect.com/science/article/abs/pii/S0924271620301532).
Args:
in_channels (int, optional): The channel number of input image. default:3.
num_classes (int, optional): The unique number of target classes. default:2.
"""
def __init__(self, in_channels=3, num_classes=2):
super().__init__()
self.backbone = Vgg16Base(in_channels=in_channels)
self.sa1 = SAM()
self.sa2 = SAM()
self.sa3 = SAM()
self.sa4 = SAM()
self.sa5 = SAM()
# branch1
self.ca1 = CAM(in_channels=1024, ratio=8)
self.bn_ca1 = nn.BatchNorm(1024)
self.o1_conv1 = CPBD(1024, 512)
self.o1_conv2 = CPBD(512, 512)
self.bn_sa1 = nn.BatchNorm(512)
self.o1_conv3 = nn.Conv2D(512, num_classes, 1)
self.trans_conv1 = nn.Conv2DTranspose(512, 512, kernel_size=2, stride=2)
# branch 2
self.ca2 = CAM(in_channels=1536, ratio=8)
self.bn_ca2 = nn.BatchNorm(1536)
self.o2_conv1 = CPBD(1536, 512)
self.o2_conv2 = CPBD(512, 256)
self.o2_conv3 = CPBD(256, 256)
self.bn_sa2 = nn.BatchNorm(256)
self.o2_conv4 = nn.Conv2D(256, num_classes, 1)
self.trans_conv2 = nn.Conv2DTranspose(256, 256, kernel_size=2, stride=2)
# branch 3
self.ca3 = CAM(in_channels=768, ratio=8)
self.o3_conv1 = CPBD(768, 256)
self.o3_conv2 = CPBD(256, 128)
self.o3_conv3 = CPBD(128, 128)
self.bn_sa3 = nn.BatchNorm(128)
self.o3_conv4 = nn.Conv2D(128, num_classes, 1)
self.trans_conv3 = nn.Conv2DTranspose(128, 128, kernel_size=2, stride=2)
# branch 4
self.ca4 = CAM(in_channels=384, ratio=8)
self.o4_conv1 = CPBD(384, 128)
self.o4_conv2 = CPBD(128, 64)
self.o4_conv3 = CPBD(64, 64)
self.bn_sa4 = nn.BatchNorm(64)
self.o4_conv4 = nn.Conv2D(64, num_classes, 1)
self.trans_conv4 = nn.Conv2DTranspose(64, 64, kernel_size=2, stride=2)
# branch 5
self.ca5 = CAM(in_channels=192, ratio=8)
self.o5_conv1 = CPBD(192, 64)
self.o5_conv2 = CPBD(64, 32)
self.o5_conv3 = CPBD(32, 16)
self.bn_sa5 = nn.BatchNorm(16)
self.o5_conv4 = nn.Conv2D(16, num_classes, 1)
def forward(self, images):
t1_f_l3, t1_f_l8, t1_f_l15, t1_f_l22, t1_f_l29 = self.backbone(images[0])
t2_f_l3, t2_f_l8, t2_f_l15, t2_f_l22, t2_f_l29 = self.backbone(images[1])
x = paddle.concat([t1_f_l29, t2_f_l29], axis=1)
x = self.ca1(x) * x
x = self.o1_conv1(x)
x = self.o1_conv2(x)
x = self.sa1(x) * x
x = self.bn_sa1(x)
branch_1_out = self.o1_conv3(x)
x = self.trans_conv1(x)
x = paddle.concat([x, t1_f_l22, t2_f_l22], axis=1)
x = self.ca2(x) * x
x = self.o2_conv1(x)
x = self.o2_conv2(x)
x = self.o2_conv3(x)
x = self.sa2(x) * x
x = self.bn_sa2(x)
branch_2_out = self.o2_conv4(x)
x = self.trans_conv2(x)
x = paddle.concat([x, t1_f_l15, t2_f_l15], axis=1)
x = self.ca3(x) * x
x = self.o3_conv1(x)
x = self.o3_conv2(x)
x = self.o3_conv3(x)
x = self.sa3(x) * x
x = self.bn_sa3(x)
branch_3_out = self.o3_conv4(x)
x = self.trans_conv3(x)
x = paddle.concat([x, t1_f_l8, t2_f_l8], axis=1)
x = self.ca4(x) * x
x = self.o4_conv1(x)
x = self.o4_conv2(x)
x = self.o4_conv3(x)
x = self.sa4(x) * x
x = self.bn_sa4(x)
branch_4_out =self.o4_conv4(x)
x = self.trans_conv4(x)
x = paddle.concat([x, t1_f_l3, t2_f_l3], axis=1)
x = self.ca5(x) * x
x = self.o5_conv1(x)
x = self.o5_conv2(x)
x = self.o5_conv3(x)
x = self.sa5(x) * x
x = self.bn_sa5(x)
branch_5_out = self.o5_conv4(x)
return [branch_5_out, branch_4_out, branch_3_out, branch_2_out, branch_1_out] | 0.923212 | 0.291378 |
from pathlib import Path
from typing import Union, Optional
from collections import OrderedDict
import re
import math
import torch
from .. import Results as Results_
# old Results class
class Results:
    """Container for BCN (v0) training results, one history entry per epoch.

    Attributes:
        epoch (int): Number of epochs the model has been trained for.
        train_losses / valid_losses (List[float]): Average losses per epoch.
        accuracies, precisions, recalls, f1_scores (List[float]): Validation
            metrics per epoch.
        train_times / valid_times (List[float]): Seconds spent per epoch.
        best_valid_loss (float): Minimum validation loss seen so far.
        best_epoch (int): Epoch index of that minimum.
        tag (str): Free-form note about the run; used as plot titles.
        version (str): BCN module version that produced these results.
    """
    def __init__(self):
        # NOTE: attribute creation order matters — __iter__, load and save
        # expose __dict__ directly, so keep this order stable.
        self.epoch = 0
        self.train_losses = []
        self.valid_losses = []
        self.accuracies = []
        self.precisions = []
        self.recalls = []
        self.f1_scores = []
        self.train_times = []
        self.valid_times = []
        self.best_valid_loss = float("inf")
        self.best_epoch = 0
        self.tag = ""
        self.version = "0.4.98"
    def __repr__(self):
        suffix = "" if self.epoch == 1 else "s"
        return f"{type(self).__name__}({self.epoch} epoch{suffix})"
    def __iter__(self):
        # yields (attribute name, value) pairs
        yield from self.__dict__.items()
    def load(self, path: Union[Path, str]) -> None:
        """Load results from *path* (replaces all attributes wholesale)."""
        self.__dict__ = torch.load(path)
    def save(self, path: Union[Path, str]) -> None:
        """Save all attributes to *path* via torch.save."""
        torch.save(self.__dict__, path)
def migrate_results(
    file: Union[Path,str],
    new_filename: Optional[Union[Path,str]]=None) \
    -> None:
    """Migrate a results file that was generated from v0.4.98 or earlier to a v1 results file.
    This overwrites the file unless the ``new_filename`` parameter is used.
    Example usage: ::
        >>> from pathlib import Path
        >>> from bcn.v0 import migrate_results
        >>> for file in Path("./results/").iterdir():
        ...     migrate_results(file)
        ...
    Args:
        file: The v0 results file to convert.
        new_filename: The new filename to use. Default is to use the original filename.
    """
    file = Path(file)
    old = Results()
    old.load(file)
    migrated = Results_()
    migrated.__dict__.update(old.__dict__)
    # replace `best_epoch` with `best`: v1 tracks the epoch with the best F1
    migrated.best = max(range(len(migrated.f1_scores)), key=lambda i: migrated.f1_scores[i])
    del migrated.__dict__["best_epoch"]
    del migrated.__dict__["best_valid_loss"]
    # version info is now kept in a set
    migrated.versions = set((old.version,))
    del migrated.__dict__["version"]
    # devices are unknown for v0 results :/
    migrated.devices = set()
    # no weight perturbation existed yet in v0
    migrated.step = 0
    migrated.wp_layers = []
    if new_filename is None:
        new_filename = file  # BUG FIX: was `Path(filename)` — `filename` is undefined (NameError)
    migrated.save(new_filename)
def migrate_weights(
    file: Union[Path,str],
    new_filename: Optional[Union[Path,str]]=None) \
    -> None:
    """Migrate a weights file that was generated from v0.4.98 or earlier to a v1 weights file.
    This overwrites the file unless the ``new_filename`` parameter is used.
    Example usage: ::
        >>> from pathlib import Path
        >>> from bcn.v0 import migrate_weights
        >>> for file in Path("./results/").iterdir():
        ...     migrate_weights(file)
        ...
    Args:
        file: The v0 weights file to convert.
        new_filename: The new filename to use. Default is to use the original filename.
    """
    file = Path(file)
    if new_filename is None:
        # BUG FIX: previously new_filename was left as None, so torch.save
        # received None instead of overwriting the original file as documented.
        new_filename = file
    W = torch.load(file, map_location="cpu")
    W_ = OrderedDict()
    # detect geometry from the filename; expected shape like:
    # weights_30x30x6@9-informed.IndirectOnly.MNIST.b64.t1.pt
    # (the old unused re.match probe has been removed — parsing is done below)
    left, _ = file.name.split("-")
    shape = left[left.rfind("_")+1:]
    size, c = shape.split("@")
    h, w, d = size.split("x")
    h = int(h)
    w = int(w)
    hw = h*w
    d = int(d)
    c = int(c)
    # c is the number of connections per neuron; ell is the offset radius
    ell = (int(math.sqrt(c))-1)//2
    ells = range(-ell, ell+1)
    for l in range(d):
        # v1 stores one (c, h*w, 1) tensor per layer instead of one tensor
        # per (dy, dx) offset
        W_[f"layers.{l}.weights"] = torch.zeros((c,hw,1))
        index = 0
        for dy in ells:
            for dx in ells: # row-major
                W_[f"layers.{l}.weights"][index,:,:] = W[f"layers.{l}.({dy},{dx})"]
                index += 1
torch.save(W_, new_filename) | bcn/v0/compat.py | from pathlib import Path
from typing import Union, Optional
from collections import OrderedDict
import re
import math
import torch
from .. import Results as Results_
# old Results class
class Results:
"""Class representing BCN training results.
Attributes:
epoch (int): The number of epochs the BCN model has been trained for.
train_losses (List[float]): List of average training set losses acorss epochs.
valid_losses (List[float]): List of average validation set losses across epochs.
accuracies (List[float]): List of validation set accuracies across epochs.
precisions (List[float]): List of validation set `precision scores`_ across eopchs.
recalls (List[float]): List of validation set `recall scores`_ across epochs.
f1_scores (List[float]): List of validation set `F1 scores`_ across epochs.
train_times (List[float]): List of durations, in seconds, each epoch took to train.
valid_times (List[float]): List of durations, in seconds, each epoch took to test.
best_valid_loss (float): Minimum encountered validation loss.
best_epoch (int): Index corresponding to the minimum of `Results.valid_losses`.
tag (str): Anything notable about the model or results. Used as plot titles when plotting.
Set via the `BCN.train` method.
version (str): The version of the BCN Python module that these results came from.
.. _precision scores: https://en.wikipedia.org/wiki/Precision_and_recall
.. _recall scores: https://en.wikipedia.org/wiki/Precision_and_recall
.. _F1 scores: https://en.wikipedia.org/wiki/F-score
"""
def __init__(self):
self.epoch = 0
self.train_losses = []
self.valid_losses = []
self.accuracies = []
self.precisions = []
self.recalls = []
self.f1_scores = []
self.train_times = []
self.valid_times = []
self.best_valid_loss = float("inf")
self.best_epoch = 0
self.tag = ""
self.version = "0.4.98"
def __repr__(self):
plural = self.epoch != 1
return f"{self.__class__.__name__}({self.epoch} epoch{'s' if plural else ''})"
def __iter__(self):
for (k,v) in self.__dict__.items():
yield (k,v)
def load(self, path: Union[Path,str]) -> None:
"""Load results from path.
Args:
path: File path from which to load the results.
"""
self.__dict__ = torch.load(path)
def save(self, path: Union[Path,str]) -> None:
"""Save results to path.
Args:
path: File path to which to save the results.
"""
torch.save(self.__dict__, path)
def migrate_results(
file: Union[Path,str],
new_filename: Optional[Union[Path,str]]=None) \
-> None:
"""Migrate a results file that was generated from v0.4.98 or earlier to a v1 results file.
This overwrites the file unless the ``new_filename`` parameter is used.
Example usage: ::
>>> from pathlib import Path
>>> from bcn.v0 import migrate_results
>>> for file in Path("./results/").iterdir():
... migrate_results(file)
...
Args:
file: The v0 results file to convert.
new_filename: The new filename to use. Default is to use the original filename.
"""
file = Path(file)
r = Results()
r.load(file)
r_ = Results_()
r_.__dict__.update(r.__dict__)
# replace `best_epoch` with `best`
r_.best = max(range(len(r_.f1_scores)), key=lambda i: r_.f1_scores[i])
del r_.__dict__["best_epoch"]
del r_.__dict__["best_valid_loss"]
# version info is now kept in a set
r_.versions = set((r.version,))
del r_.__dict__["version"]
# devices are unknown :/
r_.devices = set()
# no weight perturbation yet
r_.step = 0
r_.wp_layers = []
if new_filename is None: new_filename = Path(filename)
r_.save(new_filename)
def migrate_weights(
file: Union[Path,str],
new_filename: Optional[Union[Path,str]]=None) \
-> None:
"""Migrate a weights file that was generated from v0.4.98 or earlier to a v1 weights file.
This overwrites the file unless the ``new_filename`` parameter is used.
Example usage: ::
>>> from pathlib import Path
>>> from bcn.v0 import migrate_weights
>>> for file in Path("./results/").iterdir():
... migrate_weights(file)
...
Args:
file: The v0 weights file to convert.
new_filename: The new filename to use. Default is to use the original filename.
"""
file = Path(file)
W = torch.load(file, map_location="cpu")
W_ = OrderedDict()
# get file info using regex
# example: weights_30x30x6@9-informed.IndirectOnly.MNIST.b64.t1.pt
m = re.match(
r"weights_([0-9]+)x([0-9]+)x([0-9]+)@([0-9]+)-" \
r"(\w+\.?\w+)\.(\w+)\.b([0-9]+)(\.t(?:\w+))?\.pt",
file.name
)
# detect information from filename
left, _ = file.name.split("-")
shape = left[left.rfind("_")+1:]
size, c = shape.split("@")
h, w, d = size.split("x")
h = int(h)
w = int(w)
hw = h*w
d = int(d)
c = int(c)
ell = (int(math.sqrt(c))-1)//2
ells = range(-ell, ell+1)
for l in range(d):
W_[f"layers.{l}.weights"] = torch.zeros((c,hw,1))
index = 0
for dy in ells:
for dx in ells: # row-major
W_[f"layers.{l}.weights"][index,:,:] = W[f"layers.{l}.({dy},{dx})"]
index += 1
torch.save(W_, new_filename) | 0.950698 | 0.439567 |
from ..base import disk
from infi.pyutils.lazy import cached_method
# pylint: disable=R0921
class LinuxDiskDrive(disk.DiskDrive):
    """Linux disk drive backed by a SCSI (or multipath) block device path."""

    def __init__(self, storage_device, scsi_disk_path):
        super(LinuxDiskDrive, self).__init__()
        self._storage_device = storage_device  # platform storage device object
        self._scsi_disk_path = scsi_disk_path  # block access path of the disk

    def _get_parted_disk_drive(self):
        # imported lazily so this module can be loaded without infi.parted
        from infi.parted import Disk
        return Disk(self._scsi_disk_path)

    @cached_method
    def get_storage_device(self):
        """Return the storage device this drive wraps."""
        return self._storage_device

    @cached_method
    def get_block_access_path(self):
        """Return the block device path of this drive."""
        return self._scsi_disk_path

    @cached_method
    def get_partition_table(self):
        """Return the partition table object for this drive.

        Raises:
            disk.NoPartitionTable: if the drive has no table, or the table
                type is neither GPT nor MBR (msdos).
        """
        from .partition import LinuxGUIDPartitionTable, LinuxMBRPartitionTable
        parted = self._get_parted_disk_drive()
        if not parted.has_partition_table():
            raise disk.NoPartitionTable()
        if parted.get_partition_table_type() == "gpt":
            return LinuxGUIDPartitionTable(self)
        elif parted.get_partition_table_type() == "msdos":
            return LinuxMBRPartitionTable(self)
        # unsupported table type (anything other than gpt/msdos)
        raise disk.NoPartitionTable()

    def is_empty(self):
        """True when the drive has no partition table at all."""
        return not self._get_parted_disk_drive().has_partition_table()

    def delete_partition_table(self):
        raise NotImplementedError()

    def create_guid_partition_table(self, alignment_in_bytes=None):
        """Create and return a new GPT partition table on this drive."""
        from .partition import LinuxGUIDPartitionTable
        return LinuxGUIDPartitionTable.create_partition_table(self, alignment_in_bytes)

    def create_mbr_partition_table(self, alignment_in_bytes=None):
        """Create and return a new MBR (msdos) partition table on this drive."""
        from .partition import LinuxMBRPartitionTable
        return LinuxMBRPartitionTable.create_partition_table(self, alignment_in_bytes)

    def _format_partition(self, number, filesystem_name, **kwargs):
        self._get_parted_disk_drive().format_partition(number, filesystem_name, **kwargs)

    @cached_method
    def get_block_access_paths_for_partitions(self):
        """Return the block device paths of this drive's partitions.

        Partition device naming differs between distributions, so several
        glob patterns are tried and all matches are collected.
        """
        from glob import glob
        if self._scsi_disk_path[-1].isalpha():
            # disk path ends in a letter, e.g. 'sda' or 'mpathc'
            glob_patterns = ['%s_part[0-9]*',  # e.g. SuSE 12
                             '%s-part[0-9]*',  # e.g. SuSE 12
                             '%sp[0-9]*',      # e.g. CentOS 6
                             '%s[0-9]*']       # e.g. CentOS/RHEL/Oracle 7
        else:
            # disk path ends in a digit, e.g. 'mpath3'
            glob_patterns = ['%s-part[0-9]*',  # e.g. Ubuntu-14.04
                             '%sp[0-9]*']      # e.g. Ubuntu-14.04
        res = []
        for glob_pattern in glob_patterns:
            # extend directly from glob(); the previous list comprehension
            # built a pointless intermediate copy of each result list
            res.extend(glob(glob_pattern % self._scsi_disk_path))
        return res
class LinuxDiskModel(disk.DiskModel):
    def find_disk_drive_by_block_access_path(self, path):
        """Wrap the SCSI or multipath block device whose access path equals ``path``."""
        from infi.storagemodel import get_storage_model
        scsi_devices = get_storage_model().get_scsi().get_all_scsi_block_devices()
        mpath_devices = get_storage_model().get_native_multipath().get_all_multipath_block_devices()
        matching = [candidate for candidate in scsi_devices + mpath_devices
                    if candidate.get_block_access_path() == path]
        # an unknown path raises IndexError here, as before
        return LinuxDiskDrive(matching[0], path)
from infi.pyutils.lazy import cached_method
# pylint: disable=R0921
class LinuxDiskDrive(disk.DiskDrive):
def __init__(self, storage_device, scsi_disk_path):
super(LinuxDiskDrive, self).__init__()
self._storage_device = storage_device
self._scsi_disk_path = scsi_disk_path
def _get_parted_disk_drive(self):
from infi.parted import Disk
return Disk(self._scsi_disk_path)
@cached_method
def get_storage_device(self):
return self._storage_device
@cached_method
def get_block_access_path(self):
return self._scsi_disk_path
@cached_method
def get_partition_table(self):
from .partition import LinuxGUIDPartitionTable, LinuxMBRPartitionTable
parted = self._get_parted_disk_drive()
if not parted.has_partition_table():
raise disk.NoPartitionTable()
if parted.get_partition_table_type() == "gpt":
return LinuxGUIDPartitionTable(self)
elif parted.get_partition_table_type() == "msdos":
return LinuxMBRPartitionTable(self)
raise disk.NoPartitionTable()
def is_empty(self):
return not self._get_parted_disk_drive().has_partition_table()
def delete_partition_table(self):
raise NotImplementedError()
def create_guid_partition_table(self, alignment_in_bytes=None):
from .partition import LinuxGUIDPartitionTable
return LinuxGUIDPartitionTable.create_partition_table(self, alignment_in_bytes)
def create_mbr_partition_table(self, alignment_in_bytes=None):
from .partition import LinuxMBRPartitionTable
return LinuxMBRPartitionTable.create_partition_table(self, alignment_in_bytes)
def _format_partition(self, number, filesystem_name, **kwargs):
self._get_parted_disk_drive().format_partition(number, filesystem_name, **kwargs)
@cached_method
def get_block_access_paths_for_partitions(self):
from glob import glob
if self._scsi_disk_path[-1].isalpha():
# disk path is like 'sda' or 'mpathc'
glob_patterns = ['%s_part[0-9]*', # e.g. SuSE 12
'%s-part[0-9]*', # e.g. SuSE 12
'%sp[0-9]*', # e.g. CentOS 6
'%s[0-9]*'] # e.g. CentOS/RHEL/Oracle 7
else:
# disk path is like 'mpath3'
glob_patterns = ['%s-part[0-9]*', # e.g. Ubuntu-14.04
'%sp[0-9]*'] # e.g. Ubuntu-14.04
res = []
for glob_pattern in glob_patterns:
res.extend([item for item in glob(glob_pattern % self._scsi_disk_path)])
return res
class LinuxDiskModel(disk.DiskModel):
def find_disk_drive_by_block_access_path(self, path):
from infi.storagemodel import get_storage_model
scsi = get_storage_model().get_scsi()
multipath = get_storage_model().get_native_multipath()
all_devices = scsi.get_all_scsi_block_devices() + multipath.get_all_multipath_block_devices()
storage_device = [device for device in all_devices
if device.get_block_access_path() == path][0]
return LinuxDiskDrive(storage_device, path) | 0.459076 | 0.088347 |
import re
import os
from git import Repo, InvalidGitRepositoryError, NoSuchPathError
from elifetools import utils as etoolsutils
def repl(match):
    "Convert a matched hex entity code to its unicode character"
    code_point = int(match.group(1), 16)
    return chr(code_point)
def entity_to_unicode(string):
    """
    Quick convert unicode HTML entities to unicode characters
    using a regular expression replacement
    """
    # Selected named-entity replacements that have been seen.
    # BUGFIX: the left-hand patterns are HTML entity *names*; they had been
    # decoded into the very characters they map to (e.g. the literal
    # character alpha instead of "&alpha;"), which made every rule a no-op.
    replacements = []
    replacements.append((r"&alpha;", u"\u03b1"))
    replacements.append((r"&beta;", u"\u03b2"))
    replacements.append((r"&gamma;", u"\u03b3"))
    replacements.append((r"&delta;", u"\u03b4"))
    replacements.append((r"&epsilon;", u"\u03b5"))
    replacements.append((r"&ordm;", u"\u00ba"))
    # NOTE(review): maps lowercase &iuml; to the uppercase character
    # \u00cf, matching the original table's right-hand side -- confirm
    replacements.append((r"&iuml;", u"\u00cf"))
    replacements.append((r"&ldquo;", '"'))
    replacements.append((r"&rdquo;", '"'))
    # First, replace hex numeric entities (e.g. &#x00e9;) with unicode
    string = re.sub(r"&#x(....);", repl, string)
    # Second, replace some specific named entities specified in the list
    for entity, replacement in replacements:
        string = re.sub(entity, replacement, string)
    return string
def remove_tag(tag_name, string):
    """
    Remove open and close tags - the tags themselves only - using
    a non-greedy angle bracket pattern match
    """
    if not string:
        # None / empty string pass through unchanged
        return string
    return re.sub("</?" + tag_name + ".*?>", "", string)
def replace_tags(string, from_tag="i", to_tag="italic"):
    """
    Replace tags such as <i> to <italic>
    <sup> and <sub> are allowed and do not need to be replaced
    This does not validate markup
    """
    for template in ("<{}>", "</{}>"):
        string = string.replace(template.format(from_tag), template.format(to_tag))
    return string
def attr_names(attr_map):
    """return a sorted list of attribute names from the map"""
    # falsy map (None or empty) yields an empty list
    return sorted(attr_map) if attr_map else []
def attr_string(attr_map):
    """string of tag attributes and values, e.g. ' a="1" b="2"' (leading space)"""
    string = ""
    if attr_map:
        for key, value in sorted(attr_map.items()):
            # BUGFIX: restore the '&quot;' entity for double quotes; it had
            # been decoded into a raw quote character, producing invalid code
            attr = '%s="%s"' % (
                key,
                etoolsutils.escape_ampersand(value).replace('"', "&quot;"),
            )
            string = " ".join([string, attr])
    return string
def set_attr_if_value(obj, attr_name, value):
    "shorthand method to set object values if the value is not none"
    if value is None:
        return
    setattr(obj, attr_name, value)
def is_year_numeric(value):
    "True if value is all digits"
    # a falsy value (None, empty string) is never a numeric year
    return bool(value and re.match("^[0-9]+$", value))
def version_from_xml_filename(filename):
    "extract the numeric version from the xml filename"
    try:
        parts = filename.split(os.sep)[-1].split("-")
    except AttributeError:
        # filename is not a string (e.g. None)
        return None
    if len(parts) != 3:
        return None
    try:
        return int(parts[-1].lstrip("v").rstrip(".xml"))
    except ValueError:
        return None
def get_last_commit_to_master(repo_path: str = ".") -> str:
    """
    returns the last commit on the master branch. It would be more ideal to get the commit
    from the branch we are currently on, but as this is a check mostly to help
    with production issues, returning the commit from master will be sufficient.
    """
    last_commit = None
    repo = None
    try:
        repo = Repo(repo_path)
    except (InvalidGitRepositoryError, NoSuchPathError):
        # repo_path is not a git checkout (or does not exist); continue with repo = None
        repo = None
    if repo:
        try:
            last_commit = repo.commits()[0]
        except AttributeError:
            # Optimised for version 0.3.2.RC1
            # newer GitPython versions have no Repo.commits(); fall back to HEAD
            last_commit = repo.head.commit
    # NOTE(review): when no repository is found this returns the string "None"
    # (str(None)), not None itself -- confirm callers expect that
    return str(last_commit)
def calculate_journal_volume(pub_date, year):
    """
    volume value is based on the pub date year
    pub_date is a python time object
    """
    try:
        return str(pub_date.tm_year - year + 1)
    except (TypeError, AttributeError):
        # pub_date is None / not a time struct, or year is not numeric
        return None
def author_name_from_json(author_json):
    "concatenate an author name from json data"
    author_type = author_json.get("type")
    name = author_json.get("name")
    if author_type == "group" and name:
        # group authors carry the display name directly
        return name
    if author_type == "person" and name and name.get("preferred"):
        return name.get("preferred")
    return None
def text_from_affiliation_elements(department, institution, city, country):
    "format an author affiliation from details"
    parts = [department, institution, city, country]
    # skip missing (falsy) elements, join the rest with commas
    return ", ".join(part for part in parts if part)
import os
from git import Repo, InvalidGitRepositoryError, NoSuchPathError
from elifetools import utils as etoolsutils
def repl(match):
"Convert hex to int to unicode character"
chr_code = int(match.group(1), 16)
return chr(chr_code)
def entity_to_unicode(string):
"""
Quick convert unicode HTML entities to unicode characters
using a regular expression replacement
"""
# Selected character replacements that have been seen
replacements = []
replacements.append((r"α", u"\u03b1"))
replacements.append((r"β", u"\u03b2"))
replacements.append((r"γ", u"\u03b3"))
replacements.append((r"δ", u"\u03b4"))
replacements.append((r"ε", u"\u03b5"))
replacements.append((r"º", u"\u00ba"))
replacements.append((r"ï", u"\u00cf"))
replacements.append((r"“", '"'))
replacements.append((r"”", '"'))
# First, replace numeric entities with unicode
string = re.sub(r"&#x(....);", repl, string)
# Second, replace some specific entities specified in the list
for entity, replacement in replacements:
string = re.sub(entity, replacement, string)
return string
def remove_tag(tag_name, string):
"""
Remove open and close tags - the tags themselves only - using
a non-greedy angle bracket pattern match
"""
if not string:
return string
pattern = re.compile("</?" + tag_name + ".*?>")
string = pattern.sub("", string)
return string
def replace_tags(string, from_tag="i", to_tag="italic"):
"""
Replace tags such as <i> to <italic>
<sup> and <sub> are allowed and do not need to be replaced
This does not validate markup
"""
string = string.replace("<" + from_tag + ">", "<" + to_tag + ">")
string = string.replace("</" + from_tag + ">", "</" + to_tag + ">")
return string
def attr_names(attr_map):
"""return a list of attribute names from the map"""
if attr_map:
return list(sorted(attr_map.keys()))
return []
def attr_string(attr_map):
"""string of tag attributes and values"""
string = ""
if attr_map:
for key, value in sorted(attr_map.items()):
attr = '%s="%s"' % (
key,
etoolsutils.escape_ampersand(value).replace('"', """),
)
string = " ".join([string, attr])
return string
def set_attr_if_value(obj, attr_name, value):
"shorthand method to set object values if the value is not none"
if value is not None:
setattr(obj, attr_name, value)
def is_year_numeric(value):
"True if value is all digits"
if value and re.match("^[0-9]+$", value):
return True
return False
def version_from_xml_filename(filename):
"extract the numeric version from the xml filename"
try:
filename_parts = filename.split(os.sep)[-1].split("-")
except AttributeError:
return None
if len(filename_parts) == 3:
try:
return int(filename_parts[-1].lstrip("v").rstrip(".xml"))
except ValueError:
return None
else:
return None
def get_last_commit_to_master(repo_path="."):
"""
returns the last commit on the master branch. It would be more ideal to get the commit
from the branch we are currently on, but as this is a check mostly to help
with production issues, returning the commit from master will be sufficient.
"""
last_commit = None
repo = None
try:
repo = Repo(repo_path)
except (InvalidGitRepositoryError, NoSuchPathError):
repo = None
if repo:
try:
last_commit = repo.commits()[0]
except AttributeError:
# Optimised for version 0.3.2.RC1
last_commit = repo.head.commit
return str(last_commit)
def calculate_journal_volume(pub_date, year):
"""
volume value is based on the pub date year
pub_date is a python time object
"""
try:
volume = str(pub_date.tm_year - year + 1)
except TypeError:
volume = None
except AttributeError:
volume = None
return volume
def author_name_from_json(author_json):
"concatenate an author name from json data"
author_name = None
if author_json.get("type"):
if author_json.get("type") == "group" and author_json.get("name"):
author_name = author_json.get("name")
elif author_json.get("type") == "person" and author_json.get("name"):
if author_json.get("name").get("preferred"):
author_name = author_json.get("name").get("preferred")
return author_name
def text_from_affiliation_elements(department, institution, city, country):
"format an author affiliation from details"
return ", ".join(
element for element in [department, institution, city, country] if element
) | 0.439026 | 0.254932 |
from threading import Thread
import cv2
import numpy as np
from pyzbar.pyzbar import decode
from typing import List, Any, Union
from security.database.firestore import Firestore
from security.user.user import User
from security.tts.messages import Messages
from security.tts.text_to_speech import TTS
from .util import terminate_thread
class QRRecognizer:
    """
    Detect and validate the QR code shown to the camera by the user.
    This class uses the Singleton design pattern.
    """
    # SINGLETON INSTANCE
    __instance = None

    @staticmethod
    def get_instance(capture):
        """Return the singleton instance, creating it on first use."""
        if QRRecognizer.__instance is None:
            QRRecognizer.__instance = QRRecognizer(capture)
        return QRRecognizer.__instance

    # PRIVATE CONSTRUCTOR
    def __init__(self, capture) -> None:
        # checks if there is an existing instance of QRRecognizer
        if QRRecognizer.__instance:
            raise Exception("Singleton cannot be instantiated more than once")
        else:
            self.__capture = capture                  # video capture source
            self.__tts = TTS.get_instance()           # text-to-speech singleton
            self.__tts_thread = None                  # thread speaking the current message
            self.__threads: List[Thread] = []         # live worker threads
            self.__thread_killers: List[Thread] = []  # threads that terminate workers
            self.__is_running = True
            self.__user: Union[User, None] = None     # authenticated user, if any
            self.__db = Firestore.get_instance()
            QRRecognizer.__instance = self

    # PUBLIC METHODS
    def reset(self) -> None:
        """Prepare the recognizer for another call to run()."""
        # NOTE(review): __threads / __thread_killers are not cleared here --
        # confirm whether stale thread references should also be dropped
        self.__is_running = True
        self.__user = None
        self.__tts_thread = None

    def run(self) -> Union[User, None]:
        """Loop over camera frames until a valid QR code is accepted or 'q' is pressed.

        Returns:
            The authenticated User, or None if the loop ended without one.
        """
        while self.__is_running:
            # gets current frame from the video stream
            _, frame = self.__capture.read()
            # decodes qr code if any
            qr_data = self.__get_qr_data(frame)
            if len(qr_data) == 0:
                cv2.imshow("QR RECOGNITION", frame)
                # creates a killer thread to kill tts running __threads
                killer = Thread(target=terminate_thread, args=(self.__threads, ))
                self.__thread_killers.append(killer)
                killer.start()
            else:
                # frames every detected QR code and keeps its decoded payload
                for obj in qr_data:
                    key = obj.data.decode('utf-8')
                    frame = self.__draw_polygon(frame, obj)
                cv2.imshow("QR RECOGNITION", frame)
                # creates a different thread for database interaction and voice messages
                if len(self.__threads) == 0:
                    self.__tts_thread = Thread(target=self.__check_qr_key, args=(key, ))
                    self.__threads.append(self.__tts_thread)
                    self.__tts_thread.start()
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cv2.destroyAllWindows()
        return self.__user

    # PRIVATE METHODS
    def __get_qr_data(self, input_frame) -> List[Any]:
        """Decode any QR codes in the frame; return [] when decoding fails."""
        try:
            return decode(input_frame)
        # BUGFIX: the bare ``except:`` here also swallowed SystemExit and
        # KeyboardInterrupt; only shield against decoding errors
        except Exception:
            return []

    def __draw_polygon(self, frame, qr_object):
        """Outline the detected QR code on the frame and return the frame."""
        pts = np.array([qr_object.polygon], np.int32)
        pts = pts.reshape((4, 1, 2))
        cv2.polylines(frame, [pts], True, (210, 27, 27), 3)
        return frame

    def __check_qr_key(self, key) -> None:
        """Validate the QR payload against the database and announce the result."""
        user = self.__db.connect(key)
        if user:
            self.__tts.speak(Messages.VALID_QR_CODE)
            self.__is_running = False
            self.__user = user
        else:
            self.__tts.speak(Messages.INVALID_QR_CODE)
import cv2
import numpy as np
from pyzbar.pyzbar import decode
from typing import List, Any, Union
from security.database.firestore import Firestore
from security.user.user import User
from security.tts.messages import Messages
from security.tts.text_to_speech import TTS
from .util import terminate_thread
class QRRecognizer:
"""
Detect and validate a the QR code showing to the camera by the user
This class uses Singleton design pattern
"""
# SINGLETON INSTANCE
__instance = None
@staticmethod
def get_instance(capture):
if QRRecognizer.__instance is None:
QRRecognizer.__instance = QRRecognizer(capture)
return QRRecognizer.__instance
# PRIVATE CONSTRUCTOR
def __init__(self, capture) -> None:
# checks if there is an existing instance of QRRecognizer
if QRRecognizer.__instance:
raise Exception("Singleton cannot be instantiated more than once")
else:
self.__capture = capture
self.__tts = TTS.get_instance()
self.__tts_thread = None
self.__threads: List[Thread] = []
self.__thread_killers: List[Thread] = []
self.__is_running = True
self.__user: Union[User, None] = None
self.__db = Firestore.get_instance()
QRRecognizer.__instance = self
# PUBLIC METHODS
def reset(self) -> None:
self.__is_running = True
self.__user = None
self.__tts_thread = None
def run(self) -> Union[User, None]:
while self.__is_running:
# gets current frame from the video stream
_, frame = self.__capture.read()
# decodes qr code if any
qr_data = self.__get_qr_data(frame)
if len(qr_data) == 0:
cv2.imshow("QR RECOGNITION", frame)
# creates a killer thread to kill tts running __threads
killer = Thread(target=terminate_thread, args=(self.__threads, ))
self.__thread_killers.append(killer)
killer.start()
else:
# decodes QR code
# frames the detected QR code
for obj in qr_data:
key = obj.data.decode('utf-8')
frame = self.__draw_polygon(frame, obj)
cv2.imshow("QR RECOGNITION", frame)
# creates a different thread for database interaction and voice messages
if len(self.__threads) == 0:
self.__tts_thread = Thread(target=self.__check_qr_key, args=(key, ))
self.__threads.append(self.__tts_thread)
self.__tts_thread.start()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
return self.__user
# PRIVATE METHODS
def __get_qr_data(self, input_frame) -> List[Any]:
try:
return decode(input_frame)
except:
return []
def __draw_polygon(self, frame, qr_object):
pts = np.array([qr_object.polygon], np.int32)
pts = pts.reshape((4, 1, 2))
cv2.polylines(frame, [pts], True, (210, 27, 27), 3)
return frame
def __check_qr_key(self, key) -> None:
user = self.__db.connect(key)
if user:
self.__tts.speak(Messages.VALID_QR_CODE)
self.__is_running = False
self.__user = user
else:
self.__tts.speak(Messages.INVALID_QR_CODE) | 0.795539 | 0.192065 |
import logging
import six.moves.urllib as urllib
from algosec.api_clients.base import SoapAPIClient
from algosec.errors import AlgoSecLoginError, AlgoSecAPIError
from algosec.helpers import report_soap_failure
logger = logging.getLogger(__name__)
class FireFlowAPIClient(SoapAPIClient):
    """*FireFlow* SOAP API client.
    Used by initiating and calling its public methods or by sending custom calls using the ``client`` property.
    Client implementation is strictly based on AlgoSec's official API guide.
    Example:
        Using the public methods to send an API call::
            from algosec.api_clients.fire_flow import FireFlowAPIClient
            client = FireFlowAPIClient(ip, username, password)
            change_request = client.get_change_request_by_id(change_request_id)
    Args:
        server_ip (str): IP address of the AlgoSec server.
        user (str): Username used to log in to AlgoSec.
        password (str): The user's password, similar to the one used to log in to the UI.
        verify_ssl (bool): Turn on/off the connection's SSL certificate verification. Defaults to True.
    """
    @property
    def _wsdl_url_path(self):
        # WSDL document that describes the FireFlow SOAP interface
        return 'https://{}/WebServices/FireFlow.wsdl'.format(self.server_ip)

    @property
    def _soap_service_location(self):
        # endpoint the SOAP calls are actually dispatched to
        return 'https://{}/WebServices/WSDispatcher.pl'.format(self.server_ip)

    def _initiate_client(self):
        """Return a connected suds client and save the new session id to ``self._session_id``
        Raises:
            AlgoSecLoginError: If login using the username/password failed.
        Returns:
            suds.client.Client
        """
        client = self._get_soap_client(self._wsdl_url_path, location=self._soap_service_location)
        with report_soap_failure(AlgoSecLoginError):
            # BUGFIX: the password argument had been replaced by a redacted
            # "<PASSWORD>" placeholder (a syntax error); pass the credential
            # stored on the client, alongside self.user, instead
            authenticate = client.service.authenticate(
                username=self.user,
                password=self.password,
            )
        self._session_id = authenticate.sessionId
        return client

    def _create_soap_traffic_line(self, traffic_line):
        """
        Create new FireFlow traffic line based on TrafficLine object.
        Args:
            traffic_line (algosec.models.ChangeRequestTrafficLine): The traffic line to create.
        Returns: Soap traffic line object
        """
        soap_traffic_line = self.client.factory.create('trafficLine')
        soap_traffic_line.action = traffic_line.action.value.api_value
        for source in traffic_line.sources:
            traffic_address = self.client.factory.create('trafficAddress')
            traffic_address.address = source
            soap_traffic_line.trafficSource.append(traffic_address)
        for dest in traffic_line.destinations:
            traffic_address = self.client.factory.create('trafficAddress')
            traffic_address.address = dest
            soap_traffic_line.trafficDestination.append(traffic_address)
        for service in traffic_line.services:
            traffic_service = self.client.factory.create('trafficService')
            traffic_service.service = service
            soap_traffic_line.trafficService.append(traffic_service)
        if traffic_line.applications:
            for application_name in traffic_line.applications:
                traffic_application = self.client.factory.create('trafficApplication')
                traffic_application.application = application_name
                soap_traffic_line.trafficApplication.append(traffic_application)
        return soap_traffic_line

    def create_change_request(
            self,
            subject,
            requestor_name,
            email,
            traffic_lines,
            description="",
            template=None,
    ):
        """Create a new change request.
        Args:
            subject (str): The ticket subject, will be shown on FireFlow.
            requestor_name (str): The ticket creator name, will be shown on FireFlow.
            email (str): The email address of the requestor.
            traffic_lines (list[algosec.models.ChangeRequestTrafficLine]): List of traffic lines each describing its
                sources, destinations and services.
            description (str): description for the ticket, will be shown on FireFlow.
            template (str): When different than None, this template will be passed on to FireFlow to be used
                as the template for the new change request.
        Raises:
            :class:`~algosec.errors.AlgoSecAPIError`: If change request creation failed.
        Returns:
            str: The URL for the newly created change request on FireFlow
        """
        # Create ticket and traffic lines objects
        ticket = self.client.factory.create('ticket')
        ticket.description = description
        ticket.requestor = '{} {}'.format(requestor_name, email)
        ticket.subject = subject
        if template is not None:
            ticket.template = template
        for traffic_line in traffic_lines:
            ticket.trafficLines.append(self._create_soap_traffic_line(traffic_line))
        # Actually create the ticket
        with report_soap_failure(AlgoSecAPIError):
            ticket_added = self.client.service.createTicket(sessionId=self._session_id, ticket=ticket)
        ticket_url = ticket_added.ticketDisplayURL
        # normalize ticket url hostname that is sometimes incorrect from the FireFlow server (which uses it's own
        # internal IP to build this url.
        url = list(urllib.parse.urlsplit(ticket_url))
        url[1] = self.server_ip
        return urllib.parse.urlunsplit(url)

    def get_change_request_by_id(self, change_request_id):
        """Get a change request by its ID.
        Useful for checking the status of a change request you opened through the API.
        Args:
            change_request_id: The ID of the change request to fetch.
        Raises:
            :class:`~algosec.errors.AlgoSecAPIError`: If the change request was not found on the server or another
                error occurred while fetching the change request.
        Returns:
            The change request ticket object.
        """
        with report_soap_failure(AlgoSecAPIError):
            response = self.client.service.getTicket(sessionId=self._session_id, ticketId=change_request_id)
        return response.ticket
import six.moves.urllib as urllib
from algosec.api_clients.base import SoapAPIClient
from algosec.errors import AlgoSecLoginError, AlgoSecAPIError
from algosec.helpers import report_soap_failure
logger = logging.getLogger(__name__)
class FireFlowAPIClient(SoapAPIClient):
"""*FireFlow* SOAP API client.
Used by initiating and calling its public methods or by sending custom calls using the ``client`` property.
Client implementation is strictly based on AlgoSec's official API guide.
Example:
Using the public methods to send an API call::
from algosec.api_clients.fire_flow import FireFlowAPIClient
client = FireFlowAPIClient(ip, username, password)
change_request = client.get_change_request_by_id(change_request_id)
Args:
server_ip (str): IP address of the AlgoSec server.
user (str): Username used to log in to AlgoSec.
password (str): The user's password, similar to the one used to log in to the UI.
verify_ssl (bool): Turn on/off the connection's SSL certificate verification. Defaults to True.
"""
@property
def _wsdl_url_path(self):
return 'https://{}/WebServices/FireFlow.wsdl'.format(self.server_ip)
@property
def _soap_service_location(self):
return 'https://{}/WebServices/WSDispatcher.pl'.format(self.server_ip)
def _initiate_client(self):
"""Return a connected suds client and save the new session id to ``self._session_id``
Raises:
AlgoSecLoginError: If login using the username/password failed.
Returns:
suds.client.Client
"""
client = self._get_soap_client(self._wsdl_url_path, location=self._soap_service_location)
with report_soap_failure(AlgoSecLoginError):
authenticate = client.service.authenticate(
username=self.user,
password=<PASSWORD>,
)
self._session_id = authenticate.sessionId
return client
def _create_soap_traffic_line(self, traffic_line):
"""
Create new FireFlow traffic line based on TrafficLine object.
Args:
traffic_line (algosec.models.ChangeRequestTrafficLine): The traffic line to create.
Returns: Soap traffic line object
"""
soap_traffic_line = self.client.factory.create('trafficLine')
soap_traffic_line.action = traffic_line.action.value.api_value
for source in traffic_line.sources:
traffic_address = self.client.factory.create('trafficAddress')
traffic_address.address = source
soap_traffic_line.trafficSource.append(traffic_address)
for dest in traffic_line.destinations:
traffic_address = self.client.factory.create('trafficAddress')
traffic_address.address = dest
soap_traffic_line.trafficDestination.append(traffic_address)
for service in traffic_line.services:
traffic_service = self.client.factory.create('trafficService')
traffic_service.service = service
soap_traffic_line.trafficService.append(traffic_service)
if traffic_line.applications:
for application_name in traffic_line.applications:
traffic_application = self.client.factory.create('trafficApplication')
traffic_application.application = application_name
soap_traffic_line.trafficApplication.append(traffic_application)
return soap_traffic_line
def create_change_request(
self,
subject,
requestor_name,
email,
traffic_lines,
description="",
template=None,
):
"""Create a new change request.
Args:
subject (str): The ticket subject, will be shown on FireFlow.
requestor_name (str): The ticket creator name, will be shown on FireFlow.
email (str): The email address of the requestor.
traffic_lines (list[algosec.models.ChangeRequestTrafficLine]): List of traffic lines each describing its
sources, destinations and services.
description (str): description for the ticket, will be shown on FireFlow.
template (str): When different than None, this template will be passed on to FireFlow to be used
as the template for the new change requets.
Raises:
:class:`~algosec.errors.AlgoSecAPIError`: If change request creation failed.
Returns:
str: The URL for the newley create change request on FireFlow
"""
# Create ticket and traffic lines objects
ticket = self.client.factory.create('ticket')
ticket.description = description
ticket.requestor = '{} {}'.format(requestor_name, email)
ticket.subject = subject
if template is not None:
ticket.template = template
for traffic_line in traffic_lines:
ticket.trafficLines.append(self._create_soap_traffic_line(traffic_line))
# Actually create the ticket
with report_soap_failure(AlgoSecAPIError):
ticket_added = self.client.service.createTicket(sessionId=self._session_id, ticket=ticket)
ticket_url = ticket_added.ticketDisplayURL
# normalize ticket url hostname that is sometimes incorrect from the FireFlow server (which uses it's own
# internal IP to build this url.
url = list(urllib.parse.urlsplit(ticket_url))
url[1] = self.server_ip
return urllib.parse.urlunsplit(url)
def get_change_request_by_id(self, change_request_id):
"""Get a change request by its ID.
Useful for checking the status of a change request you opened through the API.
Args:
change_request_id: The ID of the change request to fetch.
Raises:
:class:`~algosec.errors.AlgoSecAPIError`: If the change request was not found on the server or another
error occurred while fetching the change request.
Returns:
The change request ticket object.
"""
with report_soap_failure(AlgoSecAPIError):
response = self.client.service.getTicket(sessionId=self._session_id, ticketId=change_request_id)
return response.ticket | 0.763043 | 0.222267 |
import numpy as np
def moving_average(x, window=3, mode='full'):
    '''
    Calculate the moving average over an array.
    Summary
    -------
    This function computes the running mean of an array.
    Padding is performed for the "left" side, not for the "right".
    Parameters
    ----------
    x : array
        Input array.
    window : int
        Window size.
    mode : {'full', 'last'}
        Determines whether the full rolling mean history
        or only its last element is returned.
    Returns
    -------
    running_mean : float or array
        Rolling mean (array for mode='full', scalar for mode='last').
    Raises
    ------
    ValueError
        If mode is neither 'full' nor 'last'.
    '''
    x = np.array(x)
    kernel = np.ones((window,)) / window  # uniform averaging kernel
    if mode == 'full':
        # left-pad with the first value so the output has len(x) entries
        x_padded = np.pad(x, (window-1, 0), mode='constant', constant_values=x[0])
        running_mean = np.convolve(x_padded, kernel, mode='valid')
    elif mode == 'last':
        if x.size >= window:
            running_mean = np.convolve(x[-window:], kernel, mode='valid')[0]
        else:
            # fewer samples than the window: left-pad with the first value
            x_padded = np.pad(x, (window-x.size, 0), mode='constant', constant_values=x[0])
            running_mean = np.convolve(x_padded, kernel, mode='valid')[0]
    else:
        # BUGFIX: an unknown mode previously raised UnboundLocalError on return
        raise ValueError("mode must be 'full' or 'last', got {!r}".format(mode))
    return running_mean
def conv_out_shape(input_shape,
                   kernel_size,
                   stride=1,
                   padding=0,
                   dilation=1,
                   mode='floor'):
    '''
    Calculate the output shape of a convolutional layer.
    Summary
    -------
    This function returns the output tensor shape of a convolutional layer.
    One needs to pass the input shape and all relevant layer properties as arguments.
    The parameter convention of PyTorch's convolutional layer modules is adopted herein,
    e.g. see the documentation of the torch.nn.Conv2d class.
    Parameters
    ----------
    input_shape : int or array-like
        Shape of the layer input tensor.
    kernel_size : int or array-like
        Size of the convolutional kernels.
    stride : int or array-like
        Stride parameter.
    padding : int or array-like
        Padding parameter.
    dilation : int or array-like
        Dilation parameter.
    mode : {'floor', 'ceil'}
        Determines whether to floor or to ceil.
    Returns
    -------
    output_shape : int or tuple
        Shape of the layer output tensor.
    Raises
    ------
    ValueError
        If mode is neither 'floor' nor 'ceil'.
    Notes
    -----
    The same function can be used to determine the output size of pooling layers.
    Though, some care regarding the ceil/floor mode has to be taken.
    PyTorch's default behavior is to floor the output size.
    '''
    input_shape = np.array(input_shape)
    no_dims = input_shape.size
    # broadcast scalar layer parameters to one entry per spatial dimension
    kernel_size = _make_array(kernel_size, no_dims)
    stride = _make_array(stride, no_dims)
    padding = _make_array(padding, no_dims)
    dilation = _make_array(dilation, no_dims)
    # BUGFIX: an unknown mode previously raised UnboundLocalError on return
    if mode == 'floor':
        rounding = np.floor
    elif mode == 'ceil':
        rounding = np.ceil
    else:
        raise ValueError("mode must be 'floor' or 'ceil', got {!r}".format(mode))
    # standard PyTorch formula for the spatial output size of a convolution
    raw = (input_shape + 2*padding - dilation*(kernel_size-1) - 1) / stride + 1
    output_shape = rounding(raw).astype('int')
    if no_dims == 1:
        return int(output_shape)
    return tuple(int(output_shape[i]) for i in range(no_dims))


def _make_array(x, no_dims):
    '''Broadcast a scalar to a length-``no_dims`` array; pass right-sized arrays through.'''
    return np.array(x) if np.size(x) == no_dims else np.array([x for i in range(no_dims)])
def moving_average(x, window=3, mode='full'):
'''
Calculate the moving average over an array.
Summary
-------
This function computes the running mean of an array.
Padding is performed for the "left" side, not for the "right".
Parameters
----------
x : array
Input array.
window : int
Window size.
mode : {'full', 'last'}
Determines whether the full rolling mean history
or only its last element is returned.
Returns
-------
running_mean : float
Rolling mean.
'''
x = np.array(x)
if mode == 'full':
x_padded = np.pad(x, (window-1, 0), mode='constant', constant_values=x[0])
running_mean = np.convolve(x_padded, np.ones((window,))/window, mode='valid')
elif mode == 'last':
if x.size >= window:
running_mean = np.convolve(x[-window:], np.ones((window,))/window, mode='valid')[0]
else:
x_padded = np.pad(x, (window-x.size, 0), mode='constant', constant_values=x[0])
running_mean = np.convolve(x_padded, np.ones((window,))/window, mode='valid')[0]
return running_mean
def conv_out_shape(input_shape,
kernel_size,
stride=1,
padding=0,
dilation=1,
mode='floor'):
'''
Calculate the output shape of a convolutional layer.
Summary
-------
This function returns the output tensor shape of a convolutional layer.
One needs to pass the input shape and all relevant layer properties as arguments.
The parameter convention of PyTorch's convolutional layer modules is adopted herein,
e.g. see the documentation of the torch.nn.Conv2d class.
Parameters
----------
input_shape : int or array-like
Shape of the layer input tensor.
kernel_size : int or array-like
Size of the convolutional kernels.
stride : int or array-like
Stride parameter.
padding : int or array-like
Padding parameter.
dilation : int or array-like
Dilation parameter.
mode : {'floor', 'ceil'}
Determines whether to floor or to ceil.
Returns
-------
output_shape : int or tuple
Shape of the layer output tensor.
Notes
-----
The same function can be used to determine the output size of pooling layers.
Though, some care regarding the ceil/floor mode has to be taken.
PyTorch's default behavior is to floor the output size.
'''
input_shape = np.array(input_shape)
no_dims = input_shape.size
kernel_size = _make_array(kernel_size, no_dims)
stride = _make_array(stride, no_dims)
padding = _make_array(padding, no_dims)
dilation = _make_array(dilation, no_dims)
if mode == 'floor':
output_shape = np.floor((input_shape + 2*padding - dilation*(kernel_size-1) - 1) / stride + 1).astype('int')
elif mode == 'ceil':
output_shape = np.ceil((input_shape + 2*padding - dilation*(kernel_size-1) - 1) / stride + 1).astype('int')
if no_dims == 1:
output_shape = int(output_shape)
if no_dims >= 2:
output_shape = tuple([int(output_shape[i]) for i in range(no_dims)])
return output_shape
def _make_array(x, no_dims):
'''Transform a scalar into an array with equal entries.'''
return np.array(x) if np.size(x) == no_dims else np.array([x for i in range(no_dims)]) | 0.932269 | 0.759894 |
import re
import logging
from bs4 import BeautifulSoup
import requests
from concurrent.futures import ThreadPoolExecutor
def is_subseq(the_seq, the_string):
    """Return a regex match object if the characters of ``the_seq`` occur in
    order (not necessarily contiguously) inside ``the_string``, else None."""
    escaped_chars = (re.escape(ch) for ch in the_seq)
    pattern = re.compile('.*'.join(escaped_chars))
    return pattern.search(the_string)
def is_hero_searched(query, raw_name):
    """Decide whether a hero name matches the query.

    The lowercased name must share its first two characters with the query
    and contain the query as a subsequence; the result is falsy/truthy
    (False, or the match object / None from the subsequence check).
    """
    lowered = raw_name.lower()
    if lowered[0:2] != query[0:2]:
        return False
    return is_subseq(query, lowered)
domain = 'https://dota2.gamepedia.com'
def make_entry(li, query, name):
    """Build a sound entry dict for one <li> element.

    Returns {'src', 'title'} when the item has a <source> tag and its direct
    text contains ``query`` as a case-insensitive subsequence, else None.
    """
    source_el = li.find('source')
    # title = concatenation of the li's direct text nodes (children that
    # carry no tag name)
    title = ''.join(piece for piece in li.contents if not piece.name).strip()
    if not source_el or not is_subseq(query, title.lower()):
        return None
    return {
        'src': source_el['src'],
        'title': f'{name}: {title}',
    }
def collect_sounds(hero, query):
    """Scrape a single hero response page and collect matching sound entries."""
    url, name = hero['url'], hero['name']
    logging.info(f'Scraping {url}...')
    page_request = requests.get(f'{domain}{url}', timeout=5)
    soup = BeautifulSoup(page_request.content, 'html.parser')
    content = soup.find(class_='mw-parser-output')
    # tables contain unrelated markup; remove them before scanning <li> items
    for table in content.find_all('table'):
        table.decompose()
    # lazy pipeline: entries are only built when the list() below consumes them
    candidate_entries = (make_entry(li, query, name) for li in content.find_all('li'))
    matching = (entry for entry in candidate_entries if entry)
    logging.info(f'Scraping {url} done!')
    return list(matching)
# NOTE(review): import-time network call — loading this module requires
# connectivity to fetch the hero index; consider lazy-loading this list.
website_request = requests.get(f'{domain}/Category:Responses', timeout=5)
website_content = BeautifulSoup(website_request.content, 'html.parser')
# Build [{'url': ..., 'name': ...}] from every anchor of the category listing;
# link text such as "Axe/Responses" is trimmed to the part before the slash.
heroes = list(map(
    lambda a: {
        'url': a['href'],
        'name': ''.join(a.contents).split('/')[0].strip(),
    },
    website_content
    .find(class_='mw-category')
    .find_all('a')
))
def scrape(query):
    """Scrape sound entries matching ``query``.

    The query is split as 'hero/response'; everything after the first slash
    is treated as the response filter. Returns at most 50 entries.
    """
    hero_query, *response_query = query.split('/')
    response_query = ''.join(response_query)
    filtered_heroes = list(filter(
        lambda hero: is_hero_searched(hero_query, hero['name']),
        heroes,
    ))
    if len(filtered_heroes) == 0:
        return []
    # NOTE(review): one worker per matching hero, and the executor is never
    # shut down; a `with ThreadPoolExecutor(...)` block would release threads.
    executor = ThreadPoolExecutor(len(filtered_heroes))
    results = executor.map(
        lambda hero: collect_sounds(hero, response_query),
        filtered_heroes,
    )
    # flatten the per-hero result lists and cap the output at 50 entries
    return [
        single_result
        for results_list in results
        for single_result in results_list
    ][0:50] | app/scrape.py | import re
import logging
from bs4 import BeautifulSoup
import requests
from concurrent.futures import ThreadPoolExecutor
def is_subseq(the_seq, the_string):
rgx = re.compile('.*'.join([re.escape(x) for x in the_seq]))
return rgx.search(the_string)
def is_hero_searched(query, raw_name):
name = raw_name.lower()
return name[0:2] == query[0:2] and is_subseq(query, name)
domain = 'https://dota2.gamepedia.com'
def make_entry(li, query, name):
source_el = li.find('source')
title = ''.join(filter(lambda x: not x.name, li.contents)).strip()
if not source_el or not is_subseq(query, title.lower()):
return None
src = source_el['src']
return {
'src': src,
'title': f'{name}: {title}',
}
def collect_sounds(hero, query):
url = hero['url']
name = hero['name']
logging.info(f'Scraping {url}...')
website_request = requests.get(f'{domain}{url}', timeout=5)
website_content = BeautifulSoup(website_request.content, 'html.parser')
page_content = website_content.find(class_='mw-parser-output')
for table in page_content.find_all('table'):
table.decompose()
entries = map(lambda x: make_entry(x, query, name), page_content.find_all('li'))
entries = filter(bool, entries)
logging.info(f'Scraping {url} done!')
return list(entries)
website_request = requests.get(f'{domain}/Category:Responses', timeout=5)
website_content = BeautifulSoup(website_request.content, 'html.parser')
heroes = list(map(
lambda a: {
'url': a['href'],
'name': ''.join(a.contents).split('/')[0].strip(),
},
website_content
.find(class_='mw-category')
.find_all('a')
))
def scrape(query):
hero_query, *response_query = query.split('/')
response_query = ''.join(response_query)
filtered_heroes = list(filter(
lambda hero: is_hero_searched(hero_query, hero['name']),
heroes,
))
if len(filtered_heroes) == 0:
return []
executor = ThreadPoolExecutor(len(filtered_heroes))
results = executor.map(
lambda hero: collect_sounds(hero, response_query),
filtered_heroes,
)
return [
single_result
for results_list in results
for single_result in results_list
][0:50] | 0.342132 | 0.156169 |
import os
import os.path
import sys
import json
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
sentry_sdk.init(
dsn="https://4ccfa35d8c424f2d87cc3aca96611bb6@o447032.ingest.sentry.io/5426444",
integrations=[DjangoIntegration()],
traces_sample_rate=1.0,
# If you wish to associate users to errors (assuming you are using
# django.contrib.auth) you may enable sending PII data.
send_default_pii=True
)
with open('/etc/config.json') as config_file:
config = json.load(config_file)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['www.zedway.app','.zedway.app','172.16.17.32','li2139-241.members.linode.com','localhost','127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'notifications',
'accounts',
'home',
'posts',
]
SITE_ID = 1
LOGIN_URL = '/auth/login/'
LOGIN_REDIRECT_URL = '/'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'
SOCIALACCOUNT_AUTO_SIGNUP = False
#SOCIALACCOUNT_PROVIDERS = {
# 'google': {
# 'APP': {
# 'client_id': '123',
# 'secret': '456',
# 'key': ''
# }
# }
#}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'easy_timezones.middleware.EasyTimezoneMiddleware',
]
ROOT_URLCONF = 'zedway.urls'
# Template engine configuration.
# Fix: 'django.template.context_processors.request' was listed twice in
# context_processors; the duplicate was redundant and is removed.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['/home/zedway/zedway/templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'zedway.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config['DB_NAME'],
'USER': config['DB_USER'],
'PASSWORD': config['DB_PASS'],
'HOST': 'localhost',
'PORT': '',
}
}
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Notifications
# DJANGO_NOTIFICATIONS_CONFIG = {'SOFT_DELETE': True}
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# GEOIP_DATABASE = '/home/samarth/Dev/zedway/zedway/GeoLite2-City.mmdb'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATIC_ROOT = os.path.join(BASE_DIR, 'assets')
# Media Files
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# EMAIL CONFIGURATION
EMAIL_HOST = 'smtpout.secureserver.net'
EMAIL_USE_TLS = True
EMAIL_HOST_USER = config.get('EMAIL')
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
SERVER_EMAIL = EMAIL_HOST_USER
EMAIL_HOST_PASSWORD = config.get('EMAIL_PASS')
EMAIL_PORT = 587
ACCOUNT_EMAIL_VERIFICATION = 'none' | zedway/settings.py | import os
import os.path
import sys
import json
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
sentry_sdk.init(
dsn="https://4ccfa35d8c424f2d87cc3aca96611bb6@o447032.ingest.sentry.io/5426444",
integrations=[DjangoIntegration()],
traces_sample_rate=1.0,
# If you wish to associate users to errors (assuming you are using
# django.contrib.auth) you may enable sending PII data.
send_default_pii=True
)
with open('/etc/config.json') as config_file:
config = json.load(config_file)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['www.zedway.app','.zedway.app','172.16.17.32','li2139-241.members.linode.com','localhost','127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'notifications',
'accounts',
'home',
'posts',
]
SITE_ID = 1
LOGIN_URL = '/auth/login/'
LOGIN_REDIRECT_URL = '/'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'
SOCIALACCOUNT_AUTO_SIGNUP = False
#SOCIALACCOUNT_PROVIDERS = {
# 'google': {
# 'APP': {
# 'client_id': '123',
# 'secret': '456',
# 'key': ''
# }
# }
#}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'easy_timezones.middleware.EasyTimezoneMiddleware',
]
ROOT_URLCONF = 'zedway.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['/home/zedway/zedway/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
],
},
},
]
WSGI_APPLICATION = 'zedway.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config['DB_NAME'],
'USER': config['DB_USER'],
'PASSWORD': config['DB_PASS'],
'HOST': 'localhost',
'PORT': '',
}
}
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Notifications
# DJANGO_NOTIFICATIONS_CONFIG = {'SOFT_DELETE': True}
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# GEOIP_DATABASE = '/home/samarth/Dev/zedway/zedway/GeoLite2-City.mmdb'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATIC_ROOT = os.path.join(BASE_DIR, 'assets')
# Media Files
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# EMAIL CONFIGURATION
EMAIL_HOST = 'smtpout.secureserver.net'
EMAIL_USE_TLS = True
EMAIL_HOST_USER = config.get('EMAIL')
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
SERVER_EMAIL = EMAIL_HOST_USER
EMAIL_HOST_PASSWORD = config.get('EMAIL_PASS')
EMAIL_PORT = 587
ACCOUNT_EMAIL_VERIFICATION = 'none' | 0.237576 | 0.066176 |
from sklearn.metrics import roc_auc_score
import shap
import numpy as np
import skexplain
from skexplain.common.importance_utils import to_skexplain_importance
from tests import TestLR, TestRF
TRUE_RANKINGS = np.array(["X_1", "X_2", "X_3", "X_4", "X_5"], dtype=object)
class TestRankings(TestLR, TestRF):
    """Regression tests for skexplain feature-ranking methods.

    Covers permutation importance (single-/multi-pass, forward/backward),
    grouped permutation importance, ALE variance, SHAP-based importance,
    and conversion of raw scores via ``to_skexplain_importance``.
    Fixtures (self.lr, self.rf, self.X, self.y, ...) come from TestLR/TestRF.
    """
    def test_bad_evaluation_fn(self):
        """An unknown evaluation_fn name must raise with a helpful message."""
        # Make sure the metrics are correct
        explainer = skexplain.ExplainToolkit(
            estimators=self.lr_estimator, X=self.X, y=self.y
        )
        available_scores = ["auc", "auprc", "bss", "mse", "norm_aupdc"]
        with self.assertRaises(Exception) as ex:
            explainer.permutation_importance(
                n_vars=len(self.X.columns), evaluation_fn="bad"
            )
        except_msg = (
            f"evaluation_fn is not set! Available options are {available_scores}"
        )
        self.assertEqual(ex.exception.args[0], except_msg)
    def test_custom_evaluation_fn(self):
        """A custom callable evaluation_fn requires scoring_strategy to be set."""
        # scoring_strategy exception for custom evaluation funcs
        explainer = skexplain.ExplainToolkit(
            estimators=(self.lr_estimator_name, self.lr), X=self.X, y=self.y
        )
        available_scores = ["auc", "auprc", "bss", "mse", "norm_aupdc"]
        with self.assertRaises(Exception) as ex:
            explainer.permutation_importance(
                n_vars=len(self.X.columns),
                evaluation_fn=roc_auc_score,
            )
        except_msg = """
        The scoring_strategy argument is None! If you are using a non-default evaluation_fn
        then scoring_strategy must be set! If the metric is positively-oriented (a higher value is better),
        then set scoring_strategy = "argmin_of_mean" and if it is negatively-oriented-
        (a lower value is better), then set scoring_strategy = "argmax_of_mean"
        """
        self.assertMultiLineEqual(ex.exception.args[0], except_msg)
    def test_shape(self):
        """Result arrays have shape (n_vars, n_permute) for both pass types."""
        # Shape is correct (with bootstrapping)
        explainer = skexplain.ExplainToolkit(
            estimators=(self.lr_estimator_name, self.lr), X=self.X, y=self.y
        )
        n_vars = 3
        n_permute = 8
        results = explainer.permutation_importance(
            n_vars=n_vars, evaluation_fn="mse", n_permute=n_permute
        )
        # shape should be (n_vars_multipass, n_permute)
        self.assertEqual(
            results[f"multipass_scores__{self.lr_estimator_name}"].values.shape,
            (n_vars, n_permute),
        )
        # shape should be (n_vars_singlepass, n_permute)
        self.assertEqual(
            results[f"singlepass_scores__{self.lr_estimator_name}"].values.shape,
            (len(self.X.columns), n_permute),
        )
    def test_correct_rankings(self):
        """All ranking methods must recover TRUE_RANKINGS on the simple data."""
        # rankings are correct for simple case (for multi-pass, single-pass, and ale_variance)
        explainer = skexplain.ExplainToolkit(
            estimators=self.lr_estimator, X=self.X, y=self.y
        )
        ale = explainer.ale(features=self.X.columns, n_bins=10)
        ale_var_results = explainer.ale_variance(
            ale, estimator_names=self.lr_estimator_name
        )
        # TODO: coefficients
        shap_results = explainer.shap(
            shap_kwargs={
                "masker": shap.maskers.Partition(
                    self.X, max_samples=100, clustering="correlation"
                ),
                "algorithm": "auto",
            }
        )
        # Implicit test of the to_sklearn_importance method.
        shap_imp = to_skexplain_importance(
            shap_results[f"shap_values__{self.lr_estimator_name}"].values,
            estimator_name=self.lr_estimator_name,
            feature_names=list(self.X.columns),
            method="shap_sum",
        )
        # Check the single-pass and multi-pass permutation importance (both forward and backward)
        for direction in ["backward", "forward"]:
            results = explainer.permutation_importance(
                n_vars=len(self.X.columns),
                evaluation_fn="mse",
                n_permute=50,
                direction=direction,
                n_jobs=2,
            )
            np.testing.assert_array_equal(
                results[f"multipass_rankings__{self.lr_estimator_name}"].values,
                TRUE_RANKINGS,
            )
            np.testing.assert_array_equal(
                results[f"singlepass_rankings__{self.lr_estimator_name}"].values,
                TRUE_RANKINGS,
            )
        # Check the ALE variance.
        np.testing.assert_array_equal(
            ale_var_results[f"ale_variance_rankings__{self.lr_estimator_name}"].values,
            TRUE_RANKINGS,
        )
        # Check the SHAP.
        np.testing.assert_array_equal(
            shap_imp[f"shap_sum_rankings__{self.lr_estimator_name}"].values,
            TRUE_RANKINGS,
        )
    def test_ale_variance(self):
        """ale_variance validates its inputs and raises informative errors."""
        explainer = skexplain.ExplainToolkit(
            estimators=self.lr_estimator, X=self.X, y=self.y
        )
        ale = explainer.ale(features=self.X.columns, n_bins=10)
        ale_var_results = explainer.ale_variance(ale)
        with self.assertRaises(Exception) as ex_1:
            ale_var_results = explainer.ale_variance(ale=np.array([0, 0]))
        except_msg_1 = """
                                 ale must be an xarray.Dataset,
                                 perferably generated by ExplainToolkit.ale
                                 to be formatted correctly
                                 """
        self.assertMultiLineEqual(ex_1.exception.args[0], except_msg_1)
        with self.assertRaises(Exception) as ex_2:
            ale_var_results = explainer.ale_variance(
                ale, estimator_names=[self.lr_estimator_name, "Fake"]
            )
        except_msg_2 = "ale does not contain values for all the estimator names given!"
        self.assertEqual(ex_2.exception.args[0], except_msg_2)
    def test_grouped_importance(self):
        """Grouped permutation importance: bad perm_method raises; rankings match."""
        explainer = skexplain.ExplainToolkit(
            estimators=self.lr_estimator, X=self.X, y=self.y
        )
        groups = {'group1' : ['X_1', 'X_2'],
                  'group2' : ['X_2', 'X_3'],
                  'group3' : ['X_4', 'X_5'],
                 }
        correct_rankings = np.array(['group1','group2', 'group3'])
        # Catch using a wrong perm_method!
        with self.assertRaises(Exception) as ex:
            results = explainer.grouped_permutation_importance(perm_method='grou',
                                           evaluation_fn='mse',
                                           n_permute=10,
                                           groups=groups,
                                           sample_size=100,
                                           subsample=1.0,
                                           n_jobs=1,
                                           clustering_kwargs={'n_clusters' : 10})
        except_msg = "Invalid perm_method! Available options are 'grouped' and 'grouped_only'"
        self.assertEqual(ex.exception.args[0], except_msg)
        # Simple test that the rankings are correct.
        for perm_method in ['grouped', 'grouped_only']:
            results = explainer.grouped_permutation_importance(perm_method=perm_method,
                                           evaluation_fn='mse',
                                           n_permute=10,
                                           groups=groups,
                                           sample_size=100,
                                           subsample=1.0,
                                           n_jobs=1,
                                           clustering_kwargs={'n_clusters' : 10})
            np.testing.assert_array_equal(results[f'{perm_method}_rankings__{self.lr_estimator_name}'].values,
                                          correct_rankings)
    def test_to_skexplain_importance(self):
        """Raw coefficient/gini scores convert into correctly ranked importances."""
        # Coefficients, Gini to Importance (SHAP is tested above).
        scores = [self.lr.coef_, self.rf.feature_importances_]
        methods = ['coefs', 'gini']
        for imp, method in zip(scores, methods):
            for normalize in [True, False]:
                results = to_skexplain_importance(importances=imp,
                                      estimator_name=self.lr_estimator_name,
                                      feature_names=list(self.X.columns),
                                      method=method,
                                      normalize=normalize)
                # Check that the ranking is correct.
                np.testing.assert_array_equal(
                    results[f"{method}_rankings__{self.lr_estimator_name}"],
                    TRUE_RANKINGS,
                )
if __name__ == "__main__":
unittest.main() | tests/test_feature_ranking.py | from sklearn.metrics import roc_auc_score
import shap
import numpy as np
import skexplain
from skexplain.common.importance_utils import to_skexplain_importance
from tests import TestLR, TestRF
TRUE_RANKINGS = np.array(["X_1", "X_2", "X_3", "X_4", "X_5"], dtype=object)
class TestRankings(TestLR, TestRF):
def test_bad_evaluation_fn(self):
# Make sure the metrics are correct
explainer = skexplain.ExplainToolkit(
estimators=self.lr_estimator, X=self.X, y=self.y
)
available_scores = ["auc", "auprc", "bss", "mse", "norm_aupdc"]
with self.assertRaises(Exception) as ex:
explainer.permutation_importance(
n_vars=len(self.X.columns), evaluation_fn="bad"
)
except_msg = (
f"evaluation_fn is not set! Available options are {available_scores}"
)
self.assertEqual(ex.exception.args[0], except_msg)
def test_custom_evaluation_fn(self):
# scoring_strategy exception for custom evaluation funcs
explainer = skexplain.ExplainToolkit(
estimators=(self.lr_estimator_name, self.lr), X=self.X, y=self.y
)
available_scores = ["auc", "auprc", "bss", "mse", "norm_aupdc"]
with self.assertRaises(Exception) as ex:
explainer.permutation_importance(
n_vars=len(self.X.columns),
evaluation_fn=roc_auc_score,
)
except_msg = """
The scoring_strategy argument is None! If you are using a non-default evaluation_fn
then scoring_strategy must be set! If the metric is positively-oriented (a higher value is better),
then set scoring_strategy = "argmin_of_mean" and if it is negatively-oriented-
(a lower value is better), then set scoring_strategy = "argmax_of_mean"
"""
self.assertMultiLineEqual(ex.exception.args[0], except_msg)
def test_shape(self):
# Shape is correct (with bootstrapping)
explainer = skexplain.ExplainToolkit(
estimators=(self.lr_estimator_name, self.lr), X=self.X, y=self.y
)
n_vars = 3
n_permute = 8
results = explainer.permutation_importance(
n_vars=n_vars, evaluation_fn="mse", n_permute=n_permute
)
# shape should be (n_vars_multipass, n_permute)
self.assertEqual(
results[f"multipass_scores__{self.lr_estimator_name}"].values.shape,
(n_vars, n_permute),
)
# shape should be (n_vars_singlepass, n_permute)
self.assertEqual(
results[f"singlepass_scores__{self.lr_estimator_name}"].values.shape,
(len(self.X.columns), n_permute),
)
def test_correct_rankings(self):
# rankings are correct for simple case (for multi-pass, single-pass, and ale_variance)
explainer = skexplain.ExplainToolkit(
estimators=self.lr_estimator, X=self.X, y=self.y
)
ale = explainer.ale(features=self.X.columns, n_bins=10)
ale_var_results = explainer.ale_variance(
ale, estimator_names=self.lr_estimator_name
)
# TODO: coefficients
shap_results = explainer.shap(
shap_kwargs={
"masker": shap.maskers.Partition(
self.X, max_samples=100, clustering="correlation"
),
"algorithm": "auto",
}
)
# Implicit test of the to_sklearn_importance method.
shap_imp = to_skexplain_importance(
shap_results[f"shap_values__{self.lr_estimator_name}"].values,
estimator_name=self.lr_estimator_name,
feature_names=list(self.X.columns),
method="shap_sum",
)
# Check the single-pass and multi-pass permutation importance (both forward and backward)
for direction in ["backward", "forward"]:
results = explainer.permutation_importance(
n_vars=len(self.X.columns),
evaluation_fn="mse",
n_permute=50,
direction=direction,
n_jobs=2,
)
np.testing.assert_array_equal(
results[f"multipass_rankings__{self.lr_estimator_name}"].values,
TRUE_RANKINGS,
)
np.testing.assert_array_equal(
results[f"singlepass_rankings__{self.lr_estimator_name}"].values,
TRUE_RANKINGS,
)
# Check the ALE variance.
np.testing.assert_array_equal(
ale_var_results[f"ale_variance_rankings__{self.lr_estimator_name}"].values,
TRUE_RANKINGS,
)
# Check the SHAP.
np.testing.assert_array_equal(
shap_imp[f"shap_sum_rankings__{self.lr_estimator_name}"].values,
TRUE_RANKINGS,
)
def test_ale_variance(self):
explainer = skexplain.ExplainToolkit(
estimators=self.lr_estimator, X=self.X, y=self.y
)
ale = explainer.ale(features=self.X.columns, n_bins=10)
ale_var_results = explainer.ale_variance(ale)
with self.assertRaises(Exception) as ex_1:
ale_var_results = explainer.ale_variance(ale=np.array([0, 0]))
except_msg_1 = """
ale must be an xarray.Dataset,
perferably generated by ExplainToolkit.ale
to be formatted correctly
"""
self.assertMultiLineEqual(ex_1.exception.args[0], except_msg_1)
with self.assertRaises(Exception) as ex_2:
ale_var_results = explainer.ale_variance(
ale, estimator_names=[self.lr_estimator_name, "Fake"]
)
except_msg_2 = "ale does not contain values for all the estimator names given!"
self.assertEqual(ex_2.exception.args[0], except_msg_2)
def test_grouped_importance(self):
explainer = skexplain.ExplainToolkit(
estimators=self.lr_estimator, X=self.X, y=self.y
)
groups = {'group1' : ['X_1', 'X_2'],
'group2' : ['X_2', 'X_3'],
'group3' : ['X_4', 'X_5'],
}
correct_rankings = np.array(['group1','group2', 'group3'])
# Catch using a wrong perm_method!
with self.assertRaises(Exception) as ex:
results = explainer.grouped_permutation_importance(perm_method='grou',
evaluation_fn='mse',
n_permute=10,
groups=groups,
sample_size=100,
subsample=1.0,
n_jobs=1,
clustering_kwargs={'n_clusters' : 10})
except_msg = "Invalid perm_method! Available options are 'grouped' and 'grouped_only'"
self.assertEqual(ex.exception.args[0], except_msg)
# Simple test that the rankings are correct.
for perm_method in ['grouped', 'grouped_only']:
results = explainer.grouped_permutation_importance(perm_method=perm_method,
evaluation_fn='mse',
n_permute=10,
groups=groups,
sample_size=100,
subsample=1.0,
n_jobs=1,
clustering_kwargs={'n_clusters' : 10})
np.testing.assert_array_equal(results[f'{perm_method}_rankings__{self.lr_estimator_name}'].values,
correct_rankings)
def test_to_skexplain_importance(self):
# Coefficients, Gini to Importance (SHAP is tested above).
scores = [self.lr.coef_, self.rf.feature_importances_]
methods = ['coefs', 'gini']
for imp, method in zip(scores, methods):
for normalize in [True, False]:
results = to_skexplain_importance(importances=imp,
estimator_name=self.lr_estimator_name,
feature_names=list(self.X.columns),
method=method,
normalize=normalize)
# Check that the ranking is correct.
np.testing.assert_array_equal(
results[f"{method}_rankings__{self.lr_estimator_name}"],
TRUE_RANKINGS,
)
if __name__ == "__main__":
unittest.main() | 0.61855 | 0.500366 |
import cupy as cp
from ArraysCollection import ArraysCollection
import functools
import math
class DirectionalFilterBank():
""" Class that perform a directional filter bank, which means one step of the curvelet transform for one scale.
The procedure was taken from the article the uniform discrete curvelet transform.
"""
def _compute_decimation_factor_from_n_angles(self, n_angles):
""" Fonction that compute the decimation factor i.e. the factor that will divide horizontally the vertical angular
window, and divide vertically the horizontal angular window.
Args:
n_angles (Integer): number of angular window per direction (one direction means horizontal or vertical)
Returns:
Integer: the decimation factor that will divide horizontally the vertical angular window, and divide vertically the
horizontal angular window
"""
self.decimation_factor = 1
while self.decimation_factor * 2 < n_angles:
self.decimation_factor *= 2
return self.decimation_factor
    def T_angle(self,x,y):
        """ Elementwise function that, given x and y pixel coordinates return T(theta(x,y)) with T given in the article

        Maps the angle of each (x, y) point to a piecewise-linear pseudo-angle
        coordinate: y/x on the right quadrant, 2 - x/y on the top, -2 - x/y on
        the bottom, and +/-4 + y/x on the left (sign chosen by the sign of y).

        Args:
            x (array): coordinate along x
            y (array): coordinate along y
        Returns:
            [array]: same dimension of x and y. For each element xi, yi of x and y returns T(theta(xi,yi)).
        """
        result = cp.zeros(x.shape)
        # NOTE: the cp.where calls below overlap on quadrant boundaries
        # (e.g. x == y); later calls overwrite earlier ones there.
        # The + 1e-18 terms guard the divisions against x == 0 / y == 0.
        result = cp.where(x >= abs(y), y/(x+ 1e-18), result)
        result = cp.where(y >= abs(x), 2 - x/(y+ 1e-18), result)
        result = cp.where(y <= - abs(x), -2 - x/(y+ 1e-18), result)
        result = cp.where(x <= - abs(y), (y>=0) * ( 4 + y/(x+ 1e-18)) \
                                         + (y< 0) * ( -4 + y/(x+ 1e-18))
                          , result
                          )
        # the origin has no well-defined angle; map it to 0 explicitly
        result = cp.where(cp.logical_and(x == 0, y == 0), 0, result)
        return result
    def _get_frame_functions(self, n_angles, nu_a = 0.3, nu_b = 0.2):
        """ Function that setup the frame function given in the article. The name are the same
        Args:
            n_angles (Integer): number of angular window per direction (one direction means horizontal or vertical)
            nu_a (float, optional): smoothing factor for concentric windows. Defaults to 0.3.
            nu_b (float, optional): smoothing factor for angular windows. Defaults to 0.2.
        Returns:
            function, function: the windows function corresponding to the low pass filter and the angular filters
        """
        # beta: smooth step in [0, 1] built from a 7th-order polynomial
        # transition (equivalent closed form of the commented polyval below)
        poly = lambda t: 1/2 + t * (35/32 + t**2 * (-35/32 + t**2 * (21/32 - 5/32 * t**2)))
        #cp.polyval([- 5/32, 0, 21/32, 0, -35/32, 0, 35/32,1/2], t)
        beta_squared = lambda t: cp.where(cp.abs(t) < 1, poly(t), (t > 0).astype(float))
        safe_beta_squared = lambda t: cp.clip(beta_squared(t), 0, 1) # when rounding error makes values out of [0,1]
        beta = lambda t : cp.sqrt(safe_beta_squared(t))
        # radial windows: w0 is the low-pass window, w1 the band-pass window;
        # the sqrt(1 - w0**2) factor keeps their squared sum bounded
        w1_tilda = lambda t : beta((1 - abs(t))/nu_a)
        w0_tilda = lambda t : w1_tilda((2 * (1 + nu_a)) * t)
        w0 = lambda cx, cy : w0_tilda(cx)* w0_tilda(cy)
        w1 = lambda cx, cy : cp.sqrt(1 - w0(cx, cy) ** 2) * w1_tilda(cx) * w1_tilda(cy)
        # angular windows of width 2/n_angles in the T(theta) pseudo-angle
        # coordinate, smoothed over a margin of width_angle * nu_b
        width_angle = 2/n_angles
        denominator = width_angle * nu_b
        v1_tilda = lambda t: beta( ((width_angle - 1) - t)/ denominator ) \
                             * beta( ( t + 1 ) / denominator )
        v_tilda = lambda t, idx_angle : v1_tilda( t - width_angle * idx_angle )
        v = lambda cx,cy, idx_angle : v_tilda(self.T_angle(cx,cy), idx_angle)
        # u_tilda: curvelet frame window = radial band-pass x angular window
        u_tilda = lambda cx, cy, idx_angle: w1(cx, cy) * v(cx,cy, idx_angle)
        # expose intermediates on self (used e.g. by _compute_angular_filters)
        self.beta = beta
        self.w0 = w0
        self.w1 = w1
        self.v_tilda = v_tilda
        self.v = v
        self.u_tilda = u_tilda
        return self.w0, self.u_tilda
def _compute_angular_filters(self,size_image, n_angles, border):
    """ Precompute the filters used by the filter bank.
    Args:
        size_image (Integer): the size of the image that will be given as input to the transform
        n_angles (Integer): number of angular windows per direction (one direction means horizontal or vertical)
        border (str): "null" or "toric". Depending on the hypothesis made on extrapolation outside borders.
    Returns:
        array: a 3D array stacking 2*n_angles angular filters followed by the
        low-pass filter, each of shape size_image x size_image.
    Raises:
        ValueError: if ``border`` is neither "toric" nor "null".
    """
    # Normalized frequency grid in [-1, 1), fftshift'ed to match cp.fft's layout.
    graduation = cp.arange(- size_image // 2, size_image // 2)
    x,y = cp.meshgrid(graduation, graduation, indexing = 'ij')
    x = x / (size_image // 2)
    y = y / (size_image // 2)
    x = cp.fft.fftshift(x)
    y = cp.fft.fftshift(y)
    self.lowpass_filter = cp.expand_dims(self.w0(x,y), axis = 0)
    if border == "toric":
        # Periodize each angular window by summing its translates over the
        # 3x3 grid of period-2 shifts (toric boundary hypothesis).
        ang_frame_func = lambda idx_angle: cp.sum(
            cp.array(
                [
                    [ self.u_tilda(x + px,y + py, idx_angle) for px in [-2,0,2] ]
                    for py in [-2,0,2]
                ]
            ),
            axis = (0,1)
        )
        self.angular_filters = cp.array(
            [
                ang_frame_func(idx_angle)
                for idx_angle in range(n_angles*2)
            ]
        )
    elif border == "null":
        self.angular_filters = cp.array( [ self.u_tilda(x ,y , idx_angle)
                                           for idx_angle in range(n_angles*2)
                                         ]
                                       )
    else:
        # Fail early with a clear message instead of hitting an
        # AttributeError on self.angular_filters below.
        raise ValueError("border must be 'toric' or 'null', got %r" % (border,))
    self.filters = cp.concatenate( ( self.angular_filters, self.lowpass_filter ), axis = 0 )
    return self.filters
def __init__(self, size_image, n_angles, nu_a = 0.3, nu_b = 0.2, border="null"):
    """ Initialize the transform with its hyper parameters.
    Args:
        size_image (Integer): the size of the image that will be given as input to the transform
        n_angles (Integer): number of angular windows per direction (one direction means horizontal or vertical)
        nu_a (float, optional): smoothing factor for concentric windows. Defaults to 0.3.
        nu_b (float, optional): smoothing factor for angular windows. Defaults to 0.2.
        border (str): "null" or "toric". Depending on the hypothesis made on extrapolation outside borders.
    """
    self.n_angles = n_angles
    self.nu_a = nu_a
    self.nu_b = nu_b
    self.border = border
    self._compute_decimation_factor_from_n_angles(n_angles)
    # Bug fix: forward the caller's smoothing factors; previously this call
    # hard-coded nu_a = 0.3, nu_b = 0.2, silently ignoring the parameters.
    self._get_frame_functions(n_angles, nu_a = nu_a, nu_b = nu_b)
    self._compute_angular_filters(size_image, n_angles, border)
def _decimation(self,arr, coef, axis):
    """ Perform a time/spatial decimation in the frequency domain: split the
    array into `coef` equal slices along `axis` and add them elementwise.
    Args:
        arr (array): input data
        coef (Integer): decimation factor (number of slices to fold together)
        axis (Integer): axis that determines the direction of decimation
    Returns:
        array: the decimated array (1/coef of the original extent along `axis`)
    """
    slices = cp.split(arr, coef, axis = axis)
    accumulated = slices[0]
    for piece in slices[1:]:
        accumulated = accumulated + piece
    return accumulated
def __call__(self,image):
    """ Perform the directional filter bank of the image as described in the article.
    Can be applied to a batch of images; the batch can have any shape, and the
    outputs are batched the same way.
    Args:
        image (array): input data of dimension (n1 x n2 x ... ) x n x n. n1 x ... are the dimension of the batch. and n is the size of the square image
    Returns:
        tuple : Three arrays:
            - (n1 x n2 x ... ) x 1 x 1 x (n/2) x (n/2) : the low frequencies of the directional filter bank
            - (n1 x n2 x ... ) x n_angle x 2 x nk x (n/2) : the vertical frequencies of the directional filter bank
            - (n1 x n2 x ... ) x n_angle x 2 x (n/2) x nk : the horizontal frequencies of the directional filter bank
    """
    fft = cp.fft.fft2(image, norm = "ortho")
    ndims_image = len(fft.shape)
    ndims_filter = 3
    # Axis bookkeeping: filters are stacked on axis_filter, and the real/imag
    # parts will be stacked on axis_real_imag (both just after the batch dims).
    axis_filter = ndims_image - 2
    axis_real_imag = axis_filter + 1
    # Broadcast the precomputed filter stack over the batch dimensions.
    expanded_filters = self.filters
    for _ in range(axis_filter):
        expanded_filters = cp.expand_dims(expanded_filters, axis = 0)
    fft = cp.expand_dims(fft, axis = axis_filter)
    filtered_fft = fft * expanded_filters
    filtered_fft = cp.expand_dims( filtered_fft, axis_real_imag )
    # Filter stack layout (see _compute_angular_filters): n_angles vertical
    # windows, n_angles horizontal windows, then the single low-pass filter.
    vdirectional_filtered, hdirectional_filtered, lowfreq_filtered = \
        cp.split( filtered_fft,
                  [self.n_angles, 2* self.n_angles],
                  axis = axis_filter
                )
    # Decimate each band in the frequency domain (see _decimation).
    lowfreq_filtered = self._decimation(lowfreq_filtered, 2 , -1)
    lowfreq_filtered = self._decimation(lowfreq_filtered, 2 , -2)
    vdirectional_filtered = self._decimation(vdirectional_filtered, 2, -2)
    vdirectional_filtered = self._decimation(vdirectional_filtered, self.decimation_factor , -1)
    hdirectional_filtered = self._decimation(hdirectional_filtered, self.decimation_factor , -2)
    hdirectional_filtered = self._decimation(hdirectional_filtered, 2 , -1)
    hdirectional_filtered = cp.fft.ifft2(hdirectional_filtered, norm = "ortho")
    vdirectional_filtered = cp.fft.ifft2(vdirectional_filtered, norm = "ortho")
    lowfreq_filtered = cp.fft.ifft2(lowfreq_filtered, norm = "ortho")
    # Stack real and imaginary parts along axis_real_imag; the sqrt(2) scaling
    # is undone in reconstruction().
    hdirectional_filtered = cp.concatenate( ( hdirectional_filtered.real,
                                              hdirectional_filtered.imag
                                            ),
                                            axis = axis_real_imag
                                          )
    vdirectional_filtered = cp.concatenate( ( vdirectional_filtered.real,
                                              vdirectional_filtered.imag
                                            ),
                                            axis = axis_real_imag
                                          )
    hdirectional_filtered = hdirectional_filtered * math.sqrt(2)
    vdirectional_filtered = vdirectional_filtered * math.sqrt(2)
    # Only the real part of the low-pass band is kept.
    lowfreq_filtered = lowfreq_filtered.real
    return (lowfreq_filtered, vdirectional_filtered, hdirectional_filtered)
def reconstruction(self, lowfreq_filtered, vdirectional_filtered, hdirectional_filtered):
    """ Perform the inverse directional filter bank as described in the article.
    Can be applied to a batch of transforms; the batch can have any shape.
    Args:
        lowfreq_filtered ([array]): (n1 x n2 x ... ) x 1 x 1 x (n/2) x (n/2) : the low frequencies of the directional filter bank
        vdirectional_filtered ([array]): (n1 x n2 x ... ) x n_angle x 2 x nk x (n/2) : the vertical frequencies of the directional filter bank
        hdirectional_filtered ([array]): (n1 x n2 x ... ) x n_angle x 2 x (n/2) x nk : the horizontal frequencies of the directional filter bank
    Returns:
        [array]: reconstructed image from the transform of size (n1 x n2 x ... ) x n x n
    """
    ndims_image = len(lowfreq_filtered.shape) - 2
    axis_filter = ndims_image - 2
    axis_real_imag = axis_filter + 1
    # Broadcast the filter stack over the batch dimensions (mirrors __call__).
    expanded_filters = self.filters
    for _ in range(axis_filter):
        expanded_filters = cp.expand_dims(expanded_filters, axis = 0)
    # Rebuild complex coefficients from the stacked real/imag parts.
    get_real_part = lambda arr: cp.take(arr, 0, axis = axis_real_imag)
    get_imag_part = lambda arr: cp.take(arr, 1, axis = axis_real_imag)
    to_complex = lambda arr: get_real_part(arr) + 1j * get_imag_part(arr)
    lowfreq_filtered = cp.fft.fft2(lowfreq_filtered, norm = "ortho")
    lowfreq_filtered = cp.squeeze(lowfreq_filtered, axis = axis_real_imag)
    # Undo the sqrt(2) scaling applied in __call__.
    hdirectional_filtered = cp.fft.fft2( to_complex(hdirectional_filtered), norm = "ortho" ) /math.sqrt(2)
    vdirectional_filtered = cp.fft.fft2( to_complex(vdirectional_filtered), norm = "ortho") /math.sqrt(2)
    # Tiling in the frequency domain undoes the decimation performed in __call__.
    lowfreq_filtered = cp.tile(lowfreq_filtered, [1] * (ndims_image - 1) + [2,2])
    hdirectional_filtered = cp.tile( hdirectional_filtered, [1] * (ndims_image - 1) + [self.decimation_factor,2] )
    vdirectional_filtered = cp.tile( vdirectional_filtered, [1] * (ndims_image - 1) + [2,self.decimation_factor] )
    # Reassemble the full stack in the same order as the filters, then weight
    # each band by its filter.
    filtered_fft = cp.concatenate((vdirectional_filtered, hdirectional_filtered, lowfreq_filtered), axis = axis_filter)
    filtered_fft = filtered_fft * expanded_filters
    hf_filtered, lowfreq_filtered = cp.split(filtered_fft, [2*self.n_angles], axis = axis_filter)
    lowfreq_filtered = cp.squeeze(lowfreq_filtered, axis = axis_filter)
    hf_filtered = cp.sum( hf_filtered, axis = axis_filter)
    # Add the conjugate of the frequency-reversed spectrum (flip + roll keeps
    # the DC bin aligned) — presumably restoring Hermitian symmetry so the
    # inverse FFT is (close to) real; .real below drops any residue.
    hf_filtered_flipped = cp.flip(hf_filtered, axis =(-1))
    hf_filtered_flipped = cp.roll(hf_filtered_flipped, 1, axis =(-1))
    hf_filtered_flipped = cp.flip(hf_filtered_flipped, axis =(-2))
    hf_filtered_flipped = cp.roll(hf_filtered_flipped, 1, axis =(-2))
    hf_filtered = hf_filtered + cp.conj(hf_filtered_flipped)
    return cp.fft.ifft2(hf_filtered + lowfreq_filtered, norm = "ortho").real
class CurveletsOperator():
    """ Curvelet transform operator, following the article on the uniform
    discrete curvelet transform. Holds one DirectionalFilterBank per scale,
    ordered from the finest scale to the coarsest.
    """
    def __init__(self, size_image, nums_angles, nu_a = 0.3, nu_b = 0.2):
        """ Initialize the transform with its hyper parameters.
        Args:
            size_image (Integer): the size of the square images given to the transform
            nums_angles (List[Integer]): number of angular windows per direction,
                given for each scale from the coarsest to the finest
            nu_a (float, optional): smoothing factor for concentric windows. Defaults to 0.3.
            nu_b (float, optional): smoothing factor for angular windows. Defaults to 0.2.
        """
        self.nums_angles = list(nums_angles)
        self._directional_filter_banks = []
        current_size = size_image
        current_border = "toric"
        # Build banks from finest to coarsest; only the finest scale uses the
        # toric border hypothesis, all coarser scales use "null".
        for num_angle in reversed(self.nums_angles):
            self._directional_filter_banks.append(
                DirectionalFilterBank(current_size, num_angle, nu_a, nu_b, current_border)
            )
            current_size = current_size/2
            current_border = "null"
    def __call__(self, image):
        """ Compute the curvelet transform of a batch of images.
        Args:
            image (array): a batch of images of size (n1 x n2 x ... ) x n x n
        Returns:
            [List(array)]: elements of the transform: u0 (lowest frequencies)
                followed by the angular windows u_js, j the scale and s the
                direction, each of size (n1 x n2 x ... ) x na x nb x nc x nd.
        """
        bands = [cp.expand_dims(image, axis = (-4,-3))]
        # Peel scales from finest to coarsest: each bank replaces the current
        # low-frequency band by its (lowpass, vertical, horizontal) triple.
        for bank in self._directional_filter_banks:
            current_low = cp.squeeze(bands[0], axis = (-4,-3))
            bands = list(bank(current_low)) + bands[1:]
        return ArraysCollection(bands)
    def inverse(self, transform):
        """ Compute the inverse curvelet transform.
        Args:
            transform (List(array)): a transform produced by __call__ (must not
                be built from scratch).
        Returns:
            image (array): a batch of images of size (n1 x n2 x ... ) x n x n
        """
        bands = transform
        # Merge scales back from coarsest to finest; each step folds the first
        # three bands into a new low-frequency band.
        for bank in reversed(self._directional_filter_banks):
            merged = bank.reconstruction(bands[0], bands[1], bands[2])
            bands = [cp.expand_dims(merged, axis = (-4,-3))] + list(bands[3:])
        return cp.squeeze(bands[0], axis = (-4,-3))
from ArraysCollection import ArraysCollection
import functools
import math
class DirectionalFilterBank():
    """ Perform a directional filter bank, i.e. one scale step of the curvelet
    transform. The procedure follows the article on the uniform discrete
    curvelet transform.
    """
    def _compute_decimation_factor_from_n_angles(self, n_angles):
        """ Compute the decimation factor: the largest power of two strictly
        smaller than n_angles (at least 1). It divides horizontally the
        vertical angular windows and vertically the horizontal ones.
        Args:
            n_angles (Integer): number of angular windows per direction (one direction means horizontal or vertical)
        Returns:
            Integer: the decimation factor (also stored on self)
        """
        self.decimation_factor = 1
        while self.decimation_factor * 2 < n_angles:
            self.decimation_factor *= 2
        return self.decimation_factor
    def T_angle(self,x,y):
        """ Elementwise function: for pixel coordinates (x, y) return
        T(theta(x, y)), with T as defined in the article (one piecewise
        ratio per quadrant).
        Args:
            x (array): coordinate along x
            y (array): coordinate along y
        Returns:
            [array]: same shape as x and y.
        """
        result = cp.zeros(x.shape)
        # The 1e-18 offsets avoid division by zero on the coordinate axes.
        result = cp.where(x >= abs(y), y/(x+ 1e-18), result)
        result = cp.where(y >= abs(x), 2 - x/(y+ 1e-18), result)
        result = cp.where(y <= - abs(x), -2 - x/(y+ 1e-18), result)
        result = cp.where(x <= - abs(y), (y>=0) * ( 4 + y/(x+ 1e-18)) \
                                         + (y< 0) * ( -4 + y/(x+ 1e-18))
                          , result
                          )
        result = cp.where(cp.logical_and(x == 0, y == 0), 0, result)
        return result
    def _get_frame_functions(self, n_angles, nu_a = 0.3, nu_b = 0.2):
        """ Set up the frame (window) functions from the article; the names
        follow the article's notation.
        Args:
            n_angles (Integer): number of angular windows per direction (one direction means horizontal or vertical)
            nu_a (float, optional): smoothing factor for concentric windows. Defaults to 0.3.
            nu_b (float, optional): smoothing factor for angular windows. Defaults to 0.2.
        Returns:
            function, function: the window functions for the low pass filter
            (w0) and the angular filters (u_tilda); both also stored on self.
        """
        # Equivalent to cp.polyval([-5/32, 0, 21/32, 0, -35/32, 0, 35/32, 1/2], t)
        poly = lambda t: 1/2 + t * (35/32 + t**2 * (-35/32 + t**2 * (21/32 - 5/32 * t**2)))
        # Smooth step: poly(t) inside (-1, 1); 0 for t <= -1, 1 for t >= 1.
        beta_squared = lambda t: cp.where(cp.abs(t) < 1, poly(t), (t > 0).astype(float))
        safe_beta_squared = lambda t: cp.clip(beta_squared(t), 0, 1) # when rounding error makes values out of [0,1]
        beta = lambda t : cp.sqrt(safe_beta_squared(t))
        # Concentric (radial) windows, smoothed by nu_a.
        w1_tilda = lambda t : beta((1 - abs(t))/nu_a)
        w0_tilda = lambda t : w1_tilda((2 * (1 + nu_a)) * t)
        w0 = lambda cx, cy : w0_tilda(cx)* w0_tilda(cy)
        w1 = lambda cx, cy : cp.sqrt(1 - w0(cx, cy) ** 2) * w1_tilda(cx) * w1_tilda(cy)
        # Angular windows: each spans an arc of width 2/n_angles in T(theta)
        # coordinates, smoothed by nu_b.
        width_angle = 2/n_angles
        denominator = width_angle * nu_b
        v1_tilda = lambda t: beta( ((width_angle - 1) - t)/ denominator ) \
                             * beta( ( t + 1 ) / denominator )
        v_tilda = lambda t, idx_angle : v1_tilda( t - width_angle * idx_angle )
        v = lambda cx,cy, idx_angle : v_tilda(self.T_angle(cx,cy), idx_angle)
        # Angular frame function: high-pass ring modulated by the angular window.
        u_tilda = lambda cx, cy, idx_angle: w1(cx, cy) * v(cx,cy, idx_angle)
        self.beta = beta
        self.w0 = w0
        self.w1 = w1
        self.v_tilda = v_tilda
        self.v = v
        self.u_tilda = u_tilda
        return self.w0, self.u_tilda
    def _compute_angular_filters(self,size_image, n_angles, border):
        """ Precompute the filters used by the filter bank.
        Args:
            size_image (Integer): the size of the image that will be given as input to the transform
            n_angles (Integer): number of angular windows per direction (one direction means horizontal or vertical)
            border (str): "null" or "toric". Depending on the hypothesis made on extrapolation outside borders.
        Returns:
            array: a 3D array stacking 2*n_angles angular filters followed by
            the low-pass filter, each of shape size_image x size_image.
        Raises:
            ValueError: if ``border`` is neither "toric" nor "null".
        """
        # Normalized frequency grid in [-1, 1), fftshift'ed to match cp.fft's layout.
        graduation = cp.arange(- size_image // 2, size_image // 2)
        x,y = cp.meshgrid(graduation, graduation, indexing = 'ij')
        x = x / (size_image // 2)
        y = y / (size_image // 2)
        x = cp.fft.fftshift(x)
        y = cp.fft.fftshift(y)
        self.lowpass_filter = cp.expand_dims(self.w0(x,y), axis = 0)
        if border == "toric":
            # Periodize each angular window by summing its translates over the
            # 3x3 grid of period-2 shifts (toric boundary hypothesis).
            ang_frame_func = lambda idx_angle: cp.sum(
                cp.array(
                    [
                        [ self.u_tilda(x + px,y + py, idx_angle) for px in [-2,0,2] ]
                        for py in [-2,0,2]
                    ]
                ),
                axis = (0,1)
            )
            self.angular_filters = cp.array(
                [
                    ang_frame_func(idx_angle)
                    for idx_angle in range(n_angles*2)
                ]
            )
        elif border == "null":
            self.angular_filters = cp.array( [ self.u_tilda(x ,y , idx_angle)
                                               for idx_angle in range(n_angles*2)
                                             ]
                                           )
        else:
            # Fail early instead of hitting an AttributeError below.
            raise ValueError("border must be 'toric' or 'null', got %r" % (border,))
        self.filters = cp.concatenate( ( self.angular_filters, self.lowpass_filter ), axis = 0 )
        return self.filters
    def __init__(self, size_image, n_angles, nu_a = 0.3, nu_b = 0.2, border="null"):
        """ Initialize the transform with its hyper parameters.
        Args:
            size_image (Integer): the size of the image that will be given as input to the transform
            n_angles (Integer): number of angular windows per direction (one direction means horizontal or vertical)
            nu_a (float, optional): smoothing factor for concentric windows. Defaults to 0.3.
            nu_b (float, optional): smoothing factor for angular windows. Defaults to 0.2.
            border (str): "null" or "toric". Depending on the hypothesis made on extrapolation outside borders.
        """
        self.n_angles = n_angles
        self.nu_a = nu_a
        self.nu_b = nu_b
        self.border = border
        self._compute_decimation_factor_from_n_angles(n_angles)
        # Bug fix: forward the caller's smoothing factors; previously this
        # call hard-coded nu_a = 0.3, nu_b = 0.2, ignoring the parameters.
        self._get_frame_functions(n_angles, nu_a = nu_a, nu_b = nu_b)
        self._compute_angular_filters(size_image, n_angles, border)
    def _decimation(self,arr, coef, axis):
        """ Perform a time/spatial decimation in the frequency domain: split
        the array into `coef` equal slices along `axis` and add them.
        Args:
            arr (array): input data
            coef (Integer): decimation factor
            axis (Integer): axis that determines the direction of decimation
        Returns:
            array: the decimated array
        """
        return functools.reduce( lambda a,b : a+b,
                                 cp.split( arr ,
                                           coef ,
                                           axis = axis
                                         )
                               )
    def __call__(self,image):
        """ Perform the directional filter bank of the image as described in
        the article. Can be applied to a batch of images of any batch shape.
        Args:
            image (array): input data of dimension (n1 x n2 x ... ) x n x n. n1 x ... are the dimension of the batch. and n is the size of the square image
        Returns:
            tuple : Three arrays:
                - (n1 x n2 x ... ) x 1 x 1 x (n/2) x (n/2) : the low frequencies of the directional filter bank
                - (n1 x n2 x ... ) x n_angle x 2 x nk x (n/2) : the vertical frequencies of the directional filter bank
                - (n1 x n2 x ... ) x n_angle x 2 x (n/2) x nk : the horizontal frequencies of the directional filter bank
        """
        fft = cp.fft.fft2(image, norm = "ortho")
        ndims_image = len(fft.shape)
        # Filters are stacked on axis_filter, real/imag parts on axis_real_imag.
        axis_filter = ndims_image - 2
        axis_real_imag = axis_filter + 1
        # Broadcast the precomputed filter stack over the batch dimensions.
        expanded_filters = self.filters
        for _ in range(axis_filter):
            expanded_filters = cp.expand_dims(expanded_filters, axis = 0)
        fft = cp.expand_dims(fft, axis = axis_filter)
        filtered_fft = fft * expanded_filters
        filtered_fft = cp.expand_dims( filtered_fft, axis_real_imag )
        # Stack layout: n_angles vertical, n_angles horizontal, then 1 low-pass.
        vdirectional_filtered, hdirectional_filtered, lowfreq_filtered = \
            cp.split( filtered_fft,
                      [self.n_angles, 2* self.n_angles],
                      axis = axis_filter
                    )
        # Decimate each band in the frequency domain.
        lowfreq_filtered = self._decimation(lowfreq_filtered, 2 , -1)
        lowfreq_filtered = self._decimation(lowfreq_filtered, 2 , -2)
        vdirectional_filtered = self._decimation(vdirectional_filtered, 2, -2)
        vdirectional_filtered = self._decimation(vdirectional_filtered, self.decimation_factor , -1)
        hdirectional_filtered = self._decimation(hdirectional_filtered, self.decimation_factor , -2)
        hdirectional_filtered = self._decimation(hdirectional_filtered, 2 , -1)
        hdirectional_filtered = cp.fft.ifft2(hdirectional_filtered, norm = "ortho")
        vdirectional_filtered = cp.fft.ifft2(vdirectional_filtered, norm = "ortho")
        lowfreq_filtered = cp.fft.ifft2(lowfreq_filtered, norm = "ortho")
        # Stack real and imaginary parts along axis_real_imag; the sqrt(2)
        # scaling is undone in reconstruction().
        hdirectional_filtered = cp.concatenate( ( hdirectional_filtered.real,
                                                  hdirectional_filtered.imag
                                                ),
                                                axis = axis_real_imag
                                              )
        vdirectional_filtered = cp.concatenate( ( vdirectional_filtered.real,
                                                  vdirectional_filtered.imag
                                                ),
                                                axis = axis_real_imag
                                              )
        hdirectional_filtered = hdirectional_filtered * math.sqrt(2)
        vdirectional_filtered = vdirectional_filtered * math.sqrt(2)
        # Only the real part of the low-pass band is kept.
        lowfreq_filtered = lowfreq_filtered.real
        return (lowfreq_filtered, vdirectional_filtered, hdirectional_filtered)
    def reconstruction(self, lowfreq_filtered, vdirectional_filtered, hdirectional_filtered):
        """ Perform the inverse directional filter bank as described in the
        article. Can be applied to a batch of transforms of any batch shape.
        Args:
            lowfreq_filtered ([array]): (n1 x n2 x ... ) x 1 x 1 x (n/2) x (n/2) : the low frequencies of the directional filter bank
            vdirectional_filtered ([array]): (n1 x n2 x ... ) x n_angle x 2 x nk x (n/2) : the vertical frequencies of the directional filter bank
            hdirectional_filtered ([array]): (n1 x n2 x ... ) x n_angle x 2 x (n/2) x nk : the horizontal frequencies of the directional filter bank
        Returns:
            [array]: reconstructed image from the transform of size (n1 x n2 x ... ) x n x n
        """
        ndims_image = len(lowfreq_filtered.shape) - 2
        axis_filter = ndims_image - 2
        axis_real_imag = axis_filter + 1
        # Broadcast the filter stack over the batch dimensions (mirrors __call__).
        expanded_filters = self.filters
        for _ in range(axis_filter):
            expanded_filters = cp.expand_dims(expanded_filters, axis = 0)
        # Rebuild complex coefficients from the stacked real/imag parts.
        get_real_part = lambda arr: cp.take(arr, 0, axis = axis_real_imag)
        get_imag_part = lambda arr: cp.take(arr, 1, axis = axis_real_imag)
        to_complex = lambda arr: get_real_part(arr) + 1j * get_imag_part(arr)
        lowfreq_filtered = cp.fft.fft2(lowfreq_filtered, norm = "ortho")
        lowfreq_filtered = cp.squeeze(lowfreq_filtered, axis = axis_real_imag)
        # Undo the sqrt(2) scaling applied in __call__.
        hdirectional_filtered = cp.fft.fft2( to_complex(hdirectional_filtered), norm = "ortho" ) /math.sqrt(2)
        vdirectional_filtered = cp.fft.fft2( to_complex(vdirectional_filtered), norm = "ortho") /math.sqrt(2)
        # Tiling in the frequency domain undoes the decimation from __call__.
        lowfreq_filtered = cp.tile(lowfreq_filtered, [1] * (ndims_image - 1) + [2,2])
        hdirectional_filtered = cp.tile( hdirectional_filtered, [1] * (ndims_image - 1) + [self.decimation_factor,2] )
        vdirectional_filtered = cp.tile( vdirectional_filtered, [1] * (ndims_image - 1) + [2,self.decimation_factor] )
        filtered_fft = cp.concatenate((vdirectional_filtered, hdirectional_filtered, lowfreq_filtered), axis = axis_filter)
        filtered_fft = filtered_fft * expanded_filters
        hf_filtered, lowfreq_filtered = cp.split(filtered_fft, [2*self.n_angles], axis = axis_filter)
        lowfreq_filtered = cp.squeeze(lowfreq_filtered, axis = axis_filter)
        hf_filtered = cp.sum( hf_filtered, axis = axis_filter)
        # Add the conjugate of the frequency-reversed spectrum (flip + roll
        # keeps the DC bin aligned) — presumably restoring Hermitian symmetry
        # so the inverse FFT is (close to) real; .real drops any residue.
        hf_filtered_flipped = cp.flip(hf_filtered, axis =(-1))
        hf_filtered_flipped = cp.roll(hf_filtered_flipped, 1, axis =(-1))
        hf_filtered_flipped = cp.flip(hf_filtered_flipped, axis =(-2))
        hf_filtered_flipped = cp.roll(hf_filtered_flipped, 1, axis =(-2))
        hf_filtered = hf_filtered + cp.conj(hf_filtered_flipped)
        return cp.fft.ifft2(hf_filtered + lowfreq_filtered, norm = "ortho").real
class CurveletsOperator():
    """ Curvelet transform operator, following the article on the uniform
    discrete curvelet transform. Holds one DirectionalFilterBank per scale,
    ordered from the finest scale to the coarsest.
    """
    def __init__(self, size_image, nums_angles, nu_a = 0.3, nu_b = 0.2):
        """ Initialize the transform with its hyper parameters.
        Args:
            size_image (Integer): the size of the square images given to the transform
            nums_angles (List[Integer]): number of angular windows per direction,
                given for each scale from the coarsest to the finest
            nu_a (float, optional): smoothing factor for concentric windows. Defaults to 0.3.
            nu_b (float, optional): smoothing factor for angular windows. Defaults to 0.2.
        """
        self.nums_angles = list(nums_angles)
        self._directional_filter_banks = []
        current_size = size_image
        current_border = "toric"
        # Build banks from finest to coarsest; only the finest scale uses the
        # toric border hypothesis, all coarser scales use "null".
        for num_angle in reversed(self.nums_angles):
            self._directional_filter_banks.append(
                DirectionalFilterBank(current_size, num_angle, nu_a, nu_b, current_border)
            )
            current_size = current_size/2
            current_border = "null"
    def __call__(self, image):
        """ Compute the curvelet transform of a batch of images.
        Args:
            image (array): a batch of images of size (n1 x n2 x ... ) x n x n
        Returns:
            [List(array)]: elements of the transform: u0 (lowest frequencies)
                followed by the angular windows u_js, j the scale and s the
                direction, each of size (n1 x n2 x ... ) x na x nb x nc x nd.
        """
        bands = [cp.expand_dims(image, axis = (-4,-3))]
        # Peel scales from finest to coarsest: each bank replaces the current
        # low-frequency band by its (lowpass, vertical, horizontal) triple.
        for bank in self._directional_filter_banks:
            current_low = cp.squeeze(bands[0], axis = (-4,-3))
            bands = list(bank(current_low)) + bands[1:]
        return ArraysCollection(bands)
    def inverse(self, transform):
        """ Compute the inverse curvelet transform.
        Args:
            transform (List(array)): a transform produced by __call__ (must not
                be built from scratch).
        Returns:
            image (array): a batch of images of size (n1 x n2 x ... ) x n x n
        """
        bands = transform
        # Merge scales back from coarsest to finest; each step folds the first
        # three bands into a new low-frequency band.
        for bank in reversed(self._directional_filter_banks):
            merged = bank.reconstruction(bands[0], bands[1], bands[2])
            bands = [cp.expand_dims(merged, axis = (-4,-3))] + list(bands[3:])
        return cp.squeeze(bands[0], axis = (-4,-3))
# Pandas : pip install pandas
# Matplotlib: pip install matplotlib
# Numpy: pip install numpy
# Ipython: pip install ipython
import math
import numpy as np
import matplotlib.pyplot as plt
# Given data
S0=100
K=105
T=5
r=0.05
sig=0.3
# Function to get Option Price for a given M
def getOptionPrice(M, S0=100, K=105, T=5, r=0.05, sig=0.3):
    """Price European call and put options on an M-step binomial tree.

    Generalized from the original (which read module-level constants) by
    exposing the market parameters as keyword arguments whose defaults match
    the original globals, so existing getOptionPrice(M) calls are unchanged.
    Uses u = exp(sig*sqrt(dt) + (r - sig^2/2)*dt) and
    d = exp(-sig*sqrt(dt) + (r - sig^2/2)*dt).

    Args:
        M (int): number of time steps in the tree.
        S0 (float): initial stock price. Defaults to 100.
        K (float): strike price. Defaults to 105.
        T (float): time to maturity. Defaults to 5.
        r (float): continuously compounded risk-free rate. Defaults to 0.05.
        sig (float): volatility. Defaults to 0.3.

    Returns:
        (float, float): (call price, put price), or ('-', '-') when the
        risk-neutral probability lies outside [0, 1] (no-arbitrage violated).
    """
    dt = T/M
    u = math.exp(sig*math.sqrt(dt)+(r-sig*sig/2)*dt)
    d = math.exp(-sig*math.sqrt(dt)+(r-sig*sig/2)*dt)
    p = (math.exp(r*dt)-d)/(u-d)
    # The risk-neutral probability must lie in [0, 1], else the tree admits arbitrage.
    if p < 0 or p > 1:
        print("No Arbitrage Principle has been Violated")
        return '-','-'
    # Terminal payoffs at the M+1 leaves (i up-moves, M-i down-moves).
    callList = [max(S0*(u**i)*(d**(M-i)) - K, 0) for i in range(M+1)]
    putList = [max(0, K - S0*(u**i)*(d**(M-i))) for i in range(M+1)]
    # Backward induction: discounted risk-neutral expectation at each node.
    disc = math.exp(-r*dt)  # hoisted loop-invariant discount factor (== exp(-r*T/M))
    for i in range(M):
        for j in range(M-i):
            callList[j] = ((1-p)*callList[j] + p*callList[j+1])*disc
            putList[j] = ((1-p)*putList[j] + p*putList[j+1])*disc
    return callList[0], putList[0]
# Option prices for every M from 1 to 400 (step size 1)
results = [getOptionPrice(steps) for steps in range(1, 401)]
callPrices = [c for c, _ in results]
putPrices = [p for _, p in results]
# x-axis matches the sampled M values exactly
MList = np.linspace(1, 400, 400)
plt.plot(MList, callPrices)
plt.xlabel('Value of M')
plt.ylabel('Call Option Price')
plt.title('Varying Price of Call Option with Value of M (Step Size 1)')
plt.show()
plt.plot(MList, putPrices)
plt.xlabel('Value of M')
plt.ylabel('Price of Put Option')
plt.title('Varying Price of Put Option with Value of M (Step Size 1)')
plt.show()
# Lists to store the option prices
callPrices = []
putPrices = []
# Compute option prices in steps of 5: M = 5, 10, ..., 400.
# Bug fix: the x-axis previously used np.linspace(1, 400, 80), which does not
# coincide with the sampled M values and mislabeled both curves.
MList = np.arange(5, 401, 5)
for M in MList:
    call, put = getOptionPrice(int(M))
    callPrices.append(call)
    putPrices.append(put)
plt.plot(MList, callPrices)
plt.xlabel('Value of M')
plt.ylabel('Call Option Price')
plt.title('Varying Call Option Price with Value of M (Step Size 5)')
plt.show()
plt.plot(MList, putPrices)
plt.xlabel('Value of M')
plt.ylabel('Price of Put Option')
plt.title('Varying Put Option Price with Value of M (Step Size 5)')
plt.show()
# Pandas : pip install pandas
# Matplotlib: pip install matplotlib
# Numpy: pip install numpy
# Ipython: pip install ipython
import math
import numpy as np
import matplotlib.pyplot as plt
# Given data
S0=100
K=105
T=5
r=0.05
sig=0.3
# Function to get Option Price for a given M
def getOptionPrice(M, S0=100, K=105, T=5, r=0.05, sig=0.3):
    """Price European call and put options on an M-step binomial tree.

    Generalized from the original (which read module-level constants) by
    exposing the market parameters as keyword arguments whose defaults match
    the original globals, so existing getOptionPrice(M) calls are unchanged.
    Uses u = exp(sig*sqrt(dt) + (r - sig^2/2)*dt) and
    d = exp(-sig*sqrt(dt) + (r - sig^2/2)*dt).

    Args:
        M (int): number of time steps in the tree.
        S0 (float): initial stock price. Defaults to 100.
        K (float): strike price. Defaults to 105.
        T (float): time to maturity. Defaults to 5.
        r (float): continuously compounded risk-free rate. Defaults to 0.05.
        sig (float): volatility. Defaults to 0.3.

    Returns:
        (float, float): (call price, put price), or ('-', '-') when the
        risk-neutral probability lies outside [0, 1] (no-arbitrage violated).
    """
    dt = T/M
    u = math.exp(sig*math.sqrt(dt)+(r-sig*sig/2)*dt)
    d = math.exp(-sig*math.sqrt(dt)+(r-sig*sig/2)*dt)
    p = (math.exp(r*dt)-d)/(u-d)
    # The risk-neutral probability must lie in [0, 1], else the tree admits arbitrage.
    if p < 0 or p > 1:
        print("No Arbitrage Principle has been Violated")
        return '-','-'
    # Terminal payoffs at the M+1 leaves (i up-moves, M-i down-moves).
    callList = [max(S0*(u**i)*(d**(M-i)) - K, 0) for i in range(M+1)]
    putList = [max(0, K - S0*(u**i)*(d**(M-i))) for i in range(M+1)]
    # Backward induction: discounted risk-neutral expectation at each node.
    disc = math.exp(-r*dt)  # hoisted loop-invariant discount factor (== exp(-r*T/M))
    for i in range(M):
        for j in range(M-i):
            callList[j] = ((1-p)*callList[j] + p*callList[j+1])*disc
            putList[j] = ((1-p)*putList[j] + p*putList[j+1])*disc
    return callList[0], putList[0]
# Option prices for every M from 1 to 400 (step size 1)
results = [getOptionPrice(steps) for steps in range(1, 401)]
callPrices = [c for c, _ in results]
putPrices = [p for _, p in results]
# x-axis matches the sampled M values exactly
MList = np.linspace(1, 400, 400)
plt.plot(MList, callPrices)
plt.xlabel('Value of M')
plt.ylabel('Call Option Price')
plt.title('Varying Price of Call Option with Value of M (Step Size 1)')
plt.show()
plt.plot(MList, putPrices)
plt.xlabel('Value of M')
plt.ylabel('Price of Put Option')
plt.title('Varying Price of Put Option with Value of M (Step Size 1)')
plt.show()
# Lists to store the option prices
callPrices = []
putPrices = []
# Compute option prices in steps of 5: M = 5, 10, ..., 400.
# Bug fix: the x-axis previously used np.linspace(1, 400, 80), which does not
# coincide with the sampled M values and mislabeled both curves.
MList = np.arange(5, 401, 5)
for M in MList:
    call, put = getOptionPrice(int(M))
    callPrices.append(call)
    putPrices.append(put)
plt.plot(MList, callPrices)
plt.xlabel('Value of M')
plt.ylabel('Call Option Price')
plt.title('Varying Call Option Price with Value of M (Step Size 5)')
plt.show()
plt.plot(MList, putPrices)
plt.xlabel('Value of M')
plt.ylabel('Price of Put Option')
plt.title('Varying Put Option Price with Value of M (Step Size 5)')
plt.show()
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import os
import numpy as np
import matplotlib.pyplot as plt
import livvkit
from livvkit.util.LIVVDict import LIVVDict
from livvkit.util import elements
from livvkit.util import functions
case_color = {'bench': '#d7191c',
'test': '#fc8d59'}
line_style = {'bench': 'o-',
'test': '-'}
setup = None
def set_up():
    """Load the ISMIP configuration and cache, for each experiment, the
    x-coordinates used for interpolation (first column of the reference
    data file shipped with livvkit)."""
    global setup
    config_path = os.path.join(os.path.dirname(__file__), 'ismip.json')
    setup = functions.read_json(config_path)
    experiment_sizes = [('ismip-hom-a', '005'), ('ismip-hom-c', '005'), ('ismip-hom-f', '000')]
    for exp, size in experiment_sizes:
        data_file = os.path.join(livvkit.__path__[0],
                                 setup[exp]["data_dir"],
                                 setup[exp]['pattern'][0].replace('???', size))
        points = np.genfromtxt(data_file, delimiter=',', missing_values='nan',
                               usecols=(0,), unpack=True)
        if exp == 'ismip-hom-f':
            # Same axis rescaling (x*100 - 50) as applied in run() for ismip-hom-f.
            points = points*100 - 50
        setup[exp]['interp_points'] = points
def get_case_length(case):
    """Extract the zero-padded 3-digit length code from a case name such as
    'ismip-hom-a-L005': take the last dash-separated token, drop its first
    character, and normalize the remaining integer to at least 3 digits."""
    token = case.rsplit('-', 1)[-1]
    return '{:03d}'.format(int(token[1:]))
def run(config, analysis_data):
    """Plot model output against the ISMIP reference full-Stokes and
    higher-order envelopes, one figure per (file pattern, domain length).

    Args:
        config: test-case configuration dict; uses 'name', 'plot_dir',
            and 'plot_vars'.
        analysis_data: {case_name: {model_name: data}} where case_name ends
            with the domain-length code (see get_case_length).

    Returns:
        An elements.gallery wrapping all generated plot images.
    """
    case = config['name']
    # ISMIP-HOM A/C/F profiles run along x; the remaining cases along y.
    if case in ['ismip-hom-a', 'ismip-hom-c', 'ismip-hom-f']:
        coord = 'x_hat'
    else:
        coord = 'y_hat'
    # Distinct domain lengths present in the analysis data.
    lengths = list(set(
        [get_case_length(d) for d in six.iterkeys(analysis_data)]
    ))
    plot_list = []
    for p, pattern in enumerate(sorted(setup[case]['pattern'])):
        fig_label = pattern.split('_')[1]
        description = ''
        for l in sorted(lengths):
            plt.figure(figsize=(10, 8), dpi=150)
            plt.xlabel(setup[case]['xlabel'][p])
            plt.ylabel(setup[case]['ylabel'][p])
            if case in ['ismip-hom-a', 'ismip-hom-c']:
                plt.title(str(int(l))+' km')
                title = fig_label[0:-1]+'. '+fig_label[-1]+': '+str(int(l))+' km'
            else:
                plt.title('No-Slip Bed')
                title = fig_label[0:-2]+'. '+fig_label[-2:]+': No-Slip Bed'
            plot_file = os.path.join(config["plot_dir"], config['name']+'_'+fig_label+'_'+l+'.png')
            # Reference data file shipped with livvkit for this pattern/length.
            recreate_file = os.path.join(
                livvkit.__path__[0], setup[case]["data_dir"], pattern
            ).replace('???', l)
            axis, fs_amin, fs_amax, fs_mean, fs_std, ho_amin, ho_amax, ho_mean, ho_std = \
                np.genfromtxt(recreate_file, delimiter=',', missing_values='nan', unpack=True)
            if case in ['ismip-hom-f']:
                # Rescale the F-experiment axis (same transform as in set_up).
                axis = axis*100.0 - 50.0
            # Min/max envelopes and mean curves of the reference models.
            plt.fill_between(axis, ho_amin, ho_amax, facecolor='green', alpha=0.5)
            plt.fill_between(axis, fs_amin, fs_amax, facecolor='blue', alpha=0.5)
            plt.plot(axis, fs_mean, 'b-', linewidth=2, label='Full stokes')
            plt.plot(axis, ho_mean, 'g-', linewidth=2, label='Higher order')
            # Restrict to the analysis entries matching this domain length.
            analysis = {}
            for a in six.iterkeys(analysis_data):
                if int(l) == int(a.split('-')[-1][1:]):
                    analysis[a] = analysis_data[a]
            for a in six.iterkeys(analysis):
                for model in sorted(six.iterkeys(analysis[a])):
                    plt.plot(analysis[a][model][coord],
                             analysis[a][model][config['plot_vars'][p]],
                             line_style[model],
                             color=case_color[model],
                             linewidth=2,
                             label=a+'-'+model)
            plt.legend(loc='best')
            if livvkit.publish:
                # Also emit a high-resolution EPS for publication.
                plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600)
            plt.savefig(plot_file)
            plt.close()
            plot_list.append(elements.image(title, description, os.path.basename(plot_file)))
    return elements.gallery("Numerics Plots", plot_list)
def summarize_result(data, config):
    """Summarize each model run against the higher-order community mean.

    For every configured plot pattern (variable) and every domain length
    present in *data*, computes the mean percent error of the model output
    relative to the community higher-order mean, plus the coefficient of
    variation of the reference data itself.

    Args:
        data: mapping of case-name -> model -> extracted variables; assumed
            to hold the variables named in config['plot_vars'] -- confirm
            against the caller.
        config: test-case configuration dict; 'name' and 'plot_vars' are
            read here.

    Returns:
        A LIVVDict keyed by '<case-name> <variable-word>' with formatted
        percentage strings for bench/test mean error and the reference
        coefficient of variation.
    """
    case = config['name']
    summary = LIVVDict()
    lengths = list(set([get_case_length(d) for d in six.iterkeys(data)]))
    for p, pattern in enumerate(sorted(setup[case]['pattern'])):
        # First word of the y-label names the variable being summarized
        # (e.g. 'Surface' or 'Velocity').
        var_word = setup[case]['ylabel'][p].split(" ")[0]
        # Surface data is offset by +1000 before dividing so near-zero
        # reference means do not inflate the relative errors.
        offset = 1000 if var_word.lower() == 'surface' else 0
        for l in sorted(lengths):
            recreate_file = os.path.join(
                livvkit.__path__[0], setup[case]["data_dir"], pattern
            ).replace('???', l)
            # Only the higher-order mean (col 7) and std (col 8) are used;
            # skip the unused axis/full-Stokes columns entirely.
            ho_mean, ho_std = np.genfromtxt(recreate_file, delimiter=',',
                                            missing_values='nan',
                                            usecols=(7, 8), unpack=True)
            analysis = {a: data[a] for a in six.iterkeys(data)
                        if int(l) == int(a.split('-')[-1][1:])}
            coefficient = np.divide(ho_std, ho_mean + offset)
            for a in six.iterkeys(analysis):
                label = a + ' ' + var_word
                for model in sorted(six.iterkeys(analysis[a])):
                    percent_errors = np.divide(
                        analysis[a][model][config['plot_vars'][p]] - ho_mean,
                        ho_mean + offset)
                    mean_err = '{:3.2%}'.format(np.nanmean(percent_errors))
                    if model.lower() == 'bench':
                        summary[label]['Bench mean % error'] = mean_err
                    else:
                        summary[label]['Test mean % error'] = mean_err
                    summary[label]['Coefficient of variation'] = \
                        '{:3.2%}'.format(np.nanmean(coefficient))
    return summary
def print_summary(case, summary):
    """ Show some statistics from the run """
    # One header section per summary label (e.g. "<run-name> Velocity"),
    # underlined to the width of the heading text.
    for subcase in six.iterkeys(summary):
        message = case + " " + subcase
        print("  " + message)
        print("  " + "-"*len(message))
        # Aligned key/value listing of the statistics computed by
        # summarize_result() (values are pre-formatted percentage strings).
        for key, val in summary[subcase].items():
            print(" "*4 + key.ljust(25) + ":" + val.rjust(7))
print("") | livvkit/components/numerics_tests/ismip.py | from __future__ import absolute_import, division, print_function, unicode_literals
import six
import os
import numpy as np
import matplotlib.pyplot as plt
import livvkit
from livvkit.util.LIVVDict import LIVVDict
from livvkit.util import elements
from livvkit.util import functions
# Hex colors used to distinguish benchmark ('bench') output from the
# current test run ('test') in every comparison plot.
case_color = {'bench': '#d7191c',
              'test': '#fc8d59'}
# Matplotlib line styles: markers+line for benchmark, plain line for test.
line_style = {'bench': 'o-',
              'test': '-'}
# Test-suite configuration loaded from ismip.json; populated by set_up()
# and read by run()/summarize_result().  None until set_up() has run.
setup = None
def set_up():
    """Load the ISMIP-HOM configuration and cache reference interpolation points.

    Reads ismip.json (next to this module) into the module-global *setup*,
    then, for each experiment, extracts the first column of one reference
    data file as the interpolation axis.  Experiment F coordinates are
    rescaled onto the [-50, 50] domain.
    """
    global setup
    config_file = os.path.join(os.path.dirname(__file__), 'ismip.json')
    setup = functions.read_json(config_file)
    reference_sizes = [('ismip-hom-a', '005'),
                       ('ismip-hom-c', '005'),
                       ('ismip-hom-f', '000')]
    for experiment, size in reference_sizes:
        data_file = os.path.join(
            livvkit.__path__[0], setup[experiment]['data_dir'],
            setup[experiment]['pattern'][0].replace('???', size))
        points = np.genfromtxt(data_file, delimiter=',', missing_values='nan',
                               usecols=(0,), unpack=True)
        if experiment == 'ismip-hom-f':
            # Rescale normalized coordinates onto the [-50, 50] domain.
            points = points * 100 - 50
        setup[experiment]['interp_points'] = points
def get_case_length(case):
    """Return the zero-padded, three-digit length code from a case name.

    The length is taken from the last '-'-separated token of *case* with
    its leading character dropped, e.g. 'ismip-hom-a-s005' -> '005'.
    """
    suffix = case.split('-')[-1]
    return '{:03d}'.format(int(suffix[1:]))
def run(config, analysis_data):
    """Plot model output against the ISMIP-HOM community reference data.

    For every plot pattern configured for this test case and every domain
    length found in *analysis_data*, draws the full-Stokes and higher-order
    reference envelopes/means and overlays each analyzed model run, saving
    one image per (pattern, length) pair into config['plot_dir'].

    Args:
        config: test-case configuration dict; 'name', 'plot_dir' and
            'plot_vars' are read here.
        analysis_data: mapping of case-name -> model -> extracted variables;
            assumed to hold 'x_hat'/'y_hat' coordinates and the variables
            named in config['plot_vars'] -- confirm against the caller.

    Returns:
        An elements.gallery wrapping the generated plot images.
    """
    case = config['name']
    coord = 'x_hat' if case in ['ismip-hom-a', 'ismip-hom-c', 'ismip-hom-f'] else 'y_hat'
    lengths = sorted(set(get_case_length(d) for d in six.iterkeys(analysis_data)))
    plot_list = []
    for idx, pattern in enumerate(sorted(setup[case]['pattern'])):
        fig_label = pattern.split('_')[1]
        description = ''
        for length in lengths:
            plt.figure(figsize=(10, 8), dpi=150)
            plt.xlabel(setup[case]['xlabel'][idx])
            plt.ylabel(setup[case]['ylabel'][idx])
            if case in ['ismip-hom-a', 'ismip-hom-c']:
                km = '{} km'.format(int(length))
                plt.title(km)
                title = '{}. {}: {}'.format(fig_label[:-1], fig_label[-1], km)
            else:
                plt.title('No-Slip Bed')
                title = '{}. {}: No-Slip Bed'.format(fig_label[:-2], fig_label[-2:])
            plot_file = os.path.join(
                config['plot_dir'],
                '{}_{}_{}.png'.format(config['name'], fig_label, length))
            recreate_file = os.path.join(
                livvkit.__path__[0], setup[case]['data_dir'], pattern
            ).replace('???', length)
            # Reference columns: axis, full-Stokes min/max/mean/std,
            # higher-order min/max/mean/std (the stds are unused here).
            (axis, fs_amin, fs_amax, fs_mean, _fs_std,
             ho_amin, ho_amax, ho_mean, _ho_std) = np.genfromtxt(
                recreate_file, delimiter=',', missing_values='nan', unpack=True)
            if case == 'ismip-hom-f':
                # Rescale normalized coordinates onto the [-50, 50] km domain.
                axis = axis * 100.0 - 50.0
            plt.fill_between(axis, ho_amin, ho_amax, facecolor='green', alpha=0.5)
            plt.fill_between(axis, fs_amin, fs_amax, facecolor='blue', alpha=0.5)
            plt.plot(axis, fs_mean, 'b-', linewidth=2, label='Full stokes')
            plt.plot(axis, ho_mean, 'g-', linewidth=2, label='Higher order')
            # Only overlay the runs whose case name encodes this length.
            matching = {name: runs for name, runs in analysis_data.items()
                        if int(length) == int(name.split('-')[-1][1:])}
            for name in matching:
                for model in sorted(six.iterkeys(matching[name])):
                    plt.plot(matching[name][model][coord],
                             matching[name][model][config['plot_vars'][idx]],
                             line_style[model],
                             color=case_color[model],
                             linewidth=2,
                             label=name + '-' + model)
            plt.legend(loc='best')
            if livvkit.publish:
                plt.savefig(os.path.splitext(plot_file)[0] + '.eps', dpi=600)
            plt.savefig(plot_file)
            plt.close()
            plot_list.append(
                elements.image(title, description, os.path.basename(plot_file)))
    return elements.gallery("Numerics Plots", plot_list)
def summarize_result(data, config):
    """Summarize each model run against the higher-order community mean.

    For every configured plot pattern (variable) and every domain length
    present in *data*, computes the mean percent error of the model output
    relative to the community higher-order mean, plus the coefficient of
    variation of the reference data itself.

    Args:
        data: mapping of case-name -> model -> extracted variables; assumed
            to hold the variables named in config['plot_vars'] -- confirm
            against the caller.
        config: test-case configuration dict; 'name' and 'plot_vars' are
            read here.

    Returns:
        A LIVVDict keyed by '<case-name> <variable-word>' with formatted
        percentage strings for bench/test mean error and the reference
        coefficient of variation.
    """
    case = config['name']
    summary = LIVVDict()
    lengths = list(set([get_case_length(d) for d in six.iterkeys(data)]))
    for p, pattern in enumerate(sorted(setup[case]['pattern'])):
        # First word of the y-label names the variable being summarized
        # (e.g. 'Surface' or 'Velocity').
        var_word = setup[case]['ylabel'][p].split(" ")[0]
        # Surface data is offset by +1000 before dividing so near-zero
        # reference means do not inflate the relative errors.
        offset = 1000 if var_word.lower() == 'surface' else 0
        for l in sorted(lengths):
            recreate_file = os.path.join(
                livvkit.__path__[0], setup[case]["data_dir"], pattern
            ).replace('???', l)
            # Only the higher-order mean (col 7) and std (col 8) are used;
            # skip the unused axis/full-Stokes columns entirely.
            ho_mean, ho_std = np.genfromtxt(recreate_file, delimiter=',',
                                            missing_values='nan',
                                            usecols=(7, 8), unpack=True)
            analysis = {a: data[a] for a in six.iterkeys(data)
                        if int(l) == int(a.split('-')[-1][1:])}
            coefficient = np.divide(ho_std, ho_mean + offset)
            for a in six.iterkeys(analysis):
                label = a + ' ' + var_word
                for model in sorted(six.iterkeys(analysis[a])):
                    percent_errors = np.divide(
                        analysis[a][model][config['plot_vars'][p]] - ho_mean,
                        ho_mean + offset)
                    mean_err = '{:3.2%}'.format(np.nanmean(percent_errors))
                    if model.lower() == 'bench':
                        summary[label]['Bench mean % error'] = mean_err
                    else:
                        summary[label]['Test mean % error'] = mean_err
                    summary[label]['Coefficient of variation'] = \
                        '{:3.2%}'.format(np.nanmean(coefficient))
    return summary
def print_summary(case, summary):
    """ Show some statistics from the run """
    # One header section per summary label (e.g. "<run-name> Velocity"),
    # underlined to the width of the heading text.
    for subcase in six.iterkeys(summary):
        message = case + " " + subcase
        print("  " + message)
        print("  " + "-"*len(message))
        # Aligned key/value listing of the statistics computed by
        # summarize_result() (values are pre-formatted percentage strings).
        for key, val in summary[subcase].items():
            print(" "*4 + key.ljust(25) + ":" + val.rjust(7))
print("") | 0.44071 | 0.275977 |