code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
""""""
# Standard library modules.
# Third party modules.
import pytest
import sqlalchemy
# Local modules.
import dataclasses_sql
# Globals and constants variables.
@pytest.fixture
def metadata():
    """Provide SQLAlchemy MetaData bound to a fresh in-memory SQLite engine."""
    return sqlalchemy.MetaData(sqlalchemy.create_engine("sqlite:///:memory:"))
def test_delete_no_data(metadata, treedata):
    """update() must raise ValueError for a record that was never inserted."""
    pytest.raises(ValueError, dataclasses_sql.update, metadata, treedata)
def test_delete(metadata, treedata):
    """Insert a record, delete it, and verify the table ends up empty."""
    # Sanity-check the fixture, then insert.
    assert treedata.diameter_m == pytest.approx(3.0, abs=1e-4)
    assert dataclasses_sql.insert(metadata, treedata)
    # Delete the row we just inserted.
    assert dataclasses_sql.delete(metadata, treedata)
    # The table must contain no rows afterwards.
    with metadata.bind.begin() as conn:
        assert not conn.execute("select * from treedata").fetchall()
| [
"pytest.approx",
"dataclasses_sql.update",
"sqlalchemy.create_engine",
"dataclasses_sql.insert",
"sqlalchemy.MetaData",
"pytest.raises",
"dataclasses_sql.delete"
] | [((216, 262), 'sqlalchemy.create_engine', 'sqlalchemy.create_engine', (['"""sqlite:///:memory:"""'], {}), "('sqlite:///:memory:')\n", (240, 262), False, 'import sqlalchemy\n'), ((274, 301), 'sqlalchemy.MetaData', 'sqlalchemy.MetaData', (['engine'], {}), '(engine)\n', (293, 301), False, 'import sqlalchemy\n'), ((565, 607), 'dataclasses_sql.insert', 'dataclasses_sql.insert', (['metadata', 'treedata'], {}), '(metadata, treedata)\n', (587, 607), False, 'import dataclasses_sql\n'), ((655, 697), 'dataclasses_sql.delete', 'dataclasses_sql.delete', (['metadata', 'treedata'], {}), '(metadata, treedata)\n', (677, 697), False, 'import dataclasses_sql\n'), ((358, 383), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (371, 383), False, 'import pytest\n'), ((393, 435), 'dataclasses_sql.update', 'dataclasses_sql.update', (['metadata', 'treedata'], {}), '(metadata, treedata)\n', (415, 435), False, 'import dataclasses_sql\n'), ((522, 552), 'pytest.approx', 'pytest.approx', (['(3.0)'], {'abs': '(0.0001)'}), '(3.0, abs=0.0001)\n', (535, 552), False, 'import pytest\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import annotations
import queue
from typing import TYPE_CHECKING, TypeVar
# Generic type parameter for the queue's payload type.
T = TypeVar("T")

if TYPE_CHECKING:
    # For static analysis, expose queue.SimpleQueue directly (the type
    # checker treats it as generic).
    SimpleQueue = queue.SimpleQueue
else:
    class FakeGenericMeta(type):
        # Runtime shim: make SimpleQueue[...] subscription a no-op so the
        # generic syntax below works on interpreters where queue.SimpleQueue
        # is not subscriptable at runtime.
        def __getitem__(self, item):
            return self

    class SimpleQueue(queue.SimpleQueue, metaclass=FakeGenericMeta):
        pass
class UniqueQueue(SimpleQueue[T]):
    """A process-wide singleton queue: every instantiation returns the same object."""

    # The one shared instance, created lazily on first construction.
    _queue: UniqueQueue[T]

    def __new__(cls) -> UniqueQueue[T]:
        try:
            return cls._queue
        except AttributeError:
            cls._queue = super(UniqueQueue, cls).__new__(cls)
            return cls._queue
| [
"typing.TypeVar"
] | [((142, 154), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (149, 154), False, 'from typing import TYPE_CHECKING, TypeVar\n')] |
from django.db import models
class events(models.Model):
    """A note event (issue/transfer) between two users, tied to a tweet."""
    id = models.AutoField(primary_key=True)
    note_id = models.BigIntegerField(null=True, blank=True)
    tweet_id = models.BigIntegerField()
    # NOTE(review): `type` shadows the builtin name; semantics of the integer
    # codes are not visible here -- presumably an event-kind enum. TODO confirm.
    type = models.IntegerField(null=True, blank=True)
    timestamp = models.DateTimeField()
    from_user = models.CharField(max_length=90)
    to_user = models.CharField(max_length=90)

    class Meta:
        db_table = u'tracker_events'
class notes(models.Model):
    """A promissory note: issuer, current bearer, and its lifecycle state."""
    # BUG FIX: BigIntegerField does not accept max_length -- Django ignores it
    # and reports system check fields.W122 -- so the bogus max_length=30 was
    # dropped. Column type and behavior are unchanged.
    id = models.BigIntegerField(primary_key=True)
    issuer = models.CharField(max_length=90, blank=True)
    bearer = models.CharField(max_length=90, blank=True)
    promise = models.CharField(max_length=420, blank=True)
    created = models.DateTimeField(null=True, blank=True)
    expiry = models.DateTimeField(null=True, blank=True)
    # NOTE(review): status/transferable/type are integer codes whose meanings
    # are not visible in this file -- TODO document the enum values.
    status = models.IntegerField(null=True, blank=True)
    transferable = models.IntegerField(null=True, blank=True)
    type = models.IntegerField(null=True, blank=True)
    conditional = models.CharField(max_length=420, null=True, blank=True)

    class Meta:
        db_table = u'tracker_notes'
class trustlist(models.Model):
    """One directed trust edge: `user` trusts `trusted`."""
    id = models.AutoField(primary_key=True)
    user = models.CharField(max_length=90, blank=True)
    trusted = models.CharField(max_length=90, blank=True)

    class Meta:
        db_table = u'tracker_trust_list'
class tags(models.Model):
    """Lookup table of tag strings referenced by tweets (tag_1..tag_3)."""
    id = models.AutoField(primary_key=True)
    tag = models.CharField(max_length=30)

    class Meta:
        db_table = u'tracker_tags'
class tweets(models.Model):
    """A captured tweet plus tracker bookkeeping (parse flag, up to 3 tags)."""
    id = models.AutoField(primary_key=True)
    timestamp = models.DateTimeField(null=True, blank=True)
    # Twitter's own id for this tweet (distinct from our surrogate key).
    tweet_id = models.BigIntegerField(null=True, blank=True)
    author = models.CharField(max_length=90, blank=True)
    content = models.CharField(max_length=420, blank=True)
    reply_to_id = models.BigIntegerField(null=True, blank=True)
    # Single-character flag -- presumably 'Y'/'N' for "already parsed";
    # TODO confirm against the writer of this column.
    parsed = models.CharField(max_length=1, null=True, blank=True)
    url = models.CharField(max_length=420, null=True, blank=True)
    display_url = models.CharField(max_length=420, null=True, blank=True)
    img_url = models.CharField(max_length=420, null=True, blank=True)
    # Up to three tag references (ids into the tags table, by convention).
    tag_1 = models.IntegerField(null=True, blank=True)
    tag_2 = models.IntegerField(null=True, blank=True)
    tag_3 = models.IntegerField(null=True, blank=True)

    class Meta:
        db_table = u'tracker_tweets'
class users(models.Model):
    """A tracker user and their karma score."""
    id = models.AutoField(primary_key=True)
    username = models.CharField(max_length=90, blank=True)
    karma = models.IntegerField(null=True, blank=True)

    class Meta:
        db_table = u'tracker_users'
"django.db.models.IntegerField",
"django.db.models.AutoField",
"django.db.models.BigIntegerField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((67, 101), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (83, 101), False, 'from django.db import models\n'), ((116, 161), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (138, 161), False, 'from django.db import models\n'), ((177, 201), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {}), '()\n', (199, 201), False, 'from django.db import models\n'), ((213, 255), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (232, 255), False, 'from django.db import models\n'), ((272, 294), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (292, 294), False, 'from django.db import models\n'), ((311, 342), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(90)'}), '(max_length=90)\n', (327, 342), False, 'from django.db import models\n'), ((357, 388), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(90)'}), '(max_length=90)\n', (373, 388), False, 'from django.db import models\n'), ((479, 534), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'max_length': '(30)', 'primary_key': '(True)'}), '(max_length=30, primary_key=True)\n', (501, 534), False, 'from django.db import models\n'), ((548, 591), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(90)', 'blank': '(True)'}), '(max_length=90, blank=True)\n', (564, 591), False, 'from django.db import models\n'), ((605, 648), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(90)', 'blank': '(True)'}), '(max_length=90, blank=True)\n', (621, 648), False, 'from django.db import models\n'), ((663, 707), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(420)', 'blank': '(True)'}), '(max_length=420, blank=True)\n', (679, 707), False, 'from 
django.db import models\n'), ((722, 765), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (742, 765), False, 'from django.db import models\n'), ((779, 822), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (799, 822), False, 'from django.db import models\n'), ((836, 878), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (855, 878), False, 'from django.db import models\n'), ((898, 940), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (917, 940), False, 'from django.db import models\n'), ((952, 994), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (971, 994), False, 'from django.db import models\n'), ((1013, 1068), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(420)', 'null': '(True)', 'blank': '(True)'}), '(max_length=420, null=True, blank=True)\n', (1029, 1068), False, 'from django.db import models\n'), ((1162, 1196), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (1178, 1196), False, 'from django.db import models\n'), ((1208, 1251), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(90)', 'blank': '(True)'}), '(max_length=90, blank=True)\n', (1224, 1251), False, 'from django.db import models\n'), ((1266, 1309), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(90)', 'blank': '(True)'}), '(max_length=90, blank=True)\n', (1282, 1309), False, 'from django.db import models\n'), ((1411, 1445), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (1427, 1445), False, 'from django.db 
import models\n'), ((1456, 1487), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (1472, 1487), False, 'from django.db import models\n'), ((1585, 1619), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (1601, 1619), False, 'from django.db import models\n'), ((1636, 1679), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (1656, 1679), False, 'from django.db import models\n'), ((1695, 1740), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (1717, 1740), False, 'from django.db import models\n'), ((1754, 1797), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(90)', 'blank': '(True)'}), '(max_length=90, blank=True)\n', (1770, 1797), False, 'from django.db import models\n'), ((1812, 1856), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(420)', 'blank': '(True)'}), '(max_length=420, blank=True)\n', (1828, 1856), False, 'from django.db import models\n'), ((1875, 1920), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (1897, 1920), False, 'from django.db import models\n'), ((1934, 1987), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'null': '(True)', 'blank': '(True)'}), '(max_length=1, null=True, blank=True)\n', (1950, 1987), False, 'from django.db import models\n'), ((1998, 2053), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(420)', 'null': '(True)', 'blank': '(True)'}), '(max_length=420, null=True, blank=True)\n', (2014, 2053), False, 'from django.db import models\n'), ((2072, 2127), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(420)', 'null': '(True)', 'blank': '(True)'}), 
'(max_length=420, null=True, blank=True)\n', (2088, 2127), False, 'from django.db import models\n'), ((2142, 2197), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(420)', 'null': '(True)', 'blank': '(True)'}), '(max_length=420, null=True, blank=True)\n', (2158, 2197), False, 'from django.db import models\n'), ((2210, 2252), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (2229, 2252), False, 'from django.db import models\n'), ((2265, 2307), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (2284, 2307), False, 'from django.db import models\n'), ((2320, 2362), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (2339, 2362), False, 'from django.db import models\n'), ((2453, 2487), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (2469, 2487), False, 'from django.db import models\n'), ((2503, 2546), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(90)', 'blank': '(True)'}), '(max_length=90, blank=True)\n', (2519, 2546), False, 'from django.db import models\n'), ((2559, 2601), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (2578, 2601), False, 'from django.db import models\n')] |
#! /usr/bin/python3
"""
__Version__: 0.1
__Author__: <NAME>
Data: 15/02/2020
Description: Sample scrip for scan host ports with only buit-in functions
This code just works with addresses of v4 family.
Python 3.x
"""
# Import modules
import socket
import sys
import errno
import os
import argparse
import ipaddress
# Main Class
class MyPortScanner(object):
    """TCP port scanner built solely on the standard library (IPv4 only)."""

    def __init__(self, target, portlist):
        """Store the target and normalise the port list.

        target   -- IPv4 address (or address-like object) to probe.
        portlist -- comma-separated string ("21,22") or a list of ints.
        """
        self.target = target
        if type(portlist) is str:
            self.portlist = [int(p) for p in portlist.split(',')]
        else:
            self.portlist = portlist

    def check_port_socket_v4_tcp(self):
        """Probe every configured port over TCP and print the results."""
        print('--------------------------------')
        print('[+] Initializing scan...')
        print('[i] Target host: {}'.format(self.target))
        print('[i] Ports: {}'.format(self.portlist))
        try:
            for port in self.portlist:
                self._probe_one(port)
        except socket.error as e:
            # Any socket-level failure aborts the whole scan.
            print(str(e))
            print('[-] Connection Error')
            sys.exit()
        print('[+] Script finished.')

    def _probe_one(self, port):
        # One TCP connect attempt; AF_INET restricts this to the v4 family.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Give each connection attempt at most 3 seconds.
        s.settimeout(3)
        result = s.connect_ex((str(self.target), port))
        # connect_ex returns 0 on success, an errno code otherwise.
        if result == 0:
            print('[+] Port {}: Open'.format(port))
        else:
            print('[!] Port {}: Closed'.format(port))
            print('\t[-] Code error: {}'.format(errno.errorcode[result]))
            print('\t[-] Message: {}'.format(os.strerror(result)))
        s.close()
# Performs the script
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Scan ports TCP\nVersion: 0.1')
    # Target parameter; accepts only an IPv4 address (validated by ipaddress).
    parser.add_argument('-t', dest='target_host_v4', help='Target host IPv4', required=True, type=ipaddress.IPv4Address)
    # Ports to scan. NOTE: when omitted, the default is already a list of
    # ints, so MyPortScanner.__init__'s string-splitting branch is skipped.
    parser.add_argument('-p', dest='ports', help='Ports separated by comma', type=str, default=[21, 22, 23, 53, 80, 443,
                                                                                               3389, 389, 3306, 1521,
                                                                                               8080, 8000])
    params = parser.parse_args()
    # Create an instance of MyPortScanner and run the scan.
    m = MyPortScanner(params.target_host_v4, params.ports)
    m.check_port_socket_v4_tcp()
| [
"socket.socket",
"os.strerror",
"argparse.ArgumentParser",
"sys.exit"
] | [((2123, 2193), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Scan ports TCP\nVersion: 0.1"""'}), '(description="""Scan ports TCP\nVersion: 0.1""")\n', (2146, 2193), False, 'import argparse\n'), ((1144, 1193), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1157, 1193), False, 'import socket\n'), ((2009, 2019), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2017, 2019), False, 'import sys\n'), ((1775, 1794), 'os.strerror', 'os.strerror', (['result'], {}), '(result)\n', (1786, 1794), False, 'import os\n')] |
#print_hello_friend.py
from datetime import datetime

# Show the current local time, then the greeting.
now = datetime.now()
print(now)
print("G'day Mate!")
| [
"datetime.datetime.now"
] | [((61, 75), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (73, 75), False, 'from datetime import datetime\n')] |
from urllib import request
def download_from_url(url, filename):
    """Fetch the resource at *url* and save it locally as *filename*."""
    request.urlretrieve(url, filename)
"urllib.request.urlretrieve"
] | [((70, 104), 'urllib.request.urlretrieve', 'request.urlretrieve', (['url', 'filename'], {}), '(url, filename)\n', (89, 104), False, 'from urllib import request\n')] |
from typing import List
from led import Led
from transitions import AbstractTransition
class Sudden(AbstractTransition):
    """Transition that immediately snaps every LED to one fixed target colour."""

    def __init__(self, red: float, green: float, blue: float) -> None:
        super().__init__()
        # The single LED value every output LED is replaced with in step().
        self.target = Led(red, green, blue)

    @AbstractTransition.brightness.setter
    def brightness(self, brightness):
        # Keep the target LED's brightness in sync, then delegate to the base
        # class's property setter so its own state is updated too.
        self.target.brightness = brightness
        AbstractTransition.brightness.fset(self, brightness)

    def step(self, previous: List[Led]) -> List[Led]:
        # Regardless of the previous frame's contents, emit the target colour
        # once per incoming LED (same list length as the input).
        return [self.target for _ in previous]
| [
"transitions.AbstractTransition.brightness.fset",
"led.Led"
] | [((246, 267), 'led.Led', 'Led', (['red', 'green', 'blue'], {}), '(red, green, blue)\n', (249, 267), False, 'from led import Led\n'), ((401, 453), 'transitions.AbstractTransition.brightness.fset', 'AbstractTransition.brightness.fset', (['self', 'brightness'], {}), '(self, brightness)\n', (435, 453), False, 'from transitions import AbstractTransition\n')] |
import operator as op
import itertools
# Advent-of-Code-style setup: read the boss stats and define the item shop.
with open("input.txt") as file:
    data = file.read()
# Shop inventory as a runtime string; each item line is parsed with
# rsplit(None, 3), so only the last three whitespace-separated fields need to
# line up -- multi-word names like "Damage +1" survive intact.
shop = """Weapons: Cost Damage Armor
Dagger 8 4 0
Shortsword 10 5 0
Warhammer 25 6 0
Longsword 40 7 0
Greataxe 74 8 0
Armor: Cost Damage Armor
Leather 13 0 1
Chainmail 31 0 2
Splintmail 53 0 3
Bandedmail 75 0 4
Platemail 102 0 5
Rings: Cost Damage Armor
Damage +1 25 1 0
Damage +2 50 2 0
Damage +3 100 3 0
Defense +1 20 0 1
Defense +2 40 0 2
Defense +3 80 0 3
"""
# Per-category item lists; each entry is [name, cost, damage, armor].
weapons = []
armors = []
rings = []
current = None
for line in shop.splitlines():
    # Header lines switch which list subsequent items are appended to.
    if "Weapons:" in line:
        current = weapons
    elif "Armor:" in line:
        current = armors
    elif "Rings:" in line:
        current = rings
    elif line == "":
        current = None
    else:
        name, cost, damage, armor = line.rsplit(None, 3)
        current.append([name, int(cost), int(damage), int(armor)])
# Boss stats parsed from the puzzle input ("Hit Points: 100"-style lines).
boss = {}
for line in data.splitlines():
    prop, val = map(str.strip, line.split(":"))
    boss[prop] = int(val)
# The player always has 100 HP; Damage/Armor are overwritten per gear combo.
player = {
    'Hit Points': 100,
    'Damage': 0,
    'Armor': 0
}
def attack(attacker, defender):
    """Apply one strike: damage minus armour, but always at least 1 HP."""
    dealt = attacker['Damage'] - defender['Armor']
    if dealt < 1:
        dealt = 1
    defender['Health'] -= dealt
def simulate_battle(player, boss):
    """Fight to the death, player striking first; True iff the player wins.

    Both dicts get a mutable 'Health' key reset from 'Hit Points'.
    """
    player['Health'] = player['Hit Points']
    boss['Health'] = boss['Hit Points']
    # Every round removes at least 1 HP from each side, so the larger hit
    # point total bounds the number of rounds.
    rounds = max(player['Hit Points'], boss['Hit Points'])
    for _ in range(rounds):
        attack(player, boss)
        if boss['Health'] <= 0:
            return True
        attack(boss, player)
        if player['Health'] <= 0:
            return False
    raise Exception("Battle did not end")
def generate_gear(weapons, armors, rings):
    """Yield every legal loadout as a (weapon, armor, ring1, ring2) tuple.

    Exactly one weapon; armor optional (None); zero, one or two rings
    (unused ring slots are None).
    """
    for weapon in weapons:
        for armor in itertools.chain([None], armors):
            # Two distinct rings.
            for pair in itertools.combinations(rings, 2):
                yield (weapon, armor) + pair
            # Exactly one ring.
            for ring in rings:
                yield (weapon, armor, ring, None)
            # No rings at all.
            yield (weapon, armor, None, None)
def solve():
    """Scan every loadout; report the cheapest winning and priciest losing gear."""
    min_cost_to_win = None
    max_cost_to_lose = None
    for weapon_stats, armor_stats, ring1_stats, ring2_stats in generate_gear(weapons, armors, rings):
        worn = [g for g in (weapon_stats, armor_stats, ring1_stats, ring2_stats) if g is not None]
        gear_name = [g[0] for g in worn]
        # BUG FIX: the original summed the stat columns with reduce(), but
        # never imported it from functools -- a NameError on Python 3.
        # Plain sums over each column are equivalent and clearer.
        cost = sum(g[1] for g in worn)
        damage = sum(g[2] for g in worn)
        armor = sum(g[3] for g in worn)
        player['Damage'] = damage
        player['Armor'] = armor
        player_wins = simulate_battle(player, boss)
        if player_wins and (min_cost_to_win is None or cost < min_cost_to_win):
            min_cost_to_win = cost
            print("Cheaper equipment with win condition, cost={}, damage={}, armor={}, gear={}".format(cost, damage, armor, gear_name))
            #print("Stats {}, {}, {}, {}".format(weaponStats, armorStats, ring1Stats, ring2Stats))
        if not player_wins and (max_cost_to_lose is None or cost > max_cost_to_lose):
            max_cost_to_lose = cost
            print("More expensive equipment with lose condition, cost={}, damage={}, armor={}, gear={}".format(cost, damage, armor, gear_name))

solve()
"itertools.chain",
"operator.itemgetter",
"itertools.combinations"
] | [((1917, 1948), 'itertools.chain', 'itertools.chain', (['[None]', 'armors'], {}), '([None], armors)\n', (1932, 1948), False, 'import itertools\n'), ((1982, 2014), 'itertools.combinations', 'itertools.combinations', (['rings', '(2)'], {}), '(rings, 2)\n', (2004, 2014), False, 'import itertools\n'), ((1549, 1576), 'operator.itemgetter', 'op.itemgetter', (['"""Hit Points"""'], {}), "('Hit Points')\n", (1562, 1576), True, 'import operator as op\n')] |
import sys
import os
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QDialog
from PyQt5.QtCore import pyqtSignal
from mainUi import Ui_Form
from sortUi import sortUi
from functools import partial
import csvIssue
class MyMainForm(QMainWindow, Ui_Form):
    """Main bookkeeping window: records spendings, generates the report chart,
    undoes the latest entry, and opens the category editor."""

    def __init__(self, parent=None):
        super(MyMainForm, self).__init__(parent)
        self.setupUi(self)
        # Category-editor dialog, created once and shown on demand.
        self.sub = MySubForm()
        self.submit_button.clicked.connect(self.submit)
        self.generate_button.clicked.connect(self.generate)
        self.withdraw_button.clicked.connect(self.withdraw)
        self.edit_sort_button.clicked.connect(self.edit)

    def submit(self):
        """Validate the form fields and append one spending record to the CSV."""
        item = self.item_line.text()
        sort = self.sort_line.currentText()
        price = self.price_line.text()
        if not item or not sort or not price:
            self.response_Browser.setText("请正确输入噢 ;)")
        else:
            try:
                csvIssue.write_spending(item, sort, price)
                self.response_Browser.setText(
                    "记账成功!\n" + "你购买了属于" + sort + "品类下的" + item + ",\n花费了" + price + "元.")
                self.tips_browser.setText("最近一次记账: " + item + " 金额: " + price + "元")
            except Exception:
                # BUG FIX: was a bare `except:` which also swallowed
                # SystemExit/KeyboardInterrupt; narrowed to Exception.
                self.response_Browser.setText("出错了 ;(")
            # Clear the inputs after an attempt, successful or not.
            self.item_line.setText("")
            self.price_line.setText("")

    def generate(self):
        """Regenerate the report chart and offer to open it."""
        os.system("python diagram.py")
        res_code = QMessageBox.question(
            self, "生成成功", "立即查看?", QMessageBox.Yes | QMessageBox.No)
        # QMessageBox.No == 65536, so "anything else" means the user accepted.
        if res_code != 65536:
            os.startfile("result.html")

    def withdraw(self):
        """Undo the most recent record after user confirmation, then refresh
        the tips banner with whatever record is now the latest."""
        res_code = QMessageBox.question(
            self, "?", "确认撤销?", QMessageBox.Yes | QMessageBox.No)
        if res_code != 65536:
            flag = csvIssue.delete_last_line()
            if flag:
                QMessageBox.information(
                    self, 'Success', '成功删除', QMessageBox.Ok)
            else:
                QMessageBox.information(
                    self, 'Success', '记账记录已为空', QMessageBox.Ok)
            last_line = csvIssue.get_last_line()
            if not last_line:
                self.tips_browser.setText("记账记录为空!\n千里之行,始于足下。")
            else:
                self.tips_browser.setText(
                    "最近一次记账: " + str(last_line[0]) + " 金额: " + str(last_line[2]) + "元")
            self.response_Browser.setText("")

    def edit(self):
        """Open the category-editor dialog."""
        self.sub.show()
        # NOTE(review): connecting here stacks a duplicate connection on every
        # click of the edit button; connecting once in __init__ would avoid the
        # repeated slot invocations. Left as-is to preserve behaviour.
        self.sub._signal.connect(self.update_comboBox)

    def update_comboBox(self, plist):
        """Replace the category combo box contents with *plist*."""
        self.sort_line.clear()
        self.sort_line.addItems(plist)
class MySubForm(QDialog, sortUi):
    """Category-editor dialog: add/remove first- and second-level classifiers."""

    # Emitted with the refreshed second-level classifier list so the main
    # window can update its combo box.
    _signal = pyqtSignal(list)

    def __init__(self, parent=None):
        super(MySubForm, self).__init__(parent)
        self.setupUi(self)
        # partial() binds which level ('first'/'second') each button edits.
        self.first_add.clicked.connect(partial(self.add_classifier, 'first'))
        self.second_add.clicked.connect(partial(self.add_classifier, 'second'))
        self.first_delete.clicked.connect(partial(self.check_select, 'first'))
        self.second_delete.clicked.connect(
            partial(self.check_select, 'second'))

    def add_classifier(self, name):
        """Add a classifier (level chosen by *name*) under the selected parent."""
        if name == 'second':
            second = self.second_line.text()
            first = self.second_Box.currentText()
        elif name == 'first':
            second = self.first_line.text()
            first = self.first_Box.currentText()
        else:
            # Defensive branch: only 'first'/'second' are ever wired up.
            QMessageBox.critical(self, "Error", "Error!", QMessageBox.Ok)
            return
        if first and second:
            flag = csvIssue.update_classifier(second, first)
            if flag:
                QMessageBox.information(
                    self, 'Success', '添加成功! ', QMessageBox.Ok)
                self.update_list()
            else:
                QMessageBox.information(
                    self, 'Duplicated', '项目已存在', QMessageBox.Ok)
            self.first_line.clear()
            self.second_line.clear()
        else:
            QMessageBox.critical(self, 'Error', '错误的参数', QMessageBox.Ok)

    def check_select(self, name):
        """Collect the selected entries of the chosen list widget and delete them."""
        # NOTE(review): if *name* is neither 'first' nor 'second', `items` is
        # unbound and the final call raises NameError -- callers only ever pass
        # the two valid values, so this is unreachable in practice.
        if name == 'first':
            items = [item.text()
                     for item in self.first_classifier.selectedItems()]
            if not items:
                QMessageBox.information(self, '?', '?', QMessageBox.Ok)
                return
        elif name == 'second':
            items = [item.text()
                     for item in self.second_classifier.selectedItems()]
            if not items:
                QMessageBox.information(self, '?', '?', QMessageBox.Ok)
                return
        self.delete_classifier(items)

    def update_list(self):
        """Reload both classifier list widgets and the parent combo box from
        the CSV store, then notify the main window via _signal."""
        first_classifier_list = csvIssue.init_first_classifier()
        second_classifier_list = csvIssue.init_second_classifier()
        self.first_classifier.clear()
        self.first_classifier.addItems(first_classifier_list)
        self.second_classifier.clear()
        self.second_classifier.addItems(second_classifier_list)
        self.second_Box.clear()
        self.second_Box.addItems(first_classifier_list)
        self._signal.emit(second_classifier_list)

    def delete_classifier(self, items):
        """Delete *items* after confirmation; refuse if records still use them."""
        res_code = QMessageBox.question(
            self, "?", "确认删除?", QMessageBox.Yes | QMessageBox.No)
        # QMessageBox.No == 65536: user declined.
        if res_code == 65536:
            return
        else:
            flag = csvIssue.delete_classifier(items)
            if flag:
                self.update_list()
            else:
                QMessageBox.critical(
                    self, 'Error', '分类下存在记账记录', QMessageBox.Ok)
if __name__ == "__main__":
    # Standard Qt bootstrap: build the app, show the main window, enter the
    # event loop, and propagate its exit code to the shell.
    app = QApplication(sys.argv)
    myWin = MyMainForm()
    myWin.show()
    sys.exit(app.exec_())
| [
"PyQt5.QtCore.pyqtSignal",
"csvIssue.write_spending",
"PyQt5.QtWidgets.QMessageBox.critical",
"csvIssue.delete_last_line",
"csvIssue.init_first_classifier",
"csvIssue.delete_classifier",
"PyQt5.QtWidgets.QMessageBox.information",
"PyQt5.QtWidgets.QMessageBox.question",
"csvIssue.get_last_line",
"f... | [((2625, 2641), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['list'], {}), '(list)\n', (2635, 2641), False, 'from PyQt5.QtCore import pyqtSignal\n'), ((5585, 5607), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (5597, 5607), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QDialog\n'), ((1369, 1399), 'os.system', 'os.system', (['"""python diagram.py"""'], {}), "('python diagram.py')\n", (1378, 1399), False, 'import os\n'), ((1419, 1496), 'PyQt5.QtWidgets.QMessageBox.question', 'QMessageBox.question', (['self', '"""生成成功"""', '"""立即查看?"""', '(QMessageBox.Yes | QMessageBox.No)'], {}), "(self, '生成成功', '立即查看?', QMessageBox.Yes | QMessageBox.No)\n", (1439, 1496), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QDialog\n'), ((1624, 1698), 'PyQt5.QtWidgets.QMessageBox.question', 'QMessageBox.question', (['self', '"""?"""', '"""确认撤销?"""', '(QMessageBox.Yes | QMessageBox.No)'], {}), "(self, '?', '确认撤销?', QMessageBox.Yes | QMessageBox.No)\n", (1644, 1698), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QDialog\n'), ((4661, 4693), 'csvIssue.init_first_classifier', 'csvIssue.init_first_classifier', ([], {}), '()\n', (4691, 4693), False, 'import csvIssue\n'), ((4727, 4760), 'csvIssue.init_second_classifier', 'csvIssue.init_second_classifier', ([], {}), '()\n', (4758, 4760), False, 'import csvIssue\n'), ((5166, 5240), 'PyQt5.QtWidgets.QMessageBox.question', 'QMessageBox.question', (['self', '"""?"""', '"""确认删除?"""', '(QMessageBox.Yes | QMessageBox.No)'], {}), "(self, '?', '确认删除?', QMessageBox.Yes | QMessageBox.No)\n", (5186, 5240), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QDialog\n'), ((1552, 1579), 'os.startfile', 'os.startfile', (['"""result.html"""'], {}), "('result.html')\n", (1564, 1579), False, 'import os\n'), ((1761, 1788), 'csvIssue.delete_last_line', 'csvIssue.delete_last_line', ([], {}), '()\n', (1786, 1788), 
False, 'import csvIssue\n'), ((2059, 2083), 'csvIssue.get_last_line', 'csvIssue.get_last_line', ([], {}), '()\n', (2081, 2083), False, 'import csvIssue\n'), ((2795, 2832), 'functools.partial', 'partial', (['self.add_classifier', '"""first"""'], {}), "(self.add_classifier, 'first')\n", (2802, 2832), False, 'from functools import partial\n'), ((2874, 2912), 'functools.partial', 'partial', (['self.add_classifier', '"""second"""'], {}), "(self.add_classifier, 'second')\n", (2881, 2912), False, 'from functools import partial\n'), ((2957, 2992), 'functools.partial', 'partial', (['self.check_select', '"""first"""'], {}), "(self.check_select, 'first')\n", (2964, 2992), False, 'from functools import partial\n'), ((3050, 3086), 'functools.partial', 'partial', (['self.check_select', '"""second"""'], {}), "(self.check_select, 'second')\n", (3057, 3086), False, 'from functools import partial\n'), ((3530, 3571), 'csvIssue.update_classifier', 'csvIssue.update_classifier', (['second', 'first'], {}), '(second, first)\n', (3556, 3571), False, 'import csvIssue\n'), ((3955, 4015), 'PyQt5.QtWidgets.QMessageBox.critical', 'QMessageBox.critical', (['self', '"""Error"""', '"""错误的参数"""', 'QMessageBox.Ok'], {}), "(self, 'Error', '错误的参数', QMessageBox.Ok)\n", (3975, 4015), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QDialog\n'), ((5336, 5369), 'csvIssue.delete_classifier', 'csvIssue.delete_classifier', (['items'], {}), '(items)\n', (5362, 5369), False, 'import csvIssue\n'), ((943, 985), 'csvIssue.write_spending', 'csvIssue.write_spending', (['item', 'sort', 'price'], {}), '(item, sort, price)\n', (966, 985), False, 'import csvIssue\n'), ((1826, 1890), 'PyQt5.QtWidgets.QMessageBox.information', 'QMessageBox.information', (['self', '"""Success"""', '"""成功删除"""', 'QMessageBox.Ok'], {}), "(self, 'Success', '成功删除', QMessageBox.Ok)\n", (1849, 1890), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QDialog\n'), ((1946, 2013), 
'PyQt5.QtWidgets.QMessageBox.information', 'QMessageBox.information', (['self', '"""Success"""', '"""记账记录已为空"""', 'QMessageBox.Ok'], {}), "(self, 'Success', '记账记录已为空', QMessageBox.Ok)\n", (1969, 2013), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QDialog\n'), ((3400, 3461), 'PyQt5.QtWidgets.QMessageBox.critical', 'QMessageBox.critical', (['self', '"""Error"""', '"""Error!"""', 'QMessageBox.Ok'], {}), "(self, 'Error', 'Error!', QMessageBox.Ok)\n", (3420, 3461), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QDialog\n'), ((3609, 3675), 'PyQt5.QtWidgets.QMessageBox.information', 'QMessageBox.information', (['self', '"""Success"""', '"""添加成功! """', 'QMessageBox.Ok'], {}), "(self, 'Success', '添加成功! ', QMessageBox.Ok)\n", (3632, 3675), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QDialog\n'), ((3766, 3834), 'PyQt5.QtWidgets.QMessageBox.information', 'QMessageBox.information', (['self', '"""Duplicated"""', '"""项目已存在"""', 'QMessageBox.Ok'], {}), "(self, 'Duplicated', '项目已存在', QMessageBox.Ok)\n", (3789, 3834), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QDialog\n'), ((4226, 4281), 'PyQt5.QtWidgets.QMessageBox.information', 'QMessageBox.information', (['self', '"""?"""', '"""?"""', 'QMessageBox.Ok'], {}), "(self, '?', '?', QMessageBox.Ok)\n", (4249, 4281), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QDialog\n'), ((5460, 5524), 'PyQt5.QtWidgets.QMessageBox.critical', 'QMessageBox.critical', (['self', '"""Error"""', '"""分类下存在记账记录"""', 'QMessageBox.Ok'], {}), "(self, 'Error', '分类下存在记账记录', QMessageBox.Ok)\n", (5480, 5524), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QDialog\n'), ((4484, 4539), 'PyQt5.QtWidgets.QMessageBox.information', 'QMessageBox.information', (['self', '"""?"""', '"""?"""', 'QMessageBox.Ok'], {}), "(self, '?', '?', QMessageBox.Ok)\n", (4507, 4539), False, 'from 
PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QDialog\n')] |
import unittest
import uuid
from . import user_util
class TestUtilFuncs(unittest.TestCase):
    """Round-trip tests for the password hashing helpers."""

    def test_hash_and_verify_password(self):
        # Ten random UUID-based passwords must each verify against their own hash.
        for _ in range(10):
            pw = str(uuid.uuid4())
            hashed = user_util.hash_password(pw)
            self.assertTrue(user_util.verify_password(pw, hashed))
| [
"uuid.uuid4"
] | [((166, 178), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (176, 178), False, 'import uuid\n')] |
#!/usr/bin/env python
from prometheus_client import start_http_server, Summary
import random
import argparse
import time
from prometheus_client import Counter
from prometheus_client import Gauge
from prometheus_client import Summary
from prometheus_client import Histogram
import sys
import time
import json
import datetime
import dateutil.parser
import logging
import socket
import pprint
import threading
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from http import HTTPStatus
from urllib.parse import urlparse
from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily, REGISTRY
import concurrent.futures
# makes name "metric" name compliant
def m(s):
return s.replace("-","_")
# determine if string is int
def stringIsInt(s):
try:
int(s)
return True
except ValueError:
return False
# a prometheus client_python
# custom "Collector": https://github.com/prometheus/client_python
# This classes collect() method is called periodically
# and it dumps the current state of the job_name_2_metrics_db
# database of metrics
class STSACollector(object):
# for controlling access to job_name_2_metrics_db
lock = threading.RLock()
# default life of all metrics
# stored in job_name_2_metrics_db
# before they are purged
metric_ttl_seconds = 600
# current database of our
# metrics state organized by job_name
# collect() always dumps the current
# state of what is in here
job_name_2_metrics_db = {}
# Will analyze the service servicechekerdb file located
# at the given path and create approproiate metrics
# for it
def processServicecheckerDb(self,servicecheckerdb_path):
# open the file
servicecheckerdb = {}
with open(servicecheckerdb_path) as f:
servicecheckerdb = json.load(f)
job_name = servicecheckerdb['name']
job_id = servicecheckerdb['id']
logging.info("Processing servicecheckerdb: '%s'", job_id)
latest_job_metrics = []
# process service records for each one
for service_result in servicecheckerdb['service_results']:
collected_metrics = self.processServiceResult(service_result)
latest_job_metrics.extend(collected_metrics)
# hotswap and replace latest metrics..
try:
self.lock.acquire()
self.job_name_2_metrics_db[job_name] = latest_job_metrics
finally:
self.lock.release()
# if the metric's 'created_at' is is beyond the metric_ttl_seconds
def metricIsExpired(self,metric_def):
iso_utc_str = metric_def['created_at']
iso_utc = dateutil.parser.parse(iso_utc_str)
diff_seconds = (datetime.datetime.utcnow()-iso_utc).total_seconds()
if diff_seconds > self.metric_ttl_seconds:
return True
return False
# this is invoked by prometheus_client.core
# on some sort of schedule or by request to get
# latest metrics
def collect(self):
try:
self.lock.acquire()
gauges_db = {}
# 1. build our in-memory set of current state
# of all metrics via Gauges
for job_name in self.job_name_2_metrics_db:
metrics = self.job_name_2_metrics_db[job_name]
for metric_def in metrics:
metric_name = metric_def['name']
metric_type = metric_def['metric_type']
if self.metricIsExpired(metric_def):
logging.debug("Skipping expired metric: %s %s", metric_name, metric_def['created_at'])
continue;
if metric_type in 'gauge':
if metric_name not in gauges_db:
gauges_db[metric_name] = GaugeMetricFamily(metric_name,metric_def['desc'],labels=list(metric_def['labels'].keys()))
gauges_db[metric_name].add_metric(list(metric_def['labels'].values()),metric_def['value'])
else:
logging.error("Unrecognized metric_type: %s ... skipping...", metric_type)
# 2. Once built yield every Gauge
for metric_name in gauges_db:
yield gauges_db[metric_name]
except Exception:
logger.exception("Unexpected error in collect()")
finally:
self.lock.release()
def processServiceResult(self,service_result):
metrics_2_return = []
# return quick if zero replicas...
service_record = service_result['service_record']
if service_record['replicas'] == 0:
return metrics_2_return
service_record = service_result['service_record']
metrics = service_result['metrics']
formal_name = service_record['formal_name']
context = service_record['context']['name']
version = service_record['context']['version']
tags = service_record['context']['tags']
classifier = service_record['classifier']
swarm_name = service_record['swarm_name']
docker_service_name = service_record['name']
if classifier is None:
classifier = "none"
if version is None:
version = "unknown"
if context is None:
context = "unknown"
tags_str = "none"
if tags is not None and len(tags) > 0:
tags_str = ",".join(tags)
# Service level metrics
metrics_2_return.extend(self.get_all_metrics(m(formal_name),m(context),classifier,swarm_name,version,tags_str,m(docker_service_name),metrics,service_record))
return metrics_2_return
def get_service_layer_error_gauge_def(self,
value,
metric_name,
desc,
formal_name,
layer,
swarm,
context,
classifier,
version,
tags,
docker_service_name,
service_check_url,
service_check_host,
service_check_port,
service_check_path,
service_check_url_dns,
error_key):
return { 'name' : metric_name,
'desc' : desc,
'label_schema' : "service_layer_error",
'metric_type': 'gauge',
'value': value,
'created_at': datetime.datetime.utcnow().isoformat(),
'labels': {'formal_name':formal_name,
'layer':layer,
'swarm':swarm,
'context':context,
'classifier':classifier,
'version':version,
'tags':tags,
'docker_service_name':docker_service_name,
'url':service_check_url,
'host':service_check_host,
'port':service_check_port,
'path':service_check_path,
'dns':service_check_url_dns,
'error':error_key}
}
def get_service_gauge_def(self, value, metric_name, desc,formal_name,swarm,context,classifier,version,tags,docker_service_name):
return { 'name' : metric_name,
'desc' : desc,
'label_schema' : "service",
'metric_type': 'gauge',
'value': value,
'created_at': datetime.datetime.utcnow().isoformat(),
'labels': {'formal_name':formal_name,
'swarm':swarm,
'context':context,
'classifier':classifier,
'version':version,
'tags':tags,
'docker_service_name':docker_service_name}
}
def get_service_layer_gauge_def(self, value, metric_name, desc,formal_name,layer,swarm,context,classifier,version,tags,docker_service_name):
return { 'name' : metric_name,
'desc' : desc,
'label_schema' : "service_layer",
'metric_type': 'gauge',
'value': value,
'created_at': datetime.datetime.utcnow().isoformat(),
'labels': {'formal_name':formal_name,
'layer':layer,
'swarm':swarm,
'context':context,
'classifier':classifier,
'version':version,
'tags':tags,
'docker_service_name':docker_service_name}
}
def get_metrics_for_layer(self, formal_name, service_record, metrics, layer, swarm,context,classifier,version,tags,docker_service_name):
#self.inc_counter(metrics['total_ok'],("sts_analyzer_c_ok_total"),("Cumulative total OK"),formal_name,layer,swarm,context,classifier,version,tags,docker_service_name)
#self.inc_counter(metrics['total_fail'],("sts_analyzer_c_fail_total"),("Cumulative total FAILED"),formal_name,layer,swarm,context,classifier,version,tags,docker_service_name)
#self.inc_counter(metrics['total_attempts'],("sts_analyzer_c_attempts_total"),("Cumulative total attempts"),formal_name,layer,swarm,context,classifier,version,tags,docker_service_name)
gs = []
gs.append(self.get_service_layer_gauge_def(metrics['health_rating'],"sts_analyzer_g_health_rating","Most recent % OK checks for",formal_name,layer,swarm,context,classifier,version,tags,docker_service_name))
gs.append(self.get_service_layer_gauge_def(metrics['retry_percentage'],"sts_analyzer_g_retry_percentage","Most recent % of checks that had to be retried",formal_name,layer,swarm,context,classifier,version,tags,docker_service_name))
gs.append(self.get_service_layer_gauge_def(metrics['fail_percentage'],"sts_analyzer_g_fail_percentage","Most recent % of checks that have failed",formal_name,layer,swarm,context,classifier,version,tags,docker_service_name))
gs.append(self.get_service_layer_gauge_def(metrics['total_ok'],("sts_analyzer_g_ok"),("Most recent total OK checks"),formal_name,layer,swarm,context,classifier,version,tags,docker_service_name))
gs.append(self.get_service_layer_gauge_def(metrics['total_fail'],("sts_analyzer_g_failures"),("Most recent total FAILED checks"),formal_name,layer,swarm,context,classifier,version,tags,docker_service_name))
gs.append(self.get_service_layer_gauge_def(metrics['total_fail']+metrics['total_ok'],("sts_analyzer_g_total_checks"),("Most recent total checks executed"),formal_name,layer,swarm,context,classifier,version,tags,docker_service_name))
gs.append(self.get_service_layer_gauge_def(metrics['total_attempts'],("sts_analyzer_g_attempts"),("Most recent total ATTEMPTS checks"),formal_name,layer,swarm,context,classifier,version,tags,docker_service_name))
gs.append(self.get_service_layer_gauge_def((metrics['avg_resp_time_ms'] / 1000.0),("sts_analyzer_g_avg_resp_time_seconds"),("Average response time for checks in seconds"),formal_name,layer,swarm,context,classifier,version,tags,docker_service_name))
gs.append(self.get_service_layer_gauge_def((metrics['total_req_time_ms'] / 1000.0),("sts_analyzer_g_total_resp_time_seconds"),("Total response time for checks in seconds"),formal_name,layer,swarm,context,classifier,version,tags,docker_service_name))
gs.append(self.get_service_layer_gauge_def(service_record['replicas'],("sts_analyzer_g_replicas"),("Most recent total replicas"),formal_name,layer,swarm,context,classifier,version,tags,docker_service_name))
# lets create stats for each failed attempt paths/error stats
failed_attempt_stats = metrics['failed_attempt_stats']
for service_check_url in failed_attempt_stats:
url_errors = failed_attempt_stats[service_check_url]
# lets breakup the url into some more labels
service_check_url_host = None
service_check_url_dns = None
service_check_url_path = None
service_check_url_port = None
try:
parsed = urlparse(service_check_url)
service_check_url_path = parsed.path
if ":" in parsed.netloc:
service_check_url_host = parsed.netloc.split(":")[0]
service_check_url_port = parsed.netloc.split(":")[1]
else:
service_check_url_host = parsed.netloc
if 'https' in parsed.scheme:
service_check_url_port = "443"
else:
service_check_url_port = "80"
service_check_url_dns = socket.gethostbyname(service_check_url_host)
except Exception as e:
service_check_url_dns = "lookup_fail"
# now lets create a guage for each url/error
for attempt_error in url_errors:
error_short_key = attempt_error
error_count = url_errors[attempt_error]
# lets shorten certain types of errors
if 'timeout' in attempt_error or 'timed out' in attempt_error:
error_short_key = "timeout"
elif 'body_evaluator' in attempt_error:
error_short_key = "bodyeval_fail"
elif 'nodename nor servname provided, or not known' in attempt_error:
error_short_key = "dns_fail"
elif 'Name does not resolve' in attempt_error:
error_short_key = "dns_fail"
elif 'gaierror' and 'Try again' in attempt_error:
error_short_key = "dns_fail"
elif 'Remote end closed connection without response' in attempt_error:
error_short_key = "remote_cut_conn"
elif 'Connection refused' in attempt_error:
error_short_key = "conn_refused"
elif 'Connection reset by peer' in attempt_error:
error_short_key = "conn_reset_peer"
elif 'Host is unreachable' in attempt_error:
error_short_key = "host_unreach"
else:
# convert any messages w/ a status code into code only
for s in list(HTTPStatus):
if str(s.value) in attempt_error:
error_short_key = str(s.value)
break
gs.append(self.get_service_layer_error_gauge_def(error_count,("sts_analyzer_g_attempt_errors"),("Most recent total attempt errors"),formal_name,layer,swarm,context,classifier,version,tags,docker_service_name,service_check_url,service_check_url_host,service_check_url_port,service_check_url_path,service_check_url_dns,error_short_key))
return gs
def get_all_metrics(self, formal_name, context, classifier, swarm, version,tags,docker_service_name, metrics, service_record):
to_return = []
# Metric for the service "existence" itself
to_return.append(self.get_service_gauge_def(1, "sts_analyzer_g_services","Current total number of services with 1+ replicas",m(formal_name),swarm,m(context),classifier,version,tags,docker_service_name))
# layer specific metrics...
for l in range(0,5):
layer = "layer"+str(l)
# if nothing was actually checked for they layer
# lets flag it as 100 for purposes of how prometheus
# grafana metrics are averaged across all layers
if metrics[layer]['health_rating'] == 0 and metrics[layer]['total_attempts'] == 0:
metrics[layer]['health_rating'] = 100
to_return.extend(self.get_metrics_for_layer(formal_name,service_record,metrics[layer],layer,swarm,context,classifier,version,tags,docker_service_name))
return to_return
class ServiceCheckerDBMonitor(FileSystemEventHandler):
# we need to register new service-checker db
# file paths to process with this collector
stsa_collector = None
# max threads
threads = 1
# our Pool
executor = None
def set_threads(self, t):
self.threads = t
def on_created(self, event):
super(ServiceCheckerDBMonitor, self).on_created(event)
if not self.executor:
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=self.threads)
if event.is_directory:
return
if 'servicecheckerdb' in event.src_path:
logging.info("Responding to creation of %s: %s", "file", event.src_path)
# give write time to close....
time.sleep(10)
self.executor.submit(self.stsa_collector.processServicecheckerDb,event.src_path)
def init_watching(metric_ttl_seconds,input_dir,listen_port,listen_addr,threads):
# mthreaded...
if (isinstance(threads,str)):
threads = int(threads)
# create watchdog to look for new files
event_handler = ServiceCheckerDBMonitor()
event_handler.set_threads(threads)
# create prometheus python client collector for metrics
# our watchdog registers files to process in this collector
event_handler.stsa_collector = STSACollector()
event_handler.stsa_collector.metric_ttl_seconds = int(metric_ttl_seconds)
# schedule our file watchdog
observer = Observer()
observer.schedule(event_handler, input_dir, recursive=True)
observer.start()
REGISTRY.register(event_handler.stsa_collector)
# Start up the server to expose the metrics.
start_http_server(int(listen_port),addr=listen_addr)
logging.info("Exposing servicecheckerdb metrics for Prometheus at: http://%s:%s/metrics",listen_addr,str(listen_port))
try:
while True:
time.sleep(30)
except KeyboardInterrupt:
observer.stop()
observer.join()
###########################
# Main program
##########################
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input-dir', dest='input_dir', default="./output", help="Directory path to recursively monitor for new '*servicecheckerdb*' json output files. Default './output'")
parser.add_argument('-p', '--listen-port', dest='listen_port', default=8000, help="HTTP port to expose /metrics at, default 8000")
parser.add_argument('-a', '--listen-addr', dest='listen_addr', default='127.0.0.1', help="Address to expost metrics http server on, default 127.0.0.1")
parser.add_argument('-t', '--metric-ttl-seconds', dest='metric_ttl_seconds', default=300, help="TTL for generated metrics that will be exposed. This value should be > than the interval that new *servicecheckerdb*.json are created. Default 300")
parser.add_argument('-l', '--log-file', dest='log_file', default=None, help="Path to log file, default None, STDOUT")
parser.add_argument('-x', '--log-level', dest='log_level', default="DEBUG", help="log level, default DEBUG ")
parser.add_argument('-d', '--threads', dest='threads', default=1, help="max threads for watchdog file processing, default 1")
args = parser.parse_args()
logging.basicConfig(level=logging.getLevelName(args.log_level),
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
filename=args.log_file,filemode='w')
logging.Formatter.converter = time.gmtime
init_watching(args.metric_ttl_seconds,args.input_dir,args.listen_port,args.listen_addr,args.threads)
| [
"socket.gethostbyname",
"urllib.parse.urlparse",
"argparse.ArgumentParser",
"datetime.datetime.utcnow",
"logging.debug",
"threading.RLock",
"time.sleep",
"json.load",
"logging.getLevelName",
"prometheus_client.core.REGISTRY.register",
"watchdog.observers.Observer",
"logging.info",
"logging.e... | [((1218, 1235), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (1233, 1235), False, 'import threading\n'), ((18169, 18179), 'watchdog.observers.Observer', 'Observer', ([], {}), '()\n', (18177, 18179), False, 'from watchdog.observers import Observer\n'), ((18270, 18317), 'prometheus_client.core.REGISTRY.register', 'REGISTRY.register', (['event_handler.stsa_collector'], {}), '(event_handler.stsa_collector)\n', (18287, 18317), False, 'from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily, REGISTRY\n'), ((18792, 18817), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (18815, 18817), False, 'import argparse\n'), ((1975, 2032), 'logging.info', 'logging.info', (['"""Processing servicecheckerdb: \'%s\'"""', 'job_id'], {}), '("Processing servicecheckerdb: \'%s\'", job_id)\n', (1987, 2032), False, 'import logging\n'), ((1868, 1880), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1877, 1880), False, 'import json\n'), ((17329, 17401), 'logging.info', 'logging.info', (['"""Responding to creation of %s: %s"""', '"""file"""', 'event.src_path'], {}), "('Responding to creation of %s: %s', 'file', event.src_path)\n", (17341, 17401), False, 'import logging\n'), ((17458, 17472), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (17468, 17472), False, 'import time\n'), ((18591, 18605), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (18601, 18605), False, 'import time\n'), ((19984, 20020), 'logging.getLevelName', 'logging.getLevelName', (['args.log_level'], {}), '(args.log_level)\n', (20004, 20020), False, 'import logging\n'), ((12907, 12934), 'urllib.parse.urlparse', 'urlparse', (['service_check_url'], {}), '(service_check_url)\n', (12915, 12934), False, 'from urllib.parse import urlparse\n'), ((13483, 13527), 'socket.gethostbyname', 'socket.gethostbyname', (['service_check_url_host'], {}), '(service_check_url_host)\n', (13503, 13527), False, 'import socket\n'), ((2762, 2788), 'datetime.datetime.utcnow', 
'datetime.datetime.utcnow', ([], {}), '()\n', (2786, 2788), False, 'import datetime\n'), ((6956, 6982), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (6980, 6982), False, 'import datetime\n'), ((8110, 8136), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (8134, 8136), False, 'import datetime\n'), ((8909, 8935), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (8933, 8935), False, 'import datetime\n'), ((3588, 3679), 'logging.debug', 'logging.debug', (['"""Skipping expired metric: %s %s"""', 'metric_name', "metric_def['created_at']"], {}), "('Skipping expired metric: %s %s', metric_name, metric_def[\n 'created_at'])\n", (3601, 3679), False, 'import logging\n'), ((4124, 4198), 'logging.error', 'logging.error', (['"""Unrecognized metric_type: %s ... skipping..."""', 'metric_type'], {}), "('Unrecognized metric_type: %s ... skipping...', metric_type)\n", (4137, 4198), False, 'import logging\n')] |
#!/usr/bin/env pythonw
import numpy as np
import matplotlib.pyplot as plt
def flip_coins(flips = 1000000, bins=100):
# Uninformative prior
prior = np.ones(bins, dtype='float')/bins
likelihood_heads = np.arange(bins)/float(bins)
likelihood_tails = 1-likelihood_heads
flips = np.random.choice(a=[True, False], size=flips, p=[0.75, 0.25])
for coin in flips:
if coin: # Heads
posterior = prior * likelihood_heads
else: # Tails
posterior = prior * likelihood_tails
# Normalize
posterior /= np.sum(posterior)
# The posterior is now the new prior
prior = posterior
return posterior
plt.plot(np.arange(100)/float(100), flip_coins(10))
plt.plot(np.arange(100)/float(100), flip_coins(100))
plt.plot(np.arange(100)/float(100), flip_coins(1000))
plt.plot(np.arange(100)/float(100), flip_coins(10000))
plt.plot(np.arange(100)/float(100), flip_coins(100000))
plt.legend([10, 100, 1000, 10000, 100000])
plt.show() | [
"numpy.ones",
"numpy.random.choice",
"matplotlib.pyplot.legend",
"numpy.sum",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((954, 996), 'matplotlib.pyplot.legend', 'plt.legend', (['[10, 100, 1000, 10000, 100000]'], {}), '([10, 100, 1000, 10000, 100000])\n', (964, 996), True, 'import matplotlib.pyplot as plt\n'), ((997, 1007), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1005, 1007), True, 'import matplotlib.pyplot as plt\n'), ((296, 357), 'numpy.random.choice', 'np.random.choice', ([], {'a': '[True, False]', 'size': 'flips', 'p': '[0.75, 0.25]'}), '(a=[True, False], size=flips, p=[0.75, 0.25])\n', (312, 357), True, 'import numpy as np\n'), ((157, 185), 'numpy.ones', 'np.ones', (['bins'], {'dtype': '"""float"""'}), "(bins, dtype='float')\n", (164, 185), True, 'import numpy as np\n'), ((214, 229), 'numpy.arange', 'np.arange', (['bins'], {}), '(bins)\n', (223, 229), True, 'import numpy as np\n'), ((571, 588), 'numpy.sum', 'np.sum', (['posterior'], {}), '(posterior)\n', (577, 588), True, 'import numpy as np\n'), ((693, 707), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (702, 707), True, 'import numpy as np\n'), ((745, 759), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (754, 759), True, 'import numpy as np\n'), ((798, 812), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (807, 812), True, 'import numpy as np\n'), ((852, 866), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (861, 866), True, 'import numpy as np\n'), ((907, 921), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (916, 921), True, 'import numpy as np\n')] |
#%%
from fireworks import PyTorch_Model, Message, HookedPassThroughPipe, Experiment
from fireworks.toolbox import ShufflerPipe, TensorPipe, BatchingPipe, FunctionPipe
from fireworks.toolbox.preprocessing import train_test_split
from fireworks.extensions import IgniteJunction
from fireworks.core import PyTorch_Model
import pandas as pd
import torch
from torchvision.datasets.mnist import FashionMNIST
from os import environ as env
from itertools import count
import matplotlib.pyplot as plt
import visdom
env_name = 'mnist_fashion'
# vis = visdom.Visdom(env=env_name) # If you have a running Visdom server, you can uncomment this to generate plots.
description = "Here, we will train a convolutional neural network on the Fashion MNIST dataset to demonstrate the usage of Fireworks."
experiment = Experiment(env_name, description=description)
#%%
mnist_dir = env.get('MNIST_DIR', './MNIST/')
print(mnist_dir)
# First, we download our dataset and plot one of its elements as an example.
mnist = FashionMNIST(mnist_dir, download=True)
dataset = Message({'examples': mnist.data, 'labels': mnist.targets})
example = dataset['examples'][0]
plt.imshow(example)
plt.show()
#%%
# Now we construct our training and test sets as a pipeline.
train, test = train_test_split(dataset, test=.1)
# We can compose pipes to create an input pipeline that will shuffle the training set on each iteration and produce minibatches formatted for our image classifier.
shuffler = ShufflerPipe(train)
minibatcher = BatchingPipe(shuffler, batch_size=100)
to_cuda = TensorPipe(minibatcher, columns=['examples', 'labels']) # By default, all columns will be moved to Cuda if possible, but you can explicitly specify which ones as well
def tensor_to_float(batch, column='examples'):
""" This converts the images from bytes to floats which is the data type that torch.nn.Conv2d expects. """
batch[column] = batch[column].float()
return batch
def reshape_batch(batch, column='examples'):
""" This reshapes the batch to have an extra dimension corresponding to the input channels so we can apply the torch.nn.Conv2d operation in our model. """
shape = batch[column].shape
new_shape = torch.Size([shape[0], 1, shape[1], shape[2]])
batch[column] = batch[column].reshape(new_shape)
return batch
def normalize_batch(batch, column='examples'):
""" Normalizes pixel intensities to fall between 0 and 1. """
batch[column] /= 255.
return batch
to_float = FunctionPipe(input=to_cuda, function=tensor_to_float)
normalized = FunctionPipe(input=to_float, function=normalize_batch)
training_set = FunctionPipe(input=normalized, function=reshape_batch)
# We can also compose a pipeline in one go like we do here for the test set.
test_set = \
FunctionPipe(
input=FunctionPipe(
input=FunctionPipe(
input=TensorPipe(
input=BatchingPipe(
input=test,
batch_size=100
),
columns=['examples', 'labels']
),
function=to_float
),
function=normalize_batch
),
function=reshape_batch
)
#%%
# Construct Model
class mnistModel(PyTorch_Model):
""" Embeds each image into a 10-dimensional vector. """
required_components = ['in_column', 'out_column', 'conv1', 'pool1', 'conv2', 'pool2']
def init_default_components(self):
self.components['in_column'] = 'examples'
self.components['out_column'] = 'embeddings'
self.components['conv1'] = torch.nn.Conv2d(1, 64, 2, padding=1)
self.components['pool1'] = torch.nn.MaxPool2d(2)
self.components['conv2'] = torch.nn.Conv2d(64, 32, 2)
self.components['pool2'] = torch.nn.MaxPool2d(2)
self.components['nonlinearity'] = torch.nn.ELU()
def forward(self, batch):
embedding = batch[self.in_column]
embedding = self.nonlinearity(self.conv1(embedding))
embedding = self.pool1(embedding)
embedding = self.nonlinearity(self.conv2(embedding))
embedding = self.pool2(embedding)
embedding = embedding.reshape(len(batch), 1152)
batch[self.out_column] = embedding
return batch
class Classifier(PyTorch_Model):
""" Uses the input embedding to perform a classification. """
required_components = ['in_column', 'out_column', 'linear_layer']
def init_default_components(self):
self.components['in_column'] = 'embeddings'
self.components['out_column'] = 'predictions'
self.components['linear1'] = torch.nn.Linear(1152, 256)
self.components['linear2'] = torch.nn.Linear(256, 10)
self.components['nonlinearity'] = torch.nn.ELU()
self.components['softmax'] = torch.nn.Softmax(dim=1)
def forward(self, batch):
predictions = batch[self.in_column]
predictions = self.nonlinearity(self.linear1(predictions))
predictions = self.softmax(self.linear2(predictions))
batch[self.out_column] = predictions
return batch
# All function calls to the classifier will call the embedder first
# ie. classifier(x) is equivalent to classifier.forward(embedder.forward(x))
embedder = mnistModel()
classifier = Classifier(input=embedder)
if torch.cuda.is_available():
embedder.cuda()
classifier.cuda()
#%%
# Set up loss function and training loop
ce_loss = torch.nn.CrossEntropyLoss()
loss = lambda batch: ce_loss(batch['predictions'], batch['labels'])
# By default, this Junction applies a standard training closure of evaluating the model,
# computing gradients of the loss, and backpropagating using the chosen optimizer.
trainer = IgniteJunction(
components={
'model': classifier,
'dataset': training_set
},
loss=loss, optimizer='Adam',
lr=.0001, weight_decay=.001,
visdom=False, # If you have a running Visdom server, you can set this to true to plot training loss over time.
environment=env_name
)
trainer.run(max_epochs=10) # This will take almost 20 minutes on CPU and around 1 minute on GPU
#%%
# Now that we've trained our model, we can compute some metrics on the test set.
# Here, we construct a Pipe that will compute metrics such as sensitivity, specificity, f1, etc.
# on the test set.
classes = {i: class_name for i, class_name in zip(count(), mnist.classes)}
class Metrics(HookedPassThroughPipe):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.true_positives = {class_name: 0 for class_name in classes.values()}
self.true_negatives = {class_name: 0 for class_name in classes.values()}
self.false_positives = {class_name: 0 for class_name in classes.values()}
self.false_negatives = {class_name: 0 for class_name in classes.values()}
self.total_count = 0
self.label_counts = {class_name: 0 for class_name in classes.values()}
self.prediction_counts = {class_name: 0 for class_name in classes.values()}
def _call_hook(self, batch):
"""
This will get called every time the model is called. As a result, this pipe will continuously update
itself as we iterate through the test set.
"""
labels = batch['labels']
predictions = torch.max(batch['predictions'],1)[1]
correct_indices = (predictions == labels).nonzero().flatten().tolist()
incorrect_indices = (predictions != labels).nonzero().flatten().tolist()
for index, name in classes.items():
self.label_counts[name] += int(sum(labels == index)) # How often the class showed up
self.prediction_counts[name] += int(sum(predictions == index)) # How often the class was predicted
self.true_positives[name] += int(sum(predictions[correct_indices] == index)) # How often the correct prediction was for thsi class
self.true_negatives[name] += int(sum(predictions[correct_indices] != index)) # How often the correct prediction was not for the class; ie. how often the prediction was a true negative for this class
self.false_positives[name] += int(sum(predictions[incorrect_indices] == index)) # How often a wrong prediction was for this class
self.false_negatives[name] += int(sum(predictions[incorrect_indices] != index)) # How often a wrong prediction was for another class; ie. how often the prediction was a false negative for this class
self.total_count += len(batch)
return batch
def compile_metrics(self):
"""
After we have gone through the entire test set, we can call this method to compute the actual metrics.
"""
class_names = classes.values()
negative_counts = {name: sum(self.label_counts[other] for other in class_names if other != name) for name in class_names}
self.sensitivity = {name: self.true_positives[name] / self.label_counts[name] for name in class_names}
self.specificity = {name: self.true_negatives[name] / negative_counts[name] for name in class_names}
negative_prediction_counts = {name: sum(self.prediction_counts[other] for other in class_names if other != name) for name in class_names}
self.ppv = {name: self.true_positives[name] / self.prediction_counts[name] for name in class_names}
self.npv = {name: self.true_negatives[name] / negative_prediction_counts[name] for name in class_names}
self.f1 = {name: 2 / (1/self.ppv[name] + 1/self.sensitivity[name]) for name in class_names}
self.accuracy = {name: (self.true_positives[name] + self.true_negatives[name]) / self.total_count for name in class_names}
def get_metrics(self):
"""
Lastly, we will use this method to return the computed metrics as a Pandas DataFrame.
"""
columns = ['sensitivity', 'specificity', 'ppv', 'npv', 'f1', 'accuracy']
df = pd.DataFrame(columns=columns, index=classes.values())
for attribute in columns:
value = getattr(self, attribute)
df[attribute] = [value[key] for key in df.index]
return df
# This class is implemented a a HookedPassThroughPipe, meaning that it's _call_hook method will be applied every time
# The class is called like a function, and this call will pass through to its input.
metrics_computer = Metrics(input=classifier)
for batch in test_set:
# We can simply call this object repeatedly on batches in the test set
# This operation is equivalent to metrics_computer._call_hook(classifier(batch))
metrics_computer(batch)
metrics_computer.compile()
df = metrics_computer.get_metrics()
print(df)
# You can also convert this DataFrame to a Message.
m = Message(df)
print(m)
# Lastly, we can save our results from this experiment
# At it's simplest, the experiment object gives you a way of organizing files.
# You can uses its open() method to get a file handle or path string to a file
# inside its experiment directory.
# Each time you create an experiment, a new experiment directory will be created automatically.
df.to_csv(experiment.open("metrics.csv", string_only=True))
# Since our models are still subclasses of torch.nn.module, we can save them using the standard torch.save feature
# but if we want, we can also save their parameters in other formats such as JSON
state = embedder.get_state()
Message.from_objects(state).to('json', path=experiment.open("embedder.json",string_only=True))
state = classifier.get_state()
Message.from_objects(state).to('json', path=experiment.open("classifier.json",string_only=True))
| [
"torch.nn.CrossEntropyLoss",
"torch.max",
"fireworks.toolbox.preprocessing.train_test_split",
"torch.cuda.is_available",
"torchvision.datasets.mnist.FashionMNIST",
"matplotlib.pyplot.imshow",
"fireworks.Experiment",
"fireworks.toolbox.BatchingPipe",
"fireworks.Message.from_objects",
"fireworks.Mes... | [((801, 846), 'fireworks.Experiment', 'Experiment', (['env_name'], {'description': 'description'}), '(env_name, description=description)\n', (811, 846), False, 'from fireworks import PyTorch_Model, Message, HookedPassThroughPipe, Experiment\n'), ((864, 896), 'os.environ.get', 'env.get', (['"""MNIST_DIR"""', '"""./MNIST/"""'], {}), "('MNIST_DIR', './MNIST/')\n", (871, 896), True, 'from os import environ as env\n'), ((1000, 1038), 'torchvision.datasets.mnist.FashionMNIST', 'FashionMNIST', (['mnist_dir'], {'download': '(True)'}), '(mnist_dir, download=True)\n', (1012, 1038), False, 'from torchvision.datasets.mnist import FashionMNIST\n'), ((1049, 1107), 'fireworks.Message', 'Message', (["{'examples': mnist.data, 'labels': mnist.targets}"], {}), "({'examples': mnist.data, 'labels': mnist.targets})\n", (1056, 1107), False, 'from fireworks import PyTorch_Model, Message, HookedPassThroughPipe, Experiment\n'), ((1141, 1160), 'matplotlib.pyplot.imshow', 'plt.imshow', (['example'], {}), '(example)\n', (1151, 1160), True, 'import matplotlib.pyplot as plt\n'), ((1161, 1171), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1169, 1171), True, 'import matplotlib.pyplot as plt\n'), ((1252, 1287), 'fireworks.toolbox.preprocessing.train_test_split', 'train_test_split', (['dataset'], {'test': '(0.1)'}), '(dataset, test=0.1)\n', (1268, 1287), False, 'from fireworks.toolbox.preprocessing import train_test_split\n'), ((1463, 1482), 'fireworks.toolbox.ShufflerPipe', 'ShufflerPipe', (['train'], {}), '(train)\n', (1475, 1482), False, 'from fireworks.toolbox import ShufflerPipe, TensorPipe, BatchingPipe, FunctionPipe\n'), ((1497, 1535), 'fireworks.toolbox.BatchingPipe', 'BatchingPipe', (['shuffler'], {'batch_size': '(100)'}), '(shuffler, batch_size=100)\n', (1509, 1535), False, 'from fireworks.toolbox import ShufflerPipe, TensorPipe, BatchingPipe, FunctionPipe\n'), ((1546, 1601), 'fireworks.toolbox.TensorPipe', 'TensorPipe', (['minibatcher'], {'columns': 
"['examples', 'labels']"}), "(minibatcher, columns=['examples', 'labels'])\n", (1556, 1601), False, 'from fireworks.toolbox import ShufflerPipe, TensorPipe, BatchingPipe, FunctionPipe\n'), ((2471, 2524), 'fireworks.toolbox.FunctionPipe', 'FunctionPipe', ([], {'input': 'to_cuda', 'function': 'tensor_to_float'}), '(input=to_cuda, function=tensor_to_float)\n', (2483, 2524), False, 'from fireworks.toolbox import ShufflerPipe, TensorPipe, BatchingPipe, FunctionPipe\n'), ((2538, 2592), 'fireworks.toolbox.FunctionPipe', 'FunctionPipe', ([], {'input': 'to_float', 'function': 'normalize_batch'}), '(input=to_float, function=normalize_batch)\n', (2550, 2592), False, 'from fireworks.toolbox import ShufflerPipe, TensorPipe, BatchingPipe, FunctionPipe\n'), ((2608, 2662), 'fireworks.toolbox.FunctionPipe', 'FunctionPipe', ([], {'input': 'normalized', 'function': 'reshape_batch'}), '(input=normalized, function=reshape_batch)\n', (2620, 2662), False, 'from fireworks.toolbox import ShufflerPipe, TensorPipe, BatchingPipe, FunctionPipe\n'), ((5345, 5370), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5368, 5370), False, 'import torch\n'), ((5470, 5497), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (5495, 5497), False, 'import torch\n'), ((5749, 5927), 'fireworks.extensions.IgniteJunction', 'IgniteJunction', ([], {'components': "{'model': classifier, 'dataset': training_set}", 'loss': 'loss', 'optimizer': '"""Adam"""', 'lr': '(0.0001)', 'weight_decay': '(0.001)', 'visdom': '(False)', 'environment': 'env_name'}), "(components={'model': classifier, 'dataset': training_set},\n loss=loss, optimizer='Adam', lr=0.0001, weight_decay=0.001, visdom=\n False, environment=env_name)\n", (5763, 5927), False, 'from fireworks.extensions import IgniteJunction\n'), ((10778, 10789), 'fireworks.Message', 'Message', (['df'], {}), '(df)\n', (10785, 10789), False, 'from fireworks import PyTorch_Model, Message, HookedPassThroughPipe, Experiment\n'), 
((2185, 2230), 'torch.Size', 'torch.Size', (['[shape[0], 1, shape[1], shape[2]]'], {}), '([shape[0], 1, shape[1], shape[2]])\n', (2195, 2230), False, 'import torch\n'), ((3600, 3636), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(1)', '(64)', '(2)'], {'padding': '(1)'}), '(1, 64, 2, padding=1)\n', (3615, 3636), False, 'import torch\n'), ((3672, 3693), 'torch.nn.MaxPool2d', 'torch.nn.MaxPool2d', (['(2)'], {}), '(2)\n', (3690, 3693), False, 'import torch\n'), ((3729, 3755), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(64)', '(32)', '(2)'], {}), '(64, 32, 2)\n', (3744, 3755), False, 'import torch\n'), ((3791, 3812), 'torch.nn.MaxPool2d', 'torch.nn.MaxPool2d', (['(2)'], {}), '(2)\n', (3809, 3812), False, 'import torch\n'), ((3855, 3869), 'torch.nn.ELU', 'torch.nn.ELU', ([], {}), '()\n', (3867, 3869), False, 'import torch\n'), ((4635, 4661), 'torch.nn.Linear', 'torch.nn.Linear', (['(1152)', '(256)'], {}), '(1152, 256)\n', (4650, 4661), False, 'import torch\n'), ((4699, 4723), 'torch.nn.Linear', 'torch.nn.Linear', (['(256)', '(10)'], {}), '(256, 10)\n', (4714, 4723), False, 'import torch\n'), ((4766, 4780), 'torch.nn.ELU', 'torch.nn.ELU', ([], {}), '()\n', (4778, 4780), False, 'import torch\n'), ((4818, 4841), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (4834, 4841), False, 'import torch\n'), ((11431, 11458), 'fireworks.Message.from_objects', 'Message.from_objects', (['state'], {}), '(state)\n', (11451, 11458), False, 'from fireworks import PyTorch_Model, Message, HookedPassThroughPipe, Experiment\n'), ((11557, 11584), 'fireworks.Message.from_objects', 'Message.from_objects', (['state'], {}), '(state)\n', (11577, 11584), False, 'from fireworks import PyTorch_Model, Message, HookedPassThroughPipe, Experiment\n'), ((6420, 6427), 'itertools.count', 'count', ([], {}), '()\n', (6425, 6427), False, 'from itertools import count\n'), ((7363, 7397), 'torch.max', 'torch.max', (["batch['predictions']", '(1)'], {}), "(batch['predictions'], 1)\n", (7372, 
7397), False, 'import torch\n'), ((2892, 2932), 'fireworks.toolbox.BatchingPipe', 'BatchingPipe', ([], {'input': 'test', 'batch_size': '(100)'}), '(input=test, batch_size=100)\n', (2904, 2932), False, 'from fireworks.toolbox import ShufflerPipe, TensorPipe, BatchingPipe, FunctionPipe\n')] |
import os
import shutil

# Folder name -> file extensions that belong in it. A single mapping
# (instead of eight parallel lists plus eight copy-pasted loops) means each
# file is examined and moved at most once.
# BUG FIX: '.gif' was listed under both video and image in the original
# tables, so the image pass tried to move a gif the video pass had already
# moved, and shutil.move raised. It is kept under 'video' only here.
# NOTE: '.tar.gz' can never match os.path.splitext (which yields '.gz');
# kept for parity with the original table.
CATEGORIES = {
    'video': ['.webm', '.mkv', '.vob', '.gif', '.avi', '.amv', '.mp4'],
    'audio': ['.aif', '.cda', '.mid', '.mp3', '.mpa', '.ogg'],
    'image': ['.tif', '.tiff', '.bmp', '.jpg', '.jpeg', '.png', '.eps',
              '.raw', '.cr2', '.nef', '.orf', '.sr2', '.ico'],
    'setup': ['.exe', '.msi'],
    'disc': ['.iso'],
    'code': ['.py', '.html', '.css', '.js', '.whl'],
    'archive': ['.zip', '.rar', '.tar', '.tgz', '.tar.gz'],
    'torrent': ['.torrent'],
}
# Invert to extension -> folder for O(1) lookup per file.
EXT_TO_FOLDER = {ext: folder for folder, exts in CATEGORIES.items() for ext in exts}

path = input('Input path: ')
if path == 'download':
    path = 'C:\\Users\\tiko1\\Downloads\\'
elif path == 'e':
    path = 'E:\\'
elif path == 'c':
    path = 'C:\\'
elif not os.path.exists(path):
    print('Your path doesn\'t exists')
    # BUG FIX: the original only printed the warning and then crashed in
    # os.listdir(); abort cleanly instead.
    raise SystemExit(1)

for file_name in os.listdir(path):
    ext = os.path.splitext(file_name)[-1].lower()
    folder = EXT_TO_FOLDER.get(ext)
    if folder is None:
        continue  # unknown extension: leave the file where it is
    # os.path.join also fixes the original `path + file_name`, which broke
    # whenever the typed path lacked a trailing separator.
    dest_dir = os.path.join(path, folder)
    if not os.path.exists(dest_dir):
        os.mkdir(dest_dir)
    shutil.move(os.path.join(path, file_name), dest_dir)
"os.path.exists",
"os.listdir",
"shutil.move",
"os.path.splitext",
"os.mkdir"
] | [((822, 838), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (832, 838), False, 'import os\n'), ((716, 736), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (730, 736), False, 'import os\n'), ((1089, 1136), 'shutil.move', 'shutil.move', (['(path + file_name)', "(path + 'audio\\\\')"], {}), "(path + file_name, path + 'audio\\\\')\n", (1100, 1136), False, 'import shutil\n'), ((1305, 1352), 'shutil.move', 'shutil.move', (['(path + file_name)', "(path + 'video\\\\')"], {}), "(path + file_name, path + 'video\\\\')\n", (1316, 1352), False, 'import shutil\n'), ((1521, 1568), 'shutil.move', 'shutil.move', (['(path + file_name)', "(path + 'image\\\\')"], {}), "(path + file_name, path + 'image\\\\')\n", (1532, 1568), False, 'import shutil\n'), ((1737, 1784), 'shutil.move', 'shutil.move', (['(path + file_name)', "(path + 'setup\\\\')"], {}), "(path + file_name, path + 'setup\\\\')\n", (1748, 1784), False, 'import shutil\n'), ((1948, 1994), 'shutil.move', 'shutil.move', (['(path + file_name)', "(path + 'code\\\\')"], {}), "(path + file_name, path + 'code\\\\')\n", (1959, 1994), False, 'import shutil\n'), ((2173, 2222), 'shutil.move', 'shutil.move', (['(path + file_name)', "(path + 'archive\\\\')"], {}), "(path + file_name, path + 'archive\\\\')\n", (2184, 2222), False, 'import shutil\n'), ((2401, 2450), 'shutil.move', 'shutil.move', (['(path + file_name)', "(path + 'torrent\\\\')"], {}), "(path + file_name, path + 'torrent\\\\')\n", (2412, 2450), False, 'import shutil\n'), ((2614, 2660), 'shutil.move', 'shutil.move', (['(path + file_name)', "(path + 'disc\\\\')"], {}), "(path + file_name, path + 'disc\\\\')\n", (2625, 2660), False, 'import shutil\n'), ((881, 908), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (897, 908), False, 'import os\n'), ((998, 1030), 'os.path.exists', 'os.path.exists', (["(path + 'audio\\\\')"], {}), "(path + 'audio\\\\')\n", (1012, 1030), False, 'import os\n'), ((1049, 1075), 'os.mkdir', 
'os.mkdir', (["(path + 'audio\\\\')"], {}), "(path + 'audio\\\\')\n", (1057, 1075), False, 'import os\n'), ((1214, 1246), 'os.path.exists', 'os.path.exists', (["(path + 'video\\\\')"], {}), "(path + 'video\\\\')\n", (1228, 1246), False, 'import os\n'), ((1265, 1291), 'os.mkdir', 'os.mkdir', (["(path + 'video\\\\')"], {}), "(path + 'video\\\\')\n", (1273, 1291), False, 'import os\n'), ((1430, 1462), 'os.path.exists', 'os.path.exists', (["(path + 'image\\\\')"], {}), "(path + 'image\\\\')\n", (1444, 1462), False, 'import os\n'), ((1481, 1507), 'os.mkdir', 'os.mkdir', (["(path + 'image\\\\')"], {}), "(path + 'image\\\\')\n", (1489, 1507), False, 'import os\n'), ((1646, 1678), 'os.path.exists', 'os.path.exists', (["(path + 'setup\\\\')"], {}), "(path + 'setup\\\\')\n", (1660, 1678), False, 'import os\n'), ((1697, 1723), 'os.mkdir', 'os.mkdir', (["(path + 'setup\\\\')"], {}), "(path + 'setup\\\\')\n", (1705, 1723), False, 'import os\n'), ((1859, 1890), 'os.path.exists', 'os.path.exists', (["(path + 'code\\\\')"], {}), "(path + 'code\\\\')\n", (1873, 1890), False, 'import os\n'), ((1909, 1934), 'os.mkdir', 'os.mkdir', (["(path + 'code\\\\')"], {}), "(path + 'code\\\\')\n", (1917, 1934), False, 'import os\n'), ((2078, 2112), 'os.path.exists', 'os.path.exists', (["(path + 'archive\\\\')"], {}), "(path + 'archive\\\\')\n", (2092, 2112), False, 'import os\n'), ((2131, 2159), 'os.mkdir', 'os.mkdir', (["(path + 'archive\\\\')"], {}), "(path + 'archive\\\\')\n", (2139, 2159), False, 'import os\n'), ((2306, 2340), 'os.path.exists', 'os.path.exists', (["(path + 'torrent\\\\')"], {}), "(path + 'torrent\\\\')\n", (2320, 2340), False, 'import os\n'), ((2359, 2387), 'os.mkdir', 'os.mkdir', (["(path + 'torrent\\\\')"], {}), "(path + 'torrent\\\\')\n", (2367, 2387), False, 'import os\n'), ((2525, 2556), 'os.path.exists', 'os.path.exists', (["(path + 'disc\\\\')"], {}), "(path + 'disc\\\\')\n", (2539, 2556), False, 'import os\n'), ((2575, 2600), 'os.mkdir', 'os.mkdir', (["(path + 
'disc\\\\')"], {}), "(path + 'disc\\\\')\n", (2583, 2600), False, 'import os\n')] |
from pathlib import Path
'''
This script creates a new html that has placed the javascript code inline to make a standalone html
'''
# Input page, and the two inlined output files (index.html is the copy
# served by GitHub Pages). All paths are relative to the current directory.
src = Path.cwd() / 'coldcard_address_generator_html.html'
dest = Path.cwd() / 'coldcard_address_generator_html_standalone.html'
dest2 = Path.cwd() / 'index.html' # for github pages
# NOTE(review): this constant appears unused — main() passes the <script>
# tag strings inline; confirm before removing.
string = '<script src="js/coldcard_address_generator_html.js"></script>'
def main():
    """Read the source HTML, inline each referenced JS file, and write the
    resulting standalone page to both output locations (dest and dest2)."""
    with open(src) as s:
        source = s.readlines()
    # Hoist the common js directory instead of rebuilding it per call.
    js_dir = Path.cwd() / 'js'
    source = replace(source, '<script src="js/coldcard_address_generator_html.js"></script>', js_dir / 'coldcard_address_generator_html.js')
    source = replace(source, '<script type="module" src="js/bip32.js"></script>', js_dir / 'bip32.js')
    source = replace(source, '<script type="module" src="js/bitcoinjs-lib.js"></script>', js_dir / 'bitcoinjs-lib.js')
    # Join once and write the same content to both destinations instead of
    # duplicating the line-by-line write loop.
    text = ''.join(source)
    for target in (dest, dest2):
        with open(target, 'w') as d:
            d.write(text)
def replace(string_arr, find_str, replacement_file):
    """Return a copy of string_arr with matching lines inlined.

    Every line that contains find_str is replaced by the contents of
    replacement_file wrapped in <script>...</script> tags; all other
    lines are kept as-is. The input list is not modified.

    Args:
        string_arr: list of lines (with trailing newlines) from the HTML file.
        find_str: substring identifying the <script src=...> tag to inline.
        replacement_file: path of the JS file whose contents are inlined.

    Returns:
        A new list of lines.
    """
    with open(replacement_file) as r:
        rep = r.read()
    out = []
    for line in string_arr:
        if find_str in line:
            out.append('<script>' + rep + '</script>\n')
            # BUG FIX: removed a stray debug print() that wrote a blank
            # line to stdout on every replacement.
        else:
            out.append(line)
    return out
# Run the inliner only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"pathlib.Path.cwd"
] | [((142, 152), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (150, 152), False, 'from pathlib import Path\n'), ((201, 211), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (209, 211), False, 'from pathlib import Path\n'), ((272, 282), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (280, 282), False, 'from pathlib import Path\n'), ((559, 569), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (567, 569), False, 'from pathlib import Path\n'), ((699, 709), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (707, 709), False, 'from pathlib import Path\n'), ((821, 831), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (829, 831), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python
import argparse
import logging
from csv import DictReader
from datetime import date, datetime
from pathlib import Path
# Module-level logger; handler/level configuration is left to the runner.
logger = logging.getLogger(__name__)
# TXF header template: "V042" is the TXF format version line, the "A" line
# names the exporting program, "D" carries the export date, and "^" ends a
# record. (Field meanings presumed from the TXF v042 spec — confirm.)
header = """V042
ATaxTool Donations 0.4
D{date:%Y-%m-%d}
^
"""
# Detail record used when a donation row has no notes.
# NOTE(review): N280 looks like the TXF reference number for charitable
# cash contributions — verify against the TXF spec before changing.
record_layout_1 = """TD
N280
C1
L1
${amount:0.2f}
X{payee} ({ein})
^
"""
# Variant used when the row has notes; they are appended after the payee.
record_layout_2 = """TD
N280
C1
L1
${amount:0.2f}
X {payee}/{note} ({ein})
^
"""
def main(args):
    """Convert a donations CSV into a TXF (Tax eXchange Format) file.

    Reads args.input with csv.DictReader (expects 'payee', 'ein', 'amount'
    and 'notes' columns) and writes one TD record per positive-amount row,
    with the amount negated, to args.output — or, when no output is given,
    to a timestamped .txf file next to the input. The output is opened in
    exclusive mode ('x') so an existing file is never overwritten, and uses
    CRLF line endings as TXF consumers expect.
    """
    input_file = Path(args.input).resolve(strict=True)
    logger.info(f"Input file: {input_file}")
    if args.output is None:
        output_file = input_file.with_stem(
            input_file.stem + f"_{datetime.now():%Y%m%d_%H%M%S}"
        ).with_suffix(".txf")
    else:
        output_file = Path(args.output).resolve()
    logger.info(f"Output file: {output_file}")
    with input_file.open("r") as f:
        records = list(DictReader(f))
    with output_file.open("x", newline="\r\n") as f:
        f.write(header.format(date=date.today()))
        for record in records:
            amount = float(record["amount"])  # convert once, not twice
            if amount <= 0:
                continue  # skip zero/negative rows, as before
            # str.format ignores unused keyword arguments, so both layouts
            # can share one call (record_layout_1 simply has no {note}).
            layout = record_layout_2 if record["notes"] != "" else record_layout_1
            f.write(
                layout.format(
                    payee=record["payee"],
                    ein=record["ein"],
                    note=record["notes"],
                    amount=-amount,
                )
            )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("input", help="Input CSV file")
parser.add_argument("-o", "--output", help="File to write TXF output to")
main(parser.parse_args())
| [
"logging.getLogger",
"csv.DictReader",
"argparse.ArgumentParser",
"pathlib.Path",
"datetime.datetime.now",
"datetime.date.today"
] | [((152, 179), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (169, 179), False, 'import logging\n'), ((1772, 1797), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1795, 1797), False, 'import argparse\n'), ((436, 452), 'pathlib.Path', 'Path', (['args.input'], {}), '(args.input)\n', (440, 452), False, 'from pathlib import Path\n'), ((854, 867), 'csv.DictReader', 'DictReader', (['f'], {}), '(f)\n', (864, 867), False, 'from csv import DictReader\n'), ((719, 736), 'pathlib.Path', 'Path', (['args.output'], {}), '(args.output)\n', (723, 736), False, 'from pathlib import Path\n'), ((958, 970), 'datetime.date.today', 'date.today', ([], {}), '()\n', (968, 970), False, 'from datetime import date, datetime\n'), ((626, 640), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (638, 640), False, 'from datetime import date, datetime\n')] |
import sys, numpy
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
#0=drama,1=comedy,2=animated,3=action/adventure
def random_forest_class(raw_test_set, training_file="movieTraining2.txt", n_estimators=100):
    """Classify movie plot summaries with a TF-IDF + random forest pipeline.

    Trains on the '||'-delimited training file (field 4 = plot text,
    field 0 = class label) and predicts a label for each test summary.

    Args:
        raw_test_set: list of plot-summary strings to classify.
        training_file: path to the training data, one record per line with
            fields separated by '||' (defaults to the original hard-coded file).
        n_estimators: number of trees in the forest (defaults to 100, as before).

    Returns:
        Array of predicted class labels, one per entry in raw_test_set.
    """
    x_train = []
    y_train = []
    vectorizer = TfidfVectorizer(analyzer='word', stop_words='english', max_features=1024)
    with open(training_file, "r") as mtf:
        for line in mtf:
            fields = line.split("||")
            x_train.append(fields[4])
            y_train.append(fields[0])
    # Fit the vectorizer on the training text only, then reuse it (without
    # refitting) to transform the test set into the same feature space.
    train_data_feat = vectorizer.fit_transform(x_train).toarray()
    forest = RandomForestClassifier(n_estimators=n_estimators)
    forest = forest.fit(train_data_feat, y_train)
    test_data_feat = vectorizer.transform(raw_test_set).toarray()
    return forest.predict(test_data_feat)
def naive_bayes_class(raw_test_set, training_file="movieTraining2.txt"):
    """Classify movie plot summaries with a TF-IDF + multinomial naive Bayes pipeline.

    Trains on the '||'-delimited training file (field 4 = plot text,
    field 1 = class label — note this differs from random_forest_class,
    which trains on field 0) and predicts a label per test summary.

    Args:
        raw_test_set: list of plot-summary strings to classify.
        training_file: path to the training data, one record per line with
            fields separated by '||' (defaults to the original hard-coded file).

    Returns:
        Array of predicted class labels, one per entry in raw_test_set.
    """
    x_train = []
    y_train = []
    vectorizer = TfidfVectorizer(analyzer='word', stop_words='english', max_features=1024)
    with open(training_file, "r") as mtf:
        for line in mtf:
            fields = line.split("||")
            x_train.append(fields[4])
            y_train.append(fields[1])
    # Fit the vectorizer on the training text only, then reuse it (without
    # refitting) to transform the test set into the same feature space.
    train_data_feat = vectorizer.fit_transform(x_train).toarray()
    nb = MultinomialNB()
    nb = nb.fit(train_data_feat, y_train)
    test_data_feat = vectorizer.transform(raw_test_set).toarray()
    return nb.predict(test_data_feat)
def int2class(lizt, bol):
    """Print one prediction per line.

    With bol truthy, each entry is converted to int and printed as its
    genre name (unknown codes print nothing, as before); otherwise the
    entries are printed as-is.
    """
    genre_names = {0: 'drama', 1: 'comedy', 2: 'animated', 3: 'action', 4: 'horror'}
    if bol:
        for entry in lizt:
            label = genre_names.get(int(entry))
            if label is not None:
                print(label)
    else:
        for entry in lizt:
            print(entry)
# Ad-hoc test set: one plot-summary string per candidate movie/series.
rts=[
"When Dipper and <NAME> get sent to their great-uncle Stan's shop in Gravity Falls, Oregon for the summer, they think it will be boring. But when Dipper find a strange journal in the woods, they learn about some strange secrets about the town. Welcome to Gravity Falls. Just north of Normal, west of Weird.",
"When chemistry teacher <NAME> is diagnosed with Stage III cancer and given only two years to live, he decides he has nothing to lose. He lives with his teenage son, who has cerebral palsy, and his wife, in New Mexico. Determined to ensure that his family will have a secure future, Walt embarks on a career of drugs and crime. He proves to be remarkably proficient in this new world as he begins manufacturing and selling methamphetamine with one of his former students. The series tracks the impacts of a fatal diagnosis on a regular, hard working man, and explores how a fatal diagnosis affects his morality and transforms him into a major player of the drug trade.",
"A 19th century Western. <NAME> is a clumsy Imperial Guard to the Emperor of China. When <NAME> is kidnapped from the Forbidden City, Wang feels personally responsible and insists on joining the guards sent to rescue the Princess, who has been whisked away to the United States. In Nevada and hot on the trail of the kidnappers, Wang is separated from the group and soon finds himself an unlikely partner with <NAME>, a small time robber with delusions of grandeur. Together, the two forge onto one misadventure after another.",
"A former lawyer attends a community college when it is discovered he faked his bachelor degree. In an attempt to get with a student in his Spanish class he forms a Spanish study group. To his surprise more people attend the study group and the group of misfits form an unlikely community.",
"A Michigan farmer and a prospector form a partnership in the California gold country. Their adventures include buying and sharing a wife, hijacking a stage, kidnaping six prostitutes, and turning their mining camp into a boomtown. Along the way there is plenty of drinking, gambling, and singing. They even find time to do some creative gold mining.",
"Dory is a wide-eyed, blue tang fish who suffers from memory loss every 10 seconds or so. The one thing she can remember is that she somehow became separated from her parents as a child. With help from her friends Nemo and Marlin, Dory embarks on an epic adventure to find them. Her journey brings her to the Marine Life Institute, a conservatory that houses diverse ocean species.",
"A US research station, Antarctica, early-winter 1982. The base is suddenly buzzed by a helicopter from the nearby Norwegian research station. They are trying to kill a dog that has escaped from their base. After the destruction of the Norwegian chopper the members of the US team fly to the Norwegian base, only to discover them all dead or missing. They do find the remains of a strange creature the Norwegians burned. The Americans take it to their base and deduce that it is an alien life form. After a while it is apparent that the alien can take over and assimilate into other life forms, including humans, and can spread like a virus.",
"This is a shit movie"
]
# Leftover experiments kept for reference (interactive input path):
#res = naive_bayes_class(rts)
#res = random_forest_class(rts)
#x = []
#print("Please Describe a Story:")
#x.append(input().strip())
# Classify the samples with both models. Naive Bayes labels are printed as
# genre names (bol=True maps ints to names); random forest labels are
# printed raw (bol=False) — the two models train on different label fields.
res = naive_bayes_class(rts)
int2class(res,True)
res = random_forest_class(rts)
int2class(res,False)
| [
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.naive_bayes.MultinomialNB",
"sklearn.ensemble.RandomForestClassifier"
] | [((324, 397), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'analyzer': '"""word"""', 'stop_words': '"""english"""', 'max_features': '(1024)'}), "(analyzer='word', stop_words='english', max_features=1024)\n", (339, 397), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((1099, 1139), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (1121, 1139), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1508, 1581), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'analyzer': '"""word"""', 'stop_words': '"""english"""', 'max_features': '(1024)'}), "(analyzer='word', stop_words='english', max_features=1024)\n", (1523, 1581), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((2278, 2293), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (2291, 2293), False, 'from sklearn.naive_bayes import MultinomialNB\n')] |
from flask import Blueprint, jsonify, request, session
from pymongo import DESCENDING
from api.views import users
from api import collection_users
@users.route("/api/register", methods=["POST"])
def user_register():
if request.method == "POST":
data = request.get_json()
email = data['email']
password = data['password']
confirm = data['confirm']
user_name = data["user_name"]
if not all([email, password, confirm]):
return jsonify("Sorry, all the input fields are required.")
if password != confirm:
return jsonify("Inconsistent passwords.")
try:
existent_user = collection_users.find_one({"email": email}, projection={"password": False})
if existent_user != None:
return jsonify("User exists with this email. Please try a new one.")
# default role: student (user_id >= 3001)
max_id = 3000
for user in collection_users.find(filter={}, sort=[("user_id", DESCENDING)], limit=1, projection={"user_id": True}):
if user["user_id"] > max_id:
max_id = user["user_id"]
new_user = {
"user_id": max_id + 1, "user_name": user_name, 'email': email,
'password': password, "role": 0,
"enrolled_courses": ["IT5007", "IT5002"],
"favored_courses": [],
"taken_courses": ["IT5001", "IT5003"],
"about_me": "",
}
collection_users.insert_one(new_user)
return jsonify("Registeration Succeeds!")
except Exception as e:
print(e)
return jsonify("Registration failed due to server error.")
@users.route("/api/login", methods=["GET", "POST"])
def user_login():
if request.method == "POST":
data = request.get_json()
email = data['email']
remember = data['remember']
password = str(data['password'])
if not email or not password:
return jsonify({"status": 0, "message": "Invalid input."})
# ip = request.remote_addr //for trying limit
check_comb = {"email": email, "password": password}
try:
user = collection_users.find_one(check_comb, projection={"user_id": True, "user_name": True})
print('Succeed: login matched.')
session[email] = True
session.permanent = True
session["user_id"] = user["user_id"]
session["user_name"] = user["user_name"]
return jsonify({"status": 1, "message": 'Log in Successfuuly!'})
except Exception as e:
print(e)
print("Login Error: Fail to find user with given email and password")
return jsonify({"status": 0, "message": "Mismatch on email or password."})
@users.route("/api/users/info", methods=["POST"])
def get_userinfo():
user_id = session.get("user_id")
if user_id == None:
print(f"getUserInfo Error: Probably no user has logined.")
return jsonify({"user_id": -1})
try:
user = collection_users.find_one({"user_id": user_id}, projection={"_id": False, "password": False})
print(f"Succeed: user {user_id} found." if user != None else f"Failed: cannot find user {user_id}!")
return jsonify(user) if user != None else jsonify({"user_id": -1})
except Exception as e:
print(e)
print(f"getUserInfor Error: user with id {user_id} is not found.")
return jsonify({"user_id": -1})
@users.route('/api/logout', methods=["POST"])
def logout():
    """Clear the entire session and report the logged-out user's name."""
    if request.method == 'POST':
        print("session to be deleted: ", session) # for tests only
        # NOTE(review): flask.session is a proxy object, so `session == None`
        # looks like it can never be true — this guard appears dead; confirm.
        if session == None:
            print("Logout Error: no session currently.")
            return jsonify({"status": 0})
        user_name = session.get("user_name")
        # clear() drops every session key, not just this user's entries.
        session.clear()
        print(f"Succeed: {user_name} log out successfully. Remaining session info: ", session) # for tests only
        return jsonify({"status": 1, "user_name": user_name})
@users.route("/api/users/set_favor", methods=["POST"])
def set_favoredCourse():
data = request.get_json()
user_id = data["user_id"]
course_id = data["course_id"]
status = data["status"]
try:
favored_courses = collection_users.find_one({"user_id": user_id}, projection={"favored_courses": True})["favored_courses"]
if status == 0 and course_id in favored_courses:
favored_courses.remove(course_id)
collection_users.update_one({"user_id": user_id}, {"$set": {"favored_courses": favored_courses}})
elif status == 1 and course_id not in favored_courses:
favored_courses.append(course_id)
collection_users.update_one({"user_id": user_id}, {"$set": {"favored_courses": favored_courses}})
else:
print("Backend record of favored courses mismatches with the frontend!")
return jsonify({"status": -1})
return jsonify({"status": 1})
except Exception as e:
print(e)
return jsonify({"status": -1}) | [
"api.views.users.route",
"api.collection_users.find_one",
"api.collection_users.update_one",
"api.collection_users.find",
"flask.session.get",
"flask.request.get_json",
"api.collection_users.insert_one",
"flask.session.clear",
"flask.jsonify"
] | [((158, 204), 'api.views.users.route', 'users.route', (['"""/api/register"""'], {'methods': "['POST']"}), "('/api/register', methods=['POST'])\n", (169, 204), False, 'from api.views import users\n'), ((1850, 1900), 'api.views.users.route', 'users.route', (['"""/api/login"""'], {'methods': "['GET', 'POST']"}), "('/api/login', methods=['GET', 'POST'])\n", (1861, 1900), False, 'from api.views import users\n'), ((2991, 3039), 'api.views.users.route', 'users.route', (['"""/api/users/info"""'], {'methods': "['POST']"}), "('/api/users/info', methods=['POST'])\n", (3002, 3039), False, 'from api.views import users\n'), ((3712, 3756), 'api.views.users.route', 'users.route', (['"""/api/logout"""'], {'methods': "['POST']"}), "('/api/logout', methods=['POST'])\n", (3723, 3756), False, 'from api.views import users\n'), ((4274, 4327), 'api.views.users.route', 'users.route', (['"""/api/users/set_favor"""'], {'methods': "['POST']"}), "('/api/users/set_favor', methods=['POST'])\n", (4285, 4327), False, 'from api.views import users\n'), ((3076, 3098), 'flask.session.get', 'session.get', (['"""user_id"""'], {}), "('user_id')\n", (3087, 3098), False, 'from flask import Blueprint, jsonify, request, session\n'), ((4366, 4384), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (4382, 4384), False, 'from flask import Blueprint, jsonify, request, session\n'), ((277, 295), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (293, 295), False, 'from flask import Blueprint, jsonify, request, session\n'), ((1970, 1988), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1986, 1988), False, 'from flask import Blueprint, jsonify, request, session\n'), ((3210, 3234), 'flask.jsonify', 'jsonify', (["{'user_id': -1}"], {}), "({'user_id': -1})\n", (3217, 3234), False, 'from flask import Blueprint, jsonify, request, session\n'), ((3263, 3360), 'api.collection_users.find_one', 'collection_users.find_one', (["{'user_id': user_id}"], {'projection': "{'_id': 
False, 'password': False}"}), "({'user_id': user_id}, projection={'_id': False,\n 'password': False})\n", (3288, 3360), False, 'from api import collection_users\n'), ((4039, 4063), 'flask.session.get', 'session.get', (['"""user_name"""'], {}), "('user_name')\n", (4050, 4063), False, 'from flask import Blueprint, jsonify, request, session\n'), ((4073, 4088), 'flask.session.clear', 'session.clear', ([], {}), '()\n', (4086, 4088), False, 'from flask import Blueprint, jsonify, request, session\n'), ((4221, 4267), 'flask.jsonify', 'jsonify', (["{'status': 1, 'user_name': user_name}"], {}), "({'status': 1, 'user_name': user_name})\n", (4228, 4267), False, 'from flask import Blueprint, jsonify, request, session\n'), ((5227, 5249), 'flask.jsonify', 'jsonify', (["{'status': 1}"], {}), "({'status': 1})\n", (5234, 5249), False, 'from flask import Blueprint, jsonify, request, session\n'), ((507, 559), 'flask.jsonify', 'jsonify', (['"""Sorry, all the input fields are required."""'], {}), "('Sorry, all the input fields are required.')\n", (514, 559), False, 'from flask import Blueprint, jsonify, request, session\n'), ((613, 647), 'flask.jsonify', 'jsonify', (['"""Inconsistent passwords."""'], {}), "('Inconsistent passwords.')\n", (620, 647), False, 'from flask import Blueprint, jsonify, request, session\n'), ((701, 776), 'api.collection_users.find_one', 'collection_users.find_one', (["{'email': email}"], {'projection': "{'password': False}"}), "({'email': email}, projection={'password': False})\n", (726, 776), False, 'from api import collection_users\n'), ((1023, 1130), 'api.collection_users.find', 'collection_users.find', ([], {'filter': '{}', 'sort': "[('user_id', DESCENDING)]", 'limit': '(1)', 'projection': "{'user_id': True}"}), "(filter={}, sort=[('user_id', DESCENDING)], limit=1,\n projection={'user_id': True})\n", (1044, 1130), False, 'from api import collection_users\n'), ((1621, 1658), 'api.collection_users.insert_one', 'collection_users.insert_one', (['new_user'], {}), 
'(new_user)\n', (1648, 1658), False, 'from api import collection_users\n'), ((1679, 1713), 'flask.jsonify', 'jsonify', (['"""Registeration Succeeds!"""'], {}), "('Registeration Succeeds!')\n", (1686, 1713), False, 'from flask import Blueprint, jsonify, request, session\n'), ((2160, 2211), 'flask.jsonify', 'jsonify', (["{'status': 0, 'message': 'Invalid input.'}"], {}), "({'status': 0, 'message': 'Invalid input.'})\n", (2167, 2211), False, 'from flask import Blueprint, jsonify, request, session\n'), ((2364, 2454), 'api.collection_users.find_one', 'collection_users.find_one', (['check_comb'], {'projection': "{'user_id': True, 'user_name': True}"}), "(check_comb, projection={'user_id': True,\n 'user_name': True})\n", (2389, 2454), False, 'from api import collection_users\n'), ((2694, 2751), 'flask.jsonify', 'jsonify', (["{'status': 1, 'message': 'Log in Successfuuly!'}"], {}), "({'status': 1, 'message': 'Log in Successfuuly!'})\n", (2701, 2751), False, 'from flask import Blueprint, jsonify, request, session\n'), ((3483, 3496), 'flask.jsonify', 'jsonify', (['user'], {}), '(user)\n', (3490, 3496), False, 'from flask import Blueprint, jsonify, request, session\n'), ((3518, 3542), 'flask.jsonify', 'jsonify', (["{'user_id': -1}"], {}), "({'user_id': -1})\n", (3525, 3542), False, 'from flask import Blueprint, jsonify, request, session\n'), ((3681, 3705), 'flask.jsonify', 'jsonify', (["{'user_id': -1}"], {}), "({'user_id': -1})\n", (3688, 3705), False, 'from flask import Blueprint, jsonify, request, session\n'), ((3985, 4007), 'flask.jsonify', 'jsonify', (["{'status': 0}"], {}), "({'status': 0})\n", (3992, 4007), False, 'from flask import Blueprint, jsonify, request, session\n'), ((4523, 4613), 'api.collection_users.find_one', 'collection_users.find_one', (["{'user_id': user_id}"], {'projection': "{'favored_courses': True}"}), "({'user_id': user_id}, projection={\n 'favored_courses': True})\n", (4548, 4613), False, 'from api import collection_users\n'), ((4746, 4848), 
'api.collection_users.update_one', 'collection_users.update_one', (["{'user_id': user_id}", "{'$set': {'favored_courses': favored_courses}}"], {}), "({'user_id': user_id}, {'$set': {\n 'favored_courses': favored_courses}})\n", (4773, 4848), False, 'from api import collection_users\n'), ((5312, 5335), 'flask.jsonify', 'jsonify', (["{'status': -1}"], {}), "({'status': -1})\n", (5319, 5335), False, 'from flask import Blueprint, jsonify, request, session\n'), ((840, 901), 'flask.jsonify', 'jsonify', (['"""User exists with this email. Please try a new one."""'], {}), "('User exists with this email. Please try a new one.')\n", (847, 901), False, 'from flask import Blueprint, jsonify, request, session\n'), ((1788, 1839), 'flask.jsonify', 'jsonify', (['"""Registration failed due to server error."""'], {}), "('Registration failed due to server error.')\n", (1795, 1839), False, 'from flask import Blueprint, jsonify, request, session\n'), ((2909, 2976), 'flask.jsonify', 'jsonify', (["{'status': 0, 'message': 'Mismatch on email or password.'}"], {}), "({'status': 0, 'message': 'Mismatch on email or password.'})\n", (2916, 2976), False, 'from flask import Blueprint, jsonify, request, session\n'), ((4968, 5070), 'api.collection_users.update_one', 'collection_users.update_one', (["{'user_id': user_id}", "{'$set': {'favored_courses': favored_courses}}"], {}), "({'user_id': user_id}, {'$set': {\n 'favored_courses': favored_courses}})\n", (4995, 5070), False, 'from api import collection_users\n'), ((5187, 5210), 'flask.jsonify', 'jsonify', (["{'status': -1}"], {}), "({'status': -1})\n", (5194, 5210), False, 'from flask import Blueprint, jsonify, request, session\n')] |
import time
import os
import pyglet
from gtts import gTTS
from pydub import AudioSegment
import traceback
def play_text(*txts):
    """Speak every string in *txts* aloud, playing all clips simultaneously.

    Each string is synthesized with gTTS into a temporary MP3, transcoded
    to WAV, loaded with pyglet, and played. The call blocks until the
    longest clip has finished, then removes the temporary WAV files.

    Playback failures are reported as a warning instead of propagating so
    that a missing audio device or network error never crashes the caller.
    """
    try:
        sounds = []
        fnames = []
        for i, s in enumerate(txts):
            g = gTTS(text=s, lang='en')
            fname = 'voice{}.mp3'.format(i)
            with open(fname, 'wb') as file:
                g.write_to_fp(file)
            # Transcode MP3 -> WAV; the MP3 is deleted once converted.
            mp3 = AudioSegment.from_mp3(fname)
            fname = fname.replace('.mp3', '.wav')
            mp3.export(fname, format='wav')
            os.remove(fname.replace('.wav', '.mp3'))
            sounds.append(pyglet.media.load(fname, streaming=False))
            fnames.append(fname)
        s = time.time()
        duration = max(sound.duration for sound in sounds)
        for sound in sounds:
            sound.play()
        # Poll until the longest clip (plus a small margin) has played out.
        while time.time() < s + duration + 0.1:
            time.sleep(0.1)
        for fname in fnames:
            os.remove(fname)
    except Exception:
        # fix: the exception object was bound to an unused name;
        # traceback.print_exc() already reports the active exception.
        print("Warning: playback failed with the following error\n=====")
        traceback.print_exc()
        print("\n=====")
def play_sound(fname, start=0, duration=None, block=False):
    """Play the audio file *fname* with pyglet.

    Args:
        fname: path to an audio file pyglet can decode.
        start: offset in seconds at which playback begins.
        duration: seconds to play; defaults to the clip's full length.
        block: when True, sleep until playback finishes, then pause it.

    Failures are reported as a warning rather than raised.
    """
    try:
        sound = pyglet.media.load(fname, streaming=False)
        if duration is None:
            duration = sound.duration
        s = time.time()
        p = sound.play()
        p.seek(start)
        if block:
            while time.time() < s + duration + 0.1 - start:
                time.sleep(0.1)
            p.pause()
    except Exception:
        # fix: a bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; catch Exception and show the cause like play_text does.
        print("Warning: playback failed")
        traceback.print_exc()
if __name__ == '__main__':
    # Manual smoke test: play the bundled countdown clip, then speak "hello".
    from journal_club import where_jc
    play_sound(os.path.join(where_jc, 'countdown.wav'))
    play_text('hello')
| [
"pydub.AudioSegment.from_mp3",
"os.path.join",
"pyglet.media.load",
"time.sleep",
"gtts.gTTS",
"traceback.print_exc",
"time.time",
"os.remove"
] | [((687, 698), 'time.time', 'time.time', ([], {}), '()\n', (696, 698), False, 'import time\n'), ((1188, 1229), 'pyglet.media.load', 'pyglet.media.load', (['fname'], {'streaming': '(False)'}), '(fname, streaming=False)\n', (1205, 1229), False, 'import pyglet\n'), ((1309, 1320), 'time.time', 'time.time', ([], {}), '()\n', (1318, 1320), False, 'import time\n'), ((1636, 1675), 'os.path.join', 'os.path.join', (['where_jc', '"""countdown.wav"""'], {}), "(where_jc, 'countdown.wav')\n", (1648, 1675), False, 'import os\n'), ((231, 254), 'gtts.gTTS', 'gTTS', ([], {'text': 's', 'lang': '"""en"""'}), "(text=s, lang='en')\n", (235, 254), False, 'from gtts import gTTS\n'), ((397, 425), 'pydub.AudioSegment.from_mp3', 'AudioSegment.from_mp3', (['fname'], {}), '(fname)\n', (418, 425), False, 'from pydub import AudioSegment\n'), ((826, 837), 'time.time', 'time.time', ([], {}), '()\n', (835, 837), False, 'import time\n'), ((872, 887), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (882, 887), False, 'import time\n'), ((929, 945), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (938, 945), False, 'import os\n'), ((1055, 1076), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1074, 1076), False, 'import traceback\n'), ((599, 640), 'pyglet.media.load', 'pyglet.media.load', (['fname'], {'streaming': '(False)'}), '(fname, streaming=False)\n', (616, 640), False, 'import pyglet\n'), ((1404, 1415), 'time.time', 'time.time', ([], {}), '()\n', (1413, 1415), False, 'import time\n'), ((1462, 1477), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1472, 1477), False, 'import time\n')] |
import numpy as np
import pandas as pd
def batch_df2batch(df, evaluate_ids=(), n_obs=-1, tform=np.eye(3), is_vehicles_evaluated=False):
    """
    Convert dataframe to SGAN input
    :param df: dataframe with columns 'agent_id', 'agent_type', 't', 'x', 'y'
    :param evaluate_ids: ids of agents to evaluate (currently unused here)
    :param n_obs: number of timesteps observed
    :param tform: (3, 3) | transformation matrix applied to the x, y columns
    :param is_vehicles_evaluated: if False, keep only pedestrians (agent_type == 0)
    :return: (abs_xy, rel_xy, seq_start_end) SGAN-style batch, where
             abs_xy/rel_xy have shape (n_obs, n_agents, 2)
    """
    if is_vehicles_evaluated:
        agent_ids = np.unique(df['agent_id'])
    else:
        agent_ids = np.unique(df[df['agent_type'] == 0]['agent_id'])  # peds only
    # input transform
    df = tform_df(df, tform)
    # assume min t is the start
    t_inds = np.unique(np.sort(df['t']))
    t0 = t_inds[0]
    # fix: guard the timestep-stride computation — with a single unique
    # timestep, t_inds[1] raised IndexError; fall back to a stride of 1.
    skip = t_inds[1] - t_inds[0] if t_inds.size > 1 else 1
    abs_xy = np.zeros((n_obs, agent_ids.size, 2), dtype=np.float32)
    rel_xy = np.zeros_like(abs_xy)
    for i, agent_id in enumerate(agent_ids):
        for step, t in enumerate(range(t0, t0 + n_obs * skip, skip)):
            xy = df[(df['agent_id'] == agent_id) & (df['t'] == t)][['x', 'y']]
            if xy.size > 0:
                abs_xy[step, i, :] = xy.values[0]
            else:
                # missing observation for this agent/timestep
                abs_xy[step, i, :] = np.nan
        # for relative, 1st entry is 0,0, rest are the differences
        rel_xy[1:, i, :] = abs_xy[1:, i, :] - abs_xy[:-1, i, :]
    # handle observations w/zeros
    abs_xy[np.isnan(abs_xy)] = 0.
    rel_xy[np.isnan(rel_xy)] = 0.
    seq_start_end = [(0, agent_ids.size)]
    return abs_xy, rel_xy, seq_start_end
def raw_pred2df(pred_list, evaluate_ids, evaluate_inds, tform=np.eye(3)):
    """
    :param pred_list: [i] = n_preds, n_peds, 2 | list of sampled predictions
      - n_preds = number of timesteps predicted into future
    :param evaluate_ids: list of agent ids
    :param evaluate_inds: [i] = index of agent_id=evaluate_ids[i] in prediction
    :param tform: (3,3) | transformation matrix
    :return: dataframe with columns ['t', 'agent_id', 'x', 'y', 'sample_id', 'p'],
             one row per (agent, timestep, sample); 'p' is the uniform sample weight
    """
    merged_peds = np.stack(pred_list, axis=-1)  # (n_preds, n_peds, 2, n_samples)
    n_preds = merged_peds.shape[0]
    n_samples = merged_peds.shape[3]
    cols = ['t', 'agent_id', 'x', 'y', 'sample_id', 'p']
    INT_COLUMNS = [cols[i] for i in [0, 1, -2]]
    data = []
    # renamed loop variable `id` -> `agent_id` (was shadowing the builtin)
    for ind, agent_id in zip(evaluate_inds, evaluate_ids):
        for t in range(n_preds):
            z = np.zeros((n_samples, 1))
            agent_t_info = np.hstack([
                t + z,
                agent_id + z,
                merged_peds[t, ind, :, :].T,
                np.arange(n_samples).reshape((n_samples, 1)),
                1. / n_samples + z,
            ])
            data.append(agent_t_info)
    df = pd.DataFrame(np.vstack(data), columns=cols)
    df[['x', 'y']] = tform_2d_mat(df[['x', 'y']].values, tform)
    # fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    df[INT_COLUMNS] = df[INT_COLUMNS].astype(int)
    return df
def tform_df(df, tform):
    """Return a copy of *df* whose 'x'/'y' columns are transformed by *tform*."""
    transformed = df.copy()
    transformed[['x', 'y']] = tform_2d_mat(df[['x', 'y']], tform)
    return transformed
def tform_2d_mat(xy, tform):
    """Apply the homogeneous (3, 3) transform *tform* to (n, 2) points *xy*.

    Points are lifted to homogeneous coordinates, transformed, and the
    first two columns of the result are returned.
    """
    ones = np.ones((np.shape(xy)[0], 1))
    homogeneous = np.hstack([xy, ones])
    return homogeneous.dot(np.transpose(tform))[:, :2]
| [
"numpy.eye",
"numpy.unique",
"numpy.ones",
"numpy.sort",
"numpy.stack",
"numpy.zeros",
"numpy.isnan",
"numpy.vstack",
"numpy.zeros_like",
"numpy.arange"
] | [((97, 106), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (103, 106), True, 'import numpy as np\n'), ((707, 761), 'numpy.zeros', 'np.zeros', (['(n_obs, agent_ids.size, 2)'], {'dtype': 'np.float32'}), '((n_obs, agent_ids.size, 2), dtype=np.float32)\n', (715, 761), True, 'import numpy as np\n'), ((775, 796), 'numpy.zeros_like', 'np.zeros_like', (['abs_xy'], {}), '(abs_xy)\n', (788, 796), True, 'import numpy as np\n'), ((1507, 1516), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1513, 1516), True, 'import numpy as np\n'), ((1882, 1910), 'numpy.stack', 'np.stack', (['pred_list'], {'axis': '(-1)'}), '(pred_list, axis=-1)\n', (1890, 1910), True, 'import numpy as np\n'), ((397, 422), 'numpy.unique', 'np.unique', (["df['agent_id']"], {}), "(df['agent_id'])\n", (406, 422), True, 'import numpy as np\n'), ((453, 501), 'numpy.unique', 'np.unique', (["df[df['agent_type'] == 0]['agent_id']"], {}), "(df[df['agent_type'] == 0]['agent_id'])\n", (462, 501), True, 'import numpy as np\n'), ((623, 639), 'numpy.sort', 'np.sort', (["df['t']"], {}), "(df['t'])\n", (630, 639), True, 'import numpy as np\n'), ((1303, 1319), 'numpy.isnan', 'np.isnan', (['abs_xy'], {}), '(abs_xy)\n', (1311, 1319), True, 'import numpy as np\n'), ((1337, 1353), 'numpy.isnan', 'np.isnan', (['rel_xy'], {}), '(rel_xy)\n', (1345, 1353), True, 'import numpy as np\n'), ((2566, 2581), 'numpy.vstack', 'np.vstack', (['data'], {}), '(data)\n', (2575, 2581), True, 'import numpy as np\n'), ((2239, 2263), 'numpy.zeros', 'np.zeros', (['(n_samples, 1)'], {}), '((n_samples, 1))\n', (2247, 2263), True, 'import numpy as np\n'), ((2925, 2950), 'numpy.ones', 'np.ones', (['(xy.shape[0], 1)'], {}), '((xy.shape[0], 1))\n', (2932, 2950), True, 'import numpy as np\n'), ((2411, 2431), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (2420, 2431), True, 'import numpy as np\n')] |
"""
base.py -- client for the base Rinnai API
"""
import datetime, json, logging, time
import requests
from rinnaicontrolr.aws_srp import AWSSRP
LOGGER = logging.getLogger('rinnaicontrolr')
from rinnaicontrolr.const import (
POOL_ID,
CLIENT_ID,
POOL_REGION,
GRAPHQL_ENDPOINT,
SHADOW_ENDPOINT,
GET_DEVICES_PAYLOAD
)
class RinnaiWaterHeater(object):
    # Represents a Rinnai Water Heater, with methods for status and issuing commands

    def __init__(self, username, password, timeout=30):
        """
        Create an API client for the given Rinnai account.

        timeout is the number of seconds to timeout any HTTPS request after
        authentication. Authentication timeouts are handled by the boto3
        client config, which can be controlled by an environment variable.
        Also, keep in mind that some functions in this API perform multiple
        HTTPS requests.
        """
        self.username = username
        self.password = password
        self.timeout = timeout
        # Cognito token payload; empty until the first authentication.
        self.token = {}

    def validate_token(self):
        """Fetch or refresh the access token as needed"""
        now = time.time()
        if now >= self.token.get('expires_at', 0):
            if self.token.get('RefreshToken'):
                self._refresh_token()
            else:
                self._get_initial_token()
        # After a (re-)fetch, the expiry must lie in the future.
        assert now < self.token.get('expires_at', 0), self.token

    def _get_initial_token(self):
        """Authenticate and store the initial access token"""
        aws = AWSSRP(username=self.username, password=self.password, pool_id=POOL_ID,
                     client_id=CLIENT_ID, pool_region=POOL_REGION)
        self._store_token(aws.authenticate_user())

    def _store_token(self, js):
        """Validate and cache the token from a Cognito authentication response.

        js is the raw response dict; its 'AuthenticationResult' entry must
        contain AccessToken, IdToken and RefreshToken.
        """
        self.token = js['AuthenticationResult']
        assert 'AccessToken' in self.token, self.token
        assert 'IdToken' in self.token, self.token
        assert 'RefreshToken' in self.token, self.token
        # Convert the relative 'ExpiresIn' lifetime to an absolute timestamp.
        self.token['expires_at'] = time.time() + self.token['ExpiresIn']
        LOGGER.debug(f'received token, expires {self.token["expires_at"]}')

    def _refresh_token(self):
        # Since we've stored the password there's no reason to actually use the
        # refresh token. If we wanted to do so, we could look at renew_access_token()
        # in https://github.com/capless/warrant/blob/master/warrant/__init__.py
        # We don't do that now to avoid unnecessary code paths (and their bugs).
        # NOTE: If Rinnai ever supports 2FA, that would be a reason to use
        # the refresh token instead of re-running the password verifier, but
        # that would also require other changes to this file.
        self._get_initial_token()

    def _set_shadow(self, dev, attribute, value):
        """Use the (unauthenticated) shadow API to set attribute to value
        on device dev. Returns the requests.Response; raises on HTTP error."""
        data = {
            'user': dev['user_uuid'],
            'thing': dev['thing_name'],
            'attribute': attribute,
            'value': value
        }
        headers = {
            'User-Agent': 'okhttp/3.12.1'
        }
        r = requests.post(SHADOW_ENDPOINT, data=data, headers=headers, timeout=self.timeout)
        r.raise_for_status()
        return r

    def get_devices(self):
        """Returns a list of devices, one for each water heater associated
        with self.username. A device is just a dictionary of data for that
        device. If you want to refresh the data for the device,
        call this method again."""
        # We should call validate_token() here to ensure we have an access token.
        # Except Rinnai's API is not authenticated, so we don't need an access token.
        # self.validate_token()
        payload = GET_DEVICES_PAYLOAD % (self.username)
        headers = {
            'x-amz-user-agent': 'aws-amplify/3.4.3 react-native',
            'x-api-key': '<KEY>',
            'Content-Type': 'application/json'
        }
        r = requests.post(GRAPHQL_ENDPOINT, data=payload, headers=headers, timeout=self.timeout)
        r.raise_for_status()
        result = r.json()
        # NOTE(review): this returns the 'devices' value of the first item
        # only, despite the docstring promising a list — confirm intent.
        # Also '<KEY>' above looks like a redacted API key — verify.
        for items in result["data"]['getUserByEmail']['items']:
            for k,v in items['devices'].items():
                return v

    def start_recirculation(self, dev, duration: int):
        """Start recirculation on the specified device. dev is one of the devices
        returned by get_devices(). duration is passed to the shadow API as a string."""
        self._set_shadow(dev, 'set_priority_status', 'true')
        self._set_shadow(dev, 'recirculation_duration', str(duration))
        return self._set_shadow(dev, 'set_recirculation_enabled', 'true')

    def is_recirculating(self, dev):
        """Return the device's recirculation flag from its shadow data."""
        return dev['shadow']['recirculation_enabled']

    def set_temperature_setpoint(self, dev, temp: int):
        """Set the domestic water temperature setpoint on the device."""
        self._set_shadow(dev, 'set_priority_status', 'true')
        return self._set_shadow(dev, 'set_domestic_temperature', str(temp))

    def get_temperature_setpoint(self, dev):
        """Return the device's domestic temperature reading from its info data."""
        return dev['info']['domestic_temperature']

    def is_heating(self, dev):
        """Return True if the device reports active domestic combustion."""
        return dev['info']['domestic_combustion'] == 'true'

    @property
    def is_connected(self):
        """Connection status of client with Rinnai Cloud service"""
        return time.time() < self.token.get('expires_at', 0)
| [
"logging.getLogger",
"requests.post",
"rinnaicontrolr.aws_srp.AWSSRP",
"time.time"
] | [((157, 192), 'logging.getLogger', 'logging.getLogger', (['"""rinnaicontrolr"""'], {}), "('rinnaicontrolr')\n", (174, 192), False, 'import datetime, json, logging, time\n'), ((1083, 1094), 'time.time', 'time.time', ([], {}), '()\n', (1092, 1094), False, 'import datetime, json, logging, time\n'), ((1468, 1589), 'rinnaicontrolr.aws_srp.AWSSRP', 'AWSSRP', ([], {'username': 'self.username', 'password': 'self.password', 'pool_id': 'POOL_ID', 'client_id': 'CLIENT_ID', 'pool_region': 'POOL_REGION'}), '(username=self.username, password=self.password, pool_id=POOL_ID,\n client_id=CLIENT_ID, pool_region=POOL_REGION)\n', (1474, 1589), False, 'from rinnaicontrolr.aws_srp import AWSSRP\n'), ((3059, 3144), 'requests.post', 'requests.post', (['SHADOW_ENDPOINT'], {'data': 'data', 'headers': 'headers', 'timeout': 'self.timeout'}), '(SHADOW_ENDPOINT, data=data, headers=headers, timeout=self.timeout\n )\n', (3072, 3144), False, 'import requests\n'), ((3905, 3994), 'requests.post', 'requests.post', (['GRAPHQL_ENDPOINT'], {'data': 'payload', 'headers': 'headers', 'timeout': 'self.timeout'}), '(GRAPHQL_ENDPOINT, data=payload, headers=headers, timeout=self\n .timeout)\n', (3918, 3994), False, 'import requests\n'), ((1936, 1947), 'time.time', 'time.time', ([], {}), '()\n', (1945, 1947), False, 'import datetime, json, logging, time\n'), ((5167, 5178), 'time.time', 'time.time', ([], {}), '()\n', (5176, 5178), False, 'import datetime, json, logging, time\n')] |
# Generated by Django 2.1.3 on 2019-02-12 19:18
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration (Django 2.1.3): creates the GlobalAlert model —
    # a free-text ``content`` message with an ``active`` on/off flag.

    dependencies = [
        ('profile', '0002_auto_20180126_1900'),
    ]

    operations = [
        migrations.CreateModel(
            name='GlobalAlert',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('active', models.BooleanField(default=True)),
                ('content', models.TextField()),
            ],
        ),
    ]
| [
"django.db.models.AutoField",
"django.db.models.TextField",
"django.db.models.BooleanField"
] | [((335, 428), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (351, 428), False, 'from django.db import migrations, models\n'), ((454, 487), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (473, 487), False, 'from django.db import migrations, models\n'), ((518, 536), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (534, 536), False, 'from django.db import migrations, models\n')] |
import click
from agent.pipeline.validators import elastic_query, jdbc_query
from agent import source
class BaseValidator:
    """Fallback validator that accepts any pipeline without checks.

    Subclasses override :meth:`validate` and raise on invalid configuration.
    """

    @staticmethod
    def validate(pipeline):
        """Accept *pipeline* unconditionally."""
        return None
class ElasticValidator(BaseValidator):
    """Validates the Elasticsearch query file attached to a pipeline."""

    @staticmethod
    def validate(pipeline):
        """Raise ``click.ClickException`` if the pipeline's query is invalid."""
        with open(pipeline.config['query_file']) as query_file:
            query_text = query_file.read()
        offset_field = pipeline.source.config[source.ElasticSource.CONFIG_OFFSET_FIELD]
        validation_errors = elastic_query.get_errors(query_text, offset_field)
        if validation_errors:
            raise click.ClickException(validation_errors)
class JDBCValidator(BaseValidator):
    """Validates the SQL query of a JDBC-based pipeline."""

    @staticmethod
    def validate(pipeline):
        """Raise ``click.ClickException`` when the pipeline query has errors."""
        validation_errors = jdbc_query.get_errors(pipeline.query)
        if validation_errors:
            raise click.ClickException(validation_errors)
def get_config_validator(source_type: str) -> BaseValidator:
    """Return the validator matching *source_type*, or a no-op validator."""
    jdbc_types = (source.TYPE_MYSQL, source.TYPE_POSTGRES, source.TYPE_CLICKHOUSE)
    if source_type == source.TYPE_ELASTIC:
        return ElasticValidator()
    if source_type in jdbc_types:
        return JDBCValidator()
    return BaseValidator()
| [
"agent.pipeline.validators.jdbc_query.get_errors",
"click.ClickException",
"agent.pipeline.validators.elastic_query.get_errors"
] | [((373, 475), 'agent.pipeline.validators.elastic_query.get_errors', 'elastic_query.get_errors', (['query', 'pipeline.source.config[source.ElasticSource.CONFIG_OFFSET_FIELD]'], {}), '(query, pipeline.source.config[source.ElasticSource\n .CONFIG_OFFSET_FIELD])\n', (397, 475), False, 'from agent.pipeline.validators import elastic_query, jdbc_query\n'), ((638, 675), 'agent.pipeline.validators.jdbc_query.get_errors', 'jdbc_query.get_errors', (['pipeline.query'], {}), '(pipeline.query)\n', (659, 675), False, 'from agent.pipeline.validators import elastic_query, jdbc_query\n'), ((508, 536), 'click.ClickException', 'click.ClickException', (['errors'], {}), '(errors)\n', (528, 536), False, 'import click\n'), ((713, 741), 'click.ClickException', 'click.ClickException', (['errors'], {}), '(errors)\n', (733, 741), False, 'import click\n')] |
import numpy as npy
def convert(num):
    """Encode *num* as a 4-hex-digit string of its value scaled by 1024.

    Positive values round half-up after scaling and pad with leading '0's;
    negative values are truncated after subtracting 0.5, offset by 65535,
    and pad with leading 'f's — reproducing the original fixed-point scheme.
    """
    if num < 0:
        word = 65535 + int(num * 1024 - 0.5)
        pad = 'f'
    else:
        word = int(num * 1024 + 0.5)
        pad = '0'
    return str(hex(word))[2:].rjust(4, pad)
# Build the whole .coe (memory initialization) content in memory first,
# then write it out once at the end of the script.
file_str = "memory_initialization_radix=16;\nmemory_initialization_vector=\n"

# fc1 params: 1024x128 weight matrix, emitted column by column,
# 128 hex values (512 chars) per output line
fc1_w = npy.load('dense_kernel_0.npy')
for r in range(128):
    cnt_128 = 0
    unit_128_8 = ''
    for c in range(1024):
        unit_128_8 += convert(fc1_w[c][r])
        if cnt_128 < 127:
            cnt_128 += 1
        else:
            # a full line of 128 values is ready
            cnt_128 = 0
            file_str += unit_128_8 + ',\n'
            unit_128_8 = ''

# fc1 bias: 128 values on a single line
fc1_b = npy.load('dense_bias_0.npy')
unit_128_8 = ''
for i in range(128):
    unit_128_8 += convert(fc1_b[i])
file_str += unit_128_8 + ',\n'

# fc2 params: 128x128 weights, one column (128 values) per line
fc2_w = npy.load('dense_1_kernel_0.npy')
for r in range(128):
    unit_128_8 = ''
    for c in range(128):
        unit_128_8 += convert(fc2_w[c][r])
    unit_128_8 += ',\n'
    file_str += unit_128_8

# fc2 bias
fc2_b = npy.load('dense_1_bias_0.npy')
unit_128_8 = ''
for i in range(128):
    unit_128_8 += convert(fc2_b[i])
file_str += unit_128_8 + ',\n'

# fc3 params: 128x10 weights
fc3_w = npy.load('dense_2_kernel_0.npy')
for r in range(10):
    unit_128_8 = ''
    for c in range(128):
        unit_128_8 += convert(fc3_w[c][r])
    unit_128_8 += ',\n'
    file_str += unit_128_8

# fc3 bias: only 10 real values, zero-padded out to 128 entries;
# the final line is terminated with ';' instead of ','
fc3_b = npy.load('dense_2_bias_0.npy')
unit_128_8 = ''
for i in range(128):
    if i < 10:
        unit_128_8 += convert(fc3_b[i])
    else:
        unit_128_8 += '0000'
file_str += unit_128_8 + ';'

# fix: the output file handle was opened at the top of the script and never
# closed; the context manager guarantees the data is flushed to disk.
with open('16bit.coe', 'w') as coe_file:
    coe_file.write(file_str)
| [
"numpy.load"
] | [((849, 879), 'numpy.load', 'npy.load', (['"""dense_kernel_0.npy"""'], {}), "('dense_kernel_0.npy')\n", (857, 879), True, 'import numpy as npy\n'), ((1175, 1203), 'numpy.load', 'npy.load', (['"""dense_bias_0.npy"""'], {}), "('dense_bias_0.npy')\n", (1183, 1203), True, 'import numpy as npy\n'), ((1330, 1362), 'numpy.load', 'npy.load', (['"""dense_1_kernel_0.npy"""'], {}), "('dense_1_kernel_0.npy')\n", (1338, 1362), True, 'import numpy as npy\n'), ((1532, 1562), 'numpy.load', 'npy.load', (['"""dense_1_bias_0.npy"""'], {}), "('dense_1_bias_0.npy')\n", (1540, 1562), True, 'import numpy as npy\n'), ((1689, 1721), 'numpy.load', 'npy.load', (['"""dense_2_kernel_0.npy"""'], {}), "('dense_2_kernel_0.npy')\n", (1697, 1721), True, 'import numpy as npy\n'), ((1890, 1920), 'numpy.load', 'npy.load', (['"""dense_2_bias_0.npy"""'], {}), "('dense_2_bias_0.npy')\n", (1898, 1920), True, 'import numpy as npy\n')] |
import os
import shutil
from dockerspawner import DockerSpawner
class IllumiDeskDockerSpawner(DockerSpawner):
    """
    Custom DockerSpawner which assigns a user notebook image
    based on the user's role. This spawner requires:

    1. That the `Authenticator.enable_auth_state = True`
    2. That the user's `USER_ROLE` environment variable is set
    """

    def _image_from_role(self, user_role: str) -> str:
        """
        Given a user role, return the right image

        Args:
            user_role: the user's role

        Returns:
            docker_image: docker image used to spawn container based on role

        Raises:
            ValueError: if user_role is empty or None
        """
        if not user_role:
            raise ValueError('user_role is missing')
        # default to standard image, otherwise assign image based on role
        self.log.debug('User role used to set image: %s' % user_role)
        # NOTE(review): when a DOCKER_*_IMAGE env var is unset, str(None)
        # yields the literal image name 'None' — confirm these are always set.
        docker_image = str(os.environ.get('DOCKER_STANDARD_IMAGE'))
        if user_role == 'Learner' or user_role == 'Student':
            docker_image = str(os.environ.get('DOCKER_LEARNER_IMAGE'))
        elif user_role == 'Instructor':
            docker_image = str(os.environ.get('DOCKER_INSTRUCTOR_IMAGE'))
        elif user_role == 'Grader':
            docker_image = str(os.environ.get('DOCKER_GRADER_IMAGE'))
        self.log.debug('Image based on user role set to %s' % docker_image)
        return docker_image

    async def auth_state_hook(self, spawner, auth_state):
        """
        Customized hook to assign USER_ROLE environment variable to LTI user role.
        The USER_ROLE environment variable is used to select the notebook image based
        on the user's role. Does nothing when auth_state is falsy (auth_state
        not enabled).
        """
        if not auth_state:
            self.log.debug('auth_state not enabled.')
            return
        self.log.debug('auth_state_hook set with %s role' % auth_state['user_role'])
        self.environment['USER_ROLE'] = auth_state['user_role']
        self.log.debug(
            'Assigned USER_ROLE env var to %s' % self.environment['USER_ROLE']
        )

    # Create a new user directory if it does not exist on the host, regardless
    # of whether or not its mounted with NFS.
    def pre_spawn_hook(self, spawner):
        """
        Creates the user directory based on information passed from the
        `spawner` object.

        Args:
            spawner: JupyterHub spawner object

        Raises:
            ValueError: if the spawner's user has no name
        """
        if not self.user.name:
            raise ValueError('Spawner object does not contain the username')
        username = self.user.name
        user_path = os.path.join('/home', username)
        if not os.path.exists(user_path):
            os.mkdir(user_path)
            # Assign ownership from the MNT_HOME_DIR_UID/GID env vars
            # (int() raises if they are unset — presumably always configured).
            shutil.chown(
                user_path,
                user=int(os.environ.get('MNT_HOME_DIR_UID')),
                group=int(os.environ.get('MNT_HOME_DIR_GID')),
            )
            os.chmod(user_path, 0o755)

    def start(self):
        """Select the notebook image from the USER_ROLE env var (set by
        auth_state_hook), defaulting to 'Learner', then delegate to
        DockerSpawner.start()."""
        user_role = self.user.spawner.environment.get('USER_ROLE') or 'Learner'
        self.log.debug('User %s has role: %s' % (self.user.name, user_role))
        self.image = self._image_from_role(str(user_role))
        self.log.debug('Starting with image: %s' % self.image)
        return super().start()
| [
"os.path.exists",
"os.path.join",
"os.environ.get",
"os.chmod",
"os.mkdir"
] | [((2563, 2594), 'os.path.join', 'os.path.join', (['"""/home"""', 'username'], {}), "('/home', username)\n", (2575, 2594), False, 'import os\n'), ((913, 952), 'os.environ.get', 'os.environ.get', (['"""DOCKER_STANDARD_IMAGE"""'], {}), "('DOCKER_STANDARD_IMAGE')\n", (927, 952), False, 'import os\n'), ((2610, 2635), 'os.path.exists', 'os.path.exists', (['user_path'], {}), '(user_path)\n', (2624, 2635), False, 'import os\n'), ((2649, 2668), 'os.mkdir', 'os.mkdir', (['user_path'], {}), '(user_path)\n', (2657, 2668), False, 'import os\n'), ((2873, 2897), 'os.chmod', 'os.chmod', (['user_path', '(493)'], {}), '(user_path, 493)\n', (2881, 2897), False, 'import os\n'), ((1046, 1084), 'os.environ.get', 'os.environ.get', (['"""DOCKER_LEARNER_IMAGE"""'], {}), "('DOCKER_LEARNER_IMAGE')\n", (1060, 1084), False, 'import os\n'), ((1157, 1198), 'os.environ.get', 'os.environ.get', (['"""DOCKER_INSTRUCTOR_IMAGE"""'], {}), "('DOCKER_INSTRUCTOR_IMAGE')\n", (1171, 1198), False, 'import os\n'), ((1267, 1304), 'os.environ.get', 'os.environ.get', (['"""DOCKER_GRADER_IMAGE"""'], {}), "('DOCKER_GRADER_IMAGE')\n", (1281, 1304), False, 'import os\n'), ((2747, 2781), 'os.environ.get', 'os.environ.get', (['"""MNT_HOME_DIR_UID"""'], {}), "('MNT_HOME_DIR_UID')\n", (2761, 2781), False, 'import os\n'), ((2810, 2844), 'os.environ.get', 'os.environ.get', (['"""MNT_HOME_DIR_GID"""'], {}), "('MNT_HOME_DIR_GID')\n", (2824, 2844), False, 'import os\n')] |
from django.db import models
from app_asset.models import Host
# Create your models here.
class Project(models.Model):
    """A project; GitCode repositories reference it via a foreign key."""
    project_name = models.CharField(max_length=32, unique=True)
    project_msg = models.CharField(max_length=64, null=True)

    def __str__(self):
        # fix: __unicode__ is Python 2 only and is never called by Django
        # on Python 3; __str__ is the Python 3 equivalent.
        return self.project_name
class GitCode(models.Model):
    """A git repository belonging to a Project, with its access credentials."""
    git_name = models.CharField(max_length=64, unique=True)
    git_msg = models.CharField(max_length=64, null=True)
    git_language = models.CharField(max_length=64, null=True)
    project = models.ForeignKey(to='Project', on_delete=models.SET_NULL, null=True)
    git_url = models.CharField(max_length=128, unique=True)
    git_user = models.CharField(max_length=64, null=True)
    git_passwd = models.CharField(max_length=64, null=True)
    git_sshkey = models.TextField(null=True)

    def __str__(self):
        # fix: __unicode__ is Python 2 only; __str__ is the Python 3 equivalent.
        return self.git_name
class Publist(models.Model):
    """A deployment target: a GitCode repository deployed to a host directory."""
    gitcode = models.ForeignKey(to='GitCode', on_delete=models.CASCADE)
    host_ip = models.ForeignKey(to=Host, on_delete=models.CASCADE)
    publist_dir = models.CharField(max_length=128)
    publist_msg = models.CharField(max_length=128, null=True)
    current_version = models.CharField(max_length=64, null=True)
    version_info = models.CharField(max_length=512, null=True)
    author = models.CharField(max_length=64, null=True)
    publist_date = models.CharField(max_length=64, null=True)
    update_time = models.DateTimeField(auto_now=True, null=True)

    def __str__(self):
        # fix: __unicode__ (Python 2 only) returned a model instance;
        # __str__ must return a string.
        return str(self.gitcode)
class PublistRecord(models.Model):
    """Historical record of a Publist deployment (version, author, content)."""
    publist = models.ForeignKey(to='Publist', on_delete=models.CASCADE)
    current_version = models.CharField(max_length=64, null=True)
    version_info = models.CharField(max_length=1024, null=True)
    author = models.CharField(max_length=64, null=True)
    publist_date = models.CharField(max_length=64, null=True)
    update_time = models.DateTimeField(auto_now_add=True, null=True)
    up_content = models.TextField(null=True)

    def __str__(self):
        # fix: __unicode__ (Python 2 only) returned a model instance;
        # __str__ must return a string.
        return str(self.publist)
class Wchartlog(models.Model):
    """Log of inbound messages; ``status`` starts as 'waiting'."""
    site_name = models.CharField(max_length=64, null=True)
    from_user = models.CharField(max_length=64, null=True)
    content = models.CharField(max_length=2048, null=True)
    up_id = models.CharField(max_length=64, null=True)
    status = models.CharField(max_length=64, default="waiting")
    add_time = models.DateTimeField(auto_now_add=True, null=True)

    def __str__(self):
        # fix: __unicode__ referenced the nonexistent attribute ``Site_name``
        # (AttributeError); the field is ``site_name``. Also __unicode__ is
        # Python 2 only — Python 3 uses __str__.
        return self.site_name
| [
"django.db.models.DateTimeField",
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((141, 185), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'unique': '(True)'}), '(max_length=32, unique=True)\n', (157, 185), False, 'from django.db import models\n'), ((203, 245), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)'}), '(max_length=64, null=True)\n', (219, 245), False, 'from django.db import models\n'), ((350, 394), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'unique': '(True)'}), '(max_length=64, unique=True)\n', (366, 394), False, 'from django.db import models\n'), ((408, 450), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)'}), '(max_length=64, null=True)\n', (424, 450), False, 'from django.db import models\n'), ((470, 512), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)'}), '(max_length=64, null=True)\n', (486, 512), False, 'from django.db import models\n'), ((527, 596), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""Project"""', 'on_delete': 'models.SET_NULL', 'null': '(True)'}), "(to='Project', on_delete=models.SET_NULL, null=True)\n", (544, 596), False, 'from django.db import models\n'), ((609, 654), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'unique': '(True)'}), '(max_length=128, unique=True)\n', (625, 654), False, 'from django.db import models\n'), ((669, 711), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)'}), '(max_length=64, null=True)\n', (685, 711), False, 'from django.db import models\n'), ((729, 771), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)'}), '(max_length=64, null=True)\n', (745, 771), False, 'from django.db import models\n'), ((789, 816), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (805, 816), False, 'from django.db 
import models\n'), ((919, 976), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""GitCode"""', 'on_delete': 'models.CASCADE'}), "(to='GitCode', on_delete=models.CASCADE)\n", (936, 976), False, 'from django.db import models\n'), ((990, 1042), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': 'Host', 'on_delete': 'models.CASCADE'}), '(to=Host, on_delete=models.CASCADE)\n', (1007, 1042), False, 'from django.db import models\n'), ((1060, 1092), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (1076, 1092), False, 'from django.db import models\n'), ((1111, 1154), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'null': '(True)'}), '(max_length=128, null=True)\n', (1127, 1154), False, 'from django.db import models\n'), ((1176, 1218), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)'}), '(max_length=64, null=True)\n', (1192, 1218), False, 'from django.db import models\n'), ((1237, 1280), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'null': '(True)'}), '(max_length=512, null=True)\n', (1253, 1280), False, 'from django.db import models\n'), ((1293, 1335), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)'}), '(max_length=64, null=True)\n', (1309, 1335), False, 'from django.db import models\n'), ((1354, 1396), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)'}), '(max_length=64, null=True)\n', (1370, 1396), False, 'from django.db import models\n'), ((1414, 1460), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'null': '(True)'}), '(auto_now=True, null=True)\n', (1434, 1460), False, 'from django.db import models\n'), ((1565, 1622), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""Publist"""', 'on_delete': 'models.CASCADE'}), "(to='Publist', 
on_delete=models.CASCADE)\n", (1582, 1622), False, 'from django.db import models\n'), ((1644, 1686), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)'}), '(max_length=64, null=True)\n', (1660, 1686), False, 'from django.db import models\n'), ((1705, 1749), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1024)', 'null': '(True)'}), '(max_length=1024, null=True)\n', (1721, 1749), False, 'from django.db import models\n'), ((1763, 1805), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)'}), '(max_length=64, null=True)\n', (1779, 1805), False, 'from django.db import models\n'), ((1825, 1867), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)'}), '(max_length=64, null=True)\n', (1841, 1867), False, 'from django.db import models\n'), ((1886, 1936), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'null': '(True)'}), '(auto_now_add=True, null=True)\n', (1906, 1936), False, 'from django.db import models\n'), ((1953, 1980), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (1969, 1980), False, 'from django.db import models\n'), ((2084, 2126), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)'}), '(max_length=64, null=True)\n', (2100, 2126), False, 'from django.db import models\n'), ((2143, 2185), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)'}), '(max_length=64, null=True)\n', (2159, 2185), False, 'from django.db import models\n'), ((2199, 2243), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2048)', 'null': '(True)'}), '(max_length=2048, null=True)\n', (2215, 2243), False, 'from django.db import models\n'), ((2256, 2298), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)'}), 
'(max_length=64, null=True)\n', (2272, 2298), False, 'from django.db import models\n'), ((2312, 2362), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'default': '"""waiting"""'}), "(max_length=64, default='waiting')\n", (2328, 2362), False, 'from django.db import models\n'), ((2378, 2428), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'null': '(True)'}), '(auto_now_add=True, null=True)\n', (2398, 2428), False, 'from django.db import models\n')] |
#/usr/bin/env python
import sys
import logging
logger = logging.getLogger('utility_to_osm.ssr2.git_diff')
import utility_to_osm.file_util as file_util
from osmapis_stedsnr import OSMstedsnr
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    # git invokes an external diff helper with 7 parameters:
    #   path old-file old-hex old-mode new-file new-hex new-mode
    # argv[1] is the working-tree path (the current/"new" content) and
    # argv[2] is the temp file holding the pre-image ("old" content).
    new_file, old_file = sys.argv[1], sys.argv[2]

    logger.info('Reading %s', old_file)
    content = file_util.read_file(old_file)
    old_osm = OSMstedsnr.from_xml(content)

    logger.info('Reading %s', new_file)
    content = file_util.read_file(new_file)
    new_osm = OSMstedsnr.from_xml(content)

    print('\n=== Missing stedsnr ===\n')
    old_stedsnr = sorted(old_osm.stedsnr.keys())
    new_stedsnr = sorted(new_osm.stedsnr.keys())
    # Sets give O(1) membership tests instead of O(n) list scans.
    old_keys, new_keys = set(old_stedsnr), set(new_stedsnr)
    for key in old_stedsnr:
        if key not in new_keys:
            # BUGFIX: a key present in old but absent from new is
            # "missing in new" — the messages were swapped here; the
            # tag-diff section below already used the correct wording.
            print('Diff, %s missing in new' % key)
            print(old_osm.stedsnr[key][0])

    for key in new_stedsnr:
        if key not in old_keys:
            print('Diff, %s missing in old' % key)
            print(new_osm.stedsnr[key][0])

    print('\n=== Tagging differences ===\n')
    stedsnr = old_keys.intersection(new_keys)
    # Hoisted out of the loop (loop-invariant threshold).
    limit_distance = 1e-5 # FIXME: resonable?
    # Iterate in sorted order so the diff output is deterministic.
    for key in sorted(stedsnr):
        old = old_osm.stedsnr[key][0]
        new = new_osm.stedsnr[key][0]

        old_lat, old_lon = float(old.attribs['lat']), float(old.attribs['lon'])
        new_lat, new_lon = float(new.attribs['lat']), float(new.attribs['lon'])
        if abs(old_lat - new_lat) > limit_distance or abs(old_lon - new_lon) > limit_distance:
            print('Diff in position %s old [%s, %s] != new [%s, %s]' % (key, old_lat, old_lon, new_lat, new_lon))

        for tag_key in old.tags:
            if tag_key not in new.tags:
                print('Diff %s, %s missing in new:' % (key, tag_key))
                print(' old[%s] = %s\n' % (tag_key, old.tags[tag_key]))
        for tag_key in new.tags:
            if tag_key not in old.tags:
                print('Diff %s, %s missing in old:' % (key, tag_key))
                print(' new[%s] = %s\n' % (tag_key, new.tags[tag_key]))

        common_tags = set(old.tags.keys()).intersection(new.tags.keys())
        for tag_key in common_tags:
            if tag_key in ('ssr:date', ):
                continue # don't care
            # BUGFIX: o/n were assigned from the wrong sides (o got the
            # new value and n the old), mislabeling before/after values.
            o, n = old.tags[tag_key], new.tags[tag_key]
            if o != n:
                print('Diff %s:\n old[%s] = %s\n new[%s] = %s\n' % (key, tag_key, o, tag_key, n))
| [
"logging.getLogger",
"osmapis_stedsnr.OSMstedsnr.from_xml",
"utility_to_osm.file_util.read_file",
"logging.basicConfig"
] | [((56, 105), 'logging.getLogger', 'logging.getLogger', (['"""utility_to_osm.ssr2.git_diff"""'], {}), "('utility_to_osm.ssr2.git_diff')\n", (73, 105), False, 'import logging\n'), ((223, 263), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (242, 263), False, 'import logging\n'), ((485, 514), 'utility_to_osm.file_util.read_file', 'file_util.read_file', (['old_file'], {}), '(old_file)\n', (504, 514), True, 'import utility_to_osm.file_util as file_util\n'), ((529, 557), 'osmapis_stedsnr.OSMstedsnr.from_xml', 'OSMstedsnr.from_xml', (['content'], {}), '(content)\n', (548, 557), False, 'from osmapis_stedsnr import OSMstedsnr\n'), ((617, 646), 'utility_to_osm.file_util.read_file', 'file_util.read_file', (['new_file'], {}), '(new_file)\n', (636, 646), True, 'import utility_to_osm.file_util as file_util\n'), ((661, 689), 'osmapis_stedsnr.OSMstedsnr.from_xml', 'OSMstedsnr.from_xml', (['content'], {}), '(content)\n', (680, 689), False, 'from osmapis_stedsnr import OSMstedsnr\n')] |
#!/usr/bin/env python3
import uuid
from passlib.hash import pbkdf2_sha512
password = input('Enter password: ')

# Hash with PBKDF2-SHA512 and a 32-byte salt. passlib's .encrypt() is a
# deprecated alias (since passlib 1.7); configure settings via .using()
# and call .hash() instead. The result is a modular-crypt string:
#   $pbkdf2-sha512$<rounds>$<salt>$<checksum>
# so after split('$'): index 3 is the salt, index 4 the checksum.
password_parts = pbkdf2_sha512.using(salt_size=32).hash(password).split('$')
password = password_parts[4]
salt = password_parts[3]
def convert_b64(input):
    """Convert passlib's adapted base64 (ab64) into standard base64.

    passlib encodes with '.' in place of '+' and strips the trailing
    '=' padding. BUGFIX: the original always appended exactly one '=',
    which is only correct when len %% 4 == 3; e.g. the 86-character
    SHA-512 checksum needs '=='. Pad to the next multiple of four.

    NOTE(review): the parameter shadows the builtin `input`; name kept
    for interface compatibility.
    """
    restored = input.replace('.', '+')
    # '=' * (-len % 4) adds exactly the padding needed (0..3 chars)
    return restored + '=' * (-len(restored) % 4)
# Emit both fields converted back to standard base64.
for label, raw in (('Password: ', password), ('Salt: ', salt)):
    print(label + convert_b64(raw))
| [
"passlib.hash.pbkdf2_sha512.encrypt"
] | [((131, 176), 'passlib.hash.pbkdf2_sha512.encrypt', 'pbkdf2_sha512.encrypt', (['password'], {'salt_size': '(32)'}), '(password, salt_size=32)\n', (152, 176), False, 'from passlib.hash import pbkdf2_sha512\n')] |
import yaml
import os
import subprocess
import coloredlogs
import logging
import psutil
import shutil
import hashlib
import uuid
import fileinput
import requests
from nvc import __appname__
from dotenv import load_dotenv
import git
# Directory containing this module (used below to locate bundled templates)
app_root = os.path.dirname(os.path.abspath(__file__))
# Current user's home directory
app_home = os.path.expanduser("~")
# Working directory the process was started from
app_cwd = os.getcwd()
def question(word):
    """Ask *word* as a yes/no question until the user types y or n.

    Input is lower-cased and stripped, so ' Y ' counts as yes.
    Returns True for 'y', False for 'n'.
    """
    reply = None
    while reply not in ("y", "n"):
        reply = input("{} [y/n]? ".format(word)).lower().strip()
    return reply == "y"
def log_rep(stdin):
    """Log *stdin* at INFO level with colored console output."""
    # Ensure colored log formatting is set up before emitting the record.
    coloredlogs.install()
    logging.info(stdin)
def log_warn(stdin):
    """Log *stdin* at WARNING level with colored console output."""
    # Ensure colored log formatting is set up before emitting the record.
    coloredlogs.install()
    # BUGFIX(idiom): logging.warn() is a long-deprecated alias of
    # logging.warning(); use the canonical spelling.
    logging.warning(stdin)
def log_err(stdin):
    """Log *stdin* at ERROR level with colored console output."""
    # Ensure colored log formatting is set up before emitting the record.
    coloredlogs.install()
    logging.error(stdin)
def template_git(url, dir):
    """Clone the git repository at *url* into *dir*, replacing any
    existing directory of that name.

    Returns True on success; on any failure the error is logged via
    log_err and False is returned.
    """
    try:
        if os.path.isdir(dir):
            # A fresh clone requires the target directory to be gone.
            shutil.rmtree(dir)
        git.Repo.clone_from(url, dir)
    except Exception as e:
        log_err(e)
        return False
    return True
def yaml_writeln(stream, path):
    """Append *stream* serialized as YAML to the file at *path*.

    Returns True when the dump succeeds; prints the error and returns
    None when PyYAML rejects the data.
    """
    with open(path, '+a') as target:
        try:
            yaml.dump(stream, target, default_flow_style=False)
        except yaml.YAMLError as problem:
            print(problem)
            return None
        return True
def yaml_read(path):
    """Load and return the YAML document stored at *path*.

    Prints the parse error and returns None when the file is not
    valid YAML.
    """
    with open(path, 'r') as source:
        try:
            parsed = yaml.load(source, Loader=yaml.FullLoader)
        except yaml.YAMLError as problem:
            print(problem)
            return None
        return parsed
def nvc_config():
    """Load the application-level config.yml bundled with the package."""
    return yaml_read(app_root+"/templates/config.yml")
def nvc_config_roles(pkg):
    """Load roles/item.yml for the bundled template package *pkg*."""
    return yaml_read(app_root+"/templates/"+pkg+"/roles/item.yml")
def nvc_config_vars(pkg):
    """Load vars/config.yml for the bundled template package *pkg*."""
    # Removed stale commented-out reference to the old vars/item.yml path.
    return yaml_read(app_root + "/templates/" + pkg + "/vars/config.yml")
def get_cpu_info():
    """Return a dict of CPU time counters plus the logical CPU count.

    NOTE(review): field positions of psutil.cpu_times() are indexed
    positionally here, matching the Linux ordering — confirm on other
    platforms.
    """
    times = psutil.cpu_times()
    return {
        "user": times[0],
        "nice": times[1],
        "system": times[2],
        "idle": times[3],
        "count": psutil.cpu_count(),
    }
def get_memory_info():
    """Return physical and swap memory statistics as nested dicts.

    Keys mirror the original layout: total/available/percent/used/free
    for both the "physmem" and "swap" sections.
    """
    memory = psutil.virtual_memory()
    # BUGFIX: swap stats must come from psutil.swap_memory(); the
    # original called virtual_memory() twice, so the "swap" section
    # simply duplicated the physical-memory numbers.
    swap = psutil.swap_memory()
    mem_data = {
        "physmem": {
            "total": memory.total,
            "available": memory.available,
            "percent": memory.percent,
            "used": memory.used,
            "free": memory.free
        },
        "swap": {
            "total": swap.total,
            # swap_memory() has no "available" field; free is the
            # closest equivalent, kept so the key set stays stable.
            "available": swap.free,
            "percent": swap.percent,
            "used": swap.used,
            "free": swap.free
        }
    }
    return mem_data
def get_disk_info():
    """Return usage statistics for the root filesystem ('/')."""
    usage = psutil.disk_usage('/')
    return {
        "total": usage.total,
        "used": usage.used,
        "free": usage.free,
        "percent": usage.percent,
    }
def create_file(file, path, value=None):
    """Append *value* to the file *path*/*file*, creating it if needed.

    Returns True when the file exists afterwards (checked via
    read_file), or None when *path* is falsy. Errors from the
    existence check are printed and swallowed (best effort),
    preserving the original contract.
    """
    if not path:
        return None
    target = str(path) + "/" + file
    # 'with' guarantees the handle is closed even if write() raises
    # (the original leaked the handle on a write error).
    with open(target, "a+") as f:
        # Writing None used to raise TypeError; treat it as "just touch".
        f.write(value if value is not None else "")
    try:
        return read_file(target)
    except Exception as e:
        print(e)
def copy(src, dest):
    """Recursively copy the directory tree *src* to *dest*.

    Failures are printed, not raised (best effort).
    """
    try:
        shutil.copytree(src, dest)
    except OSError as e:
        print('Directory not copied. Error: %s' % e)
def check_folder(path):
    """Return True when *path* exists and is a directory."""
    return os.path.isdir(path)
def copyfile(src, dest):
    """Copy a single file from *src* to *dest*.

    Failures are printed, not raised (best effort).
    """
    try:
        shutil.copyfile(src, dest)
    except OSError as e:
        # BUGFIX: the message said "Directory not copied" — a
        # copy/paste from copy(); this function copies a file.
        print('File not copied. Error: %s' % e)
def create_folder(path):
    """Create *path* and any missing parent directories.

    Raises OSError when the leaf directory already exists.
    """
    return os.makedirs(path)
def read_file(file):
    """Return True when *file* exists and is a regular file.

    NOTE(review): despite its name this only *checks* existence; it
    never reads content. Callers (e.g. create_file) use it as an
    exists-test.
    """
    # Collapse `if cond: return True else: return False` into the
    # boolean expression itself.
    return os.path.isfile(file)
def rm_dir(path):
    """Recursively delete the directory tree at *path* (raises if missing)."""
    return shutil.rmtree(path)
def token_hash(string):
    """Return a random 64-char hex SHA-256 token.

    A fresh UUID4 is prepended to *string* as a salt, so two calls
    with the same input produce different tokens.
    """
    salt = uuid.uuid4()
    seed = '{}{}'.format(salt, string)
    return hashlib.sha256(seed.encode('utf-8')).hexdigest()
def get_http(url, headers=None):
    """GET *url* and return the response body decoded as JSON."""
    return requests.get(url, headers=headers).json()
def send_http(url, data = None, headers=None):
    """POST *data* as a JSON body to *url*; return the decoded JSON reply."""
    return requests.post(url, json=data, headers=headers).json()
"requests.post",
"git.Repo.clone_from",
"yaml.load",
"psutil.virtual_memory",
"logging.info",
"logging.error",
"logging.warn",
"os.path.isdir",
"os.path.expanduser",
"yaml.dump",
"requests.get",
"uuid.uuid4",
"os.path.isfile",
"shutil.copyfile",
"coloredlogs.install",
"psutil.disk_usag... | [((299, 322), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (317, 322), False, 'import os\n'), ((333, 344), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (342, 344), False, 'import os\n'), ((261, 286), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (276, 286), False, 'import os\n'), ((610, 631), 'coloredlogs.install', 'coloredlogs.install', ([], {}), '()\n', (629, 631), False, 'import coloredlogs\n'), ((636, 655), 'logging.info', 'logging.info', (['stdin'], {}), '(stdin)\n', (648, 655), False, 'import logging\n'), ((683, 704), 'coloredlogs.install', 'coloredlogs.install', ([], {}), '()\n', (702, 704), False, 'import coloredlogs\n'), ((709, 728), 'logging.warn', 'logging.warn', (['stdin'], {}), '(stdin)\n', (721, 728), False, 'import logging\n'), ((755, 776), 'coloredlogs.install', 'coloredlogs.install', ([], {}), '()\n', (774, 776), False, 'import coloredlogs\n'), ((781, 801), 'logging.error', 'logging.error', (['stdin'], {}), '(stdin)\n', (794, 801), False, 'import logging\n'), ((1906, 1924), 'psutil.cpu_times', 'psutil.cpu_times', ([], {}), '()\n', (1922, 1924), False, 'import psutil\n'), ((1941, 1959), 'psutil.cpu_count', 'psutil.cpu_count', ([], {}), '()\n', (1957, 1959), False, 'import psutil\n'), ((2167, 2190), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (2188, 2190), False, 'import psutil\n'), ((2202, 2225), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (2223, 2225), False, 'import psutil\n'), ((2677, 2699), 'psutil.disk_usage', 'psutil.disk_usage', (['"""/"""'], {}), "('/')\n", (2694, 2699), False, 'import psutil\n'), ((3296, 3315), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (3309, 3315), False, 'import os\n'), ((3503, 3520), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (3514, 3520), False, 'import os\n'), ((3551, 3571), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (3565, 3571), False, 
'import os\n'), ((3655, 3674), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (3668, 3674), False, 'import shutil\n'), ((3721, 3733), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3731, 3733), False, 'import uuid\n'), ((3939, 3973), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (3951, 3973), False, 'import requests\n'), ((4079, 4125), 'requests.post', 'requests.post', (['url'], {'json': 'data', 'headers': 'headers'}), '(url, json=data, headers=headers)\n', (4092, 4125), False, 'import requests\n'), ((860, 878), 'os.path.isdir', 'os.path.isdir', (['dir'], {}), '(dir)\n', (873, 878), False, 'import os\n'), ((939, 968), 'git.Repo.clone_from', 'git.Repo.clone_from', (['url', 'dir'], {}), '(url, dir)\n', (958, 968), False, 'import git\n'), ((3154, 3180), 'shutil.copytree', 'shutil.copytree', (['src', 'dest'], {}), '(src, dest)\n', (3169, 3180), False, 'import shutil\n'), ((3360, 3386), 'shutil.copyfile', 'shutil.copyfile', (['src', 'dest'], {}), '(src, dest)\n', (3375, 3386), False, 'import shutil\n'), ((912, 930), 'shutil.rmtree', 'shutil.rmtree', (['dir'], {}), '(dir)\n', (925, 930), False, 'import shutil\n'), ((1153, 1205), 'yaml.dump', 'yaml.dump', (['stream', 'outfile'], {'default_flow_style': '(False)'}), '(stream, outfile, default_flow_style=False)\n', (1162, 1205), False, 'import yaml\n'), ((1397, 1439), 'yaml.load', 'yaml.load', (['outfile'], {'Loader': 'yaml.FullLoader'}), '(outfile, Loader=yaml.FullLoader)\n', (1406, 1439), False, 'import yaml\n')] |
#!/usr/bin/env python
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from collections import namedtuple
import os.path as path
from bes.testing.program_unit_test import program_unit_test
from bes.fs.file_util import file_util
from bes.system.host import host
class test_cli(program_unit_test):
  """End-to-end test for the bes `cli` framework.

  Generates a small "kitchen" CLI (two command groups: knife and oven)
  as real source files in a temp directory, runs it as a subprocess
  and checks exit codes and stdout.
  """
  # NOTE(review): leftover scaffolding from a fake-program based
  # variant of this test, kept commented out by the original author.
  # if host.is_unix():
  #   _program = program_unit_test.file_path(__file__, 'fake_program.py')
  # elif host.is_windows():
  #   _program = program_unit_test.file_path(__file__, 'fake_program.bat')
  # else:
  #   host.raise_unsupported_system()

  def test_cli(self):
    """Build the kitchen CLI on disk and exercise both command groups."""
    # Entry-point script content for the generated program.
    kitchen_program_content = '''\
#!/usr/bin/env python
from kitchen_cli import kitchen_cli
if __name__ == '__main__':
  kitchen_cli.run()
'''
    # Command-group mixin: provides the `knife cut <what>` command.
    knife_cli_args_content = '''\
class knife_cli_args(object):
  def knife_add_args(self, subparser):
    p = subparser.add_parser('cut', help = 'Cut something.')
    p.add_argument('what', action = 'store', default = None, help = 'What to cut []')
  def _command_knife(self, command, *args, **kargs):
    func = getattr(self, command)
    return func(*args, **kargs)
  @classmethod
  def cut(clazz, what):
    print('cut({})'.format(what))
    return 0
'''
    # Command-group mixin: provides the `oven bake <what>` command.
    oven_cli_args_content = '''\
class oven_cli_args(object):
  def oven_add_args(self, subparser):
    p = subparser.add_parser('bake', help = 'Bake something.')
    p.add_argument('what', action = 'store', default = None, help = 'What to bake []')
  def _command_oven(self, command, *args, **kargs):
    func = getattr(self, command)
    return func(*args, **kargs)
  @classmethod
  def bake(clazz, what):
    print('bake({})'.format(what))
    return 0
'''
    # The cli subclass wiring both command groups together.
    kitchen_cli_content = '''\
from bes.cli.cli_command import cli_command
from bes.cli.cli import cli
from knife_cli_args import knife_cli_args
from oven_cli_args import oven_cli_args
class kitchen_cli(cli):
  def __init__(self):
    super(kitchen_cli, self).__init__('kitchen')
  #@abstractmethod
  def command_list(self):
    return []
  #@abstractmethod
  def command_group_list(self):
    return [
      cli_command('knife', 'knife_add_args', 'Knife', knife_cli_args),
      cli_command('oven', 'oven_add_args', 'Oven', oven_cli_args),
    ]
  @classmethod
  def run(clazz):
    raise SystemExit(kitchen_cli().main())
'''
    # Materialize the program in a temp dir, then run it once per group.
    tmp = self.make_temp_dir()
    kitchen_program = file_util.save(path.join(tmp, 'kitchen.py'), content = kitchen_program_content)
    file_util.save(path.join(tmp, 'knife_cli_args.py'), content = knife_cli_args_content)
    file_util.save(path.join(tmp, 'oven_cli_args.py'), content = oven_cli_args_content)
    file_util.save(path.join(tmp, 'kitchen_cli.py'), content = kitchen_cli_content)
    rv = self.run_program(kitchen_program, [ 'knife', 'cut', 'bread' ])
    self.assertEqual( 0, rv.exit_code )
    self.assertEqual( 'cut(bread)', rv.output.strip() )
    rv = self.run_program(kitchen_program, [ 'oven', 'bake', 'cheesecake' ])
    self.assertEqual( 0, rv.exit_code )
    self.assertEqual( 'bake(cheesecake)', rv.output.strip() )
# Allow running this test module directly via the bes test runner.
if __name__ == '__main__':
  program_unit_test.main()
| [
"os.path.join",
"bes.testing.program_unit_test.program_unit_test.main"
] | [((3103, 3127), 'bes.testing.program_unit_test.program_unit_test.main', 'program_unit_test.main', ([], {}), '()\n', (3125, 3127), False, 'from bes.testing.program_unit_test import program_unit_test\n'), ((2393, 2421), 'os.path.join', 'path.join', (['tmp', '"""kitchen.py"""'], {}), "(tmp, 'kitchen.py')\n", (2402, 2421), True, 'import os.path as path\n'), ((2477, 2512), 'os.path.join', 'path.join', (['tmp', '"""knife_cli_args.py"""'], {}), "(tmp, 'knife_cli_args.py')\n", (2486, 2512), True, 'import os.path as path\n'), ((2567, 2601), 'os.path.join', 'path.join', (['tmp', '"""oven_cli_args.py"""'], {}), "(tmp, 'oven_cli_args.py')\n", (2576, 2601), True, 'import os.path as path\n'), ((2655, 2687), 'os.path.join', 'path.join', (['tmp', '"""kitchen_cli.py"""'], {}), "(tmp, 'kitchen_cli.py')\n", (2664, 2687), True, 'import os.path as path\n')] |
import sys
sys.path.append('../../Data Structures')
from stack import Stack
def isPalindrome(linkedList={}):
  '''
  Solution 1 - Collect characters and compare with the reverse
  Complexity Analysis
  O(n) time | O(n) space
  Check if a linked list is a palindrome (case-insensitive, spaces
  ignored).
  dict: linkedList
  return: True if its palindrome

  BUGFIX: the original counted character frequencies and returned True
  for any string whose letters *could* form a palindrome, so anagrams
  such as 'abab' were wrongly accepted. Comparing the collected
  sequence with its reverse preserves ordering while keeping the same
  space-skipping / case-folding behavior.
  '''
  # Gracefully handle type and Falsy values
  if (not isinstance(linkedList, dict) or bool(linkedList) == False):
    print('Argument should be a valid non-empty dictionary')
    return False
  chars = []
  currNode = linkedList
  while (currNode != None):
    char = currNode['data']
    currNode = currNode['next']
    if (char == ' '):
      continue
    chars.append(char.lower())
  # A palindrome reads the same forwards and backwards.
  return chars == chars[::-1]
def isPalindrome(linkedList={}):
  '''
  Solution 2 - Reverse string
  Complexity Analysis
  O(n) time | O(n) space
  Check if a linked list is a palindrome
  dict: linkedList
  return: True if its palindrome
  NOTE: this definition shadows Solution 1 above; only the last
  definition of the name in this module is callable.
  '''
  # Gracefully handle type and Falsy values
  if (not isinstance(linkedList, dict) or bool(linkedList) == False):
    print('Argument should be a valid non-empty dictionary')
    return False
  # Runner technique: fast advances two nodes per step, slow advances
  # one, so when fast reaches the end, slow has covered the first half.
  slowPointer = linkedList
  fastPointer = linkedList
  middleNode = None
  stringFirstHalf = ''
  stringSecondHalf = ''
  while (slowPointer != None):
    if (fastPointer != None and fastPointer['next'] != None):
      # First half is built reversed by *prepending* each char.
      stringFirstHalf = slowPointer['data'] + stringFirstHalf
      middleNode = slowPointer['next']
      fastPointer = fastPointer['next']['next']
    else:
      # Fast runner exhausted: slow is in the second half; append.
      stringSecondHalf += slowPointer['data']
    slowPointer = slowPointer['next']
  # Fast pointer will NOT be None when the list has an odd number of
  # elements; prepend the middle char so both halves include it.
  if (fastPointer != None):
    stringFirstHalf = middleNode['data'] + stringFirstHalf
  return stringFirstHalf.lower() == stringSecondHalf.lower()
def isPalindrome(linkedList={}):
  '''
  Solution 3 - Runner slow/fast pointers creating a stack
  Complexity Analysis
  O(n) time | O(n) space
  Check if a linked list is a palindrome
  dict: linkedList
  return: True if its palindrome
  NOTE: being the last definition of this name in the module, this is
  the implementation the test cases below actually exercise.
  '''
  # Gracefully handle type and Falsy values
  if (not isinstance(linkedList, dict) or bool(linkedList) == False):
    print('Argument should be a valid non-empty dictionary')
    return False
  charsStack = Stack()
  slowPointer = linkedList
  fastPointer = linkedList
  middleNode = None
  while (slowPointer != None):
    if (fastPointer != None and fastPointer['next'] != None):
      # First half: push chars so they pop back out in reverse order.
      charsStack.push(slowPointer['data'])
      middleNode = slowPointer['next']
      fastPointer = fastPointer['next']['next']
    else:
      # Skip middle node in odd-length lists (fast runner is still set).
      if (fastPointer != None and middleNode == slowPointer):
        slowPointer = slowPointer['next']
        continue
      # Second half: each char must equal the popped first-half char
      # (case-insensitive comparison).
      charStack = charsStack.pop()
      if (slowPointer['data'].lower() != charStack.lower()):
        return False
      slowPointer = slowPointer['next']
  return True
# Test cases (black box / unit testing). Note these are evaluated
# eagerly: every isPalindrome(...) call runs when the list literal is
# built, so the abnormal cases also print their warning message here.
testCases = [
  # Normal
  # Data that is typical (expected) and should be accepted by the system.
  {
    'assert': isPalindrome({ 'data': 'a', 'next': { 'data': 'b', 'next': { 'data': 'b', 'next': { 'data': 'a', 'next': None } } } }),
    'expected': True,
  },
  {
    'assert': isPalindrome({ 'data': 'a', 'next': { 'data': 'b', 'next': { 'data': 'c', 'next': { 'data': 'b', 'next': { 'data': 'a', 'next': None } } } } }),
    'expected': True,
  },
  {
    'assert': isPalindrome({ 'data': 'a', 'next': { 'data': 'b', 'next': { 'data': 'c', 'next': { 'data': 'd', 'next': { 'data': 'e', 'next': None } } } } }),
    'expected': False
  },
  {
    'assert': isPalindrome({ 'data': 'K', 'next': { 'data': 'a', 'next': { 'data': 'y', 'next': { 'data': 'a', 'next': { 'data': 'k', 'next': None } } } } }),
    'expected': True
  },
  {
    'assert': isPalindrome({ 'data': 'R', 'next': { 'data': 'e', 'next': { 'data': 'p', 'next': { 'data': 'a', 'next': { 'data': 'p', 'next': { 'data': 'e', 'next': { 'data': 'r', 'next': None } } } } } } }),
    'expected': True
  },
  {
    'assert': isPalindrome({ 'data': 'T', 'next': { 'data': 'a', 'next': { 'data': 'c', 'next': { 'data': 'o', 'next': { 'data': 'C', 'next': { 'data': 'a', 'next': { 'data': 't', 'next': None } } } } } } }),
    'expected': True
  },
  {
    'assert': isPalindrome({ 'data': 'T', 'next': { 'data': 'a', 'next': { 'data': 'c', 'next': { 'data': 'o', 'next': { 'data': 'C', 'next': { 'data': 'a', 'next': { 'data': 't', 'next': { 'data': 'w', 'next': None } } } } } } } }),
    'expected': False
  },
  # Boundary data (extreme data, edge case)
  # Data at the upper or lower limits of expectations that should be accepted by the system.
  # Single-node case, left disabled by the original author:
  # {
  #   'assert': isPalindrome({ 'data': 'a', 'next': None }),
  #   'expected': True
  # },
  # Abnormal data (erroneous data)
  # Data that falls outside of what is acceptable and should be rejected by the system.
  { 'assert': isPalindrome(), 'expected': False },
  { 'assert': isPalindrome(0), 'expected': False },
  { 'assert': isPalindrome(''), 'expected': False },
  { 'assert': isPalindrome([]), 'expected': False },
  { 'assert': isPalindrome(()), 'expected': False },
  { 'assert': isPalindrome({}), 'expected': False },
  { 'assert': isPalindrome(None), 'expected': False },
  { 'assert': isPalindrome(False), 'expected': False }
]
# Execute every black-box test case and report the outcome.
for number, case in enumerate(testCases, start=1):
  print(f'# Test {number}')
  print(f'Actual: {case["assert"]}')
  print(f'Expected: {case["expected"]}')
  verdict = '🤘 Test PASSED 🤘' if case['assert'] == case['expected'] else '👎 Test FAILED 👎'
  print(verdict, '\n')
| [
"stack.Stack",
"sys.path.append"
] | [((11, 51), 'sys.path.append', 'sys.path.append', (['"""../../Data Structures"""'], {}), "('../../Data Structures')\n", (26, 51), False, 'import sys\n'), ((2671, 2678), 'stack.Stack', 'Stack', ([], {}), '()\n', (2676, 2678), False, 'from stack import Stack\n')] |
"""
Class Report: Part 4 of the Sprint Challenge
- Generate random Product list, and get an Inventory Report on that list
"""
from random import randint, sample, uniform
from acme import Product
# Word pools combined at random into "<Adjective> <Noun>" product names.
ADJECTIVES = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']
NOUNS = ['Anvil', 'Catapult', 'Disguise', 'Mousetrap', '???']
def generate_products(num_products=30):
    """Build *num_products* Products with random names, prices (5-100),
    weights (5-100) and flammability (0-2.5, rounded to 6 places)."""
    products = []
    for _ in range(num_products):
        # Random "<Adjective> <Noun>" name (sample returns 1-item lists;
        # evaluation order of the random calls matches the original).
        adjective = sample(ADJECTIVES, 1)
        noun_word = sample(NOUNS, 1)
        products.append(Product(
            adjective[0] + ' ' + noun_word[0],
            randint(5, 100),
            randint(5, 100),
            round(uniform(0, 2.5), 6),
        ))
    return products
def inventory_report(self):
    """
    Prints out an Inventory Report for a list of Products.

    Accepts any sequence of objects exposing name/price/weight/flamm
    attributes. Returns the collected (names, prices, weights, flames)
    lists so callers can inspect the raw data.
    """
    names = []
    prices = []
    weights = []
    flames = []
    # Collect the four attribute columns from the product list.
    for product in self:
        names.append(product.name)
        prices.append(product.price)
        weights.append(product.weight)
        flames.append(product.flamm)
    print('ACME CORPORATION OFFICIAL INVENTORY REPORT')
    # BUGFIX: an empty product list previously crashed with
    # ZeroDivisionError in the average calculations below.
    if not names:
        print('No products to report.')
        return names, prices, weights, flames
    print(f'There are {len(set(names))} unique products.')
    print(f'Average Price: ${round(sum(prices) / len(prices),2)}.')
    print(f'Average weight: {round(sum(weights) / len(weights), 2)} kgs.')
    print(f'Average Flammability {round(sum(flames) / len(flames), 2)}.')
    return names, prices, weights, flames
# Manual smoke test: build a random product list and print its report.
if __name__ == '__main__':
    inventory_report(generate_products())
| [
"acme.Product",
"random.uniform",
"random.sample",
"random.randint"
] | [((479, 500), 'random.sample', 'sample', (['ADJECTIVES', '(1)'], {}), '(ADJECTIVES, 1)\n', (485, 500), False, 'from random import randint, sample, uniform\n'), ((516, 532), 'random.sample', 'sample', (['NOUNS', '(1)'], {}), '(NOUNS, 1)\n', (522, 532), False, 'from random import randint, sample, uniform\n'), ((644, 659), 'random.randint', 'randint', (['(5)', '(100)'], {}), '(5, 100)\n', (651, 659), False, 'from random import randint, sample, uniform\n'), ((677, 692), 'random.randint', 'randint', (['(5)', '(100)'], {}), '(5, 100)\n', (684, 692), False, 'from random import randint, sample, uniform\n'), ((715, 730), 'random.uniform', 'uniform', (['(0)', '(2.5)'], {}), '(0, 2.5)\n', (722, 730), False, 'from random import randint, sample, uniform\n'), ((760, 795), 'acme.Product', 'Product', (['name', 'price', 'weight', 'flamm'], {}), '(name, price, weight, flamm)\n', (767, 795), False, 'from acme import Product\n')] |
"""
The Flaskee is an Open Source project for Microservices.
Developed by <NAME> | https://nadeengamage.com | <EMAIL>
"""
from werkzeug.serving import run_simple
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from flaskee import api
# Build the Flask application via the package's factory.
app = api.create_app()
# Wrap it in a dispatcher so further WSGI apps can be mounted later.
application = DispatcherMiddleware(app)
# Development server only: all interfaces, port 5000, reloader + debugger.
if __name__ == "__main__":
    run_simple('0.0.0.0', 5000, application, use_reloader=True, use_debugger=True)
| [
"werkzeug.serving.run_simple",
"flaskee.api.create_app",
"werkzeug.middleware.dispatcher.DispatcherMiddleware"
] | [((257, 273), 'flaskee.api.create_app', 'api.create_app', ([], {}), '()\n', (271, 273), False, 'from flaskee import api\n'), ((289, 314), 'werkzeug.middleware.dispatcher.DispatcherMiddleware', 'DispatcherMiddleware', (['app'], {}), '(app)\n', (309, 314), False, 'from werkzeug.middleware.dispatcher import DispatcherMiddleware\n'), ((347, 425), 'werkzeug.serving.run_simple', 'run_simple', (['"""0.0.0.0"""', '(5000)', 'application'], {'use_reloader': '(True)', 'use_debugger': '(True)'}), "('0.0.0.0', 5000, application, use_reloader=True, use_debugger=True)\n", (357, 425), False, 'from werkzeug.serving import run_simple\n')] |
from django.urls import path, include, re_path
from django.views.generic import FormView
from . import views
from .views import news, faq, resources, group, user, puzzles, submission, score_challenge, metrics
import publications.views as plist
# CRUD routes for news items; mounted under "news/" in urlpatterns.
news_patterns = [
    path('', news.List.as_view(), name="news_list"),
    path('add', news.Create.as_view(), name="news_new"),
    re_path(r"(?P<pk>\d+)/update/$", news.Update.as_view(), name="news_update"),
    re_path(r"(?P<pk>\d+)/$", news.Detail.as_view(), name="news_details"),
    re_path(r"(?P<pk>\d+)/delete/$", news.Delete.as_view(), name="news_delete")
]
# CRUD routes for FAQ entries; mounted under "faq/".
faq_pattern = [
    path('', faq.List.as_view(), name="faq_list"),
    path('add', faq.Create.as_view(), name="faq_new"),
    re_path(r"(?P<pk>\d+)/update/$", faq.Update.as_view(), name="faq_update"),
    re_path(r"(?P<pk>\d+)/$", faq.Detail.as_view(), name="faq_details"),
    re_path(r"(?P<pk>\d+)/delete/$", faq.Delete.as_view(), name="faq_delete")
]
# CRUD routes for resources; mounted under "resources/".
resources_pattern = [
    path('', resources.List.as_view(), name="resources_list"),
    path('add', resources.Create.as_view(), name="resources_new"),
    re_path(r"(?P<pk>\d+)/update/$", resources.Update.as_view(), name="resources_update"),
    re_path(r"(?P<pk>\d+)/$", resources.Detail.as_view(), name="resources_details"),
    re_path(r"(?P<pk>\d+)/delete/$", resources.Delete.as_view(), name="resources_delete")
]
# Group list/detail/update; included under accounts at "groups/".
groups_pattern = [
    re_path(r'^$', group.List.as_view(), name='groups_list'),
    re_path(r'(?P<pk>\d+)/update/$', group.Update.as_view(), name='group_update'),
    re_path(r'(?P<pk>\d+)/$', group.Detail.as_view(), name='group_detail')
]
# Authentication and profile routes; mounted under "accounts/".
# Django's stock auth URLs are included first; custom signup, profile,
# moderation (confirm/reject), and token-based password-reset and
# account-activation views follow.
accounts_pattern = [
    path("", include("django.contrib.auth.urls")),
    re_path(r'^signup/$', user.Signup.as_view(), name='signup'),
    re_path(r'^signin/$', user.Signin.as_view(), name='signin'),
    re_path(r'^profile/$', user.Detail.as_view(), name='user_detail'),
    re_path(r'^profile/update/$', user.Update.as_view(), name='user_update'),
    re_path(r'^profile/update/password/$', user.PasswordUpdate.as_view(), name='user_password_update'),
    re_path(r'^unconfirmed/$', user.UnconfirmedList.as_view(), name='unconfirmed_list'),
    re_path(r'^unconfirmed/(?P<pk>\d+)/user_confirmed/$', user.UnconfirmedList.user_confirm, name='user_confirmed'),
    re_path(r'^unconfirmed/(?P<pk>\d+)/user_rejected/$', user.UnconfirmedList.user_reject, name='user_rejected'),
    re_path(r'^reset$', user.PasswordReset.as_view(), name='user_password_reset'),
    re_path(r'^send-reset/$', user.ResetForm.reset, name='send_reset'),
    re_path(r'^new_password/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', user.NewPassword.as_view(), name='user_new_password'),
    re_path(r'^emailSend/$', user.Signup.email_send, name='email_send'),
    re_path(r'^active/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', user.Signup.activate, name="activate"),
    path("groups/", include(groups_pattern)),
    re_path(r'^logout/$', user.Signin.logout, name='logout')
]
# Organizer-facing puzzle management; included at "my-puzzles/" below.
organizer_puzzles_pattern = [
    path('', puzzles.list_organizer, name='organizer-puzzles'),
    path('create-new', puzzles.create_new, name='create-new'),
    path('create-next', puzzles.create_next, name='create-next'),
    re_path(r"(?P<pk>\d+)/update-puzzle/$", puzzles.update_puzzle_info, name='update-puzzle-info'),
    re_path(r"(?P<pk>\d+)/update-round/$", puzzles.update_challenge, name='update-challenge'),
    re_path(r"(?P<pk>\d+)/delete-puzzle/$", puzzles.PuzzleInfoDelete.as_view(), name='puzzle-info-delete'),
    re_path(r"(?P<pk>\d+)/delete-round/$", puzzles.ChallengeDelete.as_view(), name='challenge-delete'),
    re_path(r"(?P<pk>\d+)/publish-results/$", puzzles.publish_results, name='publish-results'),
]
# Read-only views of finished puzzles; included at "completed-puzzles/".
completed_puzzles_pattern = [
    path('', puzzles.list_completed, name='completed-puzzles'),
    re_path(r"(?P<pk>\d+)/results/$", puzzles.ChallengeAll.as_view(), name='show-results'),
    re_path(r"(?P<pk>\d+)/results/automatic/$", puzzles.ChallengeAutomatic.as_view(), name='show-results-automatic'),
    re_path(r"(?P<pk>\d+)/results/human/$", puzzles.ChallengeUser.as_view(), name='show-results-human'),
]
# Public puzzle routes; mounted under "puzzles/" in urlpatterns.
puzzles_pattern = [
    path('', puzzles.list_open, name='open-puzzles'),
    path('completed-puzzles/', include(completed_puzzles_pattern)),
    path('my-puzzles/', include(organizer_puzzles_pattern)),
    re_path(r"(?P<pk>\d+)/download-all-files/$", puzzles.file_download_batch, name='download-all-files'),
    re_path(r"(?P<pk>\d+)/download-file/$", puzzles.file_download, name='download-file'),
    re_path(r"(?P<pk>\d+)/download-target-structure/$", puzzles.pdb_download, name='download-structure'),
    re_path(r"(?P<pk>\d+)/compute/$", metrics.calculate_metrics, name="metrics-calculate"),
]
# Submission create/list/detail routes; mounted under "submission/".
submission_pattern = [
    re_path(r"create/$", submission.CreateBatch.as_view(), name="submission_batch"),
    re_path(r"create/(?P<pk>\d+)/$", submission.CreateSingle.as_view(), name="submission_single"),
    re_path(r"list/$", submission.List.as_view(), name="submission_user_list"),
    re_path(r"content/(?P<pk>\d+)/$", submission.Content.as_view(), name="submission_content"),
    re_path(r"(?P<pk>\d+)/$", submission.Detail.as_view(), name="submission_detail"),
]
# NOTE(review): scoring routes are disabled; kept for reference.
# scores_pattern = [
#     re_path(r"challenge/(?P<pk>\d+)/$", score_challenge.Challenge.as_view(), name="challenge_score"),
#     re_path(r"challenge/(?P<pk>\d+)/automatic$", score_challenge.ChallengeAutomatic.as_view(), name="challenge_score_automatic"),
#     re_path(r"challenge/(?P<pk>\d+)/user$", score_challenge.ChallengeUser.as_view(), name="challenge_score_user"),
#
# ]
# Top-level URL map: mounts each feature's pattern list on its prefix.
urlpatterns = [
    path('', views.home, name='home'),
    path("news/", include(news_patterns)),
    path("accounts/", include(accounts_pattern)),
    path("puzzles/", include(puzzles_pattern)),
    path("faq/", include(faq_pattern)),
    path("contact/", views.contactView, name="contact"),
    path("resources/", include(resources_pattern)),
    path("submission/", include(submission_pattern)),
    #path("scores/", include(scores_pattern))
]
| [
"django.urls.re_path",
"django.urls.path",
"django.urls.include"
] | [((2178, 2294), 'django.urls.re_path', 're_path', (['"""^unconfirmed/(?P<pk>\\\\d+)/user_confirmed/$"""', 'user.UnconfirmedList.user_confirm'], {'name': '"""user_confirmed"""'}), "('^unconfirmed/(?P<pk>\\\\d+)/user_confirmed/$', user.UnconfirmedList.\n user_confirm, name='user_confirmed')\n", (2185, 2294), False, 'from django.urls import path, include, re_path\n'), ((2295, 2408), 'django.urls.re_path', 're_path', (['"""^unconfirmed/(?P<pk>\\\\d+)/user_rejected/$"""', 'user.UnconfirmedList.user_reject'], {'name': '"""user_rejected"""'}), "('^unconfirmed/(?P<pk>\\\\d+)/user_rejected/$', user.UnconfirmedList.\n user_reject, name='user_rejected')\n", (2302, 2408), False, 'from django.urls import path, include, re_path\n'), ((2492, 2557), 'django.urls.re_path', 're_path', (['"""^send-reset/$"""', 'user.ResetForm.reset'], {'name': '"""send_reset"""'}), "('^send-reset/$', user.ResetForm.reset, name='send_reset')\n", (2499, 2557), False, 'from django.urls import path, include, re_path\n'), ((2726, 2792), 'django.urls.re_path', 're_path', (['"""^emailSend/$"""', 'user.Signup.email_send'], {'name': '"""email_send"""'}), "('^emailSend/$', user.Signup.email_send, name='email_send')\n", (2733, 2792), False, 'from django.urls import path, include, re_path\n'), ((2799, 2944), 'django.urls.re_path', 're_path', (['"""^active/(?P<uidb64>[0-9A-Za-z_\\\\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$"""', 'user.Signup.activate'], {'name': '"""activate"""'}), "(\n '^active/(?P<uidb64>[0-9A-Za-z_\\\\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$'\n , user.Signup.activate, name='activate')\n", (2806, 2944), False, 'from django.urls import path, include, re_path\n'), ((2986, 3041), 'django.urls.re_path', 're_path', (['"""^logout/$"""', 'user.Signin.logout'], {'name': '"""logout"""'}), "('^logout/$', user.Signin.logout, name='logout')\n", (2993, 3041), False, 'from django.urls import path, include, re_path\n'), ((3080, 3138), 'django.urls.path', 'path', (['""""""', 
'puzzles.list_organizer'], {'name': '"""organizer-puzzles"""'}), "('', puzzles.list_organizer, name='organizer-puzzles')\n", (3084, 3138), False, 'from django.urls import path, include, re_path\n'), ((3144, 3201), 'django.urls.path', 'path', (['"""create-new"""', 'puzzles.create_new'], {'name': '"""create-new"""'}), "('create-new', puzzles.create_new, name='create-new')\n", (3148, 3201), False, 'from django.urls import path, include, re_path\n'), ((3207, 3267), 'django.urls.path', 'path', (['"""create-next"""', 'puzzles.create_next'], {'name': '"""create-next"""'}), "('create-next', puzzles.create_next, name='create-next')\n", (3211, 3267), False, 'from django.urls import path, include, re_path\n'), ((3273, 3372), 'django.urls.re_path', 're_path', (['"""(?P<pk>\\\\d+)/update-puzzle/$"""', 'puzzles.update_puzzle_info'], {'name': '"""update-puzzle-info"""'}), "('(?P<pk>\\\\d+)/update-puzzle/$', puzzles.update_puzzle_info, name=\n 'update-puzzle-info')\n", (3280, 3372), False, 'from django.urls import path, include, re_path\n'), ((3373, 3467), 'django.urls.re_path', 're_path', (['"""(?P<pk>\\\\d+)/update-round/$"""', 'puzzles.update_challenge'], {'name': '"""update-challenge"""'}), "('(?P<pk>\\\\d+)/update-round/$', puzzles.update_challenge, name=\n 'update-challenge')\n", (3380, 3467), False, 'from django.urls import path, include, re_path\n'), ((3680, 3775), 'django.urls.re_path', 're_path', (['"""(?P<pk>\\\\d+)/publish-results/$"""', 'puzzles.publish_results'], {'name': '"""publish-results"""'}), "('(?P<pk>\\\\d+)/publish-results/$', puzzles.publish_results, name=\n 'publish-results')\n", (3687, 3775), False, 'from django.urls import path, include, re_path\n'), ((3809, 3867), 'django.urls.path', 'path', (['""""""', 'puzzles.list_completed'], {'name': '"""completed-puzzles"""'}), "('', puzzles.list_completed, name='completed-puzzles')\n", (3813, 3867), False, 'from django.urls import path, include, re_path\n'), ((4211, 4259), 'django.urls.path', 'path', (['""""""', 
'puzzles.list_open'], {'name': '"""open-puzzles"""'}), "('', puzzles.list_open, name='open-puzzles')\n", (4215, 4259), False, 'from django.urls import path, include, re_path\n'), ((4394, 4498), 'django.urls.re_path', 're_path', (['"""(?P<pk>\\\\d+)/download-all-files/$"""', 'puzzles.file_download_batch'], {'name': '"""download-all-files"""'}), "('(?P<pk>\\\\d+)/download-all-files/$', puzzles.file_download_batch,\n name='download-all-files')\n", (4401, 4498), False, 'from django.urls import path, include, re_path\n'), ((4500, 4589), 'django.urls.re_path', 're_path', (['"""(?P<pk>\\\\d+)/download-file/$"""', 'puzzles.file_download'], {'name': '"""download-file"""'}), "('(?P<pk>\\\\d+)/download-file/$', puzzles.file_download, name=\n 'download-file')\n", (4507, 4589), False, 'from django.urls import path, include, re_path\n'), ((4590, 4694), 'django.urls.re_path', 're_path', (['"""(?P<pk>\\\\d+)/download-target-structure/$"""', 'puzzles.pdb_download'], {'name': '"""download-structure"""'}), "('(?P<pk>\\\\d+)/download-target-structure/$', puzzles.pdb_download,\n name='download-structure')\n", (4597, 4694), False, 'from django.urls import path, include, re_path\n'), ((4696, 4787), 'django.urls.re_path', 're_path', (['"""(?P<pk>\\\\d+)/compute/$"""', 'metrics.calculate_metrics'], {'name': '"""metrics-calculate"""'}), "('(?P<pk>\\\\d+)/compute/$', metrics.calculate_metrics, name=\n 'metrics-calculate')\n", (4703, 4787), False, 'from django.urls import path, include, re_path\n'), ((5662, 5695), 'django.urls.path', 'path', (['""""""', 'views.home'], {'name': '"""home"""'}), "('', views.home, name='home')\n", (5666, 5695), False, 'from django.urls import path, include, re_path\n'), ((5882, 5933), 'django.urls.path', 'path', (['"""contact/"""', 'views.contactView'], {'name': '"""contact"""'}), "('contact/', views.contactView, name='contact')\n", (5886, 5933), False, 'from django.urls import path, include, re_path\n'), ((1664, 1699), 'django.urls.include', 'include', 
(['"""django.contrib.auth.urls"""'], {}), "('django.contrib.auth.urls')\n", (1671, 1699), False, 'from django.urls import path, include, re_path\n'), ((2956, 2979), 'django.urls.include', 'include', (['groups_pattern'], {}), '(groups_pattern)\n', (2963, 2979), False, 'from django.urls import path, include, re_path\n'), ((4292, 4326), 'django.urls.include', 'include', (['completed_puzzles_pattern'], {}), '(completed_puzzles_pattern)\n', (4299, 4326), False, 'from django.urls import path, include, re_path\n'), ((4353, 4387), 'django.urls.include', 'include', (['organizer_puzzles_pattern'], {}), '(organizer_puzzles_pattern)\n', (4360, 4387), False, 'from django.urls import path, include, re_path\n'), ((5715, 5737), 'django.urls.include', 'include', (['news_patterns'], {}), '(news_patterns)\n', (5722, 5737), False, 'from django.urls import path, include, re_path\n'), ((5762, 5787), 'django.urls.include', 'include', (['accounts_pattern'], {}), '(accounts_pattern)\n', (5769, 5787), False, 'from django.urls import path, include, re_path\n'), ((5811, 5835), 'django.urls.include', 'include', (['puzzles_pattern'], {}), '(puzzles_pattern)\n', (5818, 5835), False, 'from django.urls import path, include, re_path\n'), ((5855, 5875), 'django.urls.include', 'include', (['faq_pattern'], {}), '(faq_pattern)\n', (5862, 5875), False, 'from django.urls import path, include, re_path\n'), ((5958, 5984), 'django.urls.include', 'include', (['resources_pattern'], {}), '(resources_pattern)\n', (5965, 5984), False, 'from django.urls import path, include, re_path\n'), ((6011, 6038), 'django.urls.include', 'include', (['submission_pattern'], {}), '(submission_pattern)\n', (6018, 6038), False, 'from django.urls import path, include, re_path\n')] |
import numpy as np
import queue
import cv2
import os
import datetime
SIZE = 32
SCALE = 0.007874015748031496
def quantized_np(array,scale,data_width=8):
quantized_array= np.round(array/scale)
quantized_array = np.maximum(quantized_array, -2**(data_width-1))
quantized_array = np.minimum(quantized_array, 2**(data_width-1)-1)
return quantized_array
def get_x_y_cuts(data, n_lines=1):
w, h = data.shape
visited = set()
q = queue.Queue()
offset = [(-1, -1), (0, -1), (1, -1), (-1, 0),
(1, 0), (-1, 1), (0, 1), (1, 1)]
cuts = []
for y in range(h):
for x in range(w):
x_axis = []
y_axis = []
if data[x][y] < 200 and (x, y) not in visited:
q.put((x, y))
visited.add((x, y))
while not q.empty():
x_p, y_p = q.get()
for x_offset, y_offset in offset:
x_c, y_c = x_p + x_offset, y_p + y_offset
if (x_c, y_c) in visited:
continue
visited.add((x_c, y_c))
try:
if data[x_c][y_c] < 200:
q.put((x_c, y_c))
x_axis.append(x_c)
y_axis.append(y_c)
except:
pass
if x_axis:
min_x, max_x = min(x_axis), max(x_axis)
min_y, max_y = min(y_axis), max(y_axis)
if max_x - min_x > 3 and max_y - min_y > 3:
cuts.append([min_x, max_x + 1, min_y, max_y + 1])
if n_lines == 1:
cuts = sorted(cuts, key=lambda x: x[2])
pr_item = cuts[0]
count = 1
len_cuts = len(cuts)
new_cuts = [cuts[0]]
pr_k = 0
for i in range(1, len_cuts):
pr_item = new_cuts[pr_k]
now_item = cuts[i]
if not (now_item[2] > pr_item[3]):
new_cuts[pr_k][0] = min(pr_item[0], now_item[0])
new_cuts[pr_k][1] = max(pr_item[1], now_item[1])
new_cuts[pr_k][2] = min(pr_item[2], now_item[2])
new_cuts[pr_k][3] = max(pr_item[3], now_item[3])
else:
new_cuts.append(now_item)
pr_k += 1
cuts = new_cuts
return cuts
def get_image_cuts(image, dir=None, is_data=False, n_lines=1, data_needed=False, count=0,QUAN = False):
if is_data:
data = image
else:
data = cv2.imread(image, 2)
cuts = get_x_y_cuts(data, n_lines=n_lines)
image_cuts = None
for i, item in enumerate(cuts):
count += 1
max_dim = max(item[1] - item[0], item[3] - item[2])
new_data = np.ones((int(1.4 * max_dim), int(1.4 * max_dim))) * 255
x_min, x_max = (
max_dim - item[1] + item[0]) // 2, (max_dim - item[1] + item[0]) // 2 + item[1] - item[0]
y_min, y_max = (
max_dim - item[3] + item[2]) // 2, (max_dim - item[3] + item[2]) // 2 + item[3] - item[2]
new_data[int(0.2 * max_dim) + x_min:int(0.2 * max_dim) + x_max, int(0.2 * max_dim) +
y_min:int(0.2 * max_dim) + y_max] = data[item[0]:item[1], item[2]:item[3]]
standard_data = cv2.resize(new_data, (SIZE, SIZE))
if not data_needed:
cv2.imwrite(dir + str(count) + ".jpg", standard_data)
if data_needed:
data_flat = np.reshape(standard_data, (1, SIZE*SIZE))
data_flat = (255 - data_flat) / 255
if QUAN == True:
data_flat = quantized_np(data_flat,SCALE,data_width=8)
else:
pass
if image_cuts is None:
image_cuts = data_flat
else:
image_cuts = np.r_[image_cuts, data_flat]
if data_needed:
return image_cuts
return count
def main(img_dir):
for file in os.listdir(img_dir):
if file.endswith('jpeg'):
path = os.path.join(img_dir, file)
oldtime = datetime.datetime.now()
#count = process.get_image_cuts(path, dir='./dataset/'+file.split('.')[0]+'_cut',count=0)
image_cuts = get_image_cuts(
path, dir = img_dir + file.split('.')[0]+'_cut', count=0, data_needed=True)
newtime = datetime.datetime.now()
Totaltime = (newtime-oldtime).microseconds
print("image cut time: ", Totaltime)
print(np.size(image_cuts, 0))
if __name__ == '__main__':
img_dir = './dataset'
main(img_dir)
| [
"os.listdir",
"numpy.reshape",
"numpy.minimum",
"cv2.resize",
"numpy.size",
"os.path.join",
"queue.Queue",
"datetime.datetime.now",
"numpy.maximum",
"cv2.imread",
"numpy.round"
] | [((175, 198), 'numpy.round', 'np.round', (['(array / scale)'], {}), '(array / scale)\n', (183, 198), True, 'import numpy as np\n'), ((219, 270), 'numpy.maximum', 'np.maximum', (['quantized_array', '(-2 ** (data_width - 1))'], {}), '(quantized_array, -2 ** (data_width - 1))\n', (229, 270), True, 'import numpy as np\n'), ((289, 343), 'numpy.minimum', 'np.minimum', (['quantized_array', '(2 ** (data_width - 1) - 1)'], {}), '(quantized_array, 2 ** (data_width - 1) - 1)\n', (299, 343), True, 'import numpy as np\n'), ((451, 464), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (462, 464), False, 'import queue\n'), ((3947, 3966), 'os.listdir', 'os.listdir', (['img_dir'], {}), '(img_dir)\n', (3957, 3966), False, 'import os\n'), ((2533, 2553), 'cv2.imread', 'cv2.imread', (['image', '(2)'], {}), '(image, 2)\n', (2543, 2553), False, 'import cv2\n'), ((3277, 3311), 'cv2.resize', 'cv2.resize', (['new_data', '(SIZE, SIZE)'], {}), '(new_data, (SIZE, SIZE))\n', (3287, 3311), False, 'import cv2\n'), ((3454, 3497), 'numpy.reshape', 'np.reshape', (['standard_data', '(1, SIZE * SIZE)'], {}), '(standard_data, (1, SIZE * SIZE))\n', (3464, 3497), True, 'import numpy as np\n'), ((4021, 4048), 'os.path.join', 'os.path.join', (['img_dir', 'file'], {}), '(img_dir, file)\n', (4033, 4048), False, 'import os\n'), ((4071, 4094), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4092, 4094), False, 'import datetime\n'), ((4352, 4375), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4373, 4375), False, 'import datetime\n'), ((4498, 4520), 'numpy.size', 'np.size', (['image_cuts', '(0)'], {}), '(image_cuts, 0)\n', (4505, 4520), True, 'import numpy as np\n')] |
"""Packager for cloud environment."""
from setuptools import setup, find_packages
setup(
name='preprocess',
version='1.0.0',
packages=find_packages(),
install_requires=[
'tensorflow',
'numpy',
],
)
| [
"setuptools.find_packages"
] | [((147, 162), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (160, 162), False, 'from setuptools import setup, find_packages\n')] |
import numpy as np
import time
def max_subsequence_sum(sequence):
max_sum = 0
for i in range(0, len(sequence)):
for j in range(i, len(sequence)):
this_sum = 0
for k in range(i, j+1):
this_sum += sequence[k]
if this_sum > max_sum:
max_sum = this_sum
return max_sum
seq = np.random.randint(-100000,100000,size=1000)
start = time.time()
result = max_subsequence_sum(seq)
print(time.time() - start) | [
"numpy.random.randint",
"time.time"
] | [((372, 417), 'numpy.random.randint', 'np.random.randint', (['(-100000)', '(100000)'], {'size': '(1000)'}), '(-100000, 100000, size=1000)\n', (389, 417), True, 'import numpy as np\n'), ((424, 435), 'time.time', 'time.time', ([], {}), '()\n', (433, 435), False, 'import time\n'), ((476, 487), 'time.time', 'time.time', ([], {}), '()\n', (485, 487), False, 'import time\n')] |
import pygame
from tools import render_text
from graph_object import GraphObject
class Edge (GraphObject):
def __init__(self, v1, v2, weight=0, width=1, color=(0, 0, 0)):
super().__init__()
self.v1, self.v2 = v1, v2
self.__weight, self.__weight_surface = 0, None
self.set_weight(weight)
self.width, self.color = width, color
@property
def weight(self):
return self.__weight
def set_weight(self, weight):
self.__weight = weight
self.__weight_surface = render_text(str(weight), "Arial", 16)
@property
def pos1(self):
return self.v1.pos
@property
def pos2(self):
return self.v2.pos
def draw(self, surface):
super().draw(surface)
pygame.draw.line(surface, self.color, self.pos1, self.pos2, self.width) # draw edge line
if self.__weight_surface: # draw edge weight
x1, y1 = self.pos1
x2, y2 = self.pos2
pos = (x2-x1)//2 + x1, (y2-y1)//2 + y1
surface.blit(self.__weight_surface, pos) | [
"pygame.draw.line"
] | [((766, 837), 'pygame.draw.line', 'pygame.draw.line', (['surface', 'self.color', 'self.pos1', 'self.pos2', 'self.width'], {}), '(surface, self.color, self.pos1, self.pos2, self.width)\n', (782, 837), False, 'import pygame\n')] |
from py_profiler import profiler, profiling_service
@profiler('hello')
def hello():
print('hello')
class Foo:
@profiler('Food.some_thing')
def some_thing(self):
print('some_thing')
@profiler()
def method_2(self):
print('method_2')
raise Exception('aaaa')
if __name__ == "__main__":
try:
foo = Foo()
hello()
foo.some_thing()
foo.method_2()
except Exception as e:
pass
finally:
print(profiling_service.as_table())
| [
"py_profiler.profiling_service.as_table",
"py_profiler.profiler"
] | [((55, 72), 'py_profiler.profiler', 'profiler', (['"""hello"""'], {}), "('hello')\n", (63, 72), False, 'from py_profiler import profiler, profiling_service\n'), ((124, 151), 'py_profiler.profiler', 'profiler', (['"""Food.some_thing"""'], {}), "('Food.some_thing')\n", (132, 151), False, 'from py_profiler import profiler, profiling_service\n'), ((212, 222), 'py_profiler.profiler', 'profiler', ([], {}), '()\n', (220, 222), False, 'from py_profiler import profiler, profiling_service\n'), ((494, 522), 'py_profiler.profiling_service.as_table', 'profiling_service.as_table', ([], {}), '()\n', (520, 522), False, 'from py_profiler import profiler, profiling_service\n')] |
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of rm command for deleting resources."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.storage import user_request_args_factory
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.storage import errors
from googlecloudsdk.command_lib.storage import flags
from googlecloudsdk.command_lib.storage import name_expansion
from googlecloudsdk.command_lib.storage import plurality_checkable_iterator
from googlecloudsdk.command_lib.storage import stdin_iterator
from googlecloudsdk.command_lib.storage.tasks import task_executor
from googlecloudsdk.command_lib.storage.tasks import task_graph_executor
from googlecloudsdk.command_lib.storage.tasks import task_status
from googlecloudsdk.command_lib.storage.tasks.rm import delete_task_iterator_factory
from googlecloudsdk.core import log
class Rm(base.Command):
"""Delete objects and buckets."""
detailed_help = {
'DESCRIPTION':
"""
Delete objects and buckets.
""",
'EXAMPLES':
"""
The following command deletes a Cloud Storage object named ``my-object''
from the bucket ``my-bucket'':
$ {command} gs://my-bucket/my-object
The following command deletes all objects directly within the directory
``my-dir'' but no objects within subdirectories:
$ {command} gs://my-bucket/my-dir/*
The following command deletes all objects and subdirectories within the
directory ``my-dir'':
$ {command} gs://my-bucket/my-dir/**
Note that for buckets that contain
[versioned objects](https://cloud.google.com/storage/docs/object-versioning),
the above command only affects live versions. Use the `--recursive` flag
instead to delete all versions.
The following command deletes all versions of all resources in
``my-bucket'' and then deletes the bucket.
$ {command} --recursive gs://my-bucket/
The following command deletes all text files in the top-level of
``my-bucket'', but not text files in subdirectories:
$ {command} -recursive gs://my-bucket/*.txt
The following command deletes one wildcard expression per line passed
in by stdin:
$ some_program | {command} -I
""",
}
@staticmethod
def Args(parser):
parser.add_argument(
'urls',
nargs='*',
help='The URLs of the resources to delete.')
parser.add_argument(
'--stdin',
'-I',
action='store_true',
help='Read the list of resources to remove from stdin.')
parser.add_argument(
'--recursive',
'-R',
'-r',
action='store_true',
help=('Recursively delete the contents of buckets or directories that'
' match the path expression. If the path is set to a bucket, like'
' ``gs://bucket\'\', the bucket is also deleted. This option'
' implies the `--all-versions` option. If you want to delete only'
' live object versions, use the ``**\'\' wildcard instead.'))
parser.add_argument(
'--all-versions',
'-a',
action='store_true',
help='Delete all'
' [versions](https://cloud.google.com/storage/docs/object-versioning)'
' of an object.')
flags.add_precondition_flags(parser)
def Run(self, args):
if args.stdin:
if args.urls:
raise errors.Error(
'No URL arguments allowed when reading URLs from stdin.')
urls = stdin_iterator.StdinIterator()
else:
if not args.urls:
raise errors.Error(
'Without the --stdin flag, the rm command requires at least one URL'
' argument.')
urls = args.urls
name_expansion_iterator = name_expansion.NameExpansionIterator(
urls,
all_versions=args.all_versions or args.recursive,
include_buckets=args.recursive,
recursion_requested=args.recursive)
user_request_args = (
user_request_args_factory.get_user_request_args_from_command_args(args))
task_status_queue = task_graph_executor.multiprocessing_context.Queue()
task_iterator_factory = (
delete_task_iterator_factory.DeleteTaskIteratorFactory(
name_expansion_iterator,
task_status_queue=task_status_queue,
user_request_args=user_request_args))
log.status.Print('Removing objects:')
object_exit_code = task_executor.execute_tasks(
task_iterator_factory.object_iterator(),
parallelizable=True,
task_status_queue=task_status_queue,
progress_type=task_status.ProgressType.COUNT)
bucket_iterator = plurality_checkable_iterator.PluralityCheckableIterator(
task_iterator_factory.bucket_iterator())
# We perform the is_empty check to avoid printing unneccesary status lines.
if args.recursive and not bucket_iterator.is_empty():
log.status.Print('Removing Buckets:')
bucket_exit_code = task_executor.execute_tasks(
bucket_iterator,
parallelizable=True,
task_status_queue=task_status_queue,
progress_type=task_status.ProgressType.COUNT)
else:
bucket_exit_code = 0
self.exit_code = max(object_exit_code, bucket_exit_code)
| [
"googlecloudsdk.core.log.status.Print",
"googlecloudsdk.command_lib.storage.tasks.task_executor.execute_tasks",
"googlecloudsdk.command_lib.storage.tasks.task_graph_executor.multiprocessing_context.Queue",
"googlecloudsdk.command_lib.storage.flags.add_precondition_flags",
"googlecloudsdk.command_lib.storage... | [((4001, 4037), 'googlecloudsdk.command_lib.storage.flags.add_precondition_flags', 'flags.add_precondition_flags', (['parser'], {}), '(parser)\n', (4029, 4037), False, 'from googlecloudsdk.command_lib.storage import flags\n'), ((4466, 4635), 'googlecloudsdk.command_lib.storage.name_expansion.NameExpansionIterator', 'name_expansion.NameExpansionIterator', (['urls'], {'all_versions': '(args.all_versions or args.recursive)', 'include_buckets': 'args.recursive', 'recursion_requested': 'args.recursive'}), '(urls, all_versions=args.all_versions or\n args.recursive, include_buckets=args.recursive, recursion_requested=\n args.recursive)\n', (4502, 4635), False, 'from googlecloudsdk.command_lib.storage import name_expansion\n'), ((4695, 4766), 'googlecloudsdk.api_lib.storage.user_request_args_factory.get_user_request_args_from_command_args', 'user_request_args_factory.get_user_request_args_from_command_args', (['args'], {}), '(args)\n', (4760, 4766), False, 'from googlecloudsdk.api_lib.storage import user_request_args_factory\n'), ((4792, 4843), 'googlecloudsdk.command_lib.storage.tasks.task_graph_executor.multiprocessing_context.Queue', 'task_graph_executor.multiprocessing_context.Queue', ([], {}), '()\n', (4841, 4843), False, 'from googlecloudsdk.command_lib.storage.tasks import task_graph_executor\n'), ((4882, 5039), 'googlecloudsdk.command_lib.storage.tasks.rm.delete_task_iterator_factory.DeleteTaskIteratorFactory', 'delete_task_iterator_factory.DeleteTaskIteratorFactory', (['name_expansion_iterator'], {'task_status_queue': 'task_status_queue', 'user_request_args': 'user_request_args'}), '(name_expansion_iterator,\n task_status_queue=task_status_queue, user_request_args=user_request_args)\n', (4936, 5039), False, 'from googlecloudsdk.command_lib.storage.tasks.rm import delete_task_iterator_factory\n'), ((5079, 5116), 'googlecloudsdk.core.log.status.Print', 'log.status.Print', (['"""Removing objects:"""'], {}), "('Removing 
objects:')\n", (5095, 5116), False, 'from googlecloudsdk.core import log\n'), ((4212, 4242), 'googlecloudsdk.command_lib.storage.stdin_iterator.StdinIterator', 'stdin_iterator.StdinIterator', ([], {}), '()\n', (4240, 4242), False, 'from googlecloudsdk.command_lib.storage import stdin_iterator\n'), ((5620, 5657), 'googlecloudsdk.core.log.status.Print', 'log.status.Print', (['"""Removing Buckets:"""'], {}), "('Removing Buckets:')\n", (5636, 5657), False, 'from googlecloudsdk.core import log\n'), ((5683, 5840), 'googlecloudsdk.command_lib.storage.tasks.task_executor.execute_tasks', 'task_executor.execute_tasks', (['bucket_iterator'], {'parallelizable': '(True)', 'task_status_queue': 'task_status_queue', 'progress_type': 'task_status.ProgressType.COUNT'}), '(bucket_iterator, parallelizable=True,\n task_status_queue=task_status_queue, progress_type=task_status.\n ProgressType.COUNT)\n', (5710, 5840), False, 'from googlecloudsdk.command_lib.storage.tasks import task_executor\n'), ((4115, 4185), 'googlecloudsdk.command_lib.storage.errors.Error', 'errors.Error', (['"""No URL arguments allowed when reading URLs from stdin."""'], {}), "('No URL arguments allowed when reading URLs from stdin.')\n", (4127, 4185), False, 'from googlecloudsdk.command_lib.storage import errors\n'), ((4291, 4393), 'googlecloudsdk.command_lib.storage.errors.Error', 'errors.Error', (['"""Without the --stdin flag, the rm command requires at least one URL argument."""'], {}), "(\n 'Without the --stdin flag, the rm command requires at least one URL argument.'\n )\n", (4303, 4393), False, 'from googlecloudsdk.command_lib.storage import errors\n')] |
import sqlite3
import sys
import datetime
import os
# day of month to switch to new database
change_day = 1
c_added_text_entities = [["sensor.hitachi_relay", "sensor.netatmo_relay"],
["sensor.cooling_target_temp", "sensor.heating_target_temp"]]
user = "pipacsba"
server_ip = "192.168.17.115"
ssh_key = "/home/scripts/ssh_pass.key"
files_ready = False
def do_merge(file_to, file_from, file_schema, work_dir):
global files_ready
os.chdir(work_dir)
# copy the current db from hass server
scp_command = "scp -i " + ssh_key + " " + user + "@" + server_ip + ":/mnt/ramdsk/home-assistant_v2.db " + file_from
# print(scp_command)
os.system(scp_command)
steps_ready = 0
if datetime.datetime.now().day == change_day:
print("Change database")
saved = {}
# first get last values from file_to
if steps_ready == 0:
db_to = sqlite3.connect(file_to)
to_cursorr = db_to.cursor()
# flatten the input data
x_flat = (item for row in c_added_text_entities for item in row)
# for each entity
for entity in x_flat:
awhere = (entity,)
ret = to_cursorr.execute('SELECT * from states WHERE entity_id=? ORDER BY state_id DESC LIMIT 1', awhere)
entity_state = to_cursorr.fetchall()
saved[entity] = entity_state[0]
to_cursorr.close()
db_to.close()
steps_ready = 1
# move the old file to a new file
if steps_ready == 1:
yesterday = datetime.datetime.strftime(datetime.datetime.now() - datetime.timedelta(days=1), '%Y_%m')
os.rename(file_to, "hass_" + yesterday + ".db")
steps_ready = 2
# copy the template to hass.db
if steps_ready == 2:
os.system('cp ' + file_schema + ' ' + file_to)
steps_ready = 3
# get the first date from the file_from DB
if steps_ready == 3:
db_from = sqlite3.connect(file_from)
from_cursor = db_from.cursor()
ret = from_cursor.execute('SELECT last_changed from states ORDER BY state_id ASC LIMIT 10')
times = from_cursor.fetchall()
min_time = datetime.datetime.now()
for atime_str in times:
atime = datetime.datetime.strptime(atime_str[0], "%Y-%m-%d %H:%M:%S.%f")
if atime < min_time:
min_time = atime
min_time_str = min_time.strftime("%Y-%m-%d %H:%M:%S.%f")
from_cursor.close()
db_from.close()
steps_ready = 4
# add saved entities to new db
if steps_ready == 4:
db_to = sqlite3.connect(file_to)
to_cursorr = db_to.cursor()
unique_id = 0
for entity in saved:
unique_id = unique_id + 1
row = saved[entity]
row = row[:10] + ('',)
row = (unique_id,) + row[1:]
row = row[:6] + (min_time_str,) + (min_time_str,) + (min_time_str,) + row[9:]
# sometimes the \\ is duplicated
row = row[:4] + (str(row[4]).replace("\\\\", "\\"), ) + row[5:]
my_command = 'INSERT INTO states (state_id, domain, entity_id, state, attributes, event_id, last_changed, last_updated, created, context_id, context_user_id) VALUES ' + str(row)
to_cursorr.execute(my_command)
db_to.commit()
to_cursorr.close()
db_to.close()
steps_ready = 5
if steps_ready == 5:
files_ready = True
else:
files_ready = True
if files_ready:
db_to = sqlite3.connect(file_to)
db_from = sqlite3.connect(file_from)
to_cursor = db_to.cursor()
# print(to_cursor.execute("tables"))
# Get the contents of a table
from_cursor = db_from.cursor()
from_cursor.execute('SELECT * FROM states')
output = from_cursor.fetchall() # Returns the results as a list.
# Insert those contents into another table.
ret = to_cursor.execute('SELECT MAX(state_id) FROM states')
unique_id = to_cursor.fetchall()
if None in unique_id[0]:
unique_id = 0
else:
unique_id = int(list(unique_id[0])[0])
print("Existing database max state_id: ", unique_id)
num_added = 0
num_duplicate = 0
num_skipped = 0
for row in output:
unique_id = unique_id + 1
row = row[:10] + ('',)
row = (unique_id,) + row[1:]
# sometimes the \\ is duplicated
row = row[:4] + (str(row[4]).replace("\\\\", "\\"), ) + row[5:]
row = row[:9]
# print(row)
awhere = (row[2],) + (row[3],)+(row[6],) + (row[7],)
# print(awhere)
to_cursor.execute('SELECT state_id FROM states WHERE entity_id=? AND state=? AND last_changed=? AND last_updated=?', awhere)
isunique = to_cursor.fetchone()
# print(isunique)
if None is isunique:
# WHERE last_changed=last_updated
if row[6] == row[7]:
my_command = 'INSERT INTO states (state_id, domain, entity_id, state, attributes, event_id, last_changed, last_updated, created) VALUES ' + str(row)
# print(my_command)
to_cursor.execute(my_command)
# print(unique_id, " unique element found, added.")
num_added = num_added+1
else:
num_skipped = num_skipped+1
unique_id = unique_id-1
else:
# print("Non unique element found, not added.")
unique_id = unique_id-1
num_duplicate = num_duplicate+1
print(num_added, " element added to the database, ", num_duplicate, " element was existing already and ", num_skipped, " elements were not state-changes.")
# Cleanup
db_to.commit()
db_to.execute("VACUUM")
to_cursor.close()
from_cursor.close()
db_to.close()
db_from.close()
# copy the current db from hass server
os.system("rm " + file_from)
if __name__ == '__main__':
file__to = sys.argv[1]
file__from = sys.argv[2]
file__schema = sys.argv[3]
work__dir = sys.path[0]
if len(sys.argv) > 4: work__dir = sys.argv[4]
do_merge(file__to, file__from, file__schema, work__dir)
| [
"sqlite3.connect",
"datetime.datetime.strptime",
"os.rename",
"os.chdir",
"datetime.datetime.now",
"os.system",
"datetime.timedelta"
] | [((486, 504), 'os.chdir', 'os.chdir', (['work_dir'], {}), '(work_dir)\n', (494, 504), False, 'import os\n'), ((701, 723), 'os.system', 'os.system', (['scp_command'], {}), '(scp_command)\n', (710, 723), False, 'import os\n'), ((6526, 6554), 'os.system', 'os.system', (["('rm ' + file_from)"], {}), "('rm ' + file_from)\n", (6535, 6554), False, 'import os\n'), ((3909, 3933), 'sqlite3.connect', 'sqlite3.connect', (['file_to'], {}), '(file_to)\n', (3924, 3933), False, 'import sqlite3\n'), ((3953, 3979), 'sqlite3.connect', 'sqlite3.connect', (['file_from'], {}), '(file_from)\n', (3968, 3979), False, 'import sqlite3\n'), ((755, 778), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (776, 778), False, 'import datetime\n'), ((951, 975), 'sqlite3.connect', 'sqlite3.connect', (['file_to'], {}), '(file_to)\n', (966, 975), False, 'import sqlite3\n'), ((1760, 1807), 'os.rename', 'os.rename', (['file_to', "('hass_' + yesterday + '.db')"], {}), "(file_to, 'hass_' + yesterday + '.db')\n", (1769, 1807), False, 'import os\n'), ((1930, 1976), 'os.system', 'os.system', (["('cp ' + file_schema + ' ' + file_to)"], {}), "('cp ' + file_schema + ' ' + file_to)\n", (1939, 1976), False, 'import os\n'), ((2125, 2151), 'sqlite3.connect', 'sqlite3.connect', (['file_from'], {}), '(file_from)\n', (2140, 2151), False, 'import sqlite3\n'), ((2369, 2392), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2390, 2392), False, 'import datetime\n'), ((2866, 2890), 'sqlite3.connect', 'sqlite3.connect', (['file_to'], {}), '(file_to)\n', (2881, 2890), False, 'import sqlite3\n'), ((2455, 2519), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['atime_str[0]', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(atime_str[0], '%Y-%m-%d %H:%M:%S.%f')\n", (2481, 2519), False, 'import datetime\n'), ((1684, 1707), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1705, 1707), False, 'import datetime\n'), ((1710, 1736), 'datetime.timedelta', 
'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1728, 1736), False, 'import datetime\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
####################
import time
import logging
import indigo
from os.path import exists
import paho.mqtt.client as mqtt
################################################################################
class MQTTBroker(object):
    """Connection manager for one MQTT broker, configured from an Indigo device.

    Reads address/port/credentials/TLS settings from the device's pluginProps,
    runs a paho-mqtt client on a background network loop, and mirrors the
    connection status into the device's "status" state.
    """

    def __init__(self, device):
        self.logger = logging.getLogger("Plugin.MQTTBroker")
        self.deviceID = device.id
        self.reconnectTime = None

        self.address = device.pluginProps.get(u'address', "")
        self.port = int(device.pluginProps.get(u'port', 1883))
        self.protocol = int(device.pluginProps.get(u'protocol', 4))
        self.transport = device.pluginProps.get(u'transport', "tcp")
        # Bug fix: .get(..., None).strip() raised AttributeError when the
        # property was absent (or stored as None).  Normalize to a stripped
        # string; an empty result is treated as "no credentials" below.
        self.username = (device.pluginProps.get(u'username') or u"").strip()
        self.password = (device.pluginProps.get(u'password') or u"").strip()

        self.logger.debug(u"{}: Broker __init__ address = {}, port = {}, protocol = {}, transport = {}".format(device.name, self.address, self.port, self.protocol, self.transport))

        device.updateStateOnServer(key="status", value="Not Connected")
        device.updateStateImageOnServer(indigo.kStateImageSel.SensorOff)

        self.client = mqtt.Client(client_id="indigo-mqtt-{}".format(device.id), clean_session=True, userdata=None, protocol=self.protocol, transport=self.transport)
        self.client.suppress_exceptions = True

        # Route paho's internal logging through our logger when plugin debugging is on.
        if bool(indigo.activePlugin.pluginPrefs[u"showDebugInfo"]):
            self.logger.debug(u"{}: Enabling library level debugging".format(device.name))
            self.client.enable_logger(self.logger)

        if self.username:
            self.client.username_pw_set(self.username, self.password)

        if device.pluginProps.get(u'useTLS', False):
            certFile = device.pluginProps.get(u'certFile', None)
            if not certFile or not len(certFile):
                self.logger.debug(u"{}: No cert file provided, using default cert_file".format(device.name))
                self.client.tls_set()
            else:
                self.logger.debug(u"{}: Specified cert_file '{}'".format(device.name, certFile))
                if certFile[0:1] != '/':        # leave absolute path alone
                    certFile = indigo.server.getInstallFolderPath() + '/' + certFile
                if not exists(certFile):
                    self.logger.debug(u"{}: Specified cert file '{}' doesn't exist, using default cert_file".format(device.name, certFile))
                    self.client.tls_set()
                else:
                    self.logger.debug(u"{}: Using cert_file '{}'".format(device.name, certFile))
                    self.client.tls_set(ca_certs=certFile)

        self.client.on_connect = self.on_connect
        self.client.on_disconnect = self.on_disconnect
        self.client.on_message = self.on_message
        self.client.on_publish = self.on_publish
        self.client.on_subscribe = self.on_subscribe
        self.client.on_unsubscribe = self.on_unsubscribe

        try:
            self.client.connect(self.address, self.port, 60)
        except Exception as e:
            self.logger.debug(u"{}: Broker connect error: {}".format(device.name, e))
            device.updateStateOnServer(key="status", value="Connection Failed")
            device.updateStateImageOnServer(indigo.kStateImageSel.SensorTripped)
            self.connected = False
        else:
            self.connected = True
            self.client.loop_start()    # network traffic handled on a background thread

    def disconnect(self):
        """Cleanly shut down the client; the on_disconnect callback is suppressed."""
        self.client.on_disconnect = None
        device = indigo.devices[self.deviceID]
        self.logger.info(u"{}: Disconnecting".format(device.name))
        self.client.loop_stop()
        self.client.disconnect()
        # The callback is unhooked above, so record the state change here.
        self.connected = False
        device.updateStateOnServer(key="status", value="Not Connected")
        device.updateStateImageOnServer(indigo.kStateImageSel.SensorOff)

    def publish(self, topic, payload=None, qos=0, retain=False):
        """Publish payload on topic at the given QoS."""
        self.client.publish(topic, payload, qos, retain)

    def subscribe(self, topic, qos=0):
        """Subscribe to topic at the given QoS."""
        device = indigo.devices[self.deviceID]
        self.logger.info(u"{}: Subscribing to: {} ({})".format(device.name, topic, qos))
        self.client.subscribe(topic, qos)

    def unsubscribe(self, topic):
        """Drop an existing subscription."""
        device = indigo.devices[self.deviceID]
        self.logger.info(u"{}: Unsubscribing from: {}".format(device.name, topic))
        self.client.unsubscribe(topic)

    ################################################################################
    # Callbacks (invoked by paho on its network thread)
    ################################################################################
    def on_connect(self, client, userdata, flags, rc):
        """Handle (re)connection.

        Subscribing here means that after a dropped connection the
        subscriptions are renewed automatically on reconnect.
        """
        device = indigo.devices[self.deviceID]
        self.logger.debug(u"{}: Connected with result code {}".format(device.name, rc))
        subs = device.pluginProps.get(u'subscriptions', None)
        if subs:
            for s in subs:
                # Each entry appears to be "<qos><sep><topic>" — a single QoS
                # digit, one separator character, then the topic string.
                qos = int(s[0:1])
                topic = s[2:]
                self.logger.info(u"{}: Subscribing to: {} ({})".format(device.name, topic, qos))
                client.subscribe(topic, qos)
        device.updateStateOnServer(key="status", value="Connected {}".format(rc))
        device.updateStateImageOnServer(indigo.kStateImageSel.SensorOn)

    def on_disconnect(self, client, userdata, rc):
        """Handle an unexpected disconnect reported by paho."""
        device = indigo.devices[self.deviceID]
        self.logger.error(u"{}: Disconnected with result code {}".format(device.name, rc))
        device.updateStateOnServer(key="status", value="Disconnected {}".format(rc))
        device.updateStateImageOnServer(indigo.kStateImageSel.SensorTripped)
        self.connected = False

    def on_message(self, client, userdata, msg):
        """Decode an inbound message and hand it to the plugin."""
        device = indigo.devices[self.deviceID]
        payload = msg.payload.decode("utf-8")
        self.logger.threaddebug(u"{}: Message topic: {}, payload = {}".format(device.name, msg.topic, payload))
        indigo.activePlugin.processReceivedMessage(self.deviceID, msg.topic, payload)

    def on_publish(self, client, userdata, mid):
        """Log the broker's acknowledgement of an outbound publish."""
        device = indigo.devices[self.deviceID]
        self.logger.threaddebug(u"{}: Message published: {}".format(device.name, mid))

    def on_subscribe(self, client, userdata, mid, granted_qos):
        """Log the broker's acknowledgement of a subscription."""
        device = indigo.devices[self.deviceID]
        self.logger.threaddebug(u"{}: Subscribe complete: {}, {}".format(device.name, mid, granted_qos))

    def on_unsubscribe(self, client, userdata, mid):
        """Log the broker's acknowledgement of an unsubscribe."""
        device = indigo.devices[self.deviceID]
        self.logger.threaddebug(u"{}: Unsubscribe complete: {}".format(device.name, mid))
| [
"logging.getLogger",
"os.path.exists",
"indigo.server.getInstallFolderPath",
"indigo.activePlugin.processReceivedMessage"
] | [((333, 371), 'logging.getLogger', 'logging.getLogger', (['"""Plugin.MQTTBroker"""'], {}), "('Plugin.MQTTBroker')\n", (350, 371), False, 'import logging\n'), ((6149, 6226), 'indigo.activePlugin.processReceivedMessage', 'indigo.activePlugin.processReceivedMessage', (['self.deviceID', 'msg.topic', 'payload'], {}), '(self.deviceID, msg.topic, payload)\n', (6191, 6226), False, 'import indigo\n'), ((2351, 2367), 'os.path.exists', 'exists', (['certFile'], {}), '(certFile)\n', (2357, 2367), False, 'from os.path import exists\n'), ((2274, 2310), 'indigo.server.getInstallFolderPath', 'indigo.server.getInstallFolderPath', ([], {}), '()\n', (2308, 2310), False, 'import indigo\n')] |
#!/usr/bin/env python
# pylint: disable=too-many-locals,arguments-differ,unused-import
import tensorflow as tf
from tensorflow.keras.layers import (BatchNormalization, Dense, Dropout,
Flatten, MaxPooling2D, SpatialDropout2D,
add)
from tensorflow.nn import leaky_relu, relu, tanh
from deep_model_blocks import (BottleneckResidualBlock, Conv, ConvBlock,
Deconv, DeconvBlock, ResidualBlock, ResizeBlock,
ReverseBottleneckResidualBlock,
ReverseResidualBlock, UBlock)
from model import Model
class Deep480pNoise(Model):
    """Noise-to-image GAN emitting 480x640 frames from a latent vector."""

    class Generator(tf.keras.Model):
        """Projects the latent vector to a 15x20x64 map, then upsamples 2x
        five times (interleaved with stride-1 convs) up to 480x640."""

        def __init__(self, config):
            super(Deep480pNoise.Generator, self).__init__()
            base = 512 // 32  # smallest channel unit of the stack (16)
            self.fc = tf.keras.layers.Dense(15 * 20 * 64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                DeconvBlock(base * 32, 5, 2),
                DeconvBlock(base * 16, 5, 2),
                ConvBlock(base * 8, 5, 1),
                DeconvBlock(base * 8, 5, 2),
                ConvBlock(base * 4, 5, 1),
                DeconvBlock(base * 4, 5, 2),
                ConvBlock(base * 2, 5, 1),
                DeconvBlock(base * 2, 5, 2),
            ]
            # 3 channels for color targets, 1 for grayscale; tanh output range.
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)

        def call(self, x, training=True):
            out = tf.nn.relu(self.initial_norm(self.fc(x), training=training))
            out = tf.reshape(out, shape=(-1, 15, 20, 64))
            for layer in self.blocks:
                out = layer(out, training=training)
            return tanh(self.final_conv(out))

    class Discriminator(tf.keras.Model):
        """Strided conv stack mapping an image to discriminator logits."""

        def __init__(self, config):
            super(Deep480pNoise.Discriminator, self).__init__()
            base = 32
            self.blocks = [
                ConvBlock(base * 2, 5, 2),
                ConvBlock(base * 1, 5, 1),
                ConvBlock(base * 4, 5, 2),
                ConvBlock(base * 2, 5, 1),
                ConvBlock(base * 8, 5, 2),
                ConvBlock(base * 4, 5, 1),
                ConvBlock(base * 16, 5, 2),
                ConvBlock(base * 8, 5, 1),
            ]
            self.dropout = Dropout(0.3)
            self.flatten = Flatten()
            self.fc = Dense(config.discriminator_classes, use_bias=False)

        def call(self, x, training=True):
            features = x
            for layer in self.blocks:
                features = layer(features, training=training)
            features = self.dropout(features, training=training)
            return self.fc(self.flatten(features))
class Deep480pNoiseFancyFilters(Model):
    """Variant of Deep480pNoise using 3x3 kernels early and 7x7 kernels late."""

    class Generator(tf.keras.Model):
        """Latent vector -> 15x20x64 map -> five 2x upsamplings to 480x640,
        with small kernels at low resolution and large kernels at high."""

        def __init__(self, config):
            super(Deep480pNoiseFancyFilters.Generator, self).__init__()
            base = 512 // 32  # smallest channel unit of the stack (16)
            self.fc = tf.keras.layers.Dense(15 * 20 * 64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                DeconvBlock(base * 32, 3, 2),
                DeconvBlock(base * 16, 3, 2),
                ConvBlock(base * 8, 3, 1),
                DeconvBlock(base * 8, 7, 2),
                ConvBlock(base * 4, 7, 1),
                DeconvBlock(base * 4, 7, 2),
                ConvBlock(base * 2, 7, 1),
                DeconvBlock(base * 2, 7, 2),
            ]
            # 3 channels for color targets, 1 for grayscale; tanh output range.
            self.final_conv = Conv(3 if config.has_colored_target else 1, 7, 1)

        def call(self, x, training=True):
            out = tf.nn.relu(self.initial_norm(self.fc(x), training=training))
            out = tf.reshape(out, shape=(-1, 15, 20, 64))
            for layer in self.blocks:
                out = layer(out, training=training)
            return tanh(self.final_conv(out))

    class Discriminator(tf.keras.Model):
        """Strided conv stack mapping an image to discriminator logits."""

        def __init__(self, config):
            super(Deep480pNoiseFancyFilters.Discriminator, self).__init__()
            base = 32
            self.blocks = [
                ConvBlock(base * 2, 5, 2),
                ConvBlock(base * 1, 5, 1),
                ConvBlock(base * 4, 5, 2),
                ConvBlock(base * 2, 5, 1),
                ConvBlock(base * 8, 5, 2),
                ConvBlock(base * 4, 5, 1),
                ConvBlock(base * 16, 5, 2),
                ConvBlock(base * 8, 5, 1),
            ]
            self.dropout = Dropout(0.3)
            self.flatten = Flatten()
            self.fc = Dense(config.discriminator_classes, use_bias=False)

        def call(self, x, training=True):
            features = x
            for layer in self.blocks:
                features = layer(features, training=training)
            features = self.dropout(features, training=training)
            return self.fc(self.flatten(features))
class Deep480pNoiseThreeSteps(Model):
    """Variant that starts from a 60x80 projection and upsamples only 3x."""

    class Generator(tf.keras.Model):
        """Latent vector -> 60x80x16 map -> three 2x upsamplings to 480x640."""

        def __init__(self, config):
            super(Deep480pNoiseThreeSteps.Generator, self).__init__()
            base = 32
            self.fc_shape = (60, 80, 16)  # spatial map the dense layer is reshaped into
            units = self.fc_shape[0] * self.fc_shape[1] * self.fc_shape[2]
            self.fc = tf.keras.layers.Dense(units, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                DeconvBlock(base * 4, 7, 2),
                ConvBlock(base * 2, 3, 1),
                ConvBlock(base * 2, 3, 1),
                DeconvBlock(base * 2, 7, 2),
                ConvBlock(base * 1, 3, 1),
                ConvBlock(base * 1, 3, 1),
                DeconvBlock(base * 1, 7, 2),
            ]
            # 3 channels for color targets, 1 for grayscale; tanh output range.
            self.final_conv = Conv(3 if config.has_colored_target else 1, 7, 1)

        def call(self, x, training=True):
            out = tf.nn.relu(self.initial_norm(self.fc(x), training=training))
            out = tf.reshape(out, shape=(-1,) + self.fc_shape)
            for layer in self.blocks:
                out = layer(out, training=training)
            return tanh(self.final_conv(out))

    class Discriminator(tf.keras.Model):
        """Strided conv stack mapping an image to discriminator logits."""

        def __init__(self, config):
            super(Deep480pNoiseThreeSteps.Discriminator, self).__init__()
            base = 32
            self.blocks = [
                ConvBlock(base * 2, 5, 2),
                ConvBlock(base * 1, 5, 1),
                ConvBlock(base * 4, 5, 2),
                ConvBlock(base * 2, 5, 1),
                ConvBlock(base * 8, 5, 2),
                ConvBlock(base * 4, 5, 1),
                ConvBlock(base * 16, 5, 2),
                ConvBlock(base * 8, 5, 1),
            ]
            self.dropout = Dropout(0.3)
            self.flatten = Flatten()
            self.fc = Dense(config.discriminator_classes, use_bias=False)

        def call(self, x, training=True):
            features = x
            for layer in self.blocks:
                features = layer(features, training=training)
            features = self.dropout(features, training=training)
            return self.fc(self.flatten(features))
class Deep480pNoiseSmallerGenLayer(Model):
    """Variant with two narrower stride-1 convs between each upsampling step."""

    class Generator(tf.keras.Model):
        """Latent vector -> 15x20x64 map -> five 2x upsamplings to 480x640,
        each followed by a pair of half-width refinement convs."""

        def __init__(self, config):
            super(Deep480pNoiseSmallerGenLayer.Generator, self).__init__()
            base = 512 // 32  # smallest channel unit of the stack (16)
            self.fc = tf.keras.layers.Dense(15 * 20 * 64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                DeconvBlock(base * 32, 5, 2),
                DeconvBlock(base * 16, 5, 2),
                ConvBlock(base * 4, 5, 1),
                ConvBlock(base * 4, 5, 1),
                DeconvBlock(base * 8, 5, 2),
                ConvBlock(base * 2, 5, 1),
                ConvBlock(base * 2, 5, 1),
                DeconvBlock(base * 4, 5, 2),
                ConvBlock(base * 1, 5, 1),
                ConvBlock(base * 1, 5, 1),
                DeconvBlock(base * 2, 5, 2),
            ]
            # 3 channels for color targets, 1 for grayscale; tanh output range.
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)

        def call(self, x, training=True):
            out = tf.nn.relu(self.initial_norm(self.fc(x), training=training))
            out = tf.reshape(out, shape=(-1, 15, 20, 64))
            for layer in self.blocks:
                out = layer(out, training=training)
            return tanh(self.final_conv(out))

    class Discriminator(tf.keras.Model):
        """Strided conv stack mapping an image to discriminator logits."""

        def __init__(self, config):
            super(Deep480pNoiseSmallerGenLayer.Discriminator, self).__init__()
            base = 32
            self.blocks = [
                ConvBlock(base * 2, 5, 2),
                ConvBlock(base * 1, 5, 1),
                ConvBlock(base * 4, 5, 2),
                ConvBlock(base * 2, 5, 1),
                ConvBlock(base * 8, 5, 2),
                ConvBlock(base * 4, 5, 1),
                ConvBlock(base * 16, 5, 2),
                ConvBlock(base * 8, 5, 1),
            ]
            self.dropout = Dropout(0.3)
            self.flatten = Flatten()
            self.fc = Dense(config.discriminator_classes, use_bias=False)

        def call(self, x, training=True):
            features = x
            for layer in self.blocks:
                features = layer(features, training=training)
            features = self.dropout(features, training=training)
            return self.fc(self.flatten(features))
class Deep480pNoiseSmallerGenLayerFancyFilters(Model):
    """SmallerGenLayer variant with 7x7 deconvs and 3x3 refinement convs."""

    class Generator(tf.keras.Model):
        """Latent vector -> 15x20x64 map -> five 7x7 2x upsamplings to 480x640,
        each followed by a pair of half-width 3x3 refinement convs."""

        def __init__(self, config):
            super(Deep480pNoiseSmallerGenLayerFancyFilters.Generator, self).__init__()
            base = 512 // 32  # smallest channel unit of the stack (16)
            self.fc = tf.keras.layers.Dense(15 * 20 * 64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                DeconvBlock(base * 32, 7, 2),
                DeconvBlock(base * 16, 7, 2),
                ConvBlock(base * 4, 3, 1),
                ConvBlock(base * 4, 3, 1),
                DeconvBlock(base * 8, 7, 2),
                ConvBlock(base * 2, 3, 1),
                ConvBlock(base * 2, 3, 1),
                DeconvBlock(base * 4, 7, 2),
                ConvBlock(base * 1, 3, 1),
                ConvBlock(base * 1, 3, 1),
                DeconvBlock(base * 2, 7, 2),
            ]
            # 3 channels for color targets, 1 for grayscale; tanh output range.
            self.final_conv = Conv(3 if config.has_colored_target else 1, 7, 1)

        def call(self, x, training=True):
            out = tf.nn.relu(self.initial_norm(self.fc(x), training=training))
            out = tf.reshape(out, shape=(-1, 15, 20, 64))
            for layer in self.blocks:
                out = layer(out, training=training)
            return tanh(self.final_conv(out))

    class Discriminator(tf.keras.Model):
        """Strided conv stack mapping an image to discriminator logits."""

        def __init__(self, config):
            super(Deep480pNoiseSmallerGenLayerFancyFilters.Discriminator, self).__init__()
            base = 32
            self.blocks = [
                ConvBlock(base * 2, 5, 2),
                ConvBlock(base * 1, 5, 1),
                ConvBlock(base * 4, 5, 2),
                ConvBlock(base * 2, 5, 1),
                ConvBlock(base * 8, 5, 2),
                ConvBlock(base * 4, 5, 1),
                ConvBlock(base * 16, 5, 2),
                ConvBlock(base * 8, 5, 1),
            ]
            self.dropout = Dropout(0.3)
            self.flatten = Flatten()
            self.fc = Dense(config.discriminator_classes, use_bias=False)

        def call(self, x, training=True):
            features = x
            for layer in self.blocks:
                features = layer(features, training=training)
            features = self.dropout(features, training=training)
            return self.fc(self.flatten(features))
class Deep480pNoiseNoDeconv(Model):
    """Variant whose generator upsamples with resize+conv blocks instead of
    transposed convolutions (a common checkerboard-artifact mitigation)."""

    class Generator(tf.keras.Model):
        """Latent vector -> 15x20x64 map -> five resize-based 2x upsamplings
        to explicit target resolutions, ending at 480x640."""

        def __init__(self, config):
            super(Deep480pNoiseNoDeconv.Generator, self).__init__()
            base = 512 // 32  # smallest channel unit of the stack (16)
            self.fc = tf.keras.layers.Dense(15 * 20 * 64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                ResizeBlock((30, 40), base * 32, 5),
                ResizeBlock((60, 80), base * 16, 5),
                ConvBlock(base * 8, 5, 1),
                ResizeBlock((120, 160), base * 8, 5),
                ConvBlock(base * 4, 5, 1),
                ResizeBlock((240, 320), base * 4, 5),
                ConvBlock(base * 2, 5, 1),
                ResizeBlock((480, 640), base * 2, 5),
            ]
            # 3 channels for color targets, 1 for grayscale; tanh output range.
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)

        def call(self, x, training=True):
            out = tf.nn.relu(self.initial_norm(self.fc(x), training=training))
            out = tf.reshape(out, shape=(-1, 15, 20, 64))
            for layer in self.blocks:
                out = layer(out, training=training)
            return tanh(self.final_conv(out))

    class Discriminator(tf.keras.Model):
        """Strided 4x4 conv stack (with 5x5 refinements) producing logits."""

        def __init__(self, config):
            super(Deep480pNoiseNoDeconv.Discriminator, self).__init__()
            base = 32
            self.blocks = [
                ConvBlock(base * 2, 4, 2),
                ConvBlock(base * 1, 5, 1),
                ConvBlock(base * 4, 4, 2),
                ConvBlock(base * 2, 5, 1),
                ConvBlock(base * 8, 4, 2),
                ConvBlock(base * 4, 5, 1),
                ConvBlock(base * 16, 4, 2),
                ConvBlock(base * 8, 5, 1),
            ]
            self.dropout = Dropout(0.3)
            self.flatten = Flatten()
            self.fc = Dense(config.discriminator_classes, use_bias=False)

        def call(self, x, training=True):
            features = x
            for layer in self.blocks:
                features = layer(features, training=training)
            features = self.dropout(features, training=training)
            return self.fc(self.flatten(features))
class Deep480pNoiseResidual(Model):
    """Variant whose generator upsamples with reverse residual blocks."""

    class Generator(tf.keras.Model):
        """Latent vector -> 15x20x64 map -> five residual 2x upsamplings to
        480x640, each followed by a matching-width refinement conv."""

        def __init__(self, config):
            super(Deep480pNoiseResidual.Generator, self).__init__()
            base = (512 // 32) // 2  # half the channel unit of the plain variant (8)
            self.fc = tf.keras.layers.Dense(15 * 20 * 64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                ReverseResidualBlock(base * 32, 5, 2),
                ConvBlock(base * 32, 5, 1),
                ReverseResidualBlock(base * 16, 5, 2),
                ConvBlock(base * 16, 5, 1),
                ReverseResidualBlock(base * 8, 5, 2),
                ConvBlock(base * 8, 5, 1),
                ReverseResidualBlock(base * 4, 5, 2),
                ConvBlock(base * 4, 5, 1),
                ReverseResidualBlock(base * 2, 5, 2),
                ConvBlock(base * 2, 5, 1),
            ]
            # 3 channels for color targets, 1 for grayscale; tanh output range.
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)

        def call(self, x, training=True):
            out = tf.nn.relu(self.initial_norm(self.fc(x), training=training))
            out = tf.reshape(out, shape=(-1, 15, 20, 64))
            for layer in self.blocks:
                out = layer(out, training=training)
            return tanh(self.final_conv(out))

    class Discriminator(tf.keras.Model):
        """Strided conv stack mapping an image to discriminator logits."""

        def __init__(self, config):
            super(Deep480pNoiseResidual.Discriminator, self).__init__()
            base = 32
            self.blocks = [
                ConvBlock(base * 2, 5, 2),
                ConvBlock(base * 1, 5, 1),
                ConvBlock(base * 4, 5, 2),
                ConvBlock(base * 2, 5, 1),
                ConvBlock(base * 8, 5, 2),
                ConvBlock(base * 4, 5, 1),
                ConvBlock(base * 16, 5, 2),
                ConvBlock(base * 8, 5, 1),
            ]
            self.dropout = Dropout(0.3)
            self.flatten = Flatten()
            self.fc = Dense(config.discriminator_classes, use_bias=False)

        def call(self, x, training=True):
            features = x
            for layer in self.blocks:
                features = layer(features, training=training)
            features = self.dropout(features, training=training)
            return self.fc(self.flatten(features))
class Deep480pNoiseMultiscaleDisc(Model):
    """Noise-to-480x640 GAN judged by two discriminators at different scales.

    The discriminator averages the logits of two sub-discriminators: one on
    the full-resolution image and one on a half-resolution copy.
    """

    class Generator(tf.keras.Model):
        """Projects the latent vector to a 15x20x64 map, then upsamples 2x
        five times (interleaved with stride-1 convs) up to 480x640."""

        def __init__(self, config):
            super(Deep480pNoiseMultiscaleDisc.Generator, self).__init__()
            initial_filters = int(512/32)
            self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                DeconvBlock(initial_filters*32, 5, 2),
                DeconvBlock(initial_filters*16, 5, 2),
                ConvBlock(initial_filters*8, 5, 1),
                DeconvBlock(initial_filters*8, 5, 2),
                ConvBlock(initial_filters*4, 5, 1),
                DeconvBlock(initial_filters*4, 5, 2),
                ConvBlock(initial_filters*2, 5, 1),
                DeconvBlock(initial_filters*2, 5, 2),
            ]
            # 3 channels for color targets, 1 for grayscale; tanh output range.
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)

        def call(self, x, training=True):
            x = self.fc(x)
            x = self.initial_norm(x, training=training)
            x = tf.nn.relu(x)
            x = tf.reshape(x, shape=(-1, 15, 20, 64))
            for block in self.blocks:
                x = block(x, training=training)
            return tanh(self.final_conv(x))

    class Discriminator(tf.keras.Model):
        """Averages logits from two sub-discriminators at scales 1.0 and 0.5."""

        class MultiscaleDisc(tf.keras.Model):
            """One discriminator operating on an optionally downscaled input."""

            def __init__(self, config, scaling_factor, dropout):
                super(Deep480pNoiseMultiscaleDisc.Discriminator.MultiscaleDisc, self).__init__()
                assert scaling_factor > 0
                if scaling_factor != 1:
                    # Inputs are 480x640 (height x width) images.
                    # Bug fix: tf.image.resize_nearest_neighbor() takes the
                    # target size as (new_height, new_width); the previous code
                    # passed (width, height) and swapped the image dimensions
                    # for every factor other than 1.
                    new_height = int(480 * scaling_factor)
                    new_width = int(640 * scaling_factor)
                    tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(new_width, new_height))
                    self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (new_height, new_width))
                else:
                    tf.logging.info("Multiscale discriminator operating on regular resolution")
                    self.resize = lambda x: x
                initial_filters = 32//2
                self.blocks = [
                    ConvBlock(initial_filters*2, 4, 2),
                    ConvBlock(initial_filters*1, 4, 1),
                    ConvBlock(initial_filters*4, 4, 2),
                    ConvBlock(initial_filters*2, 4, 1),
                    ConvBlock(initial_filters*8, 4, 2),
                    ConvBlock(initial_filters*4, 4, 1),
                    ConvBlock(initial_filters*16, 4, 2),
                    ConvBlock(initial_filters*8, 4, 1),
                    # NOTE: keep track of image resizing+conv!
                    ConvBlock(initial_filters*32, 4, 2),
                    ConvBlock(initial_filters*16, 4, 1),
                ]
                self.dropout = dropout
                self.flatten = Flatten()
                self.fc = Dense(config.discriminator_classes, use_bias=False)

            def call(self, x, training):
                x = self.resize(x)
                for block in self.blocks:
                    x = block(x, training=training)
                x = self.dropout(x, training=training)
                x = self.flatten(x)
                x = self.fc(x)
                return x

        def __init__(self, config):
            super(Deep480pNoiseMultiscaleDisc.Discriminator, self).__init__()
            # One sub-discriminator per scale, each with its own dropout layer.
            self.discriminators = [Deep480pNoiseMultiscaleDisc.Discriminator.MultiscaleDisc(
                config, factor, Dropout(0.3)) for factor in [1, 0.5]]

        def call(self, x, training=True):
            # Mean of the per-scale logits.
            return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)

        def summary(self, line_length=None, positions=None, print_fn=None):
            super(Deep480pNoiseMultiscaleDisc.Discriminator, self).summary(line_length, positions, print_fn)
            print_fn("\nDetails:")
            for discriminator in self.discriminators:
                discriminator.summary(line_length, positions, print_fn)
class Deep480pNoiseMultiscaleDiscGenLarge(Model):
    """Multiscale-discriminator GAN whose generator overshoots to 960x1280
    and then downsamples its output back to 480x640."""

    class Generator(tf.keras.Model):
        """Latent vector -> 15x20x64 map -> six 2x upsamplings (to 960x1280),
        then a nearest-neighbor downsample to the 480x640 target."""

        def __init__(self, config):
            super(Deep480pNoiseMultiscaleDiscGenLarge.Generator, self).__init__()
            initial_filters = int(512/32/2)
            self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                DeconvBlock(initial_filters*32, 5, 2),
                ConvBlock(initial_filters*16, 5, 1),
                DeconvBlock(initial_filters*16, 5, 2),
                ConvBlock(initial_filters*8, 5, 1),
                DeconvBlock(initial_filters*8, 5, 2),
                ConvBlock(initial_filters*4, 5, 1),
                DeconvBlock(initial_filters*4, 5, 2),
                ConvBlock(initial_filters*2, 5, 1),
                DeconvBlock(initial_filters*2, 5, 2),
                ConvBlock(initial_filters*1, 5, 1),
                DeconvBlock(initial_filters*1, 5, 2),
                ConvBlock(initial_filters*1, 5, 1),
            ]
            # 3 channels for color targets, 1 for grayscale; tanh output range.
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)

        def call(self, x, training=True):
            x = self.fc(x)
            x = self.initial_norm(x, training=training)
            x = tf.nn.relu(x)
            x = tf.reshape(x, shape=(-1, 15, 20, 64))
            for block in self.blocks:
                x = block(x, training=training)
            x = tanh(self.final_conv(x))
            # Downsample the oversized output to the 480x640 (HxW) target.
            return tf.image.resize_nearest_neighbor(x, (480, 640))

    class Discriminator(tf.keras.Model):
        """Averages logits from two sub-discriminators at scales 1.0 and 0.5."""

        class MultiscaleDisc(tf.keras.Model):
            """One discriminator operating on an optionally downscaled input."""

            def __init__(self, config, scaling_factor, dropout):
                super(Deep480pNoiseMultiscaleDiscGenLarge.Discriminator.MultiscaleDisc, self).__init__()
                assert scaling_factor > 0
                if scaling_factor != 1:
                    # Inputs are 480x640 (height x width) images.
                    # Bug fix: tf.image.resize_nearest_neighbor() takes the
                    # target size as (new_height, new_width); the previous code
                    # passed (width, height) and swapped the image dimensions
                    # for every factor other than 1.
                    new_height = int(480 * scaling_factor)
                    new_width = int(640 * scaling_factor)
                    tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(new_width, new_height))
                    self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (new_height, new_width))
                else:
                    tf.logging.info("Multiscale discriminator operating on regular resolution")
                    self.resize = lambda x: x
                initial_filters = 32//2
                self.blocks = [
                    ConvBlock(initial_filters*2, 4, 2),
                    ConvBlock(initial_filters*1, 5, 1),
                    ConvBlock(initial_filters*4, 4, 2),
                    ConvBlock(initial_filters*2, 5, 1),
                    ConvBlock(initial_filters*8, 4, 2),
                    ConvBlock(initial_filters*4, 5, 1),
                    # NOTE: keep track of image resizing+conv!
                    ConvBlock(initial_filters*16, 4, 2),
                    ConvBlock(initial_filters*8, 5, 1),
                ]
                self.dropout = dropout
                self.flatten = Flatten()
                self.fc = Dense(config.discriminator_classes, use_bias=False)

            def call(self, x, training):
                x = self.resize(x)
                for block in self.blocks:
                    x = block(x, training=training)
                x = self.dropout(x, training=training)
                x = self.flatten(x)
                x = self.fc(x)
                return x

        def __init__(self, config):
            super(Deep480pNoiseMultiscaleDiscGenLarge.Discriminator, self).__init__()
            # One sub-discriminator per scale, each with its own dropout layer.
            self.discriminators = [Deep480pNoiseMultiscaleDiscGenLarge.Discriminator.MultiscaleDisc(
                config, factor, Dropout(0.3)) for factor in [1, 0.5]]

        def call(self, x, training=True):
            # Mean of the per-scale logits.
            return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)

        def summary(self, line_length=None, positions=None, print_fn=None):
            super(Deep480pNoiseMultiscaleDiscGenLarge.Discriminator, self).summary(line_length, positions, print_fn)
            print_fn("\nDetails:")
            for discriminator in self.discriminators:
                discriminator.summary(line_length, positions, print_fn)
class Deep480pNoiseMultiscaleDiscShallow(Model):
    """Shallow noise-to-480x640 GAN with a two-scale discriminator.

    Unlike the other variants, the discriminator's resize op is built lazily
    on the first call, from the input's static shape, so the same code adapts
    to whatever resolution it is fed.
    """

    class Generator(tf.keras.Model):
        # Five stride-2 deconvolutions only (no refinement convs): 15x20 -> 480x640.
        def __init__(self, config):
            super(Deep480pNoiseMultiscaleDiscShallow.Generator, self).__init__()
            initial_filters = int(512/32)
            # Dense projection of the latent vector to a 15x20x64 feature map.
            self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                DeconvBlock(initial_filters*32, 5, 2),
                DeconvBlock(initial_filters*16, 5, 2),
                DeconvBlock(initial_filters*8, 5, 2),
                DeconvBlock(initial_filters*4, 5, 2),
                DeconvBlock(initial_filters*2, 5, 2),
            ]
            # 3 channels for color targets, 1 for grayscale; tanh output range.
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)

        def call(self, x, training=True):
            """Map a latent batch to images in [-1, 1]."""
            x = self.fc(x)
            x = self.initial_norm(x, training=training)
            x = tf.nn.relu(x)
            x = tf.reshape(x, shape=(-1, 15, 20, 64))
            for block in self.blocks:
                x = block(x, training=training)
            return tanh(self.final_conv(x))

    class Discriminator(tf.keras.Model):
        # Averages logits from two sub-discriminators at scales 1.0 and 0.5.
        class MultiscaleDisc(tf.keras.Model):
            # One discriminator on an optionally downscaled input; the resize
            # op is constructed lazily from the first input's static shape.
            def __init__(self, config, scaling_factor, dropout):
                super(Deep480pNoiseMultiscaleDiscShallow.Discriminator.MultiscaleDisc, self).__init__()
                assert scaling_factor > 0
                self.scaling_factor = scaling_factor
                # Deferred: built on first call once the input shape is known.
                self.resize = None
                initial_filters = 32//2
                self.blocks = [
                    ConvBlock(initial_filters*2, 5, 2),
                    ConvBlock(initial_filters*4, 5, 2),
                    ConvBlock(initial_filters*8, 5, 2),
                    ConvBlock(initial_filters*16, 5, 2),
                    ConvBlock(initial_filters*32, 5, 2),
                ]
                self.dropout = dropout
                self.flatten = Flatten()
                self.fc = Dense(config.discriminator_classes, use_bias=False)

            def call(self, x, training):
                if self.resize is None:
                    if self.scaling_factor != 1:
                        # Scale the static (height, width) of the first input;
                        # shape is (batch, H, W, C), so [1] is H and [2] is W.
                        size_x = int(x.shape[1].value * self.scaling_factor)
                        size_y = int(x.shape[2].value * self.scaling_factor)
                        tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
                        self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_x, size_y))
                    else:
                        tf.logging.info("Multiscale discriminator operating on regular resolution")
                        self.resize = lambda x: x
                x = self.resize(x)
                for block in self.blocks:
                    x = block(x, training=training)
                x = self.dropout(x, training=training)
                x = self.flatten(x)
                x = self.fc(x)
                return x

        def __init__(self, config):
            super(Deep480pNoiseMultiscaleDiscShallow.Discriminator, self).__init__()
            # One sub-discriminator per scale, each with its own dropout layer.
            self.discriminators = [Deep480pNoiseMultiscaleDiscShallow.Discriminator.MultiscaleDisc(
                config, factor, Dropout(0.3)) for factor in [1, 0.5]]

        def call(self, x, training=True):
            # Mean of the per-scale logits.
            return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)

        def summary(self, line_length=None, positions=None, print_fn=None):
            # Print the wrapper summary, then each sub-discriminator's.
            super(Deep480pNoiseMultiscaleDiscShallow.Discriminator, self).summary(line_length, positions, print_fn)
            print_fn("\nDetails:")
            for discriminator in self.discriminators:
                discriminator.summary(line_length, positions, print_fn)
class Deep480pNoiseResizeMultiscaleDiscShallow(Model):
    """Shallow two-scale GAN whose discriminator downsamples with
    resize-based blocks at fixed target resolutions.
    """

    class Generator(tf.keras.Model):
        # Five stride-2 deconvolutions only (no refinement convs): 15x20 -> 480x640.
        # The commented ResizeBlock stack is an alternative resize-based variant.
        def __init__(self, config):
            super(Deep480pNoiseResizeMultiscaleDiscShallow.Generator, self).__init__()
            initial_filters = int(512/32)
            # Dense projection of the latent vector to a 15x20x64 feature map.
            self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                # ResizeBlock((30, 40), initial_filters*32, 5),
                # ResizeBlock((60, 80), initial_filters*16, 5),
                # ResizeBlock((120, 160), initial_filters*8, 5),
                # ResizeBlock((240, 320), initial_filters*4, 5),
                # ResizeBlock((480, 640), initial_filters*2, 5),
                DeconvBlock(initial_filters*32, 5, 2),
                DeconvBlock(initial_filters*16, 5, 2),
                DeconvBlock(initial_filters*8, 5, 2),
                DeconvBlock(initial_filters*4, 5, 2),
                DeconvBlock(initial_filters*2, 5, 2),
            ]
            # 3 channels for color targets, 1 for grayscale; tanh output range.
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)

        def call(self, x, training=True):
            """Map a latent batch to images in [-1, 1]."""
            x = self.fc(x)
            x = self.initial_norm(x, training=training)
            x = tf.nn.relu(x)
            x = tf.reshape(x, shape=(-1, 15, 20, 64))
            for block in self.blocks:
                x = block(x, training=training)
            return tanh(self.final_conv(x))

    class Discriminator(tf.keras.Model):
        # Averages logits from two sub-discriminators at scales 1.0 and 0.5.
        class MultiscaleDisc(tf.keras.Model):
            # One discriminator on an optionally downscaled input; downsampling
            # happens through ResizeBlocks with hard-coded target resolutions.
            # NOTE(review): those fixed targets ignore scaling_factor — for a
            # scaled-down input the first ResizeBlock may be a no-op or even an
            # upsample; confirm whether that interaction is intended.
            def __init__(self, config, scaling_factor, dropout):
                super(Deep480pNoiseResizeMultiscaleDiscShallow.Discriminator.MultiscaleDisc, self).__init__()
                assert scaling_factor > 0
                self.scaling_factor = scaling_factor
                # Deferred: built on first call once the input shape is known.
                self.resize = None
                initial_filters = 32//2
                self.blocks = [
                    # resize is on smaller resolution so that it fits in memory...
                    ResizeBlock((240, 320), initial_filters*2, 5),
                    ResizeBlock((120, 160), initial_filters*4, 5),
                    ResizeBlock((60, 80), initial_filters*8, 5),
                    ResizeBlock((30, 40), initial_filters*16, 5),
                    ResizeBlock((15, 20), initial_filters*32, 5),
                    # ConvBlock(initial_filters*2, 5, 2),
                    # ConvBlock(initial_filters*4, 5, 2),
                    # ConvBlock(initial_filters*8, 5, 2),
                    # ConvBlock(initial_filters*16, 5, 2),
                    # ConvBlock(initial_filters*32, 5, 2),
                ]
                self.dropout = dropout
                self.flatten = Flatten()
                self.fc = Dense(config.discriminator_classes, use_bias=False)

            def call(self, x, training):
                if self.resize is None:
                    if self.scaling_factor != 1:
                        # Scale the static (height, width) of the first input;
                        # shape is (batch, H, W, C), so [1] is H and [2] is W.
                        size_x = int(x.shape[1].value * self.scaling_factor)
                        size_y = int(x.shape[2].value * self.scaling_factor)
                        tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
                        self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_x, size_y))
                    else:
                        tf.logging.info("Multiscale discriminator operating on regular resolution")
                        self.resize = lambda x: x
                x = self.resize(x)
                for block in self.blocks:
                    x = block(x, training=training)
                x = self.dropout(x, training=training)
                x = self.flatten(x)
                x = self.fc(x)
                return x

        def __init__(self, config):
            super(Deep480pNoiseResizeMultiscaleDiscShallow.Discriminator, self).__init__()
            # One sub-discriminator per scale, each with its own dropout layer.
            self.discriminators = [Deep480pNoiseResizeMultiscaleDiscShallow.Discriminator.MultiscaleDisc(
                config, factor, Dropout(0.3)) for factor in [1, 0.5]]

        def call(self, x, training=True):
            # Mean of the per-scale logits.
            return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)

        def summary(self, line_length=None, positions=None, print_fn=None):
            # Print the wrapper summary, then each sub-discriminator's.
            super(Deep480pNoiseResizeMultiscaleDiscShallow.Discriminator, self).summary(line_length, positions, print_fn)
            print_fn("\nDetails:")
            for discriminator in self.discriminators:
                discriminator.summary(line_length, positions, print_fn)
class Deep60pNoise(Model):
    """Noise-to-60p GAN pair (80x60 output) with a 4x widened filter count.

    The generator projects a latent vector to a 15x20x64 map and upsamples
    twice (15x20 -> 30x40 -> 60x80); the discriminator downsamples three
    times before a dense per-class logit layer.
    """
    class Generator(tf.keras.Model):
        def __init__(self, config):
            super(Deep60pNoise.Generator, self).__init__()
            # Base width 16, scaled 4x for this variant.
            initial_filters = int(512/32) * 4
            # Dense projection of the latent vector to a 15x20x64 feature map.
            self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                DeconvBlock(initial_filters*32, 5, 2),
                ConvBlock(initial_filters*16, 5, 1),
                DeconvBlock(initial_filters*16, 5, 2),
                ConvBlock(initial_filters*8, 5, 1),
            ]
            # Final 1-stride conv to RGB or grayscale depending on config.
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
        def call(self, x, training=True):
            x = self.fc(x)
            x = self.initial_norm(x, training=training)
            x = tf.nn.relu(x)
            x = tf.reshape(x, shape=(-1, 15, 20, 64))
            for block in self.blocks:
                x = block(x, training=training)
            # tanh keeps generated pixel values in [-1, 1].
            return tanh(self.final_conv(x))
    class Discriminator(tf.keras.Model):
        def __init__(self, config):
            super(Deep60pNoise.Discriminator, self).__init__()
            initial_filters = 32 * 4
            self.blocks = [
                ConvBlock(initial_filters*2, 5, 2),
                ConvBlock(initial_filters*1, 5, 1),
                ConvBlock(initial_filters*4, 5, 2),
                ConvBlock(initial_filters*2, 5, 1),
                ConvBlock(initial_filters*8, 5, 2),
                ConvBlock(initial_filters*4, 5, 1),
            ]
            self.dropout = Dropout(0.3)
            self.flatten = Flatten()
            self.fc = Dense(config.discriminator_classes, use_bias=False)
        def call(self, x, training=True):
            for block in self.blocks:
                x = block(x, training=training)
                x = self.dropout(x, training=training)
            x = self.flatten(x)
            x = self.fc(x)
            return x
class Deep60pNoiseDeeper(Model):
    """Deeper 60p variant: extra stride-1 conv stages, 2x width multiplier.

    Same latent -> 80x60 pipeline as Deep60pNoise but with more refinement
    convs per resolution stage in both generator and discriminator.
    """
    class Generator(tf.keras.Model):
        def __init__(self, config):
            super(Deep60pNoiseDeeper.Generator, self).__init__()
            initial_filters = int(512/32) * 2
            # Dense projection of the latent vector to a 15x20x64 feature map.
            self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                ConvBlock(initial_filters*32, 5, 1),
                ConvBlock(initial_filters*16, 5, 1),
                ConvBlock(initial_filters*16, 5, 1),
                DeconvBlock(initial_filters*32, 5, 2),
                ConvBlock(initial_filters*16, 5, 1),
                ConvBlock(initial_filters*16, 5, 1),
                DeconvBlock(initial_filters*16, 5, 2),
                ConvBlock(initial_filters*8, 5, 1),
                ConvBlock(initial_filters*8, 5, 1),
            ]
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
        def call(self, x, training=True):
            x = self.fc(x)
            x = self.initial_norm(x, training=training)
            x = tf.nn.relu(x)
            x = tf.reshape(x, shape=(-1, 15, 20, 64))
            for block in self.blocks:
                x = block(x, training=training)
            # tanh keeps generated pixel values in [-1, 1].
            return tanh(self.final_conv(x))
    class Discriminator(tf.keras.Model):
        def __init__(self, config):
            super(Deep60pNoiseDeeper.Discriminator, self).__init__()
            initial_filters = 32 * 2
            self.blocks = [
                ConvBlock(initial_filters*2, 5, 2),
                ConvBlock(initial_filters*1, 5, 1),
                ConvBlock(initial_filters*1, 5, 1),
                ConvBlock(initial_filters*4, 5, 2),
                ConvBlock(initial_filters*2, 5, 1),
                ConvBlock(initial_filters*2, 5, 1),
                ConvBlock(initial_filters*8, 5, 2),
                ConvBlock(initial_filters*4, 5, 1),
                ConvBlock(initial_filters*4, 5, 1),
            ]
            self.dropout = Dropout(0.3)
            self.flatten = Flatten()
            self.fc = Dense(config.discriminator_classes, use_bias=False)
        def call(self, x, training=True):
            for block in self.blocks:
                x = block(x, training=training)
                x = self.dropout(x, training=training)
            x = self.flatten(x)
            x = self.fc(x)
            return x
class Deep120pNoise(Model):
    """Noise-to-120p GAN pair (160x120 output), three 2x upsampling stages."""
    class Generator(tf.keras.Model):
        def __init__(self, config):
            super(Deep120pNoise.Generator, self).__init__()
            initial_filters = int(512/32) * 4
            # Dense projection of the latent vector to a 15x20x64 feature map.
            self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                DeconvBlock(initial_filters*32, 5, 2),
                # ConvBlock(initial_filters*16, 5, 1),
                DeconvBlock(initial_filters*16, 5, 2),
                ConvBlock(initial_filters*8, 5, 1),
                DeconvBlock(initial_filters*8, 5, 2),
                ConvBlock(initial_filters*4, 5, 1),
            ]
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
        def call(self, x, training=True):
            x = self.fc(x)
            x = self.initial_norm(x, training=training)
            x = tf.nn.relu(x)
            x = tf.reshape(x, shape=(-1, 15, 20, 64))
            for block in self.blocks:
                x = block(x, training=training)
            # tanh keeps generated pixel values in [-1, 1].
            return tanh(self.final_conv(x))
    class Discriminator(tf.keras.Model):
        def __init__(self, config):
            super(Deep120pNoise.Discriminator, self).__init__()
            initial_filters = 32 * 4
            self.blocks = [
                ConvBlock(initial_filters*2, 5, 2),
                ConvBlock(initial_filters*1, 5, 1),
                ConvBlock(initial_filters*4, 5, 2),
                ConvBlock(initial_filters*2, 5, 1),
                ConvBlock(initial_filters*8, 5, 2),
                ConvBlock(initial_filters*4, 5, 1),
            ]
            self.dropout = Dropout(0.3)
            self.flatten = Flatten()
            self.fc = Dense(config.discriminator_classes, use_bias=False)
        def call(self, x, training=True):
            for block in self.blocks:
                x = block(x, training=training)
                x = self.dropout(x, training=training)
            x = self.flatten(x)
            x = self.fc(x)
            return x
class Deep120pNoiseMultiscaleDisc(Model):
    """120p GAN with a two-branch multiscale discriminator.

    Same generator as Deep120pNoise; the discriminator averages logits from
    two copies of the same network, one at full 160x120 resolution and one
    at half resolution.
    NOTE(review): uses TF 1.x-only APIs (tf.logging,
    tf.image.resize_nearest_neighbor) -- confirm TF version.
    """
    class Generator(tf.keras.Model):
        """Latent vector -> 160x120 image via three 2x deconv stages."""
        def __init__(self, config):
            super(Deep120pNoiseMultiscaleDisc.Generator, self).__init__()
            initial_filters = int(512/32) * 4
            # Dense projection of the latent vector to a 15x20x64 feature map.
            self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                DeconvBlock(initial_filters*32, 5, 2),
                # ConvBlock(initial_filters*16, 5, 1),
                DeconvBlock(initial_filters*16, 5, 2),
                ConvBlock(initial_filters*8, 5, 1),
                DeconvBlock(initial_filters*8, 5, 2),
                ConvBlock(initial_filters*4, 5, 1),
            ]
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
        def call(self, x, training=True):
            x = self.fc(x)
            x = self.initial_norm(x, training=training)
            x = tf.nn.relu(x)
            x = tf.reshape(x, shape=(-1, 15, 20, 64))
            for block in self.blocks:
                x = block(x, training=training)
            # tanh keeps generated pixel values in [-1, 1].
            return tanh(self.final_conv(x))
    class Discriminator(tf.keras.Model):
        class MultiscaleDisc(tf.keras.Model):
            """One discriminator branch; input is resized by scaling_factor (160x120 base)."""
            def __init__(self, config, scaling_factor, dropout):
                super(Deep120pNoiseMultiscaleDisc.Discriminator.MultiscaleDisc, self).__init__()
                assert scaling_factor > 0
                if scaling_factor != 1:
                    size_x = int(160 * scaling_factor)
                    size_y = int(120 * scaling_factor)
                    tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
                    self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_x, size_y))
                else:
                    tf.logging.info("Multiscale discriminator operating on regular resolution")
                    self.resize = lambda x: x
                initial_filters = 32//1 * 4
                self.blocks = [
                    ConvBlock(initial_filters*2, 5, 2),
                    ConvBlock(initial_filters*1, 5, 1),
                    ConvBlock(initial_filters*4, 5, 2),
                    ConvBlock(initial_filters*2, 5, 1),
                    ConvBlock(initial_filters*8, 5, 2),
                    ConvBlock(initial_filters*4, 5, 1),
                ]
                self.dropout = dropout
                self.flatten = Flatten()
                self.fc = Dense(config.discriminator_classes, use_bias=False)
            def call(self, x, training):
                x = self.resize(x)
                for block in self.blocks:
                    x = block(x, training=training)
                    x = self.dropout(x, training=training)
                x = self.flatten(x)
                x = self.fc(x)
                return x
        def __init__(self, config):
            super(Deep120pNoiseMultiscaleDisc.Discriminator, self).__init__()
            # Two branches: full resolution and half resolution.
            self.discriminators = [Deep120pNoiseMultiscaleDisc.Discriminator.MultiscaleDisc(
                config, factor, Dropout(0.3)) for factor in [1, 0.5]]
        def call(self, x, training=True):
            # Average the per-branch logits into a single score tensor.
            return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)
        def summary(self, line_length=None, positions=None, print_fn=None):
            """Print the combined summary followed by one per sub-discriminator."""
            if print_fn is None:
                # Bug fix: print_fn defaults to None but was called
                # unconditionally below, raising TypeError.
                print_fn = print
            super(Deep120pNoiseMultiscaleDisc.Discriminator, self).summary(line_length, positions, print_fn)
            print_fn("\nDetails:")
            for discriminator in self.discriminators:
                discriminator.summary(line_length, positions, print_fn)
class Deep120pNoiseDeeper(Model):
    """Deeper 120p variant: two stride-1 refinement convs after each stage."""
    class Generator(tf.keras.Model):
        def __init__(self, config):
            super(Deep120pNoiseDeeper.Generator, self).__init__()
            initial_filters = int(512/32) * 2
            # Dense projection of the latent vector to a 15x20x64 feature map.
            self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                DeconvBlock(initial_filters*32, 5, 2),
                ConvBlock(initial_filters*16, 5, 1),
                ConvBlock(initial_filters*16, 5, 1),
                DeconvBlock(initial_filters*16, 5, 2),
                ConvBlock(initial_filters*8, 5, 1),
                ConvBlock(initial_filters*8, 5, 1),
                DeconvBlock(initial_filters*8, 5, 2),
                ConvBlock(initial_filters*4, 5, 1),
                ConvBlock(initial_filters*4, 5, 1),
            ]
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
        def call(self, x, training=True):
            x = self.fc(x)
            x = self.initial_norm(x, training=training)
            x = tf.nn.relu(x)
            x = tf.reshape(x, shape=(-1, 15, 20, 64))
            for block in self.blocks:
                x = block(x, training=training)
            # tanh keeps generated pixel values in [-1, 1].
            return tanh(self.final_conv(x))
    class Discriminator(tf.keras.Model):
        def __init__(self, config):
            super(Deep120pNoiseDeeper.Discriminator, self).__init__()
            initial_filters = 32 * 2
            self.blocks = [
                ConvBlock(initial_filters*2, 5, 2),
                ConvBlock(initial_filters*1, 5, 1),
                ConvBlock(initial_filters*1, 5, 1),
                ConvBlock(initial_filters*4, 5, 2),
                ConvBlock(initial_filters*2, 5, 1),
                ConvBlock(initial_filters*2, 5, 1),
                ConvBlock(initial_filters*8, 5, 2),
                ConvBlock(initial_filters*4, 5, 1),
                ConvBlock(initial_filters*4, 5, 1),
                ConvBlock(initial_filters*16, 5, 2),
                ConvBlock(initial_filters*8, 5, 1),
                ConvBlock(initial_filters*8, 5, 1),
            ]
            self.dropout = Dropout(0.3)
            self.flatten = Flatten()
            self.fc = Dense(config.discriminator_classes, use_bias=False)
        def call(self, x, training=True):
            for block in self.blocks:
                x = block(x, training=training)
                x = self.dropout(x, training=training)
            x = self.flatten(x)
            x = self.fc(x)
            return x
class Deep120pNoiseShallowGenMultiscaleDisc(Model):
    """120p GAN with a shallow constant-width generator and a two-branch discriminator.

    The generator uses three DeconvBlocks at a fixed 1024 filters; the
    discriminator averages logits from full- and half-resolution branches.
    NOTE(review): uses TF 1.x-only APIs (tf.logging,
    tf.image.resize_nearest_neighbor) -- confirm TF version.
    """
    class Generator(tf.keras.Model):
        """Latent vector -> 160x120 image via three 2x deconv stages at width 1024."""
        def __init__(self, config):
            super(Deep120pNoiseShallowGenMultiscaleDisc.Generator, self).__init__()
            initial_filters = 1024
            # Dense projection of the latent vector to a 15x20x64 feature map.
            self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                DeconvBlock(initial_filters, 5, 2),
                DeconvBlock(initial_filters, 5, 2),
                DeconvBlock(initial_filters, 5, 2),
            ]
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
        def call(self, x, training=True):
            x = self.fc(x)
            x = self.initial_norm(x, training=training)
            x = tf.nn.relu(x)
            x = tf.reshape(x, shape=(-1, 15, 20, 64))
            for block in self.blocks:
                x = block(x, training=training)
            # tanh keeps generated pixel values in [-1, 1].
            return tanh(self.final_conv(x))
    class Discriminator(tf.keras.Model):
        class MultiscaleDisc(tf.keras.Model):
            """One discriminator branch; input is resized by scaling_factor (160x120 base)."""
            def __init__(self, config, scaling_factor, dropout):
                super(Deep120pNoiseShallowGenMultiscaleDisc.Discriminator.MultiscaleDisc, self).__init__()
                assert scaling_factor > 0
                if scaling_factor != 1:
                    size_x = int(160 * scaling_factor)
                    size_y = int(120 * scaling_factor)
                    tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
                    self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_x, size_y))
                else:
                    tf.logging.info("Multiscale discriminator operating on regular resolution")
                    self.resize = lambda x: x
                initial_filters = 32//1 * 4
                self.blocks = [
                    ConvBlock(initial_filters*2, 5, 2),
                    ConvBlock(initial_filters*1, 5, 1),
                    ConvBlock(initial_filters*4, 5, 2),
                    ConvBlock(initial_filters*2, 5, 1),
                    ConvBlock(initial_filters*8, 5, 2),
                    ConvBlock(initial_filters*4, 5, 1),
                ]
                self.dropout = dropout
                self.flatten = Flatten()
                self.fc = Dense(config.discriminator_classes, use_bias=False)
            def call(self, x, training):
                x = self.resize(x)
                for block in self.blocks:
                    x = block(x, training=training)
                    x = self.dropout(x, training=training)
                x = self.flatten(x)
                x = self.fc(x)
                return x
        def __init__(self, config):
            super(Deep120pNoiseShallowGenMultiscaleDisc.Discriminator, self).__init__()
            # Two branches: full resolution and half resolution.
            self.discriminators = [Deep120pNoiseShallowGenMultiscaleDisc.Discriminator.MultiscaleDisc(
                config, factor, Dropout(0.3)) for factor in [1, 0.5]]
        def call(self, x, training=True):
            # Average the per-branch logits into a single score tensor.
            return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)
        def summary(self, line_length=None, positions=None, print_fn=None):
            """Print the combined summary followed by one per sub-discriminator."""
            if print_fn is None:
                # Bug fix: print_fn defaults to None but was called
                # unconditionally below, raising TypeError.
                print_fn = print
            super(Deep120pNoiseShallowGenMultiscaleDisc.Discriminator, self).summary(line_length, positions, print_fn)
            print_fn("\nDetails:")
            for discriminator in self.discriminators:
                discriminator.summary(line_length, positions, print_fn)
class Deep240pNoise(Model):
    """Noise-to-240p GAN pair (320x240 output), four 2x upsampling stages."""
    class Generator(tf.keras.Model):
        def __init__(self, config):
            super(Deep240pNoise.Generator, self).__init__()
            initial_filters = int(512/32) * 2
            # Dense projection of the latent vector to a 15x20x64 feature map.
            self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                DeconvBlock(initial_filters*32, 5, 2),
                ConvBlock(initial_filters*16, 5, 1),
                DeconvBlock(initial_filters*16, 5, 2),
                ConvBlock(initial_filters*8, 5, 1),
                DeconvBlock(initial_filters*8, 5, 2),
                ConvBlock(initial_filters*4, 5, 1),
                DeconvBlock(initial_filters*4, 5, 2),
                ConvBlock(initial_filters*2, 5, 1),
            ]
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
        def call(self, x, training=True):
            x = self.fc(x)
            x = self.initial_norm(x, training=training)
            x = tf.nn.relu(x)
            x = tf.reshape(x, shape=(-1, 15, 20, 64))
            for block in self.blocks:
                x = block(x, training=training)
            # tanh keeps generated pixel values in [-1, 1].
            return tanh(self.final_conv(x))
    class Discriminator(tf.keras.Model):
        def __init__(self, config):
            super(Deep240pNoise.Discriminator, self).__init__()
            initial_filters = 32 * 2
            self.blocks = [
                ConvBlock(initial_filters*2, 5, 2),
                ConvBlock(initial_filters*1, 5, 1),
                ConvBlock(initial_filters*4, 5, 2),
                ConvBlock(initial_filters*2, 5, 1),
                ConvBlock(initial_filters*8, 5, 2),
                ConvBlock(initial_filters*4, 5, 1),
                ConvBlock(initial_filters*16, 5, 2),
                ConvBlock(initial_filters*8, 5, 1),
            ]
            self.dropout = Dropout(0.3)
            self.flatten = Flatten()
            self.fc = Dense(config.discriminator_classes, use_bias=False)
        def call(self, x, training=True):
            for block in self.blocks:
                x = block(x, training=training)
                x = self.dropout(x, training=training)
            x = self.flatten(x)
            x = self.fc(x)
            return x
class Deep240pNoiseMultiscaleDisc(Model):
    """240p GAN with a deeper generator and a two-branch multiscale discriminator.

    Generator: four 2x deconv stages, each followed by two stride-1 convs.
    Discriminator: averages logits from full- (320x240) and half-resolution
    copies of the same downsampling network.
    NOTE(review): uses TF 1.x-only APIs (tf.logging,
    tf.image.resize_nearest_neighbor) -- confirm TF version.
    """
    class Generator(tf.keras.Model):
        """Latent vector -> 320x240 image via four deconv stages + refinement convs."""
        def __init__(self, config):
            super(Deep240pNoiseMultiscaleDisc.Generator, self).__init__()
            initial_filters = int(512/32) * 2
            # Dense projection of the latent vector to a 15x20x64 feature map.
            self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                DeconvBlock(initial_filters*32, 5, 2),
                ConvBlock(initial_filters*16, 5, 1),
                ConvBlock(initial_filters*16, 5, 1),
                DeconvBlock(initial_filters*16, 5, 2),
                ConvBlock(initial_filters*8, 5, 1),
                ConvBlock(initial_filters*8, 5, 1),
                DeconvBlock(initial_filters*8, 5, 2),
                ConvBlock(initial_filters*4, 5, 1),
                ConvBlock(initial_filters*4, 5, 1),
                DeconvBlock(initial_filters*4, 5, 2),
                ConvBlock(initial_filters*2, 5, 1),
                ConvBlock(initial_filters*2, 5, 1),
            ]
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
        def call(self, x, training=True):
            x = self.fc(x)
            x = self.initial_norm(x, training=training)
            x = tf.nn.relu(x)
            x = tf.reshape(x, shape=(-1, 15, 20, 64))
            for block in self.blocks:
                x = block(x, training=training)
            # tanh keeps generated pixel values in [-1, 1].
            return tanh(self.final_conv(x))
    class Discriminator(tf.keras.Model):
        class MultiscaleDisc(tf.keras.Model):
            """One discriminator branch; input is resized by scaling_factor (320x240 base)."""
            def __init__(self, config, scaling_factor, dropout):
                super(Deep240pNoiseMultiscaleDisc.Discriminator.MultiscaleDisc, self).__init__()
                assert scaling_factor > 0
                if scaling_factor != 1:
                    size_x = int(320 * scaling_factor)
                    size_y = int(240 * scaling_factor)
                    tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
                    self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_x, size_y))
                else:
                    tf.logging.info("Multiscale discriminator operating on regular resolution")
                    self.resize = lambda x: x
                initial_filters = 32//2 * 2
                self.blocks = [
                    ConvBlock(initial_filters*2, 5, 2),
                    ConvBlock(initial_filters*2, 5, 1),
                    # ConvBlock(initial_filters*2, 5, 1),
                    ConvBlock(initial_filters*4, 5, 2),
                    ConvBlock(initial_filters*4, 5, 1),
                    # ConvBlock(initial_filters*4, 5, 1),
                    ConvBlock(initial_filters*8, 5, 2),
                    ConvBlock(initial_filters*8, 5, 1),
                    # ConvBlock(initial_filters*8, 5, 1),
                    # NOTE: keep track of image resizing+conv!
                    ConvBlock(initial_filters*16, 5, 2),
                    ConvBlock(initial_filters*16, 5, 1),
                    # ConvBlock(initial_filters*16, 5, 1),
                ]
                self.dropout = dropout
                self.flatten = Flatten()
                self.fc = Dense(config.discriminator_classes, use_bias=False)
            def call(self, x, training):
                x = self.resize(x)
                for block in self.blocks:
                    x = block(x, training=training)
                    x = self.dropout(x, training=training)
                x = self.flatten(x)
                x = self.fc(x)
                return x
        def __init__(self, config):
            super(Deep240pNoiseMultiscaleDisc.Discriminator, self).__init__()
            # Two branches: full resolution and half resolution.
            self.discriminators = [Deep240pNoiseMultiscaleDisc.Discriminator.MultiscaleDisc(
                config, factor, Dropout(0.3)) for factor in [1, 0.5]]
        def call(self, x, training=True):
            # Average the per-branch logits into a single score tensor.
            return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)
        def summary(self, line_length=None, positions=None, print_fn=None):
            """Print the combined summary followed by one per sub-discriminator."""
            if print_fn is None:
                # Bug fix: print_fn defaults to None but was called
                # unconditionally below, raising TypeError.
                print_fn = print
            super(Deep240pNoiseMultiscaleDisc.Discriminator, self).summary(line_length, positions, print_fn)
            print_fn("\nDetails:")
            for discriminator in self.discriminators:
                discriminator.summary(line_length, positions, print_fn)
class Deep480pNoiseMsDiscS2S1(Model):
    """480p GAN: five-stage generator, two-branch disc alternating stride-2/stride-1 convs.

    NOTE(review): uses TF 1.x-only APIs (tf.logging,
    tf.image.resize_nearest_neighbor) -- confirm TF version.
    """
    class Generator(tf.keras.Model):
        """Latent vector -> 640x480 image via five 2x deconv stages with refinement convs."""
        def __init__(self, config):
            super(Deep480pNoiseMsDiscS2S1.Generator, self).__init__()
            initial_filters = int(512/32)*1
            # Dense projection of the latent vector to a 15x20x64 feature map.
            self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                DeconvBlock(initial_filters*32, 5, 2),
                ConvBlock(initial_filters*16, 5, 1),
                DeconvBlock(initial_filters*16, 5, 2),
                ConvBlock(initial_filters*8, 5, 1),
                DeconvBlock(initial_filters*8, 5, 2),
                ConvBlock(initial_filters*4, 5, 1),
                DeconvBlock(initial_filters*4, 5, 2),
                ConvBlock(initial_filters*2, 5, 1),
                DeconvBlock(initial_filters*2, 5, 2),
                ConvBlock(initial_filters*1, 5, 1),
            ]
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
        def call(self, x, training=True):
            x = self.fc(x)
            x = self.initial_norm(x, training=training)
            x = tf.nn.relu(x)
            x = tf.reshape(x, shape=(-1, 15, 20, 64))
            for block in self.blocks:
                x = block(x, training=training)
            # tanh keeps generated pixel values in [-1, 1].
            return tanh(self.final_conv(x))
    class Discriminator(tf.keras.Model):
        class MultiscaleDisc(tf.keras.Model):
            """One discriminator branch; input is resized by scaling_factor (640x480 base)."""
            def __init__(self, config, scaling_factor, dropout):
                super(Deep480pNoiseMsDiscS2S1.Discriminator.MultiscaleDisc, self).__init__()
                assert scaling_factor > 0
                if scaling_factor != 1:
                    size_x = int(640 * scaling_factor)
                    size_y = int(480 * scaling_factor)
                    tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
                    self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_x, size_y))
                else:
                    tf.logging.info("Multiscale discriminator operating on regular resolution")
                    self.resize = lambda x: x
                initial_filters = 32//2*1
                self.blocks = [
                    ConvBlock(initial_filters*2, 4, 2),
                    ConvBlock(initial_filters*1, 4, 1),
                    ConvBlock(initial_filters*4, 4, 2),
                    ConvBlock(initial_filters*2, 4, 1),
                    ConvBlock(initial_filters*8, 4, 2),
                    ConvBlock(initial_filters*4, 4, 1),
                    ConvBlock(initial_filters*16, 4, 2),
                    ConvBlock(initial_filters*8, 4, 1),
                    ConvBlock(initial_filters*32, 4, 2),
                    ConvBlock(initial_filters*16, 4, 1),
                ]
                self.dropout = dropout
                self.flatten = Flatten()
                self.fc = Dense(config.discriminator_classes, use_bias=False)
            def call(self, x, training):
                x = self.resize(x)
                for block in self.blocks:
                    x = block(x, training=training)
                    x = self.dropout(x, training=training)
                x = self.flatten(x)
                x = self.fc(x)
                return x
        def __init__(self, config):
            super(Deep480pNoiseMsDiscS2S1.Discriminator, self).__init__()
            # Two branches: full resolution and half resolution.
            self.discriminators = [Deep480pNoiseMsDiscS2S1.Discriminator.MultiscaleDisc(
                config, factor, Dropout(0.3)) for factor in [1, 0.5]]
        def call(self, x, training=True):
            # Average the per-branch logits into a single score tensor.
            return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)
        def summary(self, line_length=None, positions=None, print_fn=None):
            """Print the combined summary followed by one per sub-discriminator."""
            if print_fn is None:
                # Bug fix: print_fn defaults to None but was called
                # unconditionally below, raising TypeError.
                print_fn = print
            super(Deep480pNoiseMsDiscS2S1.Discriminator, self).summary(line_length, positions, print_fn)
            print_fn("\nDetails:")
            for discriminator in self.discriminators:
                discriminator.summary(line_length, positions, print_fn)
class Deep480pNoiseMsDiscS2(Model):
    """480p GAN: same generator as Deep480pNoiseMsDiscS2S1, stride-2-only discriminator.

    NOTE(review): uses TF 1.x-only APIs (tf.logging,
    tf.image.resize_nearest_neighbor) -- confirm TF version.
    """
    class Generator(tf.keras.Model):
        """Latent vector -> 640x480 image via five 2x deconv stages with refinement convs."""
        def __init__(self, config):
            super(Deep480pNoiseMsDiscS2.Generator, self).__init__()
            initial_filters = int(512/32)*1
            # Dense projection of the latent vector to a 15x20x64 feature map.
            self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                DeconvBlock(initial_filters*32, 5, 2),
                ConvBlock(initial_filters*16, 5, 1),
                DeconvBlock(initial_filters*16, 5, 2),
                ConvBlock(initial_filters*8, 5, 1),
                DeconvBlock(initial_filters*8, 5, 2),
                ConvBlock(initial_filters*4, 5, 1),
                DeconvBlock(initial_filters*4, 5, 2),
                ConvBlock(initial_filters*2, 5, 1),
                DeconvBlock(initial_filters*2, 5, 2),
                ConvBlock(initial_filters*1, 5, 1),
            ]
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
        def call(self, x, training=True):
            x = self.fc(x)
            x = self.initial_norm(x, training=training)
            x = tf.nn.relu(x)
            x = tf.reshape(x, shape=(-1, 15, 20, 64))
            for block in self.blocks:
                x = block(x, training=training)
            # tanh keeps generated pixel values in [-1, 1].
            return tanh(self.final_conv(x))
    class Discriminator(tf.keras.Model):
        class MultiscaleDisc(tf.keras.Model):
            """One discriminator branch; input is resized by scaling_factor (640x480 base)."""
            def __init__(self, config, scaling_factor, dropout):
                super(Deep480pNoiseMsDiscS2.Discriminator.MultiscaleDisc, self).__init__()
                assert scaling_factor > 0
                if scaling_factor != 1:
                    size_x = int(640 * scaling_factor)
                    size_y = int(480 * scaling_factor)
                    tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
                    self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_x, size_y))
                else:
                    tf.logging.info("Multiscale discriminator operating on regular resolution")
                    self.resize = lambda x: x
                initial_filters = 32//2*1
                self.blocks = [
                    ConvBlock(initial_filters*2, 4, 2),
                    ConvBlock(initial_filters*4, 4, 2),
                    ConvBlock(initial_filters*8, 4, 2),
                    ConvBlock(initial_filters*16, 4, 2),
                    ConvBlock(initial_filters*32, 4, 2),
                ]
                self.dropout = dropout
                self.flatten = Flatten()
                self.fc = Dense(config.discriminator_classes, use_bias=False)
            def call(self, x, training):
                x = self.resize(x)
                for block in self.blocks:
                    x = block(x, training=training)
                    x = self.dropout(x, training=training)
                x = self.flatten(x)
                x = self.fc(x)
                return x
        def __init__(self, config):
            super(Deep480pNoiseMsDiscS2.Discriminator, self).__init__()
            # Two branches: full resolution and half resolution.
            self.discriminators = [Deep480pNoiseMsDiscS2.Discriminator.MultiscaleDisc(
                config, factor, Dropout(0.3)) for factor in [1, 0.5]]
        def call(self, x, training=True):
            # Average the per-branch logits into a single score tensor.
            return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)
        def summary(self, line_length=None, positions=None, print_fn=None):
            """Print the combined summary followed by one per sub-discriminator."""
            if print_fn is None:
                # Bug fix: print_fn defaults to None but was called
                # unconditionally below, raising TypeError.
                print_fn = print
            super(Deep480pNoiseMsDiscS2.Discriminator, self).summary(line_length, positions, print_fn)
            print_fn("\nDetails:")
            for discriminator in self.discriminators:
                discriminator.summary(line_length, positions, print_fn)
class Deep480pNoiseMsDiscS2S1Shared(Model):
    """480p GAN whose discriminator branches share their stride-2 conv weights.

    Each MultiscaleDisc computes two logits from the same input: a pure
    stride-2 path, and a path interleaving the *same* stride-2 blocks with
    additional stride-1 blocks (weight sharing between the two paths).
    NOTE(review): uses TF 1.x-only APIs (tf.logging,
    tf.image.resize_nearest_neighbor) -- confirm TF version.
    """
    class Generator(tf.keras.Model):
        """Latent vector -> 640x480 image via five 2x deconv stages."""
        def __init__(self, config):
            super(Deep480pNoiseMsDiscS2S1Shared.Generator, self).__init__()
            initial_filters = int(512/32)
            # Dense projection of the latent vector to a 15x20x64 feature map.
            self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                DeconvBlock(initial_filters*32, 5, 2),
                # ConvBlock(initial_filters*16, 5, 1),
                DeconvBlock(initial_filters*16, 5, 2),
                ConvBlock(initial_filters*8, 5, 1),
                DeconvBlock(initial_filters*8, 5, 2),
                ConvBlock(initial_filters*4, 5, 1),
                DeconvBlock(initial_filters*4, 5, 2),
                ConvBlock(initial_filters*2, 5, 1),
                DeconvBlock(initial_filters*2, 5, 2),
                # ConvBlock(initial_filters*1, 5, 1),
            ]
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
        def call(self, x, training=True):
            x = self.fc(x)
            x = self.initial_norm(x, training=training)
            x = tf.nn.relu(x)
            x = tf.reshape(x, shape=(-1, 15, 20, 64))
            for block in self.blocks:
                x = block(x, training=training)
            # tanh keeps generated pixel values in [-1, 1].
            return tanh(self.final_conv(x))
    class Discriminator(tf.keras.Model):
        class MultiscaleDisc(tf.keras.Model):
            """One branch; emits the mean of a stride-2-only logit and a shared s2+s1 logit."""
            def __init__(self, config, scaling_factor, dropout):
                super(Deep480pNoiseMsDiscS2S1Shared.Discriminator.MultiscaleDisc, self).__init__()
                assert scaling_factor > 0
                if scaling_factor != 1:
                    size_x = int(640 * scaling_factor)
                    size_y = int(480 * scaling_factor)
                    tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
                    self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_x, size_y))
                else:
                    tf.logging.info("Multiscale discriminator operating on regular resolution")
                    self.resize = lambda x: x
                initial_filters = 32//2//2
                self.s2_blocks = [
                    ConvBlock(initial_filters*2, 5, 2),
                    ConvBlock(initial_filters*4, 5, 2),
                    ConvBlock(initial_filters*8, 5, 2),
                    ConvBlock(initial_filters*16, 5, 2),
                    # ConvBlock(initial_filters*32, 5, 2),
                ]
                self.s1_blocks = [
                    ConvBlock(initial_filters*2, 5, 1),
                    ConvBlock(initial_filters*4, 5, 1),
                    ConvBlock(initial_filters*8, 5, 1),
                    ConvBlock(initial_filters*16, 5, 1),
                    # ConvBlock(initial_filters*32, 5, 1),
                ]
                self.dropout = dropout
                self.flatten = Flatten()
                self.s2_fc = Dense(config.discriminator_classes, use_bias=False)
                self.s2s1_fc = Dense(config.discriminator_classes, use_bias=False)
            def call(self, x, training):
                x = self.resize(x)
                # Path 1: stride-2 blocks only.
                s2 = x
                for block in self.s2_blocks:
                    s2 = block(s2, training=training)
                    s2 = self.dropout(s2, training=training)
                s2 = self.flatten(s2)
                s2 = self.s2_fc(s2)
                # Path 2: the SAME stride-2 blocks (shared weights) interleaved
                # with the stride-1 refinement blocks.
                s2s1 = x
                for i in range(len(self.s1_blocks)):
                    s2s1 = self.s2_blocks[i](s2s1, training=training)
                    s2s1 = self.dropout(s2s1, training=training)
                    s2s1 = self.s1_blocks[i](s2s1, training=training)
                    s2s1 = self.dropout(s2s1, training=training)
                s2s1 = self.flatten(s2s1)
                s2s1 = self.s2s1_fc(s2s1)
                return tf.reduce_mean(tf.concat([s2, s2s1], axis=-1), axis=-1, keepdims=True)
        def __init__(self, config):
            super(Deep480pNoiseMsDiscS2S1Shared.Discriminator, self).__init__()
            # Two branches: full resolution and half resolution.
            self.discriminators = [Deep480pNoiseMsDiscS2S1Shared.Discriminator.MultiscaleDisc(
                config, factor, Dropout(0.3)) for factor in [1, 0.5]]
        def call(self, x, training=True):
            # Average the per-branch logits into a single score tensor.
            return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)
        def summary(self, line_length=None, positions=None, print_fn=None):
            """Print the combined summary followed by one per sub-discriminator."""
            if print_fn is None:
                # Bug fix: print_fn defaults to None but was called
                # unconditionally below, raising TypeError.
                print_fn = print
            super(Deep480pNoiseMsDiscS2S1Shared.Discriminator, self).summary(line_length, positions, print_fn)
            print_fn("\nDetails:")
            for discriminator in self.discriminators:
                discriminator.summary(line_length, positions, print_fn)
class Deep480pNoiseS2S1Shared(Model):
    """480p GAN, single-scale discriminator with shared stride-2/stride-1 paths.

    Non-multiscale counterpart of Deep480pNoiseMsDiscS2S1Shared: the
    discriminator computes two logits over the same input -- one from the
    stride-2 blocks alone and one interleaving those SAME blocks (shared
    weights) with stride-1 blocks -- and averages them.
    """
    class Generator(tf.keras.Model):
        def __init__(self, config):
            super(Deep480pNoiseS2S1Shared.Generator, self).__init__()
            initial_filters = int(512/32)
            # Dense projection of the latent vector to a 15x20x64 feature map.
            self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                DeconvBlock(initial_filters*32, 5, 2),
                # ConvBlock(initial_filters*16, 5, 1),
                DeconvBlock(initial_filters*16, 5, 2),
                ConvBlock(initial_filters*8, 5, 1),
                DeconvBlock(initial_filters*8, 5, 2),
                ConvBlock(initial_filters*4, 5, 1),
                DeconvBlock(initial_filters*4, 5, 2),
                ConvBlock(initial_filters*2, 5, 1),
                DeconvBlock(initial_filters*2, 5, 2),
                # ConvBlock(initial_filters*1, 5, 1),
            ]
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
        def call(self, x, training=True):
            x = self.fc(x)
            x = self.initial_norm(x, training=training)
            x = tf.nn.relu(x)
            x = tf.reshape(x, shape=(-1, 15, 20, 64))
            for block in self.blocks:
                x = block(x, training=training)
            # tanh keeps generated pixel values in [-1, 1].
            return tanh(self.final_conv(x))
    class Discriminator(tf.keras.Model):
        def __init__(self, config):
            super(Deep480pNoiseS2S1Shared.Discriminator, self).__init__()
            initial_filters = 32//2
            self.s2_blocks = [
                ConvBlock(initial_filters*2, 4, 2),
                ConvBlock(initial_filters*4, 4, 2),
                ConvBlock(initial_filters*8, 4, 2),
                ConvBlock(initial_filters*16, 4, 2),
                # ConvBlock(initial_filters*32, 4, 2),
            ]
            self.s1_blocks = [
                ConvBlock(initial_filters*2, 4, 1),
                ConvBlock(initial_filters*4, 4, 1),
                ConvBlock(initial_filters*8, 4, 1),
                ConvBlock(initial_filters*16, 4, 1),
                # ConvBlock(initial_filters*32, 4, 1),
            ]
            self.dropout = Dropout(0.3)
            self.flatten = Flatten()
            self.s2_fc = Dense(config.discriminator_classes, use_bias=False)
            self.s2s1_fc = Dense(config.discriminator_classes, use_bias=False)
        def call(self, x, training):
            # Path 1: stride-2 blocks only.
            s2 = x
            for block in self.s2_blocks:
                s2 = block(s2, training=training)
                s2 = self.dropout(s2, training=training)
            s2 = self.flatten(s2)
            s2 = self.s2_fc(s2)
            # Path 2: the SAME stride-2 blocks (shared weights) interleaved
            # with the stride-1 refinement blocks.
            s2s1 = x
            for i in range(len(self.s1_blocks)):
                s2s1 = self.s2_blocks[i](s2s1, training=training)
                s2s1 = self.dropout(s2s1, training=training)
                s2s1 = self.s1_blocks[i](s2s1, training=training)
                s2s1 = self.dropout(s2s1, training=training)
            s2s1 = self.flatten(s2s1)
            s2s1 = self.s2s1_fc(s2s1)
            return tf.reduce_mean(tf.concat([s2, s2s1], axis=-1), axis=-1, keepdims=True)
class Deep480pNoiseMsDiscS2S1Modified(Model):
    """480p GAN variant: two-branch discriminator using 7x7 kernels.

    NOTE(review): uses TF 1.x-only APIs (tf.logging,
    tf.image.resize_nearest_neighbor) -- confirm TF version.
    """
    class Generator(tf.keras.Model):
        """Latent vector -> 640x480 image via five 2x deconv stages."""
        def __init__(self, config):
            super(Deep480pNoiseMsDiscS2S1Modified.Generator, self).__init__()
            initial_filters = int(512/32)
            # Dense projection of the latent vector to a 15x20x64 feature map.
            self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            self.blocks = [
                DeconvBlock(initial_filters*32, 5, 2),
                # ConvBlock(initial_filters*16, 5, 1),
                DeconvBlock(initial_filters*16, 5, 2),
                ConvBlock(initial_filters*8, 5, 1),
                DeconvBlock(initial_filters*8, 5, 2),
                ConvBlock(initial_filters*4, 5, 1),
                DeconvBlock(initial_filters*4, 5, 2),
                ConvBlock(initial_filters*2, 5, 1),
                DeconvBlock(initial_filters*2, 5, 2),
                # ConvBlock(initial_filters*1, 5, 1),
            ]
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)
        def call(self, x, training=True):
            x = self.fc(x)
            x = self.initial_norm(x, training=training)
            x = tf.nn.relu(x)
            x = tf.reshape(x, shape=(-1, 15, 20, 64))
            for block in self.blocks:
                x = block(x, training=training)
            # tanh keeps generated pixel values in [-1, 1].
            return tanh(self.final_conv(x))
    class Discriminator(tf.keras.Model):
        class MultiscaleDisc(tf.keras.Model):
            """One discriminator branch with 7x7 convs; input resized by scaling_factor."""
            def __init__(self, config, scaling_factor, dropout):
                super(Deep480pNoiseMsDiscS2S1Modified.Discriminator.MultiscaleDisc, self).__init__()
                assert scaling_factor > 0
                if scaling_factor != 1:
                    size_x = int(640 * scaling_factor)
                    size_y = int(480 * scaling_factor)
                    tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
                    self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_x, size_y))
                else:
                    tf.logging.info("Multiscale discriminator operating on regular resolution")
                    self.resize = lambda x: x
                initial_filters = 32//2
                self.blocks = [
                    ConvBlock(initial_filters*2, 7, 2),
                    ConvBlock(initial_filters*2, 7, 1),
                    ConvBlock(initial_filters*4, 7, 2),
                    ConvBlock(initial_filters*4, 7, 1),
                    ConvBlock(initial_filters*8, 7, 2),
                    ConvBlock(initial_filters*8, 7, 1),
                    ConvBlock(initial_filters*16, 7, 2),
                    ConvBlock(initial_filters*16, 7, 1),
                    # ConvBlock(initial_filters*32, 7, 2),
                    # ConvBlock(initial_filters*16, 7, 1),
                ]
                self.dropout = dropout
                self.flatten = Flatten()
                self.fc = Dense(config.discriminator_classes, use_bias=False)
            def call(self, x, training):
                x = self.resize(x)
                for block in self.blocks:
                    x = block(x, training=training)
                    x = self.dropout(x, training=training)
                x = self.flatten(x)
                x = self.fc(x)
                return x
        def __init__(self, config):
            super(Deep480pNoiseMsDiscS2S1Modified.Discriminator, self).__init__()
            # Two branches: full resolution and half resolution.
            self.discriminators = [Deep480pNoiseMsDiscS2S1Modified.Discriminator.MultiscaleDisc(
                config, factor, Dropout(0.3)) for factor in [1, 0.5]]
        def call(self, x, training=True):
            # Average the per-branch logits into a single score tensor.
            return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)
        def summary(self, line_length=None, positions=None, print_fn=None):
            """Print the combined summary followed by one per sub-discriminator."""
            if print_fn is None:
                # Bug fix: print_fn defaults to None but was called
                # unconditionally below, raising TypeError.
                print_fn = print
            super(Deep480pNoiseMsDiscS2S1Modified.Discriminator, self).summary(line_length, positions, print_fn)
            print_fn("\nDetails:")
            for discriminator in self.discriminators:
                discriminator.summary(line_length, positions, print_fn)
class Deep480pNoisePatch(Model):
    """480p GAN with a deep deconv generator and a PatchGAN-style critic."""

    class Generator(tf.keras.Model):
        """Dense stem plus a deconv/conv pyramid producing 480x640 images."""

        def __init__(self, config):
            super(Deep480pNoisePatch.Generator, self).__init__()
            base = int(512/32)  # 16 filters as the base width
            self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            # Five stride-2 upsampling stages interleaved with stride-1
            # refinement convs, channel count halving toward the output.
            self.blocks = [
                DeconvBlock(base*32, 5, 2),
                DeconvBlock(base*16, 5, 2),
                ConvBlock(base*8, 5, 1),
                DeconvBlock(base*8, 5, 2),
                ConvBlock(base*4, 5, 1),
                DeconvBlock(base*4, 5, 2),
                ConvBlock(base*2, 5, 1),
                DeconvBlock(base*2, 5, 2),
            ]
            out_channels = 3 if config.has_colored_target else 1
            self.final_conv = Conv(out_channels, 5, 1)

        def call(self, x, training=True):
            """Project noise to a 15x20x64 map and upsample to an image."""
            h = tf.nn.relu(self.initial_norm(self.fc(x), training=training))
            h = tf.reshape(h, shape=(-1, 15, 20, 64))
            for layer in self.blocks:
                h = layer(h, training=training)
            return tanh(self.final_conv(h))

    class Discriminator(tf.keras.Model):
        """PatchGAN-style critic: strided convs, then spatial mean of scores."""

        def __init__(self, config):
            super(Deep480pNoisePatch.Discriminator, self).__init__()
            del config  # unused; kept so all discriminators share a signature
            filters = 32
            # Four stride-2 stages: 64 -> 128 -> 256 -> 512 channels.
            self.blocks = [ConvBlock(filters*m, 4, 2) for m in (2, 4, 8, 16)]
            self.dropout = Dropout(0.3)
            self.final_conv = Conv(1, 4, 1)

        def call(self, x, training=True):
            """Return one realness score per input by averaging patch logits."""
            h = x
            for layer in self.blocks:
                h = layer(h, training=training)
            h = self.dropout(h, training=training)
            # Mean over the spatial dimensions collapses the patch grid.
            return tf.reduce_mean(self.final_conv(h), axis=[1, 2])
class Deep480pNoiseMsDiscS2EvenG(Model):
    """480p GAN: constant-width generator plus a multiscale discriminator."""

    class Generator(tf.keras.Model):
        """Generator with an even 64-filter width through every stage."""

        def __init__(self, config):
            super(Deep480pNoiseMsDiscS2EvenG.Generator, self).__init__()
            initial_filters = 64
            self.fc = tf.keras.layers.Dense(15*20*64, use_bias=False)
            self.initial_norm = tf.keras.layers.BatchNormalization()
            # Five stride-2 upsampling stages with stride-1 refinements;
            # unlike the other variants, the channel count stays constant.
            self.blocks = [
                DeconvBlock(initial_filters*1, 5, 2),
                DeconvBlock(initial_filters*1, 5, 2),
                ConvBlock(initial_filters*1, 5, 1),
                DeconvBlock(initial_filters*1, 5, 2),
                ConvBlock(initial_filters*1, 5, 1),
                DeconvBlock(initial_filters*1, 5, 2),
                ConvBlock(initial_filters*1, 5, 1),
                DeconvBlock(initial_filters*1, 5, 2),
            ]
            self.final_conv = Conv(3 if config.has_colored_target else 1, 5, 1)

        def call(self, x, training=True):
            """Project noise to a 15x20x64 map and upsample to an image in [-1, 1]."""
            x = self.fc(x)
            x = self.initial_norm(x, training=training)
            x = tf.nn.relu(x)
            x = tf.reshape(x, shape=(-1, 15, 20, 64))
            for block in self.blocks:
                x = block(x, training=training)
            return tanh(self.final_conv(x))

    class Discriminator(tf.keras.Model):
        """Multiscale discriminator averaging logits across resolutions."""

        class MultiscaleDisc(tf.keras.Model):
            """Single-scale discriminator on a (possibly resized) input copy.

            Args:
                config: model config; only `discriminator_classes` is read.
                scaling_factor: relative input resolution (1 = native 640x480).
                dropout: Dropout layer applied after the conv stack.
            """
            def __init__(self, config, scaling_factor, dropout):
                super(Deep480pNoiseMsDiscS2EvenG.Discriminator.MultiscaleDisc, self).__init__()
                assert scaling_factor > 0
                if scaling_factor != 1:
                    size_x = int(640 * scaling_factor)
                    size_y = int(480 * scaling_factor)
                    tf.logging.info("Multiscale discriminator operating on resolution: {}x{}".format(size_x, size_y))
                    # NOTE(review): resize_nearest_neighbor expects size=(new_height,
                    # new_width); (size_x, size_y) here is (width, height) for a
                    # 640x480 image — confirm the argument order is intentional.
                    self.resize = lambda x: tf.image.resize_nearest_neighbor(x, (size_x, size_y))
                else:
                    tf.logging.info("Multiscale discriminator operating on regular resolution")
                    self.resize = lambda x: x  # identity at native resolution
                initial_filters = 32//2
                # Five stride-2 stages doubling the channel count each time.
                self.blocks = [
                    ConvBlock(initial_filters*2, 5, 2),
                    ConvBlock(initial_filters*4, 5, 2),
                    ConvBlock(initial_filters*8, 5, 2),
                    ConvBlock(initial_filters*16, 5, 2),
                    ConvBlock(initial_filters*32, 5, 2),
                ]
                self.dropout = dropout
                self.flatten = Flatten()
                self.fc = Dense(config.discriminator_classes, use_bias=False)

            def call(self, x, training):
                """Resize, run the conv stack, flatten, and project to logits."""
                x = self.resize(x)
                for block in self.blocks:
                    x = block(x, training=training)
                x = self.dropout(x, training=training)
                x = self.flatten(x)
                x = self.fc(x)
                return x

        def __init__(self, config):
            super(Deep480pNoiseMsDiscS2EvenG.Discriminator, self).__init__()
            # One sub-discriminator at full and one at half resolution; each
            # gets its own Dropout(0.3) layer, no weight sharing.
            self.discriminators = [Deep480pNoiseMsDiscS2EvenG.Discriminator.MultiscaleDisc(
                config, factor, Dropout(0.3)) for factor in [1, 0.5]]

        def call(self, x, training=True):
            """Average the per-scale logits into a single prediction tensor."""
            return tf.reduce_mean(tf.concat([disc(x, training) for disc in self.discriminators], axis=-1), axis=-1)

        def summary(self, line_length=None, positions=None, print_fn=None):
            """Print the top-level summary, then each sub-discriminator's.

            Fix: `print_fn` defaults to None (Keras substitutes `print`
            internally), so calling it directly below used to raise
            TypeError when `summary()` was invoked without `print_fn`.
            """
            super(Deep480pNoiseMsDiscS2EvenG.Discriminator, self).summary(line_length, positions, print_fn)
            if print_fn is None:
                print_fn = print
            print_fn("\nDetails:")
            for discriminator in self.discriminators:
                discriminator.summary(line_length, positions, print_fn)
| [
"deep_model_blocks.ConvBlock",
"tensorflow.image.resize_nearest_neighbor",
"deep_model_blocks.DeconvBlock",
"tensorflow.nn.relu",
"tensorflow.logging.info",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.BatchNormalization",
"deep_model_blocks.ResizeBlock",
"tensorflow.concat",
"tenso... | [((862, 913), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (883, 913), True, 'import tensorflow as tf\n'), ((936, 972), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (970, 972), True, 'import tensorflow as tf\n'), ((1513, 1562), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (1517, 1562), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((1683, 1696), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (1693, 1696), True, 'import tensorflow as tf\n'), ((1707, 1744), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (1717, 1744), True, 'import tensorflow as tf\n'), ((2443, 2455), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (2450, 2455), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((2477, 2486), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2484, 2486), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((2503, 2554), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (2508, 2554), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((3004, 3055), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (3025, 3055), True, 
'import tensorflow as tf\n'), ((3078, 3114), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (3112, 3114), True, 'import tensorflow as tf\n'), ((3655, 3704), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(7)', '(1)'], {}), '(3 if config.has_colored_target else 1, 7, 1)\n', (3659, 3704), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((3825, 3838), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (3835, 3838), True, 'import tensorflow as tf\n'), ((3849, 3886), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (3859, 3886), True, 'import tensorflow as tf\n'), ((4597, 4609), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (4604, 4609), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((4631, 4640), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4638, 4640), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((4657, 4708), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (4662, 4708), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((5181, 5279), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(self.fc_shape[0] * self.fc_shape[1] * self.fc_shape[2])'], {'use_bias': '(False)'}), '(self.fc_shape[0] * self.fc_shape[1] * self.fc_shape[2\n ], use_bias=False)\n', (5202, 5279), True, 'import tensorflow as tf\n'), ((5297, 5333), 'tensorflow.keras.layers.BatchNormalization', 
'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (5331, 5333), True, 'import tensorflow as tf\n'), ((5723, 5772), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(7)', '(1)'], {}), '(3 if config.has_colored_target else 1, 7, 1)\n', (5727, 5772), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((5893, 5906), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (5903, 5906), True, 'import tensorflow as tf\n'), ((5917, 5996), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, self.fc_shape[0], self.fc_shape[1], self.fc_shape[2])'}), '(x, shape=(-1, self.fc_shape[0], self.fc_shape[1], self.fc_shape[2]))\n', (5927, 5996), True, 'import tensorflow as tf\n'), ((6705, 6717), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (6712, 6717), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((6739, 6748), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6746, 6748), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((6765, 6816), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (6770, 6816), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((7271, 7322), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (7292, 7322), True, 'import tensorflow as tf\n'), ((7345, 7381), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (7379, 7381), True, 'import tensorflow as 
tf\n'), ((8155, 8204), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (8159, 8204), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((8325, 8338), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (8335, 8338), True, 'import tensorflow as tf\n'), ((8349, 8386), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (8359, 8386), True, 'import tensorflow as tf\n'), ((9100, 9112), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (9107, 9112), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((9134, 9143), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (9141, 9143), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((9160, 9211), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (9165, 9211), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((9690, 9741), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (9711, 9741), True, 'import tensorflow as tf\n'), ((9764, 9800), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (9798, 9800), True, 'import tensorflow as tf\n'), ((10574, 10623), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(7)', '(1)'], {}), '(3 if config.has_colored_target else 1, 7, 1)\n', (10578, 
10623), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((10744, 10757), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (10754, 10757), True, 'import tensorflow as tf\n'), ((10768, 10805), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (10778, 10805), True, 'import tensorflow as tf\n'), ((11531, 11543), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (11538, 11543), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((11565, 11574), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (11572, 11574), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((11591, 11642), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (11596, 11642), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((12084, 12135), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (12105, 12135), True, 'import tensorflow as tf\n'), ((12158, 12194), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (12192, 12194), True, 'import tensorflow as tf\n'), ((12776, 12825), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (12780, 12825), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, 
ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((12946, 12959), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (12956, 12959), True, 'import tensorflow as tf\n'), ((12970, 13007), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (12980, 13007), True, 'import tensorflow as tf\n'), ((13714, 13726), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (13721, 13726), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((13748, 13757), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (13755, 13757), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((13774, 13825), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (13779, 13825), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((14270, 14321), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (14291, 14321), True, 'import tensorflow as tf\n'), ((14344, 14380), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (14378, 14380), True, 'import tensorflow as tf\n'), ((14972, 15021), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (14976, 15021), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((15142, 15155), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (15152, 
15155), True, 'import tensorflow as tf\n'), ((15166, 15203), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (15176, 15203), True, 'import tensorflow as tf\n'), ((15910, 15922), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (15917, 15922), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((15944, 15953), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (15951, 15953), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((15970, 16021), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (15975, 16021), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((16475, 16526), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (16496, 16526), True, 'import tensorflow as tf\n'), ((16549, 16585), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (16583, 16585), True, 'import tensorflow as tf\n'), ((18236, 18285), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (18240, 18285), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((18406, 18419), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (18416, 18419), True, 'import tensorflow as tf\n'), ((18430, 18467), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 
15, 20, 64))\n', (18440, 18467), True, 'import tensorflow as tf\n'), ((21808, 21859), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (21829, 21859), True, 'import tensorflow as tf\n'), ((21882, 21918), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (21916, 21918), True, 'import tensorflow as tf\n'), ((22550, 22599), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (22554, 22599), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((22720, 22733), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (22730, 22733), True, 'import tensorflow as tf\n'), ((22744, 22781), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (22754, 22781), True, 'import tensorflow as tf\n'), ((22903, 22950), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['x', '(480, 640)'], {}), '(x, (480, 640))\n', (22935, 22950), True, 'import tensorflow as tf\n'), ((25597, 25648), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (25618, 25648), True, 'import tensorflow as tf\n'), ((25671, 25707), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (25705, 25707), True, 'import tensorflow as tf\n'), ((26009, 26058), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (26013, 26058), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, 
ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((26179, 26192), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (26189, 26192), True, 'import tensorflow as tf\n'), ((26203, 26240), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (26213, 26240), True, 'import tensorflow as tf\n'), ((28945, 28996), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (28966, 28996), True, 'import tensorflow as tf\n'), ((29019, 29055), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (29053, 29055), True, 'import tensorflow as tf\n'), ((29651, 29700), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (29655, 29700), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((29821, 29834), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (29831, 29834), True, 'import tensorflow as tf\n'), ((29845, 29882), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (29855, 29882), True, 'import tensorflow as tf\n'), ((32936, 32987), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (32957, 32987), True, 'import tensorflow as tf\n'), ((33010, 33046), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (33044, 33046), True, 'import tensorflow as tf\n'), ((33298, 33347), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 
1, 5, 1)\n', (33302, 33347), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((33468, 33481), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (33478, 33481), True, 'import tensorflow as tf\n'), ((33492, 33529), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (33502, 33529), True, 'import tensorflow as tf\n'), ((34137, 34149), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (34144, 34149), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((34171, 34180), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (34178, 34180), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((34197, 34248), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (34202, 34248), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((34688, 34739), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (34709, 34739), True, 'import tensorflow as tf\n'), ((34762, 34798), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (34796, 34798), True, 'import tensorflow as tf\n'), ((35285, 35334), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (35289, 35334), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, 
ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((35455, 35468), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (35465, 35468), True, 'import tensorflow as tf\n'), ((35479, 35516), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (35489, 35516), True, 'import tensorflow as tf\n'), ((36268, 36280), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (36275, 36280), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((36302, 36311), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (36309, 36311), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((36328, 36379), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (36333, 36379), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((36809, 36860), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (36830, 36860), True, 'import tensorflow as tf\n'), ((36883, 36919), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (36917, 36919), True, 'import tensorflow as tf\n'), ((37268, 37317), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (37272, 37317), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((37438, 37451), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', 
(37448, 37451), True, 'import tensorflow as tf\n'), ((37462, 37499), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (37472, 37499), True, 'import tensorflow as tf\n'), ((38108, 38120), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (38115, 38120), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((38142, 38151), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (38149, 38151), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((38168, 38219), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (38173, 38219), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((38677, 38728), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (38698, 38728), True, 'import tensorflow as tf\n'), ((38751, 38787), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (38785, 38787), True, 'import tensorflow as tf\n'), ((39136, 39185), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (39140, 39185), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((39306, 39319), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (39316, 39319), True, 'import tensorflow as tf\n'), ((39330, 39367), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, 
shape=(-1, 15, 20, 64))\n', (39340, 39367), True, 'import tensorflow as tf\n'), ((41884, 41935), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (41905, 41935), True, 'import tensorflow as tf\n'), ((41958, 41994), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (41992, 41994), True, 'import tensorflow as tf\n'), ((42480, 42529), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (42484, 42529), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((42650, 42663), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (42660, 42663), True, 'import tensorflow as tf\n'), ((42674, 42711), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (42684, 42711), True, 'import tensorflow as tf\n'), ((43604, 43616), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (43611, 43616), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((43638, 43647), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (43645, 43647), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((43664, 43715), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (43669, 43715), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((44181, 44232), 'tensorflow.keras.layers.Dense', 
'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (44202, 44232), True, 'import tensorflow as tf\n'), ((44255, 44291), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (44289, 44291), True, 'import tensorflow as tf\n'), ((44489, 44538), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (44493, 44538), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((44659, 44672), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (44669, 44672), True, 'import tensorflow as tf\n'), ((44683, 44720), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (44693, 44720), True, 'import tensorflow as tf\n'), ((47265, 47316), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (47286, 47316), True, 'import tensorflow as tf\n'), ((47339, 47375), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (47373, 47375), True, 'import tensorflow as tf\n'), ((47817, 47866), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (47821, 47866), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((47987, 48000), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (47997, 48000), True, 'import tensorflow as tf\n'), ((48011, 48048), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, 
shape=(-1, 15, 20, 64))\n', (48021, 48048), True, 'import tensorflow as tf\n'), ((48751, 48763), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (48758, 48763), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((48785, 48794), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (48792, 48794), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((48811, 48862), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (48816, 48862), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((49320, 49371), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (49341, 49371), True, 'import tensorflow as tf\n'), ((49394, 49430), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (49428, 49430), True, 'import tensorflow as tf\n'), ((50057, 50106), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (50061, 50106), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((50227, 50240), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (50237, 50240), True, 'import tensorflow as tf\n'), ((50251, 50288), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (50261, 50288), True, 'import tensorflow as tf\n'), ((53178, 53229), 'tensorflow.keras.layers.Dense', 
'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (53199, 53229), True, 'import tensorflow as tf\n'), ((53252, 53288), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (53286, 53288), True, 'import tensorflow as tf\n'), ((53825, 53874), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (53829, 53874), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((53995, 54008), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (54005, 54008), True, 'import tensorflow as tf\n'), ((54019, 54056), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (54029, 54056), True, 'import tensorflow as tf\n'), ((56767, 56818), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (56788, 56818), True, 'import tensorflow as tf\n'), ((56841, 56877), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (56875, 56877), True, 'import tensorflow as tf\n'), ((57414, 57463), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (57418, 57463), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((57584, 57597), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (57594, 57597), True, 'import tensorflow as tf\n'), ((57608, 57645), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, 
shape=(-1, 15, 20, 64))\n', (57618, 57645), True, 'import tensorflow as tf\n'), ((60120, 60171), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (60141, 60171), True, 'import tensorflow as tf\n'), ((60194, 60230), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (60228, 60230), True, 'import tensorflow as tf\n'), ((60771, 60820), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (60775, 60820), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((60941, 60954), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (60951, 60954), True, 'import tensorflow as tf\n'), ((60965, 61002), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (60975, 61002), True, 'import tensorflow as tf\n'), ((64329, 64380), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (64350, 64380), True, 'import tensorflow as tf\n'), ((64403, 64439), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (64437, 64439), True, 'import tensorflow as tf\n'), ((64980, 65029), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (64984, 65029), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((65150, 65163), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (65160, 65163), True, 
'import tensorflow as tf\n'), ((65174, 65211), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (65184, 65211), True, 'import tensorflow as tf\n'), ((66068, 66080), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (66075, 66080), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((66102, 66111), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (66109, 66111), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((66131, 66182), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (66136, 66182), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((66204, 66255), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (66209, 66255), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((67152, 67203), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (67173, 67203), True, 'import tensorflow as tf\n'), ((67226, 67262), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (67260, 67262), True, 'import tensorflow as tf\n'), ((67803, 67852), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (67807, 67852), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, 
ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((67973, 67986), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (67983, 67986), True, 'import tensorflow as tf\n'), ((67997, 68034), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (68007, 68034), True, 'import tensorflow as tf\n'), ((70771, 70822), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (70792, 70822), True, 'import tensorflow as tf\n'), ((70845, 70881), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (70879, 70881), True, 'import tensorflow as tf\n'), ((71422, 71471), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (71426, 71471), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((71592, 71605), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (71602, 71605), True, 'import tensorflow as tf\n'), ((71616, 71653), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (71626, 71653), True, 'import tensorflow as tf\n'), ((72382, 72394), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (72389, 72394), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((72419, 72432), 'deep_model_blocks.Conv', 'Conv', (['(1)', '(4)', '(1)'], {}), '(1, 4, 1)\n', (72423, 72432), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((72630, 
72660), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['x'], {'axis': '[1, 2]'}), '(x, axis=[1, 2])\n', (72644, 72660), True, 'import tensorflow as tf\n'), ((72898, 72949), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(15 * 20 * 64)'], {'use_bias': '(False)'}), '(15 * 20 * 64, use_bias=False)\n', (72919, 72949), True, 'import tensorflow as tf\n'), ((72972, 73008), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (73006, 73008), True, 'import tensorflow as tf\n'), ((73566, 73615), 'deep_model_blocks.Conv', 'Conv', (['(3 if config.has_colored_target else 1)', '(5)', '(1)'], {}), '(3 if config.has_colored_target else 1, 5, 1)\n', (73570, 73615), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((73736, 73749), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (73746, 73749), True, 'import tensorflow as tf\n'), ((73760, 73797), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '(-1, 15, 20, 64)'}), '(x, shape=(-1, 15, 20, 64))\n', (73770, 73797), True, 'import tensorflow as tf\n'), ((1006, 1045), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (1017, 1045), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((1105, 1144), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (1116, 1144), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((1154, 1190), 'deep_model_blocks.ConvBlock', 'ConvBlock', 
(['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (1163, 1190), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((1201, 1239), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (1212, 1239), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((1249, 1285), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (1258, 1285), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((1296, 1334), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (1307, 1334), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((1344, 1380), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (1353, 1380), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((1391, 1429), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (1402, 1429), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, 
ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((2047, 2083), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (2056, 2083), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((2093, 2129), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (2102, 2129), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((2140, 2176), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (2149, 2176), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((2186, 2222), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (2195, 2222), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((2233, 2269), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (2242, 2269), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((2279, 2315), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (2288, 2315), False, 'from deep_model_blocks import 
BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((2326, 2363), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (2335, 2363), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((2373, 2409), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (2382, 2409), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((3148, 3187), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(3)', '(2)'], {}), '(initial_filters * 32, 3, 2)\n', (3159, 3187), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((3247, 3286), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(3)', '(2)'], {}), '(initial_filters * 16, 3, 2)\n', (3258, 3286), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((3296, 3332), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(3)', '(1)'], {}), '(initial_filters * 8, 3, 1)\n', (3305, 3332), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((3343, 3381), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 
8)', '(7)', '(2)'], {}), '(initial_filters * 8, 7, 2)\n', (3354, 3381), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((3391, 3427), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(7)', '(1)'], {}), '(initial_filters * 4, 7, 1)\n', (3400, 3427), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((3438, 3476), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 4)', '(7)', '(2)'], {}), '(initial_filters * 4, 7, 2)\n', (3449, 3476), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((3486, 3522), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(7)', '(1)'], {}), '(initial_filters * 2, 7, 1)\n', (3495, 3522), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((3533, 3571), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 2)', '(7)', '(2)'], {}), '(initial_filters * 2, 7, 2)\n', (3544, 3571), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((4201, 4237), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (4210, 4237), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, 
ReverseResidualBlock, UBlock\n'), ((4247, 4283), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (4256, 4283), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((4294, 4330), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (4303, 4330), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((4340, 4376), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (4349, 4376), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((4387, 4423), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (4396, 4423), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((4433, 4469), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (4442, 4469), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((4480, 4517), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (4489, 4517), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, 
ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((4527, 4563), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (4536, 4563), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((5367, 5405), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 4)', '(7)', '(2)'], {}), '(initial_filters * 4, 7, 2)\n', (5378, 5405), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((5415, 5451), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(3)', '(1)'], {}), '(initial_filters * 2, 3, 1)\n', (5424, 5451), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((5461, 5497), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(3)', '(1)'], {}), '(initial_filters * 2, 3, 1)\n', (5470, 5497), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((5508, 5546), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 2)', '(7)', '(2)'], {}), '(initial_filters * 2, 7, 2)\n', (5519, 5546), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((5556, 5592), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(3)', '(1)'], {}), '(initial_filters * 
1, 3, 1)\n', (5565, 5592), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((5602, 5638), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(3)', '(1)'], {}), '(initial_filters * 1, 3, 1)\n', (5611, 5638), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((5649, 5687), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 1)', '(7)', '(2)'], {}), '(initial_filters * 1, 7, 2)\n', (5660, 5687), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((6309, 6345), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (6318, 6345), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((6355, 6391), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (6364, 6391), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((6402, 6438), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (6411, 6438), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((6448, 6484), 
'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (6457, 6484), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((6495, 6531), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (6504, 6531), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((6541, 6577), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (6550, 6577), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((6588, 6625), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (6597, 6625), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((6635, 6671), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (6644, 6671), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((7415, 7454), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (7426, 7454), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, 
ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((7561, 7600), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (7572, 7600), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((7610, 7646), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (7619, 7646), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((7656, 7692), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (7665, 7692), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((7703, 7741), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (7714, 7741), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((7751, 7787), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (7760, 7787), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((7797, 7833), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (7806, 7833), 
False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((7844, 7882), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (7855, 7882), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((7892, 7928), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (7901, 7928), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((7938, 7974), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (7947, 7974), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((7985, 8023), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (7996, 8023), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((8704, 8740), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (8713, 8740), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((8750, 8786), 'deep_model_blocks.ConvBlock', 
'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (8759, 8786), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((8797, 8833), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (8806, 8833), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((8843, 8879), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (8852, 8879), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((8890, 8926), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (8899, 8926), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((8936, 8972), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (8945, 8972), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((8983, 9020), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (8992, 9020), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, 
ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((9030, 9066), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (9039, 9066), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((9834, 9873), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(7)', '(2)'], {}), '(initial_filters * 32, 7, 2)\n', (9845, 9873), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((9980, 10019), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(7)', '(2)'], {}), '(initial_filters * 16, 7, 2)\n', (9991, 10019), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((10029, 10065), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(3)', '(1)'], {}), '(initial_filters * 4, 3, 1)\n', (10038, 10065), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((10075, 10111), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(3)', '(1)'], {}), '(initial_filters * 4, 3, 1)\n', (10084, 10111), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((10122, 10160), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 8)', '(7)', '(2)'], {}), '(initial_filters * 8, 7, 2)\n', (10133, 10160), False, 
'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((10170, 10206), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(3)', '(1)'], {}), '(initial_filters * 2, 3, 1)\n', (10179, 10206), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((10216, 10252), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(3)', '(1)'], {}), '(initial_filters * 2, 3, 1)\n', (10225, 10252), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((10263, 10301), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 4)', '(7)', '(2)'], {}), '(initial_filters * 4, 7, 2)\n', (10274, 10301), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((10311, 10347), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(3)', '(1)'], {}), '(initial_filters * 1, 3, 1)\n', (10320, 10347), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((10357, 10393), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(3)', '(1)'], {}), '(initial_filters * 1, 3, 1)\n', (10366, 10393), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((10404, 10442), 
'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 2)', '(7)', '(2)'], {}), '(initial_filters * 2, 7, 2)\n', (10415, 10442), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((11135, 11171), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (11144, 11171), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((11181, 11217), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (11190, 11217), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((11228, 11264), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (11237, 11264), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((11274, 11310), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (11283, 11310), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((11321, 11357), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (11330, 11357), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, 
DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((11367, 11403), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (11376, 11403), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((11414, 11451), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (11423, 11451), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((11461, 11497), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (11470, 11497), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((12228, 12274), 'deep_model_blocks.ResizeBlock', 'ResizeBlock', (['(30, 40)', '(initial_filters * 32)', '(5)'], {}), '((30, 40), initial_filters * 32, 5)\n', (12239, 12274), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((12334, 12380), 'deep_model_blocks.ResizeBlock', 'ResizeBlock', (['(60, 80)', '(initial_filters * 16)', '(5)'], {}), '((60, 80), initial_filters * 16, 5)\n', (12345, 12380), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((12390, 12426), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', 
'(1)'], {}), '(initial_filters * 8, 5, 1)\n', (12399, 12426), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((12437, 12484), 'deep_model_blocks.ResizeBlock', 'ResizeBlock', (['(120, 160)', '(initial_filters * 8)', '(5)'], {}), '((120, 160), initial_filters * 8, 5)\n', (12448, 12484), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((12494, 12530), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (12503, 12530), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((12541, 12588), 'deep_model_blocks.ResizeBlock', 'ResizeBlock', (['(240, 320)', '(initial_filters * 4)', '(5)'], {}), '((240, 320), initial_filters * 4, 5)\n', (12552, 12588), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((12598, 12634), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (12607, 12634), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((12645, 12692), 'deep_model_blocks.ResizeBlock', 'ResizeBlock', (['(480, 640)', '(initial_filters * 2)', '(5)'], {}), '((480, 640), initial_filters * 2, 5)\n', (12656, 12692), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, 
ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((13318, 13354), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(4)', '(2)'], {}), '(initial_filters * 2, 4, 2)\n', (13327, 13354), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((13364, 13400), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (13373, 13400), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((13411, 13447), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(4)', '(2)'], {}), '(initial_filters * 4, 4, 2)\n', (13420, 13447), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((13457, 13493), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (13466, 13493), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((13504, 13540), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(4)', '(2)'], {}), '(initial_filters * 8, 4, 2)\n', (13513, 13540), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((13550, 13586), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (13559, 13586), 
False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((13597, 13634), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(4)', '(2)'], {}), '(initial_filters * 16, 4, 2)\n', (13606, 13634), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((13644, 13680), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (13653, 13680), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((14414, 14462), 'deep_model_blocks.ReverseResidualBlock', 'ReverseResidualBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (14434, 14462), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((14472, 14513), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2 * 16)', '(5)', '(1)'], {}), '(initial_filters * 2 * 16, 5, 1)\n', (14481, 14513), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((14522, 14570), 'deep_model_blocks.ReverseResidualBlock', 'ReverseResidualBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (14542, 14570), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, 
ReverseResidualBlock, UBlock\n'), ((14580, 14620), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2 * 8)', '(5)', '(1)'], {}), '(initial_filters * 2 * 8, 5, 1)\n', (14589, 14620), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((14629, 14676), 'deep_model_blocks.ReverseResidualBlock', 'ReverseResidualBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (14649, 14676), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((14686, 14726), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2 * 4)', '(5)', '(1)'], {}), '(initial_filters * 2 * 4, 5, 1)\n', (14695, 14726), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((14735, 14782), 'deep_model_blocks.ReverseResidualBlock', 'ReverseResidualBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (14755, 14782), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((14792, 14832), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2 * 2)', '(5)', '(1)'], {}), '(initial_filters * 2 * 2, 5, 1)\n', (14801, 14832), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((14841, 14888), 'deep_model_blocks.ReverseResidualBlock', 'ReverseResidualBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), 
'(initial_filters * 2, 5, 2)\n', (14861, 14888), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((14898, 14938), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2 * 1)', '(5)', '(1)'], {}), '(initial_filters * 2 * 1, 5, 1)\n', (14907, 14938), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((15514, 15550), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (15523, 15550), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((15560, 15596), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (15569, 15596), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((15607, 15643), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (15616, 15643), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((15653, 15689), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (15662, 15689), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, 
UBlock\n'), ((15700, 15736), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (15709, 15736), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((15746, 15782), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (15755, 15782), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((15793, 15830), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (15802, 15830), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((15840, 15876), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (15849, 15876), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((16639, 16678), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (16650, 16678), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((16738, 16777), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (16749, 16777), False, 'from deep_model_blocks import 
BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((16787, 16823), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (16796, 16823), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((16834, 16872), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (16845, 16872), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((16882, 16918), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (16891, 16918), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((16929, 16967), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (16940, 16967), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((16977, 17013), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (16986, 17013), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((17024, 17062), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', 
(['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (17035, 17062), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((20501, 20510), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (20508, 20510), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((20529, 20580), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (20534, 20580), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((21952, 21991), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (21963, 21991), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((22001, 22038), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(1)'], {}), '(initial_filters * 16, 5, 1)\n', (22010, 22038), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((22049, 22088), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (22060, 22088), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((22098, 22134), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', 
'(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (22107, 22134), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((22145, 22183), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (22156, 22183), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((22193, 22229), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (22202, 22229), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((22240, 22278), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (22251, 22278), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((22288, 22324), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (22297, 22324), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((22335, 22373), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (22346, 22373), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, 
ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((22383, 22419), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (22392, 22419), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((22430, 22468), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 1)', '(5)', '(2)'], {}), '(initial_filters * 1, 5, 2)\n', (22441, 22468), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((22478, 22514), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (22487, 22514), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((24270, 24279), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (24277, 24279), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((24298, 24349), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (24303, 24349), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((25741, 25780), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (25752, 25780), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, 
ReverseResidualBlock, UBlock\n'), ((25790, 25829), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (25801, 25829), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((25839, 25877), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (25850, 25877), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((25887, 25925), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (25898, 25925), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((25935, 25973), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (25946, 25973), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((27065, 27074), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (27072, 27074), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((27093, 27144), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (27098, 27144), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, 
add\n'), ((29383, 29422), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (29394, 29422), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((29432, 29471), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (29443, 29471), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((29481, 29519), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (29492, 29519), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((29529, 29567), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (29540, 29567), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((29577, 29615), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (29588, 29615), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((31090, 31099), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (31097, 31099), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, 
SpatialDropout2D, add\n'), ((31118, 31169), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (31123, 31169), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((33080, 33119), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (33091, 33119), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((33129, 33166), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(1)'], {}), '(initial_filters * 16, 5, 1)\n', (33138, 33166), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((33177, 33216), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (33188, 33216), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((33226, 33262), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (33235, 33262), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((33835, 33871), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (33844, 33871), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, 
ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((33881, 33917), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (33890, 33917), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((33928, 33964), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (33937, 33964), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((33974, 34010), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (33983, 34010), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((34021, 34057), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (34030, 34057), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((34067, 34103), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (34076, 34103), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((34832, 34869), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 32)', '(5)', '(1)'], {}), 
'(initial_filters * 32, 5, 1)\n', (34841, 34869), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((34879, 34916), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(1)'], {}), '(initial_filters * 16, 5, 1)\n', (34888, 34916), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((34926, 34963), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(1)'], {}), '(initial_filters * 16, 5, 1)\n', (34935, 34963), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((34974, 35013), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (34985, 35013), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((35023, 35060), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(1)'], {}), '(initial_filters * 16, 5, 1)\n', (35032, 35060), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((35070, 35107), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(1)'], {}), '(initial_filters * 16, 5, 1)\n', (35079, 35107), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, 
ReverseResidualBlock, UBlock\n'), ((35118, 35157), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (35129, 35157), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((35167, 35203), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (35176, 35203), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((35213, 35249), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (35222, 35249), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((35828, 35864), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (35837, 35864), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((35874, 35910), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (35883, 35910), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((35920, 35956), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (35929, 35956), False, 'from deep_model_blocks import 
BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((35967, 36003), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (35976, 36003), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((36013, 36049), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (36022, 36049), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((36059, 36095), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (36068, 36095), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((36106, 36142), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (36115, 36142), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((36152, 36188), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (36161, 36188), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((36198, 36234), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 
4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (36207, 36234), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((36953, 36992), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (36964, 36992), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((37052, 37091), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (37063, 37091), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((37101, 37137), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (37110, 37137), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((37148, 37186), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (37159, 37186), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((37196, 37232), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (37205, 37232), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, 
ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((37806, 37842), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (37815, 37842), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((37852, 37888), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (37861, 37888), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((37899, 37935), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (37908, 37935), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((37945, 37981), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (37954, 37981), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((37992, 38028), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (38001, 38028), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((38038, 38074), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (38047, 38074), False, 'from 
deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((38821, 38860), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (38832, 38860), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((38920, 38959), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (38931, 38959), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((38969, 39005), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (38978, 39005), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((39016, 39054), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (39027, 39054), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((39064, 39100), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (39073, 39100), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((40607, 40616), 
'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (40614, 40616), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((40635, 40686), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (40640, 40686), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((42028, 42067), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (42039, 42067), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((42077, 42114), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(1)'], {}), '(initial_filters * 16, 5, 1)\n', (42086, 42114), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((42124, 42161), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(1)'], {}), '(initial_filters * 16, 5, 1)\n', (42133, 42161), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((42172, 42211), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (42183, 42211), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((42221, 42257), 'deep_model_blocks.ConvBlock', 
'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (42230, 42257), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((42267, 42303), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (42276, 42303), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((42314, 42352), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (42325, 42352), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((42362, 42398), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (42371, 42398), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((42408, 42444), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (42417, 42444), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((43024, 43060), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (43033, 43060), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, 
ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((43070, 43106), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (43079, 43106), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((43116, 43152), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (43125, 43152), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((43163, 43199), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (43172, 43199), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((43209, 43245), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (43218, 43245), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((43255, 43291), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (43264, 43291), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((43302, 43338), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (43311, 43338), 
False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((43348, 43384), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (43357, 43384), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((43394, 43430), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (43403, 43430), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((43441, 43478), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (43450, 43478), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((43488, 43524), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (43497, 43524), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((43534, 43570), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (43543, 43570), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((44325, 44359), 
'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['initial_filters', '(5)', '(2)'], {}), '(initial_filters, 5, 2)\n', (44336, 44359), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((44371, 44405), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['initial_filters', '(5)', '(2)'], {}), '(initial_filters, 5, 2)\n', (44382, 44405), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((44417, 44451), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['initial_filters', '(5)', '(2)'], {}), '(initial_filters, 5, 2)\n', (44428, 44451), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((45970, 45979), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (45977, 45979), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((45998, 46049), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (46003, 46049), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((47409, 47448), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (47420, 47448), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((47458, 47495), 'deep_model_blocks.ConvBlock', 'ConvBlock', 
(['(initial_filters * 16)', '(5)', '(1)'], {}), '(initial_filters * 16, 5, 1)\n', (47467, 47495), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((47506, 47545), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (47517, 47545), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((47555, 47591), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (47564, 47591), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((47602, 47640), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (47613, 47640), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((47650, 47686), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (47659, 47686), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((47697, 47735), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (47708, 47735), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, 
ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((47745, 47781), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (47754, 47781), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((48355, 48391), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (48364, 48391), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((48401, 48437), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (48410, 48437), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((48448, 48484), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (48457, 48484), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((48494, 48530), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (48503, 48530), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((48541, 48577), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (48550, 48577), 
False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((48587, 48623), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (48596, 48623), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((48634, 48671), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (48643, 48671), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((48681, 48717), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (48690, 48717), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((49464, 49503), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (49475, 49503), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((49513, 49550), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(1)'], {}), '(initial_filters * 16, 5, 1)\n', (49522, 49550), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((49560, 49597), 
'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(1)'], {}), '(initial_filters * 16, 5, 1)\n', (49569, 49597), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((49608, 49647), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (49619, 49647), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((49657, 49693), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (49666, 49693), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((49703, 49739), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (49712, 49739), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((49750, 49788), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (49761, 49788), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((49798, 49834), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (49807, 49834), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, 
DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((49844, 49880), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (49853, 49880), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((49891, 49929), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (49902, 49929), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((49939, 49975), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (49948, 49975), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((49985, 50021), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (49994, 50021), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((51895, 51904), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (51902, 51904), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((51923, 51974), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (51928, 51974), False, 'from tensorflow.keras.layers import BatchNormalization, 
Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((53322, 53361), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (53333, 53361), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((53371, 53408), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(1)'], {}), '(initial_filters * 16, 5, 1)\n', (53380, 53408), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((53419, 53458), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (53430, 53458), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((53468, 53504), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (53477, 53504), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((53515, 53553), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (53526, 53553), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((53563, 53599), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (53572, 
53599), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((53610, 53648), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (53621, 53648), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((53658, 53694), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (53667, 53694), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((53705, 53743), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (53716, 53743), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((53753, 53789), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (53762, 53789), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((55500, 55509), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (55507, 55509), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((55528, 55579), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, 
use_bias=False)\n', (55533, 55579), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((56911, 56950), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (56922, 56950), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((56960, 56997), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(1)'], {}), '(initial_filters * 16, 5, 1)\n', (56969, 56997), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((57008, 57047), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (57019, 57047), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((57057, 57093), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (57066, 57093), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((57104, 57142), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (57115, 57142), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((57152, 57188), 'deep_model_blocks.ConvBlock', 
'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (57161, 57188), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((57199, 57237), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (57210, 57237), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((57247, 57283), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (57256, 57283), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((57294, 57332), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (57305, 57332), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((57342, 57378), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (57351, 57378), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((58845, 58854), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (58852, 58854), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((58873, 58924), 'tensorflow.keras.layers.Dense', 
'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (58878, 58924), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((60264, 60303), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (60275, 60303), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((60363, 60402), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (60374, 60402), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((60412, 60448), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (60421, 60448), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((60459, 60497), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (60470, 60497), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((60507, 60543), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (60516, 60543), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, 
ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((60554, 60592), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (60565, 60592), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((60602, 60638), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (60611, 60638), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((60649, 60687), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (60660, 60687), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((62506, 62515), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (62513, 62515), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((62537, 62588), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (62542, 62588), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((62612, 62663), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (62617, 62663), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), 
((64473, 64512), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (64484, 64512), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((64572, 64611), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (64583, 64611), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((64621, 64657), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (64630, 64657), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((64668, 64706), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (64679, 64706), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((64716, 64752), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (64725, 64752), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((64763, 64801), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (64774, 64801), False, 'from deep_model_blocks import BottleneckResidualBlock, 
Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((64811, 64847), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (64820, 64847), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((64858, 64896), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (64869, 64896), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((65530, 65566), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(4)', '(2)'], {}), '(initial_filters * 2, 4, 2)\n', (65539, 65566), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((65577, 65613), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(4)', '(2)'], {}), '(initial_filters * 4, 4, 2)\n', (65586, 65613), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((65624, 65660), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(4)', '(2)'], {}), '(initial_filters * 8, 4, 2)\n', (65633, 65660), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((65671, 65708), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(4)', '(2)'], 
{}), '(initial_filters * 16, 4, 2)\n', (65680, 65708), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((65806, 65842), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(4)', '(1)'], {}), '(initial_filters * 2, 4, 1)\n', (65815, 65842), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((65853, 65889), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(4)', '(1)'], {}), '(initial_filters * 4, 4, 1)\n', (65862, 65889), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((65900, 65936), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(4)', '(1)'], {}), '(initial_filters * 8, 4, 1)\n', (65909, 65936), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((65947, 65984), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(4)', '(1)'], {}), '(initial_filters * 16, 4, 1)\n', (65956, 65984), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((66855, 66885), 'tensorflow.concat', 'tf.concat', (['[s2, s2s1]'], {'axis': '(-1)'}), '([s2, s2s1], axis=-1)\n', (66864, 66885), True, 'import tensorflow as tf\n'), ((67296, 67335), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (67307, 67335), False, 
'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((67395, 67434), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (67406, 67434), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((67444, 67480), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (67453, 67480), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((67491, 67529), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (67502, 67529), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((67539, 67575), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (67548, 67575), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((67586, 67624), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (67597, 67624), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((67634, 67670), 
'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (67643, 67670), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((67681, 67719), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (67692, 67719), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((69488, 69497), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (69495, 69497), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((69516, 69567), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (69521, 69567), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((70915, 70954), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (70926, 70954), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((71014, 71053), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (71025, 71053), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((71063, 71099), 'deep_model_blocks.ConvBlock', 
'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (71072, 71099), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((71110, 71148), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (71121, 71148), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((71158, 71194), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (71167, 71194), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((71205, 71243), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (71216, 71243), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((71253, 71289), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (71262, 71289), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((71300, 71338), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (71311, 71338), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, 
ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((71978, 72014), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(4)', '(2)'], {}), '(initial_filters * 2, 4, 2)\n', (71987, 72014), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((72073, 72109), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(4)', '(2)'], {}), '(initial_filters * 4, 4, 2)\n', (72082, 72109), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((72168, 72204), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(4)', '(2)'], {}), '(initial_filters * 8, 4, 2)\n', (72177, 72204), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((72263, 72300), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(4)', '(2)'], {}), '(initial_filters * 16, 4, 2)\n', (72272, 72300), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((73062, 73100), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 1)', '(5)', '(2)'], {}), '(initial_filters * 1, 5, 2)\n', (73073, 73100), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((73159, 73197), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 1)', '(5)', '(2)'], {}), '(initial_filters * 1, 5, 2)\n', (73170, 
73197), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((73207, 73243), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (73216, 73243), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((73254, 73292), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 1)', '(5)', '(2)'], {}), '(initial_filters * 1, 5, 2)\n', (73265, 73292), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((73302, 73338), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (73311, 73338), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((73349, 73387), 'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 1)', '(5)', '(2)'], {}), '(initial_filters * 1, 5, 2)\n', (73360, 73387), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((73397, 73433), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (73406, 73433), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((73444, 73482), 
'deep_model_blocks.DeconvBlock', 'DeconvBlock', (['(initial_filters * 1)', '(5)', '(2)'], {}), '(initial_filters * 1, 5, 2)\n', (73455, 73482), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((75000, 75009), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (75007, 75009), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((75028, 75079), 'tensorflow.keras.layers.Dense', 'Dense', (['config.discriminator_classes'], {'use_bias': '(False)'}), '(config.discriminator_classes, use_bias=False)\n', (75033, 75079), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((19186, 19261), 'tensorflow.logging.info', 'tf.logging.info', (['"""Multiscale discriminator operating on regular resolution"""'], {}), "('Multiscale discriminator operating on regular resolution')\n", (19201, 19261), True, 'import tensorflow as tf\n'), ((19390, 19426), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(4)', '(2)'], {}), '(initial_filters * 2, 4, 2)\n', (19399, 19426), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((19438, 19474), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(4)', '(1)'], {}), '(initial_filters * 1, 4, 1)\n', (19447, 19474), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((19487, 19523), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(4)', '(2)'], {}), '(initial_filters * 4, 4, 2)\n', (19496, 19523), 
False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((19535, 19571), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(4)', '(1)'], {}), '(initial_filters * 2, 4, 1)\n', (19544, 19571), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((19584, 19620), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(4)', '(2)'], {}), '(initial_filters * 8, 4, 2)\n', (19593, 19620), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((19632, 19668), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(4)', '(1)'], {}), '(initial_filters * 4, 4, 1)\n', (19641, 19668), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((19681, 19718), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(4)', '(2)'], {}), '(initial_filters * 16, 4, 2)\n', (19690, 19718), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((19730, 19766), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(4)', '(1)'], {}), '(initial_filters * 8, 4, 1)\n', (19739, 19766), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((19834, 19871), 
'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 32)', '(4)', '(2)'], {}), '(initial_filters * 32, 4, 2)\n', (19843, 19871), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((19883, 19920), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(4)', '(1)'], {}), '(initial_filters * 16, 4, 1)\n', (19892, 19920), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((21053, 21065), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (21060, 21065), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((23566, 23641), 'tensorflow.logging.info', 'tf.logging.info', (['"""Multiscale discriminator operating on regular resolution"""'], {}), "('Multiscale discriminator operating on regular resolution')\n", (23581, 23641), True, 'import tensorflow as tf\n'), ((23770, 23806), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(4)', '(2)'], {}), '(initial_filters * 2, 4, 2)\n', (23779, 23806), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((23818, 23854), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (23827, 23854), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((23867, 23903), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(4)', '(2)'], {}), 
'(initial_filters * 4, 4, 2)\n', (23876, 23903), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((23915, 23951), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (23924, 23951), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((23964, 24000), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(4)', '(2)'], {}), '(initial_filters * 8, 4, 2)\n', (23973, 24000), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((24012, 24048), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (24021, 24048), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((24116, 24153), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(4)', '(2)'], {}), '(initial_filters * 16, 4, 2)\n', (24125, 24153), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((24165, 24201), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (24174, 24201), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, 
UBlock\n'), ((24838, 24850), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (24845, 24850), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((26766, 26802), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (26775, 26802), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((26814, 26850), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (26823, 26850), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((26862, 26898), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (26871, 26898), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((26910, 26947), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (26919, 26947), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((26959, 26996), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (26968, 26996), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, 
ReverseResidualBlock, UBlock\n'), ((28175, 28187), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (28182, 28187), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((30489, 30536), 'deep_model_blocks.ResizeBlock', 'ResizeBlock', (['(240, 320)', '(initial_filters * 2)', '(5)'], {}), '((240, 320), initial_filters * 2, 5)\n', (30500, 30536), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((30548, 30595), 'deep_model_blocks.ResizeBlock', 'ResizeBlock', (['(120, 160)', '(initial_filters * 4)', '(5)'], {}), '((120, 160), initial_filters * 4, 5)\n', (30559, 30595), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((30607, 30652), 'deep_model_blocks.ResizeBlock', 'ResizeBlock', (['(60, 80)', '(initial_filters * 8)', '(5)'], {}), '((60, 80), initial_filters * 8, 5)\n', (30618, 30652), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((30664, 30710), 'deep_model_blocks.ResizeBlock', 'ResizeBlock', (['(30, 40)', '(initial_filters * 16)', '(5)'], {}), '((30, 40), initial_filters * 16, 5)\n', (30675, 30710), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((30722, 30768), 'deep_model_blocks.ResizeBlock', 'ResizeBlock', (['(15, 20)', '(initial_filters * 32)', '(5)'], {}), '((15, 20), initial_filters * 32, 5)\n', (30733, 30768), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, 
ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((32212, 32224), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (32219, 32224), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((40086, 40161), 'tensorflow.logging.info', 'tf.logging.info', (['"""Multiscale discriminator operating on regular resolution"""'], {}), "('Multiscale discriminator operating on regular resolution')\n", (40101, 40161), True, 'import tensorflow as tf\n'), ((40270, 40306), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (40279, 40306), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((40316, 40352), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (40325, 40352), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((40363, 40399), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (40372, 40399), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((40409, 40445), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (40418, 40445), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, 
ReverseResidualBlock, UBlock\n'), ((40456, 40492), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (40465, 40492), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((40502, 40538), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (40511, 40538), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((41159, 41171), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (41166, 41171), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((45449, 45524), 'tensorflow.logging.info', 'tf.logging.info', (['"""Multiscale discriminator operating on regular resolution"""'], {}), "('Multiscale discriminator operating on regular resolution')\n", (45464, 45524), True, 'import tensorflow as tf\n'), ((45633, 45669), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (45642, 45669), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((45679, 45715), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(5)', '(1)'], {}), '(initial_filters * 1, 5, 1)\n', (45688, 45715), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((45726, 45762), 'deep_model_blocks.ConvBlock', 'ConvBlock', 
(['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (45735, 45762), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((45772, 45808), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (45781, 45808), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((45819, 45855), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (45828, 45855), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((45865, 45901), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (45874, 45901), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((46542, 46554), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (46549, 46554), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((51007, 51082), 'tensorflow.logging.info', 'tf.logging.info', (['"""Multiscale discriminator operating on regular resolution"""'], {}), "('Multiscale discriminator operating on regular resolution')\n", (51022, 51082), True, 'import tensorflow as tf\n'), ((51193, 51229), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (51202, 51229), 
False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((51241, 51277), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (51250, 51277), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((51340, 51376), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (51349, 51376), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((51388, 51424), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (51397, 51424), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((51487, 51523), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (51496, 51523), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((51535, 51571), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (51544, 51571), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((51689, 51726), 
'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (51698, 51726), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((51738, 51775), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(1)'], {}), '(initial_filters * 16, 5, 1)\n', (51747, 51775), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((52447, 52459), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (52454, 52459), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((54772, 54847), 'tensorflow.logging.info', 'tf.logging.info', (['"""Multiscale discriminator operating on regular resolution"""'], {}), "('Multiscale discriminator operating on regular resolution')\n", (54787, 54847), True, 'import tensorflow as tf\n'), ((54956, 54992), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(4)', '(2)'], {}), '(initial_filters * 2, 4, 2)\n', (54965, 54992), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((55004, 55040), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 1)', '(4)', '(1)'], {}), '(initial_filters * 1, 4, 1)\n', (55013, 55040), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((55053, 55089), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(4)', '(2)'], {}), 
'(initial_filters * 4, 4, 2)\n', (55062, 55089), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((55101, 55137), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(4)', '(1)'], {}), '(initial_filters * 2, 4, 1)\n', (55110, 55137), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((55150, 55186), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(4)', '(2)'], {}), '(initial_filters * 8, 4, 2)\n', (55159, 55186), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((55198, 55234), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(4)', '(1)'], {}), '(initial_filters * 4, 4, 1)\n', (55207, 55234), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((55247, 55284), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(4)', '(2)'], {}), '(initial_filters * 16, 4, 2)\n', (55256, 55284), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((55296, 55332), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(4)', '(1)'], {}), '(initial_filters * 8, 4, 1)\n', (55305, 55332), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, 
UBlock\n'), ((55345, 55382), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 32)', '(4)', '(2)'], {}), '(initial_filters * 32, 4, 2)\n', (55354, 55382), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((55394, 55431), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(4)', '(1)'], {}), '(initial_filters * 16, 4, 1)\n', (55403, 55431), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((56044, 56056), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (56051, 56056), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((58358, 58433), 'tensorflow.logging.info', 'tf.logging.info', (['"""Multiscale discriminator operating on regular resolution"""'], {}), "('Multiscale discriminator operating on regular resolution')\n", (58373, 58433), True, 'import tensorflow as tf\n'), ((58542, 58578), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(4)', '(2)'], {}), '(initial_filters * 2, 4, 2)\n', (58551, 58578), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((58591, 58627), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(4)', '(2)'], {}), '(initial_filters * 4, 4, 2)\n', (58600, 58627), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((58640, 58676), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 
8)', '(4)', '(2)'], {}), '(initial_filters * 8, 4, 2)\n', (58649, 58676), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((58689, 58726), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(4)', '(2)'], {}), '(initial_filters * 16, 4, 2)\n', (58698, 58726), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((58739, 58776), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 32)', '(4)', '(2)'], {}), '(initial_filters * 32, 4, 2)\n', (58748, 58776), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((59385, 59397), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (59392, 59397), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((61723, 61798), 'tensorflow.logging.info', 'tf.logging.info', (['"""Multiscale discriminator operating on regular resolution"""'], {}), "('Multiscale discriminator operating on regular resolution')\n", (61738, 61798), True, 'import tensorflow as tf\n'), ((61911, 61947), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (61920, 61947), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((61960, 61996), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (61969, 61996), False, 'from 
deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((62009, 62045), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (62018, 62045), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((62058, 62095), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (62067, 62095), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((62201, 62237), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(1)'], {}), '(initial_filters * 2, 5, 1)\n', (62210, 62237), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((62250, 62286), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(1)'], {}), '(initial_filters * 4, 5, 1)\n', (62259, 62286), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((62299, 62335), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(1)'], {}), '(initial_filters * 8, 5, 1)\n', (62308, 62335), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((62348, 62385), 'deep_model_blocks.ConvBlock', 
'ConvBlock', (['(initial_filters * 16)', '(5)', '(1)'], {}), '(initial_filters * 16, 5, 1)\n', (62357, 62385), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((63322, 63352), 'tensorflow.concat', 'tf.concat', (['[s2, s2s1]'], {'axis': '(-1)'}), '([s2, s2s1], axis=-1)\n', (63331, 63352), True, 'import tensorflow as tf\n'), ((63598, 63610), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (63605, 63610), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((68757, 68832), 'tensorflow.logging.info', 'tf.logging.info', (['"""Multiscale discriminator operating on regular resolution"""'], {}), "('Multiscale discriminator operating on regular resolution')\n", (68772, 68832), True, 'import tensorflow as tf\n'), ((68939, 68975), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(7)', '(2)'], {}), '(initial_filters * 2, 7, 2)\n', (68948, 68975), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((68987, 69023), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(7)', '(1)'], {}), '(initial_filters * 2, 7, 1)\n', (68996, 69023), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((69036, 69072), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(7)', '(2)'], {}), '(initial_filters * 4, 7, 2)\n', (69045, 69072), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, 
ReverseResidualBlock, UBlock\n'), ((69084, 69120), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(7)', '(1)'], {}), '(initial_filters * 4, 7, 1)\n', (69093, 69120), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((69133, 69169), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(7)', '(2)'], {}), '(initial_filters * 8, 7, 2)\n', (69142, 69169), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((69181, 69217), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(7)', '(1)'], {}), '(initial_filters * 8, 7, 1)\n', (69190, 69217), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((69230, 69267), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(7)', '(2)'], {}), '(initial_filters * 16, 7, 2)\n', (69239, 69267), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((69279, 69316), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(7)', '(1)'], {}), '(initial_filters * 16, 7, 1)\n', (69288, 69316), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((70048, 70060), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (70055, 70060), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, 
MaxPooling2D, SpatialDropout2D, add\n'), ((74515, 74590), 'tensorflow.logging.info', 'tf.logging.info', (['"""Multiscale discriminator operating on regular resolution"""'], {}), "('Multiscale discriminator operating on regular resolution')\n", (74530, 74590), True, 'import tensorflow as tf\n'), ((74697, 74733), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 2)', '(5)', '(2)'], {}), '(initial_filters * 2, 5, 2)\n', (74706, 74733), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((74746, 74782), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 4)', '(5)', '(2)'], {}), '(initial_filters * 4, 5, 2)\n', (74755, 74782), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((74795, 74831), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 8)', '(5)', '(2)'], {}), '(initial_filters * 8, 5, 2)\n', (74804, 74831), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((74844, 74881), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 16)', '(5)', '(2)'], {}), '(initial_filters * 16, 5, 2)\n', (74853, 74881), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((74894, 74931), 'deep_model_blocks.ConvBlock', 'ConvBlock', (['(initial_filters * 32)', '(5)', '(2)'], {}), '(initial_filters * 32, 5, 2)\n', (74903, 74931), False, 'from deep_model_blocks import BottleneckResidualBlock, Conv, ConvBlock, Deconv, DeconvBlock, ResidualBlock, ResizeBlock, 
ReverseBottleneckResidualBlock, ReverseResidualBlock, UBlock\n'), ((75550, 75562), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (75557, 75562), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten, MaxPooling2D, SpatialDropout2D, add\n'), ((19108, 19161), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['x', '(size_x, size_y)'], {}), '(x, (size_x, size_y))\n', (19140, 19161), True, 'import tensorflow as tf\n'), ((23488, 23541), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['x', '(size_x, size_y)'], {}), '(x, (size_x, size_y))\n', (23520, 23541), True, 'import tensorflow as tf\n'), ((27610, 27685), 'tensorflow.logging.info', 'tf.logging.info', (['"""Multiscale discriminator operating on regular resolution"""'], {}), "('Multiscale discriminator operating on regular resolution')\n", (27625, 27685), True, 'import tensorflow as tf\n'), ((31635, 31710), 'tensorflow.logging.info', 'tf.logging.info', (['"""Multiscale discriminator operating on regular resolution"""'], {}), "('Multiscale discriminator operating on regular resolution')\n", (31650, 31710), True, 'import tensorflow as tf\n'), ((40008, 40061), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['x', '(size_x, size_y)'], {}), '(x, (size_x, size_y))\n', (40040, 40061), True, 'import tensorflow as tf\n'), ((45371, 45424), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['x', '(size_x, size_y)'], {}), '(x, (size_x, size_y))\n', (45403, 45424), True, 'import tensorflow as tf\n'), ((50929, 50982), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['x', '(size_x, size_y)'], {}), '(x, (size_x, size_y))\n', (50961, 50982), True, 'import tensorflow as tf\n'), ((54694, 54747), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['x', '(size_x, size_y)'], {}), '(x, 
(size_x, size_y))\n', (54726, 54747), True, 'import tensorflow as tf\n'), ((58280, 58333), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['x', '(size_x, size_y)'], {}), '(x, (size_x, size_y))\n', (58312, 58333), True, 'import tensorflow as tf\n'), ((61645, 61698), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['x', '(size_x, size_y)'], {}), '(x, (size_x, size_y))\n', (61677, 61698), True, 'import tensorflow as tf\n'), ((68679, 68732), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['x', '(size_x, size_y)'], {}), '(x, (size_x, size_y))\n', (68711, 68732), True, 'import tensorflow as tf\n'), ((74437, 74490), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['x', '(size_x, size_y)'], {}), '(x, (size_x, size_y))\n', (74469, 74490), True, 'import tensorflow as tf\n'), ((27528, 27581), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['x', '(size_x, size_y)'], {}), '(x, (size_x, size_y))\n', (27560, 27581), True, 'import tensorflow as tf\n'), ((31553, 31606), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['x', '(size_x, size_y)'], {}), '(x, (size_x, size_y))\n', (31585, 31606), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from setuptools import setup, find_packages
setup(
use_scm_version={
"version_scheme": "post-release",
"local_scheme": "dirty-tag",
},
setup_requires=["setuptools_scm"],
packages=find_packages("src"),
package_dir={"": "src"},
package_data={"naima": ["data/*.npz"]},
install_requires=[
"astropy>=1.0.2",
"emcee>=3.0.2",
"corner",
"matplotlib",
"scipy",
"h5py",
"pyyaml",
],
python_requires=">=3.9",
)
| [
"setuptools.find_packages"
] | [((299, 319), 'setuptools.find_packages', 'find_packages', (['"""src"""'], {}), "('src')\n", (312, 319), False, 'from setuptools import setup, find_packages\n')] |
import numpy as np
from pyFAI.multi_geometry import MultiGeometry
from pyFAI.ext import splitBBox
def inpaint_saxs(imgs, ais, masks):
    """
    Inpaint the 2D images collected by the pixel detector to remove artifacts
    (masked regions such as gaps or beamstop shadow) before later data reduction.

    Parameters:
    -----------
    :param imgs: List of 2D images in pixel space
    :type imgs: list of ndarray
    :param ais: List of AzimuthalIntegrator/Transform generated using pyGIX/pyFAI which contain the information about the experiment geometry
    :type ais: list of AzimuthalIntegrator / TransformIntegrator
    :param masks: List of 2D masks (same dimension as imgs); nonzero entries mark pixels to inpaint
    :type masks: list of ndarray

    Returns:
    --------
    :return: (inpaints, mask_inpaints) where inpaints are the inpainted images and
        mask_inpaints are all-False boolean masks (no pixel left masked after inpainting)
    :rtype: tuple of (list of ndarray, list of ndarray)
    """
    inpaints, mask_inpaints = [], []
    for img, ai, mask in zip(imgs, ais, masks):
        # pyFAI's inpainting expects a C-contiguous array, hence the explicit copy
        inpaints.append(ai.inpainting(img.copy(order='C'),
                                      mask))
        # After inpainting nothing needs masking anymore: all-False boolean mask
        # (equivalent to the former np.logical_not(np.ones_like(mask)), but direct)
        mask_inpaints.append(np.zeros_like(mask, dtype=bool))
    return inpaints, mask_inpaints
def cake_saxs(inpaints, ais, masks, radial_range=(0, 60), azimuth_range=(-90, 90), npt_rad=250, npt_azim=250):
    """
    Unwrap the stitched image into a "cake" representation, i.e. a 2D map over
    the radial coordinate (q) and the azimuthal angle (chi).

    Parameters:
    -----------
    :param inpaints: List of 2D inpainted images
    :type inpaints: list of ndarray
    :param ais: List of AzimuthalIntegrator/Transform generated using pyGIX/pyFAI which contain the information about the experiment geometry
    :type ais: list of AzimuthalIntegrator / TransformIntegrator
    :param masks: List of 2D masks (same dimension as inpaints)
    :type masks: list of ndarray
    :param radial_range: minimum and maximum of the radial range, in the
        integrator unit (q, in inverse Angstroms)
    :type radial_range: tuple
    :param azimuth_range: minimum and maximum of the azimuthal range, in degrees
    :type azimuth_range: tuple
    :param npt_rad: number of points along the radial axis
    :type npt_rad: int
    :param npt_azim: number of points along the azimuthal axis
    :type npt_azim: int

    Returns:
    --------
    :return: (cake, q, chi) — the caked intensity map and its two axes; chi is
        flipped so it is returned in reversed order
    :rtype: tuple of (ndarray, ndarray, ndarray)
    """
    multi_geo = MultiGeometry(
        ais,
        unit='q_A^-1',
        radial_range=radial_range,
        azimuth_range=azimuth_range,
        wavelength=None,
        empty=0.0,
        chi_disc=180,
    )
    intensity, q_axis, chi_axis = multi_geo.integrate2d(
        lst_data=inpaints,
        npt_rad=npt_rad,
        npt_azim=npt_azim,
        correctSolidAngle=True,
        lst_mask=masks,
    )
    # Reverse the azimuthal axis so chi increases in the conventional direction
    return intensity, q_axis, chi_axis[::-1]
def integrate_rad_saxs(inpaints, ais, masks, radial_range=(0, 40), azimuth_range=(0, 90), npt=2000):
    """
    Azimuthally-averaged radial profile of transmission data, computed with
    the pyFAI multi-geometry engine.

    :param inpaints: list of inpainted 2D detector images
    :param ais: list of pyFAI/pyGIX integrators describing the experiment geometry
    :param masks: list of 2D masks, one per image
    :param radial_range: (min, max) of the radial axis
    :param azimuth_range: (min, max) of the azimuthal range in degrees
    :param npt: number of points in the resulting 1D profile
    :return: (q axis, radially integrated intensity)
    """
    geometry = MultiGeometry(
        ais,
        unit='q_A^-1',
        radial_range=radial_range,
        azimuth_range=azimuth_range,
        wavelength=None,
        empty=-1,
        chi_disc=180,
    )
    q_axis, intensity_rad = geometry.integrate1d(
        lst_data=inpaints,
        npt=npt,
        correctSolidAngle=True,
        lst_mask=masks,
    )
    return q_axis, intensity_rad
def integrate_azi_saxs(cake, q_array, chi_array, radial_range=(0, 10), azimuth_range=(-90, 0)):
    """
    Azimuthal profile of a caked (q vs chi) transmission image.

    Pixels falling outside the requested q and chi windows are masked out,
    then the remaining intensity is averaged along the radial (q) axis.

    :param cake: 2D intensity array unwrapped in q-chi space
    :param q_array: radial (q) coordinate of each column of ``cake``
    :param chi_array: azimuthal (chi) coordinate of each row of ``cake``
    :param radial_range: (min, max) q window used for the average
    :param azimuth_range: (min, max) chi window used for the average
    :return: (chi_array, masked mean intensity per chi row)
    """
    q_grid, chi_grid = np.meshgrid(q_array, chi_array)
    # Single boolean mask combining all four window conditions.
    outside = ((q_grid < radial_range[0]) | (q_grid > radial_range[1])
               | (chi_grid < azimuth_range[0]) | (chi_grid > azimuth_range[1]))
    masked_cake = np.ma.masked_array(cake, mask=outside)
    azimuthal_profile = masked_cake.mean(axis=1)
    return chi_array, azimuthal_profile
def integrate_rad_gisaxs(img, q_par, q_per, bins=1000, radial_range=None, azimuth_range=None):
    """
    Radial integration of Grazing incidence data using the pyFAI histogramming engine

    Parameters:
    -----------
    :param img: 2D array containing the stitched intensity
    :type img: ndarray
    :param q_par: minimum and maximum q_par (in A-1) of the input image
    :type q_par: Tuple
    :param q_per: minimum and maximum q_per (in A-1) of the input image
    :type q_per: Tuple
    :param bins: number of points of the final 1D profile
    :type bins: int
    :param radial_range: |q| range (in A-1) over which the integration will be done
    :type radial_range: Tuple
    :param azimuth_range: chi range (in degrees) over which the integration will be done
    :type azimuth_range: Tuple
    """
    # recalculate the q-range of the input array: one coordinate per pixel
    # column (q_h) and per pixel row (q_v, flipped so the first row is q_max)
    q_h = np.linspace(q_par[0], q_par[-1], np.shape(img)[1])
    q_v = np.linspace(q_per[0], q_per[-1], np.shape(img)[0])[::-1]
    if radial_range is None:
        radial_range = (0, q_h.max())
    if azimuth_range is None:
        azimuth_range = (0, q_v.max())
    # Polar coordinates of every pixel: modulus |q| and azimuth chi (degrees).
    q_h_te, q_v_te = np.meshgrid(q_h, q_v)
    tth_array = np.sqrt(q_h_te ** 2 + q_v_te ** 2)
    chi_array = np.rad2deg(np.arctan2(q_h_te, q_v_te))
    # Mask the remeshed array: zero / near-zero pixels (stitching gaps) and
    # pixels outside the requested radial and azimuthal windows.
    img_mask = np.ma.masked_array(img, mask=img == 0)
    img_mask = np.ma.masked_where(img < 1E-5, img_mask)
    img_mask = np.ma.masked_where(tth_array < radial_range[0], img_mask)
    img_mask = np.ma.masked_where(tth_array > radial_range[1], img_mask)
    img_mask = np.ma.masked_where(chi_array < np.min(azimuth_range), img_mask)
    img_mask = np.ma.masked_where(chi_array > np.max(azimuth_range), img_mask)
    # Weighted 1D histogram over |q| (pos0); delta_pos0/delta_pos1 give each
    # pixel's extent in q-space so pyFAI's bounding-box splitting is correct.
    q_rad, i_rad, _, _ = splitBBox.histoBBox1d(img_mask,
                                               pos0=tth_array,
                                               delta_pos0=np.ones_like(img_mask) * (q_par[1] - q_par[0])/np.shape(
                                                   img_mask)[1],
                                               pos1=q_v_te,
                                               delta_pos1=np.ones_like(img_mask) * (q_per[1] - q_per[0])/np.shape(
                                                   img_mask)[0],
                                               bins=bins,
                                               pos0Range=np.array([np.min(tth_array), np.max(tth_array)]),
                                               pos1Range=q_per,
                                               dummy=None,
                                               delta_dummy=None,
                                               mask=img_mask.mask
                                               )
    return q_rad, i_rad
def integrate_qpar(img, q_par, q_per, q_par_range=None, q_per_range=None):
    """
    Horizontal (q_par) line profile of a 2D intensity map.

    Pixels whose coordinates fall outside the requested windows — and
    zero-valued pixels (stitching gaps) — are masked before averaging the
    columns of the image.

    :param img: 2D intensity array
    :param q_par: (min, max) q_par in A-1 spanned by the image columns
    :param q_per: (min, max) q_per in A-1 spanned by the image rows
    :param q_par_range: optional (min, max) q_par integration window
    :param q_per_range: optional (min, max) q_per integration window
    :return: (q_par axis, column-averaged masked intensity)
    """
    if q_par_range is None:
        q_par_range = (np.asarray(q_par).min(), np.asarray(q_par).max())
    if q_per_range is None:
        q_per_range = (np.asarray(q_per).min(), np.asarray(q_per).max())
    n_rows, n_cols = np.shape(img)
    q_par = np.linspace(q_par[0], q_par[1], n_cols)
    q_per = np.linspace(q_per[0], q_per[1], n_rows)[::-1]
    h_grid, v_grid = np.meshgrid(q_par, q_per)
    # One combined boolean mask instead of chained masked_where calls.
    outside = ((v_grid < q_per_range[0]) | (v_grid > q_per_range[1])
               | (h_grid < q_par_range[0]) | (h_grid > q_par_range[1])
               | (img == 0))
    i_par = np.mean(np.ma.masked_array(img, mask=outside), axis=0)
    return q_par, i_par
def integrate_qper(img, q_par, q_per, q_par_range=None, q_per_range=None):
    """
    Vertical (q_per) line profile of a 2D intensity map.

    Pixels whose coordinates lie inside the requested windows and are
    non-zero are kept; everything else is masked before averaging each row
    of the image.

    :param img: 2D intensity array
    :param q_par: (min, max) q_par in A-1 spanned by the image columns
    :param q_per: (min, max) q_per in A-1 spanned by the image rows
    :param q_par_range: optional (min, max) q_par integration window
    :param q_per_range: optional (min, max) q_per integration window
    :return: (q_per axis, row-averaged masked intensity)
    """
    if q_par_range is None:
        q_par_range = (np.asarray(q_par).min(), np.asarray(q_par).max())
    if q_per_range is None:
        q_per_range = (np.asarray(q_per).min(), np.asarray(q_per).max())
    rows, cols = np.shape(img)
    q_par = np.linspace(q_par[0], q_par[1], cols)
    q_per = np.linspace(q_per[0], q_per[1], rows)[::-1]
    par_grid, per_grid = np.meshgrid(q_par, q_per)
    # Keep-mask formulation (De Morgan of the original chained exclusions).
    inside = ((per_grid >= q_per_range[0]) & (per_grid <= q_per_range[1])
              & (par_grid >= q_par_range[0]) & (par_grid <= q_par_range[1])
              & (img != 0))
    i_per = np.mean(np.ma.masked_array(img, mask=~inside), axis=1)
    return q_per, i_per
# TODO: Implement azimuthal integration for GI
def cake_gisaxs(img, q_par, q_per, bins=None, radial_range=None, azimuth_range=None):
    """
    Unwrap the stitched image from q-space to 2theta-Chi space (Radial-Azimuthal angle)

    Parameters:
    -----------
    :param img: 2D stitched intensity array
    :type img: ndarray
    :param q_par: minimum and maximum q_par (in A-1) of the input image
    :type q_par: Tuple
    :param q_per: minimum and maximum q_per (in A-1) of the input image
    :type q_per: Tuple
    :param bins: number of points in both x and y direction of the final cake
    :type bins: Tuple
    :param radial_range: minimum and maximum of the radial range in degree
    :type radial_range: Tuple
    :param azimuth_range: minimum and maximum of the azimuthal range in degree
    :type azimuth_range: Tuple
    """
    # Default output resolution: one bin per input pixel. img.shape is
    # (rows, cols), so the reversed tuple is (cols, rows) = (x, y).
    if bins is None:
        bins = tuple(reversed(img.shape))
    if radial_range is None:
        radial_range = (0, q_par[-1])
    if azimuth_range is None:
        azimuth_range = (-180, 180)
    azimuth_range = np.deg2rad(azimuth_range)
    # recalculate the q-range of the input array
    q_h = np.linspace(q_par[0], q_par[-1], bins[0])
    q_v = np.linspace(q_per[0], q_per[-1], bins[1])[::-1]
    # Polar coordinates of each pixel: modulus |q| and azimuth chi.
    # NOTE(review): chi is kept in radians here (converted back at return),
    # and negated — unlike integrate_rad_gisaxs which uses degrees; confirm
    # this sign/unit convention is intentional.
    q_h_te, q_v_te = np.meshgrid(q_h, q_v)
    tth_array = np.sqrt(q_h_te**2 + q_v_te**2)
    chi_array = -np.arctan2(q_h_te, q_v_te)
    # Mask the remeshed array
    img_mask = np.ma.masked_array(img, mask=img == 0)
    img_mask = np.ma.masked_where(tth_array < radial_range[0], img_mask)
    img_mask = np.ma.masked_where(tth_array > radial_range[1], img_mask)
    img_mask = np.ma.masked_where(chi_array < np.min(azimuth_range), img_mask)
    img_mask = np.ma.masked_where(chi_array > np.max(azimuth_range), img_mask)
    # Weighted 2D histogram over (|q|, chi) performed by pyFAI's C engine.
    cake, q, chi, _, _ = splitBBox.histoBBox2d(weights=img_mask,
                                               pos0=tth_array,
                                               delta_pos0=np.ones_like(img_mask) * (q_par[1] - q_par[0])/bins[1],
                                               pos1=chi_array,
                                               delta_pos1=np.ones_like(img_mask) * (q_per[1] - q_per[0])/bins[1],
                                               bins=bins,
                                               pos0Range=np.array([np.min(radial_range), np.max(radial_range)]),
                                               pos1Range=np.array([np.min(azimuth_range), np.max(azimuth_range)]),
                                               dummy=None,
                                               delta_dummy=None,
                                               mask=img_mask.mask)
    return cake, q, np.rad2deg(chi)[::-1]
| [
"numpy.mean",
"numpy.shape",
"numpy.ones_like",
"numpy.sqrt",
"numpy.asarray",
"numpy.ma.masked_where",
"numpy.max",
"numpy.deg2rad",
"numpy.linspace",
"numpy.arctan2",
"numpy.min",
"numpy.meshgrid",
"pyFAI.multi_geometry.MultiGeometry",
"numpy.rad2deg",
"numpy.ma.masked_array"
] | [((1972, 2108), 'pyFAI.multi_geometry.MultiGeometry', 'MultiGeometry', (['ais'], {'unit': '"""q_A^-1"""', 'radial_range': 'radial_range', 'azimuth_range': 'azimuth_range', 'wavelength': 'None', 'empty': '(0.0)', 'chi_disc': '(180)'}), "(ais, unit='q_A^-1', radial_range=radial_range, azimuth_range=\n azimuth_range, wavelength=None, empty=0.0, chi_disc=180)\n", (1985, 2108), False, 'from pyFAI.multi_geometry import MultiGeometry\n'), ((3455, 3590), 'pyFAI.multi_geometry.MultiGeometry', 'MultiGeometry', (['ais'], {'unit': '"""q_A^-1"""', 'radial_range': 'radial_range', 'azimuth_range': 'azimuth_range', 'wavelength': 'None', 'empty': '(-1)', 'chi_disc': '(180)'}), "(ais, unit='q_A^-1', radial_range=radial_range, azimuth_range=\n azimuth_range, wavelength=None, empty=-1, chi_disc=180)\n", (3468, 3590), False, 'from pyFAI.multi_geometry import MultiGeometry\n'), ((4812, 4843), 'numpy.meshgrid', 'np.meshgrid', (['q_array', 'chi_array'], {}), '(q_array, chi_array)\n', (4823, 4843), True, 'import numpy as np\n'), ((4860, 4884), 'numpy.ma.masked_array', 'np.ma.masked_array', (['cake'], {}), '(cake)\n', (4878, 4884), True, 'import numpy as np\n'), ((4902, 4957), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(q_mesh < radial_range[0])', 'cake_mask'], {}), '(q_mesh < radial_range[0], cake_mask)\n', (4920, 4957), True, 'import numpy as np\n'), ((4974, 5029), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(q_mesh > radial_range[1])', 'cake_mask'], {}), '(q_mesh > radial_range[1], cake_mask)\n', (4992, 5029), True, 'import numpy as np\n'), ((5047, 5105), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(azimuth_range[0] > chi_mesh)', 'cake_mask'], {}), '(azimuth_range[0] > chi_mesh, cake_mask)\n', (5065, 5105), True, 'import numpy as np\n'), ((5122, 5180), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(azimuth_range[1] < chi_mesh)', 'cake_mask'], {}), '(azimuth_range[1] < chi_mesh, cake_mask)\n', (5140, 5180), True, 'import numpy as np\n'), ((6382, 6403), 
'numpy.meshgrid', 'np.meshgrid', (['q_h', 'q_v'], {}), '(q_h, q_v)\n', (6393, 6403), True, 'import numpy as np\n'), ((6420, 6454), 'numpy.sqrt', 'np.sqrt', (['(q_h_te ** 2 + q_v_te ** 2)'], {}), '(q_h_te ** 2 + q_v_te ** 2)\n', (6427, 6454), True, 'import numpy as np\n'), ((6556, 6594), 'numpy.ma.masked_array', 'np.ma.masked_array', (['img'], {'mask': '(img == 0)'}), '(img, mask=img == 0)\n', (6574, 6594), True, 'import numpy as np\n'), ((6611, 6652), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(img < 1e-05)', 'img_mask'], {}), '(img < 1e-05, img_mask)\n', (6629, 6652), True, 'import numpy as np\n'), ((6667, 6724), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(tth_array < radial_range[0])', 'img_mask'], {}), '(tth_array < radial_range[0], img_mask)\n', (6685, 6724), True, 'import numpy as np\n'), ((6740, 6797), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(tth_array > radial_range[1])', 'img_mask'], {}), '(tth_array > radial_range[1], img_mask)\n', (6758, 6797), True, 'import numpy as np\n'), ((9010, 9035), 'numpy.meshgrid', 'np.meshgrid', (['q_par', 'q_per'], {}), '(q_par, q_per)\n', (9021, 9035), True, 'import numpy as np\n'), ((9051, 9089), 'numpy.ma.masked_array', 'np.ma.masked_array', (['img'], {'mask': '(img == 0)'}), '(img, mask=img == 0)\n', (9069, 9089), True, 'import numpy as np\n'), ((9106, 9162), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(qper_mesh < q_per_range[0])', 'img_mask'], {}), '(qper_mesh < q_per_range[0], img_mask)\n', (9124, 9162), True, 'import numpy as np\n'), ((9178, 9234), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(qper_mesh > q_per_range[1])', 'img_mask'], {}), '(qper_mesh > q_per_range[1], img_mask)\n', (9196, 9234), True, 'import numpy as np\n'), ((9251, 9307), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(q_par_range[0] > qpar_mesh)', 'img_mask'], {}), '(q_par_range[0] > qpar_mesh, img_mask)\n', (9269, 9307), True, 'import numpy as np\n'), ((9323, 9379), 'numpy.ma.masked_where', 
'np.ma.masked_where', (['(q_par_range[1] < qpar_mesh)', 'img_mask'], {}), '(q_par_range[1] < qpar_mesh, img_mask)\n', (9341, 9379), True, 'import numpy as np\n'), ((9393, 9418), 'numpy.mean', 'np.mean', (['img_mask'], {'axis': '(0)'}), '(img_mask, axis=0)\n', (9400, 9418), True, 'import numpy as np\n'), ((10463, 10488), 'numpy.meshgrid', 'np.meshgrid', (['q_par', 'q_per'], {}), '(q_par, q_per)\n', (10474, 10488), True, 'import numpy as np\n'), ((10504, 10542), 'numpy.ma.masked_array', 'np.ma.masked_array', (['img'], {'mask': '(img == 0)'}), '(img, mask=img == 0)\n', (10522, 10542), True, 'import numpy as np\n'), ((10559, 10616), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(q_per_mesh < q_per_range[0])', 'img_mask'], {}), '(q_per_mesh < q_per_range[0], img_mask)\n', (10577, 10616), True, 'import numpy as np\n'), ((10632, 10689), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(q_per_mesh > q_per_range[1])', 'img_mask'], {}), '(q_per_mesh > q_per_range[1], img_mask)\n', (10650, 10689), True, 'import numpy as np\n'), ((10706, 10763), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(q_par_mesh < q_par_range[0])', 'img_mask'], {}), '(q_par_mesh < q_par_range[0], img_mask)\n', (10724, 10763), True, 'import numpy as np\n'), ((10779, 10836), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(q_par_mesh > q_par_range[1])', 'img_mask'], {}), '(q_par_mesh > q_par_range[1], img_mask)\n', (10797, 10836), True, 'import numpy as np\n'), ((10850, 10875), 'numpy.mean', 'np.mean', (['img_mask'], {'axis': '(1)'}), '(img_mask, axis=1)\n', (10857, 10875), True, 'import numpy as np\n'), ((11935, 11960), 'numpy.deg2rad', 'np.deg2rad', (['azimuth_range'], {}), '(azimuth_range)\n', (11945, 11960), True, 'import numpy as np\n'), ((12021, 12062), 'numpy.linspace', 'np.linspace', (['q_par[0]', 'q_par[-1]', 'bins[0]'], {}), '(q_par[0], q_par[-1], bins[0])\n', (12032, 12062), True, 'import numpy as np\n'), ((12143, 12164), 'numpy.meshgrid', 'np.meshgrid', (['q_h', 'q_v'], {}), '(q_h, 
q_v)\n', (12154, 12164), True, 'import numpy as np\n'), ((12181, 12215), 'numpy.sqrt', 'np.sqrt', (['(q_h_te ** 2 + q_v_te ** 2)'], {}), '(q_h_te ** 2 + q_v_te ** 2)\n', (12188, 12215), True, 'import numpy as np\n'), ((12302, 12340), 'numpy.ma.masked_array', 'np.ma.masked_array', (['img'], {'mask': '(img == 0)'}), '(img, mask=img == 0)\n', (12320, 12340), True, 'import numpy as np\n'), ((12357, 12414), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(tth_array < radial_range[0])', 'img_mask'], {}), '(tth_array < radial_range[0], img_mask)\n', (12375, 12414), True, 'import numpy as np\n'), ((12430, 12487), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(tth_array > radial_range[1])', 'img_mask'], {}), '(tth_array > radial_range[1], img_mask)\n', (12448, 12487), True, 'import numpy as np\n'), ((6482, 6508), 'numpy.arctan2', 'np.arctan2', (['q_h_te', 'q_v_te'], {}), '(q_h_te, q_v_te)\n', (6492, 6508), True, 'import numpy as np\n'), ((12073, 12114), 'numpy.linspace', 'np.linspace', (['q_per[0]', 'q_per[-1]', 'bins[1]'], {}), '(q_per[0], q_per[-1], bins[1])\n', (12084, 12114), True, 'import numpy as np\n'), ((12229, 12255), 'numpy.arctan2', 'np.arctan2', (['q_h_te', 'q_v_te'], {}), '(q_h_te, q_v_te)\n', (12239, 12255), True, 'import numpy as np\n'), ((6138, 6151), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (6146, 6151), True, 'import numpy as np\n'), ((6844, 6865), 'numpy.min', 'np.min', (['azimuth_range'], {}), '(azimuth_range)\n', (6850, 6865), True, 'import numpy as np\n'), ((6923, 6944), 'numpy.max', 'np.max', (['azimuth_range'], {}), '(azimuth_range)\n', (6929, 6944), True, 'import numpy as np\n'), ((8896, 8909), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (8904, 8909), True, 'import numpy as np\n'), ((10348, 10361), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (10356, 10361), True, 'import numpy as np\n'), ((12535, 12556), 'numpy.min', 'np.min', (['azimuth_range'], {}), '(azimuth_range)\n', (12541, 12556), True, 'import numpy as 
np\n'), ((12614, 12635), 'numpy.max', 'np.max', (['azimuth_range'], {}), '(azimuth_range)\n', (12620, 12635), True, 'import numpy as np\n'), ((13565, 13580), 'numpy.rad2deg', 'np.rad2deg', (['chi'], {}), '(chi)\n', (13575, 13580), True, 'import numpy as np\n'), ((894, 912), 'numpy.ones_like', 'np.ones_like', (['mask'], {}), '(mask)\n', (906, 912), True, 'import numpy as np\n'), ((6199, 6212), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (6207, 6212), True, 'import numpy as np\n'), ((8958, 8971), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (8966, 8971), True, 'import numpy as np\n'), ((10410, 10423), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (10418, 10423), True, 'import numpy as np\n'), ((7135, 7157), 'numpy.ones_like', 'np.ones_like', (['img_mask'], {}), '(img_mask)\n', (7147, 7157), True, 'import numpy as np\n'), ((7182, 7200), 'numpy.shape', 'np.shape', (['img_mask'], {}), '(img_mask)\n', (7190, 7200), True, 'import numpy as np\n'), ((7375, 7397), 'numpy.ones_like', 'np.ones_like', (['img_mask'], {}), '(img_mask)\n', (7387, 7397), True, 'import numpy as np\n'), ((7422, 7440), 'numpy.shape', 'np.shape', (['img_mask'], {}), '(img_mask)\n', (7430, 7440), True, 'import numpy as np\n'), ((7622, 7639), 'numpy.min', 'np.min', (['tth_array'], {}), '(tth_array)\n', (7628, 7639), True, 'import numpy as np\n'), ((7641, 7658), 'numpy.max', 'np.max', (['tth_array'], {}), '(tth_array)\n', (7647, 7658), True, 'import numpy as np\n'), ((8700, 8717), 'numpy.asarray', 'np.asarray', (['q_par'], {}), '(q_par)\n', (8710, 8717), True, 'import numpy as np\n'), ((8725, 8742), 'numpy.asarray', 'np.asarray', (['q_par'], {}), '(q_par)\n', (8735, 8742), True, 'import numpy as np\n'), ((8801, 8818), 'numpy.asarray', 'np.asarray', (['q_per'], {}), '(q_per)\n', (8811, 8818), True, 'import numpy as np\n'), ((8826, 8843), 'numpy.asarray', 'np.asarray', (['q_per'], {}), '(q_per)\n', (8836, 8843), True, 'import numpy as np\n'), ((10152, 10169), 'numpy.asarray', 
'np.asarray', (['q_par'], {}), '(q_par)\n', (10162, 10169), True, 'import numpy as np\n'), ((10177, 10194), 'numpy.asarray', 'np.asarray', (['q_par'], {}), '(q_par)\n', (10187, 10194), True, 'import numpy as np\n'), ((10253, 10270), 'numpy.asarray', 'np.asarray', (['q_per'], {}), '(q_per)\n', (10263, 10270), True, 'import numpy as np\n'), ((10278, 10295), 'numpy.asarray', 'np.asarray', (['q_per'], {}), '(q_per)\n', (10288, 10295), True, 'import numpy as np\n'), ((12834, 12856), 'numpy.ones_like', 'np.ones_like', (['img_mask'], {}), '(img_mask)\n', (12846, 12856), True, 'import numpy as np\n'), ((13011, 13033), 'numpy.ones_like', 'np.ones_like', (['img_mask'], {}), '(img_mask)\n', (13023, 13033), True, 'import numpy as np\n'), ((13192, 13212), 'numpy.min', 'np.min', (['radial_range'], {}), '(radial_range)\n', (13198, 13212), True, 'import numpy as np\n'), ((13214, 13234), 'numpy.max', 'np.max', (['radial_range'], {}), '(radial_range)\n', (13220, 13234), True, 'import numpy as np\n'), ((13305, 13326), 'numpy.min', 'np.min', (['azimuth_range'], {}), '(azimuth_range)\n', (13311, 13326), True, 'import numpy as np\n'), ((13328, 13349), 'numpy.max', 'np.max', (['azimuth_range'], {}), '(azimuth_range)\n', (13334, 13349), True, 'import numpy as np\n')] |
import numpy as np
from typing import Tuple
import plotly.io
from IMLearn.metalearners.adaboost import AdaBoost
from IMLearn.learners.classifiers import DecisionStump
from IMLearn.metrics import accuracy
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# Render Plotly figures by opening them in the default web browser.
plotly.io.renderers.default = 'browser'
def generate_data(n: int, noise_ratio: float) -> Tuple[np.ndarray, np.ndarray]:
    """
    Generate a dataset in R^2 of specified size.

    Samples are drawn uniformly from [-1, 1]^2; points strictly inside the
    circle of radius 0.5 centred at the origin are labeled -1, all others +1.
    A fraction of the labels is then inverted to simulate label noise.
    (The duplicated informal triple-quoted description that followed the
    docstring was dead code and has been merged into it.)

    Parameters
    ----------
    n: int
        Number of samples to generate
    noise_ratio: float
        Ratio of labels to invert
    Returns
    -------
    X: np.ndarray of shape (n_samples,2)
        Design matrix of samples
    y: np.ndarray of shape (n_samples,)
        Labels of samples
    """
    X, y = np.random.rand(n, 2) * 2 - 1, np.ones(n)
    # Points inside the radius-0.5 circle get the negative label.
    y[np.sum(X ** 2, axis=1) < 0.5 ** 2] = -1
    # NOTE: np.random.choice samples with replacement, so slightly fewer than
    # noise_ratio * n distinct labels may be flipped (duplicates flip once).
    y[np.random.choice(n, int(noise_ratio * n))] *= -1
    return X, y
def fit_and_evaluate_adaboost(noise, n_learners=250, train_size=5000, test_size=500):
    """
    Fit an AdaBoost ensemble of decision stumps on synthetic 2D data and
    produce the exercise plots (questions 1-4). Figures are shown via Plotly;
    nothing is returned.

    :param noise: ratio of labels randomly inverted in train/test data
    :param n_learners: maximum number of weak learners in the ensemble
    :param train_size: number of training samples to generate
    :param test_size: number of test samples to generate
    """
    (train_X, train_y), (test_X, test_y) = generate_data(train_size, noise), generate_data(test_size, noise)
    # Question 1: Train- and test errors of AdaBoost in noiseless case
    # print("Fitting.......")
    adb = AdaBoost(DecisionStump, n_learners).fit(train_X, train_y)
    # save it
    # with open(f'adb_{train_size}_{test_size}_{noise}noise.pickle', 'wb') as file:
    #     pickle.dump(adb, file)
    # print("saved")
    # return
    # print("Loading...")
    # with open(f'adb_{train_size}_{test_size}_{noise}noise.pickle', 'rb') as file2:
    #     adb = pickle.load(file2)
    # print("Plotting.......")
    # Plot train/test misclassification loss as a function of ensemble size.
    go.Figure(
        data=[
            go.Scatter(
                x=list(range(1, n_learners + 1)),
                y=list(map(lambda n: adb.partial_loss(train_X, train_y, n), list(range(1, n_learners + 1)))),
                mode='markers+lines',
                name="Training Loss"
            ),
            go.Scatter(
                x=list(range(1, n_learners + 1)),
                y=list(map(lambda n: adb.partial_loss(test_X, test_y, n), list(range(1, n_learners + 1)))),
                mode='markers+lines',
                name="Test Loss"
            )
        ],
        layout=go.Layout(
            title=f"Loss as Function of Num of Learners over Data with {noise} noise",
            xaxis_title={'text': "$\\text{Num of Learners}$"},
            yaxis_title={'text': "$\\text{Misclassification Loss}$"}
        )
    ).show()
    # Question 2: Plotting decision surfaces
    T = [5, 50, 100, 250]
    # Plot limits: bounding box of all samples, padded by 0.1 on each side.
    lims = np.array([np.r_[train_X, test_X].min(axis=0), np.r_[train_X, test_X].max(axis=0)]).T + np.array([-.1, .1])
    # preds = [adb.partial_predict(train_X, t) for t in T]
    symbols = np.array(["circle", "x", "diamond"])
    fig = make_subplots(rows=2,
                        cols=2,
                        subplot_titles=[f"Decision Boundary for Ensemble of Size {m}"
                                        for i, m in enumerate(T)],
                        horizontal_spacing=0.1,
                        vertical_spacing=.05,
                        )
    # Add traces for data-points setting symbols and colors
    for i, m in enumerate(T):
        fig.add_traces([go.Scatter(
            x=test_X[:, 0],
            y=test_X[:, 1],
            mode="markers",
            showlegend=False,
            marker=dict(
                color=test_y,
                symbol='diamond',
                line=dict(color="black", width=1)),
        ),
            decision_surface(lambda x: adb.partial_predict(x, m), lims[0], lims[1], showscale=False)
        ],
            rows=(i // 2) + 1, cols=(i % 2) + 1
        )
    fig.update_layout(
        title=f"Decision Boundaries for Different Ensemble Size <br>",
        margin=dict(t=100),
        width=1200,
        height=1000
    )
    fig.show()
    # Question 3: Decision surface of best performing ensemble
    # 1-based ensemble size that minimizes the test loss.
    best_ensemble = np.argmin(np.array(
        [adb.partial_loss(X=test_X, y=test_y, T=t)
         for t in range(1, 251)])) + 1
    go.Figure(
        data=[
            go.Scatter(
                x=test_X[:, 0],
                y=test_X[:, 1],
                mode="markers",
                showlegend=False,
                marker=dict(
                    color=test_y,
                    symbol='diamond',
                    line=dict(color="black", width=1)),
            ),
            decision_surface(
                lambda x: adb.partial_predict(x, best_ensemble),
                lims[0], lims[1], showscale=False
            )
        ]
    ).update_layout(
        title=f"Decision Boundaries for Ensemble of Size {best_ensemble}<br>"
              f"<sup> With Accuracy of: "
              f"{accuracy(test_y, adb.partial_predict(test_X, best_ensemble))}"
              f"</sup>",
        margin=dict(t=100),
        width=1200,
        height=1000
    ).show()
    # Question 4: Decision surface with weighted samples
    # Final boosting-iteration sample weights, rescaled for marker sizing.
    weights = adb.D_ * 10 / np.max(adb.D_)
    go.Figure(
        data=[
            go.Scatter(
                x=train_X[:, 0],
                y=train_X[:, 1],
                mode="markers",
                showlegend=False,
                marker=dict(
                    color=weights,
                    symbol=symbols[train_y.astype(int)],
                    line=dict(color="black", width=1),
                    size=weights
                )
            ).update(),
            decision_surface(
                adb.predict,
                lims[0], lims[1], showscale=False
            )
        ]
    ).update_layout(
        title=f"Decision Boundaries for Data with {noise} noise <br>"
              f"With Training Set Point Size & Color Proportional To It’s Weight<br>"
              f"<sup> x - True label is blue</sup><br>"
              f"<sup>diamond - True label is red</sup>",
        margin=dict(t=120),
        width=1000,
        height=1000,
    ).show()
if __name__ == '__main__':
    # Fix the RNG seed so both experiments are reproducible.
    np.random.seed(0)
    fit_and_evaluate_adaboost(noise=0)
    fit_and_evaluate_adaboost(noise=0.4)
| [
"numpy.ones",
"numpy.random.rand",
"plotly.graph_objects.Layout",
"IMLearn.metalearners.adaboost.AdaBoost",
"numpy.max",
"numpy.array",
"numpy.sum",
"numpy.random.seed"
] | [((3011, 3047), 'numpy.array', 'np.array', (["['circle', 'x', 'diamond']"], {}), "(['circle', 'x', 'diamond'])\n", (3019, 3047), True, 'import numpy as np\n'), ((6267, 6284), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (6281, 6284), True, 'import numpy as np\n'), ((1056, 1066), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (1063, 1066), True, 'import numpy as np\n'), ((2917, 2938), 'numpy.array', 'np.array', (['[-0.1, 0.1]'], {}), '([-0.1, 0.1])\n', (2925, 2938), True, 'import numpy as np\n'), ((5270, 5284), 'numpy.max', 'np.max', (['adb.D_'], {}), '(adb.D_)\n', (5276, 5284), True, 'import numpy as np\n'), ((1073, 1095), 'numpy.sum', 'np.sum', (['(X ** 2)'], {'axis': '(1)'}), '(X ** 2, axis=1)\n', (1079, 1095), True, 'import numpy as np\n'), ((1493, 1528), 'IMLearn.metalearners.adaboost.AdaBoost', 'AdaBoost', (['DecisionStump', 'n_learners'], {}), '(DecisionStump, n_learners)\n', (1501, 1528), False, 'from IMLearn.metalearners.adaboost import AdaBoost\n'), ((1026, 1046), 'numpy.random.rand', 'np.random.rand', (['n', '(2)'], {}), '(n, 2)\n', (1040, 1046), True, 'import numpy as np\n'), ((2494, 2700), 'plotly.graph_objects.Layout', 'go.Layout', ([], {'title': 'f"""Loss as Function of Num of Learners over Data with {noise} noise"""', 'xaxis_title': "{'text': '$\\\\text{Num of Learners}$'}", 'yaxis_title': "{'text': '$\\\\text{Misclassification Loss}$'}"}), "(title=\n f'Loss as Function of Num of Learners over Data with {noise} noise',\n xaxis_title={'text': '$\\\\text{Num of Learners}$'}, yaxis_title={'text':\n '$\\\\text{Misclassification Loss}$'})\n", (2503, 2700), True, 'import plotly.graph_objects as go\n')] |
# import packages
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from datetime import datetime, timedelta
from external_func import random_date
def start_print():
    """Emit a banner marking the beginning of the DAG run."""
    banner = '\nDAG starting...\n'
    print(banner)
def end_print():
    """Emit a banner marking the successful end of the DAG run."""
    farewell = '\nDAG end...\nCONGRATS DD!\n'
    print(farewell)
# Default arguments applied to every task in the DAG unless overridden.
default_args = {
    'owner': 'DavD',
    'depends_on_past': False,  # a run does not wait for the previous run's task to succeed
    'start_date': datetime(2019, 3, 3),
    'email': '<EMAIL>',
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 5,
    'retry_delay': timedelta(minutes=5),
    # NOTE(review): 'concurrency' is normally a DAG-level argument, not a
    # per-task default — confirm it is honored here or move it to DAG(...).
    'concurrency': 1
    # 'queue': 'bash_queue',
    # 'pool': 'backfill',
    # 'priority_weight': 10,
    # 'end_date': datetime(2016, 1, 1),
}
# The DAG is scheduled every 5 minutes starting from start_date.
dag = DAG(dag_id='dd_test_v1',
          default_args=default_args,
          schedule_interval=timedelta(minutes=5))
# t1, t2, t3, and t4 are examples of tasks created by instantiating operators
t1 = BashOperator(
    task_id='Date',
    bash_command='date',  # prints the worker's current date/time
    dag=dag)
t2 = PythonOperator(
    task_id='py_1',
    python_callable=start_print,
    dag=dag)
t3 = PythonOperator(
    task_id='py_2',
    python_callable=random_date,  # imported from external_func
    dag=dag)
t4 = PythonOperator(
    task_id='py_3',
    python_callable=end_print,
    dag=dag)
# Linear pipeline: date -> start banner -> random date -> end banner.
t1 >> t2 >> t3 >> t4
| [
"datetime.datetime",
"datetime.timedelta",
"airflow.operators.python_operator.PythonOperator",
"airflow.operators.bash_operator.BashOperator"
] | [((957, 1015), 'airflow.operators.bash_operator.BashOperator', 'BashOperator', ([], {'task_id': '"""Date"""', 'bash_command': '"""date"""', 'dag': 'dag'}), "(task_id='Date', bash_command='date', dag=dag)\n", (969, 1015), False, 'from airflow.operators.bash_operator import BashOperator\n'), ((1035, 1103), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', ([], {'task_id': '"""py_1"""', 'python_callable': 'start_print', 'dag': 'dag'}), "(task_id='py_1', python_callable=start_print, dag=dag)\n", (1049, 1103), False, 'from airflow.operators.python_operator import PythonOperator\n'), ((1123, 1191), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', ([], {'task_id': '"""py_2"""', 'python_callable': 'random_date', 'dag': 'dag'}), "(task_id='py_2', python_callable=random_date, dag=dag)\n", (1137, 1191), False, 'from airflow.operators.python_operator import PythonOperator\n'), ((1211, 1277), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', ([], {'task_id': '"""py_3"""', 'python_callable': 'end_print', 'dag': 'dag'}), "(task_id='py_3', python_callable=end_print, dag=dag)\n", (1225, 1277), False, 'from airflow.operators.python_operator import PythonOperator\n'), ((442, 462), 'datetime.datetime', 'datetime', (['(2019)', '(3)', '(3)'], {}), '(2019, 3, 3)\n', (450, 462), False, 'from datetime import datetime, timedelta\n'), ((585, 605), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (594, 605), False, 'from datetime import datetime, timedelta\n'), ((851, 871), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (860, 871), False, 'from datetime import datetime, timedelta\n')] |
import math
# Worked softmax example: four equal logits must yield equal probabilities.
z = [1.0, 1, 1, 1.0]
# Exponentiate every logit (the softmax numerator terms).
z_exp = list(map(math.exp, z))
print([round(value, 2) for value in z_exp])
# Normalisation constant: the sum of the exponentials.
sum_z_exp = sum(z_exp)
print(round(sum_z_exp, 2))
# Each probability is its exponential divided by the total.
softmax = [round(value / sum_z_exp, 3) for value in z_exp]
print(softmax)
| [
"math.exp"
] | [((42, 53), 'math.exp', 'math.exp', (['i'], {}), '(i)\n', (50, 53), False, 'import math\n')] |
import logging
# Module-level logger named after this package/module.
logger = logging.getLogger(__name__)
# Use lazy %-style arguments so the message string is only built when
# DEBUG logging is actually enabled (avoids eager concatenation).
logger.debug("Loaded %s", __name__)
from jsonrpcserver import methods
from .exceptions import *
from .influxdb_api import *
from .meta import *
| [
"logging.getLogger"
] | [((24, 51), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (41, 51), False, 'import logging\n')] |
import os
import pandas as pd
import datetime
from genderperformr import GenderPerformr
from agreementr import Agreementr
from politenessr import Politenessr
from supportr import Supportr
import enchant
import requests
import json
from googleapiclient import discovery
from enchant.checker import SpellChecker
from enchant.tokenize import get_tokenizer
from nltk.tokenize import word_tokenize
import nltk
import time
# Ensure the NLTK 'punkt' tokenizer model used by word_tokenize below is
# available; downloads once and is a cheap no-op on subsequent runs.
nltk.download('punkt')
def clean_text(text):
    """Tokenize *text* and return only its alphabetic tokens, joined by single spaces."""
    alpha_tokens = (token for token in word_tokenize(text) if token.isalpha())
    return ' '.join(alpha_tokens)
def extract_features(tlc):
"""extract features from the text
Args:
tlc (dict[str]): all the attributes of a tlc
Returns:
[dict]: a dictionary of features extracted
"""
text = clean_text(tlc['body'])
fields = dict()
# add features here #
fields['Top_comment_word_count'] = len(text.split(' '))
fields['Top_comment_text'] = text
# Extract time-based features
def get_day_of_week(text):
return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').weekday() + 1
def get_day_of_month(text):
return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').day
def get_time_of_day(text):
return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').hour
time_local = time.localtime(tlc['created_utc'])
time_local = time.strftime("%Y-%m-%d %H:%M:%S", time_local)
fields['Top_comment_day'] = get_day_of_month(time_local)
fields['Top_comment_day_of_week'] = get_day_of_week(time_local)
fields['Top_comment_hour'] = get_time_of_day(time_local)
# Extract gender value
gp = GenderPerformr()
probs, _ = gp.predict(tlc['author'])
# Rescale it from [0,1] to [-1,1]
fields['Top_comment_author_gender_value'] = 2 * probs - 1
# Extract percentage of mispellings
check = SpellChecker("en_US")
tokenizer = get_tokenizer("en_US")
# Prevent the denominator from 0
def weird_division(n, d):
return n / d if d else 0
def get_mispellings_percentage(text):
mispelling_count = 0
total_count = 0
if text == 'nan':
return total_count
else:
check.set_text(text)
for err in check:
mispelling_count = mispelling_count + 1
for w in tokenizer(text):
total_count = total_count + 1
value = weird_division(mispelling_count, total_count)
return value
fields['Top_comment_mispellings'] = get_mispellings_percentage(text)
# Get politeness, agreement, support scores, and rescale them from [1,5] to [-1,1]
ar = Agreementr()
pr = Politenessr()
sr = Supportr()
fields['Top_comment_agreement_value'] = 0.5*float(ar.predict([text]))-1.5
fields['Top_comment_politeness_value'] = 0.5*float(pr.predict([text]))-1.5
fields['Top_comment_support_value'] = 0.5*float(sr.predict([text]))-1.5
# Get toxicity scores
KEY = "yourkey.txt" # os.getenv("GOOGLE_API_KEY")
service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=KEY)
def get_results(request_id, response, exception):
toxicity_scores.append((request_id, response))
toxicity_scores = []
count = 0
batch = service.new_batch_http_request(callback=get_results)
analyze_request = {
'comment': {'text': text},
"requestedAttributes": {
"TOXICITY": {},
"SEVERE_TOXICITY": {},
"ATTACK_ON_COMMENTER": {}
}
}
batch.add(service.comments().analyze(body=analyze_request), request_id=str(count))
batch.execute()
toxic_score = toxicity_scores[0][1]['attributeScores']['TOXICITY']['summaryScore']['value']
attack_score = toxicity_scores[0][1]['attributeScores']['ATTACK_ON_COMMENTER']['summaryScore']['value']
if toxic_score > 0.5:
fields['Top_comment_untuned_toxicity'] = 1
else:
fields['Top_comment_untuned_toxicity'] = 0
if toxic_score > 0.8 and attack_score > 0.5:
fields['Top_comment_tuned_toxicity'] = 1
else:
fields['Top_comment_tuned_toxicity'] = 0
# end of feature extractions #
return fields
def close(istream):
"""call deconstructors if needed
"""
# e.g. close files or disconnect apis
istream.close()
def main():
istream, df = init()
data = ["what's the date today?"]
features = map(extract_features, data)
close(istream)
main()
| [
"supportr.Supportr",
"nltk.download",
"datetime.datetime.strptime",
"politenessr.Politenessr",
"genderperformr.GenderPerformr",
"time.strftime",
"enchant.tokenize.get_tokenizer",
"nltk.tokenize.word_tokenize",
"googleapiclient.discovery.build",
"agreementr.Agreementr",
"time.localtime",
"encha... | [((417, 439), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (430, 439), False, 'import nltk\n'), ((477, 496), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (490, 496), False, 'from nltk.tokenize import word_tokenize\n'), ((1341, 1375), 'time.localtime', 'time.localtime', (["tlc['created_utc']"], {}), "(tlc['created_utc'])\n", (1355, 1375), False, 'import time\n'), ((1393, 1439), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""', 'time_local'], {}), "('%Y-%m-%d %H:%M:%S', time_local)\n", (1406, 1439), False, 'import time\n'), ((1667, 1683), 'genderperformr.GenderPerformr', 'GenderPerformr', ([], {}), '()\n', (1681, 1683), False, 'from genderperformr import GenderPerformr\n'), ((1878, 1899), 'enchant.checker.SpellChecker', 'SpellChecker', (['"""en_US"""'], {}), "('en_US')\n", (1890, 1899), False, 'from enchant.checker import SpellChecker\n'), ((1916, 1938), 'enchant.tokenize.get_tokenizer', 'get_tokenizer', (['"""en_US"""'], {}), "('en_US')\n", (1929, 1938), False, 'from enchant.tokenize import get_tokenizer\n'), ((2670, 2682), 'agreementr.Agreementr', 'Agreementr', ([], {}), '()\n', (2680, 2682), False, 'from agreementr import Agreementr\n'), ((2692, 2705), 'politenessr.Politenessr', 'Politenessr', ([], {}), '()\n', (2703, 2705), False, 'from politenessr import Politenessr\n'), ((2715, 2725), 'supportr.Supportr', 'Supportr', ([], {}), '()\n', (2723, 2725), False, 'from supportr import Supportr\n'), ((3054, 3118), 'googleapiclient.discovery.build', 'discovery.build', (['"""commentanalyzer"""', '"""v1alpha1"""'], {'developerKey': 'KEY'}), "('commentanalyzer', 'v1alpha1', developerKey=KEY)\n", (3069, 3118), False, 'from googleapiclient import discovery\n'), ((1160, 1213), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['text', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(text, '%Y-%m-%d %H:%M:%S')\n", (1186, 1213), False, 'import datetime\n'), ((1265, 1318), 
'datetime.datetime.strptime', 'datetime.datetime.strptime', (['text', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(text, '%Y-%m-%d %H:%M:%S')\n", (1291, 1318), False, 'import datetime\n'), ((1044, 1097), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['text', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(text, '%Y-%m-%d %H:%M:%S')\n", (1070, 1097), False, 'import datetime\n')] |
from PyQt5 import QtGui, QtCore, QtWidgets
from PyQt5.uic import loadUi
import sys
class End(QtWidgets.QDialog):
def __init__(self, winners):
super(QtWidgets.QDialog, self).__init__()
loadUi("End/end.ui", self)
msg = f"""
<p style="text-align: center; font-size: 30px;"><em><strong>CONGRATS!!!</strong></em></p>
<p style="text-align: center; font-size: 50px;"><em><strong>{winners}</strong></em></p>
<p style="text-align: center; font-size: 30px;"><em><strong>You Have Won the Quiz</strong></em></p>
<p style="text-align: center; font-size: 20px;">by your exceptional performance</p>
"""
self.label.setText(msg)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
widget = QtWidgets.QStackedWidget()
widget.resize(400,300)
widget.addWidget(End("Naveen, Naveen, Naveen, Naveen"))
widget.show()
sys.exit(app.exec_()) | [
"PyQt5.uic.loadUi",
"PyQt5.QtWidgets.QStackedWidget",
"PyQt5.QtWidgets.QApplication"
] | [((766, 798), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (788, 798), False, 'from PyQt5 import QtGui, QtCore, QtWidgets\n'), ((812, 838), 'PyQt5.QtWidgets.QStackedWidget', 'QtWidgets.QStackedWidget', ([], {}), '()\n', (836, 838), False, 'from PyQt5 import QtGui, QtCore, QtWidgets\n'), ((206, 232), 'PyQt5.uic.loadUi', 'loadUi', (['"""End/end.ui"""', 'self'], {}), "('End/end.ui', self)\n", (212, 232), False, 'from PyQt5.uic import loadUi\n')] |
import os
srcfile = 'DocTools~/assetgraph_from_gdoc.md'
pnglist = 'DocTools~/order.txt'
dstfile = 'Documentation~/assetgraph.md'
num = 1
if os.path.exists(dstfile):
os.remove(dstfile)
with open(srcfile) as f:
doc = f.read()
f.close()
with open(pnglist) as fpng:
while True:
pnglist = fpng.readline()
if not pnglist:
break
pnglist = pnglist.strip()
keyword = "image{0}.png".format(num)
print(keyword + " => " + pnglist)
doc = doc.replace(keyword, pnglist.replace("image","__temp__"))
num+=1
doc = doc.replace("__temp__", "image")
with open(dstfile, mode='w') as fw:
fw.write(doc)
fw.close()
| [
"os.path.exists",
"os.remove"
] | [((143, 166), 'os.path.exists', 'os.path.exists', (['dstfile'], {}), '(dstfile)\n', (157, 166), False, 'import os\n'), ((169, 187), 'os.remove', 'os.remove', (['dstfile'], {}), '(dstfile)\n', (178, 187), False, 'import os\n')] |
import os
# isort: off
# This import has to come before the CheckmateClient import or the functional
# tests break.
# See https://github.com/gevent/gevent/issues/1016
import pywb.apps.frontendapp # pylint:disable=unused-import
# isort: on
import httpretty as httpretty_
import pytest
import webtest
from tests.conftest import environment_variables
from tests.simple_server import serve_content
from viahtml.app import Application
@pytest.fixture(scope="session")
def app(with_environ): # pylint:disable=unused-argument
app = Application()
app.debug = True
return webtest.TestApp(app)
@pytest.fixture(scope="session")
def with_environ():
# WSGI uses repeated elements to express this, which means we can't use
# the standard configparser.ConfigParser to read them. So they are
# duplicated here:
# It's very hard to test with URL signing on, so disable it
env_vars = environment_variables()
env_vars["VIA_DISABLE_AUTHENTICATION"] = "1"
os.environ.update(env_vars)
@pytest.fixture(autouse=True, scope="session")
def upstream_website():
minimal_valid_html = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>title</title>
<link rel="manifest" href="/manifest.json" type="text/javascript">
<link rel="other" href="/other.json" type="text/javascript">
<script src="script.js"></script>
</head>
<body>
<!-- upstream content -->
<a href="http://example.com">link</a>
</body>
</html>
"""
with serve_content( # pylint: disable=not-context-manager
minimal_valid_html,
port=8080,
extra_headers={"Cache-Control": "public, max-age=60"},
):
yield
@pytest.fixture
def proxied_content(app):
return app.get(
"/proxy/http://localhost:8080/?via.client.openSidebar=yup", expect_errors=True
)
@pytest.fixture
def httpretty():
"""Monkey-patch Python's socket core module to mock all HTTP responses.
We never want real HTTP requests to be sent by the tests so replace them
all with mock responses. This handles requests sent using the standard
urllib2 library and the third-party httplib2 and requests libraries.
"""
httpretty_.enable(allow_net_connect=False)
yield
httpretty_.disable()
httpretty_.reset()
| [
"viahtml.app.Application",
"httpretty.disable",
"httpretty.enable",
"webtest.TestApp",
"tests.conftest.environment_variables",
"os.environ.update",
"tests.simple_server.serve_content",
"pytest.fixture",
"httpretty.reset"
] | [((438, 469), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (452, 469), False, 'import pytest\n'), ((608, 639), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (622, 639), False, 'import pytest\n'), ((1019, 1064), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)', 'scope': '"""session"""'}), "(autouse=True, scope='session')\n", (1033, 1064), False, 'import pytest\n'), ((537, 550), 'viahtml.app.Application', 'Application', ([], {}), '()\n', (548, 550), False, 'from viahtml.app import Application\n'), ((584, 604), 'webtest.TestApp', 'webtest.TestApp', (['app'], {}), '(app)\n', (599, 604), False, 'import webtest\n'), ((910, 933), 'tests.conftest.environment_variables', 'environment_variables', ([], {}), '()\n', (931, 933), False, 'from tests.conftest import environment_variables\n'), ((988, 1015), 'os.environ.update', 'os.environ.update', (['env_vars'], {}), '(env_vars)\n', (1005, 1015), False, 'import os\n'), ((2275, 2317), 'httpretty.enable', 'httpretty_.enable', ([], {'allow_net_connect': '(False)'}), '(allow_net_connect=False)\n', (2292, 2317), True, 'import httpretty as httpretty_\n'), ((2334, 2354), 'httpretty.disable', 'httpretty_.disable', ([], {}), '()\n', (2352, 2354), True, 'import httpretty as httpretty_\n'), ((2359, 2377), 'httpretty.reset', 'httpretty_.reset', ([], {}), '()\n', (2375, 2377), True, 'import httpretty as httpretty_\n'), ((1584, 1687), 'tests.simple_server.serve_content', 'serve_content', (['minimal_valid_html'], {'port': '(8080)', 'extra_headers': "{'Cache-Control': 'public, max-age=60'}"}), "(minimal_valid_html, port=8080, extra_headers={'Cache-Control':\n 'public, max-age=60'})\n", (1597, 1687), False, 'from tests.simple_server import serve_content\n')] |
# -*- coding: utf-8 -*-
#
# Author: <NAME> <<EMAIL>>
#
# Setup the SMRT module
from __future__ import print_function, absolute_import, division
from distutils.command.clean import clean
# from setuptools import setup # DO NOT use setuptools!!!!!!
import shutil
import os
import sys
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# Hacky, adopted from sklearn. This sets a global variable
# so smrt __init__ can detect if it's being loaded in the setup
# routine, so it won't load submodules that haven't yet been built.
builtins.__SMRT_SETUP__ = True
# metadata
DISTNAME = 'smrt'
DESCRIPTION = 'Handle class imbalance intelligently by using Variational Autoencoders ' \
'to generate synthetic observations of your minority class.'
MAINTAINER = '<NAME>'
MAINTAINER_EMAIL = '<EMAIL>'
LICENSE = 'new BSD'
# import restricted version
import smrt
VERSION = smrt.__version__
# get the installation requirements:
with open('requirements.txt') as req:
REQUIREMENTS = req.read().split(os.linesep)
# Custom clean command to remove build artifacts -- adopted from sklearn
class CleanCommand(clean):
description = "Remove build artifacts from the source tree"
# this is mostly in case we ever add a Cython module to SMRT
def run(self):
clean.run(self)
# Remove c files if we are not within a sdist package
cwd = os.path.abspath(os.path.dirname(__file__))
remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO'))
if remove_c_files:
cython_hash_file = os.path.join(cwd, 'cythonize.dat')
if os.path.exists(cython_hash_file):
os.unlink(cython_hash_file)
print('Will remove generated .c & .so files')
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk(DISTNAME):
for filename in filenames:
if any(filename.endswith(suffix) for suffix in
(".so", ".pyd", ".dll", ".pyc")):
print('Removing file: %s' % filename)
os.unlink(os.path.join(dirpath, filename))
continue
extension = os.path.splitext(filename)[1]
if remove_c_files and extension in ['.c', '.cpp']:
pyx_file = str.replace(filename, extension, '.pyx')
if os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
# this is for FORTRAN modules, which some of my other packages have used in the past...
for dirname in dirnames:
if dirname == '__pycache__' or dirname.endswith('.so.dSYM'):
print('Removing directory: %s' % dirname)
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
def configuration(parent_package='', top_path=None):
# we know numpy is a valid import now
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage(DISTNAME)
return config
def do_setup():
# setup the config
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
version=VERSION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Intended Audience :: Scikit-learn users',
'Programming Language :: Python',
'Topic :: Machine Learning',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2.7'
],
keywords='sklearn scikit-learn tensorflow auto-encoders neural-networks class-imbalance',
# packages=[DISTNAME],
# install_requires=REQUIREMENTS,
cmdclass=cmdclass)
if len(sys.argv) == 1 or (
len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg-info',
'--version',
'clean'))):
# For these actions, NumPy is not required
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
else: # we DO need numpy
try:
from numpy.distutils.core import setup
except ImportError:
raise RuntimeError('Need numpy to build %s' % DISTNAME)
# add the config to the metadata
metadata['configuration'] = configuration
# call setup on the dict
setup(**metadata)
if __name__ == '__main__':
do_setup()
| [
"os.path.exists",
"distutils.command.clean.clean.run",
"distutils.core.setup",
"os.path.join",
"numpy.distutils.misc_util.Configuration",
"os.path.splitext",
"os.path.dirname",
"os.unlink",
"shutil.rmtree",
"os.walk"
] | [((3093, 3138), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['None', 'parent_package', 'top_path'], {}), '(None, parent_package, top_path)\n', (3106, 3138), False, 'from numpy.distutils.misc_util import Configuration\n'), ((5720, 5737), 'distutils.core.setup', 'setup', ([], {}), '(**metadata)\n', (5725, 5737), False, 'from distutils.core import setup\n'), ((1315, 1330), 'distutils.command.clean.clean.run', 'clean.run', (['self'], {}), '(self)\n', (1324, 1330), False, 'from distutils.command.clean import clean\n'), ((1780, 1803), 'os.path.exists', 'os.path.exists', (['"""build"""'], {}), "('build')\n", (1794, 1803), False, 'import os\n'), ((1884, 1901), 'os.walk', 'os.walk', (['DISTNAME'], {}), '(DISTNAME)\n', (1891, 1901), False, 'import os\n'), ((1423, 1448), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1438, 1448), False, 'import os\n'), ((1583, 1617), 'os.path.join', 'os.path.join', (['cwd', '"""cythonize.dat"""'], {}), "(cwd, 'cythonize.dat')\n", (1595, 1617), False, 'import os\n'), ((1633, 1665), 'os.path.exists', 'os.path.exists', (['cython_hash_file'], {}), '(cython_hash_file)\n', (1647, 1665), False, 'import os\n'), ((1817, 1839), 'shutil.rmtree', 'shutil.rmtree', (['"""build"""'], {}), "('build')\n", (1830, 1839), False, 'import shutil\n'), ((1494, 1523), 'os.path.join', 'os.path.join', (['cwd', '"""PKG-INFO"""'], {}), "(cwd, 'PKG-INFO')\n", (1506, 1523), False, 'import os\n'), ((1683, 1710), 'os.unlink', 'os.unlink', (['cython_hash_file'], {}), '(cython_hash_file)\n', (1692, 1710), False, 'import os\n'), ((2240, 2266), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (2256, 2266), False, 'import os\n'), ((2150, 2181), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (2162, 2181), False, 'import os\n'), ((2447, 2478), 'os.path.join', 'os.path.join', (['dirpath', 'pyx_file'], {}), '(dirpath, pyx_file)\n', (2459, 2478), False, 'import os\n'), 
((2858, 2888), 'os.path.join', 'os.path.join', (['dirpath', 'dirname'], {}), '(dirpath, dirname)\n', (2870, 2888), False, 'import os\n'), ((2515, 2546), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (2527, 2546), False, 'import os\n')] |
import numpy
import re
with open('/home/johnny/Documents/navigate_building/source/assets/images_with_plaques.txt') as f:
LIST_OF_POSITIVES = f.read().split('\n')
class ImageDetectionMetadata():
headers = ['label', 'parsed_text', 'found_contour_area', 'ref_contour_area', 'source_image_location', 'image', 'image_has_plaque', 'plaque_found', 'text_matched', 'text_missed', 'text_misread']
def __init__(self):
self.contour_area = numpy.nan
self.reference_area = numpy.nan
self.image = None
self.thresheld_image = None
self.text = []
self.pose_information = None
self.source_image_location = ''
self.plaque_image_location = ''
self.label = None
self.correct_text = False
self.other = {}
def __repr__(self):
return f"""
countour area: {self.contour_area}
reference area: {self.reference_area}
plaque image location: {self.plaque_image_location}
image included: {isinstance(self.image, numpy.ndarray)}
possible text: {', '.join([x for x in self.text])}
pose information: {self.pose_information}
source image location: {self.source_image_location}
other stuff: {' ... '.join([str(k) + ': ' + str(v) for k,v in self.other.items()])}
"""
def to_list(self):
"""
creates explicitly-ordered list of elements in the object
"""
has_plaque = True if self.source_image_location.split('/')[-1] in LIST_OF_POSITIVES else False
plaque_found = True if self.contour_area > 0 else False
self.label = [x.replace('-', '') for x in re.findall(r"-[0-9,a-z]*-", self.source_image_location)]
matched = list(set(self.text) & set(self.label))
missed = list(set(self.label) - set(self.text))
misread = list(set(self.text) - set(self.label))
meta_list = [
self.label,
self.text,
self.contour_area,
self.reference_area,
self.source_image_location,
self.image,
has_plaque,
plaque_found,
matched,
missed,
misread
]
return meta_list
| [
"re.findall"
] | [((1650, 1704), 're.findall', 're.findall', (['"""-[0-9,a-z]*-"""', 'self.source_image_location'], {}), "('-[0-9,a-z]*-', self.source_image_location)\n", (1660, 1704), False, 'import re\n')] |
# INFO : ini merupakan copy source code dari repo one4ubot, dan sudah mendapatkan izin dari pemilik.
# INFO : This is a copy of the source code from the One4ubot repo, and has the permission of the owner.
try:
from userbot.modules.sql_helper import SESSION, BASE
except ImportError:
raise AttributeError
from sqlalchemy import Column, String
class KRead(BASE):
__tablename__ = "kread"
groupid = Column(String(14), primary_key=True)
def __init__(self, sender):
self.groupid = str(sender)
KRead.__table__.create(checkfirst=True)
def is_kread():
try:
return SESSION.query(KRead).all()
except BaseException:
return None
finally:
SESSION.close()
def kread(chat):
adder = KRead(str(chat))
SESSION.add(adder)
SESSION.commit()
def unkread(chat):
rem = SESSION.query(KRead).get((str(chat)))
if rem:
SESSION.delete(rem)
SESSION.commit()
| [
"userbot.modules.sql_helper.SESSION.commit",
"userbot.modules.sql_helper.SESSION.close",
"userbot.modules.sql_helper.SESSION.delete",
"userbot.modules.sql_helper.SESSION.query",
"userbot.modules.sql_helper.SESSION.add",
"sqlalchemy.String"
] | [((765, 783), 'userbot.modules.sql_helper.SESSION.add', 'SESSION.add', (['adder'], {}), '(adder)\n', (776, 783), False, 'from userbot.modules.sql_helper import SESSION, BASE\n'), ((788, 804), 'userbot.modules.sql_helper.SESSION.commit', 'SESSION.commit', ([], {}), '()\n', (802, 804), False, 'from userbot.modules.sql_helper import SESSION, BASE\n'), ((421, 431), 'sqlalchemy.String', 'String', (['(14)'], {}), '(14)\n', (427, 431), False, 'from sqlalchemy import Column, String\n'), ((697, 712), 'userbot.modules.sql_helper.SESSION.close', 'SESSION.close', ([], {}), '()\n', (710, 712), False, 'from userbot.modules.sql_helper import SESSION, BASE\n'), ((894, 913), 'userbot.modules.sql_helper.SESSION.delete', 'SESSION.delete', (['rem'], {}), '(rem)\n', (908, 913), False, 'from userbot.modules.sql_helper import SESSION, BASE\n'), ((922, 938), 'userbot.modules.sql_helper.SESSION.commit', 'SESSION.commit', ([], {}), '()\n', (936, 938), False, 'from userbot.modules.sql_helper import SESSION, BASE\n'), ((836, 856), 'userbot.modules.sql_helper.SESSION.query', 'SESSION.query', (['KRead'], {}), '(KRead)\n', (849, 856), False, 'from userbot.modules.sql_helper import SESSION, BASE\n'), ((603, 623), 'userbot.modules.sql_helper.SESSION.query', 'SESSION.query', (['KRead'], {}), '(KRead)\n', (616, 623), False, 'from userbot.modules.sql_helper import SESSION, BASE\n')] |
#!/usr/bin/env python3
"""Switch variable case.
A function that takes camel cased strings (i.e. ThisIsCamelCased),
and converts them to snake case (i.e. this_is_camel_cased).
"""
import re
def snake_case(input_str: str, camel_case=False) -> str:
"""
Turn camel case into snake case.
:param input_str: String of variable.
:return: Snake case string of input_str.
"""
if camel_case:
regex = r"([A-Z])"
substitution = r'_\1'
else:
regex = r"([\s])"
substitution = r'_'
snake_case = input_str[0].lower() +\
re.sub(regex,
substitution,
input_str[1:]).lower()
return f'{snake_case}'
if __name__ == "__main__":
print(snake_case('ThisIsCamelCased', camel_case=True))
print(snake_case('This includes spaces'))
| [
"re.sub"
] | [((584, 626), 're.sub', 're.sub', (['regex', 'substitution', 'input_str[1:]'], {}), '(regex, substitution, input_str[1:])\n', (590, 626), False, 'import re\n')] |
import numpy as np
import tensorflow as tf
import unittest
hungarian_module = tf.load_op_library("hungarian.so")
class HungarianTests(unittest.TestCase):
def test_min_weighted_bp_cover_1(self):
W = np.array([[3, 2, 2], [1, 2, 0], [2, 2, 1]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
c_0 = c_0.eval()
c_1 = c_1.eval()
c_0_t = np.array([2, 1, 1])
c_1_t = np.array([1, 1, 0])
M_t = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
self.assertTrue((c_0.flatten() == c_0_t.flatten()).all())
self.assertTrue((c_1.flatten() == c_1_t.flatten()).all())
self.assertTrue((M == M_t).all())
pass
def test_min_weighted_bp_cover_2(self):
W = np.array([[5, 0, 4, 0], [0, 4, 6, 8], [4, 0, 5, 7]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
c_0 = c_0.eval()
c_1 = c_1.eval()
c_0_t = np.array([5, 6, 5])
c_1_t = np.array([0, 0, 0, 2])
M_t = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
self.assertTrue((c_0.flatten() == c_0_t.flatten()).all())
self.assertTrue((c_1.flatten() == c_1_t.flatten()).all())
self.assertTrue((M == M_t).all())
def test_min_weighted_bp_cover_3(self):
W = np.array([[5, 0, 2], [3, 1, 0], [0, 5, 0]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
c_0 = c_0.eval()
c_1 = c_1.eval()
c_0_t = np.array([2, 0, 4])
c_1_t = np.array([3, 1, 0])
M_t = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
self.assertTrue((c_0.flatten() == c_0_t.flatten()).all())
self.assertTrue((c_1.flatten() == c_1_t.flatten()).all())
self.assertTrue((M == M_t).all())
def test_min_weighted_bp_cover_4(self):
W = np.array([[[5, 0, 2], [3, 1, 0], [0, 5, 0]], [[3, 2, 2], [1, 2, 0],
[2, 2, 1]]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
c_0 = c_0.eval()
c_1 = c_1.eval()
c_0_t = np.array([[2, 0, 4], [2, 1, 1]])
c_1_t = np.array([[3, 1, 0], [1, 1, 0]])
M_t = np.array([[[0, 0, 1], [1, 0, 0], [0, 1, 0]], [[1, 0, 0], [0, 1, 0],
[0, 0, 1]]])
self.assertTrue((c_0.flatten() == c_0_t.flatten()).all())
self.assertTrue((c_1.flatten() == c_1_t.flatten()).all())
self.assertTrue((M == M_t).all())
def test_real_values_1(self):
# Test the while loop terminates with real values.
W = np.array(
[[0.90, 0.70, 0.30, 0.20, 0.40, 0.001, 0.001, 0.001, 0.001, 0.001],
[0.80, 0.75, 0.92, 0.10, 0.15, 0.001, 0.001, 0.001, 0.001, 0.001],
[0.78, 0.85, 0.66, 0.29, 0.21, 0.001, 0.001, 0.001, 0.001, 0.001],
[0.42, 0.55, 0.23, 0.43, 0.33, 0.002, 0.001, 0.001, 0.001, 0.001],
[0.64, 0.44, 0.33, 0.33, 0.34, 0.001, 0.002, 0.001, 0.001, 0.001],
[0.22, 0.55, 0.43, 0.43, 0.14, 0.001, 0.001, 0.002, 0.001, 0.001],
[0.43, 0.33, 0.34, 0.22, 0.14, 0.001, 0.001, 0.001, 0.002, 0.001],
[0.33, 0.42, 0.23, 0.13, 0.43, 0.001, 0.001, 0.001, 0.001, 0.002],
[0.39, 0.24, 0.53, 0.56, 0.89, 0.001, 0.001, 0.001, 0.001, 0.001],
[0.12, 0.34, 0.82, 0.82, 0.77, 0.001, 0.001, 0.001, 0.001, 0.001]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
M_t = np.array(
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]])
self.assertTrue((M == M_t).all())
def test_real_values_2(self):
W = np.array([[
0.00604139, 0.0126045, 0.0117373, 0.01245, 0.00808836, 0.0162662,
0.0137996, 0.00403898, 0.0123786, 1e-05
], [
0.00604229, 0.0126071, 0.0117400, 0.0124528, 0.00808971, 0.0162703,
0.0138028, 0.00403935, 0.0123812, 1e-05
], [
0.00604234, 0.0126073, 0.0117402, 0.012453, 0.00808980, 0.0162706,
0.0138030, 0.00403937, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
def test_real_values_3(self):
W = np.array([[
0.00302646, 0.00321431, 0.0217552, 0.00836773, 0.0256353, 0.0177026,
0.0289461, 0.0214768, 0.0101898, 1e-05
], [
0.00302875, 0.003217, 0.0217628, 0.00836405, 0.0256229, 0.0177137,
0.0289468, 0.0214719, 0.0101904, 1e-05
], [
0.00302897, 0.00321726, 0.0217636, 0.00836369, 0.0256217, 0.0177148,
0.0289468, 0.0214714, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.0177149,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.0177149,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715,
0.0289468, 0.0214713, 0.0101905, 1e-05
]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
def test_real_values_4(self):
W = np.array([[
1e-05, 0.0634311, 1e-05, 4.76687e-05, 1.00079e-05, 1.00378e-05, 1e-05,
1e-05, 1e-05, 3.9034e-05
], [
1e-05, 3.42696e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1.0122e-05,
3.43236e-05, 1e-05
], [
1e-05, 0.0426792, 0.031155, 1.0008e-05, 0.00483961, 0.0228187, 1e-05,
1e-05, 1e-05, 0.102463
], [
1e-05, 1e-05, 1e-05, 1.07065e-05, 1e-05, 1.00185e-05, 1e-05, 1e-05,
1e-05, 1.00007e-05
], [
1e-05, 4.22947e-05, 0.00062168, 0.623917, 1.03468e-05, 0.00588984,
1.00004e-05, 1.44433e-05, 1.00014e-05, 0.000213425
], [
1e-05, 1.01764e-05, 1e-05, 0.000667249, 1e-05, 0.000485082, 1e-05,
1e-05, 1.00002e-05, 1e-05
], [
1e-05, 1e-05, 1.50331e-05, 1e-05, 0.11269, 1e-05, 1e-05, 1e-05, 1e-05,
1.13251e-05
], [
1.0001e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 0.0246974, 1e-05, 1e-05,
1e-05
], [
1e-05, 2.89144e-05, 1e-05, 1.05147e-05, 1e-05, 0.000894762, 1.03587e-05,
0.150301, 1e-05, 1.00045e-05
], [
1e-05, 3.97901e-05, 1e-05, 1.11641e-05, 1e-05, 2.34249e-05, 1.0007e-05,
2.42828e-05, 1e-05, 1.10529e-05
]])
p = 1e6
W = np.round(W * p) / p
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
def test_real_values_5(self):
W = np.array([[
1.4e-05, 1e-05, 1e-05, 0.053306, 0.044139, 1e-05, 1.2e-05, 1e-05, 1e-05,
1e-05
], [
0.001234, 1e-05, 1e-05, 2.1e-05, 1e-05, 0.001535, 0.019553, 1e-05,
1e-05, 1e-05
], [
0.002148, 1e-05, 1e-05, 1.6e-05, 0.651536, 2e-05, 7.4e-05, 0.002359,
1e-05, 1e-05
], [
3.8e-05, 1e-05, 0.000592, 4.7e-05, 0.09173, 1e-05, 1e-05, 1e-05, 1e-05,
1e-05
], [
1e-05, 1e-05, 1e-05, 0.213736, 1e-05, 4.5e-05, 0.000768, 1e-05, 1e-05,
1e-05
], [
1e-05, 1e-05, 1e-05, 0.317609, 1e-05, 1e-05, 0.002151, 1e-05, 1e-05,
1e-05
], [
0.002802, 1e-05, 1.2e-05, 1e-05, 1e-05, 0.002999, 4.8e-05, 1.1e-05,
0.000919, 1e-05
], [
1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 0.028816, 1e-05
], [
1e-05, 1e-05, 0.047335, 1e-05, 1.2e-05, 1e-05, 1e-05, 1e-05, 1e-05,
1e-05
], [1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05]])
p = 1e6
W = np.round(W * p) / p
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
  def test_real_values_6(self):
    """Smoke test: hungarian() on a degenerate 10x10 matrix whose rows are all identical.

    With every row equal, all assignments cost the same; this exercises the
    op's tie-breaking/termination on a fully degenerate input. NOTE(review):
    no assertions — only checks that the op terminates without error.
    """
    W = np.array([[
        0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
        0.023617, 0.010436, 0.003116
    ], [
        0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
        0.023617, 0.010436, 0.003116
    ], [
        0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
        0.023617, 0.010436, 0.003116
    ], [
        0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
        0.023617, 0.010436, 0.003116
    ], [
        0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
        0.023617, 0.010436, 0.003116
    ], [
        0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
        0.023617, 0.010436, 0.003116
    ], [
        0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
        0.023617, 0.010436, 0.003116
    ], [
        0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
        0.023617, 0.010436, 0.003116
    ], [
        0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
        0.023617, 0.010436, 0.003116
    ], [
        0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
        0.023617, 0.010436, 0.003116
    ]])
    # Quantize W to 6 decimal places (multiply by 1e6, round, divide back).
    p = 1e6
    W = np.round(W * p) / p
    M, c_0, c_1 = hungarian_module.hungarian(W)
    with tf.Session() as sess:
      M = M.eval()  # force the op to actually execute
if __name__ == '__main__':
  # Run the HungarianTests suite with verbose, per-test output.
  loader = unittest.TestLoader()
  runner = unittest.TextTestRunner(verbosity=2)
  runner.run(loader.loadTestsFromTestCase(HungarianTests))
| [
"tensorflow.load_op_library",
"numpy.round",
"tensorflow.Session",
"numpy.array",
"unittest.TextTestRunner",
"unittest.TestLoader"
] | [((78, 112), 'tensorflow.load_op_library', 'tf.load_op_library', (['"""hungarian.so"""'], {}), "('hungarian.so')\n", (96, 112), True, 'import tensorflow as tf\n'), ((207, 250), 'numpy.array', 'np.array', (['[[3, 2, 2], [1, 2, 0], [2, 2, 1]]'], {}), '([[3, 2, 2], [1, 2, 0], [2, 2, 1]])\n', (215, 250), True, 'import numpy as np\n'), ((407, 426), 'numpy.array', 'np.array', (['[2, 1, 1]'], {}), '([2, 1, 1])\n', (415, 426), True, 'import numpy as np\n'), ((439, 458), 'numpy.array', 'np.array', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (447, 458), True, 'import numpy as np\n'), ((469, 512), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (477, 512), True, 'import numpy as np\n'), ((736, 788), 'numpy.array', 'np.array', (['[[5, 0, 4, 0], [0, 4, 6, 8], [4, 0, 5, 7]]'], {}), '([[5, 0, 4, 0], [0, 4, 6, 8], [4, 0, 5, 7]])\n', (744, 788), True, 'import numpy as np\n'), ((945, 964), 'numpy.array', 'np.array', (['[5, 6, 5]'], {}), '([5, 6, 5])\n', (953, 964), True, 'import numpy as np\n'), ((977, 999), 'numpy.array', 'np.array', (['[0, 0, 0, 2]'], {}), '([0, 0, 0, 2])\n', (985, 999), True, 'import numpy as np\n'), ((1010, 1062), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (1018, 1062), True, 'import numpy as np\n'), ((1276, 1319), 'numpy.array', 'np.array', (['[[5, 0, 2], [3, 1, 0], [0, 5, 0]]'], {}), '([[5, 0, 2], [3, 1, 0], [0, 5, 0]])\n', (1284, 1319), True, 'import numpy as np\n'), ((1476, 1495), 'numpy.array', 'np.array', (['[2, 0, 4]'], {}), '([2, 0, 4])\n', (1484, 1495), True, 'import numpy as np\n'), ((1508, 1527), 'numpy.array', 'np.array', (['[3, 1, 0]'], {}), '([3, 1, 0])\n', (1516, 1527), True, 'import numpy as np\n'), ((1538, 1581), 'numpy.array', 'np.array', (['[[0, 0, 1], [1, 0, 0], [0, 1, 0]]'], {}), '([[0, 0, 1], [1, 0, 0], [0, 1, 0]])\n', (1546, 1581), True, 'import numpy as np\n'), ((1795, 1880), 'numpy.array', 
'np.array', (['[[[5, 0, 2], [3, 1, 0], [0, 5, 0]], [[3, 2, 2], [1, 2, 0], [2, 2, 1]]]'], {}), '([[[5, 0, 2], [3, 1, 0], [0, 5, 0]], [[3, 2, 2], [1, 2, 0], [2, 2, 1]]]\n )\n', (1803, 1880), True, 'import numpy as np\n'), ((2086, 2118), 'numpy.array', 'np.array', (['[[2, 0, 4], [2, 1, 1]]'], {}), '([[2, 0, 4], [2, 1, 1]])\n', (2094, 2118), True, 'import numpy as np\n'), ((2131, 2163), 'numpy.array', 'np.array', (['[[3, 1, 0], [1, 1, 0]]'], {}), '([[3, 1, 0], [1, 1, 0]])\n', (2139, 2163), True, 'import numpy as np\n'), ((2174, 2259), 'numpy.array', 'np.array', (['[[[0, 0, 1], [1, 0, 0], [0, 1, 0]], [[1, 0, 0], [0, 1, 0], [0, 0, 1]]]'], {}), '([[[0, 0, 1], [1, 0, 0], [0, 1, 0]], [[1, 0, 0], [0, 1, 0], [0, 0, 1]]]\n )\n', (2182, 2259), True, 'import numpy as np\n'), ((2569, 3283), 'numpy.array', 'np.array', (['[[0.9, 0.7, 0.3, 0.2, 0.4, 0.001, 0.001, 0.001, 0.001, 0.001], [0.8, 0.75, \n 0.92, 0.1, 0.15, 0.001, 0.001, 0.001, 0.001, 0.001], [0.78, 0.85, 0.66,\n 0.29, 0.21, 0.001, 0.001, 0.001, 0.001, 0.001], [0.42, 0.55, 0.23, 0.43,\n 0.33, 0.002, 0.001, 0.001, 0.001, 0.001], [0.64, 0.44, 0.33, 0.33, 0.34,\n 0.001, 0.002, 0.001, 0.001, 0.001], [0.22, 0.55, 0.43, 0.43, 0.14, \n 0.001, 0.001, 0.002, 0.001, 0.001], [0.43, 0.33, 0.34, 0.22, 0.14, \n 0.001, 0.001, 0.001, 0.002, 0.001], [0.33, 0.42, 0.23, 0.13, 0.43, \n 0.001, 0.001, 0.001, 0.001, 0.002], [0.39, 0.24, 0.53, 0.56, 0.89, \n 0.001, 0.001, 0.001, 0.001, 0.001], [0.12, 0.34, 0.82, 0.82, 0.77, \n 0.001, 0.001, 0.001, 0.001, 0.001]]'], {}), '([[0.9, 0.7, 0.3, 0.2, 0.4, 0.001, 0.001, 0.001, 0.001, 0.001], [\n 0.8, 0.75, 0.92, 0.1, 0.15, 0.001, 0.001, 0.001, 0.001, 0.001], [0.78, \n 0.85, 0.66, 0.29, 0.21, 0.001, 0.001, 0.001, 0.001, 0.001], [0.42, 0.55,\n 0.23, 0.43, 0.33, 0.002, 0.001, 0.001, 0.001, 0.001], [0.64, 0.44, 0.33,\n 0.33, 0.34, 0.001, 0.002, 0.001, 0.001, 0.001], [0.22, 0.55, 0.43, 0.43,\n 0.14, 0.001, 0.001, 0.002, 0.001, 0.001], [0.43, 0.33, 0.34, 0.22, 0.14,\n 0.001, 0.001, 0.001, 0.002, 0.001], [0.33, 
0.42, 0.23, 0.13, 0.43, \n 0.001, 0.001, 0.001, 0.001, 0.002], [0.39, 0.24, 0.53, 0.56, 0.89, \n 0.001, 0.001, 0.001, 0.001, 0.001], [0.12, 0.34, 0.82, 0.82, 0.77, \n 0.001, 0.001, 0.001, 0.001, 0.001]])\n', (2577, 3283), True, 'import numpy as np\n'), ((3448, 3795), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, \n 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0,\n 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0,\n 0, 0, 1, 0, 0, 0, 0, 0, 0]]'], {}), '([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [\n 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0,\n 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0, 0, 0, 0,\n 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]])\n', (3456, 3795), True, 'import numpy as np\n'), ((3903, 5081), 'numpy.array', 'np.array', (['[[0.00604139, 0.0126045, 0.0117373, 0.01245, 0.00808836, 0.0162662, \n 0.0137996, 0.00403898, 0.0123786, 1e-05], [0.00604229, 0.0126071, \n 0.01174, 0.0124528, 0.00808971, 0.0162703, 0.0138028, 0.00403935, \n 0.0123812, 1e-05], [0.00604234, 0.0126073, 0.0117402, 0.012453, \n 0.0080898, 0.0162706, 0.013803, 0.00403937, 0.0123814, 1e-05], [\n 0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706, \n 0.013803, 0.00403938, 0.0123814, 1e-05], [0.00604235, 0.0126073, \n 0.0117402, 0.012453, 0.00808981, 0.0162706, 0.013803, 0.00403938, \n 0.0123814, 1e-05], [0.00604235, 0.0126073, 0.0117402, 0.012453, \n 0.00808981, 0.0162706, 0.013803, 0.00403938, 0.0123814, 1e-05], [\n 0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706, \n 0.013803, 0.00403938, 0.0123814, 1e-05], [0.00604235, 0.0126073, \n 0.0117402, 0.012453, 0.00808981, 0.0162706, 0.013803, 0.00403938, \n 0.0123814, 1e-05], [0.00604235, 0.0126073, 
0.0117402, 0.012453, \n 0.00808981, 0.0162706, 0.013803, 0.00403938, 0.0123814, 1e-05], [\n 0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706, \n 0.013803, 0.00403938, 0.0123814, 1e-05]]'], {}), '([[0.00604139, 0.0126045, 0.0117373, 0.01245, 0.00808836, 0.0162662,\n 0.0137996, 0.00403898, 0.0123786, 1e-05], [0.00604229, 0.0126071, \n 0.01174, 0.0124528, 0.00808971, 0.0162703, 0.0138028, 0.00403935, \n 0.0123812, 1e-05], [0.00604234, 0.0126073, 0.0117402, 0.012453, \n 0.0080898, 0.0162706, 0.013803, 0.00403937, 0.0123814, 1e-05], [\n 0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706, \n 0.013803, 0.00403938, 0.0123814, 1e-05], [0.00604235, 0.0126073, \n 0.0117402, 0.012453, 0.00808981, 0.0162706, 0.013803, 0.00403938, \n 0.0123814, 1e-05], [0.00604235, 0.0126073, 0.0117402, 0.012453, \n 0.00808981, 0.0162706, 0.013803, 0.00403938, 0.0123814, 1e-05], [\n 0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706, \n 0.013803, 0.00403938, 0.0123814, 1e-05], [0.00604235, 0.0126073, \n 0.0117402, 0.012453, 0.00808981, 0.0162706, 0.013803, 0.00403938, \n 0.0123814, 1e-05], [0.00604235, 0.0126073, 0.0117402, 0.012453, \n 0.00808981, 0.0162706, 0.013803, 0.00403938, 0.0123814, 1e-05], [\n 0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706, \n 0.013803, 0.00403938, 0.0123814, 1e-05]])\n', (3911, 5081), True, 'import numpy as np\n'), ((5373, 6544), 'numpy.array', 'np.array', (['[[0.00302646, 0.00321431, 0.0217552, 0.00836773, 0.0256353, 0.0177026, \n 0.0289461, 0.0214768, 0.0101898, 1e-05], [0.00302875, 0.003217, \n 0.0217628, 0.00836405, 0.0256229, 0.0177137, 0.0289468, 0.0214719, \n 0.0101904, 1e-05], [0.00302897, 0.00321726, 0.0217636, 0.00836369, \n 0.0256217, 0.0177148, 0.0289468, 0.0214714, 0.0101905, 1e-05], [\n 0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.0177149, \n 0.0289468, 0.0214713, 0.0101905, 1e-05], [0.003029, 0.0032173, \n 0.0217637, 0.00836364, 0.0256216, 0.0177149, 0.0289468, 0.0214713, \n 
0.0101905, 1e-05], [0.003029, 0.0032173, 0.0217637, 0.00836364, \n 0.0256216, 0.017715, 0.0289468, 0.0214713, 0.0101905, 1e-05], [0.003029,\n 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715, 0.0289468, \n 0.0214713, 0.0101905, 1e-05], [0.003029, 0.0032173, 0.0217637, \n 0.00836364, 0.0256216, 0.017715, 0.0289468, 0.0214713, 0.0101905, 1e-05\n ], [0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715, \n 0.0289468, 0.0214713, 0.0101905, 1e-05], [0.003029, 0.0032173, \n 0.0217637, 0.00836364, 0.0256216, 0.017715, 0.0289468, 0.0214713, \n 0.0101905, 1e-05]]'], {}), '([[0.00302646, 0.00321431, 0.0217552, 0.00836773, 0.0256353, \n 0.0177026, 0.0289461, 0.0214768, 0.0101898, 1e-05], [0.00302875, \n 0.003217, 0.0217628, 0.00836405, 0.0256229, 0.0177137, 0.0289468, \n 0.0214719, 0.0101904, 1e-05], [0.00302897, 0.00321726, 0.0217636, \n 0.00836369, 0.0256217, 0.0177148, 0.0289468, 0.0214714, 0.0101905, \n 1e-05], [0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, \n 0.0177149, 0.0289468, 0.0214713, 0.0101905, 1e-05], [0.003029, \n 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.0177149, 0.0289468, \n 0.0214713, 0.0101905, 1e-05], [0.003029, 0.0032173, 0.0217637, \n 0.00836364, 0.0256216, 0.017715, 0.0289468, 0.0214713, 0.0101905, 1e-05\n ], [0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715, \n 0.0289468, 0.0214713, 0.0101905, 1e-05], [0.003029, 0.0032173, \n 0.0217637, 0.00836364, 0.0256216, 0.017715, 0.0289468, 0.0214713, \n 0.0101905, 1e-05], [0.003029, 0.0032173, 0.0217637, 0.00836364, \n 0.0256216, 0.017715, 0.0289468, 0.0214713, 0.0101905, 1e-05], [0.003029,\n 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715, 0.0289468, \n 0.0214713, 0.0101905, 1e-05]])\n', (5381, 6544), True, 'import numpy as np\n'), ((6825, 7870), 'numpy.array', 'np.array', (['[[1e-05, 0.0634311, 1e-05, 4.76687e-05, 1.00079e-05, 1.00378e-05, 1e-05, \n 1e-05, 1e-05, 3.9034e-05], [1e-05, 3.42696e-05, 1e-05, 1e-05, 1e-05, \n 1e-05, 1e-05, 1.0122e-05, 3.43236e-05, 
1e-05], [1e-05, 0.0426792, \n 0.031155, 1.0008e-05, 0.00483961, 0.0228187, 1e-05, 1e-05, 1e-05, \n 0.102463], [1e-05, 1e-05, 1e-05, 1.07065e-05, 1e-05, 1.00185e-05, 1e-05,\n 1e-05, 1e-05, 1.00007e-05], [1e-05, 4.22947e-05, 0.00062168, 0.623917, \n 1.03468e-05, 0.00588984, 1.00004e-05, 1.44433e-05, 1.00014e-05, \n 0.000213425], [1e-05, 1.01764e-05, 1e-05, 0.000667249, 1e-05, \n 0.000485082, 1e-05, 1e-05, 1.00002e-05, 1e-05], [1e-05, 1e-05, \n 1.50331e-05, 1e-05, 0.11269, 1e-05, 1e-05, 1e-05, 1e-05, 1.13251e-05],\n [1.0001e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 0.0246974, 1e-05, 1e-05,\n 1e-05], [1e-05, 2.89144e-05, 1e-05, 1.05147e-05, 1e-05, 0.000894762, \n 1.03587e-05, 0.150301, 1e-05, 1.00045e-05], [1e-05, 3.97901e-05, 1e-05,\n 1.11641e-05, 1e-05, 2.34249e-05, 1.0007e-05, 2.42828e-05, 1e-05, \n 1.10529e-05]]'], {}), '([[1e-05, 0.0634311, 1e-05, 4.76687e-05, 1.00079e-05, 1.00378e-05, \n 1e-05, 1e-05, 1e-05, 3.9034e-05], [1e-05, 3.42696e-05, 1e-05, 1e-05, \n 1e-05, 1e-05, 1e-05, 1.0122e-05, 3.43236e-05, 1e-05], [1e-05, 0.0426792,\n 0.031155, 1.0008e-05, 0.00483961, 0.0228187, 1e-05, 1e-05, 1e-05, \n 0.102463], [1e-05, 1e-05, 1e-05, 1.07065e-05, 1e-05, 1.00185e-05, 1e-05,\n 1e-05, 1e-05, 1.00007e-05], [1e-05, 4.22947e-05, 0.00062168, 0.623917, \n 1.03468e-05, 0.00588984, 1.00004e-05, 1.44433e-05, 1.00014e-05, \n 0.000213425], [1e-05, 1.01764e-05, 1e-05, 0.000667249, 1e-05, \n 0.000485082, 1e-05, 1e-05, 1.00002e-05, 1e-05], [1e-05, 1e-05, \n 1.50331e-05, 1e-05, 0.11269, 1e-05, 1e-05, 1e-05, 1e-05, 1.13251e-05],\n [1.0001e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 0.0246974, 1e-05, 1e-05,\n 1e-05], [1e-05, 2.89144e-05, 1e-05, 1.05147e-05, 1e-05, 0.000894762, \n 1.03587e-05, 0.150301, 1e-05, 1.00045e-05], [1e-05, 3.97901e-05, 1e-05,\n 1.11641e-05, 1e-05, 2.34249e-05, 1.0007e-05, 2.42828e-05, 1e-05, \n 1.10529e-05]])\n', (6833, 7870), True, 'import numpy as np\n'), ((8206, 9066), 'numpy.array', 'np.array', (['[[1.4e-05, 1e-05, 1e-05, 0.053306, 0.044139, 1e-05, 1.2e-05, 
1e-05, 1e-05, \n 1e-05], [0.001234, 1e-05, 1e-05, 2.1e-05, 1e-05, 0.001535, 0.019553, \n 1e-05, 1e-05, 1e-05], [0.002148, 1e-05, 1e-05, 1.6e-05, 0.651536, 2e-05,\n 7.4e-05, 0.002359, 1e-05, 1e-05], [3.8e-05, 1e-05, 0.000592, 4.7e-05, \n 0.09173, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05], [1e-05, 1e-05, 1e-05, \n 0.213736, 1e-05, 4.5e-05, 0.000768, 1e-05, 1e-05, 1e-05], [1e-05, 1e-05,\n 1e-05, 0.317609, 1e-05, 1e-05, 0.002151, 1e-05, 1e-05, 1e-05], [\n 0.002802, 1e-05, 1.2e-05, 1e-05, 1e-05, 0.002999, 4.8e-05, 1.1e-05, \n 0.000919, 1e-05], [1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, \n 1e-05, 0.028816, 1e-05], [1e-05, 1e-05, 0.047335, 1e-05, 1.2e-05, 1e-05,\n 1e-05, 1e-05, 1e-05, 1e-05], [1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05,\n 1e-05, 1e-05, 1e-05, 1e-05]]'], {}), '([[1.4e-05, 1e-05, 1e-05, 0.053306, 0.044139, 1e-05, 1.2e-05, 1e-05,\n 1e-05, 1e-05], [0.001234, 1e-05, 1e-05, 2.1e-05, 1e-05, 0.001535, \n 0.019553, 1e-05, 1e-05, 1e-05], [0.002148, 1e-05, 1e-05, 1.6e-05, \n 0.651536, 2e-05, 7.4e-05, 0.002359, 1e-05, 1e-05], [3.8e-05, 1e-05, \n 0.000592, 4.7e-05, 0.09173, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05], [1e-05,\n 1e-05, 1e-05, 0.213736, 1e-05, 4.5e-05, 0.000768, 1e-05, 1e-05, 1e-05],\n [1e-05, 1e-05, 1e-05, 0.317609, 1e-05, 1e-05, 0.002151, 1e-05, 1e-05, \n 1e-05], [0.002802, 1e-05, 1.2e-05, 1e-05, 1e-05, 0.002999, 4.8e-05, \n 1.1e-05, 0.000919, 1e-05], [1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, \n 1e-05, 1e-05, 0.028816, 1e-05], [1e-05, 1e-05, 0.047335, 1e-05, 1.2e-05,\n 1e-05, 1e-05, 1e-05, 1e-05, 1e-05], [1e-05, 1e-05, 1e-05, 1e-05, 1e-05,\n 1e-05, 1e-05, 1e-05, 1e-05, 1e-05]])\n', (8214, 9066), True, 'import numpy as np\n'), ((9387, 10453), 'numpy.array', 'np.array', (['[[0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743, \n 0.023617, 0.010436, 0.003116], [0.003408, 0.010531, 0.002795, 1e-05, \n 0.019786, 0.010435, 0.002743, 0.023617, 0.010436, 0.003116], [0.003408,\n 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743, 0.023617, \n 
0.010436, 0.003116], [0.003408, 0.010531, 0.002795, 1e-05, 0.019786, \n 0.010435, 0.002743, 0.023617, 0.010436, 0.003116], [0.003408, 0.010531,\n 0.002795, 1e-05, 0.019786, 0.010435, 0.002743, 0.023617, 0.010436, \n 0.003116], [0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, \n 0.002743, 0.023617, 0.010436, 0.003116], [0.003408, 0.010531, 0.002795,\n 1e-05, 0.019786, 0.010435, 0.002743, 0.023617, 0.010436, 0.003116], [\n 0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743, \n 0.023617, 0.010436, 0.003116], [0.003408, 0.010531, 0.002795, 1e-05, \n 0.019786, 0.010435, 0.002743, 0.023617, 0.010436, 0.003116], [0.003408,\n 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743, 0.023617, \n 0.010436, 0.003116]]'], {}), '([[0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, \n 0.002743, 0.023617, 0.010436, 0.003116], [0.003408, 0.010531, 0.002795,\n 1e-05, 0.019786, 0.010435, 0.002743, 0.023617, 0.010436, 0.003116], [\n 0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743, \n 0.023617, 0.010436, 0.003116], [0.003408, 0.010531, 0.002795, 1e-05, \n 0.019786, 0.010435, 0.002743, 0.023617, 0.010436, 0.003116], [0.003408,\n 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743, 0.023617, \n 0.010436, 0.003116], [0.003408, 0.010531, 0.002795, 1e-05, 0.019786, \n 0.010435, 0.002743, 0.023617, 0.010436, 0.003116], [0.003408, 0.010531,\n 0.002795, 1e-05, 0.019786, 0.010435, 0.002743, 0.023617, 0.010436, \n 0.003116], [0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, \n 0.002743, 0.023617, 0.010436, 0.003116], [0.003408, 0.010531, 0.002795,\n 1e-05, 0.019786, 0.010435, 0.002743, 0.023617, 0.010436, 0.003116], [\n 0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743, \n 0.023617, 0.010436, 0.003116]])\n', (9395, 10453), True, 'import numpy as np\n'), ((308, 320), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (318, 320), True, 'import tensorflow as tf\n'), ((846, 858), 'tensorflow.Session', 'tf.Session', ([], 
{}), '()\n', (856, 858), True, 'import tensorflow as tf\n'), ((1377, 1389), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1387, 1389), True, 'import tensorflow as tf\n'), ((1987, 1999), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1997, 1999), True, 'import tensorflow as tf\n'), ((3397, 3409), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3407, 3409), True, 'import tensorflow as tf\n'), ((5291, 5303), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5301, 5303), True, 'import tensorflow as tf\n'), ((6743, 6755), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (6753, 6755), True, 'import tensorflow as tf\n'), ((8047, 8062), 'numpy.round', 'np.round', (['(W * p)'], {}), '(W * p)\n', (8055, 8062), True, 'import numpy as np\n'), ((8124, 8136), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (8134, 8136), True, 'import tensorflow as tf\n'), ((9228, 9243), 'numpy.round', 'np.round', (['(W * p)'], {}), '(W * p)\n', (9236, 9243), True, 'import numpy as np\n'), ((9305, 9317), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (9315, 9317), True, 'import tensorflow as tf\n'), ((10629, 10644), 'numpy.round', 'np.round', (['(W * p)'], {}), '(W * p)\n', (10637, 10644), True, 'import numpy as np\n'), ((10706, 10718), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (10716, 10718), True, 'import tensorflow as tf\n'), ((10786, 10807), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (10805, 10807), False, 'import unittest\n'), ((10848, 10884), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (10871, 10884), False, 'import unittest\n')] |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import xarray as xr
sns.set()
def plot_range(xlabel, ylabel, title, x, values):
    """Draw ``values`` against ``x`` as a red line on an 8x2-inch figure.

    ``x`` and ``values`` should have the same size.
    """
    plt.plot(x, values, 'r-', linewidth=2)
    figure = plt.gcf()
    figure.set_size_inches(8, 2)
    axes = plt.gca()
    axes.set_title(title)
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)
def plot_year_multi(*args):
    """Plot several year-long series on a single axes.

    Every argument is an iterable of 2 elements ``(value, label)`` where
    ``value`` is an array of 365 daily samples.
    """
    figure = plt.figure(figsize=(8, 3))
    axes = figure.add_subplot(1, 1, 1)
    days = np.arange(1, 366, 1)
    for value, label in args:
        axes.plot(days, value, linewidth=2, label=label)
    axes.set_ylabel(r'$values$')
    axes.set_xlabel(r'$days$')
    axes.legend(loc='best')
def extract_alk(data_train):
    """Extract the 'B_C_Alk' series for calendar year 2011 at one depth.

    ``data_train`` is a pair: the dataset path/identifier (``data_train[0]``)
    and the ``z`` depth of the layer to select (``data_train[1]``).
    Returns the selection as a DataFrame with the time index reset to a column.
    """
    path, depth = data_train[0], data_train[1]
    dataset = xr.open_dataset(path)
    frame = dataset['B_C_Alk'].to_dataframe()
    layer = frame.groupby('z').get_group(depth)
    year_2011 = layer.loc['2011-01-01':'2011-12-31']
    return year_2011.reset_index()
def show_alk(data_train):
    """Plot each ``(frame, label)`` pair in ``data_train`` as B_C_Alk over time.

    Every item supplies a DataFrame with 'time' and 'B_C_Alk' columns plus a
    legend label; the figure is displayed immediately.
    """
    figure = plt.figure(figsize=(10, 2))
    axes = figure.add_subplot(1, 1, 1)
    for frame, label in data_train:
        axes.plot(frame['time'], frame['B_C_Alk'], linewidth=2, label=label)
    axes.legend(loc='best')
    axes.set_title('Alkalinity in the surface layer')
    plt.show()
if __name__ == '__main__':
print('This is a plot functions module') | [
"seaborn.set",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"xarray.open_dataset",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((93, 102), 'seaborn.set', 'sns.set', ([], {}), '()\n', (100, 102), True, 'import seaborn as sns\n'), ((209, 247), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'values', '"""r-"""'], {'linewidth': '(2)'}), "(x, values, 'r-', linewidth=2)\n", (217, 247), True, 'import matplotlib.pyplot as plt\n'), ((288, 304), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (297, 304), True, 'import matplotlib.pyplot as plt\n'), ((309, 327), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (319, 327), True, 'import matplotlib.pyplot as plt\n'), ((332, 350), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (342, 350), True, 'import matplotlib.pyplot as plt\n'), ((509, 535), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 3)'}), '(figsize=(8, 3))\n', (519, 535), True, 'import matplotlib.pyplot as plt\n'), ((579, 599), 'numpy.arange', 'np.arange', (['(1)', '(366)', '(1)'], {}), '(1, 366, 1)\n', (588, 599), True, 'import numpy as np\n'), ((803, 833), 'xarray.open_dataset', 'xr.open_dataset', (['data_train[0]'], {}), '(data_train[0])\n', (818, 833), True, 'import xarray as xr\n'), ((1073, 1100), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 2)'}), '(figsize=(10, 2))\n', (1083, 1100), True, 'import matplotlib.pyplot as plt\n'), ((1342, 1352), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1350, 1352), True, 'import matplotlib.pyplot as plt\n'), ((252, 261), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (259, 261), True, 'import matplotlib.pyplot as plt\n')] |
import pandas as pd
import numpy as np
import math
from nltk.stem.snowball import SnowballStemmer
def add_prefix(prefix, series):
    """
    Return a pandas Series with ``prefix`` prepended to every element.

    Elements are converted with ``str()`` before concatenation.

    :param prefix: str to prepend
    :param series: pd.Series
    :return: pd.Series
    :raises TypeError: if ``prefix`` is not a str or ``series`` is not a pd.Series
    """
    if not isinstance(prefix, str):
        # repr() keeps the message valid for any argument type; the original
        # ``prefix + '...'`` concatenation itself raised an unrelated
        # TypeError for non-str arguments (e.g. ints), hiding the message.
        raise TypeError(repr(prefix) + ' is not of type str')
    if not isinstance(series, pd.Series):
        raise TypeError(repr(series) + ' is not of type pd.Series')
    return series.apply(lambda x: prefix + str(x))
def add_suffix(suffix, series):
    """
    Return a pandas Series with ``suffix`` appended to every element.

    Elements are converted with ``str()`` before concatenation.

    :param suffix: str to append
    :param series: pd.Series
    :return: pd.Series
    :raises TypeError: if ``suffix`` is not a str or ``series`` is not a pd.Series
    """
    if not isinstance(suffix, str):
        # repr() keeps the message valid for any argument type; the original
        # ``suffix + '...'`` concatenation itself raised an unrelated
        # TypeError for non-str arguments, hiding the intended message.
        raise TypeError(repr(suffix) + ' is not of type str')
    if not isinstance(series, pd.Series):
        raise TypeError(repr(series) + ' is not of type pd.Series')
    return series.apply(lambda x: str(x) + suffix)
def strip_whitespace(series):
    """
    Return a pandas Series with surrounding whitespace removed from every
    str element; non-str elements are passed through unchanged.

    :param series: pd.Series
    :return: pd.Series
    :raises TypeError: if ``series`` is not a pd.Series
    """
    if not isinstance(series, pd.Series):
        # repr() keeps the message valid for any argument type; the original
        # ``series + '...'`` concatenation raised its own TypeError for
        # non-str arguments (e.g. lists), hiding the intended message.
        raise TypeError(repr(series) + ' is not of type pd.Series')
    return series.apply(lambda x: x.strip() if isinstance(x, str) else x)
def string_to_float(series):
    """
    Return a pandas Series with every str element converted to float;
    non-str elements are returned unchanged.

    :param series: pd.Series
    :return: pd.Series
    :raises TypeError: if ``series`` is not a pd.Series
    :raises ValueError: if a str element does not parse as a float
    """
    if not isinstance(series, pd.Series):
        # repr() keeps the message valid for any argument type; the original
        # ``series + '...'`` concatenation raised its own TypeError for
        # non-str arguments, hiding the intended message.
        raise TypeError(repr(series) + ' is not of type pd.Series')
    return series.apply(lambda x: float(x) if isinstance(x, str) else x)
def remove_string(string_to_remove, series):
    """
    Return a pandas Series with every occurrence of ``string_to_remove``
    deleted from each str element; runs of whitespace left behind by the
    removal are collapsed to single spaces. Non-str elements pass through
    unchanged.

    :param string_to_remove: str
    :param series: pd.Series
    :return: pd.Series
    :raises TypeError: if arguments have the wrong types
    """
    if not isinstance(string_to_remove, str):
        # repr() keeps the message valid for any argument type; the original
        # string concatenation raised its own TypeError for non-str
        # arguments, hiding the intended message.
        raise TypeError(repr(string_to_remove) + ' is not of type str')
    if not isinstance(series, pd.Series):
        raise TypeError(repr(series) + ' is not of type pd.Series')
    # split()/join normalizes the whitespace gaps created by the removal.
    return series.apply(
        lambda x: ' '.join(x.replace(string_to_remove, '').split()) if isinstance(x, str) else x)
def replace_string_with_nan(string_to_replace, series):
    """
    Return a pandas Series with np.nan substituted for every element whose
    ``str()`` form equals ``string_to_replace``.

    :param string_to_replace: str
    :param series: pd.Series
    :return: pd.Series
    :raises TypeError: if arguments have the wrong types
    """
    if not isinstance(string_to_replace, str):
        # repr() keeps the message valid for any argument type; the original
        # string concatenation raised its own TypeError for non-str
        # arguments, hiding the intended message.
        raise TypeError(repr(string_to_replace) + ' is not of type str')
    if not isinstance(series, pd.Series):
        raise TypeError(repr(series) + ' is not of type pd.Series')
    return series.apply(lambda x: np.nan if str(x) == string_to_replace else x)
def replace_nan_with_string(string_to_replace_nan, series):
    """
    Return a pandas Series with ``string_to_replace_nan`` substituted for
    every float NaN element; all other elements pass through unchanged.

    :param string_to_replace_nan: str
    :param series: pd.Series
    :return: pd.Series
    :raises TypeError: if arguments have the wrong types
    """
    if not isinstance(string_to_replace_nan, str):
        # repr() keeps the message valid for any argument type; the original
        # string concatenation raised its own TypeError for non-str
        # arguments, hiding the intended message.
        raise TypeError(repr(string_to_replace_nan) + ' is not of type str')
    if not isinstance(series, pd.Series):
        raise TypeError(repr(series) + ' is not of type pd.Series')
    # isinstance(x, float) also covers np.float64 (a float subclass), matching
    # the original (type(x) == np.float64 or type(x) == float) check.
    return series.apply(
        lambda x: string_to_replace_nan if isinstance(x, float) and math.isnan(x) else x)
def like_float_to_int(series):
    """
    Return a pandas Series with float-like elements converted to int.

    Handles actual floats and strings carrying a float representation
    (e.g. '3.0'); anything that cannot be converted — non-numeric strings,
    NaN/inf floats, other types — is returned unchanged.

    :param series: pd.Series
    :return: pd.Series
    :raises TypeError: if ``series`` is not a pd.Series
    """
    if not isinstance(series, pd.Series):
        # repr() keeps the message valid for any argument type; the original
        # string concatenation raised its own TypeError for non-str
        # arguments, hiding the intended message.
        raise TypeError(repr(series) + ' is not of type pd.Series')

    def robust_float_to_int(x):
        if isinstance(x, str):
            try:
                return int(float(x))
            # Was a bare ``except``: catch only real conversion failures —
            # ValueError for non-numeric text/NaN, OverflowError for 'inf'.
            except (ValueError, OverflowError):
                return x
        if isinstance(x, float):
            # NaN/inf cannot become int; pass them through instead of
            # raising (the original crashed on int() of a NaN float).
            return int(x) if math.isfinite(x) else x
        return x

    return series.apply(robust_float_to_int)
def stem_variable(series):
"""
Stem the text
:param series: pd.Series
:return: pd.Series
"""
stemmer = SnowballStemmer('english')
return series.map(lambda x: ' '.join([stemmer.stem(y) for y in x.decode('utf-8').split(' ')])) | [
"nltk.stem.snowball.SnowballStemmer",
"math.isnan"
] | [((3737, 3763), 'nltk.stem.snowball.SnowballStemmer', 'SnowballStemmer', (['"""english"""'], {}), "('english')\n", (3752, 3763), False, 'from nltk.stem.snowball import SnowballStemmer\n'), ((2990, 3003), 'math.isnan', 'math.isnan', (['x'], {}), '(x)\n', (3000, 3003), False, 'import math\n')] |
# Smoke test for PocketSphinx: stream a sample WAV file and print the words
# recognized in each phrase segment.
import os
from pocketsphinx import AudioFile
from pocketsphinx import Pocketsphinx
from src import util
# Sample media paths; requires the DATA_PATH environment variable to be set.
# NOTE(review): test_video, fps and util are unused in this snippet — confirm
# whether they are leftovers from an earlier experiment.
test_video = os.environ['DATA_PATH'] + "/other/sphinx_test_video/beachball.mp4"
test_audio = os.environ['DATA_PATH'] + "/other/sphinx_test_audio/interview.wav"
fps = 100 # default
# Iterating an AudioFile yields decoded phrases; print one line of words each.
audio_file = AudioFile(audio_file=test_audio, frate=100)
for phrase in audio_file: # frate (default=100)
    print(" ".join([s.word for s in phrase.seg()]))
#print(Pocketsphinx().decode(audio_file=test_audio))
# Observed (low-quality) transcription of the sample interview:
# i'm home i'm a i'm won't home all cool i'm wall long and lulu move up at bay back when when i'm i'm home
| [
"pocketsphinx.AudioFile"
] | [((301, 344), 'pocketsphinx.AudioFile', 'AudioFile', ([], {'audio_file': 'test_audio', 'frate': '(100)'}), '(audio_file=test_audio, frate=100)\n', (310, 344), False, 'from pocketsphinx import AudioFile\n')] |
# Run a trained MMDetection model over a folder of tomato test images and save
# the rendered detection results to disk.
from mmdet.apis import init_detector, inference_detector, show_result
import mmcv
import os
import argparse
import numpy as np
from tqdm import tqdm
parser = argparse.ArgumentParser(description='Test different models')
parser.add_argument('--epoch', type=str, default="latest", help='dataset version')
parser.add_argument('--gpu', type=str, default='0', help='GPU ID')
args = parser.parse_args()
# Restrict CUDA to the requested device before any GPU work happens.
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
print("Visible GPU ID: %s" % args.gpu)
## Image sources
# NOTE(review): 'data_soure' is misspelled (data_source); renaming would touch
# every use below, so it is only flagged here.
data_soure = '/data/quan/tomato_classification/Tomato_test_smaller'
save_result = '/data/quan/tomato_classification/result_Joe'
images = [os.path.join(data_soure, x) for x in os.listdir(data_soure)]
# np.random.shuffle(images)
## Load configuration and trained model
config_name = 'tomato_faster_rcnn_x101_64x4d_fpn_1x'
config_file = 'configs/' + config_name + '.py'
# Pick either the most recent checkpoint or one from a specific epoch.
if(args.epoch=="latest"):
    checkpoint_file = '/home/quan/WorkSpace/mmdetection_new/work_dirs/' + config_name + '/latest.pth'
else:
    checkpoint_file = '/home/quan/WorkSpace/mmdetection_new/work_dirs/' + config_name + '/epoch_%s.pth' % args.epoch
# build the model from a config file and a checkpoint file
model = init_detector(config_file, checkpoint_file, device='cuda')
## Test the model
for file_path in tqdm(images[:200]):
result = inference_detector(model, file_path)
_, img_name = os.path.split(file_path)
show_result(file_path, result, model.CLASSES, show=False, out_file=os.path.join(save_result, img_name)) | [
"os.listdir",
"argparse.ArgumentParser",
"mmdet.apis.init_detector",
"tqdm.tqdm",
"os.path.join",
"os.path.split",
"mmdet.apis.inference_detector"
] | [((166, 226), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test different models"""'}), "(description='Test different models')\n", (189, 226), False, 'import argparse\n'), ((1216, 1274), 'mmdet.apis.init_detector', 'init_detector', (['config_file', 'checkpoint_file'], {'device': '"""cuda"""'}), "(config_file, checkpoint_file, device='cuda')\n", (1229, 1274), False, 'from mmdet.apis import init_detector, inference_detector, show_result\n'), ((1314, 1332), 'tqdm.tqdm', 'tqdm', (['images[:200]'], {}), '(images[:200])\n', (1318, 1332), False, 'from tqdm import tqdm\n'), ((663, 690), 'os.path.join', 'os.path.join', (['data_soure', 'x'], {}), '(data_soure, x)\n', (675, 690), False, 'import os\n'), ((1345, 1381), 'mmdet.apis.inference_detector', 'inference_detector', (['model', 'file_path'], {}), '(model, file_path)\n', (1363, 1381), False, 'from mmdet.apis import init_detector, inference_detector, show_result\n'), ((1398, 1422), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (1411, 1422), False, 'import os\n'), ((700, 722), 'os.listdir', 'os.listdir', (['data_soure'], {}), '(data_soure)\n', (710, 722), False, 'import os\n'), ((1494, 1529), 'os.path.join', 'os.path.join', (['save_result', 'img_name'], {}), '(save_result, img_name)\n', (1506, 1529), False, 'import os\n')] |
from cartodb_services.refactor.storage.redis_connection_config import RedisMetadataConnectionConfigBuilder
from cartodb_services.refactor.storage.redis_connection import RedisConnectionBuilder
from cartodb_services.refactor.storage.redis_config import RedisUserConfigStorageBuilder
class UserConfigBackendFactory(object):
    """
    Abstracts the creation of a user configuration backend.

    Returns an implementation of ConfigBackendInterface appropriate to the
    user, depending on the environment.
    """

    def __init__(self, username, environment, server_config_backend):
        self._username = username
        self._environment = environment
        self._server_config_backend = server_config_backend

    def get(self):
        # On-premise installs read user configuration straight from the
        # server config backend; everywhere else it lives in Redis.
        if self._environment.is_onpremise:
            return self._server_config_backend
        connection_config = RedisMetadataConnectionConfigBuilder(self._server_config_backend).get()
        connection = RedisConnectionBuilder(connection_config).get()
        return RedisUserConfigStorageBuilder(connection, self._username).get()
| [
"cartodb_services.refactor.storage.redis_config.RedisUserConfigStorageBuilder",
"cartodb_services.refactor.storage.redis_connection.RedisConnectionBuilder",
"cartodb_services.refactor.storage.redis_connection_config.RedisMetadataConnectionConfigBuilder"
] | [((927, 992), 'cartodb_services.refactor.storage.redis_connection_config.RedisMetadataConnectionConfigBuilder', 'RedisMetadataConnectionConfigBuilder', (['self._server_config_backend'], {}), '(self._server_config_backend)\n', (963, 992), False, 'from cartodb_services.refactor.storage.redis_connection_config import RedisMetadataConnectionConfigBuilder\n'), ((1039, 1095), 'cartodb_services.refactor.storage.redis_connection.RedisConnectionBuilder', 'RedisConnectionBuilder', (['redis_metadata_connection_config'], {}), '(redis_metadata_connection_config)\n', (1061, 1095), False, 'from cartodb_services.refactor.storage.redis_connection import RedisConnectionBuilder\n'), ((1136, 1208), 'cartodb_services.refactor.storage.redis_config.RedisUserConfigStorageBuilder', 'RedisUserConfigStorageBuilder', (['redis_metadata_connection', 'self._username'], {}), '(redis_metadata_connection, self._username)\n', (1165, 1208), False, 'from cartodb_services.refactor.storage.redis_config import RedisUserConfigStorageBuilder\n')] |
import os
import random
from flask import current_app
def save_file(form_file, folder_name):
    """Save an uploaded file under ``static/<folder_name>`` with a random name.

    The original extension is kept; the basename is replaced by a random hex
    string so uploads cannot collide with (or overwrite) each other.

    :param form_file: uploaded file object (must expose ``filename`` and ``save``)
    :param folder_name: sub-folder of ``static`` to store the file in
    :return: the path the file was written to
    """
    # Bug fix: ``token_hex`` lives in the ``secrets`` module, not ``random``;
    # ``random.token_hex(8)`` raises AttributeError at runtime.
    import secrets

    random_hex = secrets.token_hex(8)
    _, f_ext = os.path.splitext(form_file.filename)
    file_fn = random_hex + f_ext
    file_path = os.path.join(current_app.root_path, 'static', folder_name, file_fn)
    form_file.save(file_path)
    return file_path
| [
"os.path.join",
"os.path.splitext",
"random.token_hex"
] | [((112, 131), 'random.token_hex', 'random.token_hex', (['(8)'], {}), '(8)\n', (128, 131), False, 'import random\n'), ((147, 183), 'os.path.splitext', 'os.path.splitext', (['form_file.filename'], {}), '(form_file.filename)\n', (163, 183), False, 'import os\n'), ((233, 300), 'os.path.join', 'os.path.join', (['current_app.root_path', '"""static"""', 'folder_name', 'file_fn'], {}), "(current_app.root_path, 'static', folder_name, file_fn)\n", (245, 300), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# Copyright 2020 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import url
from django.conf import settings
from django.urls import path
from rest_framework import routers
from . import views
from . import views_relations
router = routers.DefaultRouter(trailing_slash=False)
router.register(
r'biomes',
views.BiomeViewSet,
basename='biomes'
)
router.register(
r'studies',
views.StudyViewSet,
basename='studies'
)
router.register(
r'super-studies',
views.SuperStudyViewSet,
basename='super-studies'
)
router.register(
r'samples',
views.SampleViewSet,
basename='samples'
)
router.register(
r'runs',
views.RunViewSet,
basename='runs'
)
router.register(
r'assemblies',
views.AssemblyViewSet,
basename='assemblies'
)
router.register(
r'analyses',
views.AnalysisJobViewSet,
basename='analyses'
)
router.register(
r'experiment-types',
views.ExperimentTypeViewSet,
basename='experiment-types'
)
router.register(
r'pipelines',
views.PipelineViewSet,
basename='pipelines'
)
router.register(
r'pipeline-tools',
views.PipelineToolViewSet,
basename='pipeline-tools'
)
router.register(
r'publications',
views.PublicationViewSet,
basename='publications'
)
router.register(
r'pipeline-tools/(?P<tool_name>[^/]+)',
views.PipelineToolVersionViewSet,
basename='pipeline-tools-version'
)
router.register(
r'analyses/(?P<accession>[^/]+)',
views.AnalysisQCChartViewSet,
basename='analysis-qcchart'
)
router.register(
r'analyses/(?P<accession>[^/]+)/krona',
views.KronaViewSet,
basename='analysis-krona'
)
router.register(
r'analyses/(?P<accession>[^/]+)/downloads',
views.AnalysisResultDownloadsViewSet,
basename='analysisdownload'
)
router.register(
r'analyses/(?P<accession>[^/]+)/file',
views.AnalysisResultDownloadViewSet,
basename='analysisdownload'
)
router.register(
r'studies/(?P<accession>[^/]+)/downloads',
views.StudiesDownloadsViewSet,
basename='studydownload'
)
# relationship views
router.register(
r'studies/(?P<accession>[^/]+)/analyses',
views_relations.StudyAnalysisResultViewSet,
basename='studies-analyses'
)
router.register(
r'super-studies/(?P<super_study_id>[^/]+)/flagship-studies',
views_relations.SuperStudyFlagshipStudiesViewSet,
basename='super-studies-flagship-studies'
)
router.register(
r'super-studies/(?P<super_study_id>[^/]+)/related-studies',
views_relations.SuperStudyRelatedStudiesViewSet,
basename='super-studies-related-studies'
)
router.register(
r'runs/(?P<accession>[^/]+)/analyses',
views_relations.RunAnalysisViewSet,
basename='runs-analyses'
)
router.register(
r'runs/(?P<accession>[^/]+)/assemblies',
views_relations.RunAssemblyViewSet,
basename='runs-assemblies'
)
router.register(
r'assemblies/(?P<accession>[^/]+)/analyses',
views_relations.AssemblyAnalysisViewSet,
basename='assemblies-analyses'
)
router.register(
r'assemblies/(?P<accession>[^/]+)/runs',
views_relations.AssemblyRunsViewSet,
basename='assemblies-runs'
)
router.register(
r'studies/(?P<accession>[^/]+)'
r'/pipelines/(?P<release_version>[0-9\.]+)/file',
views_relations.StudiesDownloadViewSet,
basename='studydownload'
)
router.register(
r'biomes/(?P<lineage>[^/]+)/children',
views_relations.BiomeTreeViewSet,
basename='biomes-children'
)
router.register(
r'biomes/(?P<lineage>[^/]+)/studies',
views_relations.BiomeStudyRelationshipViewSet,
basename='biomes-studies'
)
router.register(
r'biomes/(?P<lineage>[^/]+)/samples',
views_relations.BiomeSampleRelationshipViewSet,
basename='biomes-samples'
)
router.register(
r'biomes/(?P<lineage>[^/]+)/genomes',
views_relations.BiomeGenomeRelationshipViewSet,
basename='biomes-genomes'
)
router.register(
r'biomes/(?P<lineage>[^/]+)/genome-catalogues',
views_relations.BiomeGenomeCatalogueRelationshipViewSet,
basename='biomes-genome-catalogues'
)
router.register(
r'publications/(?P<pubmed_id>[0-9\.]+)/studies',
views_relations.PublicationStudyRelationshipViewSet,
basename='publications-studies'
)
router.register(
r'studies/(?P<accession>[^/]+)/geocoordinates',
views_relations.StudyGeoCoordinateRelationshipViewSet,
basename='studies-geoloc'
)
router.register(
r'studies/(?P<accession>[a-zA-Z0-9]+)/studies',
views_relations.StudyStudyRelationshipViewSet,
basename='studies-studies'
)
router.register(
r'studies/(?P<accession>[^/]+)/samples',
views_relations.StudySampleRelationshipViewSet,
basename='studies-samples'
)
router.register(
r'studies/(?P<accession>[^/]+)/publications',
views_relations.StudyPublicationRelationshipViewSet,
basename='studies-publications'
)
# router.register(
# r'pipelines/(?P<release_version>[0-9\.]+)/studies',
# views_relations.PipelineStudyRelationshipViewSet,
# basename='pipelines-studies'
# )
router.register(
r'pipelines/(?P<release_version>[0-9\.]+)/samples',
views_relations.PipelineSampleRelationshipViewSet,
basename='pipelines-samples'
)
router.register(
r'pipelines/(?P<release_version>[0-9\.]+)/analyses',
views_relations.PipelineAnalysisRelationshipViewSet,
basename='pipelines-analyses'
)
router.register(
r'pipelines/(?P<release_version>[0-9\.]+)/tools',
views_relations.PipelinePipelineToolRelationshipViewSet,
basename='pipelines-pipeline-tools'
)
router.register(
r'experiment-types/(?P<experiment_type>[^/]+)/samples',
views_relations.ExperimentTypeSampleRelationshipViewSet,
basename='experiment-types-samples'
)
router.register(
r'publications/(?P<pubmed_id>[0-9\.]+)/samples',
views_relations.PublicationSampleRelationshipViewSet,
basename='publications-samples'
)
router.register(
r'samples/(?P<accession>[^/]+)/runs',
views_relations.SampleRunRelationshipViewSet,
basename='samples-runs'
)
router.register(
r'experiment-types/(?P<experiment_type>[^/]+)/runs',
views_relations.ExperimentTypeRunRelationshipViewSet,
basename='experiment-types-runs'
)
router.register(
r'experiment-types/(?P<experiment_type>[^/]+)/analyses',
views_relations.ExperimentTypeAnalysisRelationshipViewSet,
basename='experiment-types-analyses'
)
router.register(
r'samples/(?P<accession>[^/]+)/studies',
views_relations.SampleStudiesRelationshipViewSet,
basename='samples-studies'
)
router.register(
r'samples/(?P<accession>[^/]+)/metadata',
views_relations.SampleMetadataRelationshipViewSet,
basename='samples-metadata'
)
mydata_router = routers.DefaultRouter(trailing_slash=False)
mydata_router.register(
r'mydata',
views.MyDataViewSet,
basename='mydata'
)
utils_router = routers.DefaultRouter(trailing_slash=False)
utils_router.register(
r'utils',
views.UtilsViewSet,
basename='csrf'
)
router.register(
r'genomes',
views.GenomeViewSet,
basename='genomes'
)
# Proxy Genome Search requests to microservice backend
router.register(
r'genome-search',
views.GenomeFragmentSearchViewSet,
basename='genome-search'
)
# Sourmash search
router.register(
r'genomes-search/gather',
views.GenomeSearchGatherViewSet,
basename='genomes-gather'
)
router.register(
r'genomes/(?P<accession>[^/]+)/cogs',
views_relations.GenomeCogsRelationshipsViewSet,
basename='genome-cog'
)
router.register(
    r'genomes/(?P<accession>[^/]+)/kegg-class',
    views_relations.GenomeKeggClassRelationshipsViewSet,
    basename='genome-kegg-class'
)
router.register(
    r'genomes/(?P<accession>[^/]+)/kegg-module',
    views_relations.GenomeKeggModuleRelationshipsViewSet,
    basename='genome-kegg-module'
)
# NOTE: a second, byte-identical registration of the kegg-class route (same
# prefix, viewset and basename) used to follow here; it was removed because
# registering the same basename twice only duplicates URL patterns and
# reverse() entries.
router.register(
r'genomes/(?P<accession>[^/]+)/antismash-genecluster',
views_relations.GenomeAntiSmashGeneClustersRelationshipsViewSet,
basename='genome-antismash-genecluster'
)
router.register(
r'genomes/(?P<accession>[^/]+)/downloads',
views.GenomeDownloadViewSet,
basename='genome-download'
)
router.register(
r'genome-catalogues',
views.GenomeCatalogueViewSet,
basename='genome-catalogues'
)
router.register(
r'genome-catalogues/(?P<catalogue_id>[^/]+)/genomes',
views_relations.GenomeCatalogueGenomeRelationshipViewSet,
basename='genome-catalogue-genomes'
)
router.register(
r'genome-catalogues/(?P<catalogue_id>[^/]+)/downloads',
views.GenomeCatalogueDownloadViewSet,
basename='genome-catalogue-downloads'
)
router.register(
r'genomeset',
views.GenomeSetViewSet,
basename='genomeset'
)
router.register(
r'genomeset/(?P<name>[^/]+)/genomes',
views_relations.GenomeSetGenomes,
basename='genomeset-genomes'
)
router.register(
r'cogs',
views.CogCatViewSet,
basename='cogs'
)
router.register(
r'kegg-modules',
views.KeggModuleViewSet,
basename='kegg-modules'
)
router.register(
r'kegg-classes',
views.KeggClassViewSet,
basename='kegg-classes'
)
router.register(
r'antismash-geneclusters',
views.AntiSmashGeneClustersViewSet,
basename='antismash-geneclusters'
)
# Plain (non-router) endpoints.
# NOTE(review): ``django.conf.urls.url`` is deprecated in modern Django in
# favour of ``re_path`` — confirm the Django version before upgrading.
urlpatterns = [
    path(r'v1/banner-message',
         views.BannerMessageView.as_view(),
         name='banner-message'),
    path(r'v1/ebi-search-download/<str:domain>',
         views.EBISearchCSVDownload.as_view(),
         name='ebi-search-download'),
    url(r'^v1/genomes-search/status/(?P<job_id>[^/]+)',
        views.GenomeSearchStatusView.as_view(),
        name='genomes-status'),
    url(r'^v1/genomes-search/results/(?P<job_id>[^/]+)',
        views.GenomeSearchResultsView.as_view(),
        name='genomes-results')
]
# Admin-only endpoints are exposed only when ADMIN is enabled in settings.
if settings.ADMIN:
    urlpatterns += [
        path(r'v1/biom-prediction',
             views.BiomePrediction.as_view(),
             name='biom-prediction')
    ]
| [
"rest_framework.routers.DefaultRouter"
] | [((815, 858), 'rest_framework.routers.DefaultRouter', 'routers.DefaultRouter', ([], {'trailing_slash': '(False)'}), '(trailing_slash=False)\n', (836, 858), False, 'from rest_framework import routers\n'), ((7304, 7347), 'rest_framework.routers.DefaultRouter', 'routers.DefaultRouter', ([], {'trailing_slash': '(False)'}), '(trailing_slash=False)\n', (7325, 7347), False, 'from rest_framework import routers\n'), ((7452, 7495), 'rest_framework.routers.DefaultRouter', 'routers.DefaultRouter', ([], {'trailing_slash': '(False)'}), '(trailing_slash=False)\n', (7473, 7495), False, 'from rest_framework import routers\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 28 10:47:38 2016
@author: ahefny
Policies are BLIND to the representation of states, which could be (1) observation,
(2) original latent state or (3) predictive state.
Policies takes the "state" dimension x_dim, the number of actions/dim of action as input.
"""
import numpy as np
import scipy.stats
class BasePolicy(object):
    """Abstract interface for policies; subclasses implement sample_action."""
    def reset(self):
        # Reset per-episode state; no-op by default.
        pass
    def sample_action(self, state):
        '''
        Samples an action and returns a tuple consisting of:
        chosen action, action probability,
        dictionary of diagnostic information (values must be numbers or vectors)
        '''
        raise NotImplementedError
    def _load(self, params):
        # Subclass hook for loading policy parameters (format is subclass-specific).
        raise NotImplementedError
    def _save(self):
        # Subclass hook for exporting policy parameters (format is subclass-specific).
        raise NotImplementedError
class RandomDiscretePolicy(BasePolicy):
    """Policy that picks one of ``num_actions`` discrete actions uniformly."""
    def __init__(self, num_actions, rng=None):
        self.num_actions = num_actions
        self.rng = rng
    def sample_action(self, state):
        """Ignore the state and draw a uniformly random discrete action."""
        prob = 1. / self.num_actions
        chosen = self.rng.randint(0, self.num_actions)
        return chosen, prob, {}
class RandomGaussianPolicy(BasePolicy):
    """Policy that samples each action dimension from a standard normal."""
    def __init__(self, num_actions, rng=None):
        self.num_actions = num_actions
        self.rng = rng
    def sample_action(self, state):
        """Ignore the state and draw an N(0, I) action vector."""
        sample = self.rng.randn(self.num_actions)
        # Density of the draw = product of per-dimension standard-normal pdfs.
        density = np.prod(scipy.stats.norm.pdf(sample))
        return sample, density, {}
class UniformContinuousPolicy(BasePolicy):
    """Policy sampling uniformly from the axis-aligned box [low, high]."""
    def __init__(self, low, high, rng=None):
        self._low = low
        self._high = high
        # Constant density: one over the box volume.
        self._prob = 1.0 / np.prod(self._high - self._low)
        self.rng = rng
    def sample_action(self, state):
        """Ignore the state and draw a point uniformly from the box."""
        span = self._high - self._low
        unit_sample = self.rng.rand(len(self._high))
        return unit_sample * span + self._low, self._prob, {}
class LinearPolicy(BasePolicy):
    """Gaussian policy whose mean is the linear map K applied to the state."""
    def __init__(self, K, sigma, rng=None):
        self._K = K
        self._sigma = sigma
        self.rng = rng
    def reset(self):
        # Stateless policy; nothing to reset between episodes.
        pass
    def sample_action(self, state):
        """Sample action = K @ state + sigma * eps with eps ~ N(0, I)."""
        mu = np.dot(self._K, state)
        eps = self.rng.randn(len(mu))
        perturbed = mu + eps * self._sigma
        # Probability reported w.r.t. the unit-variance noise draw ``eps``.
        return perturbed, np.prod(scipy.stats.norm.pdf(eps)), {}
class SineWavePolicy(BasePolicy):
    """Deterministic open-loop policy emitting per-dimension sine waves."""
    def __init__(self, amps, periods, phases):
        self._amps = amps
        # Convert period (in steps) to angular frequency, degrees to radians.
        self._scales = 2 * np.pi / periods
        self._phases = phases * np.pi / 180.0
        self._t = 0
    def reset(self):
        """Rewind the wave to time step zero."""
        self._t = 0
    def sample_action(self, state):
        """Return the sine value at the current step, then advance time."""
        phase_now = self._t * self._scales + self._phases
        self._t += 1
        return self._amps * np.sin(phase_now), 1.0, {}
| [
"numpy.sin",
"numpy.prod",
"numpy.dot"
] | [((2094, 2116), 'numpy.dot', 'np.dot', (['self._K', 'state'], {}), '(self._K, state)\n', (2100, 2116), True, 'import numpy as np\n'), ((1598, 1629), 'numpy.prod', 'np.prod', (['(self._high - self._low)'], {}), '(self._high - self._low)\n', (1605, 1629), True, 'import numpy as np\n'), ((2612, 2657), 'numpy.sin', 'np.sin', (['(self._t * self._scales + self._phases)'], {}), '(self._t * self._scales + self._phases)\n', (2618, 2657), True, 'import numpy as np\n')] |
import os
import sys
from datetime import datetime
from subprocess import run

# Note file name: UTC timestamp, e.g. "20161128-104738.md".
name = datetime.utcnow().strftime("%Y%m%d-%H%M%S.md")
try:
    # -t stands for "topic": `script.py -t <dir>` puts the note in <dir>.
    topic_index = sys.argv.index('-t')
    path = os.path.join(sys.argv[topic_index + 1], name)
except (ValueError, IndexError):
    # ValueError: no -t flag given; IndexError: -t given without a directory.
    # (The original bare `except:` also swallowed KeyboardInterrupt/SystemExit.)
    # Either way, fall back to the current working directory.
    path = name
run(['code', path])
| [
"sys.argv.index",
"subprocess.run",
"os.path.join",
"datetime.datetime.utcnow"
] | [((287, 306), 'subprocess.run', 'run', (["['code', path]"], {}), "(['code', path])\n", (290, 306), False, 'from subprocess import run\n'), ((184, 204), 'sys.argv.index', 'sys.argv.index', (['"""-t"""'], {}), "('-t')\n", (198, 204), False, 'import sys\n'), ((216, 261), 'os.path.join', 'os.path.join', (['sys.argv[topic_index + 1]', 'name'], {}), '(sys.argv[topic_index + 1], name)\n', (228, 261), False, 'import os\n'), ((86, 103), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (101, 103), False, 'from datetime import datetime\n')] |
# -*- coding: utf-8 -*-
"""module for Superfetch."""
import os, sys
import time
from datetime import datetime, timedelta
from modules import logger
from modules import manager
from modules import interface
from modules.windows_superfetch import sfexport2
from dfvfs.lib import definitions as dfvfs_definitions
class SUPERFETCHConnector(interface.ModuleConnector):
    # Parses Windows Superfetch files (*.db/*.7db/*.ebd under Windows\Prefetch)
    # and stores the extracted reference points in lv1_os_win_superfetch.
    NAME = 'superfetch_connector'
    DESCRIPTION = 'Module for Superfetch'
    _plugin_classes = {}
    def __init__(self):
        super(SUPERFETCHConnector, self).__init__()
    def Connect(self, par_id, configuration, source_path_spec, knowledge_base):
        """Find Superfetch files in the partition, parse them and persist rows.

        Returns False when the target table cannot be prepared or no
        Superfetch files exist in the partition; otherwise inserts one row per
        reference point found.
        """
        this_file_path = os.path.dirname(os.path.abspath(__file__)) + os.sep + 'schema' + os.sep
        # List of all yaml schema files
        yaml_list = [this_file_path + 'lv1_os_win_superfetch.yaml']
        # List of all tables
        table_list = ['lv1_os_win_superfetch']
        if not self.check_table_from_yaml(configuration, yaml_list, table_list):
            return False
        # TODO: the extension filter below should be changed to use sig_type
        query_separator = self.GetQuerySeparator(source_path_spec, configuration)
        path_separator = self.GetPathSeparator(source_path_spec)
        query = f"SELECT name, parent_path, extension, ctime, ctime_nano FROM file_info WHERE par_id='{par_id}' and " \
                f"parent_path like 'root{query_separator}Windows{query_separator}Prefetch' " \
                f"and (extension = '7db' or extension = 'db' or extension = 'ebd');"
        superfetch_files = configuration.cursor.execute_query_mul(query)
        if len(superfetch_files) == 0:
            # print("There are no superfetch files")
            return False
        insert_superfetch_info = []
        for superfetch in superfetch_files:
            # Rebuild the full in-image path from parent_path + name.
            superfetch_path = superfetch[1][superfetch[1].find(path_separator):] + path_separator + superfetch[0]  # full path
            fileName = superfetch[0]
            # Per-partition temp directory the file is extracted into.
            output_path = configuration.root_tmp_path + os.path.sep + configuration.case_id \
                          + os.path.sep + configuration.evidence_id + os.path.sep + par_id
            if not os.path.exists(output_path):
                os.mkdir(output_path)
            self.ExtractTargetFileToPath(
                source_path_spec=source_path_spec,
                configuration=configuration,
                file_path=superfetch_path,
                output_path=output_path)
            fn = output_path + os.path.sep + fileName
            try:
                results = sfexport2.main(fn)  # parse the extracted Superfetch file
            except Exception:
                # Unparseable file: skip it (note: the extracted copy is left behind).
                continue
            if not results:
                os.remove(output_path + os.sep + fileName)
                continue
            # One output row per reference point, sharing the file/volume info.
            for result in results['reference_point']:
                insert_superfetch_info.append(tuple([par_id, configuration.case_id, configuration.evidence_id,
                                                     results['file_info']['Name'], results['file_info']['Volume Name'],
                                                     results['file_info']['Volume ID'], result]))
            os.remove(output_path + os.sep + fileName)
        query = "Insert into lv1_os_win_superfetch values (%s, %s, %s, %s, %s, %s, %s);"
        configuration.cursor.bulk_execute(query, insert_superfetch_info)
manager.ModulesManager.RegisterModule(SUPERFETCHConnector) | [
"os.path.exists",
"modules.windows_superfetch.sfexport2.main",
"os.mkdir",
"os.path.abspath",
"modules.manager.ModulesManager.RegisterModule",
"os.remove"
] | [((3350, 3408), 'modules.manager.ModulesManager.RegisterModule', 'manager.ModulesManager.RegisterModule', (['SUPERFETCHConnector'], {}), '(SUPERFETCHConnector)\n', (3387, 3408), False, 'from modules import manager\n'), ((3142, 3184), 'os.remove', 'os.remove', (['(output_path + os.sep + fileName)'], {}), '(output_path + os.sep + fileName)\n', (3151, 3184), False, 'import os, sys\n'), ((2128, 2155), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (2142, 2155), False, 'import os, sys\n'), ((2173, 2194), 'os.mkdir', 'os.mkdir', (['output_path'], {}), '(output_path)\n', (2181, 2194), False, 'import os, sys\n'), ((2516, 2534), 'modules.windows_superfetch.sfexport2.main', 'sfexport2.main', (['fn'], {}), '(fn)\n', (2530, 2534), False, 'from modules.windows_superfetch import sfexport2\n'), ((2647, 2689), 'os.remove', 'os.remove', (['(output_path + os.sep + fileName)'], {}), '(output_path + os.sep + fileName)\n', (2656, 2689), False, 'import os, sys\n'), ((670, 695), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (685, 695), False, 'import os, sys\n')] |
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST, HTTP_200_OK
from river.models import Function
from river_admin.views import get, post, put, delete
from river_admin.views.serializers import UpdateFunctionDto, CreateFunctionDto, FunctionDto
@get(r'^function/get/(?P<pk>\w+)/$')
def get_it(request, pk):
    """Return a single Function by primary key (404 if it does not exist)."""
    function = get_object_or_404(Function.objects.all(), pk=pk)
    return Response(FunctionDto(function).data, status=HTTP_200_OK)
@get(r'^function/list/$')
def list_it(request):
    """Return every Function, serialized as a list."""
    return Response(FunctionDto(Function.objects.all(), many=True).data, status=HTTP_200_OK)
@post(r'^function/create/')
def create_it(request):
    """Create a Function from the request body; return its id or validation errors."""
    dto = CreateFunctionDto(data=request.data)
    if not dto.is_valid():
        return Response(dto.errors, status=HTTP_400_BAD_REQUEST)
    created = dto.save()
    return Response({"id": created.id}, status=HTTP_200_OK)
@put(r'^function/update/(?P<pk>\w+)/$')
def update_it(request, pk):
    """Update an existing Function (404 if missing); report success or errors."""
    target = get_object_or_404(Function.objects.all(), pk=pk)
    dto = UpdateFunctionDto(data=request.data, instance=target)
    if not dto.is_valid():
        return Response(dto.errors, status=HTTP_400_BAD_REQUEST)
    dto.save()
    return Response({"message": "Function is updated"}, status=HTTP_200_OK)
@delete(r'^function/delete/(?P<pk>\w+)/$')
def delete_it(request, pk):
    """Delete the Function with the given pk (404 if it does not exist)."""
    function = get_object_or_404(Function.objects.all(), pk=pk)
    function.delete()
    return Response(status=HTTP_200_OK)
| [
"river_admin.views.get",
"river.models.Function.objects.all",
"river_admin.views.serializers.CreateFunctionDto",
"river_admin.views.put",
"rest_framework.response.Response",
"river_admin.views.serializers.UpdateFunctionDto",
"river_admin.views.delete",
"river_admin.views.post",
"river_admin.views.se... | [((350, 385), 'river_admin.views.get', 'get', (['"""^function/get/(?P<pk>\\\\w+)/$"""'], {}), "('^function/get/(?P<pk>\\\\w+)/$')\n", (353, 385), False, 'from river_admin.views import get, post, put, delete\n'), ((546, 569), 'river_admin.views.get', 'get', (['"""^function/list/$"""'], {}), "('^function/list/$')\n", (549, 569), False, 'from river_admin.views import get, post, put, delete\n'), ((689, 714), 'river_admin.views.post', 'post', (['"""^function/create/"""'], {}), "('^function/create/')\n", (693, 714), False, 'from river_admin.views import get, post, put, delete\n'), ((1063, 1101), 'river_admin.views.put', 'put', (['"""^function/update/(?P<pk>\\\\w+)/$"""'], {}), "('^function/update/(?P<pk>\\\\w+)/$')\n", (1066, 1101), False, 'from river_admin.views import get, post, put, delete\n'), ((1541, 1582), 'river_admin.views.delete', 'delete', (['"""^function/delete/(?P<pk>\\\\w+)/$"""'], {}), "('^function/delete/(?P<pk>\\\\w+)/$')\n", (1547, 1582), False, 'from river_admin.views import get, post, put, delete\n'), ((770, 806), 'river_admin.views.serializers.CreateFunctionDto', 'CreateFunctionDto', ([], {'data': 'request.data'}), '(data=request.data)\n', (787, 806), False, 'from river_admin.views.serializers import UpdateFunctionDto, CreateFunctionDto, FunctionDto\n'), ((1224, 1279), 'river_admin.views.serializers.UpdateFunctionDto', 'UpdateFunctionDto', ([], {'data': 'request.data', 'instance': 'function'}), '(data=request.data, instance=function)\n', (1241, 1279), False, 'from river_admin.views.serializers import UpdateFunctionDto, CreateFunctionDto, FunctionDto\n'), ((1708, 1736), 'rest_framework.response.Response', 'Response', ([], {'status': 'HTTP_200_OK'}), '(status=HTTP_200_OK)\n', (1716, 1736), False, 'from rest_framework.response import Response\n'), ((444, 466), 'river.models.Function.objects.all', 'Function.objects.all', ([], {}), '()\n', (464, 466), False, 'from river.models import Function\n'), ((915, 964), 
'rest_framework.response.Response', 'Response', (["{'id': function.id}"], {'status': 'HTTP_200_OK'}), "({'id': function.id}, status=HTTP_200_OK)\n", (923, 964), False, 'from rest_framework.response import Response\n'), ((990, 1059), 'rest_framework.response.Response', 'Response', (['create_function_request.errors'], {'status': 'HTTP_400_BAD_REQUEST'}), '(create_function_request.errors, status=HTTP_400_BAD_REQUEST)\n', (998, 1059), False, 'from rest_framework.response import Response\n'), ((1163, 1185), 'river.models.Function.objects.all', 'Function.objects.all', ([], {}), '()\n', (1183, 1185), False, 'from river.models import Function\n'), ((1378, 1442), 'rest_framework.response.Response', 'Response', (["{'message': 'Function is updated'}"], {'status': 'HTTP_200_OK'}), "({'message': 'Function is updated'}, status=HTTP_200_OK)\n", (1386, 1442), False, 'from rest_framework.response import Response\n'), ((1468, 1537), 'rest_framework.response.Response', 'Response', (['update_function_request.errors'], {'status': 'HTTP_400_BAD_REQUEST'}), '(update_function_request.errors, status=HTTP_400_BAD_REQUEST)\n', (1476, 1537), False, 'from rest_framework.response import Response\n'), ((1644, 1666), 'river.models.Function.objects.all', 'Function.objects.all', ([], {}), '()\n', (1664, 1666), False, 'from river.models import Function\n'), ((495, 516), 'river_admin.views.serializers.FunctionDto', 'FunctionDto', (['function'], {}), '(function)\n', (506, 516), False, 'from river_admin.views.serializers import UpdateFunctionDto, CreateFunctionDto, FunctionDto\n'), ((625, 647), 'river.models.Function.objects.all', 'Function.objects.all', ([], {}), '()\n', (645, 647), False, 'from river.models import Function\n')] |
import torch
import numpy as np
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import skimage.io as io
from path import Path
import cv2
import torch.nn.functional as F
class ETH_LFB(Dataset):
    def __init__(self, configs):
        """
        dataset for eth local feature benchmark

        :param configs: dict with keys 'data_path' (dataset root) and
            'subfolder' (scene name); images are read from
            <data_path>/<subfolder>/images.
        """
        super(ETH_LFB, self).__init__()
        self.configs = configs
        # Standard ImageNet normalization applied after ToTensor.
        self.transform = transforms.Compose([transforms.ToTensor(),
                                             transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                                                  std=(0.229, 0.224, 0.225)),
                                             ])
        # self.imfs = []
        # SIFT detector used to pick keypoint locations in __getitem__.
        self.sift = cv2.SIFT_create()
        imdir = Path(self.configs['data_path'])
        folder_dir = imdir/self.configs['subfolder']
        images_dir = folder_dir/'images'
        imgs = images_dir.glob('*')
        self.imfs = imgs
        self.imfs.sort()
    def __getitem__(self, item):
        """Load one image and return it with its SIFT keypoint coordinates.

        Returns a dict with the normalized tensor ('im1'), the cropped RGB
        image ('im1_ori'), an (N, 2) float tensor of keypoint x/y coordinates
        ('coord1'), the subfolder-qualified image name ('name1') and the
        padding tuple applied ('pad1', always zeros since cropping is used).
        """
        imf = self.imfs[item]
        im = io.imread(imf)
        name = imf.name
        name = '{}/{}'.format(self.configs['subfolder'], name)
        if len(im.shape) != 3: #gray images: replicate to 3 channels
            im = cv2.cvtColor(im, cv2.COLOR_GRAY2RGB)
        im = im.copy()
        im_tensor = self.transform(im) # ToTensor + ImageNet normalization
        c, h, w = im_tensor.shape
        # Padding to a multiple of 16 was replaced by cropping (kept for reference).
        # pad_b = 16 - h%16
        # pad_r = 16 - w%16
        # pad = (0,pad_r,0,pad_b)
        # im_tensor = F.pad(im_tensor.unsqueeze(0), pad, mode='replicate').squeeze(0)
        pad=(0,0,0,0)
        # now use crop to get suitable size (both sides a multiple of 16)
        crop_r = w%16
        crop_b = h%16
        im_tensor = im_tensor[:,:h-crop_b,:w-crop_r]
        im = im[:h-crop_b,:w-crop_r,:]
        # using sift keypoints detected on the grayscale of the cropped image
        gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
        kpts = self.sift.detect(gray)
        kpts = np.array([[kp.pt[0], kp.pt[1]] for kp in kpts])
        coord = torch.from_numpy(kpts).float()
        out = {'im1': im_tensor, 'im1_ori':im, 'coord1': coord, 'name1': name, 'pad1':pad}
        return out
def __len__(self):
return len(self.imfs) | [
"torch.from_numpy",
"numpy.array",
"path.Path",
"cv2.SIFT_create",
"skimage.io.imread",
"cv2.cvtColor",
"torchvision.transforms.Normalize",
"torchvision.transforms.ToTensor"
] | [((752, 769), 'cv2.SIFT_create', 'cv2.SIFT_create', ([], {}), '()\n', (767, 769), False, 'import cv2\n'), ((786, 817), 'path.Path', 'Path', (["self.configs['data_path']"], {}), "(self.configs['data_path'])\n", (790, 817), False, 'from path import Path\n'), ((1075, 1089), 'skimage.io.imread', 'io.imread', (['imf'], {}), '(imf)\n', (1084, 1089), True, 'import skimage.io as io\n'), ((1799, 1835), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_RGB2GRAY'], {}), '(im, cv2.COLOR_RGB2GRAY)\n', (1811, 1835), False, 'import cv2\n'), ((1889, 1936), 'numpy.array', 'np.array', (['[[kp.pt[0], kp.pt[1]] for kp in kpts]'], {}), '([[kp.pt[0], kp.pt[1]] for kp in kpts])\n', (1897, 1936), True, 'import numpy as np\n'), ((1239, 1275), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_GRAY2RGB'], {}), '(im, cv2.COLOR_GRAY2RGB)\n', (1251, 1275), False, 'import cv2\n'), ((448, 469), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (467, 469), True, 'import torchvision.transforms as transforms\n'), ((516, 591), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '(0.485, 0.456, 0.406)', 'std': '(0.229, 0.224, 0.225)'}), '(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))\n', (536, 591), True, 'import torchvision.transforms as transforms\n'), ((1953, 1975), 'torch.from_numpy', 'torch.from_numpy', (['kpts'], {}), '(kpts)\n', (1969, 1975), False, 'import torch\n')] |
"""This file contains functions for processing image"""
import cv2
import math
import copy
import numpy as np
import matplotlib.pyplot as plt
def binarize_image(image):
    """Threshold *image* at its mean so every pixel becomes 0 or 255.

    An image whose values are already exactly {0, 255} is returned unchanged;
    otherwise the array is modified in place and returned.
    """
    values = np.unique(image)
    already_binary = len(values) == 2 and (values == np.array([0., 255.])).all()
    if already_binary:
        return image
    threshold = image.mean()
    image[image > threshold] = 255
    image[image <= threshold] = 0
    return image
def read_gray_image(path):
    """Read in a gray scale image.

    :param path: path to the image file
    :return: 2-D array as loaded by cv2 in grayscale mode
    """
    image = cv2.imread(path, 0)
    return image
def read_image(path):
    """Read in a RGB image.

    NOTE(review): cv2.imread returns channels in BGR order; callers that
    assume RGB ordering should confirm this is intended.
    :param path: path to the image file
    """
    image = cv2.imread(path)
    return image
def save_image(image, path):
    """Save image using cv2.

    :param image: array to write
    :param path: destination file path
    """
    cv2.imwrite(path, image)
def get_black_area(raw_mask):
    """Get the area of black values which needs to be filled with Tangram.
    Input:
        raw_mask: input image of the Tangram problem, np.array with black area == 0
    Return:
        black: number of zero-valued (black) pixels
    """
    # ``size`` counts every element, so this also works for non-2D masks
    # (the original ``h, w = raw_mask.shape`` unpack required exactly 2 dims);
    # for 2-D input the result is identical (h * w == size).
    return raw_mask.size - np.count_nonzero(raw_mask)
def get_unit_length(raw_mask, standard_s=64.):
    """Get the unit length for a Tangram problem.
    For example, if an input mask has an area == 64, while a typical 13 Tangram has an area == 64,
    then the unit length will be 1 for this problem.
    Input:
        raw_mask: input image of the Tangram problem, np.array with black area == 0
        standard_s: standard square of a set of 13 Tangram, typically 64
    Return:
        unit_length: the length in the mask that equals to 1 in a typical Tangram
    """
    # The black area scales with the square of the drawing unit, so the
    # unit length is the square root of the area ratio.
    ratio = float(get_black_area(raw_mask)) / float(standard_s)
    return math.sqrt(ratio)
def show_gray_image(image):
    """Show gray scale image using matplotlib's gray colormap."""
    plt.imshow(image, cmap='gray')
    plt.show()
def show_image(image):
    """Show RGB image with matplotlib's default rendering."""
    plt.imshow(image)
    plt.show()
def get_final_result(grid, elements, colors):
    """Draw elements on grid and returns the final solution.

    Each element paints its first ``area`` coordinates with the color at the
    matching index of ``colors``; the input grid itself is left untouched.
    """
    canvas = copy.deepcopy(grid)
    # A 2-D (grayscale) grid is expanded to three identical RGB channels.
    if len(canvas.shape) == 2:
        canvas = np.stack([canvas, canvas, canvas], axis=-1)
    for idx, element in enumerate(elements):
        for j in range(element.area):
            coord = element.coordinates[j]
            canvas[coord[0], coord[1]] = colors[idx]
    return canvas
def segment_image(image, tangram_s):
    """Since we know all elements in a 13 Tangram can be decomposed into small 1x1 squares,
    I want to segment the original image into grid form that,
    each pixel corresponds to one 1x1 square.

    Returns (unit_length, new_image, grid): the pixel size of one unit square,
    the white-padded copy of the input aligned to the unit lattice, and a
    small uint8 grid where 0 marks a mostly-black unit square and 255 a
    mostly-white one.
    """
    # get unit_length (pixels per 1x1 Tangram square, rounded to int)
    unit_length = int(round(get_unit_length(image, tangram_s)))
    # first reverse image to set black area == 1 and white area == 0
    mask = np.zeros_like(image, dtype=np.uint8)
    mask[image > 128] = 0
    mask[image <= 128] = 1
    # Column/row sums locate the bounding box of the black region; a row or
    # column counts as occupied if it covers at least half a unit length.
    w_sum = np.sum(mask, axis=0)
    h_sum = np.sum(mask, axis=1)
    loc1 = np.where(h_sum >= unit_length * 0.5)
    start_x = loc1[0][0]
    end_x = loc1[0][-1] + 1
    loc2 = np.where(w_sum >= unit_length * 0.5)
    start_y = loc2[0][0]
    end_y = loc2[0][-1] + 1
    h = end_x - start_x
    w = end_y - start_y
    # The black bounding box must be a whole number of unit squares.
    assert (h % unit_length == 0 and w % unit_length == 0)
    # pad image with white so the black region starts on a lattice boundary
    ori_h, ori_w = mask.shape
    new_h = (ori_h // unit_length + 2) * unit_length
    new_w = (ori_w // unit_length + 2) * unit_length
    new_image = np.ones((new_h, new_w), dtype=np.uint8) * 255
    pad_x_start = unit_length - (start_x % unit_length)
    pad_y_start = unit_length - (start_y % unit_length)
    new_image[pad_x_start:pad_x_start + ori_h, pad_y_start:pad_y_start + ori_w] = image
    # generate grid: one cell per unit square of the padded image
    h = new_h // unit_length
    w = new_w // unit_length
    grid = np.ones((h, w), dtype=np.uint8) * 255
    # iterate over small squares and compare areas: a cell is black (0)
    # when more than half of its pixels are black
    mask = np.zeros_like(new_image, dtype=np.uint8)
    mask[new_image > 128] = 0
    mask[new_image <= 128] = 1
    for i in range(h):
        for j in range(w):
            area = \
                np.sum(mask[unit_length * i:unit_length * (i + 1), unit_length * j:unit_length * (j + 1)])
            if area > (unit_length ** 2) * 0.5:
                grid[i, j] = 0
    return unit_length, new_image, grid
| [
"matplotlib.pyplot.imshow",
"cv2.imwrite",
"numpy.unique",
"numpy.ones",
"numpy.where",
"numpy.zeros_like",
"numpy.count_nonzero",
"numpy.sum",
"numpy.stack",
"numpy.array",
"copy.deepcopy",
"cv2.imread",
"matplotlib.pyplot.show"
] | [((255, 271), 'numpy.unique', 'np.unique', (['image'], {}), '(image)\n', (264, 271), True, 'import numpy as np\n'), ((580, 599), 'cv2.imread', 'cv2.imread', (['path', '(0)'], {}), '(path, 0)\n', (590, 599), False, 'import cv2\n'), ((692, 708), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (702, 708), False, 'import cv2\n'), ((801, 825), 'cv2.imwrite', 'cv2.imwrite', (['path', 'image'], {}), '(path, image)\n', (812, 825), False, 'import cv2\n'), ((1934, 1964), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""'}), "(image, cmap='gray')\n", (1944, 1964), True, 'import matplotlib.pyplot as plt\n'), ((1970, 1980), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1978, 1980), True, 'import matplotlib.pyplot as plt\n'), ((2041, 2058), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (2051, 2058), True, 'import matplotlib.pyplot as plt\n'), ((2064, 2074), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2072, 2074), True, 'import matplotlib.pyplot as plt\n'), ((2202, 2221), 'copy.deepcopy', 'copy.deepcopy', (['grid'], {}), '(grid)\n', (2215, 2221), False, 'import copy\n'), ((2954, 2990), 'numpy.zeros_like', 'np.zeros_like', (['image'], {'dtype': 'np.uint8'}), '(image, dtype=np.uint8)\n', (2967, 2990), True, 'import numpy as np\n'), ((3059, 3079), 'numpy.sum', 'np.sum', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (3065, 3079), True, 'import numpy as np\n'), ((3093, 3113), 'numpy.sum', 'np.sum', (['mask'], {'axis': '(1)'}), '(mask, axis=1)\n', (3099, 3113), True, 'import numpy as np\n'), ((3126, 3162), 'numpy.where', 'np.where', (['(h_sum >= unit_length * 0.5)'], {}), '(h_sum >= unit_length * 0.5)\n', (3134, 3162), True, 'import numpy as np\n'), ((3230, 3266), 'numpy.where', 'np.where', (['(w_sum >= unit_length * 0.5)'], {}), '(w_sum >= unit_length * 0.5)\n', (3238, 3266), True, 'import numpy as np\n'), ((4057, 4097), 'numpy.zeros_like', 'np.zeros_like', (['new_image'], {'dtype': 'np.uint8'}), 
'(new_image, dtype=np.uint8)\n', (4070, 4097), True, 'import numpy as np\n'), ((1141, 1167), 'numpy.count_nonzero', 'np.count_nonzero', (['raw_mask'], {}), '(raw_mask)\n', (1157, 1167), True, 'import numpy as np\n'), ((2305, 2339), 'numpy.stack', 'np.stack', (['[img, img, img]'], {'axis': '(-1)'}), '([img, img, img], axis=-1)\n', (2313, 2339), True, 'import numpy as np\n'), ((3607, 3646), 'numpy.ones', 'np.ones', (['(new_h, new_w)'], {'dtype': 'np.uint8'}), '((new_h, new_w), dtype=np.uint8)\n', (3614, 3646), True, 'import numpy as np\n'), ((3953, 3984), 'numpy.ones', 'np.ones', (['(h, w)'], {'dtype': 'np.uint8'}), '((h, w), dtype=np.uint8)\n', (3960, 3984), True, 'import numpy as np\n'), ((4252, 4347), 'numpy.sum', 'np.sum', (['mask[unit_length * i:unit_length * (i + 1), unit_length * j:unit_length * (\n j + 1)]'], {}), '(mask[unit_length * i:unit_length * (i + 1), unit_length * j:\n unit_length * (j + 1)])\n', (4258, 4347), True, 'import numpy as np\n'), ((335, 357), 'numpy.array', 'np.array', (['[0.0, 255.0]'], {}), '([0.0, 255.0])\n', (343, 357), True, 'import numpy as np\n')] |
import logging
from logging.config import dictConfig
import dbnd
from dbnd.testing.helpers import run_dbnd_subprocess__with_home
from dbnd_airflow_contrib.dbnd_airflow_default_logger import DEFAULT_LOGGING_CONFIG
class TestDbndAirflowLogging(object):
def test_dbnd_airflow_logging_conifg(self):
# we implement it as a separte test, as we don't want to affect current logging system
dbnd_config = DEFAULT_LOGGING_CONFIG
assert dbnd_config
def test_can_be_loaded(self):
# we can't just load config, it will affect all future tests
output = run_dbnd_subprocess__with_home([__file__.replace(".pyc", ".py")])
assert "test_can_be_loaded OK" in output
logging.error("Done")
if __name__ == "__main__":
print(
dbnd.__version__
) # we need it first to import, before we import any airflow code
dbnd_config = DEFAULT_LOGGING_CONFIG
dictConfig(dbnd_config)
logging.info("test_can_be_loaded OK")
| [
"logging.config.dictConfig",
"logging.info",
"logging.error"
] | [((919, 942), 'logging.config.dictConfig', 'dictConfig', (['dbnd_config'], {}), '(dbnd_config)\n', (929, 942), False, 'from logging.config import dictConfig\n'), ((947, 984), 'logging.info', 'logging.info', (['"""test_can_be_loaded OK"""'], {}), "('test_can_be_loaded OK')\n", (959, 984), False, 'import logging\n'), ((715, 736), 'logging.error', 'logging.error', (['"""Done"""'], {}), "('Done')\n", (728, 736), False, 'import logging\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#import bibliotek
from keras.applications.resnet50 import ResNet50, decode_predictions,preprocess_input
from keras.preprocessing import image
import numpy as np
import requests
from io import BytesIO
from PIL import Image
# In[2]:
#podbranie modelu ResNet50
model = ResNet50(weights = 'imagenet')
# In[3]:
#architektura modelu ResNet50
model.summary()
# ## Import zdjecia z internetu
# #wyświetla zdjecie w jupyterze
#
#
#
# ![] (link)
#
# 
# 
# In[4]:
#import zdjecia z internetu
url_img = ('https://natgeo.imgix.net/syndication/d03e14b9-ccf2-40d2-9612-997a20d35b4a/magazine-rights-exempt-2016-08-departments-panda-mania-12.jpg?auto=compress,format&w=1024&h=560&fit=crop')
response = requests.get(url_img)
#zmiana na Bytes
img = Image.open(BytesIO(response.content))
#rozmiar zdjecia 224x 224 bo taki wymaga model
img = img.resize((224,224))
img
# In[5]:
# konwersja zdjecia na tablice o wartosciach 0-255
X = image.img_to_array(img)
#dodanie nowego wymiaru bo model przyjmuje 4 wymiary
X = np.expand_dims(X, axis =0)
#(1,,224,224,3)
# 1 - zdjecie
# 224 - rozmiar
# 224 - rozmiar
# 3 - RBG
X.shape
# In[6]:
np.expand_dims(X, axis =0).shape
# In[7]:
#predykcja
y_pred = model.predict(X)
# In[8]:
#prawdopodobieństwo co jest na zdjęciu
decode_predictions(y_pred, top = 5)
# In[9]:
#inne przypadki
url_money =('http://3.bp.blogspot.com/-CU3Mg-LeVC4/VWSAi6Ff3dI/AAAAAAAAAkM/UnHJHUkba3c/s400/IMG_9240.JPG')
url_dolar =('https://s3.amazonaws.com/ngccoin-production/us-coin-explorer-category/2718362-020o.jpg')
url_kasa =('https://ocdn.eu/pulscms-transforms/1/MesktkpTURBXy82NDZmNjk1MTExMzVmN2Q5ZmMwMWE1YjUxODU5YzdkNC5qcGeSlQMAAM0QbM0JPZMFzQNSzQHe')
url_snow =('https://miastodzieci.pl/wp-content/uploads/2015/09/snowman-1073800_1920.jpg')
url_dolares = ('https://wf2.xcdn.pl/files/17/04/12/984916_hI4O_17123251389_bed3c3a1ba_b_83.jpg')
url_cash = ('http://m.wm.pl/2018/07/orig/pieniadze-22-482228.jpg')
response = requests.get(url_cash)
img = Image.open(BytesIO(response.content))
#rozmiar zdjecia 224x 224
img = img.resize((224,224))
img
# In[10]:
X = image.img_to_array(img)
X = np.expand_dims(X, axis =0)
X.shape
# In[11]:
#predykcja
y_pred = model.predict(X)
# In[12]:
#prawdopodobieństwo co jest na zdjęciu
decode_predictions(y_pred, top = 5)
# In[ ]:
| [
"keras.preprocessing.image.img_to_array",
"keras.applications.resnet50.decode_predictions",
"io.BytesIO",
"requests.get",
"numpy.expand_dims",
"keras.applications.resnet50.ResNet50"
] | [((323, 351), 'keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (331, 351), False, 'from keras.applications.resnet50 import ResNet50, decode_predictions, preprocess_input\n'), ((1204, 1225), 'requests.get', 'requests.get', (['url_img'], {}), '(url_img)\n', (1216, 1225), False, 'import requests\n'), ((1436, 1459), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (1454, 1459), False, 'from keras.preprocessing import image\n'), ((1518, 1543), 'numpy.expand_dims', 'np.expand_dims', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1532, 1543), True, 'import numpy as np\n'), ((1778, 1811), 'keras.applications.resnet50.decode_predictions', 'decode_predictions', (['y_pred'], {'top': '(5)'}), '(y_pred, top=5)\n', (1796, 1811), False, 'from keras.applications.resnet50 import ResNet50, decode_predictions, preprocess_input\n'), ((2458, 2480), 'requests.get', 'requests.get', (['url_cash'], {}), '(url_cash)\n', (2470, 2480), False, 'import requests\n'), ((2602, 2625), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (2620, 2625), False, 'from keras.preprocessing import image\n'), ((2630, 2655), 'numpy.expand_dims', 'np.expand_dims', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (2644, 2655), True, 'import numpy as np\n'), ((2769, 2802), 'keras.applications.resnet50.decode_predictions', 'decode_predictions', (['y_pred'], {'top': '(5)'}), '(y_pred, top=5)\n', (2787, 2802), False, 'from keras.applications.resnet50 import ResNet50, decode_predictions, preprocess_input\n'), ((1261, 1286), 'io.BytesIO', 'BytesIO', (['response.content'], {}), '(response.content)\n', (1268, 1286), False, 'from io import BytesIO\n'), ((1643, 1668), 'numpy.expand_dims', 'np.expand_dims', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1657, 1668), True, 'import numpy as np\n'), ((2498, 2523), 'io.BytesIO', 'BytesIO', (['response.content'], {}), '(response.content)\n', 
(2505, 2523), False, 'from io import BytesIO\n')] |
"""
Test all kinds of errors.
@author <EMAIL>
"""
import pytest
from trafficgenerator.tgn_utils import ApiType
from xenavalkyrie.xena_app import init_xena
from xenavalkyrie.xena_object import XenaAttributeError
from .test_base import TestXenaBase
class TestXenaErrors(TestXenaBase):
def setup(self):
super(TestXenaBase, self).setup()
self.xm = init_xena(self.api, self.logger, self.config.get('Xena', 'owner'),
self.server_ip, self.server_port)
def teardown(self):
self.xm.session.disconnect()
def test_errors(self):
# Test invalid chassis IP and port number.
with pytest.raises(Exception) as excinfo:
self.xm.session.add_chassis('InvalidIp')
assert('IOError' in repr(excinfo.value) or 'OSError' in repr(excinfo.value))
assert(len(self.xm.session.chassis_list) == 0)
if self.api == ApiType.socket:
with pytest.raises(Exception) as excinfo:
self.xm.session.add_chassis(self.chassis, -17)
assert('IOError' in repr(excinfo.value) or 'OSError' in repr(excinfo.value))
assert(len(self.xm.session.chassis_list) == 0)
# Reserve port to continue testing...
self.xm.session.add_chassis(self.chassis)
port = self.xm.session.reserve_ports([self.port1], True)[self.port1]
# Test attributes errors.
with pytest.raises(XenaAttributeError) as _:
port.get_attribute('InvalidAttribute')
with pytest.raises(XenaAttributeError) as _:
port.set_attributes(p_reservation=17)
with pytest.raises(XenaAttributeError) as _:
port.set_attributes(p_reservedby=17)
| [
"pytest.raises"
] | [((656, 680), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (669, 680), False, 'import pytest\n'), ((1415, 1448), 'pytest.raises', 'pytest.raises', (['XenaAttributeError'], {}), '(XenaAttributeError)\n', (1428, 1448), False, 'import pytest\n'), ((1519, 1552), 'pytest.raises', 'pytest.raises', (['XenaAttributeError'], {}), '(XenaAttributeError)\n', (1532, 1552), False, 'import pytest\n'), ((1622, 1655), 'pytest.raises', 'pytest.raises', (['XenaAttributeError'], {}), '(XenaAttributeError)\n', (1635, 1655), False, 'import pytest\n'), ((943, 967), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (956, 967), False, 'import pytest\n')] |
import time
from datetime import datetime as dt
import colorama
from colorama import Fore, Back, Style
import socket
import os
import sys
def CS(X):
time.sleep(X)
os.system("clear")
def socknames():
myHostName = socket.gethostname()
myIP = socket.gethostbyname(myHostName)
print("\033[35m[\033[36m*\033[35m] With Loopback Addr -> {}".format(myIP))
def block2():
hosts_path = "/etc/hosts"
redirect = '127.0.0.1'
print("--------------------- UP TO 6 WEBSITES ------------- ")
op1 = str(input(" Website @> "))
op2 = str(input(" Website @> "))
op3 = str(input(" Website @> "))
op4 = str(input(" Website @> "))
op5 = str(input(" Website @> "))
op6 = str(input(" Website @> "))
website_list = [f"{op1}",f"{op2}",f"{op3}",f"{op4}",f"{op5}",f"{op6}" ]
while True:
if dt(dt.now().year, dt.now().month, dt.now().day,8) < dt.now() < dt(dt.now().year, dt.now().month, dt.now().day,16):
with open(hosts_path, 'r+') as file:
content = file.read()
for website in website_list:
if website in content:
time.sleep(4)
print(f"\033[35m[\033[36m*\033[35m] Blocking Host -> ", website_list)
socknames()
pass
else:
file.write(redirect + " " + website + "\n")
else:
with open(hosts_path, 'r+') as file:
content=file.readlines()
file.seek(0)
for line in content:
if not any(website in line for website in website_list):
file.write(line)
file.truncate()
time.sleep(5)
try:
hosts_path = "/etc/hosts"
with open('/etc/hosts', 'r') as fr:
lines = fr.readlines()
ptr = 1
with open('months.txt', 'w') as fw:
for line in lines:
if ptr != 6:
fw.write(line)
ptr += 1
print("\033[35m[\033[36m*\033[35m] File Write Deleted......")
except:
print("[!] [DATA]->[ERROR]->[SYS]->[WRITE] ")
print(f"[!] HOSTS COULD NOT BE DELETED FROM {hosts_path}")
print("[!] THIS CAN RESULT IN A PERMANANT LOCK FROM THE ")
print("[!] TARGETED WEBSITES CATTING THE HOST FOR VIEW ")
print("--=-=-=-=-=-=-=-=-=--=-=-=-=-=--=-=--=-=-=-=--=-=-")
os.system("cat /etc/hosts")
def block1():
us = str(input(" Website @> "))
website_list = [f"{us}"]
while True:
hosts_path = "/etc/hosts"
if dt(dt.now().year, dt.now().month, dt.now().day,8) < dt.now() < dt(dt.now().year, dt.now().month, dt.now().day,16):
with open(hosts_path, 'r+') as file:
content = file.read()
for website in website_list:
if website in content:
time.sleep(4)
print(f"\033[35m[\033[36m*\033[35m] Blocking Host -> ", website_list)
socknames()
pass
else:
file.write(redirect + " " + website + "\n")
else:
with open(hosts_path, 'r+') as file:
content=file.readlines()
file.seek(0)
for line in content:
if not any(website in line for website in website_list):
file.write(line)
file.truncate()
time.sleep(5)
try:
hosts_path = "/etc/hosts"
with open('/etc/hosts', 'r') as fr:
lines = fr.readlines()
ptr = 1
with open('months.txt', 'w') as fw:
for line in lines:
if ptr != 6:
fw.write(line)
ptr += 1
print("\033[35m[\033[36m*\033[35m] File Write Deleted......")
except:
print("[!] [DATA]->[ERROR]->[SYS]->[WRITE] ")
print(f"[!] HOSTS COULD NOT BE DELETED FROM {hosts_path}")
print("[!] THIS CAN RESULT IN A PERMANANT LOCK FROM THE ")
print("[!] TARGETED WEBSITES CATTING THE HOST FOR VIEW ")
print("--=-=-=-=-=-=-=-=-=--=-=-=-=-=--=-=--=-=-=-=--=-=-")
os.system("cat /etc/hosts")
def menu():
print("""
__ __ __ ___
| |--.| |.-----..----.| |--. ______ .' _|.-----..----.
| _ || || _ || __|| < |______|| _|| -__|| _|
|_____||__||_____||____||__|__| |__| |_____||__|
Block websites and host names ArkAngeL43
--------------------------------------------------------
[1] -> Block A Website
[2] -> Block Multiple
""")
option = str(input(" Options @> "))
if option == '1':
block1()
elif option == '2':
block2()
if __name__ == "__main__":
try:
CS(1)
menu()
except KeyboardInterrupt:
print("\n")
hosts_path = "/etc/hosts"
print("""\033[31mIF THIS MESSAGE APPEARED PLEASE DELETE IT MANUALLY OR
IF A WEBSITE IS STILL BEING BLOCKED
""")
print("\033[31m----------------------------------------------------")
print("\033[31m[!] [DATA]->[ERROR]->[SYS]->[WRITE] ")
print(f"\033[31m[!] HOSTS COULD NOT BE DELETED FROM {hosts_path}")
print("\033[31m[!] THIS CAN RESULT IN A PERMANANT LOCK FROM THE ")
print("\033[31m[!] TARGETED WEBSITES CATTING THE HOST FOR VIEW ")
print("\033[31m--=-=-=-=-=-=-=-=-=--=-=-=-=-=--=-=--=-=-=-=--=-=-")
print(Fore.GREEN+"Printing File of hosts......")
time.sleep(2)
os.system(f"cat {hosts_path}")
| [
"socket.gethostbyname",
"time.sleep",
"datetime.datetime.now",
"os.system",
"socket.gethostname"
] | [((159, 172), 'time.sleep', 'time.sleep', (['X'], {}), '(X)\n', (169, 172), False, 'import time\n'), ((177, 195), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (186, 195), False, 'import os\n'), ((231, 251), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (249, 251), False, 'import socket\n'), ((263, 295), 'socket.gethostbyname', 'socket.gethostbyname', (['myHostName'], {}), '(myHostName)\n', (283, 295), False, 'import socket\n'), ((1755, 1768), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1765, 1768), False, 'import time\n'), ((3670, 3683), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3680, 3683), False, 'import time\n'), ((893, 901), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (899, 901), True, 'from datetime import datetime as dt\n'), ((2808, 2816), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (2814, 2816), True, 'from datetime import datetime as dt\n'), ((5931, 5944), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (5941, 5944), False, 'import time\n'), ((5953, 5983), 'os.system', 'os.system', (['f"""cat {hosts_path}"""'], {}), "(f'cat {hosts_path}')\n", (5962, 5983), False, 'import os\n'), ((2583, 2610), 'os.system', 'os.system', (['"""cat /etc/hosts"""'], {}), "('cat /etc/hosts')\n", (2592, 2610), False, 'import os\n'), ((4498, 4525), 'os.system', 'os.system', (['"""cat /etc/hosts"""'], {}), "('cat /etc/hosts')\n", (4507, 4525), False, 'import os\n'), ((844, 852), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (850, 852), True, 'from datetime import datetime as dt\n'), ((859, 867), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (865, 867), True, 'from datetime import datetime as dt\n'), ((875, 883), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (881, 883), True, 'from datetime import datetime as dt\n'), ((907, 915), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (913, 915), True, 'from datetime import datetime as dt\n'), ((922, 930), 'datetime.datetime.now', 
'dt.now', ([], {}), '()\n', (928, 930), True, 'from datetime import datetime as dt\n'), ((938, 946), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (944, 946), True, 'from datetime import datetime as dt\n'), ((2759, 2767), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (2765, 2767), True, 'from datetime import datetime as dt\n'), ((2774, 2782), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (2780, 2782), True, 'from datetime import datetime as dt\n'), ((2790, 2798), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (2796, 2798), True, 'from datetime import datetime as dt\n'), ((2822, 2830), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (2828, 2830), True, 'from datetime import datetime as dt\n'), ((2837, 2845), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (2843, 2845), True, 'from datetime import datetime as dt\n'), ((2853, 2861), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (2859, 2861), True, 'from datetime import datetime as dt\n'), ((1155, 1168), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (1165, 1168), False, 'import time\n'), ((3070, 3083), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (3080, 3083), False, 'import time\n')] |
import json
import pandas as pd
from AnalysisModule.prepare.diagram import BuildingUnit
from AnalysisModule.routines.util import read_jsonfile
"""
4 ways to deal with strange bus
A - exclude bu, keep crystals
A' - merge bu, keep crystals
B - exclude crystals
C - keep all
note 2020/11/24:
- all A are modified to A'
- use bu_0 bu_1 bu_2 to represent bus
- if bu_x does not exist, the field is set to -1
buid: 9 len: 9 C
- H2PO2, reactants include hypophosphorous acid
buid: 18 len: 9 A'
- this is tricky: some of them come from PO4 across pbc, some of them are HPO3 without hydrogens
- HPO3: (BU0)
BEZVIO
BEZVOU
BEZVUA
BEZWAH
CASWIE
TEXSEV
- PO4: (BU1)
CUHCIR
POVMOC
QOBWEJ
buid: 19 len: 21 B
- some of them are MeOH e.g. coordinated to a metal, some of them are MeO with O acting as a bridge e.g. between metals
buid: 21 len: 5 B
- ethylene glycol
buid: 23 len: 8 B
- ethanol
buid: 24 len: 9 A (2020/11/24 -> 1 A', BU1)
- PFO3
- similar to BU25, HPF6 is used as input
buid: 25 len: 2 A (2020/11/24 -> 1 A', BU1)
- PF2O2
- HPF6 involved in both synthesis
buid: 28 len: 1 A (2020/11/24 -> 1 A', BU5)
- octahedral SiO6(2-)
buid: 29 len: 1 B
- O-C(O)-CH2-O
- synthesis uses glycolic acid
buid: 31 len: 7 B
- O2C-CH2-S-CH2-CO2
- uranyl thiodigycolate is used in synthesis
buid: 32 len: 1 A'
- KAPSUR, distorted NO3 (BU10)
buid: 34 len: 1 A'
- SiO3, just broken SiO4 (BU5) by pbc
buid: 36 len: 1 A'
- WAQVOZ, glitched CO3 (BU15)
"""
records = pd.read_csv("3_bulist.csv").to_dict("records")
curated_records = []
curated_bus = []
for ir in range(len(records)):
records[ir]["bus"] = json.loads(records[ir]["bus"])
identifier = records[ir]["identifier"]
# merge A'
if 18 in records[ir]["bus"]:
if identifier in ["BEZVIO", "BEZVOU", "BEZVUA", "BEZWAH", "CASWIE", "TEXSEV", ]:
records[ir]["bus"] = [0 if x == 18 else x for x in records[ir]["bus"]]
elif identifier in ["CUHCIR", "POVMOC", "QOBWEJ", ]:
records[ir]["bus"] = [1 if x == 18 else x for x in records[ir]["bus"]]
else:
raise NotImplementedError("BU 18 not merged due to unknown identifier: {}".format(identifier))
# aprime_dict = {32: 10, 34: 5, 36: 15}
aprime_dict = {32: 10, 34: 5, 36: 15, 24: 1, 25: 1, 28: 5} # 2020/11/24
for buid_aprime in aprime_dict.keys():
if buid_aprime in records[ir]["bus"]:
records[ir]["bus"] = [aprime_dict[buid_aprime] if x == buid_aprime else x for x in records[ir]["bus"]]
# exclude crystals with B
if set(records[ir]["bus"]).intersection({19, 21, 23, 29, 31}):
continue
# very few crystal has more than 2 bus
if len(records[ir]["bus"]) > 2:
print("bus len > 2: ", len(records[ir]["bus"]), records[ir]["identifier"])
curated_bus += records[ir]["bus"]
curated_records.append(records[ir])
df = pd.DataFrame.from_records(curated_records)
df.to_csv("4_bucurate.csv", index=False)
df[["identifier"]].to_csv("4_bucurate.gcd", index=False, header=False)
curated_bus = set(curated_bus)
print("# of curated bus", len(curated_bus))
bus = read_jsonfile("3_bulist.json")
outf = open("4_bucurate.html", "w")
for bu in bus:
bu: BuildingUnit
if bu.buid in curated_bus:
svgtext = bu.draw_by_cdg(title=bu.buid)
outf.write(svgtext)
outf.close()
| [
"pandas.DataFrame.from_records",
"json.loads",
"AnalysisModule.routines.util.read_jsonfile",
"pandas.read_csv"
] | [((2992, 3034), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['curated_records'], {}), '(curated_records)\n', (3017, 3034), True, 'import pandas as pd\n'), ((3229, 3259), 'AnalysisModule.routines.util.read_jsonfile', 'read_jsonfile', (['"""3_bulist.json"""'], {}), "('3_bulist.json')\n", (3242, 3259), False, 'from AnalysisModule.routines.util import read_jsonfile\n'), ((1747, 1777), 'json.loads', 'json.loads', (["records[ir]['bus']"], {}), "(records[ir]['bus'])\n", (1757, 1777), False, 'import json\n'), ((1606, 1633), 'pandas.read_csv', 'pd.read_csv', (['"""3_bulist.csv"""'], {}), "('3_bulist.csv')\n", (1617, 1633), True, 'import pandas as pd\n')] |
# flake8: noqa
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Milestone'
db.create_table('roadmap_milestone', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('start_date', self.gf('django.db.models.fields.DateField')()),
))
db.send_create_signal('roadmap', ['Milestone'])
# Adding model 'MilestoneTranslation'
db.create_table('roadmap_milestonetranslation', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('milestone', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['roadmap.Milestone'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=1024)),
('language', self.gf('django.db.models.fields.CharField')(max_length=5)),
))
db.send_create_signal('roadmap', ['MilestoneTranslation'])
def backwards(self, orm):
# Deleting model 'Milestone'
db.delete_table('roadmap_milestone')
# Deleting model 'MilestoneTranslation'
db.delete_table('roadmap_milestonetranslation')
models = {
'roadmap.milestone': {
'Meta': {'object_name': 'Milestone'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {})
},
'roadmap.milestonetranslation': {
'Meta': {'object_name': 'MilestoneTranslation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'milestone': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['roadmap.Milestone']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
}
}
complete_apps = ['roadmap']
| [
"south.db.db.send_create_signal",
"south.db.db.delete_table"
] | [((472, 519), 'south.db.db.send_create_signal', 'db.send_create_signal', (['"""roadmap"""', "['Milestone']"], {}), "('roadmap', ['Milestone'])\n", (493, 519), False, 'from south.db import db\n'), ((1010, 1068), 'south.db.db.send_create_signal', 'db.send_create_signal', (['"""roadmap"""', "['MilestoneTranslation']"], {}), "('roadmap', ['MilestoneTranslation'])\n", (1031, 1068), False, 'from south.db import db\n'), ((1146, 1182), 'south.db.db.delete_table', 'db.delete_table', (['"""roadmap_milestone"""'], {}), "('roadmap_milestone')\n", (1161, 1182), False, 'from south.db import db\n'), ((1240, 1287), 'south.db.db.delete_table', 'db.delete_table', (['"""roadmap_milestonetranslation"""'], {}), "('roadmap_milestonetranslation')\n", (1255, 1287), False, 'from south.db import db\n')] |
from base import CQPartsTest
from base import testlabel
# units under test
from cqparts_fasteners.fasteners.nutbolt import NutAndBoltFastener
# ---------- Test Assembly ----------
import cadquery
import cqparts
from partslib.basic import Box
from cqparts import constraint
from cqparts.utils import CoordSystem
class FastenedAssembly(cqparts.Assembly):
def make_components(self):
base = Box(length=20, width=20, height=12)
top = Box(length=18, width=18, height=18)
return {
'base': base,
'top': top,
'fastener': NutAndBoltFastener(parts=[base, top]),
}
def make_constraints(self):
base = self.components['base']
top = self.components['top']
fastener = self.components['fastener']
return [
constraint.Fixed(base.mate_bottom),
constraint.Coincident(top.mate_bottom, base.mate_top),
constraint.Coincident(fastener.mate_origin, top.mate_top + CoordSystem((1, 2, 0))),
]
# ---------- Unit Tests ----------
class ScrewFastenerTest(CQPartsTest):
def test_fastener(self):
obj = FastenedAssembly()
bolt = obj.find('fastener.bolt')
nut = obj.find('fastener.nut')
self.assertEquals(bolt.world_coords.origin, cadquery.Vector((1, 2, 30)))
self.assertGreater(
bolt.bounding_box.zlen,
obj.find('top').height + obj.find('base').height
)
self.assertEquals(nut.world_coords.origin, cadquery.Vector((1, 2, 0)))
| [
"partslib.basic.Box",
"cqparts.utils.CoordSystem",
"cqparts_fasteners.fasteners.nutbolt.NutAndBoltFastener",
"cqparts.constraint.Coincident",
"cadquery.Vector",
"cqparts.constraint.Fixed"
] | [((403, 438), 'partslib.basic.Box', 'Box', ([], {'length': '(20)', 'width': '(20)', 'height': '(12)'}), '(length=20, width=20, height=12)\n', (406, 438), False, 'from partslib.basic import Box\n'), ((453, 488), 'partslib.basic.Box', 'Box', ([], {'length': '(18)', 'width': '(18)', 'height': '(18)'}), '(length=18, width=18, height=18)\n', (456, 488), False, 'from partslib.basic import Box\n'), ((580, 617), 'cqparts_fasteners.fasteners.nutbolt.NutAndBoltFastener', 'NutAndBoltFastener', ([], {'parts': '[base, top]'}), '(parts=[base, top])\n', (598, 617), False, 'from cqparts_fasteners.fasteners.nutbolt import NutAndBoltFastener\n'), ((814, 848), 'cqparts.constraint.Fixed', 'constraint.Fixed', (['base.mate_bottom'], {}), '(base.mate_bottom)\n', (830, 848), False, 'from cqparts import constraint\n'), ((862, 915), 'cqparts.constraint.Coincident', 'constraint.Coincident', (['top.mate_bottom', 'base.mate_top'], {}), '(top.mate_bottom, base.mate_top)\n', (883, 915), False, 'from cqparts import constraint\n'), ((1294, 1321), 'cadquery.Vector', 'cadquery.Vector', (['(1, 2, 30)'], {}), '((1, 2, 30))\n', (1309, 1321), False, 'import cadquery\n'), ((1509, 1535), 'cadquery.Vector', 'cadquery.Vector', (['(1, 2, 0)'], {}), '((1, 2, 0))\n', (1524, 1535), False, 'import cadquery\n'), ((988, 1010), 'cqparts.utils.CoordSystem', 'CoordSystem', (['(1, 2, 0)'], {}), '((1, 2, 0))\n', (999, 1010), False, 'from cqparts.utils import CoordSystem\n')] |
# Generated by Django 3.0.8 on 2021-02-20 13:51
import ckeditor.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('eventsapi', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserDetails',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='EventGallery',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('gallery_image', models.TextField()),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='event_gallery', to='eventsapi.Event')),
],
),
migrations.CreateModel(
name='EventDetails',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('event_date', models.DateField()),
('description', ckeditor.fields.RichTextField(blank=True, null=True)),
('main_image', models.TextField()),
('banner_image', models.TextField()),
('event', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='event_description', to='eventsapi.Event')),
],
),
]
| [
"django.db.models.OneToOneField",
"django.db.models.DateField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.AutoField"
] | [((382, 475), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (398, 475), False, 'from django.db import migrations, models\n'), ((609, 702), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (625, 702), False, 'from django.db import migrations, models\n'), ((735, 753), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (751, 753), False, 'from django.db import migrations, models\n'), ((782, 901), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""event_gallery"""', 'to': '"""eventsapi.Event"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='event_gallery', to='eventsapi.Event')\n", (799, 901), False, 'from django.db import migrations, models\n'), ((1034, 1127), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1050, 1127), False, 'from django.db import migrations, models\n'), ((1157, 1175), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (1173, 1175), False, 'from django.db import migrations, models\n'), ((1296, 1314), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1312, 1314), False, 'from django.db import migrations, models\n'), ((1350, 1368), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1366, 1368), False, 'from django.db import migrations, models\n'), ((1397, 1522), 'django.db.models.OneToOneField', 
'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""event_description"""', 'to': '"""eventsapi.Event"""'}), "(on_delete=django.db.models.deletion.CASCADE,\n related_name='event_description', to='eventsapi.Event')\n", (1417, 1522), False, 'from django.db import migrations, models\n')] |
"""
This file is for initializing the uWSGI application. It defines a post-fork command in
order to avoid an issue in uWSGI that prevents MongoEngine from lazily creating
mongo connections
"""
from uwsgidecorators import postfork
from mongomail.rest_app import app, db, connection
@postfork
def init_db():
db.init_app(app)
print("initializing db")
if not connection.get_api_keys():
print("No Key found")
key = connection.generate_api_key()
print("Generated key: " + key)
| [
"mongomail.rest_app.connection.get_api_keys",
"mongomail.rest_app.db.init_app",
"mongomail.rest_app.connection.generate_api_key"
] | [((313, 329), 'mongomail.rest_app.db.init_app', 'db.init_app', (['app'], {}), '(app)\n', (324, 329), False, 'from mongomail.rest_app import app, db, connection\n'), ((370, 395), 'mongomail.rest_app.connection.get_api_keys', 'connection.get_api_keys', ([], {}), '()\n', (393, 395), False, 'from mongomail.rest_app import app, db, connection\n'), ((441, 470), 'mongomail.rest_app.connection.generate_api_key', 'connection.generate_api_key', ([], {}), '()\n', (468, 470), False, 'from mongomail.rest_app import app, db, connection\n')] |
import cupy as cp
def get_batches(X,y,batch_size,seed,shuffle):
cp.random.seed(seed)
m = X.shape[0]
mini_batches = []
# 第一步打乱X,Y
if shuffle:
permutation = cp.random.permutation(m) # 返回一个长度为m的list,里面的值为0到m-1
shuffled_X = X[permutation]
shuffled_y = y[permutation]
else:
shuffled_X=X
shuffled_y=y
# 第二步分割数据
complete_batch_nums = m // batch_size # 完整的minibatch个数
for i in range(complete_batch_nums):
mini_batch_X = shuffled_X[batch_size * i:batch_size * (i + 1)]
mini_batch_y = shuffled_y[batch_size * i:batch_size * (i + 1)]
mini_batch = (mini_batch_X, mini_batch_y)
mini_batches.append(mini_batch)
if m % batch_size != 0:
mini_batch_X = shuffled_X[ batch_size * complete_batch_nums:]
mini_batch_y = shuffled_y[ batch_size * complete_batch_nums:]
mini_batch = (mini_batch_X, mini_batch_y)
mini_batches.append(mini_batch)
return mini_batches | [
"cupy.random.seed",
"cupy.random.permutation"
] | [((78, 98), 'cupy.random.seed', 'cp.random.seed', (['seed'], {}), '(seed)\n', (92, 98), True, 'import cupy as cp\n'), ((198, 222), 'cupy.random.permutation', 'cp.random.permutation', (['m'], {}), '(m)\n', (219, 222), True, 'import cupy as cp\n')] |
# -*- coding: utf-8 -*-
"""
Plot comparisons of IHME projections to actual data for US states.
IHME data per IHME:
https://covid19.healthdata.org/united-states-of-america
IHME data stored here in the "..\data\ihme" directory for each release
that was obtained.
State-level data per Covid tracking project:
https://covidtracking.com/
Data for the COVID-19 repo is contained here in the
"..\data\covid19_tracker" directory for each day that the state
historical values were obtained.
"""
import os
import numpy as np
from scipy.integrate import solve_ivp
from scipy.optimize import fsolve
import matplotlib.pyplot as plt
from datetime import date
from scipy.signal import medfilt
from read_data import get_data_ctrack, get_data_ihme, format_date_ihme
def intfun(s):
try:
return int(s)
except ValueError:
return 0
# Select states and set data dates for display
#state = 'NY'
#state_long = 'New York'
#state = 'GA'
#state_long = 'Georgia'
#state = 'KY'
#state_long = 'Kentucky'
#state = 'CA'
#state_long = 'California'
#state = 'WI'
#state_long = 'Wisconsin'
#ylpct = [0., 15.]
#state = 'IA'
#state_long = 'Iowa'
state = 'AL'
state_long = 'Alabama'
#state = 'OR'
#state_long = 'Oregon'
#state = 'FL'
#state_long = 'Florida'
#ylpct = [0.,25.]
#state = 'MI'
#state_long = 'Michigan'
#state = 'WA'
#state_long = 'Washington'
#state = 'DC'
#state_long = 'District of Columbia'
#state = 'NJ'
#state_long = 'New Jersey'
#state = 'OK'
#state_long = 'Oklahoma'
#state = 'SD'
#state_long = 'South Dakota'
# TODO: Have to add all state data together for the covid tracker data
#state = 'US'
#state_long = 'US'
#state = 'TX'
#state_long = 'Texas'
#state = 'GA'
#state_long = 'Georgia'
#state = 'MN'
#state_long = 'Minnesota'
#state = 'CO'
#state_long = 'Colorado'
ylpct = [0., 30.]
# Set files which we're loading from and set data dates for display
data_filename = r'..\data\covid19_tracker\states-daily_20200504.csv'
data_date = '04 May'
#model_fname = r'..\data\ihme\2020_03_31.1\Hospitalization_all_locs.csv'
#project_date = '31 March'
#model_fname = r'..\data\ihme\2020_04_12.02\Hospitalization_all_locs.csv'
#project_date = '13 April'
model_fname = r'..\data\ihme\2020_04_16.05\Hospitalization_all_locs.csv'
project_date = '17 April'
# When to stop the plotting
start_date = '20200401'
stop_date = '20200510'
# Which plots to make
plot_testing = True
plot_hosp_death = True
today = date.today()
# Load data and format
data = get_data_ctrack(state, data_filename)
dates = data['date']
start_date_ind = list(dates).index(start_date)
dates = dates[start_date_ind:]
pos = data['positive']
neg = data['negative']
hosp = data['hospitalizedCurrently']
icu = data['inIcuCurrently']
vent = data['onVentilatorCurrently']
death = data['death']
date_inds = range(len(dates))
dpos = np.diff(pos, prepend = 0)
dneg = np.diff(neg, prepend = 0)
dhosp = np.diff(hosp, prepend = 0.)
ddhosp = np.diff(dhosp, prepend = 0)
ddeath = np.diff(death, prepend = 0)
pos = pos[start_date_ind:]
neg = neg[start_date_ind:]
hosp = hosp[start_date_ind:]
death = death[start_date_ind:]
dpos = dpos[start_date_ind:]
dneg = dneg[start_date_ind:]
dhosp = dhosp[start_date_ind:]
ddeath = ddeath[start_date_ind:]
xticks = date_inds[::4]
xticklabels = ['%s/%s' % (s[-3], s[-2:]) for s in dates[::4]]
# Load ihme data
data_ihme = get_data_ihme(model_fname)[state_long]
dates_ihme = [format_date_ihme(s) for s in data_ihme['date']]
# Trim to desired range
start_ihme = dates_ihme.index(start_date)
stop_ihme = dates_ihme.index(stop_date)
dates_ihme = dates_ihme[start_ihme:stop_ihme]
date_inds_ihme = range(len(dates_ihme))
dhosp_ihme_m, dhosp_ihme_l, dhosp_ihme_u = (data_ihme['admis_mean'][start_ihme:stop_ihme],
data_ihme['admis_lower'][start_ihme:stop_ihme],
data_ihme['admis_upper'][start_ihme:stop_ihme])
hosp_ihme_m, hosp_ihme_l, hosp_ihme_u = (data_ihme['allbed_mean'][start_ihme:stop_ihme],
data_ihme['allbed_lower'][start_ihme:stop_ihme],
data_ihme['allbed_upper'][start_ihme:stop_ihme])
death_ihme_m, death_ihme_l, death_ihme_u = (data_ihme['totdea_mean'][start_ihme:stop_ihme],
data_ihme['totdea_lower'][start_ihme:stop_ihme],
data_ihme['totdea_upper'][start_ihme:stop_ihme])
ddeath_ihme_m, ddeath_ihme_l, ddeath_ihme_u = (data_ihme['deaths_mean'][start_ihme:stop_ihme],
data_ihme['deaths_lower'][start_ihme:stop_ihme],
data_ihme['deaths_upper'][start_ihme:stop_ihme])
xticks = date_inds_ihme[::4]
xticklabels = ['%s/%s' % (s[-3], s[-2:]) for s in dates_ihme[::4]]
#%% Data on tests
if plot_testing:
fig, ax = plt.subplots(1, 3, figsize = (17, 5))
gray = 0.3*np.array([1, 1, 1])
lightblue = [0.3, 0.3, 0.8]
darkblue = [0.2, 0.2, 0.6]
red = [0.6, 0.2, 0.2]
lightred = [0.8, 0.4, 0.4]
dtotal = dpos + dneg
avg_7 = medfilt(dtotal, 7)
ax[0].plot(dates, dtotal, 'o', label = 'Total Tests',
color = darkblue, markerfacecolor = lightblue)
ax[0].plot(dates, avg_7, 'k--', label = '7 Day Moving Average')
ax[0].set_xticks(xticks)
ax[0].set_xticklabels(xticklabels)
ax[0].set_ylabel('Number of Tests', fontsize = 12, fontweight = 'bold')
ax[0].set_xlabel('Date', fontsize = 12, fontweight = 'bold')
ax[1].plot(dates, dpos, 'o', label = 'Positive Tests',
color = red, markerfacecolor = lightred)
avg_3 = medfilt(dpos, 3)
avg_7 = medfilt(dpos, 7)
# ax[1].plot(dates, avg_3, 'b--', label = '3 Day Moving Average')
ax[1].plot(dates, avg_7, 'k--', label = '7 Day Moving Average')
ax[1].set_xticks(xticks)
ax[1].set_xticklabels(xticklabels)
ax[1].set_ylabel('Number of Positives', fontsize = 12, fontweight = 'bold')
ax[1].set_xlabel('Date', fontsize = 12, fontweight = 'bold')
avg_7 = medfilt(100*dpos/dtotal, 7)
ax[2].plot(dates, avg_7, 'k--', label = '7 Day Moving Average')
ax[2].plot(dates, 100*dpos/dtotal, 'o', color = 'k',
markerfacecolor = gray)
ax[2].set_xticks(xticks)
ax[2].set_xticklabels(xticklabels)
ax[2].set_xlabel('Date', fontweight = 'bold', fontsize = 12)
ax[2].set_ylabel('Percentage of Positive Tests',
fontweight = 'bold', fontsize = 12)
ax[0].set_title('All Tests', fontsize = 12, fontweight = 'bold')
ax[1].set_title('Positive Tests', fontsize = 12, fontweight = 'bold')
ax[2].set_title('Percentage of Tests Positive', fontsize = 12, fontweight = 'bold')
yl0 = ax[0].get_ylim()
yl1 = ax[1].get_ylim()
yl2 = ax[2].get_ylim()
ax[0].set_ylim([-5, yl0[1]])
ax[0].set_xlim([0, len(dates)])
ax[1].set_ylim([-5, yl1[1]])
ax[1].set_xlim([0, len(dates)])
ax[1].legend()
if ylpct is None:
ax[2].set_ylim([-5, yl2[1]])
else:
ax[2].set_ylim(ylpct)
ax[2].set_xlim([0, len(dates)])
fig.suptitle('%s: All Tests, Positive Tests, and Positive Test Percentages' %
state_long, fontsize = 14, fontweight = 'bold')
impath = '../images/test_data'
imname = '%s_data%s_%s.png' % (state_long, data_date, str(today))
plt.savefig(os.path.join(impath, imname), bbox_inches = 'tight')
#%% Show info on hospitalizations and deaths
if plot_hosp_death:
impath = '../images/ihme_compare'
imname = '%s_data%s_project%s_%s.png' % (state_long, data_date, project_date, str(today))
lightblue = [0.3, 0.3, 0.8]
darkblue = [0.2, 0.2, 0.6]
fig, ax = plt.subplots(2, 2, figsize = (12, 6))
ax = ax.flatten()
ax[0].plot(dates, hosp, 'o', label = 'Reported',
color = darkblue, markerfacecolor = lightblue)
ax[0].plot(dates_ihme, hosp_ihme_m, 'k-', label = 'IHME Projected [Mean]')
ax[0].plot(dates_ihme, hosp_ihme_l, 'r--', label = 'IHME Projected [Lower CI]')
ax[0].plot(dates_ihme, hosp_ihme_u, 'r--', label = 'IHME Projected [Upper CI]')
ax[0].set_xlim(0, date_inds_ihme[-1])
ax[0].set_xticks(xticks)
ax[0].set_xticklabels(xticklabels)
ax[0].legend()
ax[0].set_ylabel('Total Hospitalized', fontsize = 12, fontweight = 'bold')
ax[0].set_title('Hospitalizations', fontsize = 12, fontweight = 'bold')
ax[2].plot(dates, dhosp, 'o',
color = darkblue, markerfacecolor = lightblue)
ax[2].plot(dates_ihme, dhosp_ihme_m, 'k-')
ax[2].plot(dates_ihme, dhosp_ihme_l, 'r--')
ax[2].plot(dates_ihme, dhosp_ihme_u, 'r--')
ax[2].set_xlim(0, date_inds_ihme[-1])
ax[2].set_xticks(xticks)
ax[2].set_xticklabels(xticklabels)
ax[2].set_ylabel('New Hospitalized', fontsize = 12, fontweight = 'bold')
ax[2].set_xlabel('Date', fontsize = 12, fontweight = 'bold')
ax[1].plot(dates, death, 'o', label = 'Reported',
color = darkblue, markerfacecolor = lightblue)
ax[1].plot(dates_ihme, death_ihme_m, 'k-', label = 'IHME Projected [Mean]')
ax[1].plot(dates_ihme, death_ihme_l, 'r--', label = 'IHME Projected [Lower CI]')
ax[1].plot(dates_ihme, death_ihme_u, 'r--', label = 'IHME Projected [Upper CI]')
ax[1].set_xlim(0, date_inds_ihme[-1])
ax[1].set_xticks(xticks)
ax[1].set_xticklabels(xticklabels)
ax[1].legend()
ax[1].set_ylabel('Total Deaths', fontsize = 12, fontweight = 'bold')
ax[1].set_title('Deaths', fontsize = 12, fontweight = 'bold')
ax[3].plot(dates, ddeath, 'o',
color = darkblue, markerfacecolor = lightblue)
ax[3].plot(dates_ihme, ddeath_ihme_m, 'k-')
ax[3].plot(dates_ihme, ddeath_ihme_l, 'r--')
ax[3].plot(dates_ihme, ddeath_ihme_u, 'r--')
ax[3].set_xlim(0, date_inds_ihme[-1])
ax[3].set_xticks(xticks)
ax[3].set_xticklabels(xticklabels)
ax[3].set_ylabel('New Deaths', fontsize = 12, fontweight = 'bold')
ax[3].set_xlabel('Date', fontsize = 12, fontweight = 'bold')
# plt.tight_layout()
fig.suptitle('%s: Reported Data [%s] vs IHME Projections [%s]' %
(state_long, data_date, project_date), fontsize = 14, fontweight = 'bold')
plt.savefig(os.path.join(impath, imname), bbox_inches = 'tight')
| [
"read_data.get_data_ctrack",
"os.path.join",
"read_data.format_date_ihme",
"numpy.diff",
"read_data.get_data_ihme",
"numpy.array",
"scipy.signal.medfilt",
"datetime.date.today",
"matplotlib.pyplot.subplots"
] | [((2462, 2474), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2472, 2474), False, 'from datetime import date\n'), ((2506, 2543), 'read_data.get_data_ctrack', 'get_data_ctrack', (['state', 'data_filename'], {}), '(state, data_filename)\n', (2521, 2543), False, 'from read_data import get_data_ctrack, get_data_ihme, format_date_ihme\n'), ((2863, 2886), 'numpy.diff', 'np.diff', (['pos'], {'prepend': '(0)'}), '(pos, prepend=0)\n', (2870, 2886), True, 'import numpy as np\n'), ((2896, 2919), 'numpy.diff', 'np.diff', (['neg'], {'prepend': '(0)'}), '(neg, prepend=0)\n', (2903, 2919), True, 'import numpy as np\n'), ((2930, 2956), 'numpy.diff', 'np.diff', (['hosp'], {'prepend': '(0.0)'}), '(hosp, prepend=0.0)\n', (2937, 2956), True, 'import numpy as np\n'), ((2967, 2992), 'numpy.diff', 'np.diff', (['dhosp'], {'prepend': '(0)'}), '(dhosp, prepend=0)\n', (2974, 2992), True, 'import numpy as np\n'), ((3004, 3029), 'numpy.diff', 'np.diff', (['death'], {'prepend': '(0)'}), '(death, prepend=0)\n', (3011, 3029), True, 'import numpy as np\n'), ((3388, 3414), 'read_data.get_data_ihme', 'get_data_ihme', (['model_fname'], {}), '(model_fname)\n', (3401, 3414), False, 'from read_data import get_data_ctrack, get_data_ihme, format_date_ihme\n'), ((3441, 3460), 'read_data.format_date_ihme', 'format_date_ihme', (['s'], {}), '(s)\n', (3457, 3460), False, 'from read_data import get_data_ctrack, get_data_ihme, format_date_ihme\n'), ((5016, 5051), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(17, 5)'}), '(1, 3, figsize=(17, 5))\n', (5028, 5051), True, 'import matplotlib.pyplot as plt\n'), ((5256, 5274), 'scipy.signal.medfilt', 'medfilt', (['dtotal', '(7)'], {}), '(dtotal, 7)\n', (5263, 5274), False, 'from scipy.signal import medfilt\n'), ((5811, 5827), 'scipy.signal.medfilt', 'medfilt', (['dpos', '(3)'], {}), '(dpos, 3)\n', (5818, 5827), False, 'from scipy.signal import medfilt\n'), ((5840, 5856), 'scipy.signal.medfilt', 'medfilt', (['dpos', '(7)'], 
{}), '(dpos, 7)\n', (5847, 5856), False, 'from scipy.signal import medfilt\n'), ((6225, 6256), 'scipy.signal.medfilt', 'medfilt', (['(100 * dpos / dtotal)', '(7)'], {}), '(100 * dpos / dtotal, 7)\n', (6232, 6256), False, 'from scipy.signal import medfilt\n'), ((7919, 7954), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(12, 6)'}), '(2, 2, figsize=(12, 6))\n', (7931, 7954), True, 'import matplotlib.pyplot as plt\n'), ((5069, 5088), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (5077, 5088), True, 'import numpy as np\n'), ((7579, 7607), 'os.path.join', 'os.path.join', (['impath', 'imname'], {}), '(impath, imname)\n', (7591, 7607), False, 'import os\n'), ((10494, 10522), 'os.path.join', 'os.path.join', (['impath', 'imname'], {}), '(impath, imname)\n', (10506, 10522), False, 'import os\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sys
import os.path
from PIL import Image
import logging
import numpy as np
from .base_analyzer import BaseAnnotator
if os.environ.get('PYTORCH_MODE',False):
import dvalib.crnn.utils as utils
import dvalib.crnn.dataset as dataset
import torch
from torch.autograd import Variable
import dvalib.crnn.models.crnn as crnn
logging.info("In pytorch mode, not importing TF")
elif os.environ.get('CAFFE_MODE',False):
pass
else:
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.nets import inception
from tensorflow.python.training import saver as tf_saver
slim = tf.contrib.slim
def inception_preprocess(image, central_fraction=0.875):
image = tf.cast(tf.image.decode_jpeg(image, channels=3), tf.float32)
# image = tf.image.central_crop(image, central_fraction=central_fraction)
image = tf.expand_dims(image, [0])
# TODO try tf.image.resize_image_with_crop_or_pad and tf.image.extract_glimpse
image = tf.image.resize_bilinear(image, [299, 299], align_corners=False)
# Center the image about 128.0 (which is done during training) and normalize.
image = tf.multiply(image, 1.0 / 127.5)
return tf.subtract(image, 1.0)
class OpenImagesAnnotator(BaseAnnotator):
def __init__(self,model_path,gpu_fraction=None):
super(OpenImagesAnnotator, self).__init__()
self.name = "inception"
self.object_name = "tag"
self.net = None
self.tf = True
self.session = None
self.label_set = 'open_images_tags'
self.graph_def = None
self.input_image = None
self.predictions = None
self.num_classes = 6012
self.top_n = 25
self.network_path = model_path
self.labelmap_path = model_path.replace('open_images.ckpt','open_images_labelmap.txt')
self.dict_path = model_path.replace('open_images.ckpt','open_images_dict.csv')
self.labelmap = [line.rstrip() for line in file(self.labelmap_path).readlines()]
if gpu_fraction:
self.gpu_fraction = gpu_fraction
else:
self.gpu_fraction = float(os.environ.get('GPU_MEMORY', 0.15))
def load(self):
if self.session is None:
if len(self.labelmap) != self.num_classes:
logging.error("{} lines while the number of classes is {}".format(len(self.labelmap), self.num_classes))
self.label_dict = {}
for line in tf.gfile.GFile(self.dict_path).readlines():
words = [word.strip(' "\n') for word in line.split(',', 1)]
self.label_dict[words[0]] = words[1]
logging.warning("Loading the network {} , first apply / query will be slower".format(self.name))
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = self.gpu_fraction
g = tf.Graph()
with g.as_default():
self.input_image = tf.placeholder(tf.string)
processed_image = inception_preprocess(self.input_image)
with slim.arg_scope(inception.inception_v3_arg_scope()):
logits, end_points = inception.inception_v3(processed_image, num_classes=self.num_classes, is_training=False)
self.predictions = end_points['multi_predictions'] = tf.nn.sigmoid(logits, name='multi_predictions')
saver = tf_saver.Saver()
self.session = tf.InteractiveSession(config=config)
saver.restore(self.session, self.network_path)
def apply(self,image_path):
if self.session is None:
self.load()
img_data = tf.gfile.FastGFile(image_path).read()
predictions_eval = np.squeeze(self.session.run(self.predictions, {self.input_image: img_data}))
results = {self.label_dict.get(self.labelmap[idx], 'unknown'):predictions_eval[idx]
for idx in predictions_eval.argsort()[-self.top_n:][::-1]}
labels = [t for t,v in results.iteritems() if v > 0.1]
text = " ".join(labels)
metadata = {t:round(100.0*v,2) for t,v in results.iteritems() if v > 0.1}
return self.object_name,text,metadata,labels
class CRNNAnnotator(BaseAnnotator):
def __init__(self,model_path):
super(CRNNAnnotator, self).__init__()
self.session = None
self.object_name = "text"
self.model_path = model_path
self.alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
self.cuda = False
def load(self):
logging.info("Loding CRNN model first apply will be slow")
if torch.cuda.is_available():
self.session = crnn.CRNN(32, 1, 37, 256, 1).cuda()
self.cuda = True
else:
self.session = crnn.CRNN(32, 1, 37, 256, 1)
self.session.load_state_dict(torch.load(self.model_path))
self.session.eval()
self.converter = utils.strLabelConverter(self.alphabet)
self.transformer = dataset.resizeNormalize((100, 32))
def apply(self,image_path):
if self.session is None:
self.load()
image = Image.open(image_path).convert('L')
if self.cuda:
image = self.transformer(image).cuda()
else:
image = self.transformer(image)
image = image.view(1, *image.size())
image = Variable(image)
preds = self.session(image)
_, preds = preds.max(2)
preds = preds.squeeze(2)
preds = preds.transpose(1, 0).contiguous().view(-1)
preds_size = Variable(torch.IntTensor([preds.size(0)]))
sim_pred = self.converter.decode(preds.data, preds_size.data, raw=False)
return self.object_name,sim_pred,{},None
| [
"tensorflow.python.training.saver.Saver",
"tensorflow.multiply",
"tensorflow.gfile.FastGFile",
"dvalib.crnn.dataset.resizeNormalize",
"torch.cuda.is_available",
"tensorflow.gfile.GFile",
"logging.info",
"tensorflow.Graph",
"tensorflow.placeholder",
"tensorflow.image.resize_bilinear",
"tensorflow... | [((470, 519), 'logging.info', 'logging.info', (['"""In pytorch mode, not importing TF"""'], {}), "('In pytorch mode, not importing TF')\n", (482, 519), False, 'import logging\n'), ((983, 1009), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '[0]'], {}), '(image, [0])\n', (997, 1009), True, 'import tensorflow as tf\n'), ((1105, 1169), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['image', '[299, 299]'], {'align_corners': '(False)'}), '(image, [299, 299], align_corners=False)\n', (1129, 1169), True, 'import tensorflow as tf\n'), ((1264, 1295), 'tensorflow.multiply', 'tf.multiply', (['image', '(1.0 / 127.5)'], {}), '(image, 1.0 / 127.5)\n', (1275, 1295), True, 'import tensorflow as tf\n'), ((1307, 1330), 'tensorflow.subtract', 'tf.subtract', (['image', '(1.0)'], {}), '(image, 1.0)\n', (1318, 1330), True, 'import tensorflow as tf\n'), ((840, 879), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image'], {'channels': '(3)'}), '(image, channels=3)\n', (860, 879), True, 'import tensorflow as tf\n'), ((4647, 4705), 'logging.info', 'logging.info', (['"""Loding CRNN model first apply will be slow"""'], {}), "('Loding CRNN model first apply will be slow')\n", (4659, 4705), False, 'import logging\n'), ((4717, 4742), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4740, 4742), False, 'import torch\n'), ((5025, 5063), 'dvalib.crnn.utils.strLabelConverter', 'utils.strLabelConverter', (['self.alphabet'], {}), '(self.alphabet)\n', (5048, 5063), True, 'import dvalib.crnn.utils as utils\n'), ((5091, 5125), 'dvalib.crnn.dataset.resizeNormalize', 'dataset.resizeNormalize', (['(100, 32)'], {}), '((100, 32))\n', (5114, 5125), True, 'import dvalib.crnn.dataset as dataset\n'), ((5460, 5475), 'torch.autograd.Variable', 'Variable', (['image'], {}), '(image)\n', (5468, 5475), False, 'from torch.autograd import Variable\n'), ((2873, 2889), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2887, 2889), 
True, 'import tensorflow as tf\n'), ((2989, 2999), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2997, 2999), True, 'import tensorflow as tf\n'), ((4877, 4905), 'dvalib.crnn.models.crnn.CRNN', 'crnn.CRNN', (['(32)', '(1)', '(37)', '(256)', '(1)'], {}), '(32, 1, 37, 256, 1)\n', (4886, 4905), True, 'import dvalib.crnn.models.crnn as crnn\n'), ((4943, 4970), 'torch.load', 'torch.load', (['self.model_path'], {}), '(self.model_path)\n', (4953, 4970), False, 'import torch\n'), ((3068, 3093), 'tensorflow.placeholder', 'tf.placeholder', (['tf.string'], {}), '(tf.string)\n', (3082, 3093), True, 'import tensorflow as tf\n'), ((3439, 3486), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['logits'], {'name': '"""multi_predictions"""'}), "(logits, name='multi_predictions')\n", (3452, 3486), True, 'import tensorflow as tf\n'), ((3511, 3527), 'tensorflow.python.training.saver.Saver', 'tf_saver.Saver', ([], {}), '()\n', (3525, 3527), True, 'from tensorflow.python.training import saver as tf_saver\n'), ((3559, 3595), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'config': 'config'}), '(config=config)\n', (3580, 3595), True, 'import tensorflow as tf\n'), ((3768, 3798), 'tensorflow.gfile.FastGFile', 'tf.gfile.FastGFile', (['image_path'], {}), '(image_path)\n', (3786, 3798), True, 'import tensorflow as tf\n'), ((5232, 5254), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (5242, 5254), False, 'from PIL import Image\n'), ((2570, 2600), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['self.dict_path'], {}), '(self.dict_path)\n', (2584, 2600), True, 'import tensorflow as tf\n'), ((3281, 3373), 'tensorflow.contrib.slim.python.slim.nets.inception.inception_v3', 'inception.inception_v3', (['processed_image'], {'num_classes': 'self.num_classes', 'is_training': '(False)'}), '(processed_image, num_classes=self.num_classes,\n is_training=False)\n', (3303, 3373), False, 'from tensorflow.contrib.slim.python.slim.nets import inception\n'), ((4771, 4799), 
'dvalib.crnn.models.crnn.CRNN', 'crnn.CRNN', (['(32)', '(1)', '(37)', '(256)', '(1)'], {}), '(32, 1, 37, 256, 1)\n', (4780, 4799), True, 'import dvalib.crnn.models.crnn as crnn\n'), ((3203, 3237), 'tensorflow.contrib.slim.python.slim.nets.inception.inception_v3_arg_scope', 'inception.inception_v3_arg_scope', ([], {}), '()\n', (3235, 3237), False, 'from tensorflow.contrib.slim.python.slim.nets import inception\n')] |
from datetime import datetime
from typing import Callable
def add(a: int, b: int) -> int:
return a + b
def subtract(a: int, b: int) -> int:
return a - b
def calculate(operation: Callable[[int, int], int],
a: int, b: int) -> int:
"""Demonstration of first class citizen"""
return operation(a, b)
def calc_decorator(func):
def wrapper(*args, **kwargs):
print('Something is happening before the calculation is performed.')
res = func(*args, **kwargs)
print('Something is happening after the calculation is performed.')
return res
return wrapper
def not_during_the_night(func):
def wrapper(*args, **kwargs):
if 7 <= datetime.now().hour < 22:
return func(*args, **kwargs)
else:
raise RuntimeError('Not allowed to work between 22pm and 07am')
return wrapper
@not_during_the_night
def do_work(a: int, b: int) -> int:
return a + b
if __name__ == '__main__':
# first class citizen
print(f'1 + 1 = {calculate(add, 1, 1)}')
print(f'1 - 1 = {calculate(subtract, 1, 1)}')
# decorated function (simple example)
decorated_add = calc_decorator(add)
print(f'1 + 1 = {decorated_add(1, 1)}')
# syntactic sugar
print(f'1 + 1 = {do_work(1, 1)}')
| [
"datetime.datetime.now"
] | [((705, 719), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (717, 719), False, 'from datetime import datetime\n')] |
import collections
from puzzle.constraints import solution_constraints
from puzzle.steps import generate_solutions
from spec.mamba import *
_SOLUTIONS = collections.OrderedDict((
('early_low', 0.1),
('early_high', 1.0),
('after_early_high', 0.9),
('mid_low', 0.2),
('late_mid', 0.5),
('late_low', 0.3),
('late_high', 0.8),
))
def _source() -> generate_solutions.Solutions:
yield from _SOLUTIONS.items()
with description('generate_solutions') as self:
with before.each:
self.constraints = solution_constraints.SolutionConstraints()
with description('constructor'):
with it('constructs without error'):
expect(calling(
generate_solutions.GenerateSolutions, self.constraints, _source)
).not_to(raise_error)
with it('does not read source until needed'):
source = mock.Mock(return_value=_source())
generate_solutions.GenerateSolutions(self.constraints, source)
expect(source).not_to(have_been_called_once)
with description('generation'):
with before.each:
self.source_iter = _source()
self.source = mock.Mock(return_value=self.source_iter)
self.ex = generate_solutions.GenerateSolutions(
self.constraints, self.source)
with it('produces solutions'):
expect(self.ex.solutions()).to(equal(_SOLUTIONS))
with it('only calls source once'):
self.ex.solutions()
self.ex.solutions()
expect(self.source).to(have_been_called_once)
with it('constrains solutions'):
self.constraints.weight_threshold = 1.0
expect(self.ex.solutions()).to(equal({'early_high': 1.0}))
with it('only calls source once, even if reconstrained'):
self.ex.solutions()
self.constraints.weight_threshold = 0.5
self.ex.solutions()
expect(self.source).to(have_been_called_once)
with it('solutions() returns all results'):
expect(list(self.ex.solutions().items())).to(equal(
list(sorted(_SOLUTIONS.items(), key=lambda x: x[1], reverse=True))))
with it('solutions stream via yield'):
expect(self.ex.solution).to(equal('early_high'))
# Prove there are still items left in the iterator:
expect(calling(next, self.source_iter)).to(equal(
('after_early_high', 0.9)))
with description('event broadcasting'):
with it('sends an event solutions have changed'):
on_change_stub = mock.Mock()
self.ex.subscribe(on_change_stub)
self.constraints.weight_threshold = 0.5
expect(on_change_stub.on_next).to(have_been_called_once)
with description('interrupting iteration'):
with before.each:
self.iterations = 0
self.stop_iterations = 0
def source_iter() -> generate_solutions.Solutions:
for k, v in _SOLUTIONS.items():
while v < self.constraints.weight_threshold:
self.stop_iterations += 1
yield StopIteration()
self.iterations += 1
yield k, v
self.source_iter = source_iter()
self.source = mock.Mock(return_value=self.source_iter)
self.ex = generate_solutions.GenerateSolutions(self.constraints, self.source)
with it('still produces solutions'):
expect(self.ex.solutions()).to(equal(_SOLUTIONS))
expect(self.iterations).to(equal(len(_SOLUTIONS)))
with it('stops if constraints require'):
self.constraints.weight_threshold = 0.5
self.ex.solutions()
expect(self.iterations).to(be_below(len(_SOLUTIONS)))
expect(self.stop_iterations).to(equal(1))
with it('resumes if constraints change'):
self.constraints.weight_threshold = 0.5
self.ex.solutions()
self.constraints.weight_threshold = 0.0
self.ex.solutions()
expect(self.iterations).to(equal(len(_SOLUTIONS)))
| [
"collections.OrderedDict",
"puzzle.constraints.solution_constraints.SolutionConstraints",
"puzzle.steps.generate_solutions.GenerateSolutions"
] | [((155, 334), 'collections.OrderedDict', 'collections.OrderedDict', (["(('early_low', 0.1), ('early_high', 1.0), ('after_early_high', 0.9), (\n 'mid_low', 0.2), ('late_mid', 0.5), ('late_low', 0.3), ('late_high', 0.8))"], {}), "((('early_low', 0.1), ('early_high', 1.0), (\n 'after_early_high', 0.9), ('mid_low', 0.2), ('late_mid', 0.5), (\n 'late_low', 0.3), ('late_high', 0.8)))\n", (178, 334), False, 'import collections\n'), ((514, 556), 'puzzle.constraints.solution_constraints.SolutionConstraints', 'solution_constraints.SolutionConstraints', ([], {}), '()\n', (554, 556), False, 'from puzzle.constraints import solution_constraints\n'), ((865, 927), 'puzzle.steps.generate_solutions.GenerateSolutions', 'generate_solutions.GenerateSolutions', (['self.constraints', 'source'], {}), '(self.constraints, source)\n', (901, 927), False, 'from puzzle.steps import generate_solutions\n'), ((1148, 1215), 'puzzle.steps.generate_solutions.GenerateSolutions', 'generate_solutions.GenerateSolutions', (['self.constraints', 'self.source'], {}), '(self.constraints, self.source)\n', (1184, 1215), False, 'from puzzle.steps import generate_solutions\n'), ((3073, 3140), 'puzzle.steps.generate_solutions.GenerateSolutions', 'generate_solutions.GenerateSolutions', (['self.constraints', 'self.source'], {}), '(self.constraints, self.source)\n', (3109, 3140), False, 'from puzzle.steps import generate_solutions\n')] |
from torch.utils.data import Dataset
from torchvision.transforms.functional import to_tensor
from utils.image.processor import ImagePreprocessor, colorFormats
from PIL import Image
import glob, random
class ImageData(Dataset):
def __init__(
self, srcPath, crop=True, cropSize=96, colorFromat="RGB", processorList=None
):
super(ImageData, self).__init__()
imgs = glob.glob(srcPath + "/**", recursive=True)
imgs = filter(
lambda path: path.endswith("png")
or path.endswith("jpg")
or path.endswith("jpeg"),
imgs,
)
self.imgs = list(imgs)
self.imgNum = len(self.imgs)
if colorFromat not in colorFormats:
raise KeyError("only RGB or YUV or L")
self.imgPreprocessor = ImagePreprocessor(
crop=crop,
cropSize=cropSize,
colorFromat=colorFromat,
processorList=processorList,
)
def __len__(self):
return self.imgNum
def __getitem__(self, index):
imgPath = self.imgs[index]
img = Image.open(imgPath)
lr, hr = self.imgPreprocessor.process(img)
return to_tensor(lr), to_tensor(hr)
| [
"torchvision.transforms.functional.to_tensor",
"PIL.Image.open",
"glob.glob",
"utils.image.processor.ImagePreprocessor"
] | [((396, 438), 'glob.glob', 'glob.glob', (["(srcPath + '/**')"], {'recursive': '(True)'}), "(srcPath + '/**', recursive=True)\n", (405, 438), False, 'import glob, random\n'), ((804, 909), 'utils.image.processor.ImagePreprocessor', 'ImagePreprocessor', ([], {'crop': 'crop', 'cropSize': 'cropSize', 'colorFromat': 'colorFromat', 'processorList': 'processorList'}), '(crop=crop, cropSize=cropSize, colorFromat=colorFromat,\n processorList=processorList)\n', (821, 909), False, 'from utils.image.processor import ImagePreprocessor, colorFormats\n'), ((1100, 1119), 'PIL.Image.open', 'Image.open', (['imgPath'], {}), '(imgPath)\n', (1110, 1119), False, 'from PIL import Image\n'), ((1186, 1199), 'torchvision.transforms.functional.to_tensor', 'to_tensor', (['lr'], {}), '(lr)\n', (1195, 1199), False, 'from torchvision.transforms.functional import to_tensor\n'), ((1201, 1214), 'torchvision.transforms.functional.to_tensor', 'to_tensor', (['hr'], {}), '(hr)\n', (1210, 1214), False, 'from torchvision.transforms.functional import to_tensor\n')] |
def main():
from sys import stdin
n = int(stdin.readline())
estrelas = list(map(int, stdin.readline().split()))
total_de_carneiros = sum(estrelas)
i = 0
estrelas_atacadas = list()
carneiros_roubados = 0
while 0 <= i < n:
if i not in estrelas_atacadas:
estrelas_atacadas.append(i)
if estrelas[i] % 2 != 0:
if estrelas[i] > 0:
carneiros_roubados += 1
estrelas[i] -= 1
i += 1
else:
if estrelas[i] > 0:
carneiros_roubados += 1
estrelas[i] -= 1
i -= 1
carneiros_nao_roubados = total_de_carneiros - carneiros_roubados
print(f'{len(estrelas_atacadas)} {carneiros_nao_roubados}')
if __name__ == "__main__":
main()
| [
"sys.stdin.readline"
] | [((50, 66), 'sys.stdin.readline', 'stdin.readline', ([], {}), '()\n', (64, 66), False, 'from sys import stdin\n'), ((97, 113), 'sys.stdin.readline', 'stdin.readline', ([], {}), '()\n', (111, 113), False, 'from sys import stdin\n')] |
"""LC-QuAD 2.0: A Large Scale Complex Question Answering Dataset."""
import json
import os
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{dubey2017lc2,
title={LC-QuAD 2.0: A Large Dataset for Complex Question Answering over Wikidata and DBpedia},
author={<NAME> <NAME>},
booktitle={Proceedings of the 18th International Semantic Web Conference (ISWC)},
year={2019},
organization={Springer}
}
"""
_DESCRIPTION = """\
LC-QuAD 2.0 is a Large Question Answering dataset with 30,000 pairs of question and its corresponding SPARQL query. The target knowledge base is Wikidata and DBpedia, specifically the 2018 version. Please see our paper for details about the dataset creation process and framework.
"""
_URL = "http://lc-quad.sda.tech/index.html"
_LCQUAD2_URLS = {
"train": "https://s3-eu-west-1.amazonaws.com/pfigshare-u-files/15738824/train.json",
"test": "https://s3-eu-west-1.amazonaws.com/pfigshare-u-files/15738818/test.json"
}
class LCQuAD2Config(datasets.BuilderConfig):
"""BuilderConfig for LC-QuAD 2.0"""
def __init__(self,
data_url,
data_dir,
**kwargs):
"""BuilderConfig for LC-QuAD 2.0.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(LCQuAD2Config, self).__init__(**kwargs)
self.data_url = data_url
self.data_dir = data_dir
class LCQuAD2(datasets.GeneratorBasedBuilder):
    """LC-QuAD 2.0: A Large Scale Complex Question Answering Dataset."""
    # One config per target knowledge base; both share the same raw files.
    BUILDER_CONFIGS = [
        LCQuAD2Config(
            name="lcquad2-wikidata",
            description="LCQuAD2 Wikidata",
            data_url="",
            data_dir="LCQuAD2"
        ),
        LCQuAD2Config(
            name="lcquad2-dbpedia",
            description="LCQuAD2 DBpedia",
            data_url="",
            data_dir="LCQuAD2"
        )
    ]
    def _info(self):
        """Return the dataset metadata and feature schema.

        Every feature is a string (or a sequence of strings); list-valued
        raw fields are normalised in ``_generate_examples``.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
            features=datasets.Features(
                {
                    "NNQT_question": datasets.Value("string"),
                    "uid": datasets.Value("string"),
                    "subgraph": datasets.Sequence(
                        datasets.Value("string")
                    ),
                    "template_index": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "sparql": datasets.Value("string"),
                    "template": datasets.Sequence(
                        datasets.Value("string")
                    ),
                    "template_id": datasets.Value("string"),
                    "answer": datasets.Sequence(
                        datasets.Value("string")
                    ),
                    "paraphrased_question": datasets.Sequence(
                        datasets.Value("string")
                    )
                }
            )
        )
    def _split_generators(self, dl_manager):
        """Download the train/test JSON files and create the two splits.

        The knowledge base ("wikidata" or "dbpedia") is derived from the
        active config name and forwarded to ``_generate_examples``.
        """
        data_dir = None
        lcquad2_files = dl_manager.download(
            {
                "train": _LCQUAD2_URLS["train"],
                "test": _LCQUAD2_URLS["test"]
            }
        )
        # NOTE(review): `kb` is unbound if the config name matches neither
        # branch; unreachable with the BUILDER_CONFIGS above, but fragile.
        if self.config.name == "lcquad2-wikidata":
            kb = "wikidata"
        elif self.config.name == "lcquad2-dbpedia":
            kb = "dbpedia"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": os.path.join(data_dir or "", lcquad2_files["train"]),
                    "split": "train",
                    "kb": kb
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": os.path.join(data_dir or "", lcquad2_files["test"]),
                    "split": "test",
                    "kb": kb
                }
            )
        ]
    def _generate_examples(self, data_file, split, kb, **kwargs):
        """Yield (index, example) pairs from one downloaded JSON file.

        Raw fields that may be either a string or a list
        (paraphrased_question, template, subgraph) are normalised to lists;
        the SPARQL query is picked according to `kb`.
        """
        with open(data_file, encoding="utf8") as f:
            lcquad2 = json.load(f)
            for idx, question in enumerate(lcquad2):
                paraphrases = []
                templates = []
                subgraphs = []
                if isinstance(question["paraphrased_question"], list):
                    paraphrases = question["paraphrased_question"]
                elif isinstance(question["paraphrased_question"], str):
                    paraphrases.append(question["paraphrased_question"])
                if isinstance(question["template"], list):
                    templates = question["template"]
                elif isinstance(question["template"], str):
                    templates.append(question["template"])
                if isinstance(question["subgraph"], list):
                    subgraphs = question["subgraph"]
                elif isinstance(question["subgraph"], str):
                    subgraphs.append(question["subgraph"])
                # NOTE(review): as in _split_generators, `sparql` is unbound
                # for any kb value other than the two expected ones.
                if kb == "wikidata":
                    sparql = question["sparql_wikidata"]
                elif kb == "dbpedia":
                    sparql = question["sparql_dbpedia18"]
                yield idx, {
                    "NNQT_question": question["NNQT_question"],
                    "uid": str(question["uid"]),
                    "subgraph": subgraphs,
                    "template_index": str(question["template_index"]),
                    "question": question["question"],
                    "sparql": sparql,
                    "template": templates,
                    "template_id": str(question["template_id"]),
                    "answer": question["answer"],
                    "paraphrased_question": paraphrases
                }
| [
"json.load",
"datasets.Value",
"os.path.join",
"datasets.logging.get_logger"
] | [((119, 156), 'datasets.logging.get_logger', 'datasets.logging.get_logger', (['__name__'], {}), '(__name__)\n', (146, 156), False, 'import datasets\n'), ((4285, 4297), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4294, 4297), False, 'import json\n'), ((2262, 2286), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (2276, 2286), False, 'import datasets\n'), ((2315, 2339), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (2329, 2339), False, 'import datasets\n'), ((2502, 2526), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (2516, 2526), False, 'import datasets\n'), ((2560, 2584), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (2574, 2584), False, 'import datasets\n'), ((2616, 2640), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (2630, 2640), False, 'import datasets\n'), ((2800, 2824), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (2814, 2824), False, 'import datasets\n'), ((3688, 3740), 'os.path.join', 'os.path.join', (["(data_dir or '')", "lcquad2_files['train']"], {}), "(data_dir or '', lcquad2_files['train'])\n", (3700, 3740), False, 'import os\n'), ((3983, 4034), 'os.path.join', 'os.path.join', (["(data_dir or '')", "lcquad2_files['test']"], {}), "(data_dir or '', lcquad2_files['test'])\n", (3995, 4034), False, 'import os\n'), ((2416, 2440), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (2430, 2440), False, 'import datasets\n'), ((2717, 2741), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (2731, 2741), False, 'import datasets\n'), ((2899, 2923), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (2913, 2923), False, 'import datasets\n'), ((3034, 3058), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (3048, 3058), False, 'import datasets\n')] |
# Enter your code here. Read input from STDIN. Print output to STDOUT
from typing import List
from itertools import product
def sum_of_square_mod_m(nums: tuple, mod_m):
    """Return the sum of the squares of *nums*, modulo *mod_m*."""
    return sum(x ** 2 for x in nums) % mod_m


def maximize_function_value(list_of_list: List, mod_m):
    """Print (and return) the maximum sum-of-squares-mod-m over all picks.

    One element is chosen from each inner list of *list_of_list*; the
    objective is the sum of squares of the chosen elements modulo *mod_m*.
    Returns -1 (the original sentinel) when any inner list is empty, i.e.
    when there are no combinations at all.
    """
    # Iterate the Cartesian product lazily instead of materialising every
    # combination in a list first (the product grows multiplicatively).
    max_fn_value = max(
        (sum_of_square_mod_m(one_trial, mod_m) for one_trial in product(*list_of_list)),
        default=-1,
    )
    print(max_fn_value)
    return max_fn_value
if __name__ == '__main__':
    # First line: K (number of lists) followed by M (the modulus).
    header = [int(tok) for tok in input().split()]
    k, m = header[0], header[1]
    # Each of the next K lines: an element count followed by the elements;
    # the leading count is dropped.
    list_of_list = [
        [int(tok) for tok in input().split()][1:]
        for _ in range(k)
    ]
    maximize_function_value(list_of_list, m)
| [
"itertools.product"
] | [((368, 390), 'itertools.product', 'product', (['*list_of_list'], {}), '(*list_of_list)\n', (375, 390), False, 'from itertools import product\n')] |
from unittest import TestCase
from tests import get_data
from pytezos.operation.forge import forge_operation_group
class OperationForgingTestooZWSJ(TestCase):
    """Check local forging against the node-produced binary for one opg."""

    def setUp(self):
        # Show full diffs when the byte strings differ.
        self.maxDiff = None

    def test_forge_ooZWSJ(self):
        opg_dir = 'operations/ooZWSJqGRqi4hirFmqvFinKp9JGU3vqR23Q15tMvDXMnWtX1jne'
        unsigned = get_data(path=opg_dir + '/unsigned.json')
        expected = get_data(path=opg_dir + '/forged.hex')
        self.assertEqual(expected, forge_operation_group(unsigned))
| [
"tests.get_data"
] | [((273, 373), 'tests.get_data', 'get_data', ([], {'path': '"""operations/ooZWSJqGRqi4hirFmqvFinKp9JGU3vqR23Q15tMvDXMnWtX1jne/forged.hex"""'}), "(path=\n 'operations/ooZWSJqGRqi4hirFmqvFinKp9JGU3vqR23Q15tMvDXMnWtX1jne/forged.hex'\n )\n", (281, 373), False, 'from tests import get_data\n'), ((416, 519), 'tests.get_data', 'get_data', ([], {'path': '"""operations/ooZWSJqGRqi4hirFmqvFinKp9JGU3vqR23Q15tMvDXMnWtX1jne/unsigned.json"""'}), "(path=\n 'operations/ooZWSJqGRqi4hirFmqvFinKp9JGU3vqR23Q15tMvDXMnWtX1jne/unsigned.json'\n )\n", (424, 519), False, 'from tests import get_data\n')] |
from machine import SPI, Pin
import tinypico as TinyPICO
import machine
from dotstar import DotStar
import time, random, micropython, gc
import bluetooth
from ble_simple_peripheral import BLESimplePeripheral
# Configure SPI for controlling the DotStar
# Internally we are using software SPI for this as the pins being used are not hardware SPI pins
spi = SPI(
    sck=Pin(TinyPICO.DOTSTAR_CLK),
    mosi=Pin(TinyPICO.DOTSTAR_DATA),
    miso=Pin(TinyPICO.SPI_MISO),
)
# Create a DotStar instance
dotstar = DotStar(spi, 1, brightness=0.25)  # Just one DotStar, quarter brightness
# Say hello
print("--------------------\n")
# Set the CPU clock to 80 MHz (presumably to reduce power draw -- TODO confirm)
machine.freq(80000000)
print("Machine freq: {}\n".format(machine.freq()))
# check if the device woke from a deep sleep
if machine.reset_cause() == machine.DEEPSLEEP_RESET:
    print("woke from a deep sleep")
# Show some info on boot
print("Battery Voltage is {}V".format(TinyPICO.get_battery_voltage()))
print("Battery Charge State is {}\n".format(TinyPICO.get_battery_charging()))
## Show available memory
# print("Memory Info - micropython.mem_info()")
# print("------------------------------------")
# micropython.mem_info()
# Advertise as a BLE peripheral named "tinypico".
ble = bluetooth.BLE()
p = BLESimplePeripheral(ble, "tinypico")
# Tracks whether the DotStar is currently lit; updated by the BLE callback.
light_on = False
def on_rx(v):
    """Handle a BLE write: parse semicolon-separated commands from *v*.

    Supported statements (comma-separated arguments):
      on,R,G,B,A -- power the DotStar and set colour (ints) / brightness (float)
      off        -- power the DotStar down
      bat        -- reply with the current battery voltage
    Replies "ack" when every statement parsed, "nack" otherwise.
    """
    global light_on
    print("RX", v)
    fail = False
    for statement in str(v, "utf-8").split(";"):
        args = statement.split(",")
        cmd = args.pop(0)
        if cmd == "on" and len(args) == 4:
            # Malformed numbers previously raised an uncaught ValueError,
            # killing the BLE callback; treat them as a bad statement instead.
            try:
                r = int(args[0])
                g = int(args[1])
                b = int(args[2])
                a = float(args[3])
            except ValueError:
                fail = True
                continue
            light_on = True
            TinyPICO.set_dotstar_power(True)
            dotstar[0] = (r, g, b, a)
        elif cmd == "off":
            light_on = False
            TinyPICO.set_dotstar_power(False)
        elif cmd == "bat":
            p.send("{}".format(TinyPICO.get_battery_voltage()))
        else:
            fail = True
    if not fail:
        p.send("ack")
    else:
        p.send("nack")
# Dispatch incoming BLE writes to the command parser above.
p.on_write(on_rx)
def on_interval(t):
    """Periodic timer callback: deep-sleep for 30 s whenever the light is off."""
    print("light_on: {}".format(light_on))
    if light_on:
        return
    print("light off... might as well sleep for a bit")
    machine.deepsleep(30000)
# Fire on_interval every 30 seconds.
timer0 = machine.Timer(0)
timer0.init(period=30000, mode=machine.Timer.PERIODIC, callback=on_interval)
| [
"dotstar.DotStar",
"tinypico.set_dotstar_power",
"bluetooth.BLE",
"machine.Timer",
"machine.deepsleep",
"ble_simple_peripheral.BLESimplePeripheral",
"machine.freq",
"machine.reset_cause",
"machine.Pin",
"tinypico.get_battery_charging",
"tinypico.get_battery_voltage"
] | [((526, 558), 'dotstar.DotStar', 'DotStar', (['spi', '(1)'], {'brightness': '(0.25)'}), '(spi, 1, brightness=0.25)\n', (533, 558), False, 'from dotstar import DotStar\n'), ((648, 670), 'machine.freq', 'machine.freq', (['(80000000)'], {}), '(80000000)\n', (660, 670), False, 'import machine\n'), ((1202, 1217), 'bluetooth.BLE', 'bluetooth.BLE', ([], {}), '()\n', (1215, 1217), False, 'import bluetooth\n'), ((1223, 1259), 'ble_simple_peripheral.BLESimplePeripheral', 'BLESimplePeripheral', (['ble', '"""tinypico"""'], {}), "(ble, 'tinypico')\n", (1242, 1259), False, 'from ble_simple_peripheral import BLESimplePeripheral\n'), ((2289, 2305), 'machine.Timer', 'machine.Timer', (['(0)'], {}), '(0)\n', (2302, 2305), False, 'import machine\n'), ((775, 796), 'machine.reset_cause', 'machine.reset_cause', ([], {}), '()\n', (794, 796), False, 'import machine\n'), ((384, 409), 'machine.Pin', 'Pin', (['TinyPICO.DOTSTAR_CLK'], {}), '(TinyPICO.DOTSTAR_CLK)\n', (387, 409), False, 'from machine import SPI, Pin\n'), ((421, 447), 'machine.Pin', 'Pin', (['TinyPICO.DOTSTAR_DATA'], {}), '(TinyPICO.DOTSTAR_DATA)\n', (424, 447), False, 'from machine import SPI, Pin\n'), ((459, 481), 'machine.Pin', 'Pin', (['TinyPICO.SPI_MISO'], {}), '(TinyPICO.SPI_MISO)\n', (462, 481), False, 'from machine import SPI, Pin\n'), ((706, 720), 'machine.freq', 'machine.freq', ([], {}), '()\n', (718, 720), False, 'import machine\n'), ((929, 959), 'tinypico.get_battery_voltage', 'TinyPICO.get_battery_voltage', ([], {}), '()\n', (957, 959), True, 'import tinypico as TinyPICO\n'), ((1007, 1038), 'tinypico.get_battery_charging', 'TinyPICO.get_battery_charging', ([], {}), '()\n', (1036, 1038), True, 'import tinypico as TinyPICO\n'), ((2250, 2274), 'machine.deepsleep', 'machine.deepsleep', (['(30000)'], {}), '(30000)\n', (2267, 2274), False, 'import machine\n'), ((1558, 1590), 'tinypico.set_dotstar_power', 'TinyPICO.set_dotstar_power', (['(True)'], {}), '(True)\n', (1584, 1590), True, 'import tinypico as TinyPICO\n'), 
((1823, 1856), 'tinypico.set_dotstar_power', 'TinyPICO.set_dotstar_power', (['(False)'], {}), '(False)\n', (1849, 1856), True, 'import tinypico as TinyPICO\n'), ((1917, 1947), 'tinypico.get_battery_voltage', 'TinyPICO.get_battery_voltage', ([], {}), '()\n', (1945, 1947), True, 'import tinypico as TinyPICO\n')] |
from feature import Feature
from itertools import product
import numpy as np
import random
class Node:
    """A node holding K synthetic features: the first Kd (= 2K/3) discrete,
    the remaining Kc (= K/3) continuous.

    Discrete features are arranged into 4 equal one-hot groups; exactly one
    feature per group has ``xhat == 1``.
    """

    def __init__(self, K, Cweights, Dweights, seed):
        self.K = K
        self.seed = seed
        self.Kd = int(K*2/3)  # number of discrete features
        self.Kc = int(K*1/3)  # number of continuous features
        self.Cfeatures = [Feature(False, seed) for k in range(self.Kc)]
        self.Dfeatures = [Feature(True, seed) for k in range(self.Kd)]
        self.CfeatureWeights = Cweights
        self.DfeatureWeights = Dweights
        # Combined view: indices [:Kd] are discrete, [Kd:] are continuous.
        # Fixes the AttributeError previously raised by getAlpha/getBeta/
        # printFeatures/generateCorrelation, which all read self.features
        # although __init__ never set it.
        self.features = self.Dfeatures + self.Cfeatures
        self.group = np.arange(self.Kd).reshape((4,-1))
        for i in range(self.Kc):
            self.Cfeatures[i].weight = self.CfeatureWeights[i]
        for i in range(self.Kd):
            self.Dfeatures[i].weight = self.DfeatureWeights[i]
        for i in range(self.group.shape[0]):
            # choose one member of each group to be the active one-hot entry
            one = np.random.randint(self.group[i,0], self.group[i,0]+self.group.shape[1])
            for k in self.group[i,:]:
                self.Dfeatures[k].xhat = 0
            self.Dfeatures[one].xhat = 1
        self.u = np.random.rand()

    def getAlpha(self):
        """exp of the sum, over the continuous features, of the smaller of
        the two weighted clipped endpoints of [xhat - range, xhat + range]."""
        alpha = 0
        for f in self.features[self.Kd:]:
            alpha += min(f.weight * max(f.xhat - f.range, 0), f.weight * min(f.xhat + f.range, 1))
        alpha = np.exp(alpha)
        return alpha

    def getBeta(self):
        """exp of the sum, over the continuous features, of the larger of
        the two weighted clipped endpoints of [xhat - range, xhat + range]."""
        beta = 0
        for f in self.features[self.Kd:]:
            beta += max(f.weight * max(f.xhat - f.range, 0), f.weight * min(f.xhat + f.range, 1))
        beta = np.exp(beta)
        return beta

    def getMaxCost(self):
        """Total cost: the full cost of every discrete feature, plus each
        continuous feature's cost times the largest feasible move of xhat."""
        maxCost = 0
        for f in self.Cfeatures:
            maxCost += f.cost * min(f.xhat, f.range, 1-f.xhat)
        for f in self.Dfeatures:
            maxCost += f.cost
        return maxCost

    def printFeatures(self):
        """Print xhat/cost/range/weight of every feature (discrete first)."""
        for f in self.features:
            feat = {"xhat": f.xhat, "cost": f.cost, "range": f.range, "weight": f.weight}
            print(feat)

    def generateCorrelation(self):
        """Enumerate the correlated discrete space and attach a random cost
        and an exp(w . x) score to each point.

        NOTE(review): K is hard-coded to 10 here (shadowing self.K) and the
        group structure [3, 2, 2] differs from __init__'s 4 groups -- confirm
        this is intentional before generalising.
        """
        K = 10
        corr = [3, 2, 2]
        l = sum(corr)
        m1 = [(1,0,0), (0,1,0), (0,0,1)]
        m2 = [(1,0), (0,1)]
        m3 = m2
        m4 = list(product(range(2), repeat=K-l))
        m = list(product(m1, m2, m3, m4))
        self.discreteSpace = [i[0] + i[1] + i[2] + i[3] for i in m]
        self.discreteCost = [random.random()*10 for i in self.discreteSpace]
        ws = np.array([f.weight for f in self.features[:self.Kd]])
        self.discreteScore = [np.exp(np.dot(ws, np.array(i))) for i in self.discreteSpace]
| [
"numpy.random.rand",
"itertools.product",
"feature.Feature",
"numpy.exp",
"numpy.array",
"numpy.random.randint",
"random.random",
"numpy.arange"
] | [((1003, 1019), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1017, 1019), True, 'import numpy as np\n'), ((1220, 1233), 'numpy.exp', 'np.exp', (['alpha'], {}), '(alpha)\n', (1226, 1233), True, 'import numpy as np\n'), ((1451, 1463), 'numpy.exp', 'np.exp', (['beta'], {}), '(beta)\n', (1457, 1463), True, 'import numpy as np\n'), ((2322, 2375), 'numpy.array', 'np.array', (['[f.weight for f in self.features[:self.Kd]]'], {}), '([f.weight for f in self.features[:self.Kd]])\n', (2330, 2375), True, 'import numpy as np\n'), ((284, 304), 'feature.Feature', 'Feature', (['(False)', 'seed'], {}), '(False, seed)\n', (291, 304), False, 'from feature import Feature\n'), ((356, 375), 'feature.Feature', 'Feature', (['(True)', 'seed'], {}), '(True, seed)\n', (363, 375), False, 'from feature import Feature\n'), ((792, 867), 'numpy.random.randint', 'np.random.randint', (['self.group[i, 0]', '(self.group[i, 0] + self.group.shape[1])'], {}), '(self.group[i, 0], self.group[i, 0] + self.group.shape[1])\n', (809, 867), True, 'import numpy as np\n'), ((2139, 2162), 'itertools.product', 'product', (['m1', 'm2', 'm3', 'm4'], {}), '(m1, m2, m3, m4)\n', (2146, 2162), False, 'from itertools import product\n'), ((502, 520), 'numpy.arange', 'np.arange', (['self.Kd'], {}), '(self.Kd)\n', (511, 520), True, 'import numpy as np\n'), ((2261, 2276), 'random.random', 'random.random', ([], {}), '()\n', (2274, 2276), False, 'import random\n'), ((2424, 2435), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (2432, 2435), True, 'import numpy as np\n')] |
##−∗−coding : utf−8−∗−
import sqlite3 as lite
import logging
import sys
from collections import OrderedDict
import conf
LOG_FORMAT = "%(levelname)s > Line:%(lineno)s - %(message)s"
logging.basicConfig(filename="debug.log",
level=logging.DEBUG,
format=LOG_FORMAT,
filemode="w",
)
logger = logging.getLogger(__name__)
# encode: str -> bytes   (str.encode() -> bytes)
# decode: bytes -> str   (bytes.decode() -> str)
# No need to encode or decode here: Python 3 strings are already unicode.
def decode_to_text(text):
    """Return *text* unchanged (debug helper).

    NOTE(review): both branches return the input as-is -- in Python 3 no
    encode/decode round-trip is needed. The print/logger calls only exist
    to inspect the UTF-8 bytes of one specific Bengali word, presumably to
    debug a Unicode-normalisation mismatch (the repeated literals may have
    different source-level spellings) -- confirm before removing.
    """
    if isinstance(text, str):
        print(text)
        if "সময়" == text:
            logger.debug(str(text.encode('utf-8')))
            logger.debug(str("সময়".encode('utf-8')))
            logger.debug(str("সময়".encode('utf-8')))
        # decode_text = text.encode('utf-8').decode('utf-8')
        decode_text = text
    else:
        decode_text = text
    return decode_text
def convert_into_dic(columns, rows):
    """Convert a DB-API result set into an ordered column -> values mapping.

    :param columns: list of column names
    :param rows: sequence of row tuples (as returned by ``cursor.fetchall()``)
    :return: ``OrderedDict`` mapping each column name to the list of its
        values, in column order
    """
    query_val = OrderedDict()
    for c, column_name in enumerate(columns):
        # decode_to_text is applied to every cell (legacy unicode handling)
        query_val[column_name] = [decode_to_text(row[c]) for row in rows]
    return query_val
def run_query(query):
    """Execute *query* against VoiceCommand.db and return the result.

    For queries producing rows, the result is converted to an
    ``OrderedDict`` (column name -> list of values) via ``convert_into_dic``;
    otherwise the raw ``fetchall()`` result is returned. On a SQLite error
    the process exits with status 1.

    :param query: raw SQL string. WARNING: executed verbatim -- never pass
        unsanitised user input (SQL injection risk).
    """
    con = None
    data = None
    try:
        con = lite.connect('VoiceCommand.db')
        cur = con.cursor()
        cur.execute(query)
        # Flag that the stored commands changed so callers can reload them.
        if any(word in query.lower() for word in ('update', 'insert', 'delete')):
            conf.NEW_COMMAND = True
        data = cur.fetchall()
        print(data)
        if cur.description:
            column_name = [c[0] for c in cur.description]
            if data:
                data = convert_into_dic(column_name, data)
        con.commit()
    except lite.Error as e:
        print("Error {}:".format(e.args[0]))
        sys.exit(1)
    finally:
        if con:
            con.close()
    return data
| [
"logging.basicConfig",
"logging.getLogger",
"collections.OrderedDict",
"sqlite3.connect",
"sys.exit"
] | [((186, 286), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""debug.log"""', 'level': 'logging.DEBUG', 'format': 'LOG_FORMAT', 'filemode': '"""w"""'}), "(filename='debug.log', level=logging.DEBUG, format=\n LOG_FORMAT, filemode='w')\n", (205, 286), False, 'import logging\n'), ((373, 400), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (390, 400), False, 'import logging\n'), ((1150, 1163), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1161, 1163), False, 'from collections import OrderedDict\n'), ((1671, 1702), 'sqlite3.connect', 'lite.connect', (['"""VoiceCommand.db"""'], {}), "('VoiceCommand.db')\n", (1683, 1702), True, 'import sqlite3 as lite\n'), ((2316, 2327), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2324, 2327), False, 'import sys\n')] |
from pathlib import Path
from typing import List
from pmfp.utils.fs_utils import get_abs_path
def _find_pypackage(final_path: Path, packs: List[str]) -> None:
has_init = False
for i in final_path.iterdir():
if i.name == "__init__.py":
has_init = True
if not has_init:
return
else:
lates_p = final_path.name
packs.append(lates_p)
_find_pypackage(final_path.parent, packs)
def find_pypackage_string(to_path: str) -> str:
    """Build the dotted package path for *to_path*.

    Args:
        to_path (str): target path.

    Returns:
        str: the dotted package path (empty string if not inside a package).
    """
    collected: List[str] = []
    _find_pypackage(get_abs_path(to_path), collected)
    return ".".join(reversed(collected))
| [
"pmfp.utils.fs_utils.get_abs_path"
] | [((648, 669), 'pmfp.utils.fs_utils.get_abs_path', 'get_abs_path', (['to_path'], {}), '(to_path)\n', (660, 669), False, 'from pmfp.utils.fs_utils import get_abs_path\n')] |
import pydot
from grit.decisiontree.handcrafted_trees import scenario_trees
from grit.core.base import get_img_dir
def build_pydot_tree(graph, root, idx='R'):
    """Recursively add *root* and its decision subtree to the pydot *graph*.

    idx encodes the path from the root as a string of 'T'/'F' steps and is
    used as the unique pydot node id. Edge labels show each child's share
    of the parent's value. Returns the created pydot.Node.
    """
    node = pydot.Node(idx, label=str(root))
    graph.add_node(node)

    if root.decision is not None:
        true_child = build_pydot_tree(graph, root.decision.true_child, idx + 'T')
        false_child = build_pydot_tree(graph, root.decision.false_child, idx + 'F')

        # fraction of this node's value flowing down each branch
        # NOTE(review): divides by root.value -- assumes it is non-zero.
        true_weight = root.decision.true_child.value / root.value
        false_weight = root.decision.false_child.value / root.value

        graph.add_edge(pydot.Edge(node, true_child, label='T: {:.2f}'.format(true_weight)))
        graph.add_edge(pydot.Edge(node, false_child, label='F: {:.2f}'.format(false_weight)))

    return node
scenario_name = 'heckstrasse'

# Render one PNG per (goal index, goal type) decision tree of the scenario.
for goal_idx, goal_types in scenario_trees[scenario_name].items():
    for goal_type, root in goal_types.items():
        graph = pydot.Dot(graph_type='digraph')
        build_pydot_tree(graph, root)
        graph.write_png(get_img_dir() + 'handcrafted_tree_{}_G{}_{}.png'.format(
            scenario_name, goal_idx, goal_type))
| [
"pydot.Dot",
"grit.core.base.get_img_dir"
] | [((930, 961), 'pydot.Dot', 'pydot.Dot', ([], {'graph_type': '"""digraph"""'}), "(graph_type='digraph')\n", (939, 961), False, 'import pydot\n'), ((1024, 1037), 'grit.core.base.get_img_dir', 'get_img_dir', ([], {}), '()\n', (1035, 1037), False, 'from grit.core.base import get_img_dir\n')] |
#https://github.com/Newmu/Theano-Tutorials/blob/master/1_linear_regression.py
import theano
from theano import tensor as T
import numpy as np
# 101 points on [-1, 1]; targets follow y = 2x plus Gaussian noise (sigma 0.33)
trX = np.linspace(-1, 1, 101)
trY = 2 * trX + np.random.randn(*trX.shape) * 0.33

# symbolic scalar input and target for Theano
X = T.scalar()
Y = T.scalar()
def model(X, w):
    """Linear model with no bias term: predict w * X."""
    return w * X
# single trainable weight, initialised to 0
w = theano.shared(np.asarray(0., dtype=theano.config.floatX))
y = model(X, w)

# mean squared error, its gradient w.r.t. w, and a plain SGD update (lr=0.01)
cost = T.mean(T.sqr(y - Y))
gradient = T.grad(cost=cost, wrt=w)
updates = [[w, w - gradient * 0.01]]

train = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)

# 100 epochs of per-sample SGD over the training points
for i in range(100):
    for x, y in zip(trX, trY):
        train(x, y)

print(w.get_value()) #something around 2
#https://raw.githubusercontent.com/Newmu/Theano-Tutorials/master/2_logistic_regression.py
import theano
from theano import tensor as T
import numpy as np
from fuel.datasets import MNIST
from matplotlib import pyplot, cm
# Load the MNIST training features via fuel and display one sample image.
dataset = MNIST(('train',), sources=('features',))
state = dataset.open()
image, = dataset.get_data(state=state, request=[1234])
pyplot.imshow(image.reshape((28, 28)), cmap=cm.Greys_r, interpolation='nearest')
pyplot.show()
dataset.close(state)
def floatX(X):
    """Return *X* as a numpy array in Theano's configured float precision."""
    dtype = theano.config.floatX
    return np.asarray(X, dtype=dtype)
def init_weights(shape):
    """Create a shared weight tensor of *shape* with small random init (std 0.01)."""
    initial = np.random.randn(*shape) * 0.01
    return theano.shared(floatX(initial))
def model(X, w):
    """Softmax regression: class probabilities from a single linear map."""
    logits = T.dot(X, w)
    return T.nnet.softmax(logits)
# NOTE(review): `mnist` is not defined in this file -- it comes from the
# original tutorial's load module; confirm the import before running.
trX, teX, trY, teY = mnist(onehot=True)

# symbolic input batch and one-hot target batch
X = T.fmatrix()
Y = T.fmatrix()

w = init_weights((784, 10))

py_x = model(X, w)
y_pred = T.argmax(py_x, axis=1)

# cross-entropy loss, gradient, and plain SGD update (lr=0.05)
cost = T.mean(T.nnet.categorical_crossentropy(py_x, Y))
gradient = T.grad(cost=cost, wrt=w)
update = [[w, w - gradient * 0.05]]

train = theano.function(inputs=[X, Y], outputs=cost, updates=update, allow_input_downcast=True)
predict = theano.function(inputs=[X], outputs=y_pred, allow_input_downcast=True)

# 100 epochs of mini-batch (128) training; print test accuracy each epoch
for i in range(100):
    for start, end in zip(range(0, len(trX), 128), range(128, len(trX), 128)):
        cost = train(trX[start:end], trY[start:end])
    print(i, np.mean(np.argmax(teY, axis=1) == predict(teX)))
| [
"theano.tensor.nnet.categorical_crossentropy",
"theano.function",
"matplotlib.pyplot.show",
"theano.tensor.dot",
"numpy.asarray",
"numpy.argmax",
"theano.tensor.sqr",
"numpy.linspace",
"theano.tensor.fmatrix",
"theano.tensor.argmax",
"theano.tensor.scalar",
"numpy.random.randn",
"fuel.datase... | [((150, 173), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(101)'], {}), '(-1, 1, 101)\n', (161, 173), True, 'import numpy as np\n'), ((230, 240), 'theano.tensor.scalar', 'T.scalar', ([], {}), '()\n', (238, 240), True, 'from theano import tensor as T\n'), ((245, 255), 'theano.tensor.scalar', 'T.scalar', ([], {}), '()\n', (253, 255), True, 'from theano import tensor as T\n'), ((410, 434), 'theano.tensor.grad', 'T.grad', ([], {'cost': 'cost', 'wrt': 'w'}), '(cost=cost, wrt=w)\n', (416, 434), True, 'from theano import tensor as T\n'), ((481, 573), 'theano.function', 'theano.function', ([], {'inputs': '[X, Y]', 'outputs': 'cost', 'updates': 'updates', 'allow_input_downcast': '(True)'}), '(inputs=[X, Y], outputs=cost, updates=updates,\n allow_input_downcast=True)\n', (496, 573), False, 'import theano\n'), ((927, 967), 'fuel.datasets.MNIST', 'MNIST', (["('train',)"], {'sources': "('features',)"}), "(('train',), sources=('features',))\n", (932, 967), False, 'from fuel.datasets import MNIST\n'), ((1127, 1140), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (1138, 1140), False, 'from matplotlib import pyplot, cm\n'), ((1425, 1436), 'theano.tensor.fmatrix', 'T.fmatrix', ([], {}), '()\n', (1434, 1436), True, 'from theano import tensor as T\n'), ((1441, 1452), 'theano.tensor.fmatrix', 'T.fmatrix', ([], {}), '()\n', (1450, 1452), True, 'from theano import tensor as T\n'), ((1511, 1533), 'theano.tensor.argmax', 'T.argmax', (['py_x'], {'axis': '(1)'}), '(py_x, axis=1)\n', (1519, 1533), True, 'from theano import tensor as T\n'), ((1602, 1626), 'theano.tensor.grad', 'T.grad', ([], {'cost': 'cost', 'wrt': 'w'}), '(cost=cost, wrt=w)\n', (1608, 1626), True, 'from theano import tensor as T\n'), ((1672, 1763), 'theano.function', 'theano.function', ([], {'inputs': '[X, Y]', 'outputs': 'cost', 'updates': 'update', 'allow_input_downcast': '(True)'}), '(inputs=[X, Y], outputs=cost, updates=update,\n allow_input_downcast=True)\n', (1687, 1763), False, 
'import theano\n'), ((1770, 1840), 'theano.function', 'theano.function', ([], {'inputs': '[X]', 'outputs': 'y_pred', 'allow_input_downcast': '(True)'}), '(inputs=[X], outputs=y_pred, allow_input_downcast=True)\n', (1785, 1840), False, 'import theano\n'), ((310, 353), 'numpy.asarray', 'np.asarray', (['(0.0)'], {'dtype': 'theano.config.floatX'}), '(0.0, dtype=theano.config.floatX)\n', (320, 353), True, 'import numpy as np\n'), ((385, 397), 'theano.tensor.sqr', 'T.sqr', (['(y - Y)'], {}), '(y - Y)\n', (390, 397), True, 'from theano import tensor as T\n'), ((1189, 1230), 'numpy.asarray', 'np.asarray', (['X'], {'dtype': 'theano.config.floatX'}), '(X, dtype=theano.config.floatX)\n', (1199, 1230), True, 'import numpy as np\n'), ((1549, 1589), 'theano.tensor.nnet.categorical_crossentropy', 'T.nnet.categorical_crossentropy', (['py_x', 'Y'], {}), '(py_x, Y)\n', (1580, 1589), True, 'from theano import tensor as T\n'), ((190, 217), 'numpy.random.randn', 'np.random.randn', (['*trX.shape'], {}), '(*trX.shape)\n', (205, 217), True, 'import numpy as np\n'), ((1366, 1377), 'theano.tensor.dot', 'T.dot', (['X', 'w'], {}), '(X, w)\n', (1371, 1377), True, 'from theano import tensor as T\n'), ((1289, 1312), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1304, 1312), True, 'import numpy as np\n'), ((2016, 2038), 'numpy.argmax', 'np.argmax', (['teY'], {'axis': '(1)'}), '(teY, axis=1)\n', (2025, 2038), True, 'import numpy as np\n')] |
"""General functions for working with observations.
"""
import collections
import os
from shapely.geometry import Point
import pandas as pd
from gisutils import df2shp, project
from mfsetup.obs import make_obsname
from mfsetup.units import convert_length_units, convert_volume_units, convert_time_units
from mapgwm.utils import makedirs, assign_geographic_obsgroups, cull_data_to_active_area
def format_site_ids(iterable, add_leading_zeros=False):
    """Cast site ids to strings.

    Parameters
    ----------
    iterable : iterable
        Site identifiers (ints or strings).
    add_leading_zeros : bool
        If True, pad USGS surface-water site numbers via
        :func:`format_usgs_sw_site_id`; otherwise just cast to ``str``.

    Returns
    -------
    list of str
    """
    # hoist the flag check out of the loop; avoid shadowing the `id` builtin
    if add_leading_zeros:
        return [format_usgs_sw_site_id(site_id) for site_id in iterable]
    return [str(site_id) for site_id in iterable]
def format_usgs_sw_site_id(stationID):
    """Add leading zeros to NWIS surface water sites, if they are missing.

    See https://help.waterdata.usgs.gov/faq/sites/do-station-numbers-have-any-particular-meaning.
    Zeros are only added to numeric site numbers less than 15 characters in length.
    """
    site = str(stationID)
    needs_zero = (
        not site.startswith('0')
        and site.isdigit()
        and 0 < int(site[0]) < 10
        and len(site) < 15
    )
    return '0' + site if needs_zero else site
def preprocess_obs(data, metadata=None, data_columns=['flow'],
start_date=None, active_area=None,
active_area_id_column=None,
active_area_feature_id=None,
source_crs=4269, dest_crs=5070,
datetime_col='datetime',
site_no_col='site_no',
line_id_col='line_id',
x_coord_col='x',
y_coord_col='y',
name_col='name',
qualifier_column=None,
default_qualifier='measured',
obstype='flow',
include_sites=None,
include_line_ids=None,
source_length_units='ft',
source_time_units='s',
dest_length_units='m',
dest_time_units='d',
geographic_groups=None,
geographic_groups_col=None,
max_obsname_len=None,
add_leading_zeros_to_sw_site_nos=False,
column_renames=None,
outfile=None,
):
"""Preprocess observation data, for example, from NWIS or another data source that
outputs time series in CSV format with site locations and identifiers.
* Data are reprojected from a `source_crs` (Coordinate reference system; assumed to be in geographic coordinates)
to the CRS of the model (`dest_crs`)
* Data are culled to a `start_date` and optionally, a polygon or set of polygons defining the model area
* length and time units are converted to those of the groundwater model.
* Prefixes for observation names (with an optional length limit) that identify the location are generated
* Preliminary observation groups can also be assigned, based on geographic areas defined by polygons
(`geographic_groups` parameter)
Parameters
----------
data : csv file or DataFrame
Time series of observations.
Columns:
===================== ======================================
site_no site identifier
datetime measurement dates/times
x x-coordinate of site
y y-coordinate of site
data_columns Columns of observed values
qualifier_column Optional column with qualifiers for values
===================== ======================================
Notes:
* x and y columns can alternatively be in the metadata table
* data_columns are denoted in `data_columns`; multiple
columns can be included to process base flow and total flow, or
other statistics in tandem
* For example, `qualifier_column` may have "estimated" or "measured"
flags denoting whether streamflows were derived from measured values
or statistical estimates.
metadata : csv file or DataFrame
Observation site information.
May include columns:
================= ================================================================================
site_no site identifier
x x-coordinate of site
y y-coordinate of site
name name of site
line_id_col Identifier for a line in a hydrography dataset that the site is associated with.
================= ================================================================================
Notes:
* other columns in metadata will be passed through to the metadata output
data_columns : list of strings
Columns in data with values or their statistics.
By default, ['q_cfs']
start_date : str (YYYY-mm-dd)
Simulation start date (cull observations before this date)
active_area : str
Shapefile with polygon to cull observations to. Automatically reprojected
to dest_crs if the shapefile includes a .prj file.
by default, None.
active_area_id_column : str, optional
Column in active_area with feature ids.
By default, None, in which case all features are used.
active_area_feature_id : str, optional
ID of feature to use for active area
By default, None, in which case all features are used.
source_crs : obj
Coordinate reference system of the head observation locations.
A Python int, dict, str, or :class:`pyproj.crs.CRS` instance
passed to :meth:`pyproj.crs.CRS.from_user_input`
Can be any of:
- PROJ string
- Dictionary of PROJ parameters
- PROJ keyword arguments for parameters
- JSON string with PROJ parameters
- CRS WKT string
- An authority string [i.e. 'epsg:4326']
- An EPSG integer code [i.e. 4326]
- A tuple of ("auth_name": "auth_code") [i.e ('epsg', '4326')]
- An object with a `to_wkt` method.
- A :class:`pyproj.crs.CRS` class
By default, epsg:4269
dest_crs : obj
Coordinate reference system of the model. Same input types
as ``source_crs``.
By default, epsg:5070
datetime_col : str, optional
Column name in data with observation date/times,
by default 'datetime'
site_no_col : str, optional
Column name in data and metadata with site identifiers,
by default 'site_no'
line_id_col : str, optional
Column name in data or metadata with identifiers for
hydrography lines associated with observation sites.
by default 'line_id'
x_coord_col : str, optional
Column name in data or metadata with x-coordinates,
by default 'x'
y_coord_col : str, optional
Column name in data or metadata with y-coordinates,
by default 'y'
name_col : str, optional
Column name in data or metadata with observation site names,
by default 'name'
qualifier_column : str, optional
Column name in data with observation qualifiers, such
as "measured" or "estimated"
by default 'category'
default_qualifier : str, optional
Default qualifier to populate qualifier_column if it
is None. By default, "measured"
obstype : str, optional
Modflow-6 observation type (e.g. 'downstream-flow' or 'stage').
The last part of the name (after the last hyphen) is used as a suffix in the output
``obsprefix`` column. E.g. 07275000-flow for downstream or upstream-flow at site 07275000.
By default, 'flow'
include_sites : list-like, optional
Exclude output to these sites.
by default, None (include all sites)
include_line_ids : list-like, optional
Exclude output to these sites, represented by line identifiers.
by default, None (include all sites)
source_length_units : str, 'm3', 'm', 'cubic meters', 'ft3', etc.
Length or volume units of the source data. By default, 'ft3'
source_time_units : str, 's', 'seconds', 'days', etc.
Time units of the source data. By default, 's'
dest_length_units : str, 'm3', 'cubic meters', 'ft3', etc.
Length or volume units of the output (model). By default, 'm'
dest_time_units : str, 's', 'seconds', 'days', etc.
Time units of the output (model). By default, 'd'
geographic_groups : file, dict or list-like
Option to group observations by area(s) of interest. Can
be a shapefile, list of shapefiles, or dictionary of shapely polygons.
A 'group' column will be created in the metadata, and observation
sites within each polygon will be assigned the group name
associated with that polygon.
For example::
geographic_groups='../source_data/extents/CompositeHydrographArea.shp'
geographic_groups=['../source_data/extents/CompositeHydrographArea.shp']
geographic_groups={'cha': <shapely Polygon>}
Where 'cha' is an observation group name for observations located within the
the area defined by CompositeHydrographArea.shp. For shapefiles,
group names are provided in a `geographic_groups_col`.
geographic_groups_col : str
Field name in the `geographic_groups` shapefile(s) containing the
observation group names associated with each polygon.
max_obsname_len : int or None
Maximum length for observation name prefix. Default of 13
allows for a PEST obsnme of 20 characters or less with
<prefix>_yyyydd or <prefix>_<per>d<per>
(e.g. <prefix>_2d1 for a difference between stress periods 2 and 1)
If None, observation names will not be truncated. PEST++ does not have
a limit on observation name length.
add_leading_zeros_to_sw_site_nos : bool
Whether or not to pad site numbers using the
:func:~`mapgwm.swflows.format_usgs_sw_site_id` function.
By default, False.
column_renames : dict, optional
Option to rename columns in the data or metadata that are different than those listed above.
For example, if the data file has a 'SITE_NO' column instead of 'SITE_BADGE'::
column_renames={'SITE_NO': 'site_no'}
by default None, in which case the renames listed above will be used.
Note that the renames must be the same as those listed above for
:func:`mapgwm.swflows.preprocess_obs` to work.
outfile : str
Where output file will be written. Metadata are written to a file
with the same name, with an additional "_info" suffix prior to
the file extension.
Returns
-------
data : DataFrame
Preprocessed time series
metadata : DataFrame
Preprocessed metadata
References
----------
`The PEST++ Manual <https://github.com/usgs/pestpp/tree/master/documentation>`
Notes
-----
"""
# outputs
if outfile is not None:
outpath, filename = os.path.split(outfile)
makedirs(outpath)
outname, ext = os.path.splitext(outfile)
out_info_csvfile = outname + '_info.csv'
out_data_csvfile = outfile
out_shapefile = outname + '_info.shp'
# read the source data
if not isinstance(data, pd.DataFrame):
df = pd.read_csv(data, dtype={site_no_col: object})
else:
df = data.copy()
# check the columns
for col in [datetime_col] + data_columns:
assert col in df.columns, "Column {} not found in {}".format(col,
data)
assert any({site_no_col, line_id_col}.intersection(df.columns)), \
"Neither {} or {} found in {}. Need to specify a site_no_col or line_id_col".format(site_no_col,
line_id_col, data)
# rename input columns to these names,
# for consistent output
dest_columns = {datetime_col: 'datetime',
site_no_col: 'site_no',
line_id_col: 'line_id',
x_coord_col: 'x',
y_coord_col: 'y',
name_col: 'name',
qualifier_column: 'category'
}
# update the default column renames
# with any supplied via column_renames parameter
if isinstance(column_renames, collections.Mapping):
dest_columns.update(column_renames)
df.rename(columns=dest_columns, inplace=True)
data_columns = [c if c not in dest_columns else dest_columns[c]
for c in data_columns]
# convert site numbers to strings;
# add leading 0s to any USGS sites that should have them
if 'site_no' in df.columns:
df['site_no'] = format_site_ids(df['site_no'], add_leading_zeros_to_sw_site_nos)
else:
df['site_no'] = df[line_id_col]
# make obsprefix names with site and observation type
df['obsprefix'] = [f"{site_no}-{obstype.split('-')[-1]}"
for site_no in df['site_no']]
# read the source data
if metadata is not None:
if not isinstance(metadata, pd.DataFrame):
md = pd.read_csv(metadata, dtype={site_no_col: object})
else:
md = metadata.copy()
if site_no_col not in md.columns or 'site_no' not in df.columns:
raise IndexError('If metadata are supplied, both data and metadata must '
'have a site_no column.')
md.rename(columns=dest_columns, inplace=True)
md['site_no'] = format_site_ids(md['site_no'], add_leading_zeros_to_sw_site_nos)
md.index = md['site_no']
by_site = df.groupby('site_no')
md['start_dt'] = pd.DataFrame(by_site['datetime'].first())
else:
by_site = df.groupby('site_no')
md = pd.DataFrame(by_site['datetime'].first())
md.columns = ['start_dt']
md['site_no'] = md.index
md['end_dt'] = pd.DataFrame(by_site['datetime'].last())
md['n'] = pd.DataFrame(by_site['datetime'].count())
md.reset_index(inplace=True, drop=True)
# assign metadata if supplied
for col in 'x', 'y', 'line_id', 'name':
if col in df.columns and col not in md.columns:
by_site_no = dict(zip(df['site_no'], df[col]))
md[col] = [by_site_no[sn] for sn in md['site_no']]
if col != 'line_id':
df.drop(col, axis=1, inplace=True)
# index the dataframe to times;
# truncate data before start date
df.index = pd.to_datetime(df['datetime'])
df.index.name = 'datetime'
df = df.loc[start_date:].copy()
# project x, y to model crs
x_pr, y_pr = project((md.x.values, md.y.values), source_crs, dest_crs)
md['x'], md['y'] = x_pr, y_pr
md['geometry'] = [Point(x, y) for x, y in zip(x_pr, y_pr)]
# cull data to that within the model area
if active_area is not None:
df, md = cull_data_to_active_area(df, active_area,
active_area_id_column,
active_area_feature_id,
data_crs=dest_crs, metadata=md)
# get the hydrography IDs corresponding to each site
# using the included lookup table
#if 'line_id' not in df.columns:
# assert line_id_lookup is not None, \
# "need to include line_ids in a column, or line_id_lookup dictionary mapping line_ids to site numbers"
# df = df.loc[df['site_no'].isin(line_id_lookup)].copy()
# df['line_id'] = [line_id_lookup[sn] for sn in df['site_no']]
if include_sites is not None:
md = md.loc[md.site_no.isin(include_sites)]
df = df.loc[df.site_no.isin(include_sites)]
if include_line_ids is not None:
md = md.loc[md.line_id.isin(include_line_ids)]
df = df.loc[df.line_id.isin(include_line_ids)]
# convert units
# ensure that values are numeric (may be objects if taken directly from NWIS)
if obstype == 'stage':
unit_conversion = convert_length_units(source_length_units, dest_length_units)
else:
unit_conversion = (convert_volume_units(source_length_units, dest_length_units) /
convert_time_units(source_time_units, dest_time_units))
for obs_col in data_columns:
df[obs_col] = pd.to_numeric(df[obs_col], errors='coerce') * unit_conversion
df.dropna(subset=data_columns, axis=0, inplace=True)
# reformat qualifiers for consistent output
# (lump to dest category columns of either estimated or measured)
# with measured including values derived from baseflow separation or actual measurements)
# output column name for qualifier column:
dest_qualifier_column = 'category'
if qualifier_column is not None:
qualifiers = {'calculated': 'measured', # 'measured',
'base flow separated from measured values': 'measured', # 'measured',
'measured total flow': 'measured',
'estimated gaged': 'estimated',
'estimated ungaged': 'estimated'}
df[dest_qualifier_column] = df[qualifier_column].replace(qualifiers)
else:
df['category'] = default_qualifier
# make unique n-character prefixes (site identifiers) for each observation location
# 13 character length allows for prefix_yyyymmm in 20 character observation names
# (BeoPEST limit)
unique_obsnames = set()
obsnames = []
for sn in md['site_no'].tolist():
if max_obsname_len is not None:
name = make_obsname(sn, unique_names=unique_obsnames,
maxlen=max_obsname_len)
assert name not in unique_obsnames
else:
name = sn
name = name + f"-{obstype.split('-')[-1]}"
unique_obsnames.add(name)
obsnames.append(name)
md['obsprefix'] = obsnames
# add area of interest information
md['group'] = 'fluxes'
md = assign_geographic_obsgroups(md, geographic_groups,
geographic_groups_col,
metadata_crs=dest_crs)
# data columns
data_cols = ['site_no', 'line_id', 'datetime', 'obsprefix'] + data_columns + ['category']
#if 'line_id' in md.columns and 'line_id' not in df.columns:
# # only map line_ids to data if there are more site numbers
# # implying that no site number maps to more than one line_id
# if len(set(df.site_no)) >= len(set(df.line_id)):
# ids = dict(zip(md['site_no'], md['line_id']))
# df['line_id'] = [ids[sn] for sn in df['site_no']]
data_cols = [c for c in data_cols if c in df.columns]
df = df[data_cols]
md.index = md['site_no']
# save out the results
if outfile is not None:
df2shp(md.drop(['x', 'y'], axis=1),
out_shapefile, crs=dest_crs)
print('writing {}'.format(out_info_csvfile))
md.drop('geometry', axis=1).to_csv(out_info_csvfile, index=False, float_format='%g')
print('writing {}'.format(out_data_csvfile))
df.to_csv(out_data_csvfile, index=False, float_format='%g')
return df, md
| [
"mfsetup.units.convert_time_units",
"gisutils.project",
"mapgwm.utils.assign_geographic_obsgroups",
"mapgwm.utils.makedirs",
"pandas.read_csv",
"mfsetup.units.convert_volume_units",
"os.path.splitext",
"os.path.split",
"shapely.geometry.Point",
"mapgwm.utils.cull_data_to_active_area",
"pandas.to... | [((14908, 14938), 'pandas.to_datetime', 'pd.to_datetime', (["df['datetime']"], {}), "(df['datetime'])\n", (14922, 14938), True, 'import pandas as pd\n'), ((15056, 15113), 'gisutils.project', 'project', (['(md.x.values, md.y.values)', 'source_crs', 'dest_crs'], {}), '((md.x.values, md.y.values), source_crs, dest_crs)\n', (15063, 15113), False, 'from gisutils import df2shp, project\n'), ((18404, 18504), 'mapgwm.utils.assign_geographic_obsgroups', 'assign_geographic_obsgroups', (['md', 'geographic_groups', 'geographic_groups_col'], {'metadata_crs': 'dest_crs'}), '(md, geographic_groups, geographic_groups_col,\n metadata_crs=dest_crs)\n', (18431, 18504), False, 'from mapgwm.utils import makedirs, assign_geographic_obsgroups, cull_data_to_active_area\n'), ((11326, 11348), 'os.path.split', 'os.path.split', (['outfile'], {}), '(outfile)\n', (11339, 11348), False, 'import os\n'), ((11357, 11374), 'mapgwm.utils.makedirs', 'makedirs', (['outpath'], {}), '(outpath)\n', (11365, 11374), False, 'from mapgwm.utils import makedirs, assign_geographic_obsgroups, cull_data_to_active_area\n'), ((11398, 11423), 'os.path.splitext', 'os.path.splitext', (['outfile'], {}), '(outfile)\n', (11414, 11423), False, 'import os\n'), ((11638, 11684), 'pandas.read_csv', 'pd.read_csv', (['data'], {'dtype': '{site_no_col: object}'}), '(data, dtype={site_no_col: object})\n', (11649, 11684), True, 'import pandas as pd\n'), ((15170, 15181), 'shapely.geometry.Point', 'Point', (['x', 'y'], {}), '(x, y)\n', (15175, 15181), False, 'from shapely.geometry import Point\n'), ((15307, 15431), 'mapgwm.utils.cull_data_to_active_area', 'cull_data_to_active_area', (['df', 'active_area', 'active_area_id_column', 'active_area_feature_id'], {'data_crs': 'dest_crs', 'metadata': 'md'}), '(df, active_area, active_area_id_column,\n active_area_feature_id, data_crs=dest_crs, metadata=md)\n', (15331, 15431), False, 'from mapgwm.utils import makedirs, assign_geographic_obsgroups, cull_data_to_active_area\n'), 
((16432, 16492), 'mfsetup.units.convert_length_units', 'convert_length_units', (['source_length_units', 'dest_length_units'], {}), '(source_length_units, dest_length_units)\n', (16452, 16492), False, 'from mfsetup.units import convert_length_units, convert_volume_units, convert_time_units\n'), ((13549, 13599), 'pandas.read_csv', 'pd.read_csv', (['metadata'], {'dtype': '{site_no_col: object}'}), '(metadata, dtype={site_no_col: object})\n', (13560, 13599), True, 'import pandas as pd\n'), ((16530, 16590), 'mfsetup.units.convert_volume_units', 'convert_volume_units', (['source_length_units', 'dest_length_units'], {}), '(source_length_units, dest_length_units)\n', (16550, 16590), False, 'from mfsetup.units import convert_length_units, convert_volume_units, convert_time_units\n'), ((16617, 16671), 'mfsetup.units.convert_time_units', 'convert_time_units', (['source_time_units', 'dest_time_units'], {}), '(source_time_units, dest_time_units)\n', (16635, 16671), False, 'from mfsetup.units import convert_length_units, convert_volume_units, convert_time_units\n'), ((16728, 16771), 'pandas.to_numeric', 'pd.to_numeric', (['df[obs_col]'], {'errors': '"""coerce"""'}), "(df[obs_col], errors='coerce')\n", (16741, 16771), True, 'import pandas as pd\n'), ((17996, 18066), 'mfsetup.obs.make_obsname', 'make_obsname', (['sn'], {'unique_names': 'unique_obsnames', 'maxlen': 'max_obsname_len'}), '(sn, unique_names=unique_obsnames, maxlen=max_obsname_len)\n', (18008, 18066), False, 'from mfsetup.obs import make_obsname\n')] |
from django.contrib.auth import views as auth_views
from django.urls import path
from django.urls.base import reverse_lazy
from django.views.decorators.csrf import csrf_exempt
from . import views, webhooks
from .forms.authorization import CosmosPasswordChangeForm, CosmosPasswordResetForm, CosmosSetPasswordForm
app_name = "cosmos_users"
urlpatterns = [
# auth urls
path("login/", views.CosmosLoginView.as_view(), name="login"),
path("logout/", auth_views.LogoutView.as_view(), name="logout"),
path(
"password_change/",
auth_views.PasswordChangeView.as_view(
form_class=CosmosPasswordChangeForm, success_url=reverse_lazy("cosmos_users:password_change_done")
),
name="password_change",
),
path("password_change/done/", auth_views.PasswordChangeDoneView.as_view(), name="password_change_done"),
path(
"password_reset/",
auth_views.PasswordResetView.as_view(
form_class=CosmosPasswordResetForm, success_url=reverse_lazy("cosmos_users:password_reset_done")
),
name="password_reset",
),
path("password_reset/done/", auth_views.PasswordResetDoneView.as_view(), name="password_reset_done"),
path(
"reset/<uidb64>/<token>/",
auth_views.PasswordResetConfirmView.as_view(
form_class=CosmosSetPasswordForm, success_url=reverse_lazy("cosmos_users:password_reset_complete")
),
name="password_reset_confirm",
),
path("reset/done/", auth_views.PasswordResetCompleteView.as_view(), name="password_reset_complete"),
# custom urls
path("profile/", views.profile, name="user_profile"),
path("delete/", views.delete, name="user_delete"),
path(
"register/",
views.RegistrationWizard.as_view(views.FORMS, condition_dict=views.CONDITION_DICT),
name="user_register",
),
path("register/done/", views.registration_done, name="registration_done"),
path("confirm/<uidb64>/<token>/", views.activate, name="confirm_registration"),
path("hook/", csrf_exempt(webhooks.SendGridWebhook.as_view()), name="email_hook"),
]
| [
"django.contrib.auth.views.PasswordResetDoneView.as_view",
"django.urls.base.reverse_lazy",
"django.contrib.auth.views.PasswordResetCompleteView.as_view",
"django.contrib.auth.views.LogoutView.as_view",
"django.contrib.auth.views.PasswordChangeDoneView.as_view",
"django.urls.path"
] | [((1604, 1656), 'django.urls.path', 'path', (['"""profile/"""', 'views.profile'], {'name': '"""user_profile"""'}), "('profile/', views.profile, name='user_profile')\n", (1608, 1656), False, 'from django.urls import path\n'), ((1662, 1711), 'django.urls.path', 'path', (['"""delete/"""', 'views.delete'], {'name': '"""user_delete"""'}), "('delete/', views.delete, name='user_delete')\n", (1666, 1711), False, 'from django.urls import path\n'), ((1877, 1950), 'django.urls.path', 'path', (['"""register/done/"""', 'views.registration_done'], {'name': '"""registration_done"""'}), "('register/done/', views.registration_done, name='registration_done')\n", (1881, 1950), False, 'from django.urls import path\n'), ((1956, 2034), 'django.urls.path', 'path', (['"""confirm/<uidb64>/<token>/"""', 'views.activate'], {'name': '"""confirm_registration"""'}), "('confirm/<uidb64>/<token>/', views.activate, name='confirm_registration')\n", (1960, 2034), False, 'from django.urls import path\n'), ((460, 491), 'django.contrib.auth.views.LogoutView.as_view', 'auth_views.LogoutView.as_view', ([], {}), '()\n', (489, 491), True, 'from django.contrib.auth import views as auth_views\n'), ((789, 832), 'django.contrib.auth.views.PasswordChangeDoneView.as_view', 'auth_views.PasswordChangeDoneView.as_view', ([], {}), '()\n', (830, 832), True, 'from django.contrib.auth import views as auth_views\n'), ((1138, 1180), 'django.contrib.auth.views.PasswordResetDoneView.as_view', 'auth_views.PasswordResetDoneView.as_view', ([], {}), '()\n', (1178, 1180), True, 'from django.contrib.auth import views as auth_views\n'), ((1501, 1547), 'django.contrib.auth.views.PasswordResetCompleteView.as_view', 'auth_views.PasswordResetCompleteView.as_view', ([], {}), '()\n', (1545, 1547), True, 'from django.contrib.auth import views as auth_views\n'), ((655, 704), 'django.urls.base.reverse_lazy', 'reverse_lazy', (['"""cosmos_users:password_change_done"""'], {}), "('cosmos_users:password_change_done')\n", (667, 704), False, 
'from django.urls.base import reverse_lazy\n'), ((1007, 1055), 'django.urls.base.reverse_lazy', 'reverse_lazy', (['"""cosmos_users:password_reset_done"""'], {}), "('cosmos_users:password_reset_done')\n", (1019, 1055), False, 'from django.urls.base import reverse_lazy\n'), ((1367, 1419), 'django.urls.base.reverse_lazy', 'reverse_lazy', (['"""cosmos_users:password_reset_complete"""'], {}), "('cosmos_users:password_reset_complete')\n", (1379, 1419), False, 'from django.urls.base import reverse_lazy\n')] |
import math
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from custom_model.CXLNetModel import PosNetModel
from transformers import XLNetLMHeadModel, XLNetPreTrainedModel, XLNetModel, Conv1D
from transformers.modeling_utils import prune_conv1d_layer
from transformers.modeling_xlnet import XLNetLayer
from torch.nn import functional as F
class CPXLNetModel(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.output_past = config.output_past
self.mem_len = config.mem_len
self.reuse_len = config.reuse_len
self.d_model = config.d_model
self.same_length = config.same_length
self.attn_type = config.attn_type
self.bi_data = config.bi_data
self.clamp_len = config.clamp_len
self.n_layer = config.n_layer
self.word_embedding = nn.Embedding(config.vocab_size, config.d_model)
# self.genre_embedding= nn.Embedding(config.n_genres, config.d_model)
self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model))
self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)])
self.dropout = nn.Dropout(config.dropout)
self.init_weights()
def get_input_embeddings(self):
return self.word_embedding
def set_input_embeddings(self, new_embeddings):
self.word_embedding = new_embeddings
def _prune_heads(self, heads_to_prune):
raise NotImplementedError
def create_mask(self, qlen, mlen):
"""
Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.
Args:
qlen: Sequence length
mlen: Mask length
::
same_length=False: same_length=True:
<mlen > < qlen > <mlen > < qlen >
^ [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 1 1 1 1]
[0 0 0 0 0 0 1 1 1] [1 0 0 0 0 0 1 1 1]
qlen [0 0 0 0 0 0 0 1 1] [1 1 0 0 0 0 0 1 1]
[0 0 0 0 0 0 0 0 1] [1 1 1 0 0 0 0 0 1]
v [0 0 0 0 0 0 0 0 0] [1 1 1 1 0 0 0 0 0]
"""
attn_mask = torch.ones([qlen, qlen])
mask_up = torch.triu(attn_mask, diagonal=1)
attn_mask_pad = torch.zeros([qlen, mlen])
ret = torch.cat([attn_mask_pad, mask_up], dim=1)
if self.same_length:
mask_lo = torch.tril(attn_mask, diagonal=-1)
ret = torch.cat([ret[:, :qlen] + mask_lo, ret[:, qlen:]], dim=1)
ret = ret.to(next(self.parameters()))
return ret
def cache_mem(self, curr_out, prev_mem):
# cache hidden states into memory.
if self.reuse_len is not None and self.reuse_len > 0:
curr_out = curr_out[: self.reuse_len]
if prev_mem is None:
new_mem = curr_out[-self.mem_len :]
else:
new_mem = torch.cat([prev_mem, curr_out], dim=0)[-self.mem_len :]
return new_mem.detach()
@staticmethod
def positional_embedding(pos_seq, inv_freq, bsz=None):
sinusoid_inp = torch.einsum("i,d->id", pos_seq, inv_freq)
pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1)
pos_emb = pos_emb[:, None, :]
if bsz is not None:
pos_emb = pos_emb.expand(-1, bsz, -1)
return pos_emb
def relative_positional_encoding(self, qlen, klen, bsz=None):
# create relative positional encoding.
freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.float)
inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model))
if self.attn_type == "bi":
# beg, end = klen - 1, -qlen
beg, end = klen, -qlen
elif self.attn_type == "uni":
# beg, end = klen - 1, -1
beg, end = klen, -1
else:
raise ValueError("Unknown `attn_type` {}.".format(self.attn_type))
if self.bi_data:
fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.float)
bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.float)
if self.clamp_len > 0:
fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
if bsz is not None:
fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2)
bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2)
else:
fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)
pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1)
else:
fwd_pos_seq = torch.arange(beg, end, -1.0)
if self.clamp_len > 0:
fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)
pos_emb = pos_emb.to(next(self.parameters()))
return pos_emb
def forward(
self,
encoder_logits=None,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_ids = input_ids.transpose(0, 1).contiguous()
qlen, bsz = input_ids.shape[0], input_ids.shape[1]
elif inputs_embeds is not None:
inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None
input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None
attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None
perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None
target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None
mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0
klen = mlen + qlen
dtype_float = next(self.parameters()).dtype
device = next(self.parameters()).device
# Attention mask
# causal attention mask
if self.attn_type == "uni":
attn_mask = self.create_mask(qlen, mlen)
attn_mask = attn_mask[:, :, None, None]
elif self.attn_type == "bi":
attn_mask = None
else:
raise ValueError("Unsupported attention type: {}".format(self.attn_type))
# data mask: input mask & perm mask
assert input_mask is None or attention_mask is None, "You can only use one of input_mask (uses 1 for padding) "
"or attention_mask (uses 0 for padding, added for compatbility with BERT). Please choose one."
if input_mask is None and attention_mask is not None:
input_mask = 1.0 - attention_mask
if input_mask is not None and perm_mask is not None:
data_mask = input_mask[None] + perm_mask
elif input_mask is not None and perm_mask is None:
data_mask = input_mask[None]
elif input_mask is None and perm_mask is not None:
data_mask = perm_mask
else:
data_mask = None
enc_len=0
if encoder_logits is not None:
enc_len=encoder_logits.size(-2)
if data_mask is not None:
# all mems can be attended to
if mlen > 0:
mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask)
data_mask = torch.cat([mems_mask, data_mask], dim=1)
elif encoder_logits is not None:
enc_mask = torch.zeros([data_mask.shape[0], enc_len, bsz]).to(data_mask)
data_mask = torch.cat([enc_mask, data_mask], dim=1)
if attn_mask is None:
attn_mask = data_mask[:, :, :, None]
else:
attn_mask += data_mask[:, :, :, None]
if attn_mask is not None:
attn_mask = (attn_mask > 0).to(dtype_float)
if attn_mask is not None:
non_tgt_mask = -torch.eye(qlen).to(attn_mask)
if mlen > 0:
non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1)
if enc_len >0:
non_tgt_mask = torch.cat([torch.zeros([qlen, enc_len]).to(attn_mask), non_tgt_mask], dim=-1)
non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask)
else:
non_tgt_mask = None
# Word embeddings and prepare h & g hidden states
if inputs_embeds is not None:
word_emb_k = inputs_embeds
else:
word_emb_k = self.word_embedding(input_ids)
output_h = self.dropout(word_emb_k)
if encoder_logits is not None:
output_h = torch.cat((encoder_logits.permute(1,0,2),output_h), dim=0).to(output_h)
#########genre_embedding################
# if genre_idxs is not None:
# output_h += self.genre_embedding(genre_idxs)
#########################################
if target_mapping is not None:
word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1)
# else: # We removed the inp_q input which was same as target mapping
# inp_q_ext = inp_q[:, :, None]
# word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
output_g = self.dropout(word_emb_q)
else:
output_g = None
# Segment embedding
if token_type_ids is not None:
# Convert `token_type_ids` to one-hot `seg_mat`
if mlen > 0:
mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device)
cat_ids = torch.cat([mem_pad, token_type_ids], dim=0)
else:
cat_ids = token_type_ids
# `1` indicates not in the same segment [qlen x klen x bsz]
seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
seg_mat = F.one_hot(seg_mat, num_classes=2).to(dtype_float)
else:
seg_mat = None
# Positional encoding
pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz)
pos_emb = self.dropout(pos_emb)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
# and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
) # switch to fload if need + fp16 compatibility
else:
head_mask = [None] * self.n_layer
new_mems = ()
if mems is None:
mems = [None] * len(self.layer)
attentions = []
hidden_states = []
for i, layer_module in enumerate(self.layer):
if self.mem_len is not None and self.mem_len > 0 and self.output_past:
# cache new mems
new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
if self.output_hidden_states:
hidden_states.append((output_h, output_g) if output_g is not None else output_h)
outputs = layer_module(
output_h,
output_g,
attn_mask_h=non_tgt_mask,
attn_mask_g=attn_mask,
r=pos_emb,
seg_mat=seg_mat,
mems=mems[i],
target_mapping=target_mapping,
head_mask=head_mask[i],
)
output_h, output_g = outputs[:2]
if self.output_attentions:
attentions.append(outputs[2])
# Add last hidden state
if self.output_hidden_states:
hidden_states.append((output_h, output_g) if output_g is not None else output_h)
output = self.dropout(output_g if output_g is not None else output_h)
# Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
outputs = (output.permute(1, 0, 2).contiguous(),)
if self.mem_len is not None and self.mem_len > 0 and self.output_past:
outputs = outputs + (new_mems,)
if self.output_hidden_states:
if output_g is not None:
hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs)
else:
hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states)
outputs = outputs + (hidden_states,)
if self.output_attentions:
if target_mapping is not None:
# when target_mapping is provided, there are 2-tuple of attentions
attentions = tuple(
tuple(att_stream.permute(2, 3, 0, 1).contiguous() for att_stream in t) for t in attentions
)
else:
attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
outputs = outputs + (attentions,)
return outputs # outputs, (new_mems), (hidden_states), (attentions)
class CPXLNetLMHeadModel(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.attn_type = config.attn_type
self.same_length = config.same_length
self.encoder = PosNetModel(config)
self.transformer = CPXLNetModel(config)
self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias=True)
self.init_weights()
def get_output_embeddings(self):
return self.lm_loss
def forward(
self,
input_ids=None,
facts_tokens=None,
facts_embeds=None,
facts_input_mask=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
encoder_out = self.encoder(input_ids=facts_tokens, fact_embeds=facts_embeds, input_mask=facts_input_mask)[0]
transformer_outputs = self.transformer(
input_ids=input_ids,
encoder_logits=encoder_out,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
logits = self.lm_loss(transformer_outputs[0])
outputs = (logits,) + transformer_outputs[1:] # Keep mems, hidden states, attentions if there are in it
if labels is not None:
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
outputs = (loss,) + outputs
return outputs # return (loss), logits, (mems), (hidden states), (attentions)
class Attention(nn.Module):
    """Multi-head causal self-attention (GPT-2 style) with optional head pruning.

    A single fused ``Conv1D`` (``c_attn``) produces query/key/value in one
    projection; a lower-triangular ``bias`` buffer enforces causal masking.
    """
    def __init__(self, nx, n_ctx, n_head, scale=False):
        """
        Args:
            nx (int): model/embedding dimension (n_embd).
            n_ctx (int): maximum context length used to build the causal mask.
            n_head (int): number of attention heads; must divide ``nx`` evenly.
            scale (bool): if True, scale attention logits by 1/sqrt(head_dim).
        """
        super().__init__()
        self.output_attentions = False
        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % n_head == 0
        # Causal mask: lower-triangular ones, shaped (1, 1, n_ctx, n_ctx) for broadcasting.
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = n_head
        self.split_size = n_state
        self.scale = scale
        self.c_attn = Conv1D(n_state * 3, nx)  # fused q/k/v projection
        self.c_proj = Conv1D(n_state, nx)  # output projection
        self.attn_dropout = nn.Dropout(0)  # dropout disabled (p=0)
        self.resid_dropout = nn.Dropout(0)  # dropout disabled (p=0)
        self.pruned_heads = set()
    def prune_heads(self, heads):
        """Remove the given attention heads in place and shrink both projections."""
        if len(heads) == 0:
            return
        mask = torch.ones(self.n_head, self.split_size // self.n_head)
        heads = set(heads) - self.pruned_heads  # Convert to set and remove already pruned heads
        for head in heads:
            # Compute how many pruned heads are before the head and move the index accordingly
            head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        # q, k and v live side by side in c_attn's output, so the surviving
        # column index is repeated once per third of the fused projection.
        index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
        # Update hyper params
        self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
        self.n_head = self.n_head - len(heads)
        self.pruned_heads = self.pruned_heads.union(heads)
    def _attn(self, q, k, v, attention_mask=None, head_mask=None):
        """Core scaled dot-product attention with causal masking.

        Returns a list ``[context]`` (plus the attention weights when
        ``output_attentions`` is enabled).
        """
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))
        nd, ns = w.size(-2), w.size(-1)
        # Slice the causal mask to the current query/key lengths; positions with
        # b == 0 (future tokens) get a large negative logit (-1e4) before softmax.
        b = self.bias[:, :, ns - nd : ns, :ns]
        w = w * b - 1e4 * (1 - b)
        if attention_mask is not None:
            # Apply the attention mask
            w = w + attention_mask
        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)
        # Mask heads if we want to
        if head_mask is not None:
            w = w * head_mask
        outputs = [torch.matmul(w, v)]
        if self.output_attentions:
            outputs.append(w)
        return outputs
    def merge_heads(self, x):
        """Inverse of split_heads: (batch, head, seq, head_dim) -> (batch, seq, n_state)."""
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states
    def split_heads(self, x, k=False):
        """Reshape (batch, seq, n_state) into per-head form; keys are laid out transposed."""
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)  # (batch, head, head_features, seq_length)
        else:
            return x.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)
    def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
        """Attend over ``x``; returns ``[output, present]`` (+ attentions if enabled).

        ``layer_past`` holds cached (key, value) tensors from earlier steps for
        incremental decoding; ``present`` is the updated cache to pass forward.
        """
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        if layer_past is not None:
            past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1]  # transpose back cf below
            key = torch.cat((past_key, key), dim=-1)
            value = torch.cat((past_value, value), dim=-2)
        present = torch.stack((key.transpose(-2, -1), value))  # transpose to have same shapes for stacking
        attn_outputs = self._attn(query, key, value, attention_mask, head_mask)
        a = attn_outputs[0]
        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)
        outputs = [a, present] + attn_outputs[1:]
        return outputs  # a, present, (attentions)
class MLP(nn.Module):
    """Position-wise feed-forward sub-layer: project up, apply GELU, project back."""

    def __init__(self, n_state, n_embd):  # typically n_state = 4 * n_embd
        super().__init__()
        embed_dim = n_embd
        self.c_fc = Conv1D(n_state, embed_dim)    # expansion projection
        self.c_proj = Conv1D(embed_dim, n_state)  # contraction projection
        self.act = gelu
        self.dropout = nn.Dropout(0)  # dropout disabled (p=0)

    def forward(self, x):
        """Apply the two-layer feed-forward network with dropout on the output."""
        hidden = self.act(self.c_fc(x))
        projected = self.c_proj(hidden)
        return self.dropout(projected)
class Block(nn.Module):
    """Pre-LayerNorm transformer block: self-attention and MLP, each wrapped in a residual add."""

    def __init__(self, dsize, n_head):
        super().__init__()
        self.ln_1 = nn.LayerNorm(dsize, eps=1e-5)
        self.attn = Attention(dsize, dsize, n_head)
        self.ln_2 = nn.LayerNorm(dsize, eps=1e-5)
        self.mlp = MLP(4 * dsize, dsize)

    def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
        """Run one transformer layer; returns ``[hidden, present, (attentions)]``."""
        attn_results = self.attn(
            self.ln_1(x), layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask
        )
        # attn_results: [output, present, (attentions)]
        x = x + attn_results[0]          # residual connection around attention
        x = x + self.mlp(self.ln_2(x))   # residual connection around feed-forward
        return [x] + attn_results[1:]    # hidden states, present, (attentions)
| [
"torch.triu",
"torch.nn.Dropout",
"transformers.Conv1D",
"torch.nn.CrossEntropyLoss",
"torch.sin",
"torch.pow",
"torch.cos",
"torch.arange",
"torch.tril",
"torch.eye",
"torch.nn.LayerNorm",
"transformers.modeling_utils.prune_conv1d_layer",
"torch.matmul",
"torch.nn.Embedding",
"torch.ein... | [((999, 1046), 'torch.nn.Embedding', 'nn.Embedding', (['config.vocab_size', 'config.d_model'], {}), '(config.vocab_size, config.d_model)\n', (1011, 1046), False, 'from torch import nn\n'), ((1314, 1340), 'torch.nn.Dropout', 'nn.Dropout', (['config.dropout'], {}), '(config.dropout)\n', (1324, 1340), False, 'from torch import nn\n'), ((2325, 2349), 'torch.ones', 'torch.ones', (['[qlen, qlen]'], {}), '([qlen, qlen])\n', (2335, 2349), False, 'import torch\n'), ((2368, 2401), 'torch.triu', 'torch.triu', (['attn_mask'], {'diagonal': '(1)'}), '(attn_mask, diagonal=1)\n', (2378, 2401), False, 'import torch\n'), ((2426, 2451), 'torch.zeros', 'torch.zeros', (['[qlen, mlen]'], {}), '([qlen, mlen])\n', (2437, 2451), False, 'import torch\n'), ((2466, 2508), 'torch.cat', 'torch.cat', (['[attn_mask_pad, mask_up]'], {'dim': '(1)'}), '([attn_mask_pad, mask_up], dim=1)\n', (2475, 2508), False, 'import torch\n'), ((3243, 3285), 'torch.einsum', 'torch.einsum', (['"""i,d->id"""', 'pos_seq', 'inv_freq'], {}), "('i,d->id', pos_seq, inv_freq)\n", (3255, 3285), False, 'import torch\n'), ((3648, 3701), 'torch.arange', 'torch.arange', (['(0)', 'self.d_model', '(2.0)'], {'dtype': 'torch.float'}), '(0, self.d_model, 2.0, dtype=torch.float)\n', (3660, 3701), False, 'import torch\n'), ((14737, 14756), 'custom_model.CXLNetModel.PosNetModel', 'PosNetModel', (['config'], {}), '(config)\n', (14748, 14756), False, 'from custom_model.CXLNetModel import PosNetModel\n'), ((14829, 14884), 'torch.nn.Linear', 'nn.Linear', (['config.d_model', 'config.vocab_size'], {'bias': '(True)'}), '(config.d_model, config.vocab_size, bias=True)\n', (14838, 14884), False, 'from torch import nn\n'), ((16993, 17016), 'transformers.Conv1D', 'Conv1D', (['(n_state * 3)', 'nx'], {}), '(n_state * 3, nx)\n', (16999, 17016), False, 'from transformers import XLNetLMHeadModel, XLNetPreTrainedModel, XLNetModel, Conv1D\n'), ((17039, 17058), 'transformers.Conv1D', 'Conv1D', (['n_state', 'nx'], {}), '(n_state, nx)\n', 
(17045, 17058), False, 'from transformers import XLNetLMHeadModel, XLNetPreTrainedModel, XLNetModel, Conv1D\n'), ((17087, 17100), 'torch.nn.Dropout', 'nn.Dropout', (['(0)'], {}), '(0)\n', (17097, 17100), False, 'from torch import nn\n'), ((17130, 17143), 'torch.nn.Dropout', 'nn.Dropout', (['(0)'], {}), '(0)\n', (17140, 17143), False, 'from torch import nn\n'), ((17275, 17330), 'torch.ones', 'torch.ones', (['self.n_head', '(self.split_size // self.n_head)'], {}), '(self.n_head, self.split_size // self.n_head)\n', (17285, 17330), False, 'import torch\n'), ((17777, 17849), 'torch.cat', 'torch.cat', (['[index, index + self.split_size, index + 2 * self.split_size]'], {}), '([index, index + self.split_size, index + 2 * self.split_size])\n', (17786, 17849), False, 'import torch\n'), ((17905, 17955), 'transformers.modeling_utils.prune_conv1d_layer', 'prune_conv1d_layer', (['self.c_attn', 'index_attn'], {'dim': '(1)'}), '(self.c_attn, index_attn, dim=1)\n', (17923, 17955), False, 'from transformers.modeling_utils import prune_conv1d_layer\n'), ((17978, 18023), 'transformers.modeling_utils.prune_conv1d_layer', 'prune_conv1d_layer', (['self.c_proj', 'index'], {'dim': '(0)'}), '(self.c_proj, index, dim=0)\n', (17996, 18023), False, 'from transformers.modeling_utils import prune_conv1d_layer\n'), ((18329, 18347), 'torch.matmul', 'torch.matmul', (['q', 'k'], {}), '(q, k)\n', (18341, 18347), False, 'import torch\n'), ((20687, 20706), 'transformers.Conv1D', 'Conv1D', (['n_state', 'nx'], {}), '(n_state, nx)\n', (20693, 20706), False, 'from transformers import XLNetLMHeadModel, XLNetPreTrainedModel, XLNetModel, Conv1D\n'), ((20729, 20748), 'transformers.Conv1D', 'Conv1D', (['nx', 'n_state'], {}), '(nx, n_state)\n', (20735, 20748), False, 'from transformers import XLNetLMHeadModel, XLNetPreTrainedModel, XLNetModel, Conv1D\n'), ((20796, 20809), 'torch.nn.Dropout', 'nn.Dropout', (['(0)'], {}), '(0)\n', (20806, 20809), False, 'from torch import nn\n'), ((21044, 21074), 
'torch.nn.LayerNorm', 'nn.LayerNorm', (['dsize'], {'eps': '(1e-05)'}), '(dsize, eps=1e-05)\n', (21056, 21074), False, 'from torch import nn\n'), ((21146, 21176), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['dsize'], {'eps': '(1e-05)'}), '(dsize, eps=1e-05)\n', (21158, 21176), False, 'from torch import nn\n'), ((1162, 1201), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)', '(1)', 'config.d_model'], {}), '(1, 1, config.d_model)\n', (1179, 1201), False, 'import torch\n'), ((2560, 2594), 'torch.tril', 'torch.tril', (['attn_mask'], {'diagonal': '(-1)'}), '(attn_mask, diagonal=-1)\n', (2570, 2594), False, 'import torch\n'), ((2613, 2671), 'torch.cat', 'torch.cat', (['[ret[:, :qlen] + mask_lo, ret[:, qlen:]]'], {'dim': '(1)'}), '([ret[:, :qlen] + mask_lo, ret[:, qlen:]], dim=1)\n', (2622, 2671), False, 'import torch\n'), ((3725, 3766), 'torch.pow', 'torch.pow', (['(10000)', '(freq_seq / self.d_model)'], {}), '(10000, freq_seq / self.d_model)\n', (3734, 3766), False, 'import torch\n'), ((4134, 4181), 'torch.arange', 'torch.arange', (['beg', 'end', '(-1.0)'], {'dtype': 'torch.float'}), '(beg, end, -1.0, dtype=torch.float)\n', (4146, 4181), False, 'import torch\n'), ((4208, 4256), 'torch.arange', 'torch.arange', (['(-beg)', '(-end)', '(1.0)'], {'dtype': 'torch.float'}), '(-beg, -end, 1.0, dtype=torch.float)\n', (4220, 4256), False, 'import torch\n'), ((4865, 4909), 'torch.cat', 'torch.cat', (['[fwd_pos_emb, bwd_pos_emb]'], {'dim': '(1)'}), '([fwd_pos_emb, bwd_pos_emb], dim=1)\n', (4874, 4909), False, 'import torch\n'), ((4950, 4978), 'torch.arange', 'torch.arange', (['beg', 'end', '(-1.0)'], {}), '(beg, end, -1.0)\n', (4962, 4978), False, 'import torch\n'), ((16214, 16232), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (16230, 16232), False, 'from torch.nn import CrossEntropyLoss\n'), ((18661, 18679), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (18671, 18679), False, 'from torch import nn\n'), ((18836, 18854), 'torch.matmul', 
'torch.matmul', (['w', 'v'], {}), '(w, v)\n', (18848, 18854), False, 'import torch\n'), ((20011, 20045), 'torch.cat', 'torch.cat', (['(past_key, key)'], {'dim': '(-1)'}), '((past_key, key), dim=-1)\n', (20020, 20045), False, 'import torch\n'), ((20066, 20104), 'torch.cat', 'torch.cat', (['(past_value, value)'], {'dim': '(-2)'}), '((past_value, value), dim=-2)\n', (20075, 20104), False, 'import torch\n'), ((1239, 1257), 'transformers.modeling_xlnet.XLNetLayer', 'XLNetLayer', (['config'], {}), '(config)\n', (1249, 1257), False, 'from transformers.modeling_xlnet import XLNetLayer\n'), ((3053, 3091), 'torch.cat', 'torch.cat', (['[prev_mem, curr_out]'], {'dim': '(0)'}), '([prev_mem, curr_out], dim=0)\n', (3062, 3091), False, 'import torch\n'), ((3315, 3338), 'torch.sin', 'torch.sin', (['sinusoid_inp'], {}), '(sinusoid_inp)\n', (3324, 3338), False, 'import torch\n'), ((3340, 3363), 'torch.cos', 'torch.cos', (['sinusoid_inp'], {}), '(sinusoid_inp)\n', (3349, 3363), False, 'import torch\n'), ((8346, 8386), 'torch.cat', 'torch.cat', (['[mems_mask, data_mask]'], {'dim': '(1)'}), '([mems_mask, data_mask], dim=1)\n', (8355, 8386), False, 'import torch\n'), ((10529, 10586), 'torch.zeros', 'torch.zeros', (['[mlen, bsz]'], {'dtype': 'torch.long', 'device': 'device'}), '([mlen, bsz], dtype=torch.long, device=device)\n', (10540, 10586), False, 'import torch\n'), ((10613, 10656), 'torch.cat', 'torch.cat', (['[mem_pad, token_type_ids]'], {'dim': '(0)'}), '([mem_pad, token_type_ids], dim=0)\n', (10622, 10656), False, 'import torch\n'), ((8549, 8588), 'torch.cat', 'torch.cat', (['[enc_mask, data_mask]'], {'dim': '(1)'}), '([enc_mask, data_mask], dim=1)\n', (8558, 8588), False, 'import torch\n'), ((10886, 10919), 'torch.nn.functional.one_hot', 'F.one_hot', (['seg_mat'], {'num_classes': '(2)'}), '(seg_mat, num_classes=2)\n', (10895, 10919), True, 'from torch.nn import functional as F\n'), ((8259, 8303), 'torch.zeros', 'torch.zeros', (['[data_mask.shape[0], mlen, bsz]'], {}), 
'([data_mask.shape[0], mlen, bsz])\n', (8270, 8303), False, 'import torch\n'), ((8902, 8917), 'torch.eye', 'torch.eye', (['qlen'], {}), '(qlen)\n', (8911, 8917), False, 'import torch\n'), ((16828, 16852), 'torch.ones', 'torch.ones', (['n_ctx', 'n_ctx'], {}), '(n_ctx, n_ctx)\n', (16838, 16852), False, 'import torch\n'), ((8459, 8506), 'torch.zeros', 'torch.zeros', (['[data_mask.shape[0], enc_len, bsz]'], {}), '([data_mask.shape[0], enc_len, bsz])\n', (8470, 8506), False, 'import torch\n'), ((8999, 9024), 'torch.zeros', 'torch.zeros', (['[qlen, mlen]'], {}), '([qlen, mlen])\n', (9010, 9024), False, 'import torch\n'), ((9133, 9161), 'torch.zeros', 'torch.zeros', (['[qlen, enc_len]'], {}), '([qlen, enc_len])\n', (9144, 9161), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""Make the double periodic shear test grid.

Builds a 256x256 uniform grid over [0, 2*pi]^2, assigns a double-shear-layer
initial condition (two tanh shear layers in x-velocity plus a sinusoidal
y-velocity perturbation), writes it to HDF5, and plots both velocity fields.
"""
import matplotlib.pyplot as plt
from configparser import ConfigParser
import numpy as np
import sys
import os

sys.path.append(os.path.abspath("../../.."))
from pycato import *

# Make the empty grid
domain = make_uniform_grid(
    n_cells=(256, 256),
    xrange=(0, 2 * np.pi),
    yrange=(0, 2 * np.pi),
    input_file="input.ini",
)

# Set the initial conditions
rho_0 = np.pi / 15  # shear-layer thickness parameter
delta = 1  # perturbation amplitude
domain["rho"] = domain["rho"] * rho_0
domain["p"] = domain["p"] * 4.0
x = domain["xc"].m
y = domain["yc"].m

# The u and v arrays depend on the location w/in the grid.
# Since they're cell-centered quantities, they need the location
# of the cell center (xc, yc)
v = delta * np.sin(x)

# Vectorized shear profile: one tanh layer in each half of the periodic
# domain.  np.where evaluates both branches element-wise and selects per
# cell, replacing the original per-cell Python double loop.
u = np.where(
    y <= np.pi,
    np.tanh((y - np.pi / 2) / rho_0),
    np.tanh((1.5 * np.pi - y) / rho_0),
)

domain["u"] = u * ureg("cm/s")
domain["v"] = v * ureg("cm/s")

write_initial_hdf5(filename="double_shear", initial_condition_dict=domain)

# Plot the results
fig, (ax1, ax2) = plt.subplots(figsize=(18, 8), nrows=1, ncols=2)

vc = ax1.pcolormesh(
    domain["x"].m,
    domain["y"].m,
    domain["v"].m,
    edgecolor="k",
    lw=0.001,
    cmap="RdBu",
    antialiased=True,
)
fig.colorbar(vc, ax=ax1, label="Y Velocity")
ax1.set_xlabel("X")
ax1.set_ylabel("Y")

uc = ax2.pcolormesh(
    domain["x"].m,
    domain["y"].m,
    domain["u"].m,
    edgecolor="k",
    lw=0.001,
    cmap="RdBu",
    antialiased=True,
)
ax2.set_xlabel("X")
ax2.set_ylabel("Y")
fig.colorbar(uc, ax=ax2, label="X Velocity")

ax1.axis("equal")
ax2.axis("equal")
plt.show()
| [
"numpy.sin",
"numpy.tanh",
"os.path.abspath",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((1174, 1221), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(18, 8)', 'nrows': '(1)', 'ncols': '(2)'}), '(figsize=(18, 8), nrows=1, ncols=2)\n', (1186, 1221), True, 'import matplotlib.pyplot as plt\n'), ((1735, 1745), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1743, 1745), True, 'import matplotlib.pyplot as plt\n'), ((198, 225), 'os.path.abspath', 'os.path.abspath', (['"""../../.."""'], {}), "('../../..')\n", (213, 225), False, 'import os\n'), ((760, 769), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (766, 769), True, 'import numpy as np\n'), ((881, 919), 'numpy.tanh', 'np.tanh', (['((y[i, j] - np.pi / 2) / rho_0)'], {}), '((y[i, j] - np.pi / 2) / rho_0)\n', (888, 919), True, 'import numpy as np\n'), ((956, 996), 'numpy.tanh', 'np.tanh', (['((1.5 * np.pi - y[i, j]) / rho_0)'], {}), '((1.5 * np.pi - y[i, j]) / rho_0)\n', (963, 996), True, 'import numpy as np\n')] |