code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import random
import struct
import sys
import time
from pathlib import Path
import numpy as np
from SimulaQron.general.hostConfig import *
from SimulaQron.cqc.backend.cqcHeader import *
from SimulaQron.cqc.pythonLib.cqc import *
from flow import circuit_file_to_flow, count_qubits_in_sequence
from angle import measure_angle
# Randomly select circuit from circuits directory
circuits_path = Path(".") / "circuits"
circuit_file_paths = list(circuits_path.glob("*.json"))
circuit = random.choice(circuit_file_paths)
# Load circuit as MBQC flow
print("Client Loading {}".format(circuit))
# BUG FIX: actually load the randomly selected circuit.  Previously a
# hard-coded "./circuits/circuit1.json" was loaded here, contradicting the
# "Client Loading ..." message printed above.
seq_out = circuit_file_to_flow(str(circuit))
# Determine number of qubits our circuit needs
nQubits = count_qubits_in_sequence(seq_out)
# Initialize measurements count and entanglement lists
nMeasurement = 0
E1 = []
E2 = []
# Walk the flow sequence to build the entanglement lists and count measurements.
for s in seq_out:
    s.printinfo()
    if s.type == "E":
        E1.append(s.qubits[0])
        E2.append(s.qubits[1])
    if s.type == "M":
        nMeasurement += 1
# Outcome of each qubit will be stored in this outcome list
outcome = nQubits * [-1]
server_name = "Charlie"
with CQCConnection("Bob") as client:
    print("Client Sending (classical): Create {} qubits".format(nQubits))
    client.sendClassical(server_name, nQubits)
    angles = []
    for i in range(0, nQubits):
        # One random basis angle per qubit; 0..255 encodes a multiple of 2*pi/256.
        rand_angle = int(256 * random.random())
        angles.append(rand_angle)
        q = qubit(client)
        q.rot_Y(64)  # |+> state
        q.rot_Z(rand_angle)
        print("Client Sending (quantum): qubit {}".format(i + 1))
        client.sendQubit(q, server_name)
    time.sleep(1)
    print("Client Sending (classical): Ask to perform {} measurements".format(nQubits))
    client.sendClassical(server_name, nMeasurement)
    time.sleep(1)
    print("Client Sending (classical): List of 1st Qubits to Entangle".format(nQubits))
    client.sendClassical(server_name, E1)
    time.sleep(1)
    print("Client Sending (classical): List of 2nd Qubits to Entangle".format(nQubits))
    client.sendClassical(server_name, E2)
    for s in seq_out:
        if s.type == "M":
            # Which qubit are we measuring?
            qubit_n = s.qubit
            # What is the angle we wish to measure
            computation_angle = s.angle
            input_angle = angles[qubit_n]
            # Calculate the angle to send with randomisation applied
            r = np.round(random.random())
            angle_to_send = measure_angle(
                qubit_n, seq_out, outcome, input_angle, computation_angle
            ) + r * (np.pi)
            print("Client Sending (classical): ask to measure qubit {}".format(qubit_n))
            time.sleep(1)
            client.sendClassical(server_name, qubit_n)
            print(
                "Client Sending (classical): measurement angle {}".format(angle_to_send)
            )
            time.sleep(1)
            client.sendClassical(server_name, angle_to_send)
            m = int.from_bytes(client.recvClassical(), "little")
            print("Client Received: result {}".format(m))
            # We adjust for the randomness only we know we added
            if r == 1:
                outcome[qubit_n - 1] = 1 - m
            else:
                outcome[qubit_n - 1] = m
    print("Client Output: {}".format(outcome))
    sys.exit(0)
| [
"flow.count_qubits_in_sequence",
"random.choice",
"pathlib.Path",
"angle.measure_angle",
"time.sleep",
"flow.circuit_file_to_flow",
"sys.exit",
"random.random"
] | [((483, 516), 'random.choice', 'random.choice', (['circuit_file_paths'], {}), '(circuit_file_paths)\n', (496, 516), False, 'import random\n'), ((599, 647), 'flow.circuit_file_to_flow', 'circuit_file_to_flow', (['"""./circuits/circuit1.json"""'], {}), "('./circuits/circuit1.json')\n", (619, 647), False, 'from flow import circuit_file_to_flow, count_qubits_in_sequence\n'), ((706, 739), 'flow.count_qubits_in_sequence', 'count_qubits_in_sequence', (['seq_out'], {}), '(seq_out)\n', (730, 739), False, 'from flow import circuit_file_to_flow, count_qubits_in_sequence\n'), ((394, 403), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (398, 403), False, 'from pathlib import Path\n'), ((1675, 1688), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1685, 1688), False, 'import time\n'), ((1833, 1846), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1843, 1846), False, 'import time\n'), ((1981, 1994), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1991, 1994), False, 'import time\n'), ((3388, 3399), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3396, 3399), False, 'import sys\n'), ((2740, 2753), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2750, 2753), False, 'import time\n'), ((2944, 2957), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2954, 2957), False, 'import time\n'), ((1425, 1440), 'random.random', 'random.random', ([], {}), '()\n', (1438, 1440), False, 'import random\n'), ((2476, 2491), 'random.random', 'random.random', ([], {}), '()\n', (2489, 2491), False, 'import random\n'), ((2521, 2593), 'angle.measure_angle', 'measure_angle', (['qubit_n', 'seq_out', 'outcome', 'input_angle', 'computation_angle'], {}), '(qubit_n, seq_out, outcome, input_angle, computation_angle)\n', (2534, 2593), False, 'from angle import measure_angle\n')] |
import unittest
from unittest.mock import MagicMock
from datetime import timedelta
from osgar.bus import Bus
from osgar.node import Node
class NodeTest(unittest.TestCase):
    def test_usage(self):
        """A node can be started and shut down without errors."""
        cfg = {}
        message_bus = Bus(logger=MagicMock())
        node = Node(config=cfg, bus=message_bus.handle('mynode'))
        node.start()
        node.request_stop()
        node.join()

    def test_update(self):
        """update() pulls one published message and mirrors it onto attributes."""
        cfg = {}
        message_bus = Bus(logger=MagicMock())
        node = Node(config=cfg, bus=message_bus.handle('mynode'))
        publisher = message_bus.handle('tester')
        publisher.register('vel')
        message_bus.connect('tester.vel', 'mynode.vel')
        published_at = publisher.publish('vel', 3)
        node.update()
        self.assertEqual(node.time, published_at)
        self.assertEqual(node.vel, 3)
        # A node that never received the stream must not grow the attribute.
        node2 = Node(config=cfg, bus=message_bus.handle('mynode2'))
        self.assertNotIn('vel', dir(node2))
# vim: expandtab sw=4 ts=4
| [
"unittest.mock.MagicMock"
] | [((253, 264), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (262, 264), False, 'from unittest.mock import MagicMock\n'), ((481, 492), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (490, 492), False, 'from unittest.mock import MagicMock\n')] |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
# NOTE: the chromedriver path is machine-specific; adjust for the local setup.
driver = webdriver.Chrome(executable_path="E:/SQA/chromedriver_win32/chromedriver.exe")


def _report_title(driver, wait_seconds=5):
    """Give the page time to settle, then print its title."""
    time.sleep(wait_seconds)
    print(driver.title)


try:
    # Demonstrate navigate / back / forward history traversal.
    driver.get("http://newtours.demoaut.com/")
    _report_title(driver)
    driver.get("https://www.google.com/")
    _report_title(driver)
    driver.back()
    _report_title(driver)
    driver.forward()
    _report_title(driver)
finally:
    # BUG FIX: quit() ends the whole chromedriver session; close() only closed
    # the window and leaked the driver process.  try/finally guarantees cleanup
    # even when a navigation step raises.
    driver.quit()
"selenium.webdriver.Chrome",
"time.sleep"
] | [((101, 179), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'executable_path': '"""E:/SQA/chromedriver_win32/chromedriver.exe"""'}), "(executable_path='E:/SQA/chromedriver_win32/chromedriver.exe')\n", (117, 179), False, 'from selenium import webdriver\n'), ((223, 236), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (233, 236), False, 'import time\n'), ((298, 311), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (308, 311), False, 'import time\n'), ((349, 362), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (359, 362), False, 'import time\n'), ((403, 416), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (413, 416), False, 'import time\n')] |
import asyncio
import datetime
import json
import warnings
from pathlib import Path
from typing import Tuple, Optional, TYPE_CHECKING
import aiorpcx
from modules import config
from modules.electrum_mods.functions import BIP32Node, pubkey_to_address, address_to_script, \
script_to_scripthash, constants
from modules.electrumx import ElectrumX, ElectrumError
from modules.helpers import inv_dict, timestamp
from modules.logging import logger
from modules.models import database, Payment, Invoice
from modules.webhooks import send_webhook
if TYPE_CHECKING:
from modules.electrum_mods import PartialTransaction
class CoinNetwork(constants.AbstractNet):
    """Describe a coin network and drive its ElectrumX-backed operations.

    Subclasses are expected to override the class-level constants (``symbol``,
    header maps, ``BIP44_COIN_TYPE``, ...) per supported coin.  Instances
    expose address derivation from a configured xpub, block / feerate
    tracking, and the long-running :meth:`watch_payment` loop.
    """
    symbol = ""
    name = ""
    segwit = True
    address_format = "p2wpkh"
    bip21_intent = "bitcoin:"
    kraken = ""  # NOTE(review): presumably a Kraken exchange ticker -- confirm
    electrumX = None
    decimals = 8  # decimal places per coin unit (8 -> satoshis)
    # NOTE(review): class-level mutable dict -- shared across all instances of
    # all subclasses; confirm that is intended (see watch_payment).
    watched_payments = {}
    TESTNET = False
    SIGHASH_FLAG = 0x00000001
    TX_VERSION = 2
    XPRV_HEADERS = {}
    XPUB_HEADERS = {}
    WIF_PREFIX = None
    BIP44_COIN_TYPE = -1
    PEER_DEFAULT_PORTS = {'t': 50001, 's': 50002}

    def __init__(self):
        self.electrumX = ElectrumX(self.symbol, default_ports=self.PEER_DEFAULT_PORTS)
        # Reverse lookup tables: header bytes -> derivation-scheme name.
        self.XPRV_HEADERS_INV = inv_dict(self.XPRV_HEADERS)
        self.XPUB_HEADERS_INV = inv_dict(self.XPUB_HEADERS)
        # Lazily-initialised state for the header subscription (current_block).
        self._block_queue = None
        self._block_lock = None
        self._relayfee: Optional[Tuple[int, float]] = None
        self._current_block: Optional[int] = None
        self._current_feerate: Optional[Tuple[int, float]] = None
        self._svg_icon = None  # cache filled by the `icon` property
        xpub = config.xpubs.get(self.symbol)
        if xpub is None:
            warnings.warn(f"No xPub added for {self.symbol} - address generation disabled")
            self.xpub_node = None
            self.xpub_derivation = None
        else:
            self.xpub_node = BIP32Node.from_xkey(xpub['xpub'], net=self)
            self.xpub_derivation = xpub['derivation_path']

    @property
    def icon(self):
        """SVG icon text for this coin, read once from disk and cached."""
        if self._svg_icon is not None:
            return self._svg_icon
        # Testnet symbols are prefixed (e.g. "tBTC") but share the mainnet icon.
        filename = Path(__file__).parent / 'icons' / f"{self.symbol[1:] if self.TESTNET else self.symbol}.svg"
        if filename.exists():
            self._svg_icon = filename.read_text()
        else:
            self._svg_icon = ""
        return self._svg_icon

    @property
    def icon_b64(self):
        # NOTE(review): despite the name this returns the raw cached SVG text
        # (and None if `icon` was never accessed), not base64 -- confirm callers.
        return self._svg_icon

    def sats_to_coin(self, sats: Optional[int]) -> Optional[float]:
        """Convert an integer amount of base units to a float coin amount."""
        if sats is None:
            return None
        return round(sats * 10 ** -self.decimals, self.decimals)

    def coin_to_sats(self, amt):
        """Convert a coin amount to an integer amount of base units."""
        return int(round(amt * 10 ** self.decimals))

    @property
    def root_symbol(self):
        """Mainnet symbol; strips the leading testnet marker ("tBTC" -> "BTC")."""
        if self.TESTNET:
            return self.symbol[1:]
        return self.symbol

    @property
    def default_xpub_derivation_path(self):
        """Default hardened derivation path: BIP-84 for segwit, else BIP-44."""
        return "/".join(['84h' if self.segwit else '44h',
                         '1h' if self.TESTNET else f"{self.BIP44_COIN_TYPE}h",
                         '0h'])

    @property
    async def relay_fee(self):
        """Minimum relay fee reported by the Electrum server, in base units."""
        res = await self.electrum_call(ElectrumX.blockchain_relayfee, [])
        assert res > 0
        # check response
        relay_fee = int(res * 10 ** self.decimals)
        relay_fee = max(0, relay_fee)
        return relay_fee

    @property
    async def current_block(self):
        """Current chain height; subscribes to header updates on first use."""
        if self._block_lock is None:
            self._block_lock = asyncio.Lock()
        # The lock makes sure only one caller performs the initial subscription.
        async with self._block_lock:
            if self._current_block is not None:
                return self._current_block
            self._block_queue = asyncio.Queue()
            ret = await self.electrumX.call(ElectrumX.blockchain_headers_subscribe, [], self._block_queue)
            self._current_block = int(ret[0]['height'])
            # Keep _current_block fresh in the background from now on.
            asyncio.create_task(self.watch_blocks())
        return self._current_block

    async def watch_blocks(self):
        """Background task: consume header notifications and track tip height."""
        while True:
            ret = await self._block_queue.get()
            try:
                self.electrumX.validate_elextrumx_call(ElectrumX.blockchain_headers_subscribe, ret)
            except ElectrumError:
                # Malformed notification: penalize the server, wait for the next.
                await self.electrumX.penalize_server()
                continue
            logger.info(f"{self.symbol} - new block @ {ret[0]['height']}")
            self._current_block = int(ret[0]['height'])

    @property
    async def current_feerate(self):
        """Estimated next-block feerate in base units per byte."""
        # Cached feerate
        # NOTE(review): _current_feerate is read here but never written in this
        # class, so the cache branch can only hit if a subclass populates it.
        if self._current_feerate is not None and self._current_feerate[0] == (await self.current_block):
            return self._current_feerate[1]
        fee_estimate = await self.electrum_call(ElectrumX.blockchain_estimatefee, [1])
        if fee_estimate and fee_estimate > 0:
            # Convert to sat/byte
            # NOTE(review): divides by 1024 although estimatefee is per 1000
            # bytes in Bitcoin-style daemons -- confirm intended.
            fee_estimate /= 1024
            fee_estimate *= 10 ** self.decimals
        else:
            # Fallback to coin default
            fee_estimate = float(self.config('fallback_feerate'))
        return fee_estimate

    def config(self, key, default=None):
        """
        Shorthand to get config entries for the parent coin, equivalent to `modules.config.get(key, coin=self.symbol)`
        """
        return config.get(key, coin=self.symbol, default=default)

    async def get_transactions(self, script_hash, ignored_tx_hashes=None):
        """Fetch verbose transactions for a scripthash's history, keyed by txid."""
        transactions = {}
        history = await self.electrum_call(ElectrumX.blockchain_scripthash_get_history, [script_hash])
        for tx in history:
            tx_hash = tx.get("tx_hash")
            if ignored_tx_hashes and tx_hash in ignored_tx_hashes:
                continue
            transactions[tx_hash] = await self.electrum_call(ElectrumX.blockchain_transaction_get, [tx_hash, True])
            if 'fee' in tx:
                # Mempool history entries carry their fee in the history item.
                transactions[tx_hash]['mempool_fee'] = tx['fee']
        return transactions

    async def watch_payment(self, payment: dict):
        """Monitor one payment until it is confirmed or expired.

        Subscribes to the payment's scripthash, re-evaluates received amounts
        on every scripthash change (then on every new block while waiting for
        confirmations), persists payment/invoice changes to the database, and
        fires webhooks / e-mail notifications on invoice status transitions.
        """
        logger.info(f"Watching payment {payment['uuid']}")
        queue = asyncio.Queue()
        self.watched_payments[payment['uuid']] = payment
        original_payment = payment.copy()
        first_loop = True
        awaiting_mempool = True
        awaiting_confirmations = True
        current_block = await self.current_block
        script_hash = payment['scripthash']
        ignored_tx_hashes = set()
        while True:
            if awaiting_mempool:
                if first_loop:
                    first_loop = False
                    logger.info(f"Subscribing to scripthash")
                    await self.electrum_call(ElectrumX.blockchain_scripthash_subscribe, [script_hash], queue)
                else:
                    logger.info(f"Waiting for scripthash changes")
                    await queue.get()
                    logger.info(f"Done waiting")
            elif awaiting_confirmations:
                logger.info("waiting for new block")
                while current_block == await self.current_block:
                    await asyncio.sleep(1)
                current_block = await self.current_block
            else:
                logger.info(f"Finished watching payment {payment['uuid']}")
                break
            logger.info(f"Payment Update: {payment['uuid']} - {script_hash}")
            all_transactions = await self.get_transactions(script_hash, ignored_tx_hashes=ignored_tx_hashes)
            # Check if "received"
            # Keep only transactions newer than the payment; remember older ones
            # so they are skipped on the next pass.
            valid_tx = []
            for x in all_transactions.values():
                if 'time' not in x:
                    valid_tx.append(x)
                elif datetime.datetime.utcfromtimestamp(x.get('time', 0) or 0) > payment['creation_date']:
                    valid_tx.append(x)
                else:
                    ignored_tx_hashes.add(x.get("tx_hash"))
            payment['transactions'] = valid_tx
            mempool_sats = 0
            chain_sats = 0
            confirmed_sats = 0
            req_confirmations = int(self.config('required_confirmations', default=6))
            # Tally outputs paying our address at each confirmation level.
            for tx in valid_tx:
                for vout in tx['vout']:
                    if "addresses" in vout['scriptPubKey']:
                        addresses = vout['scriptPubKey']['addresses']
                    elif "address" in vout['scriptPubKey']:
                        addresses = [vout['scriptPubKey']['address']]
                    else:
                        raise ElectrumError("No Addresses in vout")
                    for addr in addresses:
                        if addr == payment['address']:
                            sats = int(round(vout['value'] * 10 ** self.decimals))
                            mempool_sats += sats
                            confirmations = tx.get("confirmations", 0)
                            # If instantsend lock is present, treat as if 1 confirmation
                            if confirmations == 0 and tx.get("instantlock"):
                                warnings.warn("test instantlock")
                                confirmations = 1
                            if confirmations > 0 or req_confirmations == 0:
                                chain_sats += sats
                            if confirmations >= req_confirmations:
                                if req_confirmations == 0 and confirmations == 0:
                                    warnings.warn("Check zeroconf fees")
                                    # CHECK IF mempool_fee is greater than the coins current next-block feerate
                                    mempool_fee = tx.get("mempool_fee")
                                    # If ElectrumX doesn't return this it will need to get calculated manually
                                confirmed_sats += sats
            if datetime.datetime.utcnow() > payment['expiry_date'] and payment['status'] == "pending":
                payment['status'] = "expired"
                payment['last_update'] = timestamp()
                awaiting_confirmations = False
            else:
                if confirmed_sats >= payment['amount_sats']:
                    awaiting_confirmations = False
                    if payment['status'] != "confirmed":
                        payment['status'] = "confirmed"
                        payment['payment_date'] = payment['payment_date'] or datetime.datetime.utcnow()
                        payment['paid_amount_sats'] = payment['paid_amount_sats'] or mempool_sats
                        payment['last_update'] = timestamp()
                if mempool_sats >= payment['amount_sats']:
                    if payment['status'] == 'pending':
                        payment['status'] = 'paid'
                        payment['payment_date'] = payment['payment_date'] or datetime.datetime.utcnow()
                        payment['paid_amount_sats'] = payment['paid_amount_sats'] or mempool_sats
                        payment['last_update'] = timestamp()
            if awaiting_mempool:
                # Once fully paid (or expired) stop listening for mempool changes.
                if payment['status'] in ('expired', 'confirmed') or chain_sats > payment['amount_sats']:
                    awaiting_mempool = False
                    await self.unsubscribe_electrumx("blockchain.scripthash.unsubscribe",
                                                     [payment['scripthash']], queue)
            # Persist only the fields that actually changed this iteration.
            changes = {k: payment[k] for k, v in original_payment.items() if v != payment[k] and k != "transactions"}
            if (tx_serialized := json.dumps(valid_tx)) != original_payment.get('transactions'):
                changes['transactions'] = tx_serialized
            if changes:
                await database.execute(Payment.update()
                                       .where(Payment.c.id == payment['id'])
                                       .values(**changes))
                # If the payment changes, the invoice should as well, calculate the invoice status now
                invoice = await database.fetch_one(Invoice.select().where(Invoice.c.id == payment['invoice_id']))
                invoice = dict(invoice)
                original_invoice = invoice.copy()
                if payment['status'] == 'confirmed' and invoice['status'] in ('pending', 'paid'):
                    invoice['status'] = "confirmed"
                elif payment['status'] == 'paid' and invoice['status'] == 'pending':
                    invoice['status'] = "paid"
                if invoice['status'] != original_invoice['status']:
                    if config.get("payment_callback_url"):
                        asyncio.create_task(send_webhook(invoice, payment))
                    await database.execute(Invoice.update().where(Invoice.c.id == invoice['id']).values(
                        **{"status": invoice['status'], "payment_date": payment['payment_date']}))
                    if invoice['status'] == 'confirmed':
                        if config.check("email_notifications", namespace="EMAIL"):
                            from modules.email import email_invoice
                            asyncio.create_task(email_invoice(invoice))
            self.watched_payments[payment['uuid']] = payment

    def make_address(self, xpub_node: BIP32Node = None, account=0, index=0) -> str:
        """Derive the address at /account/index from the (given or configured) xpub."""
        assert isinstance(index, int) and index >= 0
        assert isinstance(account, int) and account >= 0
        xpub_node = xpub_node or self.xpub_node
        subkey = xpub_node.subkey_at_public_derivation(f"/{account}/{index}")
        pubkey = subkey.eckey.get_public_key_bytes(compressed=True).hex()
        return pubkey_to_address(self.address_format, pubkey, net=self)

    def address_to_script(self, address):
        """Return the output script for *address* on this network."""
        return address_to_script(address, net=self)

    def address_to_scripthash(self, address):
        """Return the Electrum scripthash for *address* on this network."""
        script = self.address_to_script(address)
        return script_to_scripthash(script)

    async def electrum_call(self, method, params=None, queue=None):
        """Proxy a call to the shared ElectrumX client."""
        return await self.electrumX.call(method, params, queue=queue)

    async def unsubscribe_electrumx(self, method, params=None, queue=None):
        """Best-effort unsubscribe of *queue* from an ElectrumX subscription."""
        assert queue is not None
        logger.info(f"Unsubscribing from {method} {params}")
        await self.electrumX.get_session()
        try:
            await self.electrumX.session.send_request(method, params)
        except aiorpcx.RPCError:
            # not all servers implement this
            pass
        self.electrumX.unsubscribe(queue)

    def estimate_tx_size(self, tx: 'PartialTransaction'):
        """Estimated serialized size of *tx* on this network, in bytes."""
        return tx.estimated_size(self)
| [
"modules.helpers.timestamp",
"modules.config.get",
"modules.config.check",
"modules.models.Payment.update",
"modules.electrumx.ElectrumX",
"modules.models.Invoice.select",
"modules.logging.logger.info",
"modules.electrumx.ElectrumError",
"modules.electrum_mods.functions.pubkey_to_address",
"pathli... | [((1114, 1175), 'modules.electrumx.ElectrumX', 'ElectrumX', (['self.symbol'], {'default_ports': 'self.PEER_DEFAULT_PORTS'}), '(self.symbol, default_ports=self.PEER_DEFAULT_PORTS)\n', (1123, 1175), False, 'from modules.electrumx import ElectrumX, ElectrumError\n'), ((1208, 1235), 'modules.helpers.inv_dict', 'inv_dict', (['self.XPRV_HEADERS'], {}), '(self.XPRV_HEADERS)\n', (1216, 1235), False, 'from modules.helpers import inv_dict, timestamp\n'), ((1268, 1295), 'modules.helpers.inv_dict', 'inv_dict', (['self.XPUB_HEADERS'], {}), '(self.XPUB_HEADERS)\n', (1276, 1295), False, 'from modules.helpers import inv_dict, timestamp\n'), ((1583, 1612), 'modules.config.xpubs.get', 'config.xpubs.get', (['self.symbol'], {}), '(self.symbol)\n', (1599, 1612), False, 'from modules import config\n'), ((5148, 5198), 'modules.config.get', 'config.get', (['key'], {'coin': 'self.symbol', 'default': 'default'}), '(key, coin=self.symbol, default=default)\n', (5158, 5198), False, 'from modules import config\n'), ((5859, 5909), 'modules.logging.logger.info', 'logger.info', (['f"""Watching payment {payment[\'uuid\']}"""'], {}), '(f"Watching payment {payment[\'uuid\']}")\n', (5870, 5909), False, 'from modules.logging import logger\n'), ((5927, 5942), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (5940, 5942), False, 'import asyncio\n'), ((13416, 13472), 'modules.electrum_mods.functions.pubkey_to_address', 'pubkey_to_address', (['self.address_format', 'pubkey'], {'net': 'self'}), '(self.address_format, pubkey, net=self)\n', (13433, 13472), False, 'from modules.electrum_mods.functions import BIP32Node, pubkey_to_address, address_to_script, script_to_scripthash, constants\n'), ((13531, 13567), 'modules.electrum_mods.functions.address_to_script', 'address_to_script', (['address'], {'net': 'self'}), '(address, net=self)\n', (13548, 13567), False, 'from modules.electrum_mods.functions import BIP32Node, pubkey_to_address, address_to_script, script_to_scripthash, constants\n'), 
((13679, 13707), 'modules.electrum_mods.functions.script_to_scripthash', 'script_to_scripthash', (['script'], {}), '(script)\n', (13699, 13707), False, 'from modules.electrum_mods.functions import BIP32Node, pubkey_to_address, address_to_script, script_to_scripthash, constants\n'), ((13965, 14017), 'modules.logging.logger.info', 'logger.info', (['f"""Unsubscribing from {method} {params}"""'], {}), "(f'Unsubscribing from {method} {params}')\n", (13976, 14017), False, 'from modules.logging import logger\n'), ((1650, 1729), 'warnings.warn', 'warnings.warn', (['f"""No xPub added for {self.symbol} - address generation disabled"""'], {}), "(f'No xPub added for {self.symbol} - address generation disabled')\n", (1663, 1729), False, 'import warnings\n'), ((1847, 1890), 'modules.electrum_mods.functions.BIP32Node.from_xkey', 'BIP32Node.from_xkey', (["xpub['xpub']"], {'net': 'self'}), "(xpub['xpub'], net=self)\n", (1866, 1890), False, 'from modules.electrum_mods.functions import BIP32Node, pubkey_to_address, address_to_script, script_to_scripthash, constants\n'), ((3421, 3435), 'asyncio.Lock', 'asyncio.Lock', ([], {}), '()\n', (3433, 3435), False, 'import asyncio\n'), ((3598, 3613), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (3611, 3613), False, 'import asyncio\n'), ((4207, 4269), 'modules.logging.logger.info', 'logger.info', (['f"""{self.symbol} - new block @ {ret[0][\'height\']}"""'], {}), '(f"{self.symbol} - new block @ {ret[0][\'height\']}")\n', (4218, 4269), False, 'from modules.logging import logger\n'), ((7126, 7191), 'modules.logging.logger.info', 'logger.info', (['f"""Payment Update: {payment[\'uuid\']} - {script_hash}"""'], {}), '(f"Payment Update: {payment[\'uuid\']} - {script_hash}")\n', (7137, 7191), False, 'from modules.logging import logger\n'), ((9838, 9849), 'modules.helpers.timestamp', 'timestamp', ([], {}), '()\n', (9847, 9849), False, 'from modules.helpers import inv_dict, timestamp\n'), ((2077, 2091), 'pathlib.Path', 'Path', (['__file__'], {}), 
'(__file__)\n', (2081, 2091), False, 'from pathlib import Path\n'), ((6410, 6451), 'modules.logging.logger.info', 'logger.info', (['f"""Subscribing to scripthash"""'], {}), "(f'Subscribing to scripthash')\n", (6421, 6451), False, 'from modules.logging import logger\n'), ((6604, 6650), 'modules.logging.logger.info', 'logger.info', (['f"""Waiting for scripthash changes"""'], {}), "(f'Waiting for scripthash changes')\n", (6615, 6650), False, 'from modules.logging import logger\n'), ((6709, 6737), 'modules.logging.logger.info', 'logger.info', (['f"""Done waiting"""'], {}), "(f'Done waiting')\n", (6720, 6737), False, 'from modules.logging import logger\n'), ((6795, 6831), 'modules.logging.logger.info', 'logger.info', (['"""waiting for new block"""'], {}), "('waiting for new block')\n", (6806, 6831), False, 'from modules.logging import logger\n'), ((7031, 7090), 'modules.logging.logger.info', 'logger.info', (['f"""Finished watching payment {payment[\'uuid\']}"""'], {}), '(f"Finished watching payment {payment[\'uuid\']}")\n', (7042, 7090), False, 'from modules.logging import logger\n'), ((9663, 9689), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (9687, 9689), False, 'import datetime\n'), ((11343, 11363), 'json.dumps', 'json.dumps', (['valid_tx'], {}), '(valid_tx)\n', (11353, 11363), False, 'import json\n'), ((12362, 12396), 'modules.config.get', 'config.get', (['"""payment_callback_url"""'], {}), "('payment_callback_url')\n", (12372, 12396), False, 'from modules import config\n'), ((12755, 12809), 'modules.config.check', 'config.check', (['"""email_notifications"""'], {'namespace': '"""EMAIL"""'}), "('email_notifications', namespace='EMAIL')\n", (12767, 12809), False, 'from modules import config\n'), ((10391, 10402), 'modules.helpers.timestamp', 'timestamp', ([], {}), '()\n', (10400, 10402), False, 'from modules.helpers import inv_dict, timestamp\n'), ((10820, 10831), 'modules.helpers.timestamp', 'timestamp', ([], {}), '()\n', (10829, 10831), 
False, 'from modules.helpers import inv_dict, timestamp\n'), ((6923, 6939), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (6936, 6939), False, 'import asyncio\n'), ((8323, 8360), 'modules.electrumx.ElectrumError', 'ElectrumError', (['"""No Addresses in vout"""'], {}), "('No Addresses in vout')\n", (8336, 8360), False, 'from modules.electrumx import ElectrumX, ElectrumError\n'), ((10217, 10243), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (10241, 10243), False, 'import datetime\n'), ((10646, 10672), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (10670, 10672), False, 'import datetime\n'), ((12442, 12472), 'modules.webhooks.send_webhook', 'send_webhook', (['invoice', 'payment'], {}), '(invoice, payment)\n', (12454, 12472), False, 'from modules.webhooks import send_webhook\n'), ((12919, 12941), 'modules.email.email_invoice', 'email_invoice', (['invoice'], {}), '(invoice)\n', (12932, 12941), False, 'from modules.email import email_invoice\n'), ((8862, 8895), 'warnings.warn', 'warnings.warn', (['"""test instantlock"""'], {}), "('test instantlock')\n", (8875, 8895), False, 'import warnings\n'), ((11834, 11850), 'modules.models.Invoice.select', 'Invoice.select', ([], {}), '()\n', (11848, 11850), False, 'from modules.models import database, Payment, Invoice\n'), ((9260, 9296), 'warnings.warn', 'warnings.warn', (['"""Check zeroconf fees"""'], {}), "('Check zeroconf fees')\n", (9273, 9296), False, 'import warnings\n'), ((11526, 11542), 'modules.models.Payment.update', 'Payment.update', ([], {}), '()\n', (11540, 11542), False, 'from modules.models import database, Payment, Invoice\n'), ((12517, 12533), 'modules.models.Invoice.update', 'Invoice.update', ([], {}), '()\n', (12531, 12533), False, 'from modules.models import database, Payment, Invoice\n')] |
"""Unit tests for direction_updater.py."""
# standard library
import argparse
import unittest
from unittest.mock import MagicMock
# py3tester coverage target
__test_target__ = 'delphi.epidata.acquisition.covidcast.direction_updater'
class UnitTests(unittest.TestCase):
    """Basic unit tests."""

    def test_get_argument_parser(self):
        """Return a parser for command-line arguments."""
        parser = get_argument_parser()
        self.assertIsInstance(parser, argparse.ArgumentParser)

    def test_main_successful(self):
        """Run the main program, and successfully commit changes."""
        args = MagicMock(partitions=[0, 1])
        database = MagicMock()
        update_loop_mock = MagicMock()
        main(args, database_impl=lambda: database, update_loop_impl=update_loop_mock)
        self.assertTrue(update_loop_mock.called)
        self.assertTrue(database.connect.called)
        self.assertTrue(database.disconnect.called)
        # disconnect(True) means the changes were committed
        self.assertTrue(database.disconnect.call_args[0][0])

    def test_main_unsuccessful(self):
        """Run the main program, but don't commit changes on failure."""
        args = MagicMock(partitions=[0, 1])
        database = MagicMock()
        update_loop_mock = MagicMock(side_effect=Exception('testing'))
        with self.assertRaises(Exception):
            main(args, database_impl=lambda: database, update_loop_impl=update_loop_mock)
        self.assertTrue(update_loop_mock.called)
        self.assertTrue(database.connect.called)
        self.assertTrue(database.disconnect.called)
        # disconnect(False) means the changes were rolled back
        self.assertFalse(database.disconnect.call_args[0][0])

    def test_update_loop(self):
        """Update direction for out-of-date covidcast rows."""
        direction = MagicMock()
        direction.scan_timeseries.return_value = ([10, 11], [20, 21])
        database = MagicMock()
        database.get_keys_with_potentially_stale_direction.return_value = [
            ('source', 'signal', 'geo_type', 'geo_value',
             100, 200, 20200401, 20200423, 123),
        ]
        database.get_data_stdev_across_locations.return_value = [
            ('source', 'signal', 'geo_type', 456),
        ]
        database.get_daily_timeseries_for_direction_update.return_value = [
            (1, 2, 3, 4, 5),
        ]

        update_loop(database, direction_impl=direction)

        self.assertTrue(direction.scan_timeseries.called)
        args = direction.scan_timeseries.call_args[0]
        self.assertEqual(args[:-1], ([1], [2], [3], [4], [5]))

        # exercise the direction classifier callback handed to scan_timeseries
        get_direction_impl = args[-1]
        get_direction_impl('x', 'y')
        self.assertTrue(direction.get_direction.called)
        args, kwargs = direction.get_direction.call_args
        self.assertEqual(args, ('x', 'y'))
        self.assertEqual(kwargs, {
            'n': Constants.SLOPE_STERR_SCALE,
            'limit': 456 * Constants.BASE_SLOPE_THRESHOLD,
        })

        self.assertEqual(database.update_direction.call_count, 2)
        call_args_list = database.update_direction.call_args_list
        self.assertEqual(
            call_args_list[0][0],
            ('source', 'signal', 'day', 'geo_type', 10, 'geo_value', 20))
        self.assertEqual(
            call_args_list[1][0],
            ('source', 'signal', 'day', 'geo_type', 11, 'geo_value', 21))

        self.assertTrue(database.update_timeseries_direction_updated_timestamp.called)
        args = database.update_timeseries_direction_updated_timestamp.call_args[0]
        self.assertEqual(args, ('source', 'signal', 'day', 'geo_type', 'geo_value'))
| [
"unittest.mock.MagicMock"
] | [((579, 607), 'unittest.mock.MagicMock', 'MagicMock', ([], {'partitions': '[0, 1]'}), '(partitions=[0, 1])\n', (588, 607), False, 'from unittest.mock import MagicMock\n'), ((628, 639), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (637, 639), False, 'from unittest.mock import MagicMock\n'), ((710, 721), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (719, 721), False, 'from unittest.mock import MagicMock\n'), ((1161, 1189), 'unittest.mock.MagicMock', 'MagicMock', ([], {'partitions': '[0, 1]'}), '(partitions=[0, 1])\n', (1170, 1189), False, 'from unittest.mock import MagicMock\n'), ((1210, 1221), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (1219, 1221), False, 'from unittest.mock import MagicMock\n'), ((1822, 1833), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (1831, 1833), False, 'from unittest.mock import MagicMock\n'), ((1930, 1941), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (1939, 1941), False, 'from unittest.mock import MagicMock\n')] |
import requests
import os
import logging
import subprocess
import re
import settings
_GITHUB_BASE_URL = 'https://api.github.com'
_REPO_DIR = settings.get('repo_dir', os.getcwd())
_REPO_CNT = settings.get('repo_count', 10)
_QUERY_STRING = settings.get('repo_query_string')
_TOKEN = settings.get('github_token', required=False)
def main():
    """Clone the top ``_REPO_CNT`` GitHub search results for ``_QUERY_STRING``.

    Repositories are cloned into ``_REPO_DIR`` with "owner---name" directory
    names.  ``_TOKEN``, when configured, is sent to raise the API rate limit.
    """
    logging.warning('Starting')
    page_num = 1
    visited_repo_cnt = 1
    while visited_repo_cnt <= _REPO_CNT:
        headers = {'Authorization': f'token {_TOKEN}'} if _TOKEN else None
        r = requests.get(f'{_GITHUB_BASE_URL}/search/repositories?'
                         f'q={_QUERY_STRING}&page={page_num}&per_page=1',
                         headers=headers)
        # Fail fast on API errors (bad token, rate limiting, ...).
        r.raise_for_status()
        items = r.json()['items']
        if not items:
            # BUG FIX: without this the loop would spin forever whenever the
            # search yields fewer repositories than _REPO_CNT.
            logging.warning('No more search results')
            break
        for item in items:
            url = item['clone_url']
            # Flatten "owner/name" into a single directory name.
            repo_name = re.sub('/', '---', item['full_name'])
            args = ['git', 'clone', url, os.path.join(_REPO_DIR, repo_name)]
            # subprocess.run replaces the Popen/communicate pair; a failed
            # clone is deliberately non-fatal (best effort).
            subprocess.run(args, stdout=subprocess.PIPE)
            logging.warning(f'Visited repo={repo_name} [{visited_repo_cnt}/{_REPO_CNT}]')
            visited_repo_cnt += 1
            if visited_repo_cnt > _REPO_CNT:
                break
        page_num += 1
    logging.warning('Done')


if __name__ == '__main__':
    main()
| [
"settings.get",
"subprocess.Popen",
"logging.warning",
"os.path.join",
"requests.get",
"os.getcwd",
"re.sub"
] | [((194, 224), 'settings.get', 'settings.get', (['"""repo_count"""', '(10)'], {}), "('repo_count', 10)\n", (206, 224), False, 'import settings\n'), ((241, 274), 'settings.get', 'settings.get', (['"""repo_query_string"""'], {}), "('repo_query_string')\n", (253, 274), False, 'import settings\n'), ((284, 328), 'settings.get', 'settings.get', (['"""github_token"""'], {'required': '(False)'}), "('github_token', required=False)\n", (296, 328), False, 'import settings\n'), ((169, 180), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (178, 180), False, 'import os\n'), ((347, 374), 'logging.warning', 'logging.warning', (['"""Starting"""'], {}), "('Starting')\n", (362, 374), False, 'import logging\n'), ((1324, 1347), 'logging.warning', 'logging.warning', (['"""Done"""'], {}), "('Done')\n", (1339, 1347), False, 'import logging\n'), ((582, 709), 'requests.get', 'requests.get', (['f"""{_GITHUB_BASE_URL}/search/repositories?q={_QUERY_STRING}&page={page_num}&per_page=1"""'], {'headers': 'headers'}), "(\n f'{_GITHUB_BASE_URL}/search/repositories?q={_QUERY_STRING}&page={page_num}&per_page=1'\n , headers=headers)\n", (594, 709), False, 'import requests\n'), ((896, 933), 're.sub', 're.sub', (['"""/"""', '"""---"""', "item['full_name']"], {}), "('/', '---', item['full_name'])\n", (902, 933), False, 'import re\n'), ((1028, 1074), 'subprocess.Popen', 'subprocess.Popen', (['args'], {'stdout': 'subprocess.PIPE'}), '(args, stdout=subprocess.PIPE)\n', (1044, 1074), False, 'import subprocess\n'), ((1116, 1193), 'logging.warning', 'logging.warning', (['f"""Visited repo={repo_name} [{visited_repo_cnt}/{_REPO_CNT}]"""'], {}), "(f'Visited repo={repo_name} [{visited_repo_cnt}/{_REPO_CNT}]')\n", (1131, 1193), False, 'import logging\n'), ((976, 1010), 'os.path.join', 'os.path.join', (['_REPO_DIR', 'repo_name'], {}), '(_REPO_DIR, repo_name)\n', (988, 1010), False, 'import os\n')] |
#!/user/env python3
# -*- coding: utf-8 -*-
import threading
import time
import os.path
from hashlib import sha256
import bjson
import logging
import random
from binascii import hexlify
from ..config import C, V, PeerToPeerError
from ..client import FileReceiveError, ClientCmd
from .utils import AESCipher
class FileShare:
def __init__(self, pc, path):
self.pc = pc
self.name = os.path.split(path)[1]
self.path = path
self.f_contain = list()
self.content = dict()
@staticmethod
def create_ley():
return AESCipher.create_key()
def share_raw_file(self, pwd=None):
if not os.path.exists(self.path):
raise FileExistsError('Not found file.')
if not os.path.isfile(self.path):
raise Exception('It\'s a directory.')
h_list = list()
sha_hash = sha256()
with open(self.path, mode='br') as f:
while True:
raw = f.read(C.MAX_RECEIVE_SIZE)
if not raw:
break
sha_hash.update(raw)
if pwd:
raw = AESCipher.encrypt(key=pwd, raw=raw)
h_list.append(sha256(raw).digest())
self.pc.share_file(data=raw)
self.content = {
'name': self.name,
'path': self.path,
'size': os.path.getsize(self.path) / 1000,
'element': h_list,
'hash': sha_hash.hexdigest(),
'signer': None,
'sign': None,
'date': time.strftime('%Y-%m-%d %H:%M:%S'),
'time': int(time.time())}
def load_share_file(self):
if len(self.content) != 0:
raise Exception('Already loaded share file.')
with open(self.path, mode='br') as f:
self.content = bjson.load(fp=f)
self.f_contain = [False] * len(self.content['element'])
self.name = self.content['name']
self.path = self.content['path']
def recode_raw_file(self, recode_dir, pwd=None, overwrite=False):
if not os.path.exists(recode_dir):
raise FileNotFoundError('Not found recode dir.')
recode_path = os.path.join(recode_dir, self.name)
if os.path.exists(recode_path) and not overwrite:
raise FileExistsError('You try to overwrite file.')
check = self.check()
if len(check) > 0:
complete = str(round(len(check) / len(self.f_contain) * 100, 2))
raise FileNotFoundError('Isn\'t all file downloaded, ({}% complete)'.format(complete))
sha_hash = sha256()
with open(recode_path, mode='ba') as f:
for h in self.content['element']:
raw = self.pc.get_file(file_hash=hexlify(h).decode())
if pwd:
raw = AESCipher.decrypt(key=pwd, enc=raw)
sha_hash.update(raw)
f.write(raw)
if sha_hash.hexdigest() != self.content['hash']:
raise Exception('SHA256 hash don\'t match.')
def recode_share_file(self, path=None, overwrite=False, compress=False):
if path is None:
path = self.path + '.share'
if os.path.exists(path) and not overwrite:
raise FileExistsError('You try to over write file.')
with open(path, mode='bw') as f:
bjson.dump(self.content, fp=f, compress=compress)
def get_all_binary(self, pwd=None):
result = b''
check = self.check()
sha_hash = sha256()
if len(check) > 0:
complete = str(round(len(check) / len(self.f_contain) * 100, 2))
raise FileNotFoundError('Isn\'t all file downloaded, ({}% complete)'.format(complete))
for h in self.content['element']:
raw = self.pc.get_file(file_hash=hexlify(h).decode())
if pwd:
raw = AESCipher.decrypt(key=pwd, enc=raw)
sha_hash.update(raw)
result += raw
if sha_hash.hexdigest() != self.content['hash']:
raise Exception('SHA256 hash don\'t match.')
return result
def check(self):
# return uncompleted element index
return [i for i in range(len(self.f_contain)) if not self.f_contain[i]]
def remove_sharefile_related(self):
for hash_bin in self.content['element']:
self.pc.remove_file(hexlify(hash_bin).decode())
def get_tmp_files(self):
# return [(path, size, time), ...]
files = list()
for f in os.listdir(self.pc.tmp_dir):
path = os.path.join(self.pc.tmp_dir, f)
if not f.startswith('file.'):
continue
if not os.path.isfile(path):
continue
size = os.path.getsize(path)
date = os.path.getmtime(path)
files.append((path, size, date))
return files
def download(self, num=3, wait=True):
if 'element' not in self.content:
return False
request = [i for i in range(len(self.content['element'])) if not self.f_contain[i]]
lock = threading.Lock()
threads = list()
f_finish = [None] * num
for n in range(num):
t = threading.Thread(
target=self.__download, args=(request, f_finish, lock), name='FileShare', daemon=True)
t.start()
threads.append(t)
time.sleep(1)
if wait:
for t in threads:
t.join()
else:
return request, f_finish
def __download(self, request, f_finish, lock):
allow_fail = max(5, len(request) // 1000)
while True:
# check retry counts
if allow_fail < 0:
f_finish.pop()
return
# get index, hash to try
with lock:
try:
i = random.choice(request)
request.remove(i)
except IndexError:
f_finish.pop()
return
hex_hash = hexlify(self.content['element'][i]).decode()
logging.debug("Try %d=0x%s" % (i, hex_hash))
retry = 5
while True:
try:
raw = self.pc.get_file(file_hash=hex_hash, only_check=False)
if raw:
with lock:
self.f_contain[i] = True
logging.debug("Success %d=0x%s" % (i, hex_hash))
break
else:
raise FileReceiveError('Failed get file, retry')
except (FileReceiveError, TimeoutError) as e:
retry -= 1
if retry > 0:
time.sleep(5)
continue
else:
logging.info("Failed %d=0x%s" % (i, hex_hash))
import traceback
traceback.print_exc()
allow_fail -= 1
break
| [
"hashlib.sha256",
"random.choice",
"logging.debug",
"threading.Lock",
"binascii.hexlify",
"time.strftime",
"logging.info",
"time.sleep",
"bjson.dump",
"threading.Thread",
"traceback.print_exc",
"time.time",
"bjson.load"
] | [((862, 870), 'hashlib.sha256', 'sha256', ([], {}), '()\n', (868, 870), False, 'from hashlib import sha256\n'), ((2594, 2602), 'hashlib.sha256', 'sha256', ([], {}), '()\n', (2600, 2602), False, 'from hashlib import sha256\n'), ((3505, 3513), 'hashlib.sha256', 'sha256', ([], {}), '()\n', (3511, 3513), False, 'from hashlib import sha256\n'), ((5086, 5102), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (5100, 5102), False, 'import threading\n'), ((1553, 1587), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (1566, 1587), False, 'import time\n'), ((1825, 1841), 'bjson.load', 'bjson.load', ([], {'fp': 'f'}), '(fp=f)\n', (1835, 1841), False, 'import bjson\n'), ((3345, 3394), 'bjson.dump', 'bjson.dump', (['self.content'], {'fp': 'f', 'compress': 'compress'}), '(self.content, fp=f, compress=compress)\n', (3355, 3394), False, 'import bjson\n'), ((5205, 5312), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.__download', 'args': '(request, f_finish, lock)', 'name': '"""FileShare"""', 'daemon': '(True)'}), "(target=self.__download, args=(request, f_finish, lock),\n name='FileShare', daemon=True)\n", (5221, 5312), False, 'import threading\n'), ((5390, 5403), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5400, 5403), False, 'import time\n'), ((6110, 6154), 'logging.debug', 'logging.debug', (["('Try %d=0x%s' % (i, hex_hash))"], {}), "('Try %d=0x%s' % (i, hex_hash))\n", (6123, 6154), False, 'import logging\n'), ((1613, 1624), 'time.time', 'time.time', ([], {}), '()\n', (1622, 1624), False, 'import time\n'), ((5872, 5894), 'random.choice', 'random.choice', (['request'], {}), '(request)\n', (5885, 5894), False, 'import random\n'), ((6053, 6088), 'binascii.hexlify', 'hexlify', (["self.content['element'][i]"], {}), "(self.content['element'][i])\n", (6060, 6088), False, 'from binascii import hexlify\n'), ((4365, 4382), 'binascii.hexlify', 'hexlify', (['hash_bin'], {}), '(hash_bin)\n', (4372, 4382), False, 
'from binascii import hexlify\n'), ((6443, 6491), 'logging.debug', 'logging.debug', (["('Success %d=0x%s' % (i, hex_hash))"], {}), "('Success %d=0x%s' % (i, hex_hash))\n", (6456, 6491), False, 'import logging\n'), ((1197, 1208), 'hashlib.sha256', 'sha256', (['raw'], {}), '(raw)\n', (1203, 1208), False, 'from hashlib import sha256\n'), ((3804, 3814), 'binascii.hexlify', 'hexlify', (['h'], {}), '(h)\n', (3811, 3814), False, 'from binascii import hexlify\n'), ((6772, 6785), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (6782, 6785), False, 'import time\n'), ((6869, 6915), 'logging.info', 'logging.info', (["('Failed %d=0x%s' % (i, hex_hash))"], {}), "('Failed %d=0x%s' % (i, hex_hash))\n", (6881, 6915), False, 'import logging\n'), ((6981, 7002), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7000, 7002), False, 'import traceback\n'), ((2746, 2756), 'binascii.hexlify', 'hexlify', (['h'], {}), '(h)\n', (2753, 2756), False, 'from binascii import hexlify\n')] |
import numpy as np
import deepxde as dde
from deepxde.backend import tf
import variable_to_parameter_transform
def sbinn(data_t, data_y, meal_t, meal_q):
def get_variable(v, var):
low, up = v * 0.2, v * 1.8
l = (up - low) / 2
v1 = l * tf.tanh(var) + l + low
return v1
E_ = dde.Variable(0.0)
tp_ = dde.Variable(0.0)
ti_ = dde.Variable(0.0)
td_ = dde.Variable(0.0)
k_ = dde.Variable(0.0)
Rm_ = dde.Variable(0.0)
a1_ = dde.Variable(0.0)
C1_ = dde.Variable(0.0)
C2_ = dde.Variable(0.0)
C4_ = dde.Variable(0.0)
C5_ = dde.Variable(0.0)
Ub_ = dde.Variable(0.0)
U0_ = dde.Variable(0.0)
Um_ = dde.Variable(0.0)
Rg_ = dde.Variable(0.0)
alpha_ = dde.Variable(0.0)
beta_ = dde.Variable(0.0)
var_list_ = [
E_,
tp_,
ti_,
td_,
k_,
Rm_,
a1_,
C1_,
C2_,
C4_,
C5_,
Ub_,
U0_,
Um_,
Rg_,
alpha_,
beta_,
]
def ODE(t, y):
Ip = y[:, 0:1]
Ii = y[:, 1:2]
G = y[:, 2:3]
h1 = y[:, 3:4]
h2 = y[:, 4:5]
h3 = y[:, 5:6]
Vp = 3
Vi = 11
Vg = 10
E = (tf.tanh(E_) + 1) * 0.1 + 0.1
tp = (tf.tanh(tp_) + 1) * 2 + 4
ti = (tf.tanh(ti_) + 1) * 40 + 60
td = (tf.tanh(td_) + 1) * 25 / 6 + 25 / 3
k = get_variable(0.0083, k_)
Rm = get_variable(209, Rm_)
a1 = get_variable(6.6, a1_)
C1 = get_variable(300, C1_)
C2 = get_variable(144, C2_)
C3 = 100
C4 = get_variable(80, C4_)
C5 = get_variable(26, C5_)
Ub = get_variable(72, Ub_)
U0 = get_variable(4, U0_)
Um = get_variable(90, Um_)
Rg = get_variable(180, Rg_)
alpha = get_variable(7.5, alpha_)
beta = get_variable(1.772, beta_)
f1 = Rm * tf.math.sigmoid(G / (Vg * C1) - a1)
f2 = Ub * (1 - tf.math.exp(-G / (Vg * C2)))
kappa = (1 / Vi + 1 / (E * ti)) / C4
f3 = (U0 + Um / (1 + tf.pow(tf.maximum(kappa * Ii, 1e-3), -beta))) / (Vg * C3)
f4 = Rg * tf.sigmoid(alpha * (1 - h3 / (Vp * C5)))
dt = t - meal_t
IG = tf.math.reduce_sum(
0.5 * meal_q * k * tf.math.exp(-k * dt) * (tf.math.sign(dt) + 1),
axis=1,
keepdims=True,
)
tmp = E * (Ip / Vp - Ii / Vi)
dIP_dt = dde.grad.jacobian(y, t, i=0, j=0)
dIi_dt = dde.grad.jacobian(y, t, i=1, j=0)
dG_dt = dde.grad.jacobian(y, t, i=2, j=0)
dh1_dt = dde.grad.jacobian(y, t, i=3, j=0)
dh2_dt = dde.grad.jacobian(y, t, i=4, j=0)
dh3_dt = dde.grad.jacobian(y, t, i=5, j=0)
return [
dIP_dt - (f1 - tmp - Ip / tp),
dIi_dt - (tmp - Ii / ti),
dG_dt - (f4 + IG - f2 - f3 * G),
dh1_dt - (Ip - h1) / td,
dh2_dt - (h1 - h2) / td,
dh3_dt - (h2 - h3) / td,
]
geom = dde.geometry.TimeDomain(data_t[0, 0], data_t[-1, 0])
# Observes
n = len(data_t)
idx = np.append(
np.random.choice(np.arange(1, n - 1), size=n // 5, replace=False), [0, n - 1]
)
observe_y2 = dde.PointSetBC(data_t[idx], data_y[idx, 2:3], component=2)
np.savetxt("glucose_input.dat", np.hstack((data_t[idx], data_y[idx, 2:3])))
data = dde.data.PDE(geom, ODE, [observe_y2], anchors=data_t)
net = dde.maps.FNN([1] + [128] * 3 + [6], "swish", "Glorot normal")
def feature_transform(t):
t = 0.01 * t
return tf.concat(
(t, tf.sin(t), tf.sin(2 * t), tf.sin(3 * t), tf.sin(4 * t), tf.sin(5 * t)),
axis=1,
)
net.apply_feature_transform(feature_transform)
def output_transform(t, y):
idx = 1799
k = (data_y[idx] - data_y[0]) / (data_t[idx] - data_t[0])
b = (data_t[idx] * data_y[0] - data_t[0] * data_y[idx]) / (
data_t[idx] - data_t[0]
)
linear = k * t + b
factor = tf.math.tanh(t) * tf.math.tanh(idx - t)
return linear + factor * tf.constant([1, 1, 1e2, 1, 1, 1]) * y
net.apply_output_transform(output_transform)
model = dde.Model(data, net)
firsttrain = 10000
callbackperiod = 1000
maxepochs = 1000000
model.compile("adam", lr=1e-3, loss_weights=[0, 0, 0, 0, 0, 0, 1e-2])
model.train(epochs=firsttrain, display_every=1000)
model.compile(
"adam",
lr=1e-3,
loss_weights=[1, 1, 1e-2, 1, 1, 1, 1e-2],
external_trainable_variables=var_list_,
)
variablefilename = "variables.csv"
variable = dde.callbacks.VariableValue(
var_list_, period=callbackperiod, filename=variablefilename
)
losshistory, train_state = model.train(
epochs=maxepochs, display_every=1000, callbacks=[variable]
)
dde.saveplot(losshistory, train_state, issave=True, isplot=True)
gluc_data = np.hsplit(np.loadtxt("glucose.dat"), [1])
meal_data = np.hsplit(np.loadtxt("meal.dat"), [4])
t = gluc_data[0]
y = gluc_data[1]
meal_t = meal_data[0]
meal_q = meal_data[1]
sbinn(
t[:1800],
y[:1800],
meal_t,
meal_q,
)
variable_to_parameter_transform.variable_file(10000, 1000, 1000000, "variables.csv")
| [
"deepxde.backend.tf.math.tanh",
"deepxde.data.PDE",
"deepxde.PointSetBC",
"deepxde.Variable",
"deepxde.Model",
"numpy.hstack",
"deepxde.backend.tf.constant",
"variable_to_parameter_transform.variable_file",
"deepxde.geometry.TimeDomain",
"deepxde.backend.tf.sigmoid",
"numpy.arange",
"deepxde.b... | [((5369, 5457), 'variable_to_parameter_transform.variable_file', 'variable_to_parameter_transform.variable_file', (['(10000)', '(1000)', '(1000000)', '"""variables.csv"""'], {}), "(10000, 1000, 1000000,\n 'variables.csv')\n", (5414, 5457), False, 'import variable_to_parameter_transform\n'), ((329, 346), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (341, 346), True, 'import deepxde as dde\n'), ((358, 375), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (370, 375), True, 'import deepxde as dde\n'), ((387, 404), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (399, 404), True, 'import deepxde as dde\n'), ((416, 433), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (428, 433), True, 'import deepxde as dde\n'), ((444, 461), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (456, 461), True, 'import deepxde as dde\n'), ((473, 490), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (485, 490), True, 'import deepxde as dde\n'), ((502, 519), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (514, 519), True, 'import deepxde as dde\n'), ((531, 548), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (543, 548), True, 'import deepxde as dde\n'), ((560, 577), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (572, 577), True, 'import deepxde as dde\n'), ((589, 606), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (601, 606), True, 'import deepxde as dde\n'), ((618, 635), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (630, 635), True, 'import deepxde as dde\n'), ((647, 664), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (659, 664), True, 'import deepxde as dde\n'), ((676, 693), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (688, 693), True, 'import deepxde as dde\n'), ((705, 722), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (717, 722), 
True, 'import deepxde as dde\n'), ((734, 751), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (746, 751), True, 'import deepxde as dde\n'), ((766, 783), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (778, 783), True, 'import deepxde as dde\n'), ((797, 814), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (809, 814), True, 'import deepxde as dde\n'), ((3123, 3175), 'deepxde.geometry.TimeDomain', 'dde.geometry.TimeDomain', (['data_t[0, 0]', 'data_t[-1, 0]'], {}), '(data_t[0, 0], data_t[-1, 0])\n', (3146, 3175), True, 'import deepxde as dde\n'), ((3349, 3407), 'deepxde.PointSetBC', 'dde.PointSetBC', (['data_t[idx]', 'data_y[idx, 2:3]'], {'component': '(2)'}), '(data_t[idx], data_y[idx, 2:3], component=2)\n', (3363, 3407), True, 'import deepxde as dde\n'), ((3505, 3558), 'deepxde.data.PDE', 'dde.data.PDE', (['geom', 'ODE', '[observe_y2]'], {'anchors': 'data_t'}), '(geom, ODE, [observe_y2], anchors=data_t)\n', (3517, 3558), True, 'import deepxde as dde\n'), ((3572, 3633), 'deepxde.maps.FNN', 'dde.maps.FNN', (['([1] + [128] * 3 + [6])', '"""swish"""', '"""Glorot normal"""'], {}), "([1] + [128] * 3 + [6], 'swish', 'Glorot normal')\n", (3584, 3633), True, 'import deepxde as dde\n'), ((4355, 4375), 'deepxde.Model', 'dde.Model', (['data', 'net'], {}), '(data, net)\n', (4364, 4375), True, 'import deepxde as dde\n'), ((4805, 4898), 'deepxde.callbacks.VariableValue', 'dde.callbacks.VariableValue', (['var_list_'], {'period': 'callbackperiod', 'filename': 'variablefilename'}), '(var_list_, period=callbackperiod, filename=\n variablefilename)\n', (4832, 4898), True, 'import deepxde as dde\n'), ((5037, 5101), 'deepxde.saveplot', 'dde.saveplot', (['losshistory', 'train_state'], {'issave': '(True)', 'isplot': '(True)'}), '(losshistory, train_state, issave=True, isplot=True)\n', (5049, 5101), True, 'import deepxde as dde\n'), ((5129, 5154), 'numpy.loadtxt', 'np.loadtxt', (['"""glucose.dat"""'], {}), "('glucose.dat')\n", (5139, 5154), 
True, 'import numpy as np\n'), ((5184, 5206), 'numpy.loadtxt', 'np.loadtxt', (['"""meal.dat"""'], {}), "('meal.dat')\n", (5194, 5206), True, 'import numpy as np\n'), ((2544, 2577), 'deepxde.grad.jacobian', 'dde.grad.jacobian', (['y', 't'], {'i': '(0)', 'j': '(0)'}), '(y, t, i=0, j=0)\n', (2561, 2577), True, 'import deepxde as dde\n'), ((2596, 2629), 'deepxde.grad.jacobian', 'dde.grad.jacobian', (['y', 't'], {'i': '(1)', 'j': '(0)'}), '(y, t, i=1, j=0)\n', (2613, 2629), True, 'import deepxde as dde\n'), ((2647, 2680), 'deepxde.grad.jacobian', 'dde.grad.jacobian', (['y', 't'], {'i': '(2)', 'j': '(0)'}), '(y, t, i=2, j=0)\n', (2664, 2680), True, 'import deepxde as dde\n'), ((2699, 2732), 'deepxde.grad.jacobian', 'dde.grad.jacobian', (['y', 't'], {'i': '(3)', 'j': '(0)'}), '(y, t, i=3, j=0)\n', (2716, 2732), True, 'import deepxde as dde\n'), ((2751, 2784), 'deepxde.grad.jacobian', 'dde.grad.jacobian', (['y', 't'], {'i': '(4)', 'j': '(0)'}), '(y, t, i=4, j=0)\n', (2768, 2784), True, 'import deepxde as dde\n'), ((2803, 2836), 'deepxde.grad.jacobian', 'dde.grad.jacobian', (['y', 't'], {'i': '(5)', 'j': '(0)'}), '(y, t, i=5, j=0)\n', (2820, 2836), True, 'import deepxde as dde\n'), ((3447, 3489), 'numpy.hstack', 'np.hstack', (['(data_t[idx], data_y[idx, 2:3])'], {}), '((data_t[idx], data_y[idx, 2:3]))\n', (3456, 3489), True, 'import numpy as np\n'), ((2006, 2041), 'deepxde.backend.tf.math.sigmoid', 'tf.math.sigmoid', (['(G / (Vg * C1) - a1)'], {}), '(G / (Vg * C1) - a1)\n', (2021, 2041), False, 'from deepxde.backend import tf\n'), ((2248, 2288), 'deepxde.backend.tf.sigmoid', 'tf.sigmoid', (['(alpha * (1 - h3 / (Vp * C5)))'], {}), '(alpha * (1 - h3 / (Vp * C5)))\n', (2258, 2288), False, 'from deepxde.backend import tf\n'), ((3263, 3282), 'numpy.arange', 'np.arange', (['(1)', '(n - 1)'], {}), '(1, n - 1)\n', (3272, 3282), True, 'import numpy as np\n'), ((4176, 4191), 'deepxde.backend.tf.math.tanh', 'tf.math.tanh', (['t'], {}), '(t)\n', (4188, 4191), False, 'from 
deepxde.backend import tf\n'), ((4194, 4215), 'deepxde.backend.tf.math.tanh', 'tf.math.tanh', (['(idx - t)'], {}), '(idx - t)\n', (4206, 4215), False, 'from deepxde.backend import tf\n'), ((2066, 2093), 'deepxde.backend.tf.math.exp', 'tf.math.exp', (['(-G / (Vg * C2))'], {}), '(-G / (Vg * C2))\n', (2077, 2093), False, 'from deepxde.backend import tf\n'), ((3733, 3742), 'deepxde.backend.tf.sin', 'tf.sin', (['t'], {}), '(t)\n', (3739, 3742), False, 'from deepxde.backend import tf\n'), ((3744, 3757), 'deepxde.backend.tf.sin', 'tf.sin', (['(2 * t)'], {}), '(2 * t)\n', (3750, 3757), False, 'from deepxde.backend import tf\n'), ((3759, 3772), 'deepxde.backend.tf.sin', 'tf.sin', (['(3 * t)'], {}), '(3 * t)\n', (3765, 3772), False, 'from deepxde.backend import tf\n'), ((3774, 3787), 'deepxde.backend.tf.sin', 'tf.sin', (['(4 * t)'], {}), '(4 * t)\n', (3780, 3787), False, 'from deepxde.backend import tf\n'), ((3789, 3802), 'deepxde.backend.tf.sin', 'tf.sin', (['(5 * t)'], {}), '(5 * t)\n', (3795, 3802), False, 'from deepxde.backend import tf\n'), ((275, 287), 'deepxde.backend.tf.tanh', 'tf.tanh', (['var'], {}), '(var)\n', (282, 287), False, 'from deepxde.backend import tf\n'), ((1315, 1326), 'deepxde.backend.tf.tanh', 'tf.tanh', (['E_'], {}), '(E_)\n', (1322, 1326), False, 'from deepxde.backend import tf\n'), ((1359, 1371), 'deepxde.backend.tf.tanh', 'tf.tanh', (['tp_'], {}), '(tp_)\n', (1366, 1371), False, 'from deepxde.backend import tf\n'), ((1400, 1412), 'deepxde.backend.tf.tanh', 'tf.tanh', (['ti_'], {}), '(ti_)\n', (1407, 1412), False, 'from deepxde.backend import tf\n'), ((2380, 2400), 'deepxde.backend.tf.math.exp', 'tf.math.exp', (['(-k * dt)'], {}), '(-k * dt)\n', (2391, 2400), False, 'from deepxde.backend import tf\n'), ((2404, 2420), 'deepxde.backend.tf.math.sign', 'tf.math.sign', (['dt'], {}), '(dt)\n', (2416, 2420), False, 'from deepxde.backend import tf\n'), ((4250, 4285), 'deepxde.backend.tf.constant', 'tf.constant', (['[1, 1, 100.0, 1, 1, 1]'], {}), '([1, 1, 
100.0, 1, 1, 1])\n', (4261, 4285), False, 'from deepxde.backend import tf\n'), ((1443, 1455), 'deepxde.backend.tf.tanh', 'tf.tanh', (['td_'], {}), '(td_)\n', (1450, 1455), False, 'from deepxde.backend import tf\n'), ((2178, 2207), 'deepxde.backend.tf.maximum', 'tf.maximum', (['(kappa * Ii)', '(0.001)'], {}), '(kappa * Ii, 0.001)\n', (2188, 2207), False, 'from deepxde.backend import tf\n')] |
""" Session submodule
"""
from aiohttp import web
from servicelib.session import get_session
from servicelib.session import setup_session as do_setup_session
def setup(app: web.Application):
do_setup_session(app)
# alias
setup_session = setup
__all__ = (
"setup_session",
"get_session",
)
| [
"servicelib.session.setup_session"
] | [((199, 220), 'servicelib.session.setup_session', 'do_setup_session', (['app'], {}), '(app)\n', (215, 220), True, 'from servicelib.session import setup_session as do_setup_session\n')] |
from django.core.exceptions import ValidationError
from django.core.validators import FileExtensionValidator
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.forms import Form, ModelForm, FileInput
from django.forms.fields import *
from captcha.fields import CaptchaField
from .models import Image
from exif import (
WhiteBalance,
ColorSpace,
ExposureMode,
SceneCaptureType,
SensingMethod,
MeteringMode,
)
def validate_image_size(image: InMemoryUploadedFile):
print(image.size)
file_size = image.size
limit = 10 * 1024 * 1024 # 10M
if file_size > limit:
raise ValidationError(f'Максимальный размер файла {limit // (1024 * 1024)}M')
class ImageUploadForm(ModelForm):
ALLOWED_EXTENSIONS = ['jpg', 'jpeg']
captcha = CaptchaField(label='')
img = ImageField(
label='Фото',
widget=FileInput(attrs={'multiple': True}),
validators=[
FileExtensionValidator(allowed_extensions=ALLOWED_EXTENSIONS),
validate_image_size
],
help_text=f'Доступные расщирения: {ALLOWED_EXTENSIONS}',
)
class Meta:
model = Image
fields = ['img']
class ExifEditorForm(Form):
make = CharField(help_text='Hint', required=False)
white_balance = TypedChoiceField(
coerce=int,
choices=[(x.value, x.name) for x in WhiteBalance],
help_text='Баланс белого',
required=False)
model = CharField(help_text='Hint', required=False)
software = CharField(help_text='Hint', required=False)
focal_length = FloatField(help_text='Фокусное расстояние', required=False)
color_space = TypedChoiceField(
coerce=int,
choices=[(x.value, x.name) for x in ColorSpace],
help_text='Hint',
required=False)
exposure_mode = TypedChoiceField(
coerce=int,
choices=[(x.value, x.name) for x in ExposureMode],
help_text='Hint',
required=False)
scene_capture_type = TypedChoiceField(
coerce=int,
choices=[(x.value, x.name) for x in SceneCaptureType],
help_text='Hint',
required=False)
x_resolution = FloatField(help_text='Hint', required=False)
y_resolution = FloatField(help_text='Hint', required=False)
digital_zoom_ratio = FloatField(help_text='Hint', required=False)
custom_rendered = IntegerField(help_text='Hint', required=False)
sensing_method = TypedChoiceField(
coerce=int,
choices=[(x.value, x.name) for x in SensingMethod],
help_text='Hint',
required=False)
metering_mode = TypedChoiceField(
coerce=int,
choices=[(x.value, x.name) for x in MeteringMode],
help_text='Hint',
required=False)
compressed_bits_per_pixel = FloatField(help_text='Hint', required=False)
shutter_speed_value = FloatField(help_text='Hint', required=False)
aperture_value = FloatField(help_text='Hint', required=False)
brightness_value = FloatField(help_text='Hint', required=False, initial=1.0)
exposure_bias_value = FloatField(help_text='Hint', required=False)
max_aperture_value = FloatField(help_text='Hint', required=False)
datetime = CharField(help_text='YYYY:MM:DD HH:MM:SS', required=False)
datetime_original = CharField(help_text='YYYY:MM:DD HH:MM:SS', required=False)
datetime_digitized = CharField(help_text='YYYY:MM:DD HH:MM:SS', required=False)
# image_width = IntegerField(help_text='ширина изображения', required=False)
# image_height = IntegerField(help_text='высота изображиния', required=False)
# bits_per_sample = CharField(help_text='Hint', required=False)
# compression = CharField(help_text='Hint', required=False)
# photometric_interpretation = CharField(help_text='Hint', required=False)
# orientation = CharField(help_text='Hint', required=False)
# samples_per_pixel = CharField(help_text='Hint', required=False)
# planar_configuration = CharField(help_text='Hint', required=False)
# subsampling_ratio_of_y_to_c = CharField(help_text='Hint', required=False)
# y_and_c_positioning = CharField(help_text='Hint', required=False)
# resolution_unit = CharField(help_text='Hint', required=False)
# strip_offsets = CharField(help_text='Hint', required=False)
# rows_per_strip = CharField(help_text='Hint', required=False)
# strip_byte_counts = CharField(help_text='Hint', required=False)
# jpeg_interchange_format = CharField(help_text='Hint', required=False)
# jpeg_interchange_format_length = CharField(help_text='Hint', required=False)
# transfer_function = CharField(help_text='Hint', required=False)
# white_point = CharField(help_text='Hint', required=False)
# primary_chromaticities = CharField(help_text='Hint', required=False)
# matrix_coefficients = CharField(help_text='Hint', required=False)
# reference_black_white = CharField(help_text='Hint', required=False)
# image_description = CharField(help_text='Hint', required=False)
# artist = CharField(help_text='Hint', required=False)
# copyright = CharField(help_text='Hint', required=False)
# exposure_time = CharField(help_text='Hint', required=False)
# f_number = CharField(help_text='Hint', required=False)
# exposure_program = CharField(help_text='Hint', required=False)
# spectral_sensitivity = CharField(help_text='Hint', required=False)
# photographic_sensitivity = CharField(help_text='Hint', required=False)
# oecf = CharField(help_text='Hint', required=False)
# sensitivity_type = CharField(help_text='Hint', required=False)
# standard_output_sensitivity = CharField(help_text='Hint', required=False)
# recommended_exposure_index = CharField(help_text='Hint', required=False)
# iso_speed = CharField(help_text='Hint', required=False)
# iso_speed_latitude_yyy = CharField(help_text='Hint', required=False)
# iso_speed_latitude_zzz = CharField(help_text='Hint', required=False)
# exif_version = CharField(help_text='Hint', required=False)
# offset_time = CharField(help_text='Hint', required=False)
# offset_time_original = CharField(help_text='Hint', required=False)
# offset_time_digitized = CharField(help_text='Hint', required=False)
# components_configuration = CharField(help_text='Hint', required=False)
# subject_distance = CharField(help_text='Hint', required=False)
# light_source = CharField(help_text='Hint', required=False)
# flash = CharField(help_text='Hint', required=False)
# subject_area = CharField(help_text='Hint', required=False)
# maker_note = CharField(help_text='Hint', required=False)
# user_comment = CharField(help_text='Hint', required=False)
# subsec_time = CharField(help_text='Hint', required=False)
# subsec_time_original = CharField(help_text='Hint', required=False)
# subsec_time_digitized = CharField(help_text='Hint', required=False)
# temperature = CharField(help_text='Hint', required=False)
# humidity = CharField(help_text='Hint', required=False)
# pressure = CharField(help_text='Hint', required=False)
# water_depth = CharField(help_text='Hint', required=False)
# acceleration = CharField(help_text='Hint', required=False)
# camera_elevation_angle = CharField(help_text='Hint', required=False)
# xp_title = CharField(help_text='Hint', required=False)
# xp_comment = CharField(help_text='Hint', required=False)
# xp_author = CharField(help_text='Hint', required=False)
# xp_keywords = CharField(help_text='Hint', required=False)
# xp_subject = CharField(help_text='Hint', required=False)
# flashpix_version = CharField(help_text='Hint', required=False)
# pixel_x_dimension = CharField(help_text='Hint', required=False)
# pixel_y_dimension = CharField(help_text='Hint', required=False)
# related_sound_file = CharField(help_text='Hint', required=False)
# flash_energy = CharField(help_text='Hint', required=False)
# spatial_frequency_response = CharField(help_text='Hint', required=False)
# focal_plane_x_resolution = CharField(help_text='Hint', required=False)
# focal_plane_y_resolution = CharField(help_text='Hint', required=False)
# focal_plane_resolution_unit = CharField(help_text='Hint', required=False)
# subject_location = CharField(help_text='Hint', required=False)
# exposure_index = CharField(help_text='Hint', required=False)
# file_source = CharField(help_text='Hint', required=False)
# scene_type = CharField(help_text='Hint', required=False)
# cfa_pattern = CharField(help_text='Hint', required=False)
# focal_length_in_35mm_film = CharField(help_text='Hint', required=False)
# gain_control = CharField(help_text='Hint', required=False)
# contrast = CharField(help_text='Hint', required=False)
# saturation = CharField(help_text='Hint', required=False)
# sharpness = CharField(help_text='Hint', required=False)
# device_setting_description = CharField(help_text='Hint', required=False)
# subject_distance_range = CharField(help_text='Hint', required=False)
# image_unique_id = CharField(help_text='Hint', required=False)
# camera_owner_name = CharField(help_text='Hint', required=False)
# body_serial_number = CharField(help_text='Hint', required=False)
# lens_specification = CharField(help_text='Hint', required=False)
# lens_make = CharField(help_text='Hint', required=False)
# lens_model = CharField(help_text='Hint', required=False)
# lens_serial_number = CharField(help_text='Hint', required=False)
# gamma = CharField(help_text='Hint', required=False)
# gps_version_id = CharField(help_text='Hint', required=False)
# gps_latitude_ref = CharField(help_text='Hint', required=False)
# gps_latitude = CharField(help_text='Hint', required=False)
# gps_longitude_ref = CharField(help_text='Hint', required=False)
# gps_longitude = CharField(help_text='Hint', required=False)
# gps_altitude_ref = CharField(help_text='Hint', required=False)
# gps_altitude = CharField(help_text='Hint', required=False)
# gps_timestamp = CharField(help_text='Hint', required=False)
# gps_satellites = CharField(help_text='Hint', required=False)
# gps_status = CharField(help_text='Hint', required=False)
# gps_measure_mode = CharField(help_text='Hint', required=False)
# gps_dop = CharField(help_text='Hint', required=False)
# gps_speed_ref = CharField(help_text='Hint', required=False)
# gps_speed = CharField(help_text='Hint', required=False)
# gps_track_ref = CharField(help_text='Hint', required=False)
# gps_track = CharField(help_text='Hint', required=False)
# gps_img_direction_ref = CharField(help_text='Hint', required=False)
# gps_img_direction = CharField(help_text='Hint', required=False)
# gps_map_datum = CharField(help_text='Hint', required=False)
# gps_dest_latitude_ref = CharField(help_text='Hint', required=False)
# gps_dest_latitude = CharField(help_text='Hint', required=False)
# gps_dest_longitude_ref = CharField(help_text='Hint', required=False)
# gps_dest_longitude = CharField(help_text='Hint', required=False)
# gps_dest_bearing_ref = CharField(help_text='Hint', required=False)
# gps_dest_bearing = CharField(help_text='Hint', required=False)
# gps_dest_distance_ref = CharField(help_text='Hint', required=False)
# gps_dest_distance = CharField(help_text='Hint', required=False)
# gps_processing_method = CharField(help_text='Hint', required=False)
# gps_area_information = CharField(help_text='Hint', required=False)
# gps_datestamp = CharField(help_text='Hint', required=False)
# gps_differential = CharField(help_text='Hint', required=False)
# gps_horizontal_positioning_error = CharField(help_text='Hint', required=False)
| [
"django.forms.FileInput",
"django.core.validators.FileExtensionValidator",
"django.core.exceptions.ValidationError",
"captcha.fields.CaptchaField"
] | [((803, 825), 'captcha.fields.CaptchaField', 'CaptchaField', ([], {'label': '""""""'}), "(label='')\n", (815, 825), False, 'from captcha.fields import CaptchaField\n'), ((640, 711), 'django.core.exceptions.ValidationError', 'ValidationError', (['f"""Максимальный размер файла {limit // (1024 * 1024)}M"""'], {}), "(f'Максимальный размер файла {limit // (1024 * 1024)}M')\n", (655, 711), False, 'from django.core.exceptions import ValidationError\n'), ((885, 920), 'django.forms.FileInput', 'FileInput', ([], {'attrs': "{'multiple': True}"}), "(attrs={'multiple': True})\n", (894, 920), False, 'from django.forms import Form, ModelForm, FileInput\n'), ((955, 1016), 'django.core.validators.FileExtensionValidator', 'FileExtensionValidator', ([], {'allowed_extensions': 'ALLOWED_EXTENSIONS'}), '(allowed_extensions=ALLOWED_EXTENSIONS)\n', (977, 1016), False, 'from django.core.validators import FileExtensionValidator\n')] |
import os
import sys
import pyspod
import shutil
from setuptools import setup
from setuptools import Command
# GLOBAL VARIABLES
# Package metadata is pulled from the pyspod package itself so that the
# values published to PyPI stay in sync with the code.
NAME = pyspod.__name__
URL = pyspod.__url__
AUTHOR = pyspod.__author__
EMAIL = pyspod.__email__
VERSION = pyspod.__version__
KEYWORDS='spectral-proper-orthogonal-decomposition spod'
# Install requirements; note this list also bundles docs (Sphinx) and
# test (pytest) dependencies rather than splitting them into extras.
REQUIRED = [
	"numpy",
	"scipy",
	"matplotlib",
	"xarray",
	"netcdf4",
	"h5py",
	"psutil",
	"tqdm",
	"Sphinx",
	"sphinx_rtd_theme",
	"ecmwf_api_client",
	"cdsapi",
	"future",
	"pytest",
]
# Optional extras, installable as: pip install pyspod[docs]
EXTRAS = {
	'docs': ['Sphinx==3.2.1', 'sphinx_rtd_theme'],
}
# Long description shown on the PyPI project page.
DESCR = (
	"PySPOD is a Python package that implements the Spectral Proper Orthogonal"
	" Decomposition (SPOD). SPOD is used to extract perfectly coherent spatio-temporal"
	" patterns in complex datasets. Original work on this technique dates back"
	" to (Lumley 1970), with recent development brought forward by (Towne et al. 2017),"
	" (Schmidt et al. 2018), (Schmidt et al. 2019).\n"
	"\n"
	"PySPOD comes with a set of tutorials spanning weather and climate, seismic and "
	" fluid mechanics applications, and it can be used for both canonical problems "
	" as well as large datasets. \n"
)
# Absolute path of the directory containing this setup.py.
CWD = os.path.abspath(os.path.dirname(__file__))
# COMMANDS
class UploadCommand(Command):
	"""Custom ``python setup.py upload`` command.

	Removes stale build artifacts, builds sdist/wheel distributions,
	uploads them to PyPI via twine, then tags and pushes the release.
	"""

	description = 'Build and publish the package.'
	user_options = []

	@staticmethod
	def status(s):
		"""Prints things in bold."""
		print('\033[1m{0}\033[0m'.format(s))

	def initialize_options(self):
		pass

	def finalize_options(self):
		pass

	def run(self):
		try:
			self.status('Removing previous builds...')
			shutil.rmtree(os.path.join(CWD, 'dist'))
		except OSError:
			# No previous dist/ directory: nothing to clean up.
			pass
		self.status('Building Source and Wheel (universal) distribution...')
		os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
		self.status('Uploading the package to PyPI via Twine...')
		# BUG FIX: the original ran "python3 twine upload dist/*", which asks
		# the interpreter to execute a *file* named "twine" and fails; twine
		# must be invoked as a module (consistent with the build step above,
		# which also uses sys.executable).
		os.system('{0} -m twine upload dist/*'.format(sys.executable))
		self.status('Pushing git tags...')
		os.system('git tag v{0}'.format(VERSION))
		os.system('git push --tags')
		sys.exit()
# SETUP
setup(
	name=NAME,
	version=VERSION,
	description="Python Spectral Proper Orthogonal Decomposition",
	long_description=DESCR,
	author=AUTHOR,
	author_email=EMAIL,
	classifiers=[
		'License :: OSI Approved :: MIT License',
		'Programming Language :: Python :: 3',
		'Programming Language :: Python :: 3.6',
		'Programming Language :: Python :: 3.7',
		'Programming Language :: Python :: 3.8',
		'Intended Audience :: Science/Research',
		'Topic :: Scientific/Engineering :: Mathematics'
	],
	keywords=KEYWORDS,
	url=URL,
	license='MIT',
	packages=[NAME],
	# Ship the coastline .mat files used for plotting support.
	package_data={'': [
		'plotting_support/coast.mat',
		'plotting_support/coast_centred.mat'
	]},
	data_files=[
		('pyspod',['pyspod/plotting_support/coast.mat']),
		('pyspod',['pyspod/plotting_support/coast_centred.mat'])],
	# package_dir={NAME: NAME},
	# package_data={NAME: [
	# 	'pyspod/plotting_support/*.mat',
	# ]},
	install_requires=REQUIRED,
	extras_require=EXTRAS,
	include_package_data=True,
	zip_safe=False,
	# Register the custom `python setup.py upload` command defined above.
	cmdclass={
		'upload': UploadCommand,
	},)
| [
"setuptools.setup",
"os.path.join",
"os.path.dirname",
"sys.exit",
"os.system"
] | [((2024, 2955), 'setuptools.setup', 'setup', ([], {'name': 'NAME', 'version': 'VERSION', 'description': '"""Python Spectral Proper Orthogonal Decomposition"""', 'long_description': 'DESCR', 'author': 'AUTHOR', 'author_email': 'EMAIL', 'classifiers': "['License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Mathematics']", 'keywords': 'KEYWORDS', 'url': 'URL', 'license': '"""MIT"""', 'packages': '[NAME]', 'package_data': "{'': ['plotting_support/coast.mat', 'plotting_support/coast_centred.mat']}", 'data_files': "[('pyspod', ['pyspod/plotting_support/coast.mat']), ('pyspod', [\n 'pyspod/plotting_support/coast_centred.mat'])]", 'install_requires': 'REQUIRED', 'extras_require': 'EXTRAS', 'include_package_data': '(True)', 'zip_safe': '(False)', 'cmdclass': "{'upload': UploadCommand}"}), "(name=NAME, version=VERSION, description=\n 'Python Spectral Proper Orthogonal Decomposition', long_description=\n DESCR, author=AUTHOR, author_email=EMAIL, classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Mathematics'], keywords=KEYWORDS,\n url=URL, license='MIT', packages=[NAME], package_data={'': [\n 'plotting_support/coast.mat', 'plotting_support/coast_centred.mat']},\n data_files=[('pyspod', ['pyspod/plotting_support/coast.mat']), (\n 'pyspod', ['pyspod/plotting_support/coast_centred.mat'])],\n install_requires=REQUIRED, extras_require=EXTRAS, include_package_data=\n True, zip_safe=False, cmdclass={'upload': UploadCommand})\n", (2029, 2955), False, 'from setuptools import setup\n'), ((1176, 
1201), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1191, 1201), False, 'import os\n'), ((1849, 1889), 'os.system', 'os.system', (['"""python3 twine upload dist/*"""'], {}), "('python3 twine upload dist/*')\n", (1858, 1889), False, 'import os\n'), ((1973, 2001), 'os.system', 'os.system', (['"""git push --tags"""'], {}), "('git push --tags')\n", (1982, 2001), False, 'import os\n'), ((2004, 2014), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2012, 2014), False, 'import sys\n'), ((1580, 1605), 'os.path.join', 'os.path.join', (['CWD', '"""dist"""'], {}), "(CWD, 'dist')\n", (1592, 1605), False, 'import os\n')] |
import os
import json
import pickle
import collections
import numpy as np
from s2and.consts import CONFIG
# Root of the full S2AND data, read from s2and's configuration.
DATA_DIR = CONFIG["main_data_dir"]
# The miniaturized copies of the datasets are written under here.
OUTPUT_DIR = os.path.join(DATA_DIR, "s2and_mini")
if not os.path.exists(OUTPUT_DIR):
    os.mkdir(OUTPUT_DIR)
# excluding MEDLINE because it has no clusters
DATASETS = [
    "aminer",
    "arnetminer",
    "inspire",
    "kisti",
    "pubmed",
    "qian",
    "zbmath",
]
# Blocks with more signatures than this cutoff are dropped for speed.
BIG_BLOCK_CUTOFF = 500
# At most this many blocks are kept per dataset.
TOP_BLOCKS_TO_KEEP = 1000
# load all of the artifacts of each dataset
# The per-dataset lists below are aligned by index: element i of every
# list comes from DATASETS[i] (appended in the same loop iteration).
clusters_all = []
signatures_all = []
X_all = []  # SPECTER embedding matrices, one per dataset
keys_all = []  # paper ids aligned with the rows of the matching X
papers_all = []
for dataset in DATASETS:
    print()
    print(f"Loading data from {dataset}...")
    # Files are dispatched on substrings of their names, so each dataset
    # directory is expected to contain specter/cluster/paper/signature files.
    for file_name in os.listdir(os.path.join(DATA_DIR, dataset)):
        file_name = os.path.join(DATA_DIR, dataset, file_name)
        if "specter" in file_name:
            # Pickled tuple: (embedding matrix, per-row paper keys).
            with open(file_name, "rb") as _pickle_file:
                X, keys = pickle.load(_pickle_file)
            X_all.append(X)
            keys_all.append(keys)
        elif "cluster" in file_name:
            with open(file_name) as _json_file:
                clusters = json.load(_json_file)
            # Prefix cluster and signature ids with the dataset name so ids
            # stay unique when datasets are later mixed together.
            new_clusters = {}
            for cluster_id, v in clusters.items():
                new_cluster_id = f"{dataset}_{cluster_id}"
                new_v = {
                    "cluster_id": new_cluster_id,
                    "signature_ids": [f"{dataset}_{i}" for i in v["signature_ids"]],
                    "model_version": v["model_version"],
                }
                new_clusters[new_cluster_id] = new_v
            clusters_all.append(new_clusters)
        elif "paper" in file_name:
            with open(file_name) as _json_file:
                papers = json.load(_json_file)
            papers_all.append(papers)
        elif "signature" in file_name:
            with open(file_name) as _json_file:
                signatures = json.load(_json_file)
            # Same id-prefixing scheme as clusters above.
            new_signatures = {}
            for signature_id, v in signatures.items():
                new_signature_id = f"{dataset}_{signature_id}"
                new_v = {
                    "author_id": v["author_id"],  # maybe this needs to be prepended by dataset?
                    "paper_id": v["paper_id"],
                    "signature_id": new_signature_id,
                    "author_info": v["author_info"],
                }
                new_signatures[new_signature_id] = new_v
            signatures_all.append(new_signatures)
        else:
            print(f"WARNING: Ignoring {file_name} in {dataset}")
print("Finished loading data. Filtering...")
# the goal is speed so we'll remove the largest blocks
# also only keep top 1000 blocks max
# aminer has 32k, inspire has 15k, and kisti has 7k blocks
for dataset, s, c, p, X, k in zip(DATASETS, signatures_all, clusters_all, papers_all, X_all, keys_all):
    # Count how many signatures each block name has in this dataset.
    blocks = []
    for v in s.values():
        blocks.append(v["author_info"]["block"])
    vc = collections.Counter(blocks)
    # NOTE(review): sorted(vc.items()) orders by block NAME, so the
    # [:TOP_BLOCKS_TO_KEEP] slice keeps the alphabetically-first small
    # blocks rather than the most frequent ones — confirm intended.
    # (The comprehension's k/v shadow the outer loop's k temporarily.)
    blocks_to_keep = set([k for k, v in sorted(vc.items()) if v <= BIG_BLOCK_CUTOFF][:TOP_BLOCKS_TO_KEEP])
    s_filtered = {k: v for k, v in s.items() if v["author_info"]["block"] in blocks_to_keep}
    # filter the clusters too: keep only clusters whose every signature survived
    c_filtered = {k: v for k, v in c.items() if np.all([i in s_filtered for i in v["signature_ids"]])}
    # go back through the clusters and find the signatures we'll actually need
    # need to do this because sometimes the block name is just... corrupted
    # e.g. "g miller" for most signatures but "g mller" for one...
    signature_keys_to_keep = set()
    for v in c_filtered.values():
        signature_keys_to_keep.update(v["signature_ids"])
    s_filtered = {k: v for k, v in s.items() if k in signature_keys_to_keep}
    # we don't need all the papers anymore. just the ones in signatures
    # also the references of those
    paper_ids = set([v["paper_id"] for v in s_filtered.values()])
    ref_paper_ids = set()
    for v in p.values():
        if v["references"] is not None:
            ref_paper_ids.update(v["references"])
    # Paper dict keys are strings; paper_ids/ref_paper_ids hold ints.
    p_filtered = {k: v for k, v in p.items() if int(k) in paper_ids or int(k) in ref_paper_ids}
    # filter down the specters to those in papers only since we don't use specters for references
    keys_filtered_flag = np.array([i in paper_ids for i in k.astype(int)])
    k_filtered = k[keys_filtered_flag]
    X_filtered = X[keys_filtered_flag, :]
    # save all of the data, mirroring the original per-dataset file layout
    data_output_dir = os.path.join(DATA_DIR, "s2and_mini", dataset)
    if not os.path.exists(data_output_dir):
        os.mkdir(data_output_dir)
    with open(os.path.join(data_output_dir, f"{dataset}_clusters.json"), "w") as _json_file:
        json.dump(c_filtered, _json_file)
    with open(os.path.join(data_output_dir, f"{dataset}_signatures.json"), "w") as _json_file:
        json.dump(s_filtered, _json_file)
    with open(os.path.join(data_output_dir, f"{dataset}_papers.json"), "w") as _json_file:
        json.dump(p_filtered, _json_file)
    with open(os.path.join(data_output_dir, f"{dataset}_specter.pickle"), "wb") as _pickle_file:
        pickle.dump((X_filtered, k_filtered), _pickle_file)
| [
"os.path.exists",
"pickle.dump",
"os.path.join",
"pickle.load",
"collections.Counter",
"os.mkdir",
"json.load",
"numpy.all",
"json.dump"
] | [((156, 192), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""s2and_mini"""'], {}), "(DATA_DIR, 's2and_mini')\n", (168, 192), False, 'import os\n'), ((200, 226), 'os.path.exists', 'os.path.exists', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (214, 226), False, 'import os\n'), ((232, 252), 'os.mkdir', 'os.mkdir', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (240, 252), False, 'import os\n'), ((3052, 3079), 'collections.Counter', 'collections.Counter', (['blocks'], {}), '(blocks)\n', (3071, 3079), False, 'import collections\n'), ((4560, 4605), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""s2and_mini"""', 'dataset'], {}), "(DATA_DIR, 's2and_mini', dataset)\n", (4572, 4605), False, 'import os\n'), ((705, 736), 'os.path.join', 'os.path.join', (['DATA_DIR', 'dataset'], {}), '(DATA_DIR, dataset)\n', (717, 736), False, 'import os\n'), ((759, 801), 'os.path.join', 'os.path.join', (['DATA_DIR', 'dataset', 'file_name'], {}), '(DATA_DIR, dataset, file_name)\n', (771, 801), False, 'import os\n'), ((4617, 4648), 'os.path.exists', 'os.path.exists', (['data_output_dir'], {}), '(data_output_dir)\n', (4631, 4648), False, 'import os\n'), ((4658, 4683), 'os.mkdir', 'os.mkdir', (['data_output_dir'], {}), '(data_output_dir)\n', (4666, 4683), False, 'import os\n'), ((4786, 4819), 'json.dump', 'json.dump', (['c_filtered', '_json_file'], {}), '(c_filtered, _json_file)\n', (4795, 4819), False, 'import json\n'), ((4924, 4957), 'json.dump', 'json.dump', (['s_filtered', '_json_file'], {}), '(s_filtered, _json_file)\n', (4933, 4957), False, 'import json\n'), ((5058, 5091), 'json.dump', 'json.dump', (['p_filtered', '_json_file'], {}), '(p_filtered, _json_file)\n', (5067, 5091), False, 'import json\n'), ((5198, 5249), 'pickle.dump', 'pickle.dump', (['(X_filtered, k_filtered)', '_pickle_file'], {}), '((X_filtered, k_filtered), _pickle_file)\n', (5209, 5249), False, 'import pickle\n'), ((3360, 3415), 'numpy.all', 'np.all', (["[(i in s_filtered) for i in v['signature_ids']]"], {}), "([(i in 
s_filtered) for i in v['signature_ids']])\n", (3366, 3415), True, 'import numpy as np\n'), ((4699, 4756), 'os.path.join', 'os.path.join', (['data_output_dir', 'f"""{dataset}_clusters.json"""'], {}), "(data_output_dir, f'{dataset}_clusters.json')\n", (4711, 4756), False, 'import os\n'), ((4835, 4894), 'os.path.join', 'os.path.join', (['data_output_dir', 'f"""{dataset}_signatures.json"""'], {}), "(data_output_dir, f'{dataset}_signatures.json')\n", (4847, 4894), False, 'import os\n'), ((4973, 5028), 'os.path.join', 'os.path.join', (['data_output_dir', 'f"""{dataset}_papers.json"""'], {}), "(data_output_dir, f'{dataset}_papers.json')\n", (4985, 5028), False, 'import os\n'), ((5107, 5165), 'os.path.join', 'os.path.join', (['data_output_dir', 'f"""{dataset}_specter.pickle"""'], {}), "(data_output_dir, f'{dataset}_specter.pickle')\n", (5119, 5165), False, 'import os\n'), ((919, 944), 'pickle.load', 'pickle.load', (['_pickle_file'], {}), '(_pickle_file)\n', (930, 944), False, 'import pickle\n'), ((1127, 1148), 'json.load', 'json.load', (['_json_file'], {}), '(_json_file)\n', (1136, 1148), False, 'import json\n'), ((1772, 1793), 'json.load', 'json.load', (['_json_file'], {}), '(_json_file)\n', (1781, 1793), False, 'import json\n'), ((1952, 1973), 'json.load', 'json.load', (['_json_file'], {}), '(_json_file)\n', (1961, 1973), False, 'import json\n')] |
import matplotlib.pyplot as plt
from sdaudio.callables import Circular
from sdaudio.callables import Constant
from sdaudio import draw
from sdaudio import wavio
from sdaudio.wt_oscillators import Choruses
def main():
    """Render the sawtooth demos (with and without chorus, constant and
    ramped pitch) to WAV files and plot the two steady 60 Hz tones."""
    sample_rate = 8000
    duration = 7.0

    #-------------------------------------------------------------------------
    # Constant 60 Hz sawtooth, no chorus.
    print("Generating 60 Hz sawtooth, no chorus")
    freq_env = draw.line(sample_rate, duration, 60, 60)
    signal = draw.sawtooth(sample_rate, duration, Circular(freq_env), n=5)

    plt.figure()
    plt.plot(signal)
    plt.xlim([0, 3000])
    plt.grid(True)
    plt.title('Sawtooth, n = 5, no chorus')

    out_file = 'saw-no-chorus.wav'
    print("Writing: %s" % out_file)
    wavio.write(out_file, 0.666 * signal, sample_rate)

    #-------------------------------------------------------------------------
    # Same tone through a chorus of detuned wavetable oscillators.
    print("Generating 60 Hz sawtooth, with chorus")
    wavetable = draw.sawtooth(sample_rate, 1.0, Constant(1.0))
    # Only the final assignment was live in the original source; earlier
    # candidate ratio sets ([0.99, 1.0, 1.01], [0.97, 1.0, 1.03]) were
    # immediately overwritten.
    detune_ratios = [0.991234134, 1.012983475290375]
    oscillator = Choruses(sample_rate, wavetable, detune_ratios)
    signal = oscillator.generate(duration, Circular(freq_env))

    plt.figure()
    plt.plot(signal)
    plt.xlim([0, 3000])
    plt.grid(True)
    plt.title('Sawtooth, n = 5, with chorus')

    out_file = 'saw-with-chorus.wav'
    print("Writing: %s" % out_file)
    wavio.write(out_file, 0.666 * signal, sample_rate)

    #-------------------------------------------------------------------------
    # Frequency ramp 40 -> 200 Hz, rendered dry and chorused.
    print("Generating sawtooth ramp, no chorus")
    freq_env = draw.line(sample_rate, duration, 40, 200)
    signal = draw.sawtooth(sample_rate, duration, Circular(freq_env))
    out_file = 'saw-ramp-no-chorus.wav'
    print("Writing: %s" % out_file)
    wavio.write(out_file, 0.666 * signal, sample_rate)

    print("Generating sawtooth ramp, with chorus")
    signal = oscillator.generate(duration, Circular(freq_env))
    out_file = 'saw-ramp-with-chorus.wav'
    print("Writing: %s" % out_file)
    wavio.write(out_file, 0.666 * signal, sample_rate)

    plt.show()
# Run the demo only when executed directly, not when imported.
if __name__ == "__main__":
    main()
"sdaudio.draw.line",
"sdaudio.callables.Circular",
"matplotlib.pyplot.grid",
"sdaudio.wavio.write",
"sdaudio.wt_oscillators.Choruses",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"sdaudio.callables.Constant",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show"... | [((415, 441), 'sdaudio.draw.line', 'draw.line', (['sr', 'dur', '(60)', '(60)'], {}), '(sr, dur, 60, 60)\n', (424, 441), False, 'from sdaudio import draw\n'), ((503, 515), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (513, 515), True, 'import matplotlib.pyplot as plt\n'), ((520, 531), 'matplotlib.pyplot.plot', 'plt.plot', (['x'], {}), '(x)\n', (528, 531), True, 'import matplotlib.pyplot as plt\n'), ((536, 555), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 3000]'], {}), '([0, 3000])\n', (544, 555), True, 'import matplotlib.pyplot as plt\n'), ((560, 574), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (568, 574), True, 'import matplotlib.pyplot as plt\n'), ((579, 618), 'matplotlib.pyplot.title', 'plt.title', (['"""Sawtooth, n = 5, no chorus"""'], {}), "('Sawtooth, n = 5, no chorus')\n", (588, 618), True, 'import matplotlib.pyplot as plt\n'), ((689, 721), 'sdaudio.wavio.write', 'wavio.write', (['fout', '(0.666 * x)', 'sr'], {}), '(fout, 0.666 * x, sr)\n', (700, 721), False, 'from sdaudio import wavio\n'), ((1066, 1093), 'sdaudio.wt_oscillators.Choruses', 'Choruses', (['sr', 'table', 'chorus'], {}), '(sr, table, chorus)\n', (1074, 1093), False, 'from sdaudio.wt_oscillators import Choruses\n'), ((1143, 1155), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1153, 1155), True, 'import matplotlib.pyplot as plt\n'), ((1160, 1171), 'matplotlib.pyplot.plot', 'plt.plot', (['x'], {}), '(x)\n', (1168, 1171), True, 'import matplotlib.pyplot as plt\n'), ((1176, 1195), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 3000]'], {}), '([0, 3000])\n', (1184, 1195), True, 'import matplotlib.pyplot as plt\n'), ((1200, 1214), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1208, 1214), True, 'import matplotlib.pyplot as plt\n'), ((1219, 1260), 'matplotlib.pyplot.title', 'plt.title', (['"""Sawtooth, n = 5, with chorus"""'], {}), "('Sawtooth, n = 5, with chorus')\n", (1228, 1260), True, 'import 
matplotlib.pyplot as plt\n'), ((1333, 1365), 'sdaudio.wavio.write', 'wavio.write', (['fout', '(0.666 * x)', 'sr'], {}), '(fout, 0.666 * x, sr)\n', (1344, 1365), False, 'from sdaudio import wavio\n'), ((1525, 1552), 'sdaudio.draw.line', 'draw.line', (['sr', 'dur', '(40)', '(200)'], {}), '(sr, dur, 40, 200)\n', (1534, 1552), False, 'from sdaudio import draw\n'), ((1677, 1709), 'sdaudio.wavio.write', 'wavio.write', (['fout', '(0.666 * x)', 'sr'], {}), '(fout, 0.666 * x, sr)\n', (1688, 1709), False, 'from sdaudio import wavio\n'), ((1883, 1915), 'sdaudio.wavio.write', 'wavio.write', (['fout', '(0.666 * x)', 'sr'], {}), '(fout, 0.666 * x, sr)\n', (1894, 1915), False, 'from sdaudio import wavio\n'), ((1921, 1931), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1929, 1931), True, 'import matplotlib.pyplot as plt\n'), ((474, 489), 'sdaudio.callables.Circular', 'Circular', (['freqs'], {}), '(freqs)\n', (482, 489), False, 'from sdaudio.callables import Circular\n'), ((929, 942), 'sdaudio.callables.Constant', 'Constant', (['(1.0)'], {}), '(1.0)\n', (937, 942), False, 'from sdaudio.callables import Constant\n'), ((1121, 1136), 'sdaudio.callables.Circular', 'Circular', (['freqs'], {}), '(freqs)\n', (1129, 1136), False, 'from sdaudio.callables import Circular\n'), ((1585, 1600), 'sdaudio.callables.Circular', 'Circular', (['freqs'], {}), '(freqs)\n', (1593, 1600), False, 'from sdaudio.callables import Circular\n'), ((1789, 1804), 'sdaudio.callables.Circular', 'Circular', (['freqs'], {}), '(freqs)\n', (1797, 1804), False, 'from sdaudio.callables import Circular\n')] |
"""
<NAME>
<NAME>
<NAME>
<NAME>
CISC 204
Modelling project
Wed december 9th 2020
Professor Muise
"""
#Import
from nnf import Var
from nnf import Or
import nnf
from lib204 import Encoding
from csvReader import readCSV
class customer:
    """Bundle of one diner's restaurant preferences.

    Attributes (assigned verbatim from the constructor arguments):
        userprice: desired price bracket label ('low', 'med' or 'high')
        userdiet: list of dietary restriction labels
        userdine_opt: list of preferred dining option labels
        distance: acceptable travel-time bracket from campus
    """

    def __init__(self, price_opt, diet_opt, dine_opt, distance):
        # Store the raw selections; the constraint builders below read
        # these attributes directly.
        self.userprice = price_opt
        self.userdiet = diet_opt
        self.userdine_opt = dine_opt
        self.distance = distance
#Defining variables for encoding
#Each Var below is a propositional variable in the nnf encoding; a model
#assigns it True when the matching property holds for a restaurant.
#Price point variables ($, $$, $$$)
low = Var('low')
med = Var('med')
high = Var('high')
#Dietary restriction food options variables
vegetarian = Var('vegetarian')
vegan = Var('vegan')
gluten = Var('gluten')  #gluten-free option
lactose = Var('lactose')  #lactose-free option
#Dining variables
dine_in = Var('dine-in')
take_out = Var('take-out')
delivery = Var('delivery')
#Distance variables (travel time to campus, in minutes)
time_under_10 = Var('under 10')
time_10_to_20 = Var('10 to 20')
time_over_20 = Var('over 20')
#Constraints
def price_constraint(restaurant, customer):
    """Build the price-point constraint for one restaurant.

    Returns a formula that holds only when the restaurant's price tag
    ($, $$ or $$$) matches the customer's selected bracket; if the
    customer picked a bracket the restaurant does not match, an
    unsatisfiable formula (e.g. low & ~low) is returned so the theory
    fails for that restaurant.
    """
    # (customer label, restaurant tag, chosen var, the two excluded vars)
    brackets = (
        ("low", "$", low, med, high),
        ("med", "$$", med, high, low),
        ("high", "$$$", high, low, med),
    )
    for label, tag, chosen, other_a, other_b in brackets:
        if label in customer.userprice:
            if restaurant.price == tag:
                # Exactly this bracket holds; the other two are ruled out.
                return chosen & ~other_a & ~other_b
            # Mismatch: contradiction rejects this restaurant.
            return chosen & ~chosen
def single_diet_constraint(restaurant, customer):
    """Build the constraint for a customer with exactly one dietary
    restriction.

    restaurant.diet columns: 0 = vegan, 1 = vegetarian, 2 = gluten-free,
    3 = lactose-free. When the restaurant supports the restriction the
    matching conjunction is returned; otherwise an unsatisfiable formula
    (x & ~x) rejects the restaurant.
    """
    # (diet label, restaurant.diet column, satisfied formula, contradiction)
    table = (
        ('gluten', 2, gluten & ~vegan & ~vegetarian & ~lactose, ~gluten & gluten),
        ('lactose', 3, ~gluten & ~vegan & ~vegetarian & lactose, ~lactose & lactose),
        ('vegetarian', 1, ~gluten & ~vegan & vegetarian & ~lactose, ~vegetarian & vegetarian),
        ('vegan', 0, ~gluten & vegan & ~vegetarian & ~lactose, ~vegan & vegan),
    )
    for label, column, satisfied, contradiction in table:
        if label in customer.userdiet:
            if 'TRUE' in restaurant.diet[column]:
                return satisfied
            return contradiction
def two_diet_constraint(restaurant, customer):
    """Build the constraint for a customer with exactly two dietary
    restrictions.

    restaurant.diet columns: 0 = vegan, 1 = vegetarian, 2 = gluten-free,
    3 = lactose-free. When the restaurant supports both restrictions the
    matching conjunction is returned; otherwise an unsatisfiable formula
    (x & ~x) rejects the restaurant.
    """
    # vegetarian + vegan
    if ('vegetarian' in customer.userdiet) and ('vegan' in customer.userdiet):
        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[1]):
            return vegetarian & vegan & ~lactose & ~gluten
        else:
            return vegetarian & ~vegetarian
    # vegan + lactose-free
    elif ('vegan' in customer.userdiet) and ('lactose' in customer.userdiet):
        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[3]):
            return ~vegetarian & vegan & lactose & ~gluten
        else:
            return vegan & ~vegan
    # vegan + gluten-free (previous comment mislabelled this branch as
    # "vegetarian and gluten free"; the code tests vegan)
    elif ('vegan' in customer.userdiet) and ('gluten' in customer.userdiet):
        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[2]):
            return ~vegetarian & vegan & ~lactose & gluten
        else:
            return vegan & ~vegan
    # gluten-free + lactose-free
    elif ('gluten' in customer.userdiet) and ('lactose' in customer.userdiet):
        if ('TRUE' in restaurant.diet[2]) and ('TRUE' in restaurant.diet[3]):
            return ~vegetarian & ~vegan & lactose & gluten
        else:
            return gluten & ~gluten
    # gluten-free + vegetarian
    # BUG FIX: the membership test previously used the misspelling
    # 'vegitarian', which never appears in customer.userdiet (main appends
    # 'vegetarian'), so this branch was dead and the function fell through
    # returning None.
    elif ('gluten' in customer.userdiet) and ('vegetarian' in customer.userdiet):
        if ('TRUE' in restaurant.diet[2]) and ('TRUE' in restaurant.diet[1]):
            return vegetarian & ~vegan & ~lactose & gluten
        else:
            return gluten & ~gluten
    # lactose-free + vegetarian
    # BUG FIX: same 'vegitarian' misspelling as above, and the restaurant
    # check read diet[1] twice instead of diet[3] (lactose) and diet[1]
    # (vegetarian).
    elif ('lactose' in customer.userdiet) and ('vegetarian' in customer.userdiet):
        if ('TRUE' in restaurant.diet[3]) and ('TRUE' in restaurant.diet[1]):
            return vegetarian & ~vegan & lactose & ~gluten
        else:
            return lactose & ~lactose
def three_diet_constraint(restaurant, customer):
    """Build the constraint for a customer with exactly three dietary
    restrictions.

    restaurant.diet columns: 0 = vegan, 1 = vegetarian, 2 = gluten-free,
    3 = lactose-free. When the restaurant supports all three restrictions
    the matching conjunction is returned; otherwise an unsatisfiable
    formula rejects the restaurant.
    """
    # vegetarian + vegan + gluten-free
    if ('vegetarian' in customer.userdiet) and ('vegan' in customer.userdiet) and ('gluten' in customer.userdiet):
        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[1]) and ('TRUE' in restaurant.diet[2]):
            return vegetarian & vegan & ~lactose & gluten
        else:
            return vegetarian & ~vegetarian
    # vegetarian + vegan + lactose-free
    elif ('vegetarian' in customer.userdiet) and ('vegan' in customer.userdiet) and ('lactose' in customer.userdiet):
        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[1]) and ('TRUE' in restaurant.diet[3]):
            return vegetarian & vegan & lactose & ~gluten
        else:
            return vegetarian & ~vegetarian
    # gluten-free + vegan + lactose-free
    # BUG FIX: this branch previously checked restaurant.diet[1]
    # (vegetarian) instead of diet[0] (vegan).
    elif ('gluten' in customer.userdiet) and ('vegan' in customer.userdiet) and ('lactose' in customer.userdiet):
        if ('TRUE' in restaurant.diet[2]) and ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[3]):
            return ~vegetarian & vegan & lactose & gluten
        else:
            return vegetarian & ~vegetarian
    # vegetarian + gluten-free + lactose-free
    # BUG FIX: this combination was missing entirely, so the function
    # returned None for it and crashed downstream in add_constraint.
    elif ('vegetarian' in customer.userdiet) and ('gluten' in customer.userdiet) and ('lactose' in customer.userdiet):
        if ('TRUE' in restaurant.diet[1]) and ('TRUE' in restaurant.diet[2]) and ('TRUE' in restaurant.diet[3]):
            return vegetarian & ~vegan & lactose & gluten
        else:
            return vegetarian & ~vegetarian
def all_diet_constraint(restaurant, customer):
    """Build the constraint for a customer holding all four dietary
    restrictions.

    Returns the four-way conjunction when the restaurant supports every
    restriction (restaurant.diet columns 0-3 all 'TRUE'), an
    unsatisfiable formula when it does not, and None when the customer
    did not actually select all four restrictions.
    """
    required = ('vegetarian', 'vegan', 'gluten', 'lactose')
    if all(label in customer.userdiet for label in required):
        if all('TRUE' in restaurant.diet[column] for column in range(4)):
            return vegetarian & vegan & lactose & gluten
        # Restaurant misses at least one restriction: contradiction.
        return vegetarian & ~vegetarian
def one_dining_constraints(restaurant, customer):
    """Build the constraint for a customer with a single dining option.

    restaurant.delivery columns: 0 = dine-in, 1 = take-out, 2 = delivery.
    Returns the matching variable when the restaurant offers the option,
    and an unsatisfiable formula otherwise.
    """
    # (option label, restaurant.delivery column, matching variable)
    options = (
        ('dine-in', 0, dine_in),
        ('take-out', 1, take_out),
        ('delivery', 2, delivery),
    )
    for label, column, flag in options:
        if label in customer.userdine_opt:
            if restaurant.delivery[column] == 'TRUE':
                return flag
            # Not offered: contradiction rejects this restaurant.
            return ~flag & flag
def two_dining_constraints(restaurant, customer):
    """Build the constraint for a customer with two dining options.

    restaurant.delivery columns: 0 = dine-in, 1 = take-out, 2 = delivery.
    Returns the matching conjunction when the restaurant offers both
    options, and an unsatisfiable formula otherwise.
    """
    # (option A, option B, column A, column B, satisfied formula)
    pairs = (
        ('dine-in', 'take-out', 0, 1, dine_in & take_out & ~delivery),
        ('dine-in', 'delivery', 0, 2, dine_in & ~take_out & delivery),
        ('take-out', 'delivery', 1, 2, ~dine_in & take_out & delivery),
    )
    for first, second, col_a, col_b, satisfied in pairs:
        if (first in customer.userdine_opt) and (second in customer.userdine_opt):
            if restaurant.delivery[col_a] == 'TRUE' and restaurant.delivery[col_b] == 'TRUE':
                return satisfied
            # Same contradiction the original returned for every pair.
            return ~dine_in & dine_in
def all_dining_constraints(restaurant, customer):
    """Build the constraint for a customer wanting dine-in, take-out and
    delivery all at once.

    Returns the three-way conjunction when the restaurant offers all of
    them (restaurant.delivery columns 0-2 all 'TRUE'), an unsatisfiable
    formula when it does not, and None when the customer did not select
    all three options.
    """
    wanted = ('take-out', 'delivery', 'dine-in')
    if all(option in customer.userdine_opt for option in wanted):
        if all(restaurant.delivery[column] == 'TRUE' for column in range(3)):
            return dine_in & take_out & delivery
        return ~dine_in & dine_in
def distanceConstraint(restaurant, customer):
    """Build the travel-time constraint for one restaurant.

    restaurant.distance columns: 0 = under 10 min, 1 = 10-20 min,
    2 = over 20 min (from campus). Returns the matching bucket formula
    when the restaurant falls in the customer's chosen bucket, and an
    unsatisfiable formula otherwise.
    """
    # (customer label, column, satisfied formula, contradiction)
    buckets = (
        ('under 10', 0, time_under_10 & ~time_10_to_20 & ~time_over_20, time_under_10 & ~time_under_10),
        ('10 to 20', 1, time_10_to_20 & ~time_under_10 & ~time_over_20, time_10_to_20 & ~time_10_to_20),
        ('over 20', 2, time_over_20 & ~time_10_to_20 & ~time_under_10, time_over_20 & ~time_over_20),
    )
    for label, column, satisfied, contradiction in buckets:
        if customer.distance == label:
            if restaurant.distance[column] == 'TRUE':
                return satisfied
            return contradiction
def example_theory(restaurant, customer):
    """Assemble the constraint encoding for one (restaurant, customer) pair.

    Parameters: Restaurant object and Customer object.
    Returns: an Encoding holding the distance, price, dining and diet
    constraints for this pairing.
    """
    # Shorter variables for the objects
    r = restaurant
    c = customer
    # Defining encoding variable
    E = Encoding()
    # Distance and price constraints always apply.
    E.add_constraint(distanceConstraint(r, c))
    E.add_constraint(price_constraint(r, c))
    # BUG FIX: this function previously read the module-level global
    # `user` instead of its own `customer` argument below, so it only
    # worked when the caller's variable happened to be named `user`.
    # Dining constraints: dispatch on how many options were selected.
    if len(c.userdine_opt) == 1:
        E.add_constraint(one_dining_constraints(r, c))
    elif len(c.userdine_opt) == 2:
        E.add_constraint(two_dining_constraints(r, c))
    elif len(c.userdine_opt) == 3:
        E.add_constraint(all_dining_constraints(r, c))
    # Diet constraints: dispatch on how many restrictions were selected.
    if len(c.userdiet) == 1:
        # NOTE(review): the `5 in userdiet` guard ("no restrictions" menu
        # option) compares an int against a list of strings, so it is
        # always False here; kept verbatim for behavioural parity.
        if 5 in c.userdiet:
            pass
        else:
            E.add_constraint(single_diet_constraint(r, c))
    elif len(c.userdiet) == 2:
        E.add_constraint(two_diet_constraint(r, c))
    elif len(c.userdiet) == 3:
        E.add_constraint(three_diet_constraint(r, c))
    elif len(c.userdiet) == 4:
        E.add_constraint(all_diet_constraint(r, c))
    # return the Encoding variable
    return E
"""
Main method: Where the implementation happens. The theory gets solved
where a sorted list from best result to worst result is displayed
to the screen.
The user also inputs their prefrences
"""
if __name__ == "__main__":
    # Interactive entry point: collect the user's preferences and rank the
    # restaurants from best match to worst.
    flag = True
    restaurant_list = readCSV()
    # Prompt loop: one full preference round per iteration.
    while flag:
        # Ask whether to continue or exit.
        prog_exit = input('Welcome to the Queens restuarant finder! Press Q to quit or enter to continue.\n')
        if prog_exit.lower() == 'q':
            break
        # Price range selection.
        user_price = int(input('Please select a price range: \n 1. $ - most affordable'
                               '\n 2. $$ - intermediate \n 3. $$$ - most expensive\n'))
        if user_price in [1, 2, 3]:
            if user_price == 1:
                price = 'low'
                print('You selected $.')
            elif user_price == 2:
                price = 'med'
                print('You selected $$.')
            else:
                price = 'high'
                print('You selected $$$')
        else:
            print('Invalid input: Must be either option 1, 2 or 3')
            # BUGFIX: execution used to fall through with `price` unbound and
            # crash later with NameError; restart the prompt loop instead.
            continue
        # Dietary restrictions (comma separated when selecting multiple).
        user_restrictions_in = input('Please select the following diet restrictions '
                                     '(please separate by a comma if selecting multiple):'
                                     ' \n 1. Vegan \n 2. Vegetarian \n 3. Gluten-free \n'
                                     ' 4. lactose intolerant \n 5. No restrictions\n')
        user_selected_restrictions = user_restrictions_in.split(',')
        # Convert the raw string selections into integers.
        for entry in range(len(user_selected_restrictions)):
            user_selected_restrictions[entry] = int(user_selected_restrictions[entry])
        # Map the numeric selections onto diet tags.
        diet = []
        if 1 in user_selected_restrictions:
            diet.append('vegan')
        if 2 in user_selected_restrictions:
            diet.append('vegetarian')
        if 3 in user_selected_restrictions:
            diet.append('gluten')
        if 4 in user_selected_restrictions:
            diet.append('lactose')
        # Dining options (comma separated when selecting multiple).
        user_dine_option = input('Please select a dining option. If multiple separate by a comma: \n 1. Dine-in \n 2. Take-out\n 3. Delivery\n')
        dine_in_list = user_dine_option.split(',')
        final_list = []
        if '1' in dine_in_list:
            final_list.append('dine-in')
        if '2' in dine_in_list:
            final_list.append('take-out')
        if '3' in dine_in_list:
            final_list.append('delivery')
        # Distance from Queens campus.
        user_distance_option = int(input('Please select a distance from Queens campus:'
                                         ' \n 1. Under 10 minutes \n 2. Between 10 and 20 minutes \n 3. Over 20 minutes\n'))
        if user_distance_option == 1:
            distance = 'under 10'
        elif user_distance_option == 2:
            distance = '10 to 20'
        else:
            distance = 'over 20'
        # Bundle the preferences into a customer object for the solver.
        user = customer(price, diet, final_list, distance)
        # Rank restaurants: satisfiable theories (good fits) go to the front
        # of the list, unsatisfiable ones to the back.
        finalListR = []
        for entry in restaurant_list:
            # Build the theory once per restaurant and check satisfiability.
            # (Previously it was built twice: once unused, once for the check.)
            y = example_theory(entry, user).is_satisfiable()
            if y == True:
                finalListR.insert(0, entry.name)
            else:
                finalListR.insert(len(finalListR), entry.name)
        # Display the results, best fit first, with a star rating per rank band.
        for i in range(len(finalListR)):
            if i < 4:
                print(f"{i + 1}. %s" % finalListR[i] + ' ' + '★ ★ ★ ★ ★')
            elif i >= 4 and i < 7:
                print(f"{i + 1}. %s" % finalListR[i] + ' ' + '★ ★ ★ ★')
            # BUGFIX: the next two bands used `i <= 7` / `i <= 11` where
            # `i >= 7` / `i >= 11` was intended, so ranks 9-11 and 13-15
            # landed in the wrong star band.
            elif i >= 7 and i < 11:
                print(f"{i + 1}. %s" % finalListR[i] + ' ' + '★ ★ ★')
            elif i >= 11 and i < 15:
                print(f"{i + 1}. %s" % finalListR[i] + ' ' + '★ ★')
else:
print(f"{i + 1}. %s" % finalListR[i] + ' ' + '★') | [
"csvReader.readCSV",
"nnf.Var",
"lib204.Encoding"
] | [((749, 759), 'nnf.Var', 'Var', (['"""low"""'], {}), "('low')\n", (752, 759), False, 'from nnf import Var\n'), ((766, 776), 'nnf.Var', 'Var', (['"""med"""'], {}), "('med')\n", (769, 776), False, 'from nnf import Var\n'), ((784, 795), 'nnf.Var', 'Var', (['"""high"""'], {}), "('high')\n", (787, 795), False, 'from nnf import Var\n'), ((854, 871), 'nnf.Var', 'Var', (['"""vegetarian"""'], {}), "('vegetarian')\n", (857, 871), False, 'from nnf import Var\n'), ((880, 892), 'nnf.Var', 'Var', (['"""vegan"""'], {}), "('vegan')\n", (883, 892), False, 'from nnf import Var\n'), ((902, 915), 'nnf.Var', 'Var', (['"""gluten"""'], {}), "('gluten')\n", (905, 915), False, 'from nnf import Var\n'), ((926, 940), 'nnf.Var', 'Var', (['"""lactose"""'], {}), "('lactose')\n", (929, 940), False, 'from nnf import Var\n'), ((970, 984), 'nnf.Var', 'Var', (['"""dine-in"""'], {}), "('dine-in')\n", (973, 984), False, 'from nnf import Var\n'), ((996, 1011), 'nnf.Var', 'Var', (['"""take-out"""'], {}), "('take-out')\n", (999, 1011), False, 'from nnf import Var\n'), ((1023, 1038), 'nnf.Var', 'Var', (['"""delivery"""'], {}), "('delivery')\n", (1026, 1038), False, 'from nnf import Var\n'), ((1076, 1091), 'nnf.Var', 'Var', (['"""under 10"""'], {}), "('under 10')\n", (1079, 1091), False, 'from nnf import Var\n'), ((1108, 1123), 'nnf.Var', 'Var', (['"""10 to 20"""'], {}), "('10 to 20')\n", (1111, 1123), False, 'from nnf import Var\n'), ((1139, 1153), 'nnf.Var', 'Var', (['"""over 20"""'], {}), "('over 20')\n", (1142, 1153), False, 'from nnf import Var\n'), ((11810, 11820), 'lib204.Encoding', 'Encoding', ([], {}), '()\n', (11818, 11820), False, 'from lib204 import Encoding\n'), ((13063, 13072), 'csvReader.readCSV', 'readCSV', ([], {}), '()\n', (13070, 13072), False, 'from csvReader import readCSV\n')] |
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
def Compute_Block(cell_gradient_box):
    """Assemble the final HOG descriptor from the per-cell histograms.

    Slides a 2x2 window over the cell grid; each window's four histograms
    are concatenated into a 36-dim block, L2-normalized, and written into
    the output vector. Relies on the module-level `bin_size` global.
    """
    block_rows = cell_gradient_box.shape[0] - 1
    block_cols = cell_gradient_box.shape[1] - 1
    hog_vector = np.zeros(bin_size * 4 * block_rows * block_cols)
    block_index = 0
    for r in range(block_rows):
        for c in range(block_cols):
            # Four neighboring cell histograms -> one block vector.
            block = np.concatenate([
                cell_gradient_box[r][c],
                cell_gradient_box[r][c + 1],
                cell_gradient_box[r + 1][c],
                cell_gradient_box[r + 1][c + 1],
            ])
            # L2 normalization; a tiny epsilon keeps the denominator
            # non-zero even for an all-zero block.
            squared_sum = (block * block).sum()
            denominator = np.power(squared_sum, 0.5) + np.power(0.0001, 2)
            block = block / denominator
            hog_vector[36 * block_index:36 * (block_index + 1)] = block
            block_index += 1
    return hog_vector
# Compute the orientation histogram of a single cell.
def Cell_Gradient(cell_mag, cell_angle):
    """Return the `bin_size`-bin orientation histogram of one cell.

    Bin centers sit at 0, 20, ..., 160 degrees. A magnitude whose angle
    falls exactly on a center goes entirely into that bin (180 wraps back
    to bin 0); otherwise it is split linearly between the two nearest
    bins. Relies on the module-level `cell_size` and `bin_size` globals.
    """
    histogram = np.zeros(bin_size)
    for row in range(cell_size):
        for col in range(cell_size):
            magnitude = cell_mag[row][col]
            theta = cell_angle[row][col]
            if theta % 20 == 0:
                # Exactly on a bin center; `% bin_size` folds 180 into bin 0.
                histogram[int(theta / 20) % bin_size] += magnitude
            else:
                lower_bin = int(theta / 20) % bin_size
                # `% bin_size` wraps bin 8 + 1 back around to bin 0.
                upper_bin = (lower_bin + 1) % bin_size
                remainder = theta % 20
                # Linear vote split between the two neighboring bins.
                histogram[lower_bin] += magnitude * (1 - (remainder / 20))
                histogram[upper_bin] += magnitude * (remainder / 20)
    return histogram
def Computer_Cell(mag, angle):
    """Partition the 128x64 gradient maps into cells and histogram each one.

    Returns an array of shape (128 // cell_size, 64 // cell_size, bin_size);
    with the default cell_size of 8 that is (16, 8, bin_size).
    """
    grid_rows = int(128 / cell_size)
    grid_cols = int(64 / cell_size)
    cell_gradient_box = np.zeros((grid_rows, grid_cols, bin_size))
    # Row-major sweep over the cell grid (left-to-right, top-to-bottom).
    for r in range(grid_rows):
        for c in range(grid_cols):
            row_slice = slice(r * cell_size, (r + 1) * cell_size)
            col_slice = slice(c * cell_size, (c + 1) * cell_size)
            cell_gradient_box[r][c] = Cell_Gradient(mag[row_slice, col_slice],
                                                    angle[row_slice, col_slice])
    return cell_gradient_box
def Compute_Sobel(img):
    """Compute the per-pixel gradient magnitude and orientation of `img`.

    Applies ksize=1 Sobel kernels in x and y, then converts the two
    gradient components to polar form. cv2.cartToPolar returns angles in
    [0, 360) degrees; HOG uses unsigned gradient orientations, so angles
    above 180 are folded back by subtracting 180.

    Returns:
        (mag, angle): float64 arrays with the same shape as `img`.
    """
    gradient_values_x = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=1)
    gradient_values_y = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=1)
    mag, angle = cv2.cartToPolar(gradient_values_x, gradient_values_y, angleInDegrees=True)
    # Fold [0, 360) into [0, 180]. Vectorized replacement for the original
    # per-pixel Python double loop -- identical result, far faster.
    angle[angle > 180] -= 180
    return mag, angle
# Image_Pretreatment: image preprocessing.
def Image_Pretreatment(img):
    """Resize the input to the canonical 64x128 HOG window as float32.

    Note: alternative normalizations (divide by max, divide by 255, gamma
    correction) were experimented with here; the current pipeline keeps
    the raw resized intensities.
    """
    window = cv2.resize(img, (64, 128), interpolation=cv2.INTER_CUBIC)
    return np.float32(window)
# Histogram_of_Oriented_Gradients: full HOG pipeline.
def Histogram_of_Oriented_Gradients():
    """Run the full HOG pipeline on the module-level input_image_path.

    Returns the concatenated, block-normalized HOG feature vector.
    """
    # Read the image as grayscale (flag 0).
    img = cv2.imread(input_image_path,0)
    # Preprocess: resize to 64x128 and convert to float32.
    img_finshed = Image_Pretreatment(img)
    # Sobel gradients: per-pixel magnitude and orientation.
    mag, angle = Compute_Sobel(img_finshed)
    # Per-cell orientation histograms.
    cell_gradient_box = Computer_Cell(mag, angle)
    # 2x2 block normalization and concatenation into the final vector.
    hog_vector = Compute_Block(cell_gradient_box)
    return hog_vector
if __name__ == '__main__':
    # Pick the input image; alternative test images are kept commented out.
    #input_image_path = (r'路徑\檔名.png')
    this_file_path = os.getcwd() # current working directory
    #input_image_path = (r'{}\running_man_1.png'.format(this_file_path))
    input_image_path = (r'{}\running_man_2.png'.format(this_file_path))
    #input_image_path = (r'{}\running_man_3.png'.format(this_file_path))
    #input_image_path = (r'{}\running_man_4.png'.format(this_file_path))
    #input_image_path = (r'{}\running_man_5.png'.format(this_file_path))
    #input_image_path = (r'{}\running_man_6.png'.format(this_file_path))
    #input_image_path = (r'{}\running_man_7.png'.format(this_file_path))
    #input_image_path = (r'{}\landscape.png'.format(this_file_path))
    # HOG parameters, read as module-level globals by the functions above.
    bin_size = 9
    cell_size = 8
    # Run the pipeline.
    hog_vector = Histogram_of_Oriented_Gradients() # resulting HOG feature vector
    # Report the length of the output vector.
    print ("輸出HOG長度為{}".format(hog_vector.shape[0]))
    # Visualize the HOG feature vector as a bar chart.
    x = np.arange(hog_vector.shape[0])
    plt.title('HOG')
    plt.bar(x,hog_vector,color='red')
    #plt.savefig(r'{}\running_man_1_result.png'.format(this_file_path))
    #plt.savefig(r'路徑\檔名.png')
    plt.show()
| [
"numpy.arange",
"numpy.power",
"cv2.cartToPolar",
"os.getcwd",
"numpy.zeros",
"matplotlib.pyplot.bar",
"numpy.concatenate",
"matplotlib.pyplot.title",
"cv2.resize",
"cv2.imread",
"numpy.float32",
"cv2.Sobel",
"matplotlib.pyplot.show"
] | [((141, 238), 'numpy.zeros', 'np.zeros', (['(bin_size * 4 * (cell_gradient_box.shape[0] - 1) * (cell_gradient_box.shape\n [1] - 1))'], {}), '(bin_size * 4 * (cell_gradient_box.shape[0] - 1) * (\n cell_gradient_box.shape[1] - 1))\n', (149, 238), True, 'import numpy as np\n'), ((1537, 1555), 'numpy.zeros', 'np.zeros', (['bin_size'], {}), '(bin_size)\n', (1545, 1555), True, 'import numpy as np\n'), ((3617, 3658), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': '(1)'}), '(img, cv2.CV_64F, 1, 0, ksize=1)\n', (3626, 3658), False, 'import cv2\n'), ((3684, 3725), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_64F', '(0)', '(1)'], {'ksize': '(1)'}), '(img, cv2.CV_64F, 0, 1, ksize=1)\n', (3693, 3725), False, 'import cv2\n'), ((3746, 3820), 'cv2.cartToPolar', 'cv2.cartToPolar', (['gradient_values_x', 'gradient_values_y'], {'angleInDegrees': '(True)'}), '(gradient_values_x, gradient_values_y, angleInDegrees=True)\n', (3761, 3820), False, 'import cv2\n'), ((4971, 5028), 'cv2.resize', 'cv2.resize', (['img', '(64, 128)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, (64, 128), interpolation=cv2.INTER_CUBIC)\n', (4981, 5028), False, 'import cv2\n'), ((5049, 5071), 'numpy.float32', 'np.float32', (['img_resize'], {}), '(img_resize)\n', (5059, 5071), True, 'import numpy as np\n'), ((5889, 5920), 'cv2.imread', 'cv2.imread', (['input_image_path', '(0)'], {}), '(input_image_path, 0)\n', (5899, 5920), False, 'import cv2\n'), ((6339, 6350), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6348, 6350), False, 'import os\n'), ((7200, 7230), 'numpy.arange', 'np.arange', (['hog_vector.shape[0]'], {}), '(hog_vector.shape[0])\n', (7209, 7230), True, 'import numpy as np\n'), ((7236, 7252), 'matplotlib.pyplot.title', 'plt.title', (['"""HOG"""'], {}), "('HOG')\n", (7245, 7252), True, 'import matplotlib.pyplot as plt\n'), ((7258, 7293), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'hog_vector'], {'color': '"""red"""'}), "(x, hog_vector, color='red')\n", (7265, 7293), True, 
'import matplotlib.pyplot as plt\n'), ((7403, 7413), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7411, 7413), True, 'import matplotlib.pyplot as plt\n'), ((371, 507), 'numpy.concatenate', 'np.concatenate', (['[cell_gradient_box[i][j], cell_gradient_box[i][j + 1], cell_gradient_box[i +\n 1][j], cell_gradient_box[i + 1][j + 1]]'], {}), '([cell_gradient_box[i][j], cell_gradient_box[i][j + 1],\n cell_gradient_box[i + 1][j], cell_gradient_box[i + 1][j + 1]])\n', (385, 507), True, 'import numpy as np\n'), ((877, 899), 'numpy.power', 'np.power', (['L2_norm', '(0.5)'], {}), '(L2_norm, 0.5)\n', (885, 899), True, 'import numpy as np\n'), ((924, 943), 'numpy.power', 'np.power', (['(0.0001)', '(2)'], {}), '(0.0001, 2)\n', (932, 943), True, 'import numpy as np\n')] |
import sys

n = int(sys.stdin.readline().strip())
# Count length-n binary strings that start with 1 and contain no two
# consecutive 1s. dp[i][0] / dp[i][1] hold the number of valid length-i
# strings ending in 0 / ending in 1.
dp = [[0, 0] for _ in range(n + 1)]
dp[1][1] = 1
for length in range(2, n + 1):
    zeros, ones = dp[length - 1]
    # A 0 may follow anything; a 1 may only follow a 0.
    dp[length][0] = zeros + ones
    dp[length][1] = zeros
sys.stdout.write(str(sum(dp[n])))
| [
"sys.stdin.readline"
] | [((20, 40), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (38, 40), False, 'import sys\n')] |
from django.conf import settings
from django.urls import path, register_converter
from mapentity.registry import registry
from rest_framework.routers import DefaultRouter
from geotrek.common.urls import LangConverter
from geotrek.feedback import models as feedback_models
from .views import CategoryList, FeedbackOptionsView, ReportAPIViewSet
# Register the <lang:...> URL converter used in the routes below.
register_converter(LangConverter, 'lang')
# Application namespace for URL reversing (e.g. 'feedback:categories_json').
app_name = 'feedback'
urlpatterns = [
    path('api/<lang:lang>/feedback/categories.json', CategoryList.as_view(), name="categories_json"),
    path('api/<lang:lang>/feedback/options.json', FeedbackOptionsView.as_view(), name="options_json"),
]
# REST endpoints for reports; trailing_slash=False keeps the URLs bare.
router = DefaultRouter(trailing_slash=False)
router.register(r'^api/(?P<lang>[a-z]{2})/reports', ReportAPIViewSet, basename='report')
urlpatterns += router.urls
# MapEntity registers the generic views for the Report model.
urlpatterns += registry.register(feedback_models.Report, menu=settings.REPORT_MODEL_ENABLED)
| [
"mapentity.registry.registry.register",
"rest_framework.routers.DefaultRouter",
"django.urls.register_converter"
] | [((346, 387), 'django.urls.register_converter', 'register_converter', (['LangConverter', '"""lang"""'], {}), "(LangConverter, 'lang')\n", (364, 387), False, 'from django.urls import path, register_converter\n'), ((644, 679), 'rest_framework.routers.DefaultRouter', 'DefaultRouter', ([], {'trailing_slash': '(False)'}), '(trailing_slash=False)\n', (657, 679), False, 'from rest_framework.routers import DefaultRouter\n'), ((811, 888), 'mapentity.registry.registry.register', 'registry.register', (['feedback_models.Report'], {'menu': 'settings.REPORT_MODEL_ENABLED'}), '(feedback_models.Report, menu=settings.REPORT_MODEL_ENABLED)\n', (828, 888), False, 'from mapentity.registry import registry\n')] |
from unittest import TestCase
from unittest.mock import MagicMock, patch
from openslides.utils import views
@patch('builtins.super')
class SingleObjectMixinTest(TestCase):
    # builtins.super is patched for every test in this class, so all
    # super() calls inside SingleObjectMixin resolve to `mock_super`.
    def test_get_object_cache(self, mock_super):
        """
        Test that the method get_object caches its result.
        Tests that get_object from the django view is only called once, even if
        get_object on our class is called twice.
        """
        view = views.SingleObjectMixin()
        view.get_object()
        view.get_object()
        # Only one pass-through to the (mocked) parent implementation.
        mock_super().get_object.assert_called_once_with()
    def test_dispatch_with_existin_object(self, mock_super):
        # When `object` is already set, dispatch must keep it untouched and
        # must not re-fetch it.
        view = views.SingleObjectMixin()
        view.object = 'old_object'
        view.get_object = MagicMock()
        view.dispatch()
        mock_super().dispatch.assert_called_with()
        self.assertEqual(
            view.object,
            'old_object',
            "view.object should not be changed")
        self.assertFalse(
            view.get_object.called,
            "view.get_object() should not be called")
    def test_dispatch_without_existin_object(self, mock_super):
        # When `object` is missing, dispatch must populate it via get_object.
        view = views.SingleObjectMixin()
        view.get_object = MagicMock(return_value='new_object')
        view.dispatch()
        mock_super().dispatch.assert_called_with()
        self.assertEqual(
            view.object,
            'new_object',
            "view.object should be changed")
        self.assertTrue(
            view.get_object.called,
            "view.get_object() should be called")
class TestAPIView(TestCase):
    def test_class_creation(self):
        """
        Verify that APIView defines a handler for every HTTP verb and does
        not expose the internal 'method_call' helper.
        """
        expected_verbs = {'get', 'post', 'put', 'patch', 'delete', 'head', 'options', 'trace'}
        defined_names = views.APIView.__dict__
        self.assertTrue(
            expected_verbs.issubset(defined_names),
            "All http methods should be defined in the APIView")
        self.assertFalse(
            hasattr(views.APIView, 'method_call'),
            "The APIView should not have the method 'method_call'")
class TestCSRFMixin(TestCase):
    @patch('builtins.super')
    def test_as_view(self, mock_super):
        """
        Tests that ensure_csrf_cookie is applied to the view returned by
        the parent class's as_view().
        """
        # The parent as_view() is mocked to return a sentinel string so we
        # can assert it is passed to the CSRF decorator unchanged.
        mock_super().as_view.return_value = 'super_view'
        with patch('openslides.utils.views.ensure_csrf_cookie') as ensure_csrf_cookie:
            views.CSRFMixin.as_view()
            ensure_csrf_cookie.assert_called_once_with('super_view')
| [
"unittest.mock.MagicMock",
"openslides.utils.views.SingleObjectMixin",
"unittest.mock.patch",
"openslides.utils.views.CSRFMixin.as_view"
] | [((112, 135), 'unittest.mock.patch', 'patch', (['"""builtins.super"""'], {}), "('builtins.super')\n", (117, 135), False, 'from unittest.mock import MagicMock, patch\n'), ((2141, 2164), 'unittest.mock.patch', 'patch', (['"""builtins.super"""'], {}), "('builtins.super')\n", (2146, 2164), False, 'from unittest.mock import MagicMock, patch\n'), ((452, 477), 'openslides.utils.views.SingleObjectMixin', 'views.SingleObjectMixin', ([], {}), '()\n', (475, 477), False, 'from openslides.utils import views\n'), ((667, 692), 'openslides.utils.views.SingleObjectMixin', 'views.SingleObjectMixin', ([], {}), '()\n', (690, 692), False, 'from openslides.utils import views\n'), ((754, 765), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (763, 765), False, 'from unittest.mock import MagicMock, patch\n'), ((1165, 1190), 'openslides.utils.views.SingleObjectMixin', 'views.SingleObjectMixin', ([], {}), '()\n', (1188, 1190), False, 'from openslides.utils import views\n'), ((1217, 1253), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '"""new_object"""'}), "(return_value='new_object')\n", (1226, 1253), False, 'from unittest.mock import MagicMock, patch\n'), ((2349, 2399), 'unittest.mock.patch', 'patch', (['"""openslides.utils.views.ensure_csrf_cookie"""'], {}), "('openslides.utils.views.ensure_csrf_cookie')\n", (2354, 2399), False, 'from unittest.mock import MagicMock, patch\n'), ((2435, 2460), 'openslides.utils.views.CSRFMixin.as_view', 'views.CSRFMixin.as_view', ([], {}), '()\n', (2458, 2460), False, 'from openslides.utils import views\n')] |
from flask import Flask
from wdb.ext import WdbMiddleware
class Wdb(object):
    """Flask extension object that installs the wdb debugger middleware."""

    def __init__(self, app=None):
        self.app = app
        if app:
            self.init_app(self.app)

    def init_app(self, app):
        """Wrap the WSGI app with WdbMiddleware when WDB is enabled."""
        # WDB_ENABLED defaults to the application's debug flag.
        if not app.config.get('WDB_ENABLED', app.debug):
            return
        start_disabled = app.config.get('WDB_START_DISABLED', False)
        theme = app.config.get('WDB_THEME', 'dark')
        app.wsgi_app = WdbMiddleware(app.wsgi_app, start_disabled, theme)
        # Replace app.run so the Werkzeug debugger stays disabled.
        app.run = self._run

    def _run(self, *args, **kwargs):
        # Force-disable Werkzeug's debugger; wdb handles debugging instead.
        kwargs["use_debugger"] = False
        return Flask.run(self.app, *args, **kwargs)
| [
"flask.Flask.run",
"wdb.ext.WdbMiddleware"
] | [((663, 699), 'flask.Flask.run', 'Flask.run', (['self.app', '*args'], {}), '(self.app, *args, **kwargs)\n', (672, 699), False, 'from flask import Flask\n'), ((430, 480), 'wdb.ext.WdbMiddleware', 'WdbMiddleware', (['app.wsgi_app', 'start_disabled', 'theme'], {}), '(app.wsgi_app, start_disabled, theme)\n', (443, 480), False, 'from wdb.ext import WdbMiddleware\n')] |
import cv2
# Debug/scratch script: marks known object centers on an image and
# experiments with converting (x, y) pixel coordinates to flat buffer
# indices for a 320x240 image.
#image = cv2.imread('/home/brenda/Documentos/independent_study/clevr-dataset-gen/output/images/CLEVR_new_000000.png')
#image = cv2.imread('/home/brenda/Escritorio/tmpps5tswcu.png')
image = cv2.imread('/home/brenda/Escritorio/tmp47s462az.png')
print("imagen: ", image.shape, " ", image.shape[0]* image.shape[1])
#row, columns X,Y
# Mark the object centers on the image (BGR colors, radius 5).
cv2.circle(image, (57, 96), 5, (0, 255, 0))
cv2.circle(image, (152, 169), 5, (0, 0, 255))
cv2.circle(image, (217, 141), 5, (255, 0, 0))
cv2.circle(image, (264, 120), 5, (255, 0, 0))
cv2.circle(image, (125, 84), 5, (255, 0, 0))
# Flat index of each center, counted back from the end of a 320x240
# buffer (76800 = 320 * 240 total pixels).
pixel_center1 = 76800 - ((96 * 320) + 57)
pixel_center2 = 76800 - ((169 * 320) + 152)
pixel_center3 = 76800 - ((141 * 320) + 217)
pixel_center4 = 76800 - ((120 * 320) + 264)
pixel_center5 = 76800 - ((84 * 320) + 125)
#image[96][57] = [255, 116, 140] X Y
#image[169][152] = [255, 116, 140]
#image[141][217] = [255, 116, 140]
#image[120][264] = [255, 116, 140]
#image[84][125] = [255, 116, 140]
#w and h column 124 row 84
#(3,5) would lie in column number 3 and row number 5.
#index = (84 * w320) + 124
############ index = (row * width) + column //// 122 * 320 + 245
#320 is the width
#width first, then height
#stored as 245 (width, col), 122 (height, row)
#pixel_center = (obj["pixel_coords"][1] * height) + obj["pixel_coords"][0]
"""
pixel_center1 = (96 * 320) + 57
pixel_center2 = (169 * 320) + 152
pixel_center3 = (141 * 320) + 217
pixel_center4 = (120 * 320) + 264
pixel_center5 = (84 * 320) + 125
"""
"""
pixel_center1 = (96 * 240) + 57
pixel_center2 = (169 * 240) + 152
pixel_center3 = (141 * 240) + 217
pixel_center4 = (120 * 240) + 264
pixel_center5 = (84 * 240) + 125
"""
"""
pixel_center1 = (57 * 240) + 96
pixel_center2 = (152 * 240) + 169
pixel_center3 = (217 * 240) + 141
pixel_center4 = (264 * 240) + 120
pixel_center5 = (125 * 240) + 84
"""
"""
pixel_center1 = (57 * 320) + 96
pixel_center2 = (152 * 320) + 169
pixel_center3 = (217 * 320) + 141
pixel_center4 = (264 * 320) + 120
pixel_center5 = (125 * 320) + 84
"""
#print("center: ", pixel_center1)
#print("center: ", pixel_center2)
#print("center: ", pixel_center3)
#print("center: ", pixel_center4)
#print("center: ", pixel_center5)
#cv2.imshow('Test image', image)
#    (3,3)   3,0 3,1 3,2 3,3 3,4
#    (2,2)   2,0 2,1 2,2 2,3 2,4
#    (1,1)   1,0 1,1 1,2 1,3 1,4
#    (0,0)   0,0 0,1 0,2 0,3 0,4
#     0,0 0,1 0,2 0,3 0,4
#     1,0 1,1 1,2 1,3 1,4
#     2,0 2,1 2,2 2,3 2,4
#     3,0 3,1 3,2 3,3 3,4
#pixel_center = (args.width * args.height) - (obj["pixel_coords"][1] * args.width) + obj["pixel_coords"][0]
# colores is only populated by the commented-out loops below, so it stays {}.
colores = {}
c = 0
r = 3
# Walks (r, c) cells from row 3 downward, 5 columns per row.
for i in range(20):
    if c >= 5:
        r -= 1
        c = 0
    print(r," --> ",c)
    c+=1
"""
counter = 0
for i, row in enumerate(image):
    # get the pixel values by iterating
    for j, pixel in enumerate(image):
        if counter == pixel_center1 or counter == pixel_center2 or counter == pixel_center3 or counter == pixel_center4:
            print("COLOR PINTADO", counter)
            image[i][j] = [255, 116, 140]
        counter += 1
"""
#print("COUNTER", counter)
'''
for i, row in enumerate(image):
    # get the pixel values by iterating
    for j, pixel in enumerate(image):
        if (i == j or i + j == image.shape[0]):
            #print("imprimir: ", list(image[i][j]))
            if (str(image[i][j][0])+"-"+str(image[i][j][1])+"-"+str(image[i][j][2])) in colores:
                colores[ (str(image[i][j][0])+"-"+str(image[i][j][1])+"-"+str(image[i][j][2])) ] +=1
            else:
                colores[ (str(image[i][j][0])+"-"+str(image[i][j][1])+"-"+str(image[i][j][2])) ] = 1
# update the pixel value to black
for i, row in enumerate(image):
    # get the pixel values by iterating
    for j, pixel in enumerate(image):
        if (i == j or i + j == image.shape[0]):
            #print("imprimir: ", list(image[i][j]
            if (str(image[i][j][0])+"-"+str(image[i][j][1])+"-"+str(image[i][j][2])) == '64-64-64':
                image[i][j] = [255, 255, 0]
'''
cv2.imshow('Test image', image)
#170, 250
# NOTE: colores is still empty here -- the loops that would fill it are
# inside the quoted-out block above.
print(colores)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"cv2.imshow",
"cv2.circle",
"cv2.destroyAllWindows",
"cv2.waitKey",
"cv2.imread"
] | [((203, 256), 'cv2.imread', 'cv2.imread', (['"""/home/brenda/Escritorio/tmp47s462az.png"""'], {}), "('/home/brenda/Escritorio/tmp47s462az.png')\n", (213, 256), False, 'import cv2\n'), ((346, 389), 'cv2.circle', 'cv2.circle', (['image', '(57, 96)', '(5)', '(0, 255, 0)'], {}), '(image, (57, 96), 5, (0, 255, 0))\n', (356, 389), False, 'import cv2\n'), ((390, 435), 'cv2.circle', 'cv2.circle', (['image', '(152, 169)', '(5)', '(0, 0, 255)'], {}), '(image, (152, 169), 5, (0, 0, 255))\n', (400, 435), False, 'import cv2\n'), ((436, 481), 'cv2.circle', 'cv2.circle', (['image', '(217, 141)', '(5)', '(255, 0, 0)'], {}), '(image, (217, 141), 5, (255, 0, 0))\n', (446, 481), False, 'import cv2\n'), ((482, 527), 'cv2.circle', 'cv2.circle', (['image', '(264, 120)', '(5)', '(255, 0, 0)'], {}), '(image, (264, 120), 5, (255, 0, 0))\n', (492, 527), False, 'import cv2\n'), ((528, 572), 'cv2.circle', 'cv2.circle', (['image', '(125, 84)', '(5)', '(255, 0, 0)'], {}), '(image, (125, 84), 5, (255, 0, 0))\n', (538, 572), False, 'import cv2\n'), ((4017, 4048), 'cv2.imshow', 'cv2.imshow', (['"""Test image"""', 'image'], {}), "('Test image', image)\n", (4027, 4048), False, 'import cv2\n'), ((4076, 4090), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4087, 4090), False, 'import cv2\n'), ((4091, 4114), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4112, 4114), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
"""
Simulador de credito
- Monto del prestamo del carro
- Ingresa tu cuota inicial
- Ingresos mensuales
- Numero de meses del prestamo
- Datos personales
----------------------------------------
Ingresos mensuales
- 908.526 - 1.000.000 >>>>>>>>>>>>>> 20.000.000
- 1.000.000 - 2.000.000 >>>>>>>>>> 25.000.000
- 2.000.000 - 2.500.000 >>>>>>>>>> 30.000.000
- 2.500.000 - 3.000.000 >>>>>>>>>> 40.000.000
- 3.000.000 - 4.500.000 >>>>>>>>>> 60.000.000
- 4.500.000 o mas >>>>>>>>>>>>>>>> 120.000.000
12 Meses - 1.772.488
20.000.000 ->>> 100
21.269.586 ->>> 106.3% /// equivale a un 6.3% de interes total
1.269.856
105.798 de interès cada mes
24 Meses - 936.993
20.000.000 ->>> 100
22.487.832 ->>> 112.4% /// equivale a un 12.4% de interes total
2.500.000
104.659 de interes cada mes
36 meses - 659.710
20.000.000 ->>> 100
23.749.560 ->>> 118.7% /// equivale a un 18.7% de interes total
48 meses - 521.976
20.000.000 ->>> 100
25.054.848 ->>> 125,2% /// equivale a un 25.2% de interes total
60 meses - 440.053
20.000.000 ->>> 100
25.054.848 ->>> 132% /// equivale a un 32% de interes total
72 meses - 386.030
20.000.000 ->>> 100
27.794.160 ->>> 138.9% /// equivale a un 38.9% de interes total
"""
import sys
print("||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||")
print("|||||||||||||BIENVENIDO AL SIMULADOR DE CREDITO DE VEHICULO|||||||||||||")
print("||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||")
print("||||||||||||||||||compra el vehiculo de tus sueños||||||||||||||||||||||")
print("||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||")
# Personal data.
dp_nombre = str(input("Por favor digita tu nombre y apellido: "))
dp_edad = int(input("Por favor digita tu edad: "))
# Age guards: the credit is offered to people aged 18 to 69 only.
if dp_edad < 18:
    sys.exit("El credito es solo para mayores de edad")
if dp_edad >= 70:
    sys.exit("El credito es solo para personas entre los 18 y 69 años")
# BUGFIX: this used to read `dp_edad > 18`, which silently skipped
# 18-year-olds and later crashed with NameError on dp_salario.
if dp_edad >= 18 and dp_edad <= 70:
    dp_salario = int(input("por favor digita tu salario: "))
    if dp_salario < 908526:
        sys.exit("Monto no valido, valor minimo de prestamo 1SMLV")
    # Salary bracket -> maximum loan amount (COP).
    # 908.526 - 1.000.000 -> 20.000.000
    if dp_salario > 908526 and dp_salario <= 1000000:
        print("El prestamo podra ser desde 1.000.000 hasta 20.000.000 cop")
    # 1.000.000 - 2.000.000 -> 25.000.000
    if dp_salario > 1000000 and dp_salario <= 2000000:
        print("El prestamo podra ser desde 1.000.000 hasta 25.000.000 cop")
    # 2.000.000 - 2.500.000 -> 30.000.000
    if dp_salario > 2000000 and dp_salario <= 2500000:
        print("El prestamo podra ser desde 1.000.000 hasta 30.000.000 cop")
    # 2.500.000 - 3.000.000 -> 40.000.000
    if dp_salario > 2500000 and dp_salario <= 3000000:
        print("El prestamo podra ser desde 1.000.000 hasta 40.000.000 cop")
    # 3.000.000 - 4.500.000 -> 60.000.000
    if dp_salario > 3000000 and dp_salario <= 4500000:
        print("El prestamo podra ser desde 1.000.000 hasta 60.000.000 cop")
    # 4.500.000 or more -> 120.000.000
    if dp_salario > 4500000:
        print("El prestamo podra ser desde 1.000.000 hasta 120.000.000 cop")
    # NOTE(review): a salary of exactly 908.526 (1 SMLV) passes the guard
    # above but matches no bracket, so nothing happens -- confirm intent.
    #####################################################
    if dp_salario > 908526:
        credito1 = int(input("Ingrese el valor del vehiculo: "))
        # Re-prompt until the vehicle price reaches the minimum loan amount.
        while credito1 < 1000000:
            print("El valor no es valido, inserte un valor desde 1'000.000")
            credito1 = int(input("Ingrese el valor del vehiculo: "))
        # Per-bracket upper-bound checks. Parentheses added for clarity;
        # evaluation is unchanged (`and` already binds tighter than `or`).
        while (dp_salario > 908526 and dp_salario <= 1000000 and credito1 > 20000000) or credito1 < 1000000:
            print("Monto no valido - El prestamo podra ser desde 1.000.000 hasta 20.000.000 cop")
            credito1 = int(input("Ingrese el valor del vehiculo: "))
        while (dp_salario > 1000000 and dp_salario <= 2000000 and credito1 > 25000000) or credito1 < 1000000:
            print("Monto no valido - El prestamo podra ser desde 1.000.000 hasta 25.000.000 cop")
            credito1 = int(input("Ingrese el valor del vehiculo: "))
        while (dp_salario > 2000000 and dp_salario <= 2500000 and credito1 > 30000000) or credito1 < 1000000:
            print("Monto no valido - El prestamo podra ser desde 1.000.000 hasta 30.000.000 cop")
            credito1 = int(input("Ingrese el valor del vehiculo: "))
        while (dp_salario > 2500000 and dp_salario <= 3000000 and credito1 > 40000000) or credito1 < 1000000:
            print("Monto no valido - El prestamo podra ser desde 1.000.000 hasta 40.000.000 cop")
            credito1 = int(input("Ingrese el valor del vehiculo: "))
        while (dp_salario > 3000000 and dp_salario <= 4500000 and credito1 > 60000000) or credito1 < 1000000:
            print("Monto no valido - El prestamo podra ser desde 1.000.000 hasta 60.000.000 cop")
            credito1 = int(input("Ingrese el valor del vehiculo: "))
        while (dp_salario > 4500000 and credito1 > 120000000) or credito1 < 1000000:
            print("Monto no valido - El prestamo podra ser desde 1.000.000 hasta 120.000.000 cop")
            credito1 = int(input("Ingrese el valor del vehiculo: "))
        # Down payment and amount to finance.
        if credito1 > 1000000:
            cuota_inicial = int(input("Ingrese el valor de su cuota inicial: "))
            dinero_a_financiar = credito1 - cuota_inicial
            print("El dinero a financiar sera: ", dinero_a_financiar)
        if credito1 > 1000000 and dinero_a_financiar > 1:
            cuotas = int(input("Ingrese el numero de cutas a financiar (12) (24) (36) (48) (60) (72): "))
        while credito1 > 1000000 and dinero_a_financiar > 1 and cuotas != 12 and cuotas != 24 and cuotas != 36 and cuotas != 48 and cuotas != 60 and cuotas != 72:
            print("Numero de cuotas no valido")
            cuotas = int(input("Ingrese el numero de cutas a financiar (12) (24) (36) (48) (60) (72): "))
        print("Señor(a): ",dp_nombre)
        # Total interest per term: 12m 6.3%, 24m 12.4%, 36m 18.7%,
        # 48m 25.2%, 60m 32%, 72m 38.9%.
        if credito1 > 1000000 and dinero_a_financiar > 1 and cuotas == 12:
            interes12 = (dinero_a_financiar * 106.3)/100
            print("El valor final aproximado a pagar es: ", round(interes12))
            cuota_mensual12 = interes12/12
            print("La cuota mensual aproximada es de: ", round(cuota_mensual12))
        if credito1 > 1000000 and dinero_a_financiar > 1 and cuotas == 24:
            interes24 = (dinero_a_financiar * 112.4)/100
            print("El valor final a pagar es: ", round(interes24))
            cuota_mensual24 = interes24/24
            print("La cuota mensual es de: ", round(cuota_mensual24))
        if credito1 > 1000000 and dinero_a_financiar > 1 and cuotas == 36:
            interes36 = (dinero_a_financiar * 118.7)/100
            print("El valor final aproximado a pagar es: ", round(interes36))
            cuota_mensual36 = interes36/36
            print("La cuota mensual aproximada es de: ", round(cuota_mensual36))
        if credito1 > 1000000 and dinero_a_financiar > 1 and cuotas == 48:
            interes48 = (dinero_a_financiar * 125.2)/100
            print("El valor final aproximado a pagar es: ", round(interes48))
            cuota_mensual48 = interes48/48
            print("La cuota mensual aproximada es de: ", round(cuota_mensual48))
        if credito1 > 1000000 and dinero_a_financiar > 1 and cuotas == 60:
            interes60 = (dinero_a_financiar * 132)/100
            print("El valor final aproximado a pagar es: ", round(interes60))
            cuota_mensual60 = interes60/60
            print("La cuota mensual aproximada es de: ", round(cuota_mensual60))
        if credito1 > 1000000 and dinero_a_financiar > 1 and cuotas == 72:
            interes72 = (dinero_a_financiar * 138.9)/100
            print("El valor final aproximado a pagar es: ", round(interes72))
            cuota_mensual72 = interes72/72
            print("La cuota mensual aproximada es de: ", round(cuota_mensual72))
        print("||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||")
        print("||||||||||GRACIAS POR USAR SIMULADOR DE CREDITO DE VEHICULO|||||||||||||")
        print("||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||")
        print("||||||||||||||||||compra el vehiculo de tus sueños|||||||||||||||||||||||")
        print("||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||")
"""
Fuentes:
https://www.delftstack.com/es/howto/python/how-to-round-to-two-decimals-in-python/
https://www.delftstack.com/es/howto/python/python-exit-program/
https://www.bancodeoccidente.com.co/occiauto/autogestionado
"""
"sys.exit"
] | [((1889, 1940), 'sys.exit', 'sys.exit', (['"""El credito es solo para mayores de edad"""'], {}), "('El credito es solo para mayores de edad')\n", (1897, 1940), False, 'import sys\n'), ((1967, 2034), 'sys.exit', 'sys.exit', (['"""El credito es solo para personas entre los 18 y 69 años"""'], {}), "('El credito es solo para personas entre los 18 y 69 años')\n", (1975, 2034), False, 'import sys\n'), ((2168, 2227), 'sys.exit', 'sys.exit', (['"""Monto no valido, valor minimo de prestamo 1SMLV"""'], {}), "('Monto no valido, valor minimo de prestamo 1SMLV')\n", (2176, 2227), False, 'import sys\n')] |
import g1.asyncs.agents.parts
import g1.messaging.parts.subscribers
from g1.apps import parameters
from g1.apps import utils
from g1.asyncs.bases import queues
from g1.bases import labels
# For now these are just aliases.
from g1.messaging.parts.subscribers import make_subscriber_params
from .. import subscribers # pylint: disable=relative-beyond-top-level
SUBSCRIBER_LABEL_NAMES = (
# Output.
'queue',
# Private.
('subscriber', g1.messaging.parts.subscribers.SUBSCRIBER_LABEL_NAMES),
)
def define_subscriber(module_path=None, **kwargs):
module_path = module_path or subscribers.__name__
module_labels = labels.make_nested_labels(
module_path,
SUBSCRIBER_LABEL_NAMES,
)
setup_subscriber(
module_labels,
parameters.define(module_path, make_subscriber_params(**kwargs)),
)
return module_labels
def setup_subscriber(module_labels, module_params):
utils.define_maker(
make_queue,
{
'return': module_labels.queue,
},
)
utils.define_maker(
subscribers.make_subscriber,
{
'queue': module_labels.queue,
'return': module_labels.subscriber.subscriber,
},
)
g1.messaging.parts.subscribers.setup_subscriber(
module_labels.subscriber,
module_params,
)
def make_queue(shutdown_queue: g1.asyncs.agents.parts.LABELS.shutdown_queue):
queue = queues.Queue(capacity=32)
shutdown_queue.put_nonblocking(queue.close)
return queue
| [
"g1.apps.utils.define_maker",
"g1.messaging.parts.subscribers.make_subscriber_params",
"g1.asyncs.bases.queues.Queue",
"g1.bases.labels.make_nested_labels"
] | [((635, 697), 'g1.bases.labels.make_nested_labels', 'labels.make_nested_labels', (['module_path', 'SUBSCRIBER_LABEL_NAMES'], {}), '(module_path, SUBSCRIBER_LABEL_NAMES)\n', (660, 697), False, 'from g1.bases import labels\n'), ((929, 992), 'g1.apps.utils.define_maker', 'utils.define_maker', (['make_queue', "{'return': module_labels.queue}"], {}), "(make_queue, {'return': module_labels.queue})\n", (947, 992), False, 'from g1.apps import utils\n'), ((1043, 1174), 'g1.apps.utils.define_maker', 'utils.define_maker', (['subscribers.make_subscriber', "{'queue': module_labels.queue, 'return': module_labels.subscriber.subscriber}"], {}), "(subscribers.make_subscriber, {'queue': module_labels.\n queue, 'return': module_labels.subscriber.subscriber})\n", (1061, 1174), False, 'from g1.apps import utils\n'), ((1436, 1461), 'g1.asyncs.bases.queues.Queue', 'queues.Queue', ([], {'capacity': '(32)'}), '(capacity=32)\n', (1448, 1461), False, 'from g1.asyncs.bases import queues\n'), ((805, 837), 'g1.messaging.parts.subscribers.make_subscriber_params', 'make_subscriber_params', ([], {}), '(**kwargs)\n', (827, 837), False, 'from g1.messaging.parts.subscribers import make_subscriber_params\n')] |
import os
import tkinter as tk
from telnetlib import Telnet
import ctp.pdu.apc as apc
class PDUPower():
def __init__(self):
self.window = tk.Tk()
self.pdu1 = tk.IntVar()
self.pdu2 = tk.IntVar()
self.pdu3 = tk.IntVar()
self.pdu4 = tk.IntVar()
self.p1 = tk.Checkbutton(self.window, text='PDU: 1', variable=self.pdu1, onvalue=1, offvalue=0)
self.p2 = tk.Checkbutton(self.window, text='PDU: 2', variable=self.pdu2, onvalue=1, offvalue=0)
self.p3 = tk.Checkbutton(self.window, text='PDU: 3', variable=self.pdu3, onvalue=1, offvalue=0)
self.p4 = tk.Checkbutton(self.window, text='PDU: 4', variable=self.pdu4, onvalue=1, offvalue=0)
def pdu(self, powerStatue='off'):
tmp = [self.pdu1.get(), self.pdu2.get(), self.pdu3.get(), self.pdu4.get()]
pdu_cli = apc.APC('10.0.0.253', usr='apc', pwd='<PASSWORD>')
cmd = ",".join([str(i+1) for i, val in enumerate(tmp) if val == 1])
if powerStatue is 'on':
pdu_cli.power_on(cmd)
for i in cmd.split(','):
[self.p1, self.p2, self.p3, self.p4][int(i)-1].config(bg='green')
if powerStatue is 'off':
pdu_cli.power_off(cmd)
for i in cmd.split(','):
[self.p1, self.p2, self.p3, self.p4][int(i)-1].config(bg='grey')
def __del__(self):
print('destory the window')
self.window.quit()
def PowerControl(self):
self.window.title('PDU Power')
self.window.geometry('500x300')
l = tk.Label(self.window, text='DST PDU Power Control', bg='light blue', font=('Arial', 12), width=30, height=2)
l.pack()
self.p1.pack()
self.p2.pack()
self.p3.pack()
self.p4.pack()
button1 = tk.Button(self.window, text='ready to on', width=10, height=2, command=lambda: self.pdu('on'))
button2 = tk.Button(self.window, text='ready to off', width=10, height=2, command=lambda: self.pdu('off'))
button3 = tk.Button(self.window, text='logout', width=10, height=2, command=self.__del__)
button1.pack()
button2.pack()
button3.pack()
self.window.mainloop()
if __name__ == '__main__':
easyCommand = PDUPower()
easyCommand.PowerControl()
| [
"tkinter.IntVar",
"tkinter.Checkbutton",
"tkinter.Button",
"tkinter.Tk",
"tkinter.Label",
"ctp.pdu.apc.APC"
] | [((160, 167), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (165, 167), True, 'import tkinter as tk\n'), ((189, 200), 'tkinter.IntVar', 'tk.IntVar', ([], {}), '()\n', (198, 200), True, 'import tkinter as tk\n'), ((222, 233), 'tkinter.IntVar', 'tk.IntVar', ([], {}), '()\n', (231, 233), True, 'import tkinter as tk\n'), ((255, 266), 'tkinter.IntVar', 'tk.IntVar', ([], {}), '()\n', (264, 266), True, 'import tkinter as tk\n'), ((288, 299), 'tkinter.IntVar', 'tk.IntVar', ([], {}), '()\n', (297, 299), True, 'import tkinter as tk\n'), ((319, 408), 'tkinter.Checkbutton', 'tk.Checkbutton', (['self.window'], {'text': '"""PDU: 1"""', 'variable': 'self.pdu1', 'onvalue': '(1)', 'offvalue': '(0)'}), "(self.window, text='PDU: 1', variable=self.pdu1, onvalue=1,\n offvalue=0)\n", (333, 408), True, 'import tkinter as tk\n'), ((424, 513), 'tkinter.Checkbutton', 'tk.Checkbutton', (['self.window'], {'text': '"""PDU: 2"""', 'variable': 'self.pdu2', 'onvalue': '(1)', 'offvalue': '(0)'}), "(self.window, text='PDU: 2', variable=self.pdu2, onvalue=1,\n offvalue=0)\n", (438, 513), True, 'import tkinter as tk\n'), ((529, 618), 'tkinter.Checkbutton', 'tk.Checkbutton', (['self.window'], {'text': '"""PDU: 3"""', 'variable': 'self.pdu3', 'onvalue': '(1)', 'offvalue': '(0)'}), "(self.window, text='PDU: 3', variable=self.pdu3, onvalue=1,\n offvalue=0)\n", (543, 618), True, 'import tkinter as tk\n'), ((634, 723), 'tkinter.Checkbutton', 'tk.Checkbutton', (['self.window'], {'text': '"""PDU: 4"""', 'variable': 'self.pdu4', 'onvalue': '(1)', 'offvalue': '(0)'}), "(self.window, text='PDU: 4', variable=self.pdu4, onvalue=1,\n offvalue=0)\n", (648, 723), True, 'import tkinter as tk\n'), ((874, 924), 'ctp.pdu.apc.APC', 'apc.APC', (['"""10.0.0.253"""'], {'usr': '"""apc"""', 'pwd': '"""<PASSWORD>"""'}), "('10.0.0.253', usr='apc', pwd='<PASSWORD>')\n", (881, 924), True, 'import ctp.pdu.apc as apc\n'), ((1600, 1713), 'tkinter.Label', 'tk.Label', (['self.window'], {'text': '"""DST PDU Power Control"""', 'bg': 
'"""light blue"""', 'font': "('Arial', 12)", 'width': '(30)', 'height': '(2)'}), "(self.window, text='DST PDU Power Control', bg='light blue', font=(\n 'Arial', 12), width=30, height=2)\n", (1608, 1713), True, 'import tkinter as tk\n'), ((2074, 2153), 'tkinter.Button', 'tk.Button', (['self.window'], {'text': '"""logout"""', 'width': '(10)', 'height': '(2)', 'command': 'self.__del__'}), "(self.window, text='logout', width=10, height=2, command=self.__del__)\n", (2083, 2153), True, 'import tkinter as tk\n')] |
import re
from email_validator import validate_email, EmailSyntaxError
from virtool.users.utils import PERMISSIONS
RE_HEX_COLOR = re.compile("^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$")
def strip(value: str) -> str:
"""
Strip flanking whitespace from the passed string. Used to coerce values in Cerberus validators.
:param value: the string to strip
:return: the stripped string
"""
return value.strip()
def is_permission_dict(field: str, value: dict, error: callable):
"""
Checks that all keys included in permissions dictionary are valid permissions.
If invalid key is found, error message is updated to "keys must be valid permissions"
:param field: permissions field to check
:param value: permissions dictionary value
:param error: points to the calling validator’s _error method
"""
if any(key not in PERMISSIONS for key in value):
error(field, "keys must be valid permissions")
def has_unique_segment_names(field: str, value: list, error: callable):
"""
Checks that no duplicate names are used for segment names in list
If duplicate names are found, error message is updated to "list contains duplicate names"
:param field: field to check
:param value: list value
:param error: points to the calling validator’s _error method
"""
if len({seg["name"] for seg in value}) != len(value):
error(field, "list contains duplicate names")
def is_valid_hex_color(field: str, value: str, error: callable):
"""
Checks that color is a valid Hexadecimal color, performs check using regex format comparison
If color is an invalid Hexadecimal color, error message is updated to "This is not a valid Hexadecimal color"
:param field: color field to check
:param value: color string value
:param error: points to the calling validator’s _error method
"""
if not RE_HEX_COLOR.match(value):
error(field, "This is not a valid Hexadecimal color")
def is_valid_email(field: str, value: str, error: callable):
"""
Checks that email is a valid email according to email_validator.validate_email
If email is invalid, error message is updated to "Not a valid email"
:param field: email field to check
:param value: email string value
:param error: points to the calling validator’s _error method
"""
try:
validate_email(value)
except EmailSyntaxError:
error(field, "Not a valid email")
| [
"email_validator.validate_email",
"re.compile"
] | [((133, 181), 're.compile', 're.compile', (['"""^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$"""'], {}), "('^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$')\n", (143, 181), False, 'import re\n'), ((2384, 2405), 'email_validator.validate_email', 'validate_email', (['value'], {}), '(value)\n', (2398, 2405), False, 'from email_validator import validate_email, EmailSyntaxError\n')] |
import numpy as np
import numpy.testing as npt
import noisyopt
def test_minimize():
deltatol = 1e-3
## basic testing without stochasticity
def quadratic(x):
return (x**2).sum()
res = noisyopt.minimize(quadratic, np.asarray([0.5, 1.0]), deltatol=deltatol)
npt.assert_allclose(res.x, [0.0, 0.0], atol=deltatol)
npt.assert_equal(res.free, [False, False])
res = noisyopt.minimize(quadratic, np.asarray([2.5, -3.2]), deltatol=deltatol)
npt.assert_allclose(res.x, [0.0, 0.0], atol=deltatol)
npt.assert_equal(res.free, [False, False])
res = noisyopt.minimize(quadratic, np.asarray([2.5, -3.2, 0.9, 10.0, -0.3]),
deltatol=deltatol)
npt.assert_allclose(res.x, np.zeros(5), atol=deltatol)
npt.assert_equal(res.free, [False, False, False, False, False])
## test bound handling
res = noisyopt.minimize(quadratic, np.asarray([0.5, 0.5]),
bounds=np.asarray([[0, 1], [0, 1]]), deltatol=deltatol)
npt.assert_allclose(res.x, [0.0, 0.0], atol=deltatol)
npt.assert_equal(res.free, [False, False])
res = noisyopt.minimize(quadratic, np.asarray([0.8, 0.8]),
bounds=np.asarray([[0.5, 1], [0.5, 1]]),
deltatol=deltatol)
npt.assert_allclose(res.x, [0.5, 0.5], atol=deltatol)
npt.assert_equal(res.free, [False, False])
## test determination of unconstrained variables
def quadratic_except_last(x):
return (x[:-1]**2).sum()
res = noisyopt.minimize(quadratic_except_last, np.asarray([0.5, 1.0]))
npt.assert_approx_equal(res.x[0], 0.0)
npt.assert_equal(res.free, [False, True])
## test errorcontrol for stochastic function
def stochastic_quadratic(x, seed=None):
prng = np.random if seed is None else np.random.RandomState(seed)
return (x**2).sum() + prng.randn(1) + 0.5*np.random.randn(1)
deltatol = 0.5
# test unpaired
res = noisyopt.minimize(stochastic_quadratic, np.array([4.55, 3.0]),
deltainit=2.0, deltatol=deltatol,
errorcontrol=True)
npt.assert_allclose(res.x, [0.0, 0.0], atol=deltatol)
npt.assert_equal(res.free, [False, False])
# test paired
res = noisyopt.minimize(stochastic_quadratic, np.array([4.55, 3.0]),
deltainit=2.0, deltatol=deltatol,
errorcontrol=True, paired=True)
npt.assert_allclose(res.x, [0.0, 0.0], atol=deltatol)
npt.assert_equal(res.free, [False, False])
def test_bisect():
xtol = 1e-6
## simple tests
root = noisyopt.bisect(lambda x: x, -2, 2, xtol=xtol)
npt.assert_allclose(root, 0.0, atol=xtol)
root = noisyopt.bisect(lambda x: x-1, -2, 2, xtol=xtol)
npt.assert_allclose(root, 1.0, atol=xtol)
## extrapolate if 0 outside of interval
root = noisyopt.bisect(lambda x: x, 1, 2, xtol=xtol)
npt.assert_allclose(root, 0.0, atol=xtol)
npt.assert_raises(noisyopt.BisectException,
noisyopt.bisect, lambda x: x, 1, 2,
xtol=xtol, outside='raise')
## extrapolate with nonlinear function
root = noisyopt.bisect(lambda x: x+x**2, 1.0, 2, xtol=xtol)
assert root < 1.0
## test with stochastic function
xtol = 1e-1
func = lambda x: x - 0.25 + np.random.normal(scale=0.01)
root = noisyopt.bisect(noisyopt.AveragedFunction(func), -2, 2, xtol=xtol,
errorcontrol=True)
npt.assert_allclose(root, 0.25, atol=xtol)
def test_AveragedFunction():
## averaging a simple function
func = lambda x: np.asarray(x).sum()
avfunc = noisyopt.AveragedFunction(func, N=30)
av, avse = avfunc([1.0, 1.0])
npt.assert_equal(av, 2.0)
npt.assert_equal(avse, 0.0)
# se of function value difference between two points is zero
# (as function evaluation is not stochastic)
diffse = avfunc.diffse([1.0, 1.0], [2.0, 1.0])
npt.assert_equal(diffse, 0.0)
## changing the number of evaluations
avfunc.N *= 2
npt.assert_equal(avfunc.N, 60)
## averaging a stochastic function
func = lambda x: np.asarray(x).sum() + np.random.randn()
avfunc = noisyopt.AveragedFunction(func, N=30)
# check that reevaluation gives the same thing due to caching
av30_1, avse30_1 = avfunc([1.0, 1.0])
av30_2, avse30_2 = avfunc([1.0, 1.0])
npt.assert_equal(av30_1, av30_2)
npt.assert_equal(avse30_1, avse30_2)
# check that se decreases if
avfunc.N *= 2
av60, avse60 = avfunc([1.0, 1.0])
assert av30_1 != av60
assert avse30_1 > avse60
# test with floating point N
noisyopt.AveragedFunction(func, N=30.0, paired=True)
if __name__ == '__main__':
npt.run_module_suite()
| [
"numpy.random.normal",
"numpy.testing.assert_equal",
"numpy.testing.assert_approx_equal",
"numpy.testing.assert_allclose",
"numpy.testing.assert_raises",
"numpy.asarray",
"noisyopt.bisect",
"numpy.array",
"numpy.zeros",
"numpy.testing.run_module_suite",
"numpy.random.randn",
"noisyopt.Averaged... | [((285, 338), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['res.x', '[0.0, 0.0]'], {'atol': 'deltatol'}), '(res.x, [0.0, 0.0], atol=deltatol)\n', (304, 338), True, 'import numpy.testing as npt\n'), ((343, 385), 'numpy.testing.assert_equal', 'npt.assert_equal', (['res.free', '[False, False]'], {}), '(res.free, [False, False])\n', (359, 385), True, 'import numpy.testing as npt\n'), ((474, 527), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['res.x', '[0.0, 0.0]'], {'atol': 'deltatol'}), '(res.x, [0.0, 0.0], atol=deltatol)\n', (493, 527), True, 'import numpy.testing as npt\n'), ((532, 574), 'numpy.testing.assert_equal', 'npt.assert_equal', (['res.free', '[False, False]'], {}), '(res.free, [False, False])\n', (548, 574), True, 'import numpy.testing as npt\n'), ((767, 830), 'numpy.testing.assert_equal', 'npt.assert_equal', (['res.free', '[False, False, False, False, False]'], {}), '(res.free, [False, False, False, False, False])\n', (783, 830), True, 'import numpy.testing as npt\n'), ((1010, 1063), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['res.x', '[0.0, 0.0]'], {'atol': 'deltatol'}), '(res.x, [0.0, 0.0], atol=deltatol)\n', (1029, 1063), True, 'import numpy.testing as npt\n'), ((1068, 1110), 'numpy.testing.assert_equal', 'npt.assert_equal', (['res.free', '[False, False]'], {}), '(res.free, [False, False])\n', (1084, 1110), True, 'import numpy.testing as npt\n'), ((1295, 1348), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['res.x', '[0.5, 0.5]'], {'atol': 'deltatol'}), '(res.x, [0.5, 0.5], atol=deltatol)\n', (1314, 1348), True, 'import numpy.testing as npt\n'), ((1353, 1395), 'numpy.testing.assert_equal', 'npt.assert_equal', (['res.free', '[False, False]'], {}), '(res.free, [False, False])\n', (1369, 1395), True, 'import numpy.testing as npt\n'), ((1598, 1636), 'numpy.testing.assert_approx_equal', 'npt.assert_approx_equal', (['res.x[0]', '(0.0)'], {}), '(res.x[0], 0.0)\n', (1621, 1636), True, 
'import numpy.testing as npt\n'), ((1641, 1682), 'numpy.testing.assert_equal', 'npt.assert_equal', (['res.free', '[False, True]'], {}), '(res.free, [False, True])\n', (1657, 1682), True, 'import numpy.testing as npt\n'), ((2146, 2199), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['res.x', '[0.0, 0.0]'], {'atol': 'deltatol'}), '(res.x, [0.0, 0.0], atol=deltatol)\n', (2165, 2199), True, 'import numpy.testing as npt\n'), ((2204, 2246), 'numpy.testing.assert_equal', 'npt.assert_equal', (['res.free', '[False, False]'], {}), '(res.free, [False, False])\n', (2220, 2246), True, 'import numpy.testing as npt\n'), ((2464, 2517), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['res.x', '[0.0, 0.0]'], {'atol': 'deltatol'}), '(res.x, [0.0, 0.0], atol=deltatol)\n', (2483, 2517), True, 'import numpy.testing as npt\n'), ((2522, 2564), 'numpy.testing.assert_equal', 'npt.assert_equal', (['res.free', '[False, False]'], {}), '(res.free, [False, False])\n', (2538, 2564), True, 'import numpy.testing as npt\n'), ((2634, 2680), 'noisyopt.bisect', 'noisyopt.bisect', (['(lambda x: x)', '(-2)', '(2)'], {'xtol': 'xtol'}), '(lambda x: x, -2, 2, xtol=xtol)\n', (2649, 2680), False, 'import noisyopt\n'), ((2685, 2726), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['root', '(0.0)'], {'atol': 'xtol'}), '(root, 0.0, atol=xtol)\n', (2704, 2726), True, 'import numpy.testing as npt\n'), ((2739, 2789), 'noisyopt.bisect', 'noisyopt.bisect', (['(lambda x: x - 1)', '(-2)', '(2)'], {'xtol': 'xtol'}), '(lambda x: x - 1, -2, 2, xtol=xtol)\n', (2754, 2789), False, 'import noisyopt\n'), ((2792, 2833), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['root', '(1.0)'], {'atol': 'xtol'}), '(root, 1.0, atol=xtol)\n', (2811, 2833), True, 'import numpy.testing as npt\n'), ((2890, 2935), 'noisyopt.bisect', 'noisyopt.bisect', (['(lambda x: x)', '(1)', '(2)'], {'xtol': 'xtol'}), '(lambda x: x, 1, 2, xtol=xtol)\n', (2905, 2935), False, 'import noisyopt\n'), ((2940, 2981), 
'numpy.testing.assert_allclose', 'npt.assert_allclose', (['root', '(0.0)'], {'atol': 'xtol'}), '(root, 0.0, atol=xtol)\n', (2959, 2981), True, 'import numpy.testing as npt\n'), ((2986, 3097), 'numpy.testing.assert_raises', 'npt.assert_raises', (['noisyopt.BisectException', 'noisyopt.bisect', '(lambda x: x)', '(1)', '(2)'], {'xtol': 'xtol', 'outside': '"""raise"""'}), "(noisyopt.BisectException, noisyopt.bisect, lambda x: x, 1,\n 2, xtol=xtol, outside='raise')\n", (3003, 3097), True, 'import numpy.testing as npt\n'), ((3197, 3253), 'noisyopt.bisect', 'noisyopt.bisect', (['(lambda x: x + x ** 2)', '(1.0)', '(2)'], {'xtol': 'xtol'}), '(lambda x: x + x ** 2, 1.0, 2, xtol=xtol)\n', (3212, 3253), False, 'import noisyopt\n'), ((3515, 3557), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['root', '(0.25)'], {'atol': 'xtol'}), '(root, 0.25, atol=xtol)\n', (3534, 3557), True, 'import numpy.testing as npt\n'), ((3678, 3715), 'noisyopt.AveragedFunction', 'noisyopt.AveragedFunction', (['func'], {'N': '(30)'}), '(func, N=30)\n', (3703, 3715), False, 'import noisyopt\n'), ((3754, 3779), 'numpy.testing.assert_equal', 'npt.assert_equal', (['av', '(2.0)'], {}), '(av, 2.0)\n', (3770, 3779), True, 'import numpy.testing as npt\n'), ((3784, 3811), 'numpy.testing.assert_equal', 'npt.assert_equal', (['avse', '(0.0)'], {}), '(avse, 0.0)\n', (3800, 3811), True, 'import numpy.testing as npt\n'), ((3983, 4012), 'numpy.testing.assert_equal', 'npt.assert_equal', (['diffse', '(0.0)'], {}), '(diffse, 0.0)\n', (3999, 4012), True, 'import numpy.testing as npt\n'), ((4078, 4108), 'numpy.testing.assert_equal', 'npt.assert_equal', (['avfunc.N', '(60)'], {}), '(avfunc.N, 60)\n', (4094, 4108), True, 'import numpy.testing as npt\n'), ((4223, 4260), 'noisyopt.AveragedFunction', 'noisyopt.AveragedFunction', (['func'], {'N': '(30)'}), '(func, N=30)\n', (4248, 4260), False, 'import noisyopt\n'), ((4415, 4447), 'numpy.testing.assert_equal', 'npt.assert_equal', (['av30_1', 'av30_2'], {}), '(av30_1, 
av30_2)\n', (4431, 4447), True, 'import numpy.testing as npt\n'), ((4452, 4488), 'numpy.testing.assert_equal', 'npt.assert_equal', (['avse30_1', 'avse30_2'], {}), '(avse30_1, avse30_2)\n', (4468, 4488), True, 'import numpy.testing as npt\n'), ((4672, 4724), 'noisyopt.AveragedFunction', 'noisyopt.AveragedFunction', (['func'], {'N': '(30.0)', 'paired': '(True)'}), '(func, N=30.0, paired=True)\n', (4697, 4724), False, 'import noisyopt\n'), ((4757, 4779), 'numpy.testing.run_module_suite', 'npt.run_module_suite', ([], {}), '()\n', (4777, 4779), True, 'import numpy.testing as npt\n'), ((238, 260), 'numpy.asarray', 'np.asarray', (['[0.5, 1.0]'], {}), '([0.5, 1.0])\n', (248, 260), True, 'import numpy as np\n'), ((426, 449), 'numpy.asarray', 'np.asarray', (['[2.5, -3.2]'], {}), '([2.5, -3.2])\n', (436, 449), True, 'import numpy as np\n'), ((615, 655), 'numpy.asarray', 'np.asarray', (['[2.5, -3.2, 0.9, 10.0, -0.3]'], {}), '([2.5, -3.2, 0.9, 10.0, -0.3])\n', (625, 655), True, 'import numpy as np\n'), ((735, 746), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (743, 746), True, 'import numpy as np\n'), ((898, 920), 'numpy.asarray', 'np.asarray', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (908, 920), True, 'import numpy as np\n'), ((1151, 1173), 'numpy.asarray', 'np.asarray', (['[0.8, 0.8]'], {}), '([0.8, 0.8])\n', (1161, 1173), True, 'import numpy as np\n'), ((1570, 1592), 'numpy.asarray', 'np.asarray', (['[0.5, 1.0]'], {}), '([0.5, 1.0])\n', (1580, 1592), True, 'import numpy as np\n'), ((2010, 2031), 'numpy.array', 'np.array', (['[4.55, 3.0]'], {}), '([4.55, 3.0])\n', (2018, 2031), True, 'import numpy as np\n'), ((2315, 2336), 'numpy.array', 'np.array', (['[4.55, 3.0]'], {}), '([4.55, 3.0])\n', (2323, 2336), True, 'import numpy as np\n'), ((3414, 3445), 'noisyopt.AveragedFunction', 'noisyopt.AveragedFunction', (['func'], {}), '(func)\n', (3439, 3445), False, 'import noisyopt\n'), ((957, 985), 'numpy.asarray', 'np.asarray', (['[[0, 1], [0, 1]]'], {}), '([[0, 1], [0, 1]])\n', 
(967, 985), True, 'import numpy as np\n'), ((1210, 1242), 'numpy.asarray', 'np.asarray', (['[[0.5, 1], [0.5, 1]]'], {}), '([[0.5, 1], [0.5, 1]])\n', (1220, 1242), True, 'import numpy as np\n'), ((1823, 1850), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (1844, 1850), True, 'import numpy as np\n'), ((3358, 3386), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.01)'}), '(scale=0.01)\n', (3374, 3386), True, 'import numpy as np\n'), ((4192, 4209), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (4207, 4209), True, 'import numpy as np\n'), ((1901, 1919), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (1916, 1919), True, 'import numpy as np\n'), ((3645, 3658), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (3655, 3658), True, 'import numpy as np\n'), ((4170, 4183), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (4180, 4183), True, 'import numpy as np\n')] |
from django.contrib import admin
from .models import SimpleRedirect
@admin.register(SimpleRedirect)
class SimpleRedirectAdmin(admin.ModelAdmin):
list_display = [
'from_url',
'to_url',
'date_created',
'date_modified',
'date_active_start',
'date_active_end',
]
| [
"django.contrib.admin.register"
] | [((72, 102), 'django.contrib.admin.register', 'admin.register', (['SimpleRedirect'], {}), '(SimpleRedirect)\n', (86, 102), False, 'from django.contrib import admin\n')] |
import json
import py
import textwrap
issues_url = "http://bitbucket.org/api/1.0/repositories/pytest-dev/pytest/issues"
import requests
def get_issues():
chunksize = 50
start = 0
issues = []
while 1:
post_data = {"accountname": "pytest-dev",
"repo_slug": "pytest",
"start": start,
"limit": chunksize}
print ("getting from", start)
r = requests.get(issues_url, params=post_data)
data = r.json()
issues.extend(data["issues"])
if start + chunksize >= data["count"]:
return issues
start += chunksize
kind2num = "bug enhancement task proposal".split()
status2num = "new open resolved duplicate invalid wontfix".split()
def main(args):
cachefile = py.path.local(args.cache)
if not cachefile.exists() or args.refresh:
issues = get_issues()
cachefile.write(json.dumps(issues))
else:
issues = json.loads(cachefile.read())
open_issues = [x for x in issues
if x["status"] in ("new", "open")]
def kind_and_id(x):
kind = x["metadata"]["kind"]
return kind2num.index(kind), len(issues)-int(x["local_id"])
open_issues.sort(key=kind_and_id)
report(open_issues)
def report(issues):
for issue in issues:
metadata = issue["metadata"]
priority = issue["priority"]
title = issue["title"]
content = issue["content"]
kind = metadata["kind"]
status = issue["status"]
id = issue["local_id"]
link = "https://bitbucket.org/pytest-dev/pytest/issue/%s/" % id
print("----")
print(status, kind, link)
print(title)
#print()
#lines = content.split("\n")
#print ("\n".join(lines[:3]))
#if len(lines) > 3 or len(content) > 240:
# print ("...")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("process bitbucket issues")
parser.add_argument("--refresh", action="store_true",
help="invalidate cache, refresh issues")
parser.add_argument("--cache", action="store", default="issues.json",
help="cache file")
args = parser.parse_args()
main(args)
| [
"py.path.local",
"json.dumps",
"argparse.ArgumentParser",
"requests.get"
] | [((799, 824), 'py.path.local', 'py.path.local', (['args.cache'], {}), '(args.cache)\n', (812, 824), False, 'import py\n'), ((1948, 1999), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""process bitbucket issues"""'], {}), "('process bitbucket issues')\n", (1971, 1999), False, 'import argparse\n'), ((441, 483), 'requests.get', 'requests.get', (['issues_url'], {'params': 'post_data'}), '(issues_url, params=post_data)\n', (453, 483), False, 'import requests\n'), ((926, 944), 'json.dumps', 'json.dumps', (['issues'], {}), '(issues)\n', (936, 944), False, 'import json\n')] |
# coding=utf-8
"""Environment Loader feature tests."""
# from pypi
from expects import (
be_true,
expect
)
from pytest_bdd import (
given,
scenarios,
then,
when,
)
# for testing
from .fixtures import katamari
# software under test
from cse_575.data.common import Environment
# Setup
scenarios("../features/environment_loader.feature")
# ********** Keys ********** #
# @scenario('features/environment_loader.feature', 'The environment loader is built')
# def test_the_environment_loader_is_built():
# """The environment loader is built."""
@given('a built environment loader')
def a_built_environment_loader(katamari):
katamari.environment = Environment()
return
@when('the keys are checked')
def the_keys_are_checked(katamari):
environment = katamari.environment
expect(environment.raw_data_folder.is_dir()).to(be_true)
expect(environment.zero_test_images.is_file()).to(be_true)
expect(environment.one_test_images.is_file()).to(be_true)
expect(environment.zero_train_images.is_file()).to(be_true)
expect(environment.one_train_images.is_file()).to(be_true)
return
@then('it has the expected keys')
def it_has_the_expected_keys():
return
| [
"pytest_bdd.then",
"pytest_bdd.when",
"pytest_bdd.scenarios",
"pytest_bdd.given",
"cse_575.data.common.Environment"
] | [((311, 362), 'pytest_bdd.scenarios', 'scenarios', (['"""../features/environment_loader.feature"""'], {}), "('../features/environment_loader.feature')\n", (320, 362), False, 'from pytest_bdd import given, scenarios, then, when\n'), ((574, 609), 'pytest_bdd.given', 'given', (['"""a built environment loader"""'], {}), "('a built environment loader')\n", (579, 609), False, 'from pytest_bdd import given, scenarios, then, when\n'), ((707, 735), 'pytest_bdd.when', 'when', (['"""the keys are checked"""'], {}), "('the keys are checked')\n", (711, 735), False, 'from pytest_bdd import given, scenarios, then, when\n'), ((1138, 1170), 'pytest_bdd.then', 'then', (['"""it has the expected keys"""'], {}), "('it has the expected keys')\n", (1142, 1170), False, 'from pytest_bdd import given, scenarios, then, when\n'), ((679, 692), 'cse_575.data.common.Environment', 'Environment', ([], {}), '()\n', (690, 692), False, 'from cse_575.data.common import Environment\n')] |
import os
import subprocess
from pretty_print import Print_C
class Runner:
run_kases = 3
def __init__(self, scheme, testcases):
self.scheme = scheme
self.testcases = testcases
self.bin_file_template = f"build/test_results/{{testcase}}/bin/{scheme}"
self.myout_template = f"build/output/{{testcase}}/{scheme}.out"
self.runner_log = f"build/log/run_log/{{testcase}}/{scheme}_{{kase}}.out"
for testcase in testcases:
self.__generate_path(testcase)
def __generate_path(self, testcase):
myout_path = f"build/output/{testcase}/"
runner_log_path = f"build/log/run_log/{testcase}/"
if not os.path.exists(myout_path):
os.makedirs(myout_path)
if not os.path.exists(runner_log_path):
os.makedirs(runner_log_path)
def run_single_test(self, testcase, kase):
bin = self.bin_file_template.format(testcase=testcase)
stdin = f"testcases/{testcase}.in"
myout = self.myout_template.format(testcase=testcase)
log = self.runner_log.format(testcase=testcase, kase=kase)
myout_file = open(myout, "a+")
log_file = open(log, "a+")
null_file = open(os.devnull, "w")
Print_C.print_procedure(f"Running {self.scheme}_{testcase} [kase: {kase}]")
if os.path.exists(stdin):
stdin_file = open(stdin, "r")
if kase == 0:
p = subprocess.run(f"{bin}".split(), stdin=stdin_file, stdout=myout_file, stderr=log_file, bufsize=1)
subprocess.run(f"echo".split(), stdout=myout_file, bufsize=1)
subprocess.run(f"echo {p.returncode}".split(), stdout=myout_file, bufsize=1)
else:
p = subprocess.run(f"{bin}".split(), stdin=stdin_file, stdout=null_file, stderr=log_file, bufsize=1)
stdin_file.close()
else:
if kase == 0:
p = subprocess.run(f"{bin}".split(), stdout=myout_file, stderr=log_file, bufsize=1)
subprocess.run(f"echo".split(), stdout=myout_file, bufsize=1)
subprocess.run(f"echo {p.returncode}".split(), stdout=myout_file, bufsize=1)
else:
p = subprocess.run(f"{bin}".split(), stdout=null_file, stderr=log_file, bufsize=1)
myout_file.close()
log_file.close()
def run_all_tests(self):
for kase in range(Runner.run_kases):
Print_C.print_subheader(f"[Running KASE {kase}]")
for testcase in self.testcases:
self.run_single_test(testcase=testcase, kase=kase)
| [
"os.path.exists",
"pretty_print.Print_C.print_subheader",
"pretty_print.Print_C.print_procedure",
"os.makedirs"
] | [((1248, 1323), 'pretty_print.Print_C.print_procedure', 'Print_C.print_procedure', (['f"""Running {self.scheme}_{testcase} [kase: {kase}]"""'], {}), "(f'Running {self.scheme}_{testcase} [kase: {kase}]')\n", (1271, 1323), False, 'from pretty_print import Print_C\n'), ((1336, 1357), 'os.path.exists', 'os.path.exists', (['stdin'], {}), '(stdin)\n', (1350, 1357), False, 'import os\n'), ((684, 710), 'os.path.exists', 'os.path.exists', (['myout_path'], {}), '(myout_path)\n', (698, 710), False, 'import os\n'), ((724, 747), 'os.makedirs', 'os.makedirs', (['myout_path'], {}), '(myout_path)\n', (735, 747), False, 'import os\n'), ((764, 795), 'os.path.exists', 'os.path.exists', (['runner_log_path'], {}), '(runner_log_path)\n', (778, 795), False, 'import os\n'), ((809, 837), 'os.makedirs', 'os.makedirs', (['runner_log_path'], {}), '(runner_log_path)\n', (820, 837), False, 'import os\n'), ((2451, 2500), 'pretty_print.Print_C.print_subheader', 'Print_C.print_subheader', (['f"""[Running KASE {kase}]"""'], {}), "(f'[Running KASE {kase}]')\n", (2474, 2500), False, 'from pretty_print import Print_C\n')] |
import json
from copy import deepcopy
from random import shuffle
# Canonical ordering of the 78-card tarot deck: the 22 major arcana first
# ('magician' .. 'fool'), then the four 14-card minor-arcana suits
# (wands, cups, swords, coins), each listed from king down to ace.
# Keys here must match the keys of the `upright` and `reverse` dicts below.
cards = ['magician', 'high priestess', 'empress', 'emperor', 'hierophant', 'lovers', 'chariot', 'justice', 'hermit',
         'wheel of fortune', 'strength', 'hanged man', 'death', 'temperance', 'devil', 'tower', 'star', 'moon', 'sun',
         'judgement', 'world', 'fool', 'king of wands', 'queen of wands', 'knight of wands', 'page of wands',
         'ten of wands', 'nine of wands', 'eight of wands', 'seven of wands', 'six of wands', 'five of wands',
         'four of wands', 'three of wands', 'two of wands', 'ace of wands', 'king of cups', 'queen of cups',
         'knight of cups', 'page of cups', 'ten of cups', 'nine of cups', 'eight of cups', 'seven of cups',
         'six of cups', 'five of cups', 'four of cups', 'three of cups', 'two of cups', 'ace of cups', 'king of swords',
         'queen of swords', 'knight of swords', 'page of swords', 'ten of swords', 'nine of swords', 'eight of swords',
         'seven of swords', 'six of swords', 'five of swords', 'four of swords', 'three of swords', 'two of swords',
         'ace of swords', 'king of coins', 'queen of coins', 'knight of coins', 'page of coins', 'ten of coins',
         'nine of coins', 'eight of coins', 'seven of coins', 'six of coins', 'five of coins', 'four of coins',
         'three of coins', 'two of coins', 'ace of coins']
# Upright (non-reversed) keyword meanings shown to the user by TarotDeck.draw().
# Fixes: missing commas/spaces in several keyword lists ('beginnings possibilities',
# 'accomplishment action', 'hand,will-power', 'trust,romance', 'marriage,vain',
# 'kindness,new') and a stray mid-string capital in 'double-dealing Deception'.
upright = {'magician': 'creativity, self-confidence, dexterity, sleight of hand, will-power, skill',
           'high priestess': 'knowledge, wisdom, learning, intuition, impatience, virtue, purity',
           'empress': 'development, accomplishment, action, evolution',
           'emperor': 'authority, father-figure, structure, solid foundation',
           'hierophant': 'mercy, conformity, forgiveness, social approval, bonded, inspiration',
           'lovers': 'harmony, trust, romance, optimism, honor, love, harmony',
           'chariot': 'perseverance, rushed decision, turmoil, vengeance, adversity',
           'justice': 'equality, righteousness, virtue, honor, harmony, balance',
           'hermit': 'inner strength, prudence, withdrawal, caution, vigilance',
           'wheel of fortune': 'unexpected events, advancement, destiny, fortune, progress',
           'strength': 'courage, conviction, strength, determination, action, heroism, virility',
           'hanged man': 'change, reversal, boredom, improvement, rebirth, suspension, change',
           'death': 'unexpected change, loss, failure, transformation, death, bad luck',
           'temperance': 'temperance, patience, good influence, confidence, moderation',
           # NOTE(review): 'devil' and 'tower' carry identical upright text —
           # looks like a copy/paste slip; verify the intended 'devil' meaning.
           'devil': 'downfall, unexpected failure, controversy, ravage, disaster, ill tempered',
           'tower': 'downfall, unexpected failure, controversy, ravage, disaster, ill tempered',
           'star': 'balance, pleasure, optimism, insight, spiritual love, hope, faith',
           'moon': 'double-dealing, deception, disillusionment, trickery, error, danger, disgrace',
           'sun': 'accomplishment, success, love, joy, happy marriage, satisfaction',
           'judgement': 'awakening, renewal, rejuvenation, rebirth, improvement, promotion, atonement, judgment',
           'world': 'perfection, recognition, success, fulfillment, eternal life',
           'fool': 'beginnings, possibilities, pleasure, thoughtlessness, adventure, opportunity',
           'king of wands': 'passionate, good leader, noble',
           'queen of wands': 'fondness, attraction, command ',
           'knight of wands': 'generous, journey, impetuous',
           'page of wands': 'enthusiasm, exploration, discovery, free spirit',
           'ten of wands': 'pain, ruined, failure',
           'nine of wands': 'victory, good health, obstinacy',
           'eight of wands': 'new ideas, love, journey',
           'seven of wands': 'stiff competition, victory, courage, energy',
           'six of wands': 'leadership, good news, success',
           'five of wands': 'lawsuit or quarrel, courage, competition',
           'four of wands': 'dissatisfaction, kindness, reevaluation ',
           'three of wands': 'cooperation, good partnership, success',
           'two of wands': 'generous person, courage, patience, courage ',
           'ace of wands': 'profitable journey, new business, beginning, new career, birth, inheritance',
           'king of cups': 'kindness, willingness, enjoyment',
           'queen of cups': 'loving mother, gentle, happiness',
           'knight of cups': 'emotional, romantic dreamer, intelligence',
           'page of cups': 'sweetness, interest in literature, gentleness',
           'ten of cups': 'friendship, happiness, life',
           'nine of cups': 'physical well-being, hopes, security',
           'eight of cups': 'disappointment, abandonment, misery',
           'seven of cups': 'imagination, illusion, directionless',
           'six of cups': 'acquaintance, good memories, acquaintance, happiness',
           'five of cups': 'broken marriage, vain regret, sorrow, loss',
           'four of cups': 'dissatisfaction, kindness, reevaluation, redemption',
           'three of cups': 'fortune, hospitality, discovery',
           'two of cups': 'romance, friendship, cooperation',
           'ace of cups': 'good health, love, joy, beauty',
           'king of swords': 'powerful, friendship, counselor',
           'queen of swords': 'skillful, brave, clever, rush',
           'knight of swords': 'strong man, braver, clever person',
           'page of swords': 'grace, diplomacy, dexterity, grace',
           'ten of swords': 'defeat, failure, pain',
           'nine of swords': 'desolation, illness, suspicion, cruelty',
           'eight of swords': 'weakness, indecision, censure',
           'seven of swords': 'betrayal, insolence, unwise attempt',
           'six of swords': 'harmony, sorrow, journey',
           'five of swords': 'defeat, cowardliness, empty victory',
           'four of swords': 'temporary exile, strife, retreat',
           'three of swords': 'broken relationship, civil war',
           'two of swords': 'indecision, trouble, balanced',
           'ace of swords': 'love, valiant, victory',
           'king of coins': 'reliable person, steadiness ',
           'queen of coins': 'thoughtfulness, intelligence, talents, melancholy ',
           'knight of coins': 'dull outlook, patience, animal lover, trustworthy ',
           'page of coins': 'kindness, new ideas/opinions, scholar ',
           'ten of coins': 'wealth, property, stability ',
           'nine of coins': 'solitude, well-being, green thumb ',
           'eight of coins': 'employment, money, learning, trade',
           'seven of coins': 'development, re-evaluation, effort, hard work ',
           'six of coins': 'prosperity, philanthropy, charity, gifts ',
           'five of coins': 'destitution, poor health, despair, loneliness ',
           'four of coins': 'ungenerous, greed, miserly ',
           'three of coins': 'abilities, approval,effort, abilities ',
           'two of coins': 'harmony, new projects, helpful ',
           'ace of coins': 'prosperity, happiness, pleasure'}
# Reversed-card keyword meanings shown to the user by TarotDeck.draw().
# Fixes truncated leading words in three user-facing strings:
# 'alse accusation' -> 'false accusation', 'alse prophecy' -> 'false prophecy',
# 'ack of vision' -> 'lack of vision'.
reverse = {'magician': 'delay, unimaginative, insecurity, lack of self-confidence',
           'high priestess': 'selfishness, shallowness, misunderstanding, ignorance',
           'empress': 'inaction, lack on concentration, vacillation, anxiety, infidelity',
           'emperor': 'domination, excessive control, rigidity, inflexibility',
           'hierophant': 'vulnerability, unconventionality, foolish generosity, impotence, frailty, unorthodoxy',
           'lovers': 'separation, frustration, unreliability,fickleness, untrustworthy',
           'chariot': 'vanquishment, defeat, failure, unsuccessful',
           'justice': 'false accusation, unfairness, abuse, biased',
           'hermit': 'hastiness, rashness,immaturity, imprudence, foolishness',
           'wheel of fortune': 'interruption, outside influences, failure, bad luck',
           'strength': 'pettiness, sickness, unfaithfulness, weakness',
           'hanged man': 'false prophecy, useless sacrifice, unwillingness',
           'death': 'immobility, slow changes, cheating, death, stagnation',
           'temperance': 'conflict, disunion, frustration, impatience, discord',
           'devil': 'release, enlightenment, divorce, recovery',
           'tower': 'entrapment, imprisonment, old ways, rustic',
           'star': 'disappointment, bad luck, imbalance, broken dreams',
           'moon': 'trifling mistakes, deception discovered, negative advantage',
           'sun': 'loneliness, canceled plans, unhappiness, break ups',
           'judgement': 'disappointment, indecision, death, failure, ill-health, theft, worry',
           'world': 'lack of vision, disappointment, imperfection',
           'fool': 'indecision, hesitation, injustice, apathy, bad choice',
           'king of wands': 'unyielding, prejudice, quarrels',
           'queen of wands': 'jealous, revengeful, infidelity',
           'knight of wands': 'suspicion, jealousy, narrow-mindedness',
           'page of wands': 'setbacks to new ideas, pessimism, lack of direction',
           'ten of wands': 'cleverness, energy, strength',
           'nine of wands': 'weakness, ill-health, adversity',
           'eight of wands': 'violence, quarrels, courage',
           'seven of wands': 'advantage, patience, indecision',
           'six of wands': 'postponement, bad news, pride in riches',
           'five of wands': 'new opportunities, harmony, generosity',
           'four of wands': 'new relationship, new ambitions, action',
           'three of wands': 'carelessness, arrogance, pride, mistakes',
           'two of wands': 'impatience, domination',
           'ace of wands': 'selfishness, lack of determination, setback',
           'king of cups': 'double-dealer, scandal, crafty, violent',
           'queen of cups': 'perverse, unhappy, gloom, over-active imagination',
           'knight of cups': 'idleness, untruthful, fraud, sensuality',
           'page of cups': 'poor imagination, selfishness, no desires',
           'ten of cups': 'waste, broken relationships, quarrel',
           'nine of cups': 'illness, failure, overindulgence',
           'eight of cups': 'pleasure, success, joy',
           'seven of cups': 'will-power, determination',
           'six of cups': 'friendship, disappointment, past',
           'five of cups': 'return, summon, hope',
           'four of cups': 'new goals, ambitions, beginning',
           'three of cups': 'hidden, overindulgence, pain, gossip',
           'two of cups': 'violent passion, misunderstanding',
           'ace of cups': 'egotism, selfishness, hesitancy',
           'king of swords': 'obstinate, evil intentions, judgments',
           'queen of swords': 'sly, keen, deceitful',
           'knight of swords': 'troublemaker, a crafty, tyranny',
           'page of swords': 'imposture, ill-health, cunningness',
           'ten of swords': 'courage, positive energy, good health',
           'nine of swords': 'unselfishness, good news, healing',
           'eight of swords': 'freedom, new beginnings, relaxation',
           'seven of swords': 'counsel, helpful, advice',
           'six of swords': 'obstacles, difficulties, defeat',
           'five of swords': 'unfairness, defeat, loss',
           'four of swords': 'social unrest, labor strikes, renewed activity',
           'three of swords': 'sorrow, loss, confusion',
           'two of swords': 'unscrupulous, release',
           'ace of swords': 'obstacles, tyranny, power',
           'king of coins': 'bribes, materialistic, calm',
           'queen of coins': 'mistrust, suspicion, neglect',
           'knight of coins': 'carelessness, standstill, irresponsible',
           'page of coins': 'luxury, rebellious, bad news',
           'ten of coins': 'dull, slothfulness, misfortune',
           'nine of coins': 'caution, possible loss',
           'eight of coins': 'void, no ambition, dislike',
           'seven of coins': 'impatience, slow progress, investments',
           'six of coins': 'jealousy, miserliness, unfairness',
           'five of coins': 'employment, courage, revival',
           'four of coins': 'spendthrift, obstacles, earthy possessions',
           'three of coins': 'preoccupation, ambitions',
           'two of coins': 'difficulty, discouragement',
           'ace of coins': 'misery, greedy, money'}
class TarotDeck:
    """A shuffled 78-card tarot deck supporting draw, reset, save and load.

    Relies on the module-level `cards` list and `upright`/`reverse`
    meaning dicts.
    """

    def __init__(self):
        # List of remaining card names; the top of the deck is the list's end.
        self.deck = None
        self.reset()

    def draw(self):
        """Pop the top card and describe it.

        Returns a (message, image_filename) tuple; image_filename is None
        when the deck is empty.
        """
        if not self.deck:
            return 'the deck is empty. use !tarot reset to reset the deck', None
        card = self.deck.pop()
        ret = []
        # Cards in this set read naturally without a definite article
        # ("you drew justice" rather than "you drew the justice").
        # Bug fix: the original appended BOTH phrasings for these cards
        # because the second append was not in an else branch.
        if card in {'justice', 'strength', 'death', 'temperance', 'judgement'}:
            ret.append('you drew {}'.format(card))
        else:
            ret.append('you drew the {}'.format(card))
        ret.append('upright meaning: {}'.format(upright[card]))
        ret.append('reverse meaning: {}'.format(reverse[card]))
        return '\n'.join(ret), '{}.jpg'.format(card.replace(' ', '_'))

    def reset(self):
        """Restore and reshuffle the full deck."""
        self.deck = deepcopy(cards)
        shuffle(self.deck)
        return 'tarot deck reset'

    def save(self, save_path):
        """Persist the remaining deck to `save_path` as JSON."""
        with open(save_path, 'w') as file:
            json.dump(self.deck, file)
        print('saved deck')

    def load(self, load_path):
        """Replace the current deck with one previously saved as JSON."""
        with open(load_path, 'r') as file:
            self.deck = json.load(file)
        print('loaded deck')
| [
"json.load",
"random.shuffle",
"json.dump",
"copy.deepcopy"
] | [((13203, 13218), 'copy.deepcopy', 'deepcopy', (['cards'], {}), '(cards)\n', (13211, 13218), False, 'from copy import deepcopy\n'), ((13227, 13245), 'random.shuffle', 'shuffle', (['self.deck'], {}), '(self.deck)\n', (13234, 13245), False, 'from random import shuffle\n'), ((13367, 13393), 'json.dump', 'json.dump', (['self.deck', 'file'], {}), '(self.deck, file)\n', (13376, 13393), False, 'import json\n'), ((13521, 13536), 'json.load', 'json.load', (['file'], {}), '(file)\n', (13530, 13536), False, 'import json\n')] |
# import lib.pbcvt as pbcvt
import cv2
import numpy as np
import sys
from time import time
def distance(o1, o2):
    """Euclidean distance between the centers of two (x, y, w, h) rectangles."""
    x1, y1, w1, h1 = o1
    x2, y2, w2, h2 = o2
    dx = (x1 + w1 / 2) - (x2 + w2 / 2)
    dy = (y1 + h1 / 2) - (y2 + h2 / 2)
    return np.hypot(dx, dy)
# Display windows: raw grey frame, histogram-equalized frame, threshold mask.
cv2.namedWindow("preview")
cv2.namedWindow("preview2")
cv2.namedWindow("preview3")
# argv[1] = camera index; argv[2]/argv[3] = requested capture width/height
# (VideoCapture property ids 3 and 4 are CAP_PROP_FRAME_WIDTH/HEIGHT).
vc = cv2.VideoCapture(int(sys.argv[1]))
vc.set(3,int(sys.argv[2]))
vc.set(4,int(sys.argv[3]))
# Echo the resolution the camera actually granted (may differ from requested).
print(vc.get(3))
print(vc.get(4))
# (disabled) optional recording of the session to pupiltest.mp4
# vout = None
# if (int(sys.argv[5])):
#     fourcc = cv2.VideoWriter_fourcc(*'x264')
#     vout = cv2.VideoWriter('pupiltest.mp4', fourcc, 24.0, (int(vc.get(3)),int(vc.get(4))))
if vc.isOpened(): # try to get the first frame
    rval, frame = vc.read()
else:
    rval = False
# ptime/nf drive the crude FPS counter inside the main loop.
ptime = time()
nf = 0
# Haar cascades loaded from the relative 'trained/' directory.
# NOTE(review): glass_cascade, reye_cascade and leye_cascade are loaded but
# never used below — dead weight unless code outside this view uses them.
# face_cascade = cv2.CascadeClassifier('trained/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('trained/haarcascade_eye.xml')
glass_cascade = cv2.CascadeClassifier('trained/haarcascade_eye_tree_eyeglasses.xml')
reye_cascade = cv2.CascadeClassifier('trained/haarcascade_righteye_2splits.xml')
leye_cascade = cv2.CascadeClassifier('trained/haarcascade_lefteye_2splits.xml')
# face = None
# flost = 0
# Main capture loop: one iteration per camera frame, until the read fails
# or the user presses ESC. Pipeline: grey conversion -> eye detection ->
# per-eye threshold estimation -> contour/ellipse fit of the pupil candidate.
while rval:
    roi_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    equalized = cv2.equalizeHist(roi_gray)  # shown in "preview2" only
    roi_color = frame
    # (disabled) face-tracking stage that used to narrow the eye search region
    # faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    # flost = flost+1
    # for f in faces:
    #     if face is not None:
    #         # print("Face: " + str(distance(f,face)))
    #         if not (1 < distance(f,face) < 40):
    #             continue
    #     face = f
    #     flost = 0
    # if flost < 5 and face is not None:
    #     (x,y,w,h) = face
    #     x+=10
    #     y+=10
    #     w = int(w*0.85)
    #     h = int(h*0.5)
    #     cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
    #     roi_gray = gray[y:y+h, x:x+w]
    #     roi_color = frame[y:y+h, x:x+w]
    eyes = eye_cascade.detectMultiScale(roi_gray)
    for e in eyes:
        (ex,ey,ew,eh) = e
        # ex += 10
        # ey += 10
        # ew -= 10
        # eh -= 10
        # Outline each detected eye in red on the color frame.
        cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,0,255),2)
        eye_roi_gray = roi_gray[ey:ey+eh, ex:ex+ew]
        eye_roi_color = roi_color[ey:ey+eh, ex:ex+ew]
        # 256-bin intensity histogram of the eye patch.
        hist = cv2.calcHist([eye_roi_gray],[0],None,[256],[0,256])
        # Define criteria = ( type, max_iter = 10 , epsilon = 1.0 )
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        # Apply KMeans
        compactness,labels,centers = cv2.kmeans(hist,2,None,criteria,100,cv2.KMEANS_RANDOM_CENTERS)
        print(np.sqrt(compactness)/10)
        print(centers)
        # center = pbcvt.findPupil(roi_gray, int(ex), int(ey), int(ew), int(eh))
        # NOTE(review): kmeans here clusters the histogram *counts*, so
        # centers[0] is a mean bin count, not a grey level — confirm that
        # centers[0]-10 is really the intended binarization threshold.
        ret, thresh = cv2.threshold(eye_roi_gray, centers[0]-10, 255, 0)
        # thresh = cv2.adaptiveThreshold(eye_roi_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 0)
        # NOTE(review): two-value unpacking matches OpenCV 4.x; OpenCV 3.x
        # findContours returns (image, contours, hierarchy) — verify version.
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # cv2.drawContours(eye_roi_color, contours, -1, (0,0,255), 3)
        # Fit an ellipse to sufficiently large contours (pupil candidates)
        # and mark its center with a blue dot.
        for cont in contours:
            if len(cont) > 5 and cv2.contourArea(cont) > 1000:
                ellipse = cv2.fitEllipse(cont)
                cv2.ellipse(eye_roi_color, ellipse, (0,0,255),2)
                cv2.circle(eye_roi_color, (int(ellipse[0][0]),int(ellipse[0][1])), 2, (255,0,0), 3)
    # cv2.circle(eye_roi_color, center, 2, (0,255,0), 3)
    # else:
    #     face = None
    cv2.imshow("preview", roi_gray)
    cv2.imshow("preview2", equalized)
    # NOTE(review): `thresh` is only assigned inside the eye loop above;
    # if no eye is detected on the very first frame this raises NameError.
    cv2.imshow("preview3", thresh)
    # if vout:
    #     vout.write(frame)
    # Crude FPS counter: print the average frame rate every 5 seconds.
    nf = nf + 1
    if time() - ptime > 5:
        print(str(nf/(time()-ptime)))
        ptime = time()
        nf = 0
    key = cv2.waitKey(20)
    if key == 27: # exit on ESC
        break
    elif key == 32:
        # SPACE: snapshot the current frame to disk.
        cv2.imwrite('testimage.png',frame);
    rval, frame = vc.read()
cv2.destroyWindow("preview")
cv2.destroyWindow("preview2")
cv2.destroyWindow("preview3")
vc.release()
# if vout:
#     vout.release()
| [
"cv2.rectangle",
"numpy.sqrt",
"cv2.imshow",
"cv2.ellipse",
"cv2.fitEllipse",
"cv2.CascadeClassifier",
"cv2.calcHist",
"cv2.threshold",
"cv2.contourArea",
"numpy.hypot",
"cv2.waitKey",
"cv2.kmeans",
"cv2.equalizeHist",
"cv2.cvtColor",
"time.time",
"cv2.namedWindow",
"cv2.imwrite",
... | [((261, 287), 'cv2.namedWindow', 'cv2.namedWindow', (['"""preview"""'], {}), "('preview')\n", (276, 287), False, 'import cv2\n'), ((288, 315), 'cv2.namedWindow', 'cv2.namedWindow', (['"""preview2"""'], {}), "('preview2')\n", (303, 315), False, 'import cv2\n'), ((316, 343), 'cv2.namedWindow', 'cv2.namedWindow', (['"""preview3"""'], {}), "('preview3')\n", (331, 343), False, 'import cv2\n'), ((759, 765), 'time.time', 'time', ([], {}), '()\n', (763, 765), False, 'from time import time\n'), ((873, 925), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""trained/haarcascade_eye.xml"""'], {}), "('trained/haarcascade_eye.xml')\n", (894, 925), False, 'import cv2\n'), ((942, 1010), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""trained/haarcascade_eye_tree_eyeglasses.xml"""'], {}), "('trained/haarcascade_eye_tree_eyeglasses.xml')\n", (963, 1010), False, 'import cv2\n'), ((1026, 1091), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""trained/haarcascade_righteye_2splits.xml"""'], {}), "('trained/haarcascade_righteye_2splits.xml')\n", (1047, 1091), False, 'import cv2\n'), ((1107, 1171), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""trained/haarcascade_lefteye_2splits.xml"""'], {}), "('trained/haarcascade_lefteye_2splits.xml')\n", (1128, 1171), False, 'import cv2\n'), ((3951, 3979), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""preview"""'], {}), "('preview')\n", (3968, 3979), False, 'import cv2\n'), ((3980, 4009), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""preview2"""'], {}), "('preview2')\n", (3997, 4009), False, 'import cv2\n'), ((4010, 4039), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""preview3"""'], {}), "('preview3')\n", (4027, 4039), False, 'import cv2\n'), ((226, 264), 'numpy.hypot', 'np.hypot', (['(c1[0] - c2[0])', '(c1[1] - c2[1])'], {}), '(c1[0] - c2[0], c1[1] - c2[1])\n', (234, 264), True, 'import numpy as np\n'), ((1226, 1265), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, 
cv2.COLOR_BGR2GRAY)\n', (1238, 1265), False, 'import cv2\n'), ((1282, 1308), 'cv2.equalizeHist', 'cv2.equalizeHist', (['roi_gray'], {}), '(roi_gray)\n', (1298, 1308), False, 'import cv2\n'), ((3519, 3550), 'cv2.imshow', 'cv2.imshow', (['"""preview"""', 'roi_gray'], {}), "('preview', roi_gray)\n", (3529, 3550), False, 'import cv2\n'), ((3555, 3588), 'cv2.imshow', 'cv2.imshow', (['"""preview2"""', 'equalized'], {}), "('preview2', equalized)\n", (3565, 3588), False, 'import cv2\n'), ((3593, 3623), 'cv2.imshow', 'cv2.imshow', (['"""preview3"""', 'thresh'], {}), "('preview3', thresh)\n", (3603, 3623), False, 'import cv2\n'), ((3796, 3811), 'cv2.waitKey', 'cv2.waitKey', (['(20)'], {}), '(20)\n', (3807, 3811), False, 'import cv2\n'), ((2108, 2178), 'cv2.rectangle', 'cv2.rectangle', (['roi_color', '(ex, ey)', '(ex + ew, ey + eh)', '(0, 0, 255)', '(2)'], {}), '(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 0, 255), 2)\n', (2121, 2178), False, 'import cv2\n'), ((2288, 2344), 'cv2.calcHist', 'cv2.calcHist', (['[eye_roi_gray]', '[0]', 'None', '[256]', '[0, 256]'], {}), '([eye_roi_gray], [0], None, [256], [0, 256])\n', (2300, 2344), False, 'import cv2\n'), ((2549, 2616), 'cv2.kmeans', 'cv2.kmeans', (['hist', '(2)', 'None', 'criteria', '(100)', 'cv2.KMEANS_RANDOM_CENTERS'], {}), '(hist, 2, None, criteria, 100, cv2.KMEANS_RANDOM_CENTERS)\n', (2559, 2616), False, 'import cv2\n'), ((2777, 2829), 'cv2.threshold', 'cv2.threshold', (['eye_roi_gray', '(centers[0] - 10)', '(255)', '(0)'], {}), '(eye_roi_gray, centers[0] - 10, 255, 0)\n', (2790, 2829), False, 'import cv2\n'), ((2977, 3041), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (2993, 3041), False, 'import cv2\n'), ((3764, 3770), 'time.time', 'time', ([], {}), '()\n', (3768, 3770), False, 'from time import time\n'), ((3690, 3696), 'time.time', 'time', ([], {}), '()\n', (3694, 3696), False, 'from time import time\n'), 
((3886, 3921), 'cv2.imwrite', 'cv2.imwrite', (['"""testimage.png"""', 'frame'], {}), "('testimage.png', frame)\n", (3897, 3921), False, 'import cv2\n'), ((2626, 2646), 'numpy.sqrt', 'np.sqrt', (['compactness'], {}), '(compactness)\n', (2633, 2646), True, 'import numpy as np\n'), ((3231, 3251), 'cv2.fitEllipse', 'cv2.fitEllipse', (['cont'], {}), '(cont)\n', (3245, 3251), False, 'import cv2\n'), ((3268, 3319), 'cv2.ellipse', 'cv2.ellipse', (['eye_roi_color', 'ellipse', '(0, 0, 255)', '(2)'], {}), '(eye_roi_color, ellipse, (0, 0, 255), 2)\n', (3279, 3319), False, 'import cv2\n'), ((3175, 3196), 'cv2.contourArea', 'cv2.contourArea', (['cont'], {}), '(cont)\n', (3190, 3196), False, 'import cv2\n'), ((3732, 3738), 'time.time', 'time', ([], {}), '()\n', (3736, 3738), False, 'from time import time\n')] |
# import speech_recognition as sr
# import sys
#
# # read filename from arguments
# filename = ("C:\\Users\\utkar\\Downloads\\crowd.mp3")
#
# # initialize the recognizer
# r = sr.Recognizer()
#
# # open the file
# with sr.AudioFile(filename) as source:
# # listen for the data (load audio to memory)
# audio_data = r.record(source)
# # recognize (convert from speech to text)
# text = r.recognize_google(audio_data)
# print(text)
#Python 2.x program to transcribe an Audio file
import speech_recognition as sr
import connection
# Path of the audio sample to transcribe, relative to this script.
AUDIO_FILE = ("..\\audio\\Welcome.wav")

# use the audio file (rather than a microphone) as the audio source
r = sr.Recognizer()
with sr.AudioFile(AUDIO_FILE) as source:
    # record() reads the whole file into memory in one shot,
    # unlike listen(), which waits for a phrase boundary.
    audio = r.record(source)

try:
    # Transcribe once and reuse the result: recognize_google() performs a
    # network round trip per call, and the original code called it twice
    # (once for the print, once for the database insert).
    transcript = r.recognize_google(audio)
    print("The audio file contains: " + transcript)
    connection.insertAudio(2, transcript, AUDIO_FILE)
except sr.UnknownValueError:
    print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
    # Network/API failure talking to the Google Speech Recognition service.
    print("Could not request results from Google Speech Recognition service; {0}".format(e))
| [
"speech_recognition.Recognizer",
"speech_recognition.AudioFile"
] | [((637, 652), 'speech_recognition.Recognizer', 'sr.Recognizer', ([], {}), '()\n', (650, 652), True, 'import speech_recognition as sr\n'), ((659, 683), 'speech_recognition.AudioFile', 'sr.AudioFile', (['AUDIO_FILE'], {}), '(AUDIO_FILE)\n', (671, 683), True, 'import speech_recognition as sr\n')] |
import logging
import os
from pathlib import Path
import re
from typing import Dict, List, Optional, Tuple
from calvin_agent.datasets.base_dataset import BaseDataset
from calvin_agent.datasets.utils.episode_utils import (
get_state_info_dict,
process_actions,
process_depth,
process_rgb,
process_state,
)
import numpy as np
import torch
logger = logging.getLogger(__name__)
class NpzDataset(BaseDataset):
    """
    Dataset Loader that uses a shared memory cache

    parameters
    ----------
    datasets_dir: path of folder containing episode files (string must contain 'validation' or 'training')
    save_format: format of episodes in datasets_dir (.pkl or .npz)
    obs_space: DictConfig of the observation modalities of the dataset
    max_window_size: maximum length of the episodes sampled from the dataset
    skip_frames: with language annotations, keep only every skip_frames-th
        start index (0, the default, keeps every index)
    n_digits: zero-padding width of the frame index in episode file names
        (inferred from the first file when None)
    """

    def __init__(self, *args, skip_frames: int = 0, n_digits: Optional[int] = None, **kwargs):  # type: ignore
        super().__init__(*args, **kwargs)
        self.skip_frames = skip_frames
        if self.with_lang:
            (
                self.episode_lookup,
                self.lang_lookup,
                self.max_batched_length_per_demo,
                self.lang_ann,
            ) = self.load_file_indices_lang(self.abs_datasets_dir)
        else:
            self.episode_lookup, self.max_batched_length_per_demo = self.load_file_indices(self.abs_datasets_dir)
        self.naming_pattern, self.n_digits = self.lookup_naming_pattern(n_digits)

    def lookup_naming_pattern(self, n_digits):
        """Infer (prefix/suffix, digit width) of episode file names from the first matching file."""
        it = os.scandir(self.abs_datasets_dir)
        # Scan until the first file with the expected save format.
        while True:
            filename = Path(next(it))
            if self.save_format in filename.suffix:
                break
        aux_naming_pattern = re.split(r"\d+", filename.stem)
        naming_pattern = [filename.parent / aux_naming_pattern[0], filename.suffix]
        n_digits = n_digits if n_digits is not None else len(re.findall(r"\d+", filename.stem)[0])
        assert len(naming_pattern) == 2
        assert n_digits > 0
        return naming_pattern, n_digits

    def get_episode_name(self, idx: int) -> Path:
        """
        Convert frame idx to file name
        """
        return Path(f"{self.naming_pattern[0]}{idx:0{self.n_digits}d}{self.naming_pattern[1]}")

    def zip_sequence(self, start_idx: int, end_idx: int, idx: int) -> Dict[str, np.ndarray]:
        """
        Load consecutive individual frames saved as npy files and combine to episode dict
        parameters:
        -----------
        start_idx: index of first frame
        end_idx: index of last frame

        returns:
        -----------
        episode: dict of numpy arrays containing the episode where keys are the names of modalities
        """
        episodes = [self.load_episode(self.get_episode_name(file_idx)) for file_idx in range(start_idx, end_idx)]
        episode = {key: np.stack([ep[key] for ep in episodes]) for key, _ in episodes[0].items()}
        if self.with_lang:
            episode["language"] = self.lang_ann[self.lang_lookup[idx]][0]  # TODO check [0]
        return episode

    def get_sequences(self, idx: int, window_size: int) -> Dict:
        """
        parameters
        ----------
        idx: index of starting frame
        window_size: length of sampled episode

        returns
        ----------
        seq_state_obs:  numpy array of state observations
        seq_rgb_obs:    tuple of numpy arrays of rgb observations
        seq_depth_obs:  tuple of numpy arrays of depths observations
        seq_acts:       numpy array of actions
        """
        start_file_indx = self.episode_lookup[idx]
        end_file_indx = start_file_indx + window_size

        episode = self.zip_sequence(start_file_indx, end_file_indx, idx)
        seq_state_obs = process_state(episode, self.observation_space, self.transforms, self.proprio_state)
        seq_rgb_obs = process_rgb(episode, self.observation_space, self.transforms)
        seq_depth_obs = process_depth(episode, self.observation_space, self.transforms)
        seq_acts = process_actions(episode, self.observation_space, self.transforms)
        info = get_state_info_dict(episode)
        seq_lang = {"lang": torch.from_numpy(episode["language"]) if self.with_lang else torch.empty(0)}
        seq_dict = {**seq_state_obs, **seq_rgb_obs, **seq_depth_obs, **seq_acts, **info, **seq_lang}  # type:ignore
        seq_dict["idx"] = idx  # type:ignore
        return seq_dict

    def load_file_indices_lang(self, abs_datasets_dir: Path) -> Tuple[List, List, List, np.ndarray]:
        """
        this method builds the mapping from index to file_name used for loading the episodes
        parameters
        ----------
        abs_datasets_dir: absolute path of the directory containing the dataset

        returns
        ----------
        episode_lookup: list for the mapping from training example index to episode (file) index
        max_batched_length_per_demo: list of possible starting indices per episode
        """
        assert abs_datasets_dir.is_dir()
        episode_lookup = []
        try:
            print("trying to load lang data from: ", abs_datasets_dir / self.lang_folder / "auto_lang_ann.npy")
            lang_data = np.load(abs_datasets_dir / self.lang_folder / "auto_lang_ann.npy", allow_pickle=True).reshape(
                -1
            )[0]
        except Exception:
            print("Exception, trying to load lang data from: ", abs_datasets_dir / "auto_lang_ann.npy")
            lang_data = np.load(abs_datasets_dir / "auto_lang_ann.npy", allow_pickle=True).reshape(-1)[0]

        ep_start_end_ids = lang_data["info"]["indx"]
        lang_ann = lang_data["language"]["emb"]
        lang_lookup = []
        max_batched_length_per_demo = []
        for i, (start_idx, end_idx) in enumerate(ep_start_end_ids):
            assert end_idx >= self.max_window_size
            cnt = 0
            for idx in range(start_idx, end_idx + 1 - self.max_window_size):
                # Bug fix: skip_frames defaults to 0, so the original
                # `cnt % self.skip_frames` raised ZeroDivisionError.
                # skip_frames == 0 means "keep every start index".
                if self.skip_frames == 0 or cnt % self.skip_frames == 0:
                    lang_lookup.append(i)
                    episode_lookup.append(idx)
                cnt += 1
            possible_indices = end_idx + 1 - start_idx - self.max_window_size  # TODO: check it for skip_frames
            max_batched_length_per_demo.append(possible_indices)
        return episode_lookup, lang_lookup, max_batched_length_per_demo, lang_ann

    def load_file_indices(self, abs_datasets_dir: Path) -> Tuple[List, List]:
        """
        this method builds the mapping from index to file_name used for loading the episodes
        parameters
        ----------
        abs_datasets_dir: absolute path of the directory containing the dataset

        returns
        ----------
        episode_lookup: list for the mapping from training example index to episode (file) index
        max_batched_length_per_demo: list of possible starting indices per episode
        """
        assert abs_datasets_dir.is_dir()
        episode_lookup = []

        ep_start_end_ids = np.load(abs_datasets_dir / "ep_start_end_ids.npy")
        logger.info(f'Found "ep_start_end_ids.npy" with {len(ep_start_end_ids)} episodes.')
        max_batched_length_per_demo = []
        for start_idx, end_idx in ep_start_end_ids:
            assert end_idx > self.max_window_size
            for idx in range(start_idx, end_idx + 1 - self.max_window_size):
                episode_lookup.append(idx)
            possible_indices = end_idx + 1 - start_idx - self.max_window_size
            max_batched_length_per_demo.append(possible_indices)
        return episode_lookup, max_batched_length_per_demo
| [
"logging.getLogger",
"re.split",
"calvin_agent.datasets.utils.episode_utils.process_state",
"calvin_agent.datasets.utils.episode_utils.process_depth",
"calvin_agent.datasets.utils.episode_utils.process_rgb",
"pathlib.Path",
"calvin_agent.datasets.utils.episode_utils.get_state_info_dict",
"os.scandir",... | [((368, 395), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (385, 395), False, 'import logging\n'), ((1600, 1633), 'os.scandir', 'os.scandir', (['self.abs_datasets_dir'], {}), '(self.abs_datasets_dir)\n', (1610, 1633), False, 'import os\n'), ((1795, 1826), 're.split', 're.split', (['"""\\\\d+"""', 'filename.stem'], {}), "('\\\\d+', filename.stem)\n", (1803, 1826), False, 'import re\n'), ((2247, 2332), 'pathlib.Path', 'Path', (['f"""{self.naming_pattern[0]}{idx:0{self.n_digits}d}{self.naming_pattern[1]}"""'], {}), "(f'{self.naming_pattern[0]}{idx:0{self.n_digits}d}{self.naming_pattern[1]}'\n )\n", (2251, 2332), False, 'from pathlib import Path\n'), ((3842, 3930), 'calvin_agent.datasets.utils.episode_utils.process_state', 'process_state', (['episode', 'self.observation_space', 'self.transforms', 'self.proprio_state'], {}), '(episode, self.observation_space, self.transforms, self.\n proprio_state)\n', (3855, 3930), False, 'from calvin_agent.datasets.utils.episode_utils import get_state_info_dict, process_actions, process_depth, process_rgb, process_state\n'), ((3948, 4009), 'calvin_agent.datasets.utils.episode_utils.process_rgb', 'process_rgb', (['episode', 'self.observation_space', 'self.transforms'], {}), '(episode, self.observation_space, self.transforms)\n', (3959, 4009), False, 'from calvin_agent.datasets.utils.episode_utils import get_state_info_dict, process_actions, process_depth, process_rgb, process_state\n'), ((4034, 4097), 'calvin_agent.datasets.utils.episode_utils.process_depth', 'process_depth', (['episode', 'self.observation_space', 'self.transforms'], {}), '(episode, self.observation_space, self.transforms)\n', (4047, 4097), False, 'from calvin_agent.datasets.utils.episode_utils import get_state_info_dict, process_actions, process_depth, process_rgb, process_state\n'), ((4117, 4182), 'calvin_agent.datasets.utils.episode_utils.process_actions', 'process_actions', (['episode', 'self.observation_space', 
'self.transforms'], {}), '(episode, self.observation_space, self.transforms)\n', (4132, 4182), False, 'from calvin_agent.datasets.utils.episode_utils import get_state_info_dict, process_actions, process_depth, process_rgb, process_state\n'), ((4198, 4226), 'calvin_agent.datasets.utils.episode_utils.get_state_info_dict', 'get_state_info_dict', (['episode'], {}), '(episode)\n', (4217, 4226), False, 'from calvin_agent.datasets.utils.episode_utils import get_state_info_dict, process_actions, process_depth, process_rgb, process_state\n'), ((7161, 7211), 'numpy.load', 'np.load', (["(abs_datasets_dir / 'ep_start_end_ids.npy')"], {}), "(abs_datasets_dir / 'ep_start_end_ids.npy')\n", (7168, 7211), True, 'import numpy as np\n'), ((2929, 2967), 'numpy.stack', 'np.stack', (['[ep[key] for ep in episodes]'], {}), '([ep[key] for ep in episodes])\n', (2937, 2967), True, 'import numpy as np\n'), ((4255, 4292), 'torch.from_numpy', 'torch.from_numpy', (["episode['language']"], {}), "(episode['language'])\n", (4271, 4292), False, 'import torch\n'), ((4316, 4330), 'torch.empty', 'torch.empty', (['(0)'], {}), '(0)\n', (4327, 4330), False, 'import torch\n'), ((1972, 2005), 're.findall', 're.findall', (['"""\\\\d+"""', 'filename.stem'], {}), "('\\\\d+', filename.stem)\n", (1982, 2005), False, 'import re\n'), ((5326, 5415), 'numpy.load', 'np.load', (["(abs_datasets_dir / self.lang_folder / 'auto_lang_ann.npy')"], {'allow_pickle': '(True)'}), "(abs_datasets_dir / self.lang_folder / 'auto_lang_ann.npy',\n allow_pickle=True)\n", (5333, 5415), True, 'import numpy as np\n'), ((5611, 5677), 'numpy.load', 'np.load', (["(abs_datasets_dir / 'auto_lang_ann.npy')"], {'allow_pickle': '(True)'}), "(abs_datasets_dir / 'auto_lang_ann.npy', allow_pickle=True)\n", (5618, 5677), True, 'import numpy as np\n')] |
import urllib
import json
import requests
from bs4 import BeautifulSoup
import pandas as pd
import re
import string
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
def getpage(num):
    """Scrape one page of the EVE Online sales-ads forum listing.

    :param num: zero-based page number of the listing
    :return: DataFrame with one row per topic and columns 'title', 'url'
    """
    page_url = ("https://forums.eveonline.com/c/marketplace/sales-ads/55"
                "?no_definitions=true&page=" + str(num))
    response = requests.get(page_url)
    parsed = BeautifulSoup(response.text, "lxml")
    titles, links = [], []
    # Each topic headline is wrapped in a <span class="link-top-line">.
    for span in parsed.findAll('span', class_='link-top-line'):
        anchor = span.find(class_ = "title raw-link raw-topic-link")
        titles.append(anchor.text)
        links.append(anchor.get('href'))
    return pd.DataFrame({'title': titles, 'url': links})
# Scrape the first 23 listing pages and stack them into one frame.
dflist = []
for i in range(23):
    df = getpage(i)
    dflist.append(df)
# Concatenate all pages and rebuild a clean 0..n-1 index.
data = pd.concat(dflist).reset_index().drop(columns = ['index'])
# Standard English stopwords extended with forum-specific filler words.
stop = stopwords.words('english')
add_stop_words = ['wts', 'bump', 'hello', 'currently', 'looking', 'to', 'sell', 'the', 'following', 'hey', 'so', 'guys',
                  'price', 'is']
stop.extend(add_stop_words)
# Shared NLTK normalizers used by the helper functions below.
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
def remove_html(text):
    """Strip HTML markup from `text` and return only the visible text."""
    return BeautifulSoup(text, 'lxml').get_text()
def remove_punc(text):
    """Drop punctuation characters from `text`.

    Note: `text` is iterated character by character, so the surviving
    characters come back joined by single spaces.
    """
    kept = (ch for ch in text if ch not in string.punctuation)
    return " ".join(kept)
def remove_stopwords(text):
    """Keep only the tokens of `text` absent from the module-level `stop` list."""
    return [token for token in text if token not in stop]
def word_lemmatizer(text):
    """Lemmatize each token in `text` with the shared WordNet lemmatizer."""
    return [lemmatizer.lemmatize(token) for token in text]
def word_stemmer(text):
    """Apply Porter stemming to each token in `text`."""
    return [stemmer.stem(token) for token in text]
def read_from_url(url):
    """Scrape one forum topic and return a per-post DataFrame.

    The frame has one row per post with columns 'user' and 'content'
    (cleaned, lower-cased text) plus a 'stem' column holding the
    stop-word-filtered, stemmed token list of each post.
    """
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    # One container per post in the topic.
    containers = soup.findAll("div", {"class":"topic-body crawler-post"})
    df = pd.DataFrame(columns=['user', 'content'])
    count = 0
    for container in containers:
        user_container = container.findAll("span", {"itemprop":"name"})
        user = user_container[0].text
        #print("User: " + user.lower())
        content_container = container.findAll("div", {"class":"post"})
        """
        This if statement should be removed once infinite scorlling bar is handled
        """
        if content_container:
            # Normalize whitespace, smart quotes and a couple of
            # contractions before tokenization.
            content = remove_html(content_container[0].text)
            dfcontent = (content.lower()).replace("\t","").replace("\n"," ").replace("https ", "https")\
            .replace("…","").replace("we’re", "we are").replace("“","").replace("”","").replace("i’ll", "i will")
            # URL-matching regex. Splitting on it (the whole pattern is one
            # capturing group) interleaves non-URL text at even indices and
            # URLs at odd indices, so punctuation is stripped from the
            # non-URL segments only and the URLs survive intact.
            gruber = re.compile(r"""(?i)\b((?:https?:(?:/{1,3}|[a-z0-9%])|[a-z0-9.\-]+[.](?:com|net|org|edu|
        gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|
        ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz
        |ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et
        |eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id
        |ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu
        |lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np
        |nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|
        Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|
        uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)/)(?:[^\s()<>{}\[\]]+|\([^\s()]*?\([^\s()]+
        \)[^\s()]*?\)|\([^\s]+?\))+(?:\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\)|[^\s`!()\[\]{};:'".,<>
        ?«»“”‘’])|(?:(?<!@)[a-z0-9]+(?:[.\-][a-z0-9]+)*[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop
        |info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au
        |aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|
        co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|g
        f|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo
        |jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|
        mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|p
        n|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy
        |sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|
        ye|yt|yu|za|zm|zw)\b/?(?!@)))""")
            # NOTE(review): the pattern is NOT compiled with re.VERBOSE, so the
            # literal newlines/indentation inside the triple-quoted string are
            # part of the pattern — verify the intended matching behavior.
            split_dfcontent = gruber.split(dfcontent)
            # Even indices are the non-URL text segments (see comment above).
            for i in range(0, len(split_dfcontent), 2):
                split_dfcontent[i] = remove_punc(split_dfcontent[i])
            final_dfcontent = " ".join(split_dfcontent)
            # Collapse runs of whitespace and store the row.
            df.loc[count] = [user.lower()] + [(' '.join(final_dfcontent.split())).lower()]
            count += 1
    # Tokenize, drop stopwords and stem each post's content.
    df['stem'] = df['content']
    for i in range(len(containers)):
        #print(df['Content'][i])
        df['stem'][i] = re.split(r'\s{1,}', df['content'][i])
    df['stem'] = df['stem'].apply(lambda x : remove_stopwords(x))
    """
    df['stem']=df['stem'].apply(lambda x: word_lemmatizer(x))
    """
    df['stem'] = df['stem'].apply(lambda x: word_stemmer(x))
    return df
# For every topic, fetch the thread and aggregate the posts written by the
# thread starter (the author of the first post) into one text blob and one
# combined token list.
data['starter_content'] = ''
data['starter_stem'] = ''
for i in range(len(data)):
    subdata = read_from_url(data['url'][i])
    starter_content = ''
    starter_stem = []
    reply_content=''
    reply_stem = []
    for k in range(len(subdata)):
        # Posts by the same user as post 0 belong to the thread starter.
        if subdata['user'][k] == subdata['user'][0]:
            starter_content += subdata['content'][k]
            starter_stem += subdata['stem'][k]
    data['starter_content'][i] = starter_content
    data['starter_stem'][i] = starter_stem
# Drop the first row.
data = data.iloc[1:]
# Stopword-filter the topic titles and prepend them to the starter content.
title_stem = data.title.apply(lambda x: ' '.join([item for item in x.lower().split() if item not in stop]))
data['title_stem'] = title_stem
data['content'] = data.title_stem + ' ' + data.starter_content
def price_xtrct(text):
    """Extract (item, price) pairs from a sales-post string.

    Prices are tokens such as '5b', '100 mil', '3 billion', '250m' or
    '100k'. The text between two consecutive prices is taken to be the
    item the following price refers to.

    :param text: lower-cased post content
    :return: tuple (list of item strings, list of price strings)

    BUG FIX: the original concatenated the 'b', 'm' and 'k' matches as
    three separate lists, so prices appeared out of document order and the
    `text.index`-based item slicing misaligned (and broke on duplicate
    price strings). A single `finditer` pass keeps document order and uses
    true match positions.
    """
    pattern = re.compile(r'\d+(?:[bmk]| bil| billion| mil| million)\b')
    item = []
    price = []
    idx0 = 0
    for match in pattern.finditer(text):
        price.append(match.group(0))
        # Everything between the previous price and this one (minus the
        # separating space) names the item being sold.
        item.append(text[idx0:max(match.start() - 1, 0)])
        idx0 = match.end() + 1
    return item, price
# Extract (items, prices) from each post's combined content.
item_price = data.content.apply(lambda x: price_xtrct(x))
item = [i[0] for i in item_price]
price = [i[1] for i in item_price]
data['item'] = item
data['price'] = price
# Posts with no detected price fall back to the stopword-filtered title
# as the single "item".
for k, v in enumerate(data.title_stem):
    if not data.item.iloc[k]:
        data.item.iloc[k] = [v]
# Flatten to one row per (item, price, source-url) triple; items without
# a matching price get the placeholder 'NA'.
price = []
item = []
url = []
for k, v in enumerate(data.url):
    for i, j in enumerate(data.item.iloc[k]):
        item.append(j)
        if data.price.iloc[k]:
            price.append(data.price.iloc[k][i])
        else:
            price.append('NA')
        url.append(v)
df = pd.DataFrame({'item': item, 'price': price, 'url': url})
df.to_csv('sales_data.csv')
"re.split",
"nltk.corpus.stopwords.words",
"re.compile",
"nltk.stem.WordNetLemmatizer",
"requests.get",
"bs4.BeautifulSoup",
"nltk.stem.porter.PorterStemmer",
"pandas.DataFrame",
"re.findall",
"pandas.concat"
] | [((925, 951), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (940, 951), False, 'from nltk.corpus import stopwords\n'), ((1149, 1164), 'nltk.stem.porter.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (1162, 1164), False, 'from nltk.stem.porter import PorterStemmer\n'), ((1179, 1198), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (1196, 1198), False, 'from nltk.stem import WordNetLemmatizer\n'), ((7450, 7506), 'pandas.DataFrame', 'pd.DataFrame', (["{'item': item, 'price': price, 'url': url}"], {}), "({'item': item, 'price': price, 'url': url})\n", (7462, 7506), True, 'import pandas as pd\n'), ((378, 395), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (390, 395), False, 'import requests\n'), ((408, 437), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.text', '"""lxml"""'], {}), "(r.text, 'lxml')\n", (421, 437), False, 'from bs4 import BeautifulSoup\n'), ((707, 749), 'pandas.DataFrame', 'pd.DataFrame', (["{'title': title, 'url': url}"], {}), "({'title': title, 'url': url})\n", (719, 749), True, 'import pandas as pd\n'), ((1234, 1261), 'bs4.BeautifulSoup', 'BeautifulSoup', (['text', '"""lxml"""'], {}), "(text, 'lxml')\n", (1247, 1261), False, 'from bs4 import BeautifulSoup\n'), ((1767, 1784), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1779, 1784), False, 'import requests\n'), ((1797, 1833), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.text', '"""html.parser"""'], {}), "(r.text, 'html.parser')\n", (1810, 1833), False, 'from bs4 import BeautifulSoup\n'), ((1919, 1960), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['user', 'content']"}), "(columns=['user', 'content'])\n", (1931, 1960), True, 'import pandas as pd\n'), ((6641, 6671), 're.findall', 're.findall', (['"""\\\\d+[k]\\\\b"""', 'text'], {}), "('\\\\d+[k]\\\\b', text)\n", (6651, 6671), False, 'import re\n'), ((2689, 4917), 're.compile', 're.compile', 
(['"""(?i)\\\\b((?:https?:(?:/{1,3}|[a-z0-9%])|[a-z0-9.\\\\-]+[.](?:com|net|org|edu|\n gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|\n ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz\n |ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et\n |eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id\n |ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu\n |lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np\n |nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|\n Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|\n uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)/)(?:[^\\\\s()<>{}\\\\[\\\\]]+|\\\\([^\\\\s()]*?\\\\([^\\\\s()]+\n \\\\)[^\\\\s()]*?\\\\)|\\\\([^\\\\s]+?\\\\))+(?:\\\\([^\\\\s()]*?\\\\([^\\\\s()]+\\\\)[^\\\\s()]*?\\\\)|\\\\([^\\\\s]+?\\\\)|[^\\\\s`!()\\\\[\\\\]{};:\'".,<>\n ?«»“”‘’])|(?:(?<!@)[a-z0-9]+(?:[.\\\\-][a-z0-9]+)*[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop\n |info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au\n |aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|\n co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|g\n f|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo\n |jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|\n mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|p\n 
n|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy\n |sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|\n ye|yt|yu|za|zm|zw)\\\\b/?(?!@)))"""'], {}), '(\n """(?i)\\\\b((?:https?:(?:/{1,3}|[a-z0-9%])|[a-z0-9.\\\\-]+[.](?:com|net|org|edu|\n gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|\n ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz\n |ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et\n |eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id\n |ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu\n |lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np\n |nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|\n Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|\n uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)/)(?:[^\\\\s()<>{}\\\\[\\\\]]+|\\\\([^\\\\s()]*?\\\\([^\\\\s()]+\n \\\\)[^\\\\s()]*?\\\\)|\\\\([^\\\\s]+?\\\\))+(?:\\\\([^\\\\s()]*?\\\\([^\\\\s()]+\\\\)[^\\\\s()]*?\\\\)|\\\\([^\\\\s]+?\\\\)|[^\\\\s`!()\\\\[\\\\]{};:\'".,<>\n ?«»“”‘’])|(?:(?<!@)[a-z0-9]+(?:[.\\\\-][a-z0-9]+)*[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop\n |info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au\n |aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|\n co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|g\n f|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo\n 
|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|\n mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|p\n n|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy\n |sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|\n ye|yt|yu|za|zm|zw)\\\\b/?(?!@)))"""\n )\n', (2699, 4917), False, 'import re\n'), ((5367, 5404), 're.split', 're.split', (['"""\\\\s{1,}"""', "df['content'][i]"], {}), "('\\\\s{1,}', df['content'][i])\n", (5375, 5404), False, 'import re\n'), ((6488, 6523), 're.findall', 're.findall', (['"""\\\\d+ billion\\\\b"""', 'text'], {}), "('\\\\d+ billion\\\\b', text)\n", (6498, 6523), False, 'import re\n'), ((6597, 6632), 're.findall', 're.findall', (['"""\\\\d+ million\\\\b"""', 'text'], {}), "('\\\\d+ million\\\\b', text)\n", (6607, 6632), False, 'import re\n'), ((6423, 6453), 're.findall', 're.findall', (['"""\\\\d+[b]\\\\b"""', 'text'], {}), "('\\\\d+[b]\\\\b', text)\n", (6433, 6453), False, 'import re\n'), ((6455, 6486), 're.findall', 're.findall', (['"""\\\\d+ bil\\\\b"""', 'text'], {}), "('\\\\d+ bil\\\\b', text)\n", (6465, 6486), False, 'import re\n'), ((6532, 6562), 're.findall', 're.findall', (['"""\\\\d+[m]\\\\b"""', 'text'], {}), "('\\\\d+[m]\\\\b', text)\n", (6542, 6562), False, 'import re\n'), ((6564, 6595), 're.findall', 're.findall', (['"""\\\\d+ mil\\\\b"""', 'text'], {}), "('\\\\d+ mil\\\\b', text)\n", (6574, 6595), False, 'import re\n'), ((857, 874), 'pandas.concat', 'pd.concat', (['dflist'], {}), '(dflist)\n', (866, 874), True, 'import pandas as pd\n')] |
import numpy
from NeuralNetworks.Layers.activations import lambda_from_function
class Dense:
    """A fully connected layer backed by a single weight matrix.

    The activation is looked up by name through ``lambda_from_function``,
    which yields both the activation callable and its gradient.
    """
    def __init__(self, num_nodes = 1, input_dim = None, activation = 'sigmoid'):
        """Record the layer size and resolve the activation pair."""
        self.num_nodes = num_nodes
        self.input_dim = input_dim
        self.activation = activation
        # Resolve the named activation into (function, derivative).
        self.activation_function, self.activation_gradient = lambda_from_function(activation)
    def init(self, previous_layer):
        """Allocate the weight matrix once the preceding layer is known."""
        self.previous_layer = previous_layer
        # The first layer takes its fan-in from the constructor argument;
        # later layers take it from the size of the layer feeding them.
        if previous_layer is None:
            fan_in = self.input_dim
        else:
            fan_in = previous_layer.num_nodes
        # Gaussian initialization scaled by 1/sqrt(fan_in).
        self.weights = numpy.random.normal(0.0, pow(fan_in, -0.5), (self.num_nodes, fan_in))
        self.output_shape = (self.num_nodes, 1)
    def forward(self, input):
        """Propagate a column vector through the layer, caching the output."""
        pre_activation = numpy.dot(self.weights, input)
        activated = self.activation_function(pre_activation)
        assert(self.output_shape == activated.shape)
        self.layer_output = activated
        return self.layer_output
    def backward(self, learning_rate, error_gradient_in, previous_layer_output):
        """Apply one gradient step and return the error for the previous layer.

        NOTE(review): the error is propagated through the freshly *updated*
        weights, exactly mirroring the original implementation.
        """
        delta = error_gradient_in * self.activation_gradient(self.layer_output)
        self.weights += learning_rate * numpy.dot(delta, numpy.transpose(previous_layer_output))
        # Push the gradient back through the (updated) weight matrix.
        return numpy.dot(self.weights.T, error_gradient_in)
| [
"numpy.dot",
"numpy.transpose",
"NeuralNetworks.Layers.activations.lambda_from_function"
] | [((424, 456), 'NeuralNetworks.Layers.activations.lambda_from_function', 'lambda_from_function', (['activation'], {}), '(activation)\n', (444, 456), False, 'from NeuralNetworks.Layers.activations import lambda_from_function\n'), ((940, 970), 'numpy.dot', 'numpy.dot', (['self.weights', 'input'], {}), '(self.weights, input)\n', (949, 970), False, 'import numpy\n'), ((1698, 1742), 'numpy.dot', 'numpy.dot', (['self.weights.T', 'error_gradient_in'], {}), '(self.weights.T, error_gradient_in)\n', (1707, 1742), False, 'import numpy\n'), ((1571, 1609), 'numpy.transpose', 'numpy.transpose', (['previous_layer_output'], {}), '(previous_layer_output)\n', (1586, 1609), False, 'import numpy\n')] |
import tensorflow as tf
import matplotlib.pyplot as plt
import argparse
from keras.models import load_model
from data import load_from_H5
from bnn import bnn
from viz import plot_predictions
# Command-line interface: choose between training a fresh BNN and loading a
# previously trained Keras model from disk.
parser = argparse.ArgumentParser(description='Mauna Loa runner')
parser.add_argument('--trained_model', default='models/mauna_loa.h5',
                    type=str, help='Trained state_dict file path to open')
parser.add_argument('--train', default=False, type=bool)
parser.add_argument('--model_name', default='mauna_loa.h5', type=str,
                    help='Dir to save results')
parser.add_argument('--epochs', default=40, type=int, help='Number of epochs to train on')
parser.add_argument('-f', default=None, type=str, help="Dummy arg so we can load in Jupyter Notebooks")
args = parser.parse_args()
# Load the Mauna Loa train/test splits from HDF5.
test_hdf5_filepath = 'data/Mauna Loa/test.h5'
train_hdf5_filepath = 'data/Mauna Loa/train.h5'
testset = load_from_H5(test_hdf5_filepath)
trainset = load_from_H5(train_hdf5_filepath)
X_test, y_test = testset
X_train, y_train = trainset
# Model hyperparameters.
N = 272
# l2 = 0.01
num_hidden_layers = 5
# num hidden units
n_hidden = 1024
epochs = args.epochs
batch_size = 128
epochs_multiplier = 1
# NOTE(review): the bare expression below is a no-op — probably a leftover
# notebook cell.
epochs_multiplier
tau = 0.1
dropout = 0.1
# Bayesian neural net with `num_hidden_layers` hidden layers of `n_hidden`
# units each, using MC dropout with the given tau/dropout rate.
net = bnn(
    X_train,
    y_train,
    ([int(n_hidden)] * num_hidden_layers),
    normalize=False,
    tau=tau,
    dropout=dropout,
    activation='relu'
)
if args.train:
    print("Training a new model...")
    net.train(X_train, y_train, epochs=epochs, batch_size=batch_size,
              verbose=1)
    net.model.save("models/" + args.model_name)
else:
    # Reuse a previously trained Keras model.
    net.model = load_model(args.trained_model)
# Plot predictive mean and uncertainty bands over the test inputs.
plot_predictions(net, trainset, X_test, iters=20, n_std=4)
plt.show()
| [
"keras.models.load_model",
"data.load_from_H5",
"argparse.ArgumentParser",
"viz.plot_predictions",
"matplotlib.pyplot.show"
] | [((203, 258), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Mauna Loa runner"""'}), "(description='Mauna Loa runner')\n", (226, 258), False, 'import argparse\n'), ((908, 940), 'data.load_from_H5', 'load_from_H5', (['test_hdf5_filepath'], {}), '(test_hdf5_filepath)\n', (920, 940), False, 'from data import load_from_H5\n'), ((952, 985), 'data.load_from_H5', 'load_from_H5', (['train_hdf5_filepath'], {}), '(train_hdf5_filepath)\n', (964, 985), False, 'from data import load_from_H5\n'), ((1630, 1688), 'viz.plot_predictions', 'plot_predictions', (['net', 'trainset', 'X_test'], {'iters': '(20)', 'n_std': '(4)'}), '(net, trainset, X_test, iters=20, n_std=4)\n', (1646, 1688), False, 'from viz import plot_predictions\n'), ((1689, 1699), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1697, 1699), True, 'import matplotlib.pyplot as plt\n'), ((1598, 1628), 'keras.models.load_model', 'load_model', (['args.trained_model'], {}), '(args.trained_model)\n', (1608, 1628), False, 'from keras.models import load_model\n')] |
import datetime
from django.db import models
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
class Guest(models.Model):
    """
    A temporary user.

    Fields:

    ``user`` - The temporary user.

    ``last_used`` - The last time we noted this user doing something.

    All users with a record in this model are temporary and should be
    deleted after GUEST_DELETE_TIME.
    """
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # BUG FIX: ``User`` was previously passed as the positional
    # ``verbose_name`` argument of DateTimeField (a copy/paste mistake) —
    # the field needs no such argument.
    last_used = models.DateTimeField()
    @classmethod
    def create_guest(cls, user):
        """Build (but do not save) a Guest wrapping ``user``, stamped with now."""
        guest = cls(user=user, last_used=datetime.datetime.now())
        return guest
| [
"django.db.models.DateTimeField",
"datetime.datetime.now",
"django.db.models.ForeignKey"
] | [((459, 508), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (476, 508), False, 'from django.db import models\n'), ((524, 550), 'django.db.models.DateTimeField', 'models.DateTimeField', (['User'], {}), '(User)\n', (544, 550), False, 'from django.db import models\n'), ((646, 669), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (667, 669), False, 'import datetime\n')] |
import collections
import datetime
import logging
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import scipy as sp
import sklearn as sklear
import core.config as cconfig
import core.data_adapters as cdataa
import core.dataflow.utils as cdu
import core.finance as cfinan
import core.signal_processing as csigna
import core.statistics as cstati
import helpers.dbg as dbg
from core.dataflow.core import DAG, Node
from core.dataflow.nodes.base import (
ColModeMixin,
FitPredictNode,
SeriesToDfColProcessor,
)
from core.dataflow.nodes.sources import ReadDataFromDf
from core.dataflow.nodes.transformers import ColumnTransformer
from core.dataflow.visitors import extract_info
_LOG = logging.getLogger(__name__)
_COL_TYPE = Union[int, str]
_PANDAS_DATE_TYPE = Union[str, pd.Timestamp, datetime.datetime]
_TO_LIST_MIXIN_TYPE = Union[List[_COL_TYPE], Callable[[], List[_COL_TYPE]]]
class SmaModel(FitPredictNode, ColModeMixin):
    """
    Fit and predict a smooth moving average (SMA) model.
    """
    def __init__(
        self,
        nid: str,
        col: _TO_LIST_MIXIN_TYPE,
        steps_ahead: int,
        tau: Optional[float] = None,
        min_tau_periods: Optional[float] = 2,
        col_mode: Optional[str] = None,
        nan_mode: Optional[str] = None,
    ) -> None:
        """
        Specify the data and SMA modeling parameters.

        :param nid: unique node id
        :param col: name of column to model
        :param steps_ahead: as in `ContinuousSkLearnModel`
        :param tau: as in `csigna.compute_smooth_moving_average`. If `None`,
            learn this parameter. Will be re-learned on each `fit` call.
        :param min_tau_periods: similar to `min_periods` as in
            `csigna.compute_smooth_moving_average`, but expressed in units of
            tau
        :param col_mode: `merge_all` or `replace_all`, as in `ColumnTransformer()`
        :param nan_mode: as in `ContinuousSkLearnModel`
        """
        super().__init__(nid)
        # Exactly one modeled column is supported.
        self._col = cdu.convert_to_list(col)
        dbg.dassert_eq(len(self._col), 1)
        self._steps_ahead = steps_ahead
        dbg.dassert_lte(
            0, self._steps_ahead, "Non-causal prediction attempted! Aborting..."
        )
        if nan_mode is None:
            self._nan_mode = "raise"
        else:
            self._nan_mode = nan_mode
        self._col_mode = col_mode or "replace_all"
        dbg.dassert_in(self._col_mode, ["replace_all", "merge_all"])
        # Smooth moving average model parameters to learn.
        self._must_learn_tau = tau is None
        self._tau = tau
        self._min_tau_periods = min_tau_periods or 0
        self._min_depth = 1
        self._max_depth = 1
        # Loss used when learning `tau`.
        self._metric = sklear.metrics.mean_absolute_error
    def fit(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        """
        Learn `tau` (if not provided) and return forward targets with
        in-sample predictions under "df_out".
        """
        # Drop the last `steps_ahead` rows, for which no forward target exists.
        idx = df_in.index[: -self._steps_ahead]
        x_vars = self._col
        y_vars = self._col
        df = cdu.get_x_and_forward_y_fit_df(
            df_in, x_vars, y_vars, self._steps_ahead
        )
        forward_y_cols = df.drop(x_vars, axis=1).columns
        # Handle presence of NaNs according to `nan_mode`.
        self._handle_nans(idx, df.index)
        # Define and fit model.
        if self._must_learn_tau:
            forward_y_df = df[forward_y_cols]
            # Prepare forward y_vars in sklearn format.
            forward_y_fit = cdataa.transform_to_sklearn(
                forward_y_df, forward_y_df.columns.tolist()
            )
            # Prepare `x_vars` in sklearn format.
            x_fit = cdataa.transform_to_sklearn(df, self._col)
            self._tau = self._learn_tau(x_fit, forward_y_fit)
        _LOG.debug("tau=%s", self._tau)
        return self._predict_and_package_results(df_in, idx, df.index, fit=True)
    def predict(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        """
        Predict using the `tau` learned (or supplied) at fit time.
        """
        cdu.validate_df_indices(df_in)
        df = df_in.copy()
        idx = df.index
        # Restrict to times where col has no NaNs.
        non_nan_idx = df.loc[idx][self._col].dropna().index
        # Handle presence of NaNs according to `nan_mode`.
        self._handle_nans(idx, non_nan_idx)
        # Use trained model to generate predictions.
        dbg.dassert_is_not(
            self._tau,
            None,
            "Parameter tau not found! Check if `fit` has been run.",
        )
        return self._predict_and_package_results(
            df_in, idx, non_nan_idx, fit=False
        )
    def get_fit_state(self) -> Dict[str, Any]:
        """Return the learned state (`tau` plus fit info) for serialization."""
        fit_state = {"_tau": self._tau, "_info['fit']": self._info["fit"]}
        return fit_state
    def set_fit_state(self, fit_state: Dict[str, Any]) -> None:
        """Restore state previously captured by `get_fit_state()`."""
        self._tau = fit_state["_tau"]
        self._info["fit"] = fit_state["_info['fit']"]
    def _predict_and_package_results(
        self,
        df_in: pd.DataFrame,
        idx: pd.Index,
        non_nan_idx: pd.Index,
        fit: bool = True,
    ) -> Dict[str, pd.DataFrame]:
        """
        Predict on `non_nan_idx`, merge forward targets with predictions,
        apply `col_mode`, record node info, and return {"df_out": df}.
        """
        data = cdataa.transform_to_sklearn(df_in.loc[non_nan_idx], self._col)
        fwd_y_hat = self._predict(data)
        forward_y_df = cdu.get_forward_cols(df_in, self._col, self._steps_ahead)
        forward_y_df = forward_y_df.loc[non_nan_idx]
        # Put predictions in dataflow dataframe format.
        fwd_y_hat_vars = [f"{y}_hat" for y in forward_y_df.columns]
        fwd_y_hat = cdataa.transform_from_sklearn(
            non_nan_idx, fwd_y_hat_vars, fwd_y_hat
        )
        # Return targets and predictions.
        df_out = forward_y_df.reindex(idx).merge(
            fwd_y_hat.reindex(idx), left_index=True, right_index=True
        )
        dbg.dassert_no_duplicates(df_out.columns)
        # Select columns for output.
        df_out = self._apply_col_mode(
            df_in, df_out, cols=self._col, col_mode=self._col_mode
        )
        # Update `info`.
        info = collections.OrderedDict()
        info["tau"] = self._tau
        info["min_periods"] = self._get_min_periods(self._tau)
        info["df_out_info"] = cdu.get_df_info_as_string(df_out)
        method = "fit" if fit else "predict"
        self._set_info(method, info)
        return {"df_out": df_out}
    def _handle_nans(
        self, idx: pd.DataFrame.index, non_nan_idx: pd.DataFrame.index
    ) -> None:
        """Raise on, or silently tolerate, NaN rows per `self._nan_mode`."""
        if self._nan_mode == "raise":
            if idx.shape[0] != non_nan_idx.shape[0]:
                nan_idx = idx.difference(non_nan_idx)
                raise ValueError(f"NaNs detected at {nan_idx}")
        elif self._nan_mode == "drop":
            pass
        elif self._nan_mode == "leave_unchanged":
            pass
        else:
            raise ValueError(f"Unrecognized nan_mode `{self._nan_mode}`")
    def _learn_tau(self, x: np.array, y: np.array) -> float:
        """
        Choose `tau` by minimizing the model metric (MAE) of the SMA
        forecast over a bounded range, via scalar optimization.
        """
        def score(tau: float) -> float:
            x_srs = pd.DataFrame(x.flatten())
            sma = csigna.compute_smooth_moving_average(
                x_srs,
                tau=tau,
                min_periods=0,
                min_depth=self._min_depth,
                max_depth=self._max_depth,
            )
            # Exclude the burn-in region from scoring.
            min_periods = self._get_min_periods(tau)
            return self._metric(sma[min_periods:], y[min_periods:])
        tau_lb, tau_ub = 1, 1000
        # Satisfy 2 * tau_ub * min_tau_periods = len(x).
        # This ensures that no more than half of the `fit` series is burned.
        if self._min_tau_periods > 0:
            tau_ub = int(len(x) / (2 * self._min_tau_periods))
        opt_results = sp.optimize.minimize_scalar(
            score, method="bounded", bounds=[tau_lb, tau_ub]
        )
        return opt_results.x
    def _get_min_periods(self, tau: float) -> int:
        """
        Return burn-in period.

        Multiplies `tau` by `min_tau_periods` and converts to an integer.

        :param tau: kernel tau (approximately equal to center of mass)
        :return: minimum number of periods required to generate a prediction
        """
        return int(np.rint(self._min_tau_periods * tau))
    def _predict(self, x: np.array) -> np.array:
        """Compute the SMA of `x` using the learned/supplied `tau`."""
        x_srs = pd.DataFrame(x.flatten())
        # TODO(*): Make `min_periods` configurable.
        min_periods = int(np.rint(self._min_tau_periods * self._tau))
        _LOG.debug("min_periods=%f", min_periods)
        x_sma = csigna.compute_smooth_moving_average(
            x_srs,
            tau=self._tau,
            min_periods=min_periods,
            min_depth=self._min_depth,
            max_depth=self._max_depth,
        )
        return x_sma.values
class SingleColumnVolatilityModel(FitPredictNode):
    """
    Fit and predict volatility for a single column by running an internal
    DAG: |returns|^p -> SMA forecast -> p-th root -> demodulated returns.
    """
    def __init__(
        self,
        nid: str,
        steps_ahead: int,
        col: _COL_TYPE,
        p_moment: float = 2,
        progress_bar: bool = False,
        tau: Optional[float] = None,
        nan_mode: Optional[str] = None,
        out_col_prefix: Optional[str] = None,
    ) -> None:
        """
        Parameters have the same meaning as `SmaModel`.
        """
        super().__init__(nid)
        self._col = col
        self._steps_ahead = steps_ahead
        dbg.dassert_lte(1, p_moment)
        self._p_moment = p_moment
        self._progress_bar = progress_bar
        self._tau = tau
        # If no tau is supplied, it is learned during `fit`.
        self._learn_tau_on_fit = tau is None
        self._nan_mode = nan_mode
        self._out_col_prefix = out_col_prefix
    def get_fit_state(self) -> Dict[str, Any]:
        """Return the state needed to reproduce this fit elsewhere."""
        fit_state = {
            "_col": self._col,
            "_tau": self._tau,
            "_info['fit']": self._info["fit"],
            "_out_col_prefix": self._out_col_prefix,
        }
        return fit_state
    def set_fit_state(self, fit_state: Dict[str, Any]):
        """Restore state previously captured by `get_fit_state()`."""
        self._col = fit_state["_col"]
        self._tau = fit_state["_tau"]
        self._info["fit"] = fit_state["_info['fit']"]
        self._out_col_prefix = fit_state["_out_col_prefix"]
    def fit(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        """Run the internal DAG in fit mode."""
        return {"df_out": self._fit_predict_helper(df_in, fit=True)}
    def predict(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        """Run the internal DAG in predict mode using the fitted state."""
        return {"df_out": self._fit_predict_helper(df_in, fit=False)}
    def _fit_predict_helper(self, df_in: pd.DataFrame, fit: bool) -> pd.DataFrame:
        """
        Build the volatility DAG, run it end-to-end, capture its info, and
        return its output reindexed like `df_in`.
        """
        info = collections.OrderedDict()
        name = self._out_col_prefix or self._col
        name = str(name)
        # Guard against clobbering a pre-existing "<name>_vol" column.
        dbg.dassert_not_in(name + "_vol", df_in.columns)
        # Pass tau=None on the first fit so the SMA node learns it.
        if self._learn_tau_on_fit and fit:
            tau = None
        else:
            tau = self._tau
        config = self._get_config(col=self._col, out_col_prefix=name, tau=tau)
        dag = self._get_dag(df_in[[self._col]], config)
        mode = "fit" if fit else "predict"
        df_out = dag.run_leq_node(
            "demodulate_using_vol_pred", mode, progress_bar=self._progress_bar
        )["df_out"]
        info[self._col] = extract_info(dag, [mode])
        if self._learn_tau_on_fit and fit:
            # Persist the tau learned inside the internal SMA node.
            self._tau = info[self._col]["compute_smooth_moving_average"]["fit"][
                "tau"
            ]
        df_out = df_out.reindex(df_in.index)
        self._set_info(mode, info)
        return df_out
    def _get_config(
        self,
        col: _COL_TYPE,
        out_col_prefix: _COL_TYPE,
        tau: Optional[float] = None,
    ) -> cconfig.Config:
        """
        Generate a DAG config.

        :param col: column whose volatility is to be modeled
        :param tau: tau for SMA; if `None`, then to be learned
        :return: a complete config to be used with `_get_dag()`
        """
        config = cconfig.get_config_from_nested_dict(
            {
                "calculate_vol_pth_power": {
                    "cols": [col],
                    "col_rename_func": lambda x: out_col_prefix + "_vol",
                    "col_mode": "merge_all",
                },
                "compute_smooth_moving_average": {
                    "col": [out_col_prefix + "_vol"],
                    "steps_ahead": self._steps_ahead,
                    "tau": tau,
                    "col_mode": "merge_all",
                    "nan_mode": self._nan_mode,
                },
                "calculate_vol_pth_root": {
                    "cols": [
                        out_col_prefix + "_vol",
                        out_col_prefix + "_vol_" + str(self._steps_ahead),
                        out_col_prefix
                        + "_vol_"
                        + str(self._steps_ahead)
                        + "_hat",
                    ],
                    "col_mode": "replace_selected",
                },
                "demodulate_using_vol_pred": {
                    "signal_cols": [col],
                    "volatility_col": out_col_prefix
                    + "_vol_"
                    + str(self._steps_ahead)
                    + "_hat",
                    "signal_steps_ahead": 0,
                    "volatility_steps_ahead": self._steps_ahead,
                    "col_rename_func": lambda x: out_col_prefix + "_vol_adj",
                    "col_mode": "replace_selected",
                    "nan_mode": self._nan_mode,
                },
            }
        )
        return config
    def _get_dag(self, df_in: pd.DataFrame, config: cconfig.Config) -> DAG:
        """
        Build a DAG from data and config.

        :param df_in: data over which to run DAG
        :param config: config for configuring DAG nodes
        :return: ready-to-run DAG
        """
        dag = DAG(mode="strict")
        _LOG.debug("%s", config)
        # Load `df_in`.
        nid = "load_data"
        node = ReadDataFromDf(nid, df_in)
        tail_nid = self._append(dag, None, node)
        # Raise volatility columns to pth power.
        nid = "calculate_vol_pth_power"
        node = ColumnTransformer(
            nid,
            transformer_func=lambda x: np.abs(x) ** self._p_moment,
            **config[nid].to_dict(),
        )
        tail_nid = self._append(dag, tail_nid, node)
        # Predict pth power of volatility using smooth moving average.
        nid = "compute_smooth_moving_average"
        node = SmaModel(nid, **config[nid].to_dict())
        tail_nid = self._append(dag, tail_nid, node)
        # Calculate the pth root of volatility columns.
        nid = "calculate_vol_pth_root"
        node = ColumnTransformer(
            nid,
            transformer_func=lambda x: np.abs(x) ** (1.0 / self._p_moment),
            **config[nid].to_dict(),
        )
        tail_nid = self._append(dag, tail_nid, node)
        # Divide returns by volatilty prediction.
        nid = "demodulate_using_vol_pred"
        node = VolatilityModulator(
            nid, mode="demodulate", **config[nid].to_dict()
        )
        self._append(dag, tail_nid, node)
        return dag
    # TODO(gp): This code has several copies. Move it to the base class.
    @staticmethod
    def _append(dag: DAG, tail_nid: Optional[str], node: Node) -> str:
        """Add `node` to `dag`, wiring it after `tail_nid`; return its nid."""
        dag.add_node(node)
        if tail_nid is not None:
            dag.connect(tail_nid, node.nid)
        return node.nid
class _MultiColVolatilityModelMixin:
    """
    Mixin running one `SingleColumnVolatilityModel` per dataframe column.
    """

    def _fit_predict_volatility_model(
        self, df: pd.DataFrame, fit: bool, out_col_prefix: Optional[str] = None
    ) -> Tuple[Dict[str, pd.DataFrame], collections.OrderedDict]:
        """
        Fit or predict a volatility model for each column of `df`.

        :param df: input dataframe; one model is built per column
        :param fit: if `True` fit the models (storing per-column fit state on
            `self._col_fit_state`), otherwise restore state and predict
        :param out_col_prefix: prefix for output columns; defaults to the
            column name itself
        :return: (per-column output dataframes, per-column info)
        """
        col_dfs = {}
        col_infos = collections.OrderedDict()
        for col in df.columns:
            prefix = out_col_prefix or col
            model = SingleColumnVolatilityModel(
                "volatility",
                steps_ahead=self._steps_ahead,
                col=col,
                p_moment=self._p_moment,
                progress_bar=self._progress_bar,
                tau=self._tau,
                nan_mode=self._nan_mode,
                out_col_prefix=prefix,
            )
            if fit:
                out_df = model.fit(df[[col]])["df_out"]
                out_info = model.get_info("fit")
                # Persist the learned state so `predict` can restore it.
                self._col_fit_state[col] = model.get_fit_state()
            else:
                model.set_fit_state(self._col_fit_state[col])
                out_df = model.predict(df[[col]])["df_out"]
                out_info = model.get_info("predict")
            col_dfs[col] = out_df
            col_infos[col] = out_info
        return col_dfs, col_infos
class VolatilityModel(
    FitPredictNode,
    ColModeMixin,
    _MultiColVolatilityModelMixin,
):
    """
    Fit and predict a smooth moving average volatility model.

    Wraps `SmaModel` internally, handling calculation of volatility from
    returns and column appends.
    """

    def __init__(
        self,
        nid: str,
        steps_ahead: int,
        cols: Optional[_TO_LIST_MIXIN_TYPE] = None,
        p_moment: float = 2,
        progress_bar: bool = False,
        tau: Optional[float] = None,
        col_rename_func: Callable[[Any], Any] = lambda x: f"{x}_zscored",
        col_mode: Optional[str] = None,
        nan_mode: Optional[str] = None,
    ) -> None:
        """
        Specify the data and smooth moving average (SMA) modeling parameters.

        :param nid: unique node id
        :param cols: name of columns to model
        :param steps_ahead: as in ContinuousSkLearnModel
        :param p_moment: exponent to apply to the absolute value of returns
        :param tau: as in `csigna.compute_smooth_moving_average`. If `None`,
            learn this parameter
        :param col_rename_func: renaming function for z-scored column
        :param col_mode:
            - If "merge_all" (default), merge all columns from input dataframe and
              transformed columns
            - If "replace_selected", merge unselected columns from input dataframe
              and transformed selected columns
            - If "replace_all", leave only transformed selected columns
        :param nan_mode: as in ContinuousSkLearnModel
        """
        super().__init__(nid)
        self._cols = cols
        self._steps_ahead = steps_ahead
        #
        dbg.dassert_lte(1, p_moment)
        self._p_moment = p_moment
        #
        self._progress_bar = progress_bar
        #
        dbg.dassert(tau is None or tau > 0)
        self._tau = tau
        self._col_rename_func = col_rename_func
        self._col_mode = col_mode or "merge_all"
        self._nan_mode = nan_mode
        # State of the model to serialize/deserialize.
        self._fit_cols: List[_COL_TYPE] = []
        self._col_fit_state = {}

    def fit(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        return self._fit_predict_helper(df_in, fit=True)

    def predict(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        return self._fit_predict_helper(df_in, fit=False)

    def get_fit_state(self) -> Dict[str, Any]:
        """
        Return the serializable state learned by `fit`.
        """
        fit_state = {
            "_fit_cols": self._fit_cols,
            "_col_fit_state": self._col_fit_state,
            "_info['fit']": self._info["fit"],
        }
        return fit_state

    def set_fit_state(self, fit_state: Dict[str, Any]):
        """
        Restore state previously produced by `get_fit_state`.
        """
        self._fit_cols = fit_state["_fit_cols"]
        self._col_fit_state = fit_state["_col_fit_state"]
        self._info["fit"] = fit_state["_info['fit']"]

    def _fit_predict_helper(self, df_in: pd.DataFrame, fit: bool):
        """
        Run per-column volatility models and merge outputs per `col_mode`.

        :param df_in: input dataframe containing the columns to model
        :param fit: if `True` fit the models, otherwise predict
        :return: dict with merged output dataframe under "df_out"
        """
        cdu.validate_df_indices(df_in)
        # Get the columns.
        self._fit_cols = cdu.convert_to_list(self._cols or df_in.columns.tolist())
        df = df_in[self._fit_cols]
        dfs, info = self._fit_predict_volatility_model(df, fit=fit)
        df_out = pd.concat(dfs.values(), axis=1)
        # Drop overlapping columns from `df_in` before merging. Use the
        # `columns=` keyword: the positional `axis` argument to `drop` is
        # deprecated since pandas 1.4 and removed in pandas 2.0.
        df_out = self._apply_col_mode(
            df_in.drop(columns=df_out.columns.intersection(df_in.columns)),
            df_out,
            cols=self._fit_cols,
            col_mode=self._col_mode,
        )
        method = "fit" if fit else "predict"
        self._set_info(method, info)
        return {"df_out": df_out}
class MultiindexVolatilityModel(FitPredictNode, _MultiColVolatilityModelMixin):
    """
    Fit and predict a smooth moving average volatility model.

    Wraps SmaModel internally, handling calculation of volatility from
    returns and column appends.
    """

    def __init__(
        self,
        nid: str,
        in_col_group: Tuple[_COL_TYPE],
        steps_ahead: int,
        p_moment: float = 2,
        progress_bar: bool = False,
        tau: Optional[float] = None,
        nan_mode: Optional[str] = None,
    ) -> None:
        """
        Specify the data and sma modeling parameters.

        :param nid: unique node id
        :param steps_ahead: as in ContinuousSkLearnModel
        :param p_moment: exponent to apply to the absolute value of returns
        :param tau: as in `csigna.compute_smooth_moving_average`. If `None`,
            learn this parameter
        :param nan_mode: as in ContinuousSkLearnModel
        """
        super().__init__(nid)
        dbg.dassert_isinstance(in_col_group, tuple)
        self._in_col_group = in_col_group
        # The leading levels name the output column group; the last level
        # seeds the output column prefix.
        self._out_col_group = in_col_group[:-1]
        self._out_col_prefix = str(in_col_group[-1])
        #
        self._steps_ahead = steps_ahead
        dbg.dassert_lte(1, p_moment)
        self._p_moment = p_moment
        #
        self._progress_bar = progress_bar
        #
        self._tau = tau
        self._nan_mode = nan_mode
        # Per-column fit state, keyed by column name.
        self._col_fit_state = {}

    def fit(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        return self._fit_predict_helper(df_in, fit=True)

    def predict(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        return self._fit_predict_helper(df_in, fit=False)

    def get_fit_state(self) -> Dict[str, Any]:
        """
        Return the serializable state learned by `fit`.
        """
        return {
            "_col_fit_state": self._col_fit_state,
            "_info['fit']": self._info["fit"],
        }

    def set_fit_state(self, fit_state: Dict[str, Any]):
        """
        Restore state previously produced by `get_fit_state`.
        """
        self._col_fit_state = fit_state["_col_fit_state"]
        self._info["fit"] = fit_state["_info['fit']"]

    def _fit_predict_helper(self, df_in: pd.DataFrame, fit: bool):
        cdu.validate_df_indices(df_in)
        df = SeriesToDfColProcessor.preprocess(df_in, self._in_col_group)
        col_dfs, info = self._fit_predict_volatility_model(
            df, fit=fit, out_col_prefix=self._out_col_prefix
        )
        df_out = SeriesToDfColProcessor.postprocess(col_dfs, self._out_col_group)
        df_out = cdu.merge_dataframes(df_in, df_out)
        self._set_info("fit" if fit else "predict", info)
        return {"df_out": df_out}
class VolatilityModulator(FitPredictNode, ColModeMixin):
    """
    Modulate or demodulate signal by volatility.

    Processing steps:
      - shift volatility to align it with signal
      - multiply/divide signal by volatility

    Usage examples:
      - Z-scoring
        - to obtain volatility prediction, pass in returns into `SmaModel` with
          a `steps_ahead` parameter
        - to z-score, pass in signal, volatility prediction, `signal_steps_ahead=0`,
          `volatility_steps_ahead=steps_ahead`, `mode='demodulate'`
      - Undoing z-scoring
        - Let's say we have
          - forward volatility prediction `n` steps ahead
          - prediction of forward z-scored returns `m` steps ahead. Z-scoring
            for the target has been done using the volatility prediction above
        - To undo z-scoring, we need to pass in the prediction of forward
          z-scored returns, forward volatility prediction, `signal_steps_ahead=n`,
          `volatility_steps_ahead=m`, `mode='modulate'`
    """

    def __init__(
        self,
        nid: str,
        signal_cols: _TO_LIST_MIXIN_TYPE,
        volatility_col: _COL_TYPE,
        signal_steps_ahead: int,
        volatility_steps_ahead: int,
        mode: str,
        col_rename_func: Optional[Callable[[Any], Any]] = None,
        col_mode: Optional[str] = None,
        nan_mode: Optional[str] = None,
    ) -> None:
        """
        :param nid: node identifier
        :param signal_cols: names of columns to (de)modulate
        :param volatility_col: name of volatility column
        :param signal_steps_ahead: steps ahead of the signal columns. If signal
            is at `t_0`, this value should be `0`. If signal is a forward
            prediction of z-scored returns indexed by knowledge time, this
            value should be equal to the number of steps of the prediction
        :param volatility_steps_ahead: steps ahead of the volatility column. If
            volatility column is an output of `SmaModel`, this corresponds to
            the `steps_ahead` parameter
        :param mode: "modulate" or "demodulate"
        :param col_rename_func: as in `ColumnTransformer`
        :param col_mode: as in `ColumnTransformer`
        """
        super().__init__(nid)
        self._signal_cols = cdu.convert_to_list(signal_cols)
        self._volatility_col = volatility_col
        dbg.dassert_lte(0, signal_steps_ahead)
        self._signal_steps_ahead = signal_steps_ahead
        dbg.dassert_lte(0, volatility_steps_ahead)
        self._volatility_steps_ahead = volatility_steps_ahead
        dbg.dassert_in(mode, ["modulate", "demodulate"])
        self._mode = mode
        self._col_rename_func = col_rename_func or (lambda x: x)
        self._col_mode = col_mode or "replace_all"
        self._nan_mode = nan_mode or "leave_unchanged"

    def fit(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        return self._run("fit", df_in)

    def predict(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        return self._run("predict", df_in)

    def _run(self, method: str, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        # Shared fit/predict implementation: (de)modulate and record info.
        df_out = self._process_signal(df_in)
        info = collections.OrderedDict()
        info["df_out_info"] = cdu.get_df_info_as_string(df_out)
        self._set_info(method, info)
        return {"df_out": df_out}

    def _process_signal(self, df_in: pd.DataFrame) -> pd.DataFrame:
        """
        Modulate or demodulate signal by volatility prediction.

        :param df_in: dataframe with `self._signal_cols` and
            `self._volatility_col` columns
        :return: adjusted signal indexed in the same way as the input signal
        """
        dbg.dassert_is_subset(self._signal_cols, df_in.columns.tolist())
        dbg.dassert_in(self._volatility_col, df_in.columns)
        fwd_signal = df_in[self._signal_cols]
        fwd_volatility = df_in[self._volatility_col]
        if self._nan_mode == "drop":
            fwd_volatility = fwd_volatility.dropna()
        elif self._nan_mode == "leave_unchanged":
            pass
        else:
            raise ValueError(f"Unrecognized `nan_mode` {self._nan_mode}")
        # Shift volatility to align it with signal.
        volatility_shift = self._volatility_steps_ahead - self._signal_steps_ahead
        volatility_aligned = fwd_volatility.shift(volatility_shift)
        # Adjust signal by volatility.
        if self._mode == "demodulate":
            adjusted_signal = fwd_signal.divide(volatility_aligned, axis=0)
        elif self._mode == "modulate":
            adjusted_signal = fwd_signal.multiply(volatility_aligned, axis=0)
        else:
            raise ValueError(f"Invalid mode=`{self._mode}`")
        return self._apply_col_mode(
            df_in,
            adjusted_signal,
            cols=self._signal_cols,
            col_rename_func=self._col_rename_func,
            col_mode=self._col_mode,
        )
class VolatilityNormalizer(FitPredictNode, ColModeMixin):
    """
    Rescale a column so that its annualized volatility matches a target.
    """

    def __init__(
        self,
        nid: str,
        col: str,
        target_volatility: float,
        col_mode: Optional[str] = None,
    ) -> None:
        """
        Normalize series to target annual volatility.

        :param nid: node identifier
        :param col: name of column to rescale
        :param target_volatility: target volatility as a proportion
        :param col_mode: `merge_all` or `replace_all`. If `replace_all`, return
            only the rescaled column, if `merge_all`, append the rescaled
            column to input dataframe
        """
        super().__init__(nid)
        self._col = col
        self._target_volatility = target_volatility
        self._col_mode = col_mode or "merge_all"
        dbg.dassert_in(
            self._col_mode,
            ["merge_all", "replace_all"],
            "Invalid `col_mode`='%s'",
            self._col_mode,
        )
        # Learned by `fit`; `None` until then.
        self._scale_factor: Optional[float] = None

    def fit(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        dbg.dassert_in(self._col, df_in.columns)
        self._scale_factor = cfinan.compute_volatility_normalization_factor(
            df_in[self._col], self._target_volatility
        )
        rescaled_y_hat = self._scale_factor * df_in[self._col]
        df_out = self._apply_col_mode(
            df_in,
            rescaled_y_hat.to_frame(),
            cols=[self._col],
            col_rename_func=lambda x: f"rescaled_{x}",
            col_mode=self._col_mode,
        )
        # Store info.
        info = collections.OrderedDict()
        info["scale_factor"] = self._scale_factor
        self._set_info("fit", info)
        return {"df_out": df_out}

    def predict(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        dbg.dassert_in(self._col, df_in.columns)
        # Fail with a clear message instead of `TypeError: NoneType * ...`
        # when `predict` is called before `fit`.
        dbg.dassert_is_not(
            self._scale_factor,
            None,
            "Scale factor not found! Check if `fit` has been run.",
        )
        rescaled_y_hat = self._scale_factor * df_in[self._col]
        df_out = self._apply_col_mode(
            df_in,
            rescaled_y_hat.to_frame(),
            cols=[self._col],
            col_rename_func=lambda x: f"rescaled_{x}",
            col_mode=self._col_mode,
        )
        return {"df_out": df_out}
| [
"logging.getLogger",
"helpers.dbg.dassert_in",
"core.dataflow.nodes.sources.ReadDataFromDf",
"helpers.dbg.dassert_is_not",
"core.dataflow.utils.get_x_and_forward_y_fit_df",
"core.dataflow.utils.merge_dataframes",
"core.data_adapters.transform_to_sklearn",
"core.dataflow.visitors.extract_info",
"help... | [((755, 782), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (772, 782), False, 'import logging\n'), ((2076, 2100), 'core.dataflow.utils.convert_to_list', 'cdu.convert_to_list', (['col'], {}), '(col)\n', (2095, 2100), True, 'import core.dataflow.utils as cdu\n'), ((2191, 2280), 'helpers.dbg.dassert_lte', 'dbg.dassert_lte', (['(0)', 'self._steps_ahead', '"""Non-causal prediction attempted! Aborting..."""'], {}), "(0, self._steps_ahead,\n 'Non-causal prediction attempted! Aborting...')\n", (2206, 2280), True, 'import helpers.dbg as dbg\n'), ((2476, 2536), 'helpers.dbg.dassert_in', 'dbg.dassert_in', (['self._col_mode', "['replace_all', 'merge_all']"], {}), "(self._col_mode, ['replace_all', 'merge_all'])\n", (2490, 2536), True, 'import helpers.dbg as dbg\n'), ((3013, 3085), 'core.dataflow.utils.get_x_and_forward_y_fit_df', 'cdu.get_x_and_forward_y_fit_df', (['df_in', 'x_vars', 'y_vars', 'self._steps_ahead'], {}), '(df_in, x_vars, y_vars, self._steps_ahead)\n', (3043, 3085), True, 'import core.dataflow.utils as cdu\n'), ((3939, 3969), 'core.dataflow.utils.validate_df_indices', 'cdu.validate_df_indices', (['df_in'], {}), '(df_in)\n', (3962, 3969), True, 'import core.dataflow.utils as cdu\n'), ((4294, 4390), 'helpers.dbg.dassert_is_not', 'dbg.dassert_is_not', (['self._tau', 'None', '"""Parameter tau not found! Check if `fit` has been run."""'], {}), "(self._tau, None,\n 'Parameter tau not found! 
Check if `fit` has been run.')\n", (4312, 4390), True, 'import helpers.dbg as dbg\n'), ((5057, 5119), 'core.data_adapters.transform_to_sklearn', 'cdataa.transform_to_sklearn', (['df_in.loc[non_nan_idx]', 'self._col'], {}), '(df_in.loc[non_nan_idx], self._col)\n', (5084, 5119), True, 'import core.data_adapters as cdataa\n'), ((5183, 5240), 'core.dataflow.utils.get_forward_cols', 'cdu.get_forward_cols', (['df_in', 'self._col', 'self._steps_ahead'], {}), '(df_in, self._col, self._steps_ahead)\n', (5203, 5240), True, 'import core.dataflow.utils as cdu\n'), ((5438, 5507), 'core.data_adapters.transform_from_sklearn', 'cdataa.transform_from_sklearn', (['non_nan_idx', 'fwd_y_hat_vars', 'fwd_y_hat'], {}), '(non_nan_idx, fwd_y_hat_vars, fwd_y_hat)\n', (5467, 5507), True, 'import core.data_adapters as cdataa\n'), ((5710, 5751), 'helpers.dbg.dassert_no_duplicates', 'dbg.dassert_no_duplicates', (['df_out.columns'], {}), '(df_out.columns)\n', (5735, 5751), True, 'import helpers.dbg as dbg\n'), ((5945, 5970), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (5968, 5970), False, 'import collections\n'), ((6096, 6129), 'core.dataflow.utils.get_df_info_as_string', 'cdu.get_df_info_as_string', (['df_out'], {}), '(df_out)\n', (6121, 6129), True, 'import core.dataflow.utils as cdu\n'), ((7570, 7647), 'scipy.optimize.minimize_scalar', 'sp.optimize.minimize_scalar', (['score'], {'method': '"""bounded"""', 'bounds': '[tau_lb, tau_ub]'}), "(score, method='bounded', bounds=[tau_lb, tau_ub])\n", (7597, 7647), True, 'import scipy as sp\n'), ((8367, 8509), 'core.signal_processing.compute_smooth_moving_average', 'csigna.compute_smooth_moving_average', (['x_srs'], {'tau': 'self._tau', 'min_periods': 'min_periods', 'min_depth': 'self._min_depth', 'max_depth': 'self._max_depth'}), '(x_srs, tau=self._tau, min_periods=\n min_periods, min_depth=self._min_depth, max_depth=self._max_depth)\n', (8403, 8509), True, 'import core.signal_processing as csigna\n'), ((9142, 9170), 
'helpers.dbg.dassert_lte', 'dbg.dassert_lte', (['(1)', 'p_moment'], {}), '(1, p_moment)\n', (9157, 9170), True, 'import helpers.dbg as dbg\n'), ((10288, 10313), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (10311, 10313), False, 'import collections\n'), ((10396, 10444), 'helpers.dbg.dassert_not_in', 'dbg.dassert_not_in', (["(name + '_vol')", 'df_in.columns'], {}), "(name + '_vol', df_in.columns)\n", (10414, 10444), True, 'import helpers.dbg as dbg\n'), ((10891, 10916), 'core.dataflow.visitors.extract_info', 'extract_info', (['dag', '[mode]'], {}), '(dag, [mode])\n', (10903, 10916), False, 'from core.dataflow.visitors import extract_info\n'), ((13514, 13532), 'core.dataflow.core.DAG', 'DAG', ([], {'mode': '"""strict"""'}), "(mode='strict')\n", (13517, 13532), False, 'from core.dataflow.core import DAG, Node\n'), ((13631, 13657), 'core.dataflow.nodes.sources.ReadDataFromDf', 'ReadDataFromDf', (['nid', 'df_in'], {}), '(nid, df_in)\n', (13645, 13657), False, 'from core.dataflow.nodes.sources import ReadDataFromDf\n'), ((15367, 15392), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (15390, 15392), False, 'import collections\n'), ((18019, 18047), 'helpers.dbg.dassert_lte', 'dbg.dassert_lte', (['(1)', 'p_moment'], {}), '(1, p_moment)\n', (18034, 18047), True, 'import helpers.dbg as dbg\n'), ((18152, 18187), 'helpers.dbg.dassert', 'dbg.dassert', (['(tau is None or tau > 0)'], {}), '(tau is None or tau > 0)\n', (18163, 18187), True, 'import helpers.dbg as dbg\n'), ((19268, 19298), 'core.dataflow.utils.validate_df_indices', 'cdu.validate_df_indices', (['df_in'], {}), '(df_in)\n', (19291, 19298), True, 'import core.dataflow.utils as cdu\n'), ((20874, 20917), 'helpers.dbg.dassert_isinstance', 'dbg.dassert_isinstance', (['in_col_group', 'tuple'], {}), '(in_col_group, tuple)\n', (20896, 20917), True, 'import helpers.dbg as dbg\n'), ((21119, 21147), 'helpers.dbg.dassert_lte', 'dbg.dassert_lte', (['(1)', 'p_moment'], {}), '(1, 
p_moment)\n', (21134, 21147), True, 'import helpers.dbg as dbg\n'), ((22048, 22078), 'core.dataflow.utils.validate_df_indices', 'cdu.validate_df_indices', (['df_in'], {}), '(df_in)\n', (22071, 22078), True, 'import core.dataflow.utils as cdu\n'), ((22092, 22152), 'core.dataflow.nodes.base.SeriesToDfColProcessor.preprocess', 'SeriesToDfColProcessor.preprocess', (['df_in', 'self._in_col_group'], {}), '(df_in, self._in_col_group)\n', (22125, 22152), False, 'from core.dataflow.nodes.base import ColModeMixin, FitPredictNode, SeriesToDfColProcessor\n'), ((22297, 22357), 'core.dataflow.nodes.base.SeriesToDfColProcessor.postprocess', 'SeriesToDfColProcessor.postprocess', (['dfs', 'self._out_col_group'], {}), '(dfs, self._out_col_group)\n', (22331, 22357), False, 'from core.dataflow.nodes.base import ColModeMixin, FitPredictNode, SeriesToDfColProcessor\n'), ((22375, 22410), 'core.dataflow.utils.merge_dataframes', 'cdu.merge_dataframes', (['df_in', 'df_out'], {}), '(df_in, df_out)\n', (22395, 22410), True, 'import core.dataflow.utils as cdu\n'), ((24829, 24861), 'core.dataflow.utils.convert_to_list', 'cdu.convert_to_list', (['signal_cols'], {}), '(signal_cols)\n', (24848, 24861), True, 'import core.dataflow.utils as cdu\n'), ((24916, 24954), 'helpers.dbg.dassert_lte', 'dbg.dassert_lte', (['(0)', 'signal_steps_ahead'], {}), '(0, signal_steps_ahead)\n', (24931, 24954), True, 'import helpers.dbg as dbg\n'), ((25017, 25059), 'helpers.dbg.dassert_lte', 'dbg.dassert_lte', (['(0)', 'volatility_steps_ahead'], {}), '(0, volatility_steps_ahead)\n', (25032, 25059), True, 'import helpers.dbg as dbg\n'), ((25130, 25178), 'helpers.dbg.dassert_in', 'dbg.dassert_in', (['mode', "['modulate', 'demodulate']"], {}), "(mode, ['modulate', 'demodulate'])\n", (25144, 25178), True, 'import helpers.dbg as dbg\n'), ((25504, 25529), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (25527, 25529), False, 'import collections\n'), ((25560, 25593), 
'core.dataflow.utils.get_df_info_as_string', 'cdu.get_df_info_as_string', (['df_out'], {}), '(df_out)\n', (25585, 25593), True, 'import core.dataflow.utils as cdu\n'), ((25796, 25821), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (25819, 25821), False, 'import collections\n'), ((25852, 25885), 'core.dataflow.utils.get_df_info_as_string', 'cdu.get_df_info_as_string', (['df_out'], {}), '(df_out)\n', (25877, 25885), True, 'import core.dataflow.utils as cdu\n'), ((26380, 26431), 'helpers.dbg.dassert_in', 'dbg.dassert_in', (['self._volatility_col', 'df_in.columns'], {}), '(self._volatility_col, df_in.columns)\n', (26394, 26431), True, 'import helpers.dbg as dbg\n'), ((28369, 28476), 'helpers.dbg.dassert_in', 'dbg.dassert_in', (['self._col_mode', "['merge_all', 'replace_all']", '"""Invalid `col_mode`=\'%s\'"""', 'self._col_mode'], {}), '(self._col_mode, [\'merge_all\', \'replace_all\'],\n "Invalid `col_mode`=\'%s\'", self._col_mode)\n', (28383, 28476), True, 'import helpers.dbg as dbg\n'), ((28659, 28699), 'helpers.dbg.dassert_in', 'dbg.dassert_in', (['self._col', 'df_in.columns'], {}), '(self._col, df_in.columns)\n', (28673, 28699), True, 'import helpers.dbg as dbg\n'), ((28729, 28823), 'core.finance.compute_volatility_normalization_factor', 'cfinan.compute_volatility_normalization_factor', (['df_in[self._col]', 'self._target_volatility'], {}), '(df_in[self._col], self.\n _target_volatility)\n', (28775, 28823), True, 'import core.finance as cfinan\n'), ((29170, 29195), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (29193, 29195), False, 'import collections\n'), ((29396, 29436), 'helpers.dbg.dassert_in', 'dbg.dassert_in', (['self._col', 'df_in.columns'], {}), '(self._col, df_in.columns)\n', (29410, 29436), True, 'import helpers.dbg as dbg\n'), ((3633, 3675), 'core.data_adapters.transform_to_sklearn', 'cdataa.transform_to_sklearn', (['df', 'self._col'], {}), '(df, self._col)\n', (3660, 3675), True, 'import 
core.data_adapters as cdataa\n'), ((6941, 7066), 'core.signal_processing.compute_smooth_moving_average', 'csigna.compute_smooth_moving_average', (['x_srs'], {'tau': 'tau', 'min_periods': '(0)', 'min_depth': 'self._min_depth', 'max_depth': 'self._max_depth'}), '(x_srs, tau=tau, min_periods=0,\n min_depth=self._min_depth, max_depth=self._max_depth)\n', (6977, 7066), True, 'import core.signal_processing as csigna\n'), ((8049, 8085), 'numpy.rint', 'np.rint', (['(self._min_tau_periods * tau)'], {}), '(self._min_tau_periods * tau)\n', (8056, 8085), True, 'import numpy as np\n'), ((8257, 8299), 'numpy.rint', 'np.rint', (['(self._min_tau_periods * self._tau)'], {}), '(self._min_tau_periods * self._tau)\n', (8264, 8299), True, 'import numpy as np\n'), ((13886, 13895), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (13892, 13895), True, 'import numpy as np\n'), ((14424, 14433), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (14430, 14433), True, 'import numpy as np\n')] |
"""
Programmatically building up compartment models
"""
# Standard Libraries
import json
# External Libraries
import numpy as np
class CompartmentModelBuilder:
    """
    The CompartmentModelBuilder class gives helper functions for defining
    a new compartment model from scratch within a python script. Initializes
    an empty dictionary, compartments, that new compartments can be added to.
    """

    def __init__(self):
        # Maps compartment name -> configuration dict (see `add_compartment`).
        self.compartments = {}

    def add_compartment(
        self,
        name,
        transitions=None,
        transmissibilities=None,
        susceptibilities=None,
        initial_prevalence=0.0,
        exogenous_prevalence=0.0,
        flags=None,
        default_state=None,
        exclude_from_eff_pop=False,
    ):
        """
        Function to build an individual compartment for a compartment model

        Args:
            name (string): name of the compartment
            transitions (dict): temporal transitions out of this compartment
            transmissibilities (dict): transmissibility per transmission mode
            susceptibilities (dict): per-infectious-state susceptibility config
            initial_prevalence (float): initial fraction in this compartment
            exogenous_prevalence (float): exogenous introduction prevalence
            flags (list): arbitrary labels attached to the compartment
            default_state (bool or None): if None and no default state exists
                yet, this compartment becomes the default
            exclude_from_eff_pop (bool): exclude from the effective population
        """
        self.compartments[name] = {
            "transitions": transitions if transitions is not None else {},
            "transmissibilities": transmissibilities
            if transmissibilities is not None
            else {},
            "susceptibilities": susceptibilities
            if susceptibilities is not None
            else {},
            "initial_prevalence": initial_prevalence,
            "exogenous_prevalence": exogenous_prevalence,
            "flags": flags if flags is not None else [],
            "default_state": default_state if default_state is not None else False,
            "exclude_from_eff_pop": exclude_from_eff_pop,
        }
        if default_state is None and not any(
            self.compartments[c]["default_state"] for c in self.compartments
        ):
            # There is no default state set so far, make this new compartment the default
            self.compartments[name]["default_state"] = True

    def add_compartments(self, names):
        """Function to add compartments to a compartment model using the add_compartment function

        Args:
            names (list): list of compartment names to add to the compartment model
        """
        for name in names:
            self.add_compartment(name)

    def add_transition(
        self, compartment, to, upon_exposure_to=None, rate=None, time=None, prob=None
    ):
        """function to add transition for one compartment and destination state at a time

        Args:
            compartment (string): name of compartment
            to (string): destination compartment of the transition
            upon_exposure_to (list): list of compartments that can cause a transition;
                if None, the transition is temporal rather than transmission-induced
            rate: rate of a temporal transition (time is stored as 1/rate)
            time (float): how long it takes for transition to occur
            prob (float): likelihood of the transition occuring
        """
        infectiousStates = (
            [upon_exposure_to]
            if (
                not isinstance(upon_exposure_to, (list, np.ndarray))
                and upon_exposure_to is not None
            )
            else upon_exposure_to
        )
        if upon_exposure_to is None:  # temporal transition
            transn_config = {}
            if time is not None:
                transn_config.update({"time": time, "rate": 1 / time})
            if rate is not None:
                transn_config.update({"rate": rate, "time": 1 / rate})
            if prob is not None:
                transn_config.update({"prob": prob})
            self.compartments[compartment]["transitions"].update({to: transn_config})
        else:  # transmission-induced transition
            for infectiousState in infectiousStates:
                transn_config = {}
                if prob is not None:
                    # transmission-induced transition do not have rates/times
                    transn_config.update({"prob": prob})
                if (
                    infectiousState
                    in self.compartments[compartment]["susceptibilities"]
                ):
                    self.compartments[compartment]["susceptibilities"][infectiousState][
                        "transitions"
                    ].update({to: transn_config})
                else:
                    self.compartments[compartment]["susceptibilities"].update(
                        {infectiousState: {"transitions": {to: transn_config}}}
                    )

    def set_transition_rate(self, compartment, to, rate):
        """Set the rate (and implied time = 1/rate) of temporal transitions."""
        # Note that it only makes sense to set a rate for temporal transitions.
        compartments = (
            [compartment]
            if not isinstance(compartment, (list, np.ndarray))
            else compartment
        )
        destStates = [to] if not isinstance(to, (list, np.ndarray)) else to
        for compartment in compartments:
            transn_dict = self.compartments[compartment]["transitions"]
            for destState in destStates:
                try:
                    transn_dict[destState]["rate"] = rate
                    transn_dict[destState]["time"] = 1 / rate
                except KeyError:
                    # BUG FIX: previously two successive assignments meant the
                    # second dict overwrote the first, leaving only "time".
                    # Store both keys in a single dict.
                    transn_dict[destState] = {"rate": rate, "time": 1 / rate}

    def set_transition_time(self, compartment, to, time):
        """Set the time (and implied rate = 1/time) of temporal transitions."""
        # Note that it only makes sense to set a time for temporal transitions.
        compartments = (
            [compartment]
            if not isinstance(compartment, (list, np.ndarray))
            else compartment
        )
        destStates = [to] if not isinstance(to, (list, np.ndarray)) else to
        for compartment in compartments:
            transn_dict = self.compartments[compartment]["transitions"]
            for destState in destStates:
                try:
                    transn_dict[destState]["time"] = time
                    transn_dict[destState]["rate"] = 1 / time
                except KeyError:
                    # BUG FIX: same double-assignment bug as in
                    # `set_transition_rate`; store both keys in one dict.
                    transn_dict[destState] = {"time": time, "rate": 1 / time}

    def set_transition_probability(
        self, compartment, probs_dict, upon_exposure_to=None
    ):
        """Set transition probabilities for one or more compartments.

        Args:
            compartment: compartment name(s)
            probs_dict (dict): maps destination state -> probability
            upon_exposure_to: if given, set probabilities on the
                transmission-induced transitions for these infectious states;
                otherwise set them on the temporal transitions
        """
        compartments = (
            [compartment]
            if not isinstance(compartment, (list, np.ndarray))
            else compartment
        )
        infectiousStates = (
            [upon_exposure_to]
            if (
                not isinstance(upon_exposure_to, (list, np.ndarray))
                and upon_exposure_to is not None
            )
            else upon_exposure_to
        )
        for compartment in compartments:
            if upon_exposure_to is None:
                transn_dict = self.compartments[compartment]["transitions"]
                for destState in probs_dict:
                    try:
                        transn_dict[destState]["prob"] = probs_dict[destState]
                    except KeyError:
                        transn_dict[destState] = {"prob": probs_dict[destState]}
            else:
                for infectiousState in infectiousStates:
                    transn_dict = self.compartments[compartment]["susceptibilities"][
                        infectiousState
                    ]["transitions"]
                    for destState in probs_dict:
                        try:
                            transn_dict[destState]["prob"] = probs_dict[destState]
                        except KeyError:
                            transn_dict[destState] = {"prob": probs_dict[destState]}

    def set_susceptibility(self, compartment, to, susceptibility=1.0, transitions=None):
        """Set the susceptibility of compartment(s) to infectious state(s).

        Args:
            compartment: compartment name(s) whose susceptibility to set
            to: infectious state name(s)
            susceptibility (float): susceptibility value
            transitions (dict): transitions triggered upon exposure; a fresh
                empty dict is used when omitted
        """
        # BUG FIX: the old `transitions={}` mutable default was shared across
        # calls, so mutating one compartment's transitions leaked into others.
        transitions = {} if transitions is None else transitions
        compartments = (
            [compartment]
            if not isinstance(compartment, (list, np.ndarray))
            else compartment
        )
        infectiousStates = [to] if not isinstance(to, (list, np.ndarray)) else to
        for compartment in compartments:
            for infectiousState in infectiousStates:
                self.compartments[compartment]["susceptibilities"].update(
                    {
                        infectiousState: {
                            "susceptibility": susceptibility,
                            "transitions": transitions,
                        }
                    }
                )

    def set_transmissibility(self, compartment, transm_mode, transmissibility=0.0):
        """Set the transmissibility of compartment(s) for transmission mode(s)."""
        compartments = (
            [compartment]
            if not isinstance(compartment, (list, np.ndarray))
            else compartment
        )
        transmModes = (
            [transm_mode]
            if not isinstance(transm_mode, (list, np.ndarray))
            else transm_mode
        )
        for compartment in compartments:
            transm_dict = self.compartments[compartment]["transmissibilities"]
            for transmMode in transmModes:
                transm_dict.update({transmMode: transmissibility})

    def set_initial_prevalence(self, compartment, prevalence=0.0):
        """Set the initial prevalence of one or more compartments."""
        compartments = (
            [compartment]
            if not isinstance(compartment, (list, np.ndarray))
            else compartment
        )
        for compartment in compartments:
            self.compartments[compartment]["initial_prevalence"] = prevalence

    def set_exogenous_prevalence(self, compartment, prevalence=0.0):
        """Set the exogenous prevalence of one or more compartments."""
        compartments = (
            [compartment]
            if not isinstance(compartment, (list, np.ndarray))
            else compartment
        )
        for compartment in compartments:
            self.compartments[compartment]["exogenous_prevalence"] = prevalence

    def set_default_state(self, compartment):
        """Make `compartment` the unique default state."""
        for c in self.compartments:
            self.compartments[c]["default_state"] = c == compartment

    def set_exclude_from_eff_pop(self, compartment, exclude=True):
        """Mark compartment(s) as (not) excluded from the effective population."""
        compartments = (
            [compartment]
            if not isinstance(compartment, (list, np.ndarray))
            else compartment
        )
        for compartment in compartments:
            self.compartments[compartment]["exclude_from_eff_pop"] = exclude

    def add_compartment_flag(self, compartment, flag):
        """Append flag(s) to compartment(s); "all" targets every compartment."""
        # BUG FIX: "all" previously expanded to `range(self.pop_size)`, but
        # this class has no `pop_size` attribute (AttributeError) and
        # compartments are keyed by name, not index.
        compartments = (
            list(self.compartments)
            if compartment == "all"
            else [compartment]
            if not isinstance(compartment, (list, np.ndarray))
            else compartment
        )
        flags = [flag] if not isinstance(flag, (list, np.ndarray)) else flag
        for compartment in compartments:
            for flag in flags:
                self.compartments[compartment]["flags"].append(flag)

    def remove_compartment_flag(self, compartment, flag):
        """Remove all occurrences of flag(s) from compartment(s)."""
        # BUG FIX: same nonexistent `self.pop_size` expansion as in
        # `add_compartment_flag`; "all" now targets every compartment name.
        compartments = (
            list(self.compartments)
            if compartment == "all"
            else [compartment]
            if not isinstance(compartment, (list, np.ndarray))
            else compartment
        )
        flags = [flag] if not isinstance(flag, (list, np.ndarray)) else flag
        for compartment in compartments:
            for flag in flags:
                self.compartments[compartment]["flags"] = [
                    f for f in self.compartments[compartment]["flags"] if f != flag
                ]  # remove all occurrences of flag

    def save_json(self, filename):
        """
        Function to save a compartment model as a JSON
        """
        with open(filename, "w") as outfile:
            json.dump(self.compartments, outfile, indent=6)
| [
"json.dump"
] | [((11572, 11619), 'json.dump', 'json.dump', (['self.compartments', 'outfile'], {'indent': '(6)'}), '(self.compartments, outfile, indent=6)\n', (11581, 11619), False, 'import json\n')] |
# Data processing imports
import scipy.io as io
import numpy as np
from pyDOE import lhs
# Plotting imports
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import griddata
import matplotlib.gridspec as gridspec
def load_dataset(file):
    """
    Load a PDE dataset from a MATLAB .mat file.

    Returns the spatial grid `x`, the time grid `t`, and the solution
    array transposed so that rows index time and columns index space.
    """
    contents = io.loadmat(file)
    return contents['x'], contents['t'], contents['usol'].T
# Inference
def preprocess_data_discrete_inference(file, idx_t0, idx_t1, q = 500, N = 250, noise = 0.0):
    """
    Prepare training/test data for discrete-time PINN inference.

    Samples `N` random spatial points at time index `idx_t0` (optionally
    perturbed by Gaussian noise scaled by `noise`), loads the order-`q`
    IRK Butcher weights from `IRK_weights/Butcher_IRK{q}.txt`, and uses
    the exact solution at `idx_t1` as the test target.
    """
    x, t, u_exact = load_dataset(file)
    X, T = np.meshgrid(x, t)
    # Exact solution at the target time step serves as the test set.
    test_X = x
    test_u = u_exact[idx_t1, :]
    # Spatial domain bounds.
    lb = test_X.min(0)
    ub = test_X.max(0)
    # Time step bridged by the IRK scheme.
    dt = t[idx_t1] - t[idx_t0]
    # Randomly sample N training points at the initial time step.
    idx_x = np.random.choice(x.shape[0], N, replace=False)
    x0 = x[idx_x, :]
    u0 = u_exact[idx_t0:idx_t0 + 1, idx_x].T
    u0 = u0 + noise * np.std(u0) * np.random.randn(u0.shape[0], u0.shape[1])
    # Boundary points.
    x1 = np.vstack([lb, ub])
    # Load the IRK Butcher tableau and reshape into (q+1, q) weights.
    butcher_tbl = np.float32(np.loadtxt(f'IRK_weights/Butcher_IRK{q}.txt', ndmin=2))
    IRK_weights = np.reshape(butcher_tbl[:q ** 2 + q], (q + 1, q))
    return x, t, u_exact, T, lb, ub, dt, x0, u0, x1, test_X, test_u, IRK_weights
def plot_results_discrete_inference(x, t, x0, u0, u_exact, test_X, u1_pred, idx_t0, idx_t1, lb, ub):
    """Visualize discrete-time inference results.

    Top row: heat map of the exact solution u(t, x) with white vertical
    lines marking the training time (idx_t0) and prediction time (idx_t1).
    Bottom row: exact slice at idx_t0 with the sampled training data, and
    exact vs. predicted slice at idx_t1.  Blocks on plt.show().
    """
    fig = plt.figure(figsize = (10, 9.5))
    ax = plt.gca()
    ax.axis('off')
    fig.patch.set_facecolor('white')
    ####### Row 0: h(t,x) ##################
    gs0 = gridspec.GridSpec(1, 2)
    gs0.update(top=1-0.06, bottom=1-1/2 + 0.1, left=0.15, right=0.85, wspace=0)
    ax = plt.subplot(gs0[:, :])
    h = ax.imshow(u_exact.T, interpolation='nearest', cmap='rainbow',
                  extent=[t.min(), t.max(), test_X.min(), test_X.max()],
                  origin='lower', aspect='auto')
    # Attach a colorbar of matching height to the right of the heat map.
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    fig.colorbar(h, cax=cax)
    # White vertical lines at the training and prediction times.
    line = np.linspace(x.min(), x.max(), 2)[:,None]
    ax.plot(t[idx_t0]*np.ones((2,1)), line, 'w-', linewidth = 1)
    ax.plot(t[idx_t1]*np.ones((2,1)), line, 'w-', linewidth = 1)
    ax.set_xlabel('$t$')
    ax.set_ylabel('$x$')
    ax.set_title('$u(t,x)$', fontsize = 10)
    ####### Row 1: h(t,x) slices ##################
    gs1 = gridspec.GridSpec(1, 2)
    gs1.update(top=1-1/2-0.05, bottom=0.15, left=0.15, right=0.85, wspace=0.5)
    # Left panel: exact solution at idx_t0 plus the sampled training data.
    ax = plt.subplot(gs1[0, 0])
    ax.plot(x,u_exact[idx_t0,:], 'b-', linewidth = 2)
    ax.plot(x0, u0, 'rx', linewidth = 2, label = 'Data')
    ax.set_xlabel('$x$')
    ax.set_ylabel('$u(t,x)$')
    ax.set_title('$t = %.2f$' % (t[idx_t0]), fontsize = 10)
    ax.set_xlim([lb-0.1, ub+0.1])
    ax.legend(loc='upper center', bbox_to_anchor=(0.8, -0.3), ncol=2, frameon=False)
    # Right panel: exact vs. predicted solution at idx_t1 (last IRK stage).
    ax = plt.subplot(gs1[0, 1])
    ax.plot(x,u_exact[idx_t1,:], 'b-', linewidth = 2, label = 'Exact')
    ax.plot(test_X, u1_pred[:,-1], 'r--', linewidth = 2, label = 'Prediction')
    ax.set_xlabel('$x$')
    ax.set_ylabel('$u(t,x)$')
    ax.set_title('$t = %.2f$' % (t[idx_t1]), fontsize = 10)
    ax.set_xlim([lb-0.1, ub+0.1])
    ax.legend(loc='upper center', bbox_to_anchor=(0.1, -0.3), ncol=2, frameon=False)
    plt.show()
# Identification
def preprocess_data_discrete_identification(file, idx_t0, idx_t1, N0 = 250, N1 = 250, noise = 0.0):
    """Build the arrays needed for discrete-time PINN identification.

    Parameters:
        file: path to the .mat dataset (variables 'x', 't', 'usol').
        idx_t0, idx_t1: time indices of the two snapshots used for
            identification.
        N0, N1: number of spatial points sampled (without replacement) at
            idx_t0 and idx_t1 respectively.
        noise: relative magnitude of Gaussian noise added to the samples.

    Returns:
        (x, t, u_exact, lb, ub, dt, q, x0, u0, x1, u1, IRK_alphas,
        IRK_betas)

    Note: uses the global NumPy RNG, so results depend on the caller's
    seed state.
    """
    x, t, u_exact = load_dataset(file)
    # Compute domain bounds for x
    lb = x.min(0)
    ub = x.max(0)
    # Determine dt
    dt = t[idx_t1] - t[idx_t0]
    # Determine q
    # Number of IRK stages chosen from machine epsilon and the step size
    # (q ~ 0.5 * log(eps) / log(dt)); both logs are negative for dt < 1.
    q = int(np.ceil(0.5*np.log(np.finfo(float).eps)/np.log(dt)))
    # Sampling for initial step
    idx_x = np.random.choice(x.shape[0], N0, replace = False)
    x0 = x[idx_x,:]
    u0 = u_exact[idx_t0:idx_t0+1, idx_x].T
    # Add zero-mean Gaussian noise scaled by the sample's std (no-op values
    # when noise == 0, though the RNG is still advanced).
    u0 = u0 + noise*np.std(u0)*np.random.randn(u0.shape[0], u0.shape[1])
    # Sampling for final step
    idx_x = np.random.choice(x.shape[0], N1, replace = False)
    x1 = x[idx_x,:]
    u1 = u_exact[idx_t1:idx_t1+1, idx_x].T
    u1 = u1 + noise*np.std(u1)*np.random.randn(u1.shape[0], u1.shape[1])
    # Load the q-stage IRK weights and split into stage coefficients
    # (alphas, first q rows) and final quadrature weights (betas, last row).
    tmp = np.float32(np.loadtxt(f'IRK_weights/Butcher_IRK{q}.txt', ndmin = 2))
    IRK_weights = np.reshape(tmp[:q**2+q], (q+1,q))
    IRK_alphas = IRK_weights[:-1,:]
    IRK_betas = IRK_weights[-1:,:]
    return x, t, u_exact, lb, ub, dt, q, x0, u0, x1, u1, IRK_alphas, IRK_betas
def plot_results_discrete_identification(x, t, x0, x1, u_exact, u0, u1, idx_t0, idx_t1, lb, ub, lambda_1, lambda_2):
    """Visualize discrete-time identification results.

    Row 0: heat map of the exact solution with white lines at the two
    snapshot times.  Row 1: the exact slices at idx_t0 / idx_t1 with the
    sampled training data.  Row 2: text comparing the identified PDE
    parameters (lambda_1, lambda_2) with the reference equation.
    Blocks on plt.show().
    """
    fig = plt.figure(figsize = (10, 9.5))
    ax = plt.gca()
    ax.axis('off')
    fig.patch.set_facecolor('white')
    gs0 = gridspec.GridSpec(1, 2)
    gs0.update(top=1-0.06, bottom=1-1/3+0.05, left=0.15, right=0.85, wspace=0)
    ax = plt.subplot(gs0[:, :])
    h = ax.imshow(u_exact.T, interpolation='nearest', cmap='rainbow',
                  extent=[t.min(),t.max(), lb[0], ub[0]],
                  origin='lower', aspect='auto')
    # Colorbar of matching height to the right of the heat map.
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    fig.colorbar(h, cax=cax)
    # White vertical lines at the two snapshot times.
    line = np.linspace(x.min(), x.max(), 2)[:,None]
    ax.plot(t[idx_t0]*np.ones((2,1)), line, 'w-', linewidth = 1.0)
    ax.plot(t[idx_t1]*np.ones((2,1)), line, 'w-', linewidth = 1.0)
    ax.set_xlabel('$t$')
    ax.set_ylabel('$x$')
    ax.set_title('$u(t,x)$', fontsize = 10)
    gs1 = gridspec.GridSpec(1, 2)
    gs1.update(top=1-1/3-0.1, bottom=1-2/3, left=0.15, right=0.85, wspace=0.5)
    # Left panel: snapshot at idx_t0 with its sampled training points.
    ax = plt.subplot(gs1[0, 0])
    ax.plot(x, u_exact[idx_t0,:][:,None], 'b', linewidth = 2, label = 'Exact')
    ax.plot(x0, u0, 'rx', linewidth = 2, label = 'Data')
    ax.set_xlabel('$x$')
    ax.set_ylabel('$u(t,x)$')
    ax.set_title('$t = %.2f$\n%d trainng data' % (t[idx_t0], u0.shape[0]), fontsize = 10)
    # Right panel: snapshot at idx_t1 with its sampled training points.
    ax = plt.subplot(gs1[0, 1])
    ax.plot(x, u_exact[idx_t1,:][:,None], 'b', linewidth = 2, label = 'Exact')
    ax.plot(x1, u1, 'rx', linewidth = 2, label = 'Data')
    ax.set_xlabel('$x$')
    ax.set_ylabel('$u(t,x)$')
    ax.set_title('$t = %.2f$\n%d trainng data' % (t[idx_t1], u1.shape[0]), fontsize = 10)
    ax.legend(loc='upper center', bbox_to_anchor=(-0.3, -0.3), ncol=2, frameon=False)
    # Bottom row: identified parameters vs. the reference PDE.
    gs2 = gridspec.GridSpec(1, 2)
    gs2.update(top=1-2/3-0.05, bottom=0, left=0.15, right=0.85, wspace=0.0)
    ax = plt.subplot(gs2[0, 0])
    ax.axis('off')
    ax.text(0.5,0.5,f'Correct PDE: $u_t + u u_x - 0.0031831 u_{{xx}} = 0$ \n$\lambda_1$: {lambda_1:.5f} \t\t $\lambda_2$: {lambda_2:.5f}')
    plt.show() | [
"numpy.reshape",
"numpy.ones",
"numpy.random.choice",
"matplotlib.pyplot.gca",
"scipy.io.loadmat",
"numpy.log",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"numpy.random.randn",
"numpy.vstack",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"numpy.std",
"numpy.finfo",
"nump... | [((311, 327), 'scipy.io.loadmat', 'io.loadmat', (['file'], {}), '(file)\n', (321, 327), True, 'import scipy.io as io\n'), ((533, 550), 'numpy.meshgrid', 'np.meshgrid', (['x', 't'], {}), '(x, t)\n', (544, 550), True, 'import numpy as np\n'), ((787, 833), 'numpy.random.choice', 'np.random.choice', (['x.shape[0]', 'N'], {'replace': '(False)'}), '(x.shape[0], N, replace=False)\n', (803, 833), True, 'import numpy as np\n'), ((982, 1001), 'numpy.vstack', 'np.vstack', (['[lb, ub]'], {}), '([lb, ub])\n', (991, 1001), True, 'import numpy as np\n'), ((1104, 1144), 'numpy.reshape', 'np.reshape', (['tmp[:q ** 2 + q]', '(q + 1, q)'], {}), '(tmp[:q ** 2 + q], (q + 1, q))\n', (1114, 1144), True, 'import numpy as np\n'), ((1331, 1360), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 9.5)'}), '(figsize=(10, 9.5))\n', (1341, 1360), True, 'import matplotlib.pyplot as plt\n'), ((1372, 1381), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1379, 1381), True, 'import matplotlib.pyplot as plt\n'), ((1498, 1521), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {}), '(1, 2)\n', (1515, 1521), True, 'import matplotlib.gridspec as gridspec\n'), ((1611, 1633), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs0[:, :]'], {}), '(gs0[:, :])\n', (1622, 1633), True, 'import matplotlib.pyplot as plt\n'), ((1847, 1870), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (1866, 1870), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((2331, 2354), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {}), '(1, 2)\n', (2348, 2354), True, 'import matplotlib.gridspec as gridspec\n'), ((2448, 2470), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs1[0, 0]'], {}), '(gs1[0, 0])\n', (2459, 2470), True, 'import matplotlib.pyplot as plt\n'), ((2837, 2859), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs1[0, 1]'], {}), '(gs1[0, 1])\n', (2848, 2859), True, 'import 
matplotlib.pyplot as plt\n'), ((3268, 3278), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3276, 3278), True, 'import matplotlib.pyplot as plt\n'), ((3713, 3760), 'numpy.random.choice', 'np.random.choice', (['x.shape[0]', 'N0'], {'replace': '(False)'}), '(x.shape[0], N0, replace=False)\n', (3729, 3760), True, 'import numpy as np\n'), ((3946, 3993), 'numpy.random.choice', 'np.random.choice', (['x.shape[0]', 'N1'], {'replace': '(False)'}), '(x.shape[0], N1, replace=False)\n', (3962, 3993), True, 'import numpy as np\n'), ((4234, 4274), 'numpy.reshape', 'np.reshape', (['tmp[:q ** 2 + q]', '(q + 1, q)'], {}), '(tmp[:q ** 2 + q], (q + 1, q))\n', (4244, 4274), True, 'import numpy as np\n'), ((4552, 4581), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 9.5)'}), '(figsize=(10, 9.5))\n', (4562, 4581), True, 'import matplotlib.pyplot as plt\n'), ((4593, 4602), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4600, 4602), True, 'import matplotlib.pyplot as plt\n'), ((4674, 4697), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {}), '(1, 2)\n', (4691, 4697), True, 'import matplotlib.gridspec as gridspec\n'), ((4786, 4808), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs0[:, :]'], {}), '(gs0[:, :])\n', (4797, 4808), True, 'import matplotlib.pyplot as plt\n'), ((5009, 5032), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (5028, 5032), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((5426, 5449), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {}), '(1, 2)\n', (5443, 5449), True, 'import matplotlib.gridspec as gridspec\n'), ((5539, 5561), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs1[0, 0]'], {}), '(gs1[0, 0])\n', (5550, 5561), True, 'import matplotlib.pyplot as plt\n'), ((5857, 5879), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs1[0, 1]'], {}), '(gs1[0, 1])\n', (5868, 5879), True, 'import matplotlib.pyplot as plt\n'), 
((6262, 6285), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {}), '(1, 2)\n', (6279, 6285), True, 'import matplotlib.gridspec as gridspec\n'), ((6376, 6398), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs2[0, 0]'], {}), '(gs2[0, 0])\n', (6387, 6398), True, 'import matplotlib.pyplot as plt\n'), ((6561, 6571), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6569, 6571), True, 'import matplotlib.pyplot as plt\n'), ((1028, 1082), 'numpy.loadtxt', 'np.loadtxt', (['f"""IRK_weights/Butcher_IRK{q}.txt"""'], {'ndmin': '(2)'}), "(f'IRK_weights/Butcher_IRK{q}.txt', ndmin=2)\n", (1038, 1082), True, 'import numpy as np\n'), ((4158, 4212), 'numpy.loadtxt', 'np.loadtxt', (['f"""IRK_weights/Butcher_IRK{q}.txt"""'], {'ndmin': '(2)'}), "(f'IRK_weights/Butcher_IRK{q}.txt', ndmin=2)\n", (4168, 4212), True, 'import numpy as np\n'), ((930, 971), 'numpy.random.randn', 'np.random.randn', (['u0.shape[0]', 'u0.shape[1]'], {}), '(u0.shape[0], u0.shape[1])\n', (945, 971), True, 'import numpy as np\n'), ((2043, 2058), 'numpy.ones', 'np.ones', (['(2, 1)'], {}), '((2, 1))\n', (2050, 2058), True, 'import numpy as np\n'), ((2108, 2123), 'numpy.ones', 'np.ones', (['(2, 1)'], {}), '((2, 1))\n', (2115, 2123), True, 'import numpy as np\n'), ((3857, 3898), 'numpy.random.randn', 'np.random.randn', (['u0.shape[0]', 'u0.shape[1]'], {}), '(u0.shape[0], u0.shape[1])\n', (3872, 3898), True, 'import numpy as np\n'), ((4090, 4131), 'numpy.random.randn', 'np.random.randn', (['u1.shape[0]', 'u1.shape[1]'], {}), '(u1.shape[0], u1.shape[1])\n', (4105, 4131), True, 'import numpy as np\n'), ((5201, 5216), 'numpy.ones', 'np.ones', (['(2, 1)'], {}), '((2, 1))\n', (5208, 5216), True, 'import numpy as np\n'), ((5268, 5283), 'numpy.ones', 'np.ones', (['(2, 1)'], {}), '((2, 1))\n', (5275, 5283), True, 'import numpy as np\n'), ((919, 929), 'numpy.std', 'np.std', (['u0'], {}), '(u0)\n', (925, 929), True, 'import numpy as np\n'), ((3651, 3661), 'numpy.log', 'np.log', (['dt'], {}), 
'(dt)\n', (3657, 3661), True, 'import numpy as np\n'), ((3846, 3856), 'numpy.std', 'np.std', (['u0'], {}), '(u0)\n', (3852, 3856), True, 'import numpy as np\n'), ((4079, 4089), 'numpy.std', 'np.std', (['u1'], {}), '(u1)\n', (4085, 4089), True, 'import numpy as np\n'), ((3630, 3645), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (3638, 3645), True, 'import numpy as np\n')] |
##########################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 <NAME>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##########################################################################
'''
gpipe/gene2gene.py -
======================================================
:Author: <NAME>
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. todo::
describe purpose of the script.
Usage
-----
Example::
python gpipe/gene2gene.py --help
Type::
python gpipe/gene2gene.py --help
for command line help.
Documentation
-------------
Code
----
'''
import sys
import CGAT.Experiment as E
# Legacy usage banner; %s is substituted with the invoked script name.
USAGE = """python %s [OPTIONS] < gene_list > graph
print list of all transcripts within a gene.
""" % sys.argv[0]
# add links between genes
def main(argv=None):
    """script main.

    parses command line options in sys.argv, unless *argv* is given.

    Reads transcript records (schema|prediction_id|gene_id|quality, one
    per line) from stdin and writes every within-gene pair of transcripts
    to stdout, optionally restricted to given quality codes.
    """

    if argv is None:
        argv = sys.argv

    parser = E.OptionParser(
        version="%prog version: $Id: gpipe/gene2gene.py 2781 2009-09-10 11:33:14Z andreas $", usage=globals()["__doc__"])

    parser.add_option("-q", "--restrict-quality", dest="restrict_quality", type="string",
                      help="restrict genes to given quality codes.")

    parser.set_defaults(separator="|",
                        restrict_quality=None)

    options, args = E.Start(parser)

    if options.restrict_quality:
        options.restrict_quality = set(options.restrict_quality.split(","))

    ninput, noutput, nskipped, nerrors = 0, 0, 0, 0

    def print_lines(lines):
        # Emit every unordered pair of transcripts in *lines* and return
        # the number of pairs written.
        # BUGFIX: the previous version declared ``global noutput`` and
        # incremented it, but ``noutput`` is a local of main(), so the
        # first emitted pair raised a NameError.  The count is now
        # returned and accumulated by the caller instead.
        npairs = 0
        if not lines:
            return npairs
        for x in range(len(lines) - 1):
            for y in range(x + 1, len(lines)):
                options.stdout.write(options.separator.join(
                    lines[x]) + "\t" + options.separator.join(lines[y]) + "\t0\n")
                npairs += 1
        return npairs

    transcripts = []
    for line in sys.stdin:
        try:
            schema, prediction_id, gene_id, quality = line[
                :-1].split(options.separator)
        except ValueError:
            nerrors += 1
            if options.loglevel >= 1:
                options.stdlog.write("# PARSING ERROR in line %s" % line)
            continue
        transcripts.append((schema, prediction_id, gene_id, quality))

    # Sort by (schema, gene_id) so all transcripts of one gene are adjacent.
    transcripts.sort(lambda x, y: cmp((x[0], x[2]), (y[0], y[2])))

    last_gene_id = None
    last_schema = None
    lines = []
    ninput = len(transcripts)

    for schema, prediction_id, gene_id, quality in transcripts:
        # New gene (or schema): flush the pairs of the previous gene.
        if last_gene_id != gene_id or last_schema != schema:
            noutput += print_lines(lines)
            lines = []
            last_gene_id = gene_id
            last_schema = schema
        if options.restrict_quality and quality not in options.restrict_quality:
            nskipped += 1
            continue
        lines.append((schema, prediction_id, gene_id, quality))

    # Flush the final gene.
    noutput += print_lines(lines)

    E.info("ninput=%i, noutput=%i, nskipped=%i, nerrors=%i" %
           (ninput, noutput, nskipped, nerrors))

    E.Stop()
# Script entry point: exit status is main()'s return value.
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| [
"CGAT.Experiment.Stop",
"CGAT.Experiment.info",
"CGAT.Experiment.Start"
] | [((2115, 2130), 'CGAT.Experiment.Start', 'E.Start', (['parser'], {}), '(parser)\n', (2122, 2130), True, 'import CGAT.Experiment as E\n'), ((3703, 3802), 'CGAT.Experiment.info', 'E.info', (["('ninput=%i, noutput=%i, nskipped=%i, nerrors=%i' % (ninput, noutput,\n nskipped, nerrors))"], {}), "('ninput=%i, noutput=%i, nskipped=%i, nerrors=%i' % (ninput, noutput,\n nskipped, nerrors))\n", (3709, 3802), True, 'import CGAT.Experiment as E\n'), ((3815, 3823), 'CGAT.Experiment.Stop', 'E.Stop', ([], {}), '()\n', (3821, 3823), True, 'import CGAT.Experiment as E\n')] |
import argparse
import importlib
import os
import sys
import jsonschema
import pkg_resources
from multiprocessing import Pool, cpu_count
from pyneval.errors.exceptions import InvalidMetricError, PyNevalError
from pyneval.pyneval_io import json_io
from pyneval.pyneval_io import swc_io
from pyneval.metric.utils import anno_utils, config_utils
from pyneval.metric.utils import cli_utils
from pyneval.metric.utils.metric_manager import get_metric_manager
from pyneval.tools.optimize import optimize
# load method in metrics
def import_metrics():
    """Discover and import every ``*_metric`` module under pyneval/metric."""
    # Repository root is three directories above this file.
    repo_root = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    )
    metric_dir = os.path.join(repo_root, "pyneval/metric")
    for entry in os.listdir(metric_dir):
        pieces = entry.split(".")
        # Accept only plain "<name>_metric.py" files (exactly one dot).
        if len(pieces) == 2 and pieces[1] == "py" and pieces[0].endswith("_metric"):
            importlib.import_module("pyneval.metric.{}".format(pieces[0]))
def read_parameters():
    """Define and parse the pyneval command line.

    Returns the ``argparse.Namespace`` holding all CLI options.  Note that
    ``--parallel``, ``--path_validation`` and ``--debug`` are boolean
    ``store_true`` flags, while ``--optimize`` takes a config-file path.
    """
    metric_manager = get_metric_manager()
    # Report the installed package version in the parser description.
    parser = argparse.ArgumentParser(description="Current version: pyneval {}".format(
        pkg_resources.require("pyneval")[0].version)
    )
    parser.add_argument(
        "--gold",
        "-G",
        help="path of the gold standard SWC file",
        required=False
    )
    parser.add_argument(
        "--test",
        "-T",
        help="a list of reconstructed SWC files or folders for evaluation",
        required=False,
        nargs="*"
    )
    parser.add_argument(
        "--metric",
        "-M",
        help="metric choice: " + metric_manager.get_metric_summary(False) + ".",
        required=False
    )
    parser.add_argument(
        "--output",
        "-O",
        help="output path of metric results, output file is in json format with different scores of the metric",
        required=False,
    )
    parser.add_argument(
        "--detail",
        "-D",
        help="output path of detail metric result, swc format presented.\n"
        "identify different type according to metric result for each node",
        required=False,
    )
    parser.add_argument(
        "--config",
        "-C",
        help="path of custom configuration file for the specified metric",
        required=False,
    )
    parser.add_argument(
        "--parallel",
        "-P",
        help="Enable the parallel processing",
        required=False,
        action="store_true"
    )
    parser.add_argument(
        "--optimize",
        help="Enable optimizer mode",
        required=False,
    )
    parser.add_argument(
        "--path_validation",
        help="Enable detailed path validation check",
        required=False,
        action="store_true"
    )
    parser.add_argument("--debug", help="print debug info or not", required=False, action="store_true")
    return parser.parse_args()
def init(abs_dir):
    """Make the project importable and allow deep recursion.

    Appends ``abs_dir`` plus its ``src`` and ``test`` subdirectories to
    ``sys.path`` and raises the recursion limit (deep SWC trees are
    traversed recursively elsewhere in the package).
    """
    for extra in (abs_dir, os.path.join(abs_dir, "src"), os.path.join(abs_dir, "test")):
        sys.path.append(extra)
    sys.setrecursionlimit(1000000)
def set_configs(abs_dir, args):
    """Resolve parsed CLI arguments into concrete metric inputs.

    Returns the tuple
    (gold_swc_tree, test_swc_trees, test_swc_paths, metric, output_path,
     detail_dir, config, is_debug, is_parallel, optimize_config,
     is_path_validation).

    Raises InvalidMetricError for an unknown metric name and PyNevalError
    when no test model is given; jsonschema.validate raises on an invalid
    metric configuration.
    """
    # argument: debug
    # BUGFIX: --debug is declared with action="store_true", so args.debug
    # is already a bool; the old code called args.debug.lower(), which
    # raised AttributeError whenever the flag was actually set.
    is_debug = bool(args.debug)
    # argument: gold
    gold_swc_path = os.path.join(abs_dir, args.gold)
    gold_swc_tree = swc_io.read_swc_tree(gold_swc_path)  # SwcTree
    # argument: metric
    metric_manager = get_metric_manager()
    metric = metric_manager.get_root_metric(args.metric)
    if not metric:
        raise InvalidMetricError(args.metric, metric_manager.get_metric_summary(True))
    # argument: test
    test_swc_paths = [os.path.join(abs_dir, path) for path in args.test]
    test_swc_trees = []
    # read test trees (.tif inputs are skipped)
    for file in test_swc_paths:
        if file[-4:].lower() == ".tif":
            continue
        test_swc_trees.extend(swc_io.read_swc_trees(file))
    if len(test_swc_paths) == 0:
        raise PyNevalError("test models can't be null")
    # info: how many trees read
    print("Evaluating {} test model(s) \n".format(len(test_swc_trees)))
    # argument: config -- fall back to the metric's defaults, then validate
    # against the metric's JSON schema either way.
    config_path = args.config
    if config_path is None:
        config = config_utils.get_default_configs(metric)
    else:
        config = json_io.read_json(config_path)
    config_schema = config_utils.get_config_schema(metric)
    jsonschema.validate(config, config_schema)
    # argument: output
    output_path = None
    if args.output:
        output_path = os.path.join(abs_dir, args.output)
    # argument: detail
    detail_dir = None
    if args.detail:
        detail_dir = os.path.join(abs_dir, args.detail)
    # argument: parallel
    is_parallel = False
    if args.parallel:
        is_parallel = args.parallel
    is_path_validation = False
    if args.path_validation:
        is_path_validation = args.path_validation
    # argument: optimize
    optimize_config = None
    if args.optimize:
        optimize_config = json_io.read_json(args.optimize)
    return gold_swc_tree, test_swc_trees, test_swc_paths, metric, output_path, detail_dir, config, is_debug, is_parallel, optimize_config, is_path_validation
def excute_metric(metric, gold_swc_tree, test_swc_tree, config, detail_dir, output_path, metric_method, is_path_validation):
    """Run one metric on one test tree and persist/print the results.

    (Name kept as ``excute_metric`` (sic) because callers reference it.)

    Calls ``metric_method`` with the gold and test trees, prints the
    per-metric summary fields to stdout, optionally writes annotated
    recall/precision SWC files into ``detail_dir`` and the raw result
    dict as JSON to ``output_path``.
    """
    test_swc_name = test_swc_tree.name()
    result, res_gold_swc_tree, res_test_swc_tree = metric_method(
        gold_swc_tree=gold_swc_tree, test_swc_tree=test_swc_tree, config=config
    )

    # Only the keys whitelisted for this metric are echoed to the screen.
    screen_output = config_utils.get_screen_output()
    result_info = ""
    for key in result:
        if key in screen_output[metric]:
            result_info += "{} = {}\n".format(key.ljust(15, " "), result[key])

    print("---------------Result---------------\n" +
          "swc_file_name = {}\n".format(test_swc_name) +
          result_info +
          "----------------End-----------------\n"
          )

    # "<test name without .swc>_<metric>_" prefix for the detail files.
    base_file_name = test_swc_name[:-4] + "_" + metric + "_"

    def save_detail(swc_tree, file_name):
        # Save one annotated detail tree under detail_dir; warn on failure.
        detail_path = os.path.normpath(os.path.join(detail_dir, file_name))
        # path_validation may prompt/adjust; otherwise just refuse to
        # overwrite an existing file (both return None when saving is vetoed).
        if is_path_validation:
            detail_path = cli_utils.path_validation(detail_path, ".swc")
        else:
            detail_path = cli_utils.make_sure_path_not_exist(detail_path)
        ok = False
        if detail_path is not None:
            ok = swc_io.swc_save(
                swc_tree=swc_tree,
                out_path=detail_path,
                extra=anno_utils.get_detail_type(metric),
            )
        if detail_path is None or not ok:
            print("[Warning:] Failed to save details: {}".format(file_name))

    if detail_dir:
        if res_gold_swc_tree is not None:
            save_detail(res_gold_swc_tree, base_file_name+"recall.swc")
        if res_test_swc_tree is not None:
            save_detail(res_test_swc_tree, base_file_name+"precision.swc")

    if output_path:
        if is_path_validation:
            output_path = cli_utils.path_validation(output_path, ".json")
        else:
            output_path = cli_utils.make_sure_path_not_exist(output_path)
        ok = False
        if output_path is not None:
            ok = json_io.save_json(data=result, json_file_path=output_path)
            if ok:
                print("[Info:] Output saved")
        if output_path is None or not ok:
            print("[Warning:] Failed to save output")
# command program
def run():
    """CLI entry point: parse arguments, then evaluate or optimize.

    Dispatches to the optimizer when --optimize is given; otherwise runs
    every test tree against the gold standard, in a process pool when
    --parallel is set, or sequentially.
    """
    abs_dir = os.path.abspath("")
    import_metrics()
    init(abs_dir)
    args = read_parameters()
    gold_swc_tree, test_swc_trees, test_swc_paths, metric, output_path, detail_dir, \
        config, is_debug, is_parallel, optimize_config, is_path_validation = set_configs(abs_dir, args)
    metric_manager = get_metric_manager()
    metric_method = metric_manager.get_metric_method(metric)
    if optimize_config is not None:
        optimize.optimize(gold_swc_tree=gold_swc_tree, test_swc_paths=test_swc_paths,
                          optimize_config=optimize_config, metric_config=config, metric_method=metric_method)
    elif is_parallel:
        # use multi process
        # Cap the pool size at the number of test trees.
        max_procs = cpu_count()
        if len(test_swc_trees) < max_procs:
            max_procs = len(test_swc_trees)
        p_pool = Pool(max_procs)
        for test_swc_tree in test_swc_trees:
            # NOTE(review): apply_async results are never collected, so
            # worker exceptions are silently dropped -- confirm intended.
            p_pool.apply_async(
                excute_metric,
                args=(metric, gold_swc_tree, test_swc_tree, config, detail_dir, output_path, metric_method, is_path_validation),
            )
        p_pool.close()
        p_pool.join()
    else:
        for test_swc_tree in test_swc_trees:
            excute_metric(
                metric=metric,
                gold_swc_tree=gold_swc_tree,
                test_swc_tree=test_swc_tree,
                config=config,
                detail_dir=detail_dir,
                output_path=output_path,
                metric_method=metric_method,
                is_path_validation=is_path_validation,
            )
    print("Done!")
# Script entry point: exit with run()'s return value (None -> status 0).
if __name__ == "__main__":
    sys.exit(run())
| [
"pyneval.metric.utils.cli_utils.make_sure_path_not_exist",
"pyneval.pyneval_io.json_io.save_json",
"pkg_resources.require",
"pyneval.metric.utils.config_utils.get_config_schema",
"multiprocessing.cpu_count",
"pyneval.errors.exceptions.PyNevalError",
"sys.path.append",
"pyneval.pyneval_io.json_io.read_... | [((656, 696), 'os.path.join', 'os.path.join', (['base_dir', '"""pyneval/metric"""'], {}), "(base_dir, 'pyneval/metric')\n", (668, 696), False, 'import os\n'), ((709, 732), 'os.listdir', 'os.listdir', (['metric_path'], {}), '(metric_path)\n', (719, 732), False, 'import os\n'), ((1053, 1073), 'pyneval.metric.utils.metric_manager.get_metric_manager', 'get_metric_manager', ([], {}), '()\n', (1071, 1073), False, 'from pyneval.metric.utils.metric_manager import get_metric_manager\n'), ((2929, 2953), 'sys.path.append', 'sys.path.append', (['abs_dir'], {}), '(abs_dir)\n', (2944, 2953), False, 'import sys\n'), ((3059, 3089), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(1000000)'], {}), '(1000000)\n', (3080, 3089), False, 'import sys\n'), ((3299, 3331), 'os.path.join', 'os.path.join', (['abs_dir', 'args.gold'], {}), '(abs_dir, args.gold)\n', (3311, 3331), False, 'import os\n'), ((3352, 3387), 'pyneval.pyneval_io.swc_io.read_swc_tree', 'swc_io.read_swc_tree', (['gold_swc_path'], {}), '(gold_swc_path)\n', (3372, 3387), False, 'from pyneval.pyneval_io import swc_io\n'), ((3444, 3464), 'pyneval.metric.utils.metric_manager.get_metric_manager', 'get_metric_manager', ([], {}), '()\n', (3462, 3464), False, 'from pyneval.metric.utils.metric_manager import get_metric_manager\n'), ((4335, 4373), 'pyneval.metric.utils.config_utils.get_config_schema', 'config_utils.get_config_schema', (['metric'], {}), '(metric)\n', (4365, 4373), False, 'from pyneval.metric.utils import anno_utils, config_utils\n'), ((4378, 4420), 'jsonschema.validate', 'jsonschema.validate', (['config', 'config_schema'], {}), '(config, config_schema)\n', (4397, 4420), False, 'import jsonschema\n'), ((5520, 5552), 'pyneval.metric.utils.config_utils.get_screen_output', 'config_utils.get_screen_output', ([], {}), '()\n', (5550, 5552), False, 'from pyneval.metric.utils import anno_utils, config_utils\n'), ((7446, 7465), 'os.path.abspath', 'os.path.abspath', (['""""""'], {}), 
"('')\n", (7461, 7465), False, 'import os\n'), ((7743, 7763), 'pyneval.metric.utils.metric_manager.get_metric_manager', 'get_metric_manager', ([], {}), '()\n', (7761, 7763), False, 'from pyneval.metric.utils.metric_manager import get_metric_manager\n'), ((979, 1006), 'importlib.import_module', 'importlib.import_module', (['md'], {}), '(md)\n', (1002, 1006), False, 'import importlib\n'), ((2974, 3002), 'os.path.join', 'os.path.join', (['abs_dir', '"""src"""'], {}), "(abs_dir, 'src')\n", (2986, 3002), False, 'import os\n'), ((3024, 3053), 'os.path.join', 'os.path.join', (['abs_dir', '"""test"""'], {}), "(abs_dir, 'test')\n", (3036, 3053), False, 'import os\n'), ((3672, 3699), 'os.path.join', 'os.path.join', (['abs_dir', 'path'], {}), '(abs_dir, path)\n', (3684, 3699), False, 'import os\n'), ((3969, 4010), 'pyneval.errors.exceptions.PyNevalError', 'PyNevalError', (['"""test models can\'t be null"""'], {}), '("test models can\'t be null")\n', (3981, 4010), False, 'from pyneval.errors.exceptions import InvalidMetricError, PyNevalError\n'), ((4215, 4255), 'pyneval.metric.utils.config_utils.get_default_configs', 'config_utils.get_default_configs', (['metric'], {}), '(metric)\n', (4247, 4255), False, 'from pyneval.metric.utils import anno_utils, config_utils\n'), ((4283, 4313), 'pyneval.pyneval_io.json_io.read_json', 'json_io.read_json', (['config_path'], {}), '(config_path)\n', (4300, 4313), False, 'from pyneval.pyneval_io import json_io\n'), ((4510, 4544), 'os.path.join', 'os.path.join', (['abs_dir', 'args.output'], {}), '(abs_dir, args.output)\n', (4522, 4544), False, 'import os\n'), ((4632, 4666), 'os.path.join', 'os.path.join', (['abs_dir', 'args.detail'], {}), '(abs_dir, args.detail)\n', (4644, 4666), False, 'import os\n'), ((4987, 5019), 'pyneval.pyneval_io.json_io.read_json', 'json_io.read_json', (['args.optimize'], {}), '(args.optimize)\n', (5004, 5019), False, 'from pyneval.pyneval_io import json_io\n'), ((7870, 8040), 'pyneval.tools.optimize.optimize.optimize', 
'optimize.optimize', ([], {'gold_swc_tree': 'gold_swc_tree', 'test_swc_paths': 'test_swc_paths', 'optimize_config': 'optimize_config', 'metric_config': 'config', 'metric_method': 'metric_method'}), '(gold_swc_tree=gold_swc_tree, test_swc_paths=\n test_swc_paths, optimize_config=optimize_config, metric_config=config,\n metric_method=metric_method)\n', (7887, 8040), False, 'from pyneval.tools.optimize import optimize\n'), ((3892, 3919), 'pyneval.pyneval_io.swc_io.read_swc_trees', 'swc_io.read_swc_trees', (['file'], {}), '(file)\n', (3913, 3919), False, 'from pyneval.pyneval_io import swc_io\n'), ((6061, 6096), 'os.path.join', 'os.path.join', (['detail_dir', 'file_name'], {}), '(detail_dir, file_name)\n', (6073, 6096), False, 'import os\n'), ((6155, 6201), 'pyneval.metric.utils.cli_utils.path_validation', 'cli_utils.path_validation', (['detail_path', '""".swc"""'], {}), "(detail_path, '.swc')\n", (6180, 6201), False, 'from pyneval.metric.utils import cli_utils\n'), ((6242, 6289), 'pyneval.metric.utils.cli_utils.make_sure_path_not_exist', 'cli_utils.make_sure_path_not_exist', (['detail_path'], {}), '(detail_path)\n', (6276, 6289), False, 'from pyneval.metric.utils import cli_utils\n'), ((6973, 7020), 'pyneval.metric.utils.cli_utils.path_validation', 'cli_utils.path_validation', (['output_path', '""".json"""'], {}), "(output_path, '.json')\n", (6998, 7020), False, 'from pyneval.metric.utils import cli_utils\n'), ((7061, 7108), 'pyneval.metric.utils.cli_utils.make_sure_path_not_exist', 'cli_utils.make_sure_path_not_exist', (['output_path'], {}), '(output_path)\n', (7095, 7108), False, 'from pyneval.metric.utils import cli_utils\n'), ((7181, 7239), 'pyneval.pyneval_io.json_io.save_json', 'json_io.save_json', ([], {'data': 'result', 'json_file_path': 'output_path'}), '(data=result, json_file_path=output_path)\n', (7198, 7239), False, 'from pyneval.pyneval_io import json_io\n'), ((8128, 8139), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (8137, 8139), False, 
'from multiprocessing import Pool, cpu_count\n'), ((8245, 8260), 'multiprocessing.Pool', 'Pool', (['max_procs'], {}), '(max_procs)\n', (8249, 8260), False, 'from multiprocessing import Pool, cpu_count\n'), ((609, 634), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (624, 634), False, 'import os\n'), ((6474, 6508), 'pyneval.metric.utils.anno_utils.get_detail_type', 'anno_utils.get_detail_type', (['metric'], {}), '(metric)\n', (6500, 6508), False, 'from pyneval.metric.utils import anno_utils, config_utils\n'), ((1170, 1202), 'pkg_resources.require', 'pkg_resources.require', (['"""pyneval"""'], {}), "('pyneval')\n", (1191, 1202), False, 'import pkg_resources\n')] |
"""DB Games model migrations
Revision ID: 89944f8b35b3
Revises:
Create Date: 2020-11-14 03:49:03.255055
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "89944f8b35b3"
down_revision = None  # None: this is the root of the migration chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the games schema.

    Parent tables (company, franchise) are created before the tables that
    hold foreign keys onto them (console, game), followed by the
    association tables.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # Game companies; also flagged as publisher or not.
    op.create_table(
        "company",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=250), nullable=False),
        sa.Column("creation_year", sa.DateTime(), nullable=True),
        sa.Column("description", sa.String(length=500), nullable=True),
        sa.Column("logo", sa.String(length=500), nullable=True),
        sa.Column("is_publisher", sa.Boolean(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    # Game franchises (series).
    op.create_table(
        "franchise",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("title", sa.String(length=250), nullable=False),
        sa.Column("first_release", sa.DateTime(), nullable=True),
        sa.Column("description", sa.String(length=250), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    # Consoles; each belongs to a company (FK cascades on delete).
    op.create_table(
        "console",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=250), nullable=False),
        sa.Column("release_year", sa.DateTime(), nullable=False),
        sa.Column("description", sa.String(length=500), nullable=True),
        sa.Column("cover", sa.String(length=500), nullable=True),
        sa.Column("motto", sa.String(length=100), nullable=False),
        sa.Column("company_id", sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(["company_id"], ["company.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
    )
    # Games; each may belong to a franchise (FK cascades on delete).
    op.create_table(
        "game",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=250), nullable=False),
        sa.Column("publication_year", sa.DateTime(), nullable=True),
        sa.Column("score", sa.Integer(), nullable=True),
        sa.Column("description", sa.String(length=500), nullable=True),
        sa.Column("cover", sa.String(length=500), nullable=True),
        sa.Column("franchise_id", sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(["franchise_id"], ["franchise.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
    )
    # Association of franchise / game / console, with its own surrogate key.
    op.create_table(
        "franchiseassociation",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("franchise_id", sa.Integer(), nullable=True),
        sa.Column("game_id", sa.Integer(), nullable=True),
        sa.Column("console_id", sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(["console_id"], ["console.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["franchise_id"], ["franchise.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["game_id"], ["game.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
    )
    # Plain many-to-many link table between games and consoles.
    op.create_table(
        "game_console_table",
        sa.Column("game_id", sa.Integer(), nullable=True),
        sa.Column("console_id", sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(
            ["console_id"],
            ["console.id"],
        ),
        sa.ForeignKeyConstraint(
            ["game_id"],
            ["game.id"],
        ),
    )
    # ### end Alembic commands ###
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("game_console_table")
op.drop_table("franchiseassociation")
op.drop_table("game")
op.drop_table("console")
op.drop_table("franchise")
op.drop_table("company")
# ### end Alembic commands ###
| [
"sqlalchemy.ForeignKeyConstraint",
"sqlalchemy.DateTime",
"alembic.op.drop_table",
"sqlalchemy.Boolean",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.Integer",
"sqlalchemy.String"
] | [((3521, 3556), 'alembic.op.drop_table', 'op.drop_table', (['"""game_console_table"""'], {}), "('game_console_table')\n", (3534, 3556), False, 'from alembic import op\n'), ((3561, 3598), 'alembic.op.drop_table', 'op.drop_table', (['"""franchiseassociation"""'], {}), "('franchiseassociation')\n", (3574, 3598), False, 'from alembic import op\n'), ((3603, 3624), 'alembic.op.drop_table', 'op.drop_table', (['"""game"""'], {}), "('game')\n", (3616, 3624), False, 'from alembic import op\n'), ((3629, 3653), 'alembic.op.drop_table', 'op.drop_table', (['"""console"""'], {}), "('console')\n", (3642, 3653), False, 'from alembic import op\n'), ((3658, 3684), 'alembic.op.drop_table', 'op.drop_table', (['"""franchise"""'], {}), "('franchise')\n", (3671, 3684), False, 'from alembic import op\n'), ((3689, 3713), 'alembic.op.drop_table', 'op.drop_table', (['"""company"""'], {}), "('company')\n", (3702, 3713), False, 'from alembic import op\n'), ((806, 835), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (829, 835), True, 'import sqlalchemy as sa\n'), ((1153, 1182), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1176, 1182), True, 'import sqlalchemy as sa\n'), ((1692, 1767), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['company_id']", "['company.id']"], {'ondelete': '"""CASCADE"""'}), "(['company_id'], ['company.id'], ondelete='CASCADE')\n", (1715, 1767), True, 'import sqlalchemy as sa\n'), ((1777, 1806), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1800, 1806), True, 'import sqlalchemy as sa\n'), ((2308, 2387), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['franchise_id']", "['franchise.id']"], {'ondelete': '"""CASCADE"""'}), "(['franchise_id'], ['franchise.id'], ondelete='CASCADE')\n", (2331, 2387), True, 'import sqlalchemy as sa\n'), ((2397, 2426), 'sqlalchemy.PrimaryKeyConstraint', 
'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (2420, 2426), True, 'import sqlalchemy as sa\n'), ((2735, 2810), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['console_id']", "['console.id']"], {'ondelete': '"""CASCADE"""'}), "(['console_id'], ['console.id'], ondelete='CASCADE')\n", (2758, 2810), True, 'import sqlalchemy as sa\n'), ((2820, 2899), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['franchise_id']", "['franchise.id']"], {'ondelete': '"""CASCADE"""'}), "(['franchise_id'], ['franchise.id'], ondelete='CASCADE')\n", (2843, 2899), True, 'import sqlalchemy as sa\n'), ((2909, 2978), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['game_id']", "['game.id']"], {'ondelete': '"""CASCADE"""'}), "(['game_id'], ['game.id'], ondelete='CASCADE')\n", (2932, 2978), True, 'import sqlalchemy as sa\n'), ((2988, 3017), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (3011, 3017), True, 'import sqlalchemy as sa\n'), ((3205, 3260), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['console_id']", "['console.id']"], {}), "(['console_id'], ['console.id'])\n", (3228, 3260), True, 'import sqlalchemy as sa\n'), ((3305, 3354), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['game_id']", "['game.id']"], {}), "(['game_id'], ['game.id'])\n", (3328, 3354), True, 'import sqlalchemy as sa\n'), ((434, 446), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (444, 446), True, 'import sqlalchemy as sa\n'), ((491, 512), 'sqlalchemy.String', 'sa.String', ([], {'length': '(250)'}), '(length=250)\n', (500, 512), True, 'import sqlalchemy as sa\n'), ((566, 579), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (577, 579), True, 'import sqlalchemy as sa\n'), ((630, 651), 'sqlalchemy.String', 'sa.String', ([], {'length': '(500)'}), '(length=500)\n', (639, 651), True, 'import sqlalchemy as sa\n'), ((695, 716), 'sqlalchemy.String', 'sa.String', 
([], {'length': '(500)'}), '(length=500)\n', (704, 716), True, 'import sqlalchemy as sa\n'), ((768, 780), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (778, 780), True, 'import sqlalchemy as sa\n'), ((909, 921), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (919, 921), True, 'import sqlalchemy as sa\n'), ((967, 988), 'sqlalchemy.String', 'sa.String', ([], {'length': '(250)'}), '(length=250)\n', (976, 988), True, 'import sqlalchemy as sa\n'), ((1042, 1055), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (1053, 1055), True, 'import sqlalchemy as sa\n'), ((1106, 1127), 'sqlalchemy.String', 'sa.String', ([], {'length': '(250)'}), '(length=250)\n', (1115, 1127), True, 'import sqlalchemy as sa\n'), ((1254, 1266), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1264, 1266), True, 'import sqlalchemy as sa\n'), ((1311, 1332), 'sqlalchemy.String', 'sa.String', ([], {'length': '(250)'}), '(length=250)\n', (1320, 1332), True, 'import sqlalchemy as sa\n'), ((1385, 1398), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (1396, 1398), True, 'import sqlalchemy as sa\n'), ((1450, 1471), 'sqlalchemy.String', 'sa.String', ([], {'length': '(500)'}), '(length=500)\n', (1459, 1471), True, 'import sqlalchemy as sa\n'), ((1516, 1537), 'sqlalchemy.String', 'sa.String', ([], {'length': '(500)'}), '(length=500)\n', (1525, 1537), True, 'import sqlalchemy as sa\n'), ((1582, 1603), 'sqlalchemy.String', 'sa.String', ([], {'length': '(100)'}), '(length=100)\n', (1591, 1603), True, 'import sqlalchemy as sa\n'), ((1654, 1666), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1664, 1666), True, 'import sqlalchemy as sa\n'), ((1875, 1887), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1885, 1887), True, 'import sqlalchemy as sa\n'), ((1932, 1953), 'sqlalchemy.String', 'sa.String', ([], {'length': '(250)'}), '(length=250)\n', (1941, 1953), True, 'import sqlalchemy as sa\n'), ((2010, 2023), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', 
(2021, 2023), True, 'import sqlalchemy as sa\n'), ((2068, 2080), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2078, 2080), True, 'import sqlalchemy as sa\n'), ((2131, 2152), 'sqlalchemy.String', 'sa.String', ([], {'length': '(500)'}), '(length=500)\n', (2140, 2152), True, 'import sqlalchemy as sa\n'), ((2197, 2218), 'sqlalchemy.String', 'sa.String', ([], {'length': '(500)'}), '(length=500)\n', (2206, 2218), True, 'import sqlalchemy as sa\n'), ((2270, 2282), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2280, 2282), True, 'import sqlalchemy as sa\n'), ((2511, 2523), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2521, 2523), True, 'import sqlalchemy as sa\n'), ((2576, 2588), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2586, 2588), True, 'import sqlalchemy as sa\n'), ((2635, 2647), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2645, 2647), True, 'import sqlalchemy as sa\n'), ((2697, 2709), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2707, 2709), True, 'import sqlalchemy as sa\n'), ((3105, 3117), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (3115, 3117), True, 'import sqlalchemy as sa\n'), ((3167, 3179), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (3177, 3179), True, 'import sqlalchemy as sa\n')] |
""" Functions for working with tabix dosages in pandas dataframes
"""
import gzip
import numpy as np
import pandas as pd
import pysam
import statsmodels.api as sm
class Dosage(object):
def __init__(self, dosages, annotations, gene_name):
# Match up the annotation dataframe with the dosage dataframe
mindex = np.intersect1d(np.asarray(dosages.index, dtype=str),
np.asarray(annotations.index, dtype=str))
self.annot = annotations.loc[mindex, :]
ordering = self.annot.ix[:, 'pos'].argsort()
self.annot = self.annot.iloc[ordering, :]
self.dosages = dosages.ix[mindex, :]
self.dosages = self.dosages.iloc[ordering, :]
self.gene_name = gene_name
def run_eQTL(self, count_matrix, covariates, extra_snps=None):
#self.pvalues = self.dosages.apply()
pvalues = self.dosages.apply(eQTL_func, axis=1, args=(covariates,
count_matrix.ix[self.gene_name, :]))
self.pvalues = pvalues
def get_dosages_by_range(chrm, start, end, gene_name, annotation_file,
dosage_df, mapping=None):
"""
Fuzzy mapping between annotation and genotypes
Returns Dosage instance.
"""
ann_file = pysam.Tabixfile(annotation_file)
ann_v = ann_file.fetch(chrm, start, end)
rsIDs = []
pos = []
ref = []
alt = []
for i in ann_v:
i = i.split("\t")
rsIDs.append(i[3])
pos.append(int(i[1]))
ref.append(i[6])
alt.append(i[7])
annot = pd.DataFrame({'pos': pos, 'ref': ref, 'alt': alt}, index=pd.Index(rsIDs))
comb_iter = []
for dos in dosage_df:
mindex = np.intersect1d(np.asarray(dos.index, dtype=str),
np.asarray(annot.index, dtype=str))
if len(mindex) > 0:
comb_iter.append(dos.ix[mindex, :])
else:
pass
out_dos = pd.concat(comb_iter)
'''
dosages = pd.read_csv(dosage_path + path, sep=" ", header=None,
index_col = 0, skiprows=roughly_first,
nrows=roughly_end-roughly_first, names=col_names.columns)
'''
print(annot.shape, out_dos.shape, gene_name)
return Dosage(out_dos, annot, gene_name)
def generate_dosage_mapping(dosage_file, mapping_file = None, interval=50):
"""
Returns dictionary of rsIDs: fileposition from a dosage file
"""
if not mapping_file:
with open(dosage_file) as fh:
fh.next()
t = 0
debug = 0
f_i = {}
for i, j in enumerate(fh):
if i % 50 == 0:
f_i[j.split(" ")[0]] = i - 1
else: pass
return(f_i)
def eQTL_func(snps, cov, expression):
"""
"""
cov = cov.T
cov['snps'] = snps
cov = sm.add_constant(cov)
model = sm.OLS(expression, cov)
return(model.fit().pvalues['snps'])
class eQTL(object):
""" Python class for completing eQTLs. Does lazy loading of all large
files.
"""
def __init__(self, dosages_path, expression, vannotation):
self.dosage = dosages_path
self.expression = expression
self.vannotations = vannotations
def generate_mapping():
pass
"""
if mapping:
for i in ann_v:
rsID = i.split("\t")[3]
try:
roughly_first = mapping[rsID]
rsIDs.append(rsID)
pos.append(int(i.split("\t")[1]))
break
except KeyError:
pass
for i in ann_v:
i = i.split("\t")
try:
roughly_end = mapping[i[3]]
except KeyError:
pass
pos.append(int(i[1]))
rsIDs.append(i[3])
"""
def get_annotation(annotation, chrm):
ann_file = pysam.Tabixfile(annotation)
ann_v = ann_file.fetch(chrm)
rsIDs = []
pos = []
ref = []
alt = []
for i in ann_v:
i = i.split("\t")
rsIDs.append(i[3])
pos.append(int(i[1]))
ref.append(i[6])
alt.append(i[7])
annot = pd.DataFrame({'pos': pos, 'ref': ref, 'alt': alt}, index=pd.Index(rsIDs))
return(annot)
| [
"numpy.asarray",
"pysam.Tabixfile",
"pandas.Index",
"statsmodels.api.add_constant",
"statsmodels.api.OLS",
"pandas.concat"
] | [((1215, 1247), 'pysam.Tabixfile', 'pysam.Tabixfile', (['annotation_file'], {}), '(annotation_file)\n', (1230, 1247), False, 'import pysam\n'), ((1870, 1890), 'pandas.concat', 'pd.concat', (['comb_iter'], {}), '(comb_iter)\n', (1879, 1890), True, 'import pandas as pd\n'), ((2765, 2785), 'statsmodels.api.add_constant', 'sm.add_constant', (['cov'], {}), '(cov)\n', (2780, 2785), True, 'import statsmodels.api as sm\n'), ((2798, 2821), 'statsmodels.api.OLS', 'sm.OLS', (['expression', 'cov'], {}), '(expression, cov)\n', (2804, 2821), True, 'import statsmodels.api as sm\n'), ((3788, 3815), 'pysam.Tabixfile', 'pysam.Tabixfile', (['annotation'], {}), '(annotation)\n', (3803, 3815), False, 'import pysam\n'), ((346, 382), 'numpy.asarray', 'np.asarray', (['dosages.index'], {'dtype': 'str'}), '(dosages.index, dtype=str)\n', (356, 382), True, 'import numpy as np\n'), ((401, 441), 'numpy.asarray', 'np.asarray', (['annotations.index'], {'dtype': 'str'}), '(annotations.index, dtype=str)\n', (411, 441), True, 'import numpy as np\n'), ((1569, 1584), 'pandas.Index', 'pd.Index', (['rsIDs'], {}), '(rsIDs)\n', (1577, 1584), True, 'import pandas as pd\n'), ((1663, 1695), 'numpy.asarray', 'np.asarray', (['dos.index'], {'dtype': 'str'}), '(dos.index, dtype=str)\n', (1673, 1695), True, 'import numpy as np\n'), ((1713, 1747), 'numpy.asarray', 'np.asarray', (['annot.index'], {'dtype': 'str'}), '(annot.index, dtype=str)\n', (1723, 1747), True, 'import numpy as np\n'), ((4125, 4140), 'pandas.Index', 'pd.Index', (['rsIDs'], {}), '(rsIDs)\n', (4133, 4140), True, 'import pandas as pd\n')] |
import logging
import enum
import copy
import telegram.error
from telegram import (
InlineKeyboardButton,
InlineKeyboardMarkup,
ParseMode
)
from app.entities import KnowledgeStatus
from app.card import Card
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO
)
class KnowledgeCard(Card):
def __init__(self, bot, word, listener):
self._view = KnowledgeCardView(bot)
self._model = KnowledgeCardModel(
view=self._view,
word=word,
listener=listener,
)
self._controller = KnowledgeCardController(self._model)
self._is_deleted = False
def set_old(self):
self._model.is_old = True
@property
def is_old(self):
return self._model.is_old
def set_as_deleted(self, update, context):
self._model.set_as_deleted(update, context)
def is_deleted(self) -> bool:
return self._is_deleted
def get_word(self):
return copy.copy(self._model._word)
def start(self, update, context) -> str:
return self._model.start(update, context)
def button_clicked(self, update, context):
self._controller.button_clicked(update, context)
class Knowledge(enum.Enum):
true = "✅"
false = "❌"
class KnowledgeCardModel:
def __init__(self, view, word, listener):
self._view = view
self._word = word
self._listener = listener
self._message_id = None
self.is_old = False
def start(self, update, context) -> str:
self._message_id = self._view.send_card(
update=update,
word=self._word,
translation=None,
)
return self._message_id
def show_translation(self, update, context, knowledge):
knowledge_status = KnowledgeStatus.new_word_know
if knowledge == Knowledge.false:
knowledge_status = KnowledgeStatus.new_word_forgot
self._view.update_card(
update=update,
translation=self._word.get_translation() + " " + knowledge.value,
)
self._listener.on_correct_answer_clicked(
update=update,
context=context,
knowledge_status=knowledge_status,
)
def set_as_deleted(self, update, context):
self._view.update_card_as_deleted(
update=update,
context=context,
message_id=self._message_id,
)
self._is_deleted = True
class KnowledgeCardController:
def __init__(self, model):
self._model = model
def button_clicked(self, update, context):
query_data = update.callback_query.data
if query_data == "know":
self._model.show_translation(
update=update,
context=context,
knowledge=Knowledge.true,
)
elif query_data == "forgot":
self._model.show_translation(
update=update,
context=context,
knowledge=Knowledge.false,
)
class KnowledgeCardView:
def __init__(self, bot):
self._bot = bot
@staticmethod
def _get_card_markup(translation=None):
keyboard = [[
InlineKeyboardButton(
text="Know " + Knowledge.true.value,
callback_data="know"
),
InlineKeyboardButton(
text="Forgot " + Knowledge.false.value,
callback_data="forgot"
),
]]
if translation is not None:
keyboard.pop(0)
keyboard.append([
InlineKeyboardButton(
text=translation,
callback_data="translation")
])
return InlineKeyboardMarkup(keyboard)
def send_card(self, update, word, translation):
markup = KnowledgeCardView._get_card_markup(
translation=translation,
)
return self._bot.send_message(
chat_id=update.effective_message.chat_id,
text="*"+word.get_word()+"*",
reply_markup=markup,
parse_mode=ParseMode.MARKDOWN
).message_id
def update_card(self, update, translation):
reply_markup = KnowledgeCardView._get_card_markup(
translation=translation,
)
try:
return self._bot.edit_message_reply_markup(
chat_id=update.effective_message.chat_id,
message_id=update.effective_message.message_id,
reply_markup=reply_markup
)
except telegram.error.BadRequest:
return None
def update_card_as_deleted(self, update, context, message_id):
return self._bot.edit_message_reply_markup(
chat_id=update.effective_message.chat_id,
message_id=message_id,
reply_markup=None
)
| [
"logging.basicConfig",
"copy.copy",
"telegram.InlineKeyboardButton",
"telegram.InlineKeyboardMarkup"
] | [((223, 330), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n", (242, 330), False, 'import logging\n'), ((1022, 1050), 'copy.copy', 'copy.copy', (['self._model._word'], {}), '(self._model._word)\n', (1031, 1050), False, 'import copy\n'), ((3811, 3841), 'telegram.InlineKeyboardMarkup', 'InlineKeyboardMarkup', (['keyboard'], {}), '(keyboard)\n', (3831, 3841), False, 'from telegram import InlineKeyboardButton, InlineKeyboardMarkup, ParseMode\n'), ((3279, 3358), 'telegram.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': "('Know ' + Knowledge.true.value)", 'callback_data': '"""know"""'}), "(text='Know ' + Knowledge.true.value, callback_data='know')\n", (3299, 3358), False, 'from telegram import InlineKeyboardButton, InlineKeyboardMarkup, ParseMode\n'), ((3418, 3507), 'telegram.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': "('Forgot ' + Knowledge.false.value)", 'callback_data': '"""forgot"""'}), "(text='Forgot ' + Knowledge.false.value, callback_data=\n 'forgot')\n", (3438, 3507), False, 'from telegram import InlineKeyboardButton, InlineKeyboardMarkup, ParseMode\n'), ((3671, 3738), 'telegram.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': 'translation', 'callback_data': '"""translation"""'}), "(text=translation, callback_data='translation')\n", (3691, 3738), False, 'from telegram import InlineKeyboardButton, InlineKeyboardMarkup, ParseMode\n')] |
#
# gamefaqs-scraper
# github.com/01mu
#
from gamefaqs_scraper import GFSBoard
from gamefaqs_scraper import GFSThread
board = GFSBoard()
board.get_site('234547-super-smash-bros-ultimate', 0)
threads = board.find()
print("Pages: " + str(board.max_page) + "\n")
for i in range(len(threads)):
print(threads[i].title + "\n" + threads[i].author + "\n" + threads[i].last
+ "\n" + threads[i].replies + "\n" + threads[i].link + "\n")
'''
thread = GFSThread()
thread.get_site('234547-super-smash-bros-ultimate/77126753', 0)
posts = thread.find()
print("Pages: " + str(thread.max_page) + "\n")
for i in range(len(posts)):
print(posts[i].author + "\n" + posts[i].date + "\n" + posts[i].body + "\n")
'''
| [
"gamefaqs_scraper.GFSBoard"
] | [((128, 138), 'gamefaqs_scraper.GFSBoard', 'GFSBoard', ([], {}), '()\n', (136, 138), False, 'from gamefaqs_scraper import GFSBoard\n')] |
# Generated by Django 2.1.5 on 2019-02-27 02:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('batchrecords', '0002_auto_20190226_1939'),
]
operations = [
migrations.RemoveField(
model_name='historicalbatchrecord',
name='created_by',
),
migrations.RemoveField(
model_name='historicalbatchrecord',
name='history_user',
),
migrations.RemoveField(
model_name='historicalbatchrecord',
name='product',
),
migrations.RemoveField(
model_name='historicalbatchrecord',
name='updated_by',
),
migrations.DeleteModel(
name='HistoricalBatchRecord',
),
]
| [
"django.db.migrations.DeleteModel",
"django.db.migrations.RemoveField"
] | [((232, 309), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""historicalbatchrecord"""', 'name': '"""created_by"""'}), "(model_name='historicalbatchrecord', name='created_by')\n", (254, 309), False, 'from django.db import migrations\n'), ((354, 433), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""historicalbatchrecord"""', 'name': '"""history_user"""'}), "(model_name='historicalbatchrecord', name='history_user')\n", (376, 433), False, 'from django.db import migrations\n'), ((478, 552), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""historicalbatchrecord"""', 'name': '"""product"""'}), "(model_name='historicalbatchrecord', name='product')\n", (500, 552), False, 'from django.db import migrations\n'), ((597, 674), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""historicalbatchrecord"""', 'name': '"""updated_by"""'}), "(model_name='historicalbatchrecord', name='updated_by')\n", (619, 674), False, 'from django.db import migrations\n'), ((719, 771), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""HistoricalBatchRecord"""'}), "(name='HistoricalBatchRecord')\n", (741, 771), False, 'from django.db import migrations\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import unittest
from marbles.ie import grpc
from marbles.ie.ccg import parse_ccg_derivation2 as parse_ccg_derivation
from marbles.ie.drt.drs import Rel
from marbles.ie.semantics.ccg import process_ccg_pt, pt_to_ccg_derivation
from marbles.ie.core.constants import *
from marbles.ie.utils.text import preprocess_sentence
from marbles.test import dprint
class PossessiveTest(unittest.TestCase):
def setUp(self):
self.svc = grpc.CcgParserService('easysrl')
self.stub = self.svc.open_client()
def tearDown(self):
self.svc.shutdown()
def test10_Brutus(self):
text = "Ceasar was stabbed by Brutus"
derivation = grpc.ccg_parse(self.stub, text, grpc.DEFAULT_SESSION)
pt = parse_ccg_derivation(derivation)
sentence = process_ccg_pt(pt, CO_NO_VERBNET|CO_NO_WIKI_SEARCH)
d = sentence.get_drs()
dprint(pt_to_ccg_derivation(pt))
dprint(d)
fnps = sentence.get_np_nominals()
nps = [sp.text for r, sp in fnps]
#self.assertTrue('Average maturity' in nps)
self.assertTrue('Brutus' in nps)
self.assertTrue('Ceasar' in nps)
fvps = sentence.get_vp_nominals()
vps = [sp.text for r, sp in fvps]
self.assertTrue('was stabbed' in vps)
E = filter(lambda x: x[1].text == "was stabbed", fvps)[0][0]
A1 = filter(lambda x: x[1].text == "Brutus", fnps)[0][0]
A0 = filter(lambda x: x[1].text == "Ceasar", fnps)[0][0]
self.assertTrue(d.find_condition(Rel('_ARG0', [E, A0])) is not None)
self.assertTrue(d.find_condition(Rel('_ARG1', [E, A1])) is not None)
if __name__ == '__main__':
unittest.main()
if __name__ == '__main__':
unittest.main()
| [
"marbles.ie.ccg.parse_ccg_derivation2",
"marbles.test.dprint",
"marbles.ie.drt.drs.Rel",
"marbles.ie.semantics.ccg.pt_to_ccg_derivation",
"marbles.ie.semantics.ccg.process_ccg_pt",
"marbles.ie.grpc.ccg_parse",
"unittest.main",
"marbles.ie.grpc.CcgParserService"
] | [((1737, 1752), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1750, 1752), False, 'import unittest\n'), ((1786, 1801), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1799, 1801), False, 'import unittest\n'), ((515, 547), 'marbles.ie.grpc.CcgParserService', 'grpc.CcgParserService', (['"""easysrl"""'], {}), "('easysrl')\n", (536, 547), False, 'from marbles.ie import grpc\n'), ((741, 794), 'marbles.ie.grpc.ccg_parse', 'grpc.ccg_parse', (['self.stub', 'text', 'grpc.DEFAULT_SESSION'], {}), '(self.stub, text, grpc.DEFAULT_SESSION)\n', (755, 794), False, 'from marbles.ie import grpc\n'), ((808, 840), 'marbles.ie.ccg.parse_ccg_derivation2', 'parse_ccg_derivation', (['derivation'], {}), '(derivation)\n', (828, 840), True, 'from marbles.ie.ccg import parse_ccg_derivation2 as parse_ccg_derivation\n'), ((860, 913), 'marbles.ie.semantics.ccg.process_ccg_pt', 'process_ccg_pt', (['pt', '(CO_NO_VERBNET | CO_NO_WIKI_SEARCH)'], {}), '(pt, CO_NO_VERBNET | CO_NO_WIKI_SEARCH)\n', (874, 913), False, 'from marbles.ie.semantics.ccg import process_ccg_pt, pt_to_ccg_derivation\n'), ((992, 1001), 'marbles.test.dprint', 'dprint', (['d'], {}), '(d)\n', (998, 1001), False, 'from marbles.test import dprint\n'), ((958, 982), 'marbles.ie.semantics.ccg.pt_to_ccg_derivation', 'pt_to_ccg_derivation', (['pt'], {}), '(pt)\n', (978, 982), False, 'from marbles.ie.semantics.ccg import process_ccg_pt, pt_to_ccg_derivation\n'), ((1590, 1611), 'marbles.ie.drt.drs.Rel', 'Rel', (['"""_ARG0"""', '[E, A0]'], {}), "('_ARG0', [E, A0])\n", (1593, 1611), False, 'from marbles.ie.drt.drs import Rel\n'), ((1667, 1688), 'marbles.ie.drt.drs.Rel', 'Rel', (['"""_ARG1"""', '[E, A1]'], {}), "('_ARG1', [E, A1])\n", (1670, 1688), False, 'from marbles.ie.drt.drs import Rel\n')] |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" CNN model configuration """
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import sys
from .configuration_utils import PretrainedConfig
import numpy as np
logger = logging.getLogger(__name__)
class ClassifierConfig(PretrainedConfig):
def __init__(self,
batch_size=50,
class_size=2,
dropout_prob=0.1,
cnn_train=True,
**kwargs):
super(ClassifierConfig, self).__init__(**kwargs)
self.batch_size = batch_size
self.class_size = class_size
self.dropout_prob = dropout_prob
self.cnn_train = cnn_train
| [
"logging.getLogger"
] | [((932, 959), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (949, 959), False, 'import logging\n')] |
#Step 1 :- Importing dependancies and train test data generated
from config import *
train_data = pd.read_csv("data/train_data/train_feature.csv")
test_data = pd.read_csv("data/test_data/test_feature.csv")
#Step 2 :- Getting train data insights and drop unnecessary columns, Splitting data into input and target variable sets.
print(list(train_data['redemption_status']).count(0) * 100 / len(train_data['redemption_status']), "% coupons not redeemed in training data ")
X = train_data
X.dropna(inplace=True)
X.drop(["id","campaign_id","c_freq_category","c_rare_category","start_date","end_date","duration","age_range","overall_freq_category","overall_rare_category"], axis=1,inplace=True)
y = train_data['redemption_status']
X.drop('redemption_status',axis = 1, inplace = True)
#Step 3 :- Train-test Split for the model
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
#Step 4 :- Initiate model and fit transform
model = GaussianNB()
model.fit(X_train, y_train)
#Step 5 :- Predict on the test part of the split
y_pred = model.predict(X_test)
#Step 6 :- Save the model for the inference engine
filename = 'model/finalized_model_2.sav'
pickle.dump(model, open(filename, 'wb'))
#Step 7 :- Calculate Training data accuracy of the model
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
#Step 8 :- Use the model on test data to predict the target in test data
Y = test_data
Y.drop(["id","campaign_id","c_freq_category","c_rare_category","start_date","end_date","duration","age_range","overall_freq_category","overall_rare_category"], axis=1,inplace=True)
Y.dropna(inplace = True)
Predictions = model.predict(Y)
# Print results
print(list(Predictions).count(0) * 100 / len(Predictions) , "% Coupans not redeemed in Test Data" )
| [
"sklearn.model_selection.train_test_split"
] | [((935, 990), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(X, y, test_size=0.33, random_state=42)\n', (951, 990), False, 'from sklearn.model_selection import train_test_split\n')] |
# Copyright 2018 The go-python Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Testcases for functions in math.
#
# Each line takes the form:
#
# <testid> <function> <input_value> -> <output_value> <flags>
#
# where:
#
# <testid> is a short name identifying the test,
#
# <function> is the function to be tested (exp, cos, asinh, ...),
#
# <input_value> is a string representing a floating-point value
#
# <output_value> is the expected (ideal) output value, again
# represented as a string.
#
# <flags> is a list of the floating-point flags required by C99
#
# The possible flags are:
#
# divide-by-zero : raised when a finite input gives a
# mathematically infinite result.
#
# overflow : raised when a finite input gives a finite result that
# is too large to fit in the usual range of an IEEE 754 double.
#
# invalid : raised for invalid inputs (e.g., sqrt(-1))
#
# ignore-sign : indicates that the sign of the result is
# unspecified; e.g., if the result is given as inf,
# then both -inf and inf should be accepted as correct.
#
# Flags may appear in any order.
#
# Lines beginning with '--' (like this one) start a comment, and are
# ignored. Blank lines, or lines containing only whitespace, are also
# ignored.
# Many of the values below were computed with the help of
# version 2.4 of the MPFR library for multiple-precision
# floating-point computations with correct rounding. All output
# values in this file are (modulo yet-to-be-discovered bugs)
# correctly rounded, provided that each input and output decimal
# floating-point value below is interpreted as a representation of
# the corresponding nearest IEEE 754 double-precision value. See the
# MPFR homepage at http://www.mpfr.org for more information about the
# MPFR project.
import math
from libtest import *
from libulp import *
doc="testcases"
inf = float("inf")
nan = float("nan")
def tolerance(a, b, e):
"""Return if a-b is within tolerance e"""
d = a - b
if d < 0:
d = -d
if a != 0:
e = e * a
if e < 0:
e = -e
return d <= e
def acc_check(what, want, got, rel_err=2e-15, abs_err = 5e-323):
"""Determine whether non-NaN floats a and b are equal to within a
(small) rounding error. The default values for rel_err and
abs_err are chosen to be suitable for platforms where a float is
represented by an IEEE 754 double. They allow an error of between
9 and 19 ulps."""
# need to special case infinities, since inf - inf gives nan
if math.isinf(want) and got == want:
return
error = got - want
permitted_error = rel_err * abs(want)
if abs_err > permitted_error:
permitted_error = abs_err
if abs(error) < permitted_error:
return
raise AssertionError("%s: want %g, got %g: error = %g; permitted error = %g" % (what, want, got, error, permitted_error))
def t(name, fn, x, want, exc=None):
    """Run one named testcase: check that fn(x) equals want, or — when
    *exc* is given — that fn(x) raises exc.

    The testcase name is stored in the module-global ``doc`` so that
    the accuracy checkers can report which case failed.
    """
    global doc
    doc = name
    if exc is not None:
        # Expected-failure case: fn(x) must raise exc.
        try:
            fn(x)
        except exc:
            return
        assert False, "%s not raised" % exc
    got = fn(x)
    # Special values are compared exactly.
    if math.isnan(want) and math.isnan(got):
        return
    if want == inf and got == inf:
        return
    if want == -inf and got == -inf:
        return
    if fn is math.lgamma:
        # lgamma only achieves an absolute error of a few multiples of
        # the machine accuracy in general, so use a weaker tolerance.
        acc_check(doc, want, got, rel_err=5e-15, abs_err=5e-15)
    elif fn is math.erfc:
        # erfc has less-than-ideal accuracy for large arguments
        # (x ~ 25 or so), mainly due to the error in exp(-x*x).
        # XXX Would be better to weaken this only for large x.
        ulps_check(doc, want, got, 2000)
    else:
        ulps_check(doc, want, got, 20)
#
# erf: error function --
#
t("erf0000", math.erf, 0.0, 0.0)
t("erf0001", math.erf, -0.0, -0.0)
t("erf0002", math.erf, inf, 1.0)
t("erf0003", math.erf, -inf, -1.0)
t("erf0004", math.erf, nan, nan)
# tiny values
t("erf0010", math.erf, 1e-308, 1.1283791670955125e-308)
t("erf0011", math.erf, 5e-324, 4.9406564584124654e-324)
t("erf0012", math.erf, 1e-10, 1.1283791670955126e-10)
# small integers
t("erf0020", math.erf, 1, 0.84270079294971489)
t("erf0021", math.erf, 2, 0.99532226501895271)
t("erf0022", math.erf, 3, 0.99997790950300136)
t("erf0023", math.erf, 4, 0.99999998458274209)
t("erf0024", math.erf, 5, 0.99999999999846256)
t("erf0025", math.erf, 6, 1.0)
t("erf0030", math.erf, -1, -0.84270079294971489)
t("erf0031", math.erf, -2, -0.99532226501895271)
t("erf0032", math.erf, -3, -0.99997790950300136)
t("erf0033", math.erf, -4, -0.99999998458274209)
t("erf0034", math.erf, -5, -0.99999999999846256)
t("erf0035", math.erf, -6, -1.0)
# huge values should all go to +/-1, depending on sign
t("erf0040", math.erf, -40, -1.0)
t("erf0041", math.erf, 1e16, 1.0)
t("erf0042", math.erf, -1e150, -1.0)
t("erf0043", math.erf, 1.7e308, 1.0)
# Issue 8986: inputs x with exp(-x*x) near the underflow threshold
# incorrectly signalled overflow on some platforms.
t("erf0100", math.erf, 26.2, 1.0)
t("erf0101", math.erf, 26.4, 1.0)
t("erf0102", math.erf, 26.6, 1.0)
t("erf0103", math.erf, 26.8, 1.0)
t("erf0104", math.erf, 27.0, 1.0)
t("erf0105", math.erf, 27.2, 1.0)
t("erf0106", math.erf, 27.4, 1.0)
t("erf0107", math.erf, 27.6, 1.0)
t("erf0110", math.erf, -26.2, -1.0)
t("erf0111", math.erf, -26.4, -1.0)
t("erf0112", math.erf, -26.6, -1.0)
t("erf0113", math.erf, -26.8, -1.0)
t("erf0114", math.erf, -27.0, -1.0)
t("erf0115", math.erf, -27.2, -1.0)
t("erf0116", math.erf, -27.4, -1.0)
t("erf0117", math.erf, -27.6, -1.0)
#
# erfc: complementary error function --
#
t("erfc0000", math.erfc, 0.0, 1.0)
t("erfc0001", math.erfc, -0.0, 1.0)
t("erfc0002", math.erfc, inf, 0.0)
t("erfc0003", math.erfc, -inf, 2.0)
t("erfc0004", math.erfc, nan, nan)
# tiny values
t("erfc0010", math.erfc, 1e-308, 1.0)
t("erfc0011", math.erfc, 5e-324, 1.0)
t("erfc0012", math.erfc, 1e-10, 0.99999999988716204)
# small integers
t("erfc0020", math.erfc, 1, 0.15729920705028513)
t("erfc0021", math.erfc, 2, 0.0046777349810472662)
t("erfc0022", math.erfc, 3, 2.2090496998585441e-05)
t("erfc0023", math.erfc, 4, 1.541725790028002e-08)
t("erfc0024", math.erfc, 5, 1.5374597944280349e-12)
t("erfc0025", math.erfc, 6, 2.1519736712498913e-17)
t("erfc0030", math.erfc, -1, 1.8427007929497148)
t("erfc0031", math.erfc, -2, 1.9953222650189528)
t("erfc0032", math.erfc, -3, 1.9999779095030015)
t("erfc0033", math.erfc, -4, 1.9999999845827421)
t("erfc0034", math.erfc, -5, 1.9999999999984626)
t("erfc0035", math.erfc, -6, 2.0)
# as x -> infinity, erfc(x) behaves like exp(-x*x)/x/sqrt(pi)
t("erfc0040", math.erfc, 20, 5.3958656116079012e-176)
t("erfc0041", math.erfc, 25, 8.3001725711965228e-274)
# FIXME(underflows to 0) t("erfc0042", math.erfc, 27, 5.2370464393526292e-319)
t("erfc0043", math.erfc, 28, 0.0)
# huge values
t("erfc0050", math.erfc, -40, 2.0)
t("erfc0051", math.erfc, 1e16, 0.0)
t("erfc0052", math.erfc, -1e150, 2.0)
t("erfc0053", math.erfc, 1.7e308, 0.0)
# Issue 8986: inputs x with exp(-x*x) near the underflow threshold
# incorrectly signalled overflow on some platforms.
t("erfc0100", math.erfc, 26.2, 1.6432507924389461e-300)
t("erfc0101", math.erfc, 26.4, 4.4017768588035426e-305)
t("erfc0102", math.erfc, 26.6, 1.0885125885442269e-309)
# FIXME(underflows to 0) t("erfc0103", math.erfc, 26.8, 2.4849621571966629e-314)
# FIXME(underflows to 0) t("erfc0104", math.erfc, 27.0, 5.2370464393526292e-319)
# FIXME(underflows to 0) t("erfc0105", math.erfc, 27.2, 9.8813129168249309e-324)
t("erfc0106", math.erfc, 27.4, 0.0)
t("erfc0107", math.erfc, 27.6, 0.0)
t("erfc0110", math.erfc, -26.2, 2.0)
t("erfc0111", math.erfc, -26.4, 2.0)
t("erfc0112", math.erfc, -26.6, 2.0)
t("erfc0113", math.erfc, -26.8, 2.0)
t("erfc0114", math.erfc, -27.0, 2.0)
t("erfc0115", math.erfc, -27.2, 2.0)
t("erfc0116", math.erfc, -27.4, 2.0)
t("erfc0117", math.erfc, -27.6, 2.0)
#
# lgamma: log of absolute value of the gamma function --
#
# special values
t("lgam0000", math.lgamma, 0.0, inf, ValueError)
t("lgam0001", math.lgamma, -0.0, inf, ValueError)
t("lgam0002", math.lgamma, inf, inf)
# FIXME(ValueError) t("lgam0003", math.lgamma, -inf, inf)
t("lgam0004", math.lgamma, nan, nan)
# negative integers
t("lgam0010", math.lgamma, -1, inf, ValueError)
t("lgam0011", math.lgamma, -2, inf, ValueError)
t("lgam0012", math.lgamma, -1e16, inf, ValueError)
t("lgam0013", math.lgamma, -1e300, inf, ValueError)
t("lgam0014", math.lgamma, -1.79e308, inf, ValueError)
# small positive integers give factorials
t("lgam0020", math.lgamma, 1, 0.0)
t("lgam0021", math.lgamma, 2, 0.0)
t("lgam0022", math.lgamma, 3, 0.69314718055994529)
t("lgam0023", math.lgamma, 4, 1.791759469228055)
t("lgam0024", math.lgamma, 5, 3.1780538303479458)
t("lgam0025", math.lgamma, 6, 4.7874917427820458)
# half integers
t("lgam0030", math.lgamma, 0.5, 0.57236494292470008)
t("lgam0031", math.lgamma, 1.5, -0.12078223763524522)
t("lgam0032", math.lgamma, 2.5, 0.28468287047291918)
t("lgam0033", math.lgamma, 3.5, 1.2009736023470743)
t("lgam0034", math.lgamma, -0.5, 1.2655121234846454)
t("lgam0035", math.lgamma, -1.5, 0.86004701537648098)
t("lgam0036", math.lgamma, -2.5, -0.056243716497674054)
t("lgam0037", math.lgamma, -3.5, -1.309006684993042)
# values near 0
t("lgam0040", math.lgamma, 0.1, 2.252712651734206)
t("lgam0041", math.lgamma, 0.01, 4.5994798780420219)
t("lgam0042", math.lgamma, 1e-8, 18.420680738180209)
t("lgam0043", math.lgamma, 1e-16, 36.841361487904734)
t("lgam0044", math.lgamma, 1e-30, 69.077552789821368)
t("lgam0045", math.lgamma, 1e-160, 368.41361487904732)
# FIXME(inaccurate) t("lgam0046", math.lgamma, 1e-308, 709.19620864216608)
# FIXME(inaccurate) t("lgam0047", math.lgamma, 5.6e-309, 709.77602713741896)
# FIXME(inaccurate) t("lgam0048", math.lgamma, 5.5e-309, 709.79404564292167)
# FIXME(inaccurate) t("lgam0049", math.lgamma, 1e-309, 711.49879373516012)
# FIXME(inaccurate) t("lgam0050", math.lgamma, 1e-323, 743.74692474082133)
# FIXME(inaccurate) t("lgam0051", math.lgamma, 5e-324, 744.44007192138122)
t("lgam0060", math.lgamma, -0.1, 2.3689613327287886)
t("lgam0061", math.lgamma, -0.01, 4.6110249927528013)
t("lgam0062", math.lgamma, -1e-8, 18.420680749724522)
t("lgam0063", math.lgamma, -1e-16, 36.841361487904734)
t("lgam0064", math.lgamma, -1e-30, 69.077552789821368)
t("lgam0065", math.lgamma, -1e-160, 368.41361487904732)
# FIXME(inaccurate) t("lgam0066", math.lgamma, -1e-308, 709.19620864216608)
# FIXME(inaccurate) t("lgam0067", math.lgamma, -5.6e-309, 709.77602713741896)
# FIXME(inaccurate) t("lgam0068", math.lgamma, -5.5e-309, 709.79404564292167)
# FIXME(inaccurate) t("lgam0069", math.lgamma, -1e-309, 711.49879373516012)
# FIXME(inaccurate) t("lgam0070", math.lgamma, -1e-323, 743.74692474082133)
# FIXME(inaccurate) t("lgam0071", math.lgamma, -5e-324, 744.44007192138122)
# values near negative integers
t("lgam0080", math.lgamma, -0.99999999999999989, 36.736800569677101)
t("lgam0081", math.lgamma, -1.0000000000000002, 36.043653389117154)
t("lgam0082", math.lgamma, -1.9999999999999998, 35.350506208557213)
t("lgam0083", math.lgamma, -2.0000000000000004, 34.657359027997266)
t("lgam0084", math.lgamma, -100.00000000000001, -331.85460524980607)
t("lgam0085", math.lgamma, -99.999999999999986, -331.85460524980596)
# large inputs
t("lgam0100", math.lgamma, 170, 701.43726380873704)
t("lgam0101", math.lgamma, 171, 706.57306224578736)
t("lgam0102", math.lgamma, 171.624, 709.78077443669895)
t("lgam0103", math.lgamma, 171.625, 709.78591682948365)
t("lgam0104", math.lgamma, 172, 711.71472580228999)
t("lgam0105", math.lgamma, 2000, 13198.923448054265)
t("lgam0106", math.lgamma, 2.55998332785163e305, 1.7976931348623099e+308)
t("lgam0107", math.lgamma, 2.55998332785164e305, inf, OverflowError)
t("lgam0108", math.lgamma, 1.7e308, inf, OverflowError)
# inputs for which gamma(x) is tiny
t("lgam0120", math.lgamma, -100.5, -364.90096830942736)
t("lgam0121", math.lgamma, -160.5, -656.88005261126432)
t("lgam0122", math.lgamma, -170.5, -707.99843314507882)
t("lgam0123", math.lgamma, -171.5, -713.14301641168481)
t("lgam0124", math.lgamma, -176.5, -738.95247590846486)
t("lgam0125", math.lgamma, -177.5, -744.13144651738037)
t("lgam0126", math.lgamma, -178.5, -749.3160351186001)
t("lgam0130", math.lgamma, -1000.5, -5914.4377011168517)
t("lgam0131", math.lgamma, -30000.5, -279278.6629959144)
# FIXME t("lgam0132", math.lgamma, -4503599627370495.5, -1.5782258434492883e+17)
# results close to 0: positive argument ...
t("lgam0150", math.lgamma, 0.99999999999999989, 6.4083812134800075e-17)
t("lgam0151", math.lgamma, 1.0000000000000002, -1.2816762426960008e-16)
t("lgam0152", math.lgamma, 1.9999999999999998, -9.3876980655431170e-17)
t("lgam0153", math.lgamma, 2.0000000000000004, 1.8775396131086244e-16)
# ... and negative argument
# these are very inaccurate in python3
t("lgam0160", math.lgamma, -2.7476826467, -5.2477408147689136e-11)
t("lgam0161", math.lgamma, -2.457024738, 3.3464637541912932e-10)
#
# gamma: Gamma function --
#
# special values
t("gam0000", math.gamma, 0.0, inf, ValueError)
t("gam0001", math.gamma, -0.0, -inf, ValueError)
t("gam0002", math.gamma, inf, inf)
t("gam0003", math.gamma, -inf, nan, ValueError)
t("gam0004", math.gamma, nan, nan)
# negative integers inputs are invalid
t("gam0010", math.gamma, -1, nan, ValueError)
t("gam0011", math.gamma, -2, nan, ValueError)
t("gam0012", math.gamma, -1e16, nan, ValueError)
t("gam0013", math.gamma, -1e300, nan, ValueError)
# small positive integers give factorials
t("gam0020", math.gamma, 1, 1)
t("gam0021", math.gamma, 2, 1)
t("gam0022", math.gamma, 3, 2)
t("gam0023", math.gamma, 4, 6)
t("gam0024", math.gamma, 5, 24)
t("gam0025", math.gamma, 6, 120)
# half integers
t("gam0030", math.gamma, 0.5, 1.7724538509055161)
t("gam0031", math.gamma, 1.5, 0.88622692545275805)
t("gam0032", math.gamma, 2.5, 1.3293403881791370)
t("gam0033", math.gamma, 3.5, 3.3233509704478426)
t("gam0034", math.gamma, -0.5, -3.5449077018110322)
t("gam0035", math.gamma, -1.5, 2.3632718012073548)
t("gam0036", math.gamma, -2.5, -0.94530872048294190)
t("gam0037", math.gamma, -3.5, 0.27008820585226911)
# values near 0
t("gam0040", math.gamma, 0.1, 9.5135076986687306)
t("gam0041", math.gamma, 0.01, 99.432585119150602)
t("gam0042", math.gamma, 1e-8, 99999999.422784343)
t("gam0043", math.gamma, 1e-16, 10000000000000000)
t("gam0044", math.gamma, 1e-30, 9.9999999999999988e+29)
t("gam0045", math.gamma, 1e-160, 1.0000000000000000e+160)
t("gam0046", math.gamma, 1e-308, 1.0000000000000000e+308)
t("gam0047", math.gamma, 5.6e-309, 1.7857142857142848e+308)
t("gam0048", math.gamma, 5.5e-309, inf, OverflowError)
t("gam0049", math.gamma, 1e-309, inf, OverflowError)
t("gam0050", math.gamma, 1e-323, inf, OverflowError)
t("gam0051", math.gamma, 5e-324, inf, OverflowError)
t("gam0060", math.gamma, -0.1, -10.686287021193193)
t("gam0061", math.gamma, -0.01, -100.58719796441078)
t("gam0062", math.gamma, -1e-8, -100000000.57721567)
t("gam0063", math.gamma, -1e-16, -10000000000000000)
t("gam0064", math.gamma, -1e-30, -9.9999999999999988e+29)
t("gam0065", math.gamma, -1e-160, -1.0000000000000000e+160)
t("gam0066", math.gamma, -1e-308, -1.0000000000000000e+308)
t("gam0067", math.gamma, -5.6e-309, -1.7857142857142848e+308)
t("gam0068", math.gamma, -5.5e-309, -inf, OverflowError)
t("gam0069", math.gamma, -1e-309, -inf, OverflowError)
t("gam0070", math.gamma, -1e-323, -inf, OverflowError)
t("gam0071", math.gamma, -5e-324, -inf, OverflowError)
# values near negative integers
t("gam0080", math.gamma, -0.99999999999999989, -9007199254740992.0)
t("gam0081", math.gamma, -1.0000000000000002, 4503599627370495.5)
t("gam0082", math.gamma, -1.9999999999999998, 2251799813685248.5)
t("gam0083", math.gamma, -2.0000000000000004, -1125899906842623.5)
t("gam0084", math.gamma, -100.00000000000001, -7.5400833348831090e-145)
t("gam0085", math.gamma, -99.999999999999986, 7.5400833348840962e-145)
# large inputs
t("gam0100", math.gamma, 170, 4.2690680090047051e+304)
t("gam0101", math.gamma, 171, 7.2574156153079990e+306)
# FIXME(overflows) t("gam0102", math.gamma, 171.624, 1.7942117599248104e+308)
t("gam0103", math.gamma, 171.625, inf, OverflowError)
t("gam0104", math.gamma, 172, inf, OverflowError)
t("gam0105", math.gamma, 2000, inf, OverflowError)
t("gam0106", math.gamma, 1.7e308, inf, OverflowError)
# inputs for which gamma(x) is tiny
t("gam0120", math.gamma, -100.5, -3.3536908198076787e-159)
t("gam0121", math.gamma, -160.5, -5.2555464470078293e-286)
t("gam0122", math.gamma, -170.5, -3.3127395215386074e-308)
# Reported as https://github.com/golang/go/issues/11441
# FIXME(overflows) t("gam0123", math.gamma, -171.5, 1.9316265431711902e-310)
# FIXME(overflows) t("gam0124", math.gamma, -176.5, -1.1956388629358166e-321)
# FIXME(overflows) t("gam0125", math.gamma, -177.5, 4.9406564584124654e-324)
# FIXME(overflows) t("gam0126", math.gamma, -178.5, -0.0)
# FIXME(overflows) t("gam0127", math.gamma, -179.5, 0.0)
# FIXME(overflows) t("gam0128", math.gamma, -201.0001, 0.0)
# FIXME(overflows) t("gam0129", math.gamma, -202.9999, -0.0)
# FIXME(overflows) t("gam0130", math.gamma, -1000.5, -0.0)
# FIXME(overflows) t("gam0131", math.gamma, -1000000000.3, -0.0)
# FIXME(overflows) t("gam0132", math.gamma, -4503599627370495.5, 0.0)
# inputs that cause problems for the standard reflection formula,
# thanks to loss of accuracy in 1-x
t("gam0140", math.gamma, -63.349078729022985, 4.1777971677761880e-88)
t("gam0141", math.gamma, -127.45117632943295, 1.1831110896236810e-214)
#
# log1p: log(1 + x), without precision loss for small x --
#
# special values
t("log1p0000", math.log1p, 0.0, 0.0)
t("log1p0001", math.log1p, -0.0, -0.0)
t("log1p0002", math.log1p, inf, inf)
t("log1p0003", math.log1p, -inf, nan, ValueError)
t("log1p0004", math.log1p, nan, nan)
# singularity at -1.0
t("log1p0010", math.log1p, -1.0, -inf, ValueError)
t("log1p0011", math.log1p, -0.9999999999999999, -36.736800569677101)
# finite values < 1.0 are invalid
t("log1p0020", math.log1p, -1.0000000000000002, nan, ValueError)
t("log1p0021", math.log1p, -1.1, nan, ValueError)
t("log1p0022", math.log1p, -2.0, nan, ValueError)
t("log1p0023", math.log1p, -1e300, nan, ValueError)
# tiny x: log1p(x) ~ x
t("log1p0110", math.log1p, 5e-324, 5e-324)
t("log1p0111", math.log1p, 1e-320, 1e-320)
t("log1p0112", math.log1p, 1e-300, 1e-300)
t("log1p0113", math.log1p, 1e-150, 1e-150)
t("log1p0114", math.log1p, 1e-20, 1e-20)
t("log1p0120", math.log1p, -5e-324, -5e-324)
t("log1p0121", math.log1p, -1e-320, -1e-320)
t("log1p0122", math.log1p, -1e-300, -1e-300)
t("log1p0123", math.log1p, -1e-150, -1e-150)
t("log1p0124", math.log1p, -1e-20, -1e-20)
# some (mostly) random small and moderate-sized values
t("log1p0200", math.log1p, -0.89156889782277482, -2.2216403106762863)
t("log1p0201", math.log1p, -0.23858496047770464, -0.27257668276980057)
t("log1p0202", math.log1p, -0.011641726191307515, -0.011710021654495657)
t("log1p0203", math.log1p, -0.0090126398571693817, -0.0090534993825007650)
t("log1p0204", math.log1p, -0.00023442805985712781, -0.00023445554240995693)
t("log1p0205", math.log1p, -1.5672870980936349e-5, -1.5672993801662046e-5)
t("log1p0206", math.log1p, -7.9650013274825295e-6, -7.9650330482740401e-6)
t("log1p0207", math.log1p, -2.5202948343227410e-7, -2.5202951519170971e-7)
t("log1p0208", math.log1p, -8.2446372820745855e-11, -8.2446372824144559e-11)
t("log1p0209", math.log1p, -8.1663670046490789e-12, -8.1663670046824230e-12)
t("log1p0210", math.log1p, 7.0351735084656292e-18, 7.0351735084656292e-18)
t("log1p0211", math.log1p, 5.2732161907375226e-12, 5.2732161907236188e-12)
t("log1p0212", math.log1p, 1.0000000000000000e-10, 9.9999999995000007e-11)
t("log1p0213", math.log1p, 2.1401273266000197e-9, 2.1401273243099470e-9)
t("log1p0214", math.log1p, 1.2668914653979560e-8, 1.2668914573728861e-8)
t("log1p0215", math.log1p, 1.6250007816299069e-6, 1.6249994613175672e-6)
t("log1p0216", math.log1p, 8.3740495645839399e-6, 8.3740145024266269e-6)
t("log1p0217", math.log1p, 3.0000000000000001e-5, 2.9999550008999799e-5)
t("log1p0218", math.log1p, 0.0070000000000000001, 0.0069756137364252423)
t("log1p0219", math.log1p, 0.013026235315053002, 0.012942123564008787)
t("log1p0220", math.log1p, 0.013497160797236184, 0.013406885521915038)
t("log1p0221", math.log1p, 0.027625599078135284, 0.027250897463483054)
t("log1p0222", math.log1p, 0.14179687245544870, 0.13260322540908789)
# large values
t("log1p0300", math.log1p, 1.7976931348623157e+308, 709.78271289338397)
t("log1p0301", math.log1p, 1.0000000000000001e+300, 690.77552789821368)
t("log1p0302", math.log1p, 1.0000000000000001e+70, 161.18095650958321)
t("log1p0303", math.log1p, 10000000000.000000, 23.025850930040455)
# other values transferred from testLog1p in test_math
t("log1p0400", math.log1p, -0.63212055882855767, -1.0000000000000000)
t("log1p0401", math.log1p, 1.7182818284590451, 1.0000000000000000)
t("log1p0402", math.log1p, 1.0000000000000000, 0.69314718055994529)
t("log1p0403", math.log1p, 1.2379400392853803e+27, 62.383246250395075)
#
# expm1: exp(x) - 1, without precision loss for small x --
#
# special values
t("expm10000", math.expm1, 0.0, 0.0)
t("expm10001", math.expm1, -0.0, -0.0)
t("expm10002", math.expm1, inf, inf)
t("expm10003", math.expm1, -inf, -1.0)
t("expm10004", math.expm1, nan, nan)
# expm1(x) ~ x for tiny x
t("expm10010", math.expm1, 5e-324, 5e-324)
t("expm10011", math.expm1, 1e-320, 1e-320)
t("expm10012", math.expm1, 1e-300, 1e-300)
t("expm10013", math.expm1, 1e-150, 1e-150)
t("expm10014", math.expm1, 1e-20, 1e-20)
t("expm10020", math.expm1, -5e-324, -5e-324)
t("expm10021", math.expm1, -1e-320, -1e-320)
t("expm10022", math.expm1, -1e-300, -1e-300)
t("expm10023", math.expm1, -1e-150, -1e-150)
t("expm10024", math.expm1, -1e-20, -1e-20)
# moderate sized values, where direct evaluation runs into trouble
t("expm10100", math.expm1, 1e-10, 1.0000000000500000e-10)
t("expm10101", math.expm1, -9.9999999999999995e-08, -9.9999995000000163e-8)
t("expm10102", math.expm1, 3.0000000000000001e-05, 3.0000450004500034e-5)
t("expm10103", math.expm1, -0.0070000000000000001, -0.0069755570667648951)
t("expm10104", math.expm1, -0.071499208740094633, -0.069002985744820250)
t("expm10105", math.expm1, -0.063296004180116799, -0.061334416373633009)
t("expm10106", math.expm1, 0.02390954035597756, 0.024197665143819942)
t("expm10107", math.expm1, 0.085637352649044901, 0.089411184580357767)
t("expm10108", math.expm1, 0.5966174947411006, 0.81596588596501485)
t("expm10109", math.expm1, 0.30247206212075139, 0.35319987035848677)
t("expm10110", math.expm1, 0.74574727375889516, 1.1080161116737459)
t("expm10111", math.expm1, 0.97767512926555711, 1.6582689207372185)
t("expm10112", math.expm1, 0.8450154566787712, 1.3280137976535897)
t("expm10113", math.expm1, -0.13979260323125264, -0.13046144381396060)
t("expm10114", math.expm1, -0.52899322039643271, -0.41080213643695923)
t("expm10115", math.expm1, -0.74083261478900631, -0.52328317124797097)
t("expm10116", math.expm1, -0.93847766984546055, -0.60877704724085946)
t("expm10117", math.expm1, 10.0, 22025.465794806718)
t("expm10118", math.expm1, 27.0, 532048240600.79865)
t("expm10119", math.expm1, 123, 2.6195173187490626e+53)
t("expm10120", math.expm1, -12.0, -0.99999385578764666)
t("expm10121", math.expm1, -35.100000000000001, -0.99999999999999944)
# extreme negative values
t("expm10201", math.expm1, -37.0, -0.99999999999999989)
t("expm10200", math.expm1, -38.0, -1.0)
# FIXME(overflows) t("expm10210", math.expm1, -710.0, -1.0)
# the formula expm1(x) = 2 * sinh(x/2) * exp(x/2) doesn't work so
# well when exp(x/2) is subnormal or underflows to zero; check we're
# not using it!
# Reported as https://github.com/golang/go/issues/11442
# FIXME(overflows) t("expm10211", math.expm1, -1420.0, -1.0)
# FIXME(overflows) t("expm10212", math.expm1, -1450.0, -1.0)
# FIXME(overflows) t("expm10213", math.expm1, -1500.0, -1.0)
# FIXME(overflows) t("expm10214", math.expm1, -1e50, -1.0)
# FIXME(overflows) t("expm10215", math.expm1, -1.79e308, -1.0)
# extreme positive values
# FIXME(fails on 32 bit) t("expm10300", math.expm1, 300, 1.9424263952412558e+130)
# FIXME(fails on 32 bit) t("expm10301", math.expm1, 700, 1.0142320547350045e+304)
# the next test (expm10302) is disabled because it causes failure on
# OS X 10.4/Intel: apparently all values over 709.78 produce an
# overflow on that platform. See issue #7575.
# expm10302 expm1 709.78271289328393 -> 1.7976931346824240e+308
t("expm10303", math.expm1, 709.78271289348402, inf, OverflowError)
t("expm10304", math.expm1, 1000, inf, OverflowError)
t("expm10305", math.expm1, 1e50, inf, OverflowError)
t("expm10306", math.expm1, 1.79e308, inf, OverflowError)
# weaker version of expm10302
# FIXME(fails on 32 bit) t("expm10307", math.expm1, 709.5, 1.3549863193146328e+308)
#
# log2: log to base 2 --
#
# special values
t("log20000", math.log2, 0.0, -inf, ValueError)
t("log20001", math.log2, -0.0, -inf, ValueError)
t("log20002", math.log2, inf, inf)
t("log20003", math.log2, -inf, nan, ValueError)
t("log20004", math.log2, nan, nan)
# exact value at 1.0
t("log20010", math.log2, 1.0, 0.0)
# negatives
t("log20020", math.log2, -5e-324, nan, ValueError)
t("log20021", math.log2, -1.0, nan, ValueError)
t("log20022", math.log2, -1.7e-308, nan, ValueError)
# exact values at powers of 2
t("log20100", math.log2, 2.0, 1.0)
t("log20101", math.log2, 4.0, 2.0)
t("log20102", math.log2, 8.0, 3.0)
t("log20103", math.log2, 16.0, 4.0)
t("log20104", math.log2, 32.0, 5.0)
t("log20105", math.log2, 64.0, 6.0)
t("log20106", math.log2, 128.0, 7.0)
t("log20107", math.log2, 256.0, 8.0)
t("log20108", math.log2, 512.0, 9.0)
t("log20109", math.log2, 1024.0, 10.0)
t("log20110", math.log2, 2048.0, 11.0)
t("log20200", math.log2, 0.5, -1.0)
t("log20201", math.log2, 0.25, -2.0)
t("log20202", math.log2, 0.125, -3.0)
t("log20203", math.log2, 0.0625, -4.0)
# values close to 1.0
# FIXME(inaccurate) t("log20300", math.log2, 1.0000000000000002, 3.2034265038149171e-16)
# FIXME(inaccurate) t("log20301", math.log2, 1.0000000001, 1.4426951601859516e-10)
# FIXME(inaccurate) t("log20302", math.log2, 1.00001, 1.4426878274712997e-5)
t("log20310", math.log2, 0.9999999999999999, -1.6017132519074588e-16)
t("log20311", math.log2, 0.9999999999, -1.4426951603302210e-10)
t("log20312", math.log2, 0.99999, -1.4427022544056922e-5)
# tiny values
t("log20400", math.log2, 5e-324, -1074.0)
t("log20401", math.log2, 1e-323, -1073.0)
t("log20402", math.log2, 1.5e-323, -1072.4150374992789)
t("log20403", math.log2, 2e-323, -1072.0)
t("log20410", math.log2, 1e-308, -1023.1538532253076)
t("log20411", math.log2, 2.2250738585072014e-308, -1022.0)
t("log20412", math.log2, 4.4501477170144028e-308, -1021.0)
t("log20413", math.log2, 1e-307, -1019.8319251304202)
# huge values
t("log20500", math.log2, 1.7976931348623157e+308, 1024.0)
t("log20501", math.log2, 1.7e+308, 1023.9193879716706)
t("log20502", math.log2, 8.9884656743115795e+307, 1023.0)
# selection of random values
t("log20600", math.log2, -7.2174324841039838e+289, nan, ValueError)
t("log20601", math.log2, -2.861319734089617e+265, nan, ValueError)
t("log20602", math.log2, -4.3507646894008962e+257, nan, ValueError)
t("log20603", math.log2, -6.6717265307520224e+234, nan, ValueError)
t("log20604", math.log2, -3.9118023786619294e+229, nan, ValueError)
t("log20605", math.log2, -1.5478221302505161e+206, nan, ValueError)
t("log20606", math.log2, -1.4380485131364602e+200, nan, ValueError)
t("log20607", math.log2, -3.7235198730382645e+185, nan, ValueError)
t("log20608", math.log2, -1.0472242235095724e+184, nan, ValueError)
t("log20609", math.log2, -5.0141781956163884e+160, nan, ValueError)
t("log20610", math.log2, -2.1157958031160324e+124, nan, ValueError)
t("log20611", math.log2, -7.9677558612567718e+90, nan, ValueError)
t("log20612", math.log2, -5.5553906194063732e+45, nan, ValueError)
t("log20613", math.log2, -16573900952607.953, nan, ValueError)
t("log20614", math.log2, -37198371019.888618, nan, ValueError)
t("log20615", math.log2, -6.0727115121422674e-32, nan, ValueError)
t("log20616", math.log2, -2.5406841656526057e-38, nan, ValueError)
t("log20617", math.log2, -4.9056766703267657e-43, nan, ValueError)
t("log20618", math.log2, -2.1646786075228305e-71, nan, ValueError)
t("log20619", math.log2, -2.470826790488573e-78, nan, ValueError)
t("log20620", math.log2, -3.8661709303489064e-165, nan, ValueError)
t("log20621", math.log2, -1.0516496976649986e-182, nan, ValueError)
t("log20622", math.log2, -1.5935458614317996e-255, nan, ValueError)
t("log20623", math.log2, -2.8750977267336654e-293, nan, ValueError)
t("log20624", math.log2, -7.6079466794732585e-296, nan, ValueError)
t("log20625", math.log2, 3.2073253539988545e-307, -1018.1505544209213)
t("log20626", math.log2, 1.674937885472249e-244, -809.80634755783126)
t("log20627", math.log2, 1.0911259044931283e-214, -710.76679472274213)
t("log20628", math.log2, 2.0275372624809709e-154, -510.55719818383272)
t("log20629", math.log2, 7.3926087369631841e-115, -379.13564735312292)
t("log20630", math.log2, 1.3480198206342423e-86, -285.25497445094436)
t("log20631", math.log2, 8.9927384655719947e-83, -272.55127136401637)
t("log20632", math.log2, 3.1452398713597487e-60, -197.66251564496875)
t("log20633", math.log2, 7.0706573215457351e-55, -179.88420087782217)
t("log20634", math.log2, 3.1258285390731669e-49, -161.13023800505653)
t("log20635", math.log2, 8.2253046627829942e-41, -133.15898277355879)
t("log20636", math.log2, 7.8691367397519897e+49, 165.75068202732419)
t("log20637", math.log2, 2.9920561983925013e+64, 214.18453534573757)
t("log20638", math.log2, 4.7827254553946841e+77, 258.04629628445673)
t("log20639", math.log2, 3.1903566496481868e+105, 350.47616767491166)
t("log20640", math.log2, 5.6195082449502419e+113, 377.86831861008250)
t("log20641", math.log2, 9.9625658250651047e+125, 418.55752921228753)
t("log20642", math.log2, 2.7358945220961532e+145, 483.13158636923413)
t("log20643", math.log2, 2.785842387926931e+174, 579.49360214860280)
t("log20644", math.log2, 2.4169172507252751e+193, 642.40529039289652)
t("log20645", math.log2, 3.1689091206395632e+205, 682.65924573798395)
t("log20646", math.log2, 2.535995592365391e+208, 692.30359597460460)
t("log20647", math.log2, 6.2011236566089916e+233, 776.64177576730913)
t("log20648", math.log2, 2.1843274820677632e+253, 841.57499717289647)
t("log20649", math.log2, 8.7493931063474791e+297, 989.74182713073981)
doc="finished"
| [
"math.isnan",
"math.isinf"
] | [((2624, 2640), 'math.isinf', 'math.isinf', (['want'], {}), '(want)\n', (2634, 2640), False, 'import math\n'), ((3104, 3120), 'math.isnan', 'math.isnan', (['want'], {}), '(want)\n', (3114, 3120), False, 'import math\n'), ((3125, 3140), 'math.isnan', 'math.isnan', (['got'], {}), '(got)\n', (3135, 3140), False, 'import math\n')] |
#%%
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy as np
# Import MINST data
from tensorflow.examples.tutorials.mnist import input_data
# Download (if needed) and load MNIST with labels as one-hot vectors.
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Parameters
learning_rate = 0.01  # Adam step size
max_samples = 400000  # stop training once this many samples have been seen
batch_size = 128
display_step = 10  # print loss/accuracy every `display_step` batches
# Network Parameters
# Each 28x28 image is fed as a 28-step sequence of 28-pixel rows.
n_input = 28 # MNIST data input (img shape: 28*28)
n_steps = 28 # timesteps
n_hidden = 256 # hidden layer num of features
n_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])
# Define weights
weights = {
    # Hidden layer weights => 2*n_hidden because of forward + backward cells
    'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}
def BiRNN(x, weights, biases):
    """Build a bidirectional LSTM over the input batch and return logits.

    x       -- placeholder of shape (batch_size, n_steps, n_input)
    weights -- dict with 'out': (2*n_hidden, n_classes) projection matrix
    biases  -- dict with 'out': (n_classes,) bias vector
    """
    # static_bidirectional_rnn expects a length-n_steps list of tensors of
    # shape (batch_size, n_input), so reshape the 3-D batch accordingly.
    x = tf.transpose(x, [1, 0, 2])    # -> (n_steps, batch_size, n_input)
    x = tf.reshape(x, [-1, n_input])  # -> (n_steps*batch_size, n_input)
    x = tf.split(x, n_steps)          # -> list of n_steps tensors
    # One LSTM cell per direction.
    fw_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    bw_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn(
        fw_cell, bw_cell, x, dtype=tf.float32)
    # Linear activation on the final time step's (concatenated) output.
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
# Logits for the current batch.
pred = BiRNN(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate Model: fraction of batch items whose argmax matches the label.
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until reach max iterations
    while step * batch_size < max_samples:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape data to get 28 seq of 28 elements
        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")
    # Calculate accuracy for the first 10000 mnist test images
    test_len = 10000
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:", \
        sess.run(accuracy, feed_dict={x: test_data, y: test_label}))
| [
"tensorflow.cast",
"tensorflow.train.AdamOptimizer",
"tensorflow.random_normal",
"tensorflow.transpose",
"tensorflow.placeholder",
"tensorflow.split",
"tensorflow.Session",
"tensorflow.global_variables_initializer",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.argma... | [((824, 877), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""/tmp/data/"""'], {'one_hot': '(True)'}), "('/tmp/data/', one_hot=True)\n", (849, 877), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((1187, 1236), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, n_steps, n_input]'], {}), "('float', [None, n_steps, n_input])\n", (1201, 1236), True, 'import tensorflow as tf\n'), ((1241, 1283), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, n_classes]'], {}), "('float', [None, n_classes])\n", (1255, 1283), True, 'import tensorflow as tf\n'), ((3228, 3261), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3259, 3261), True, 'import tensorflow as tf\n'), ((1826, 1852), 'tensorflow.transpose', 'tf.transpose', (['x', '[1, 0, 2]'], {}), '(x, [1, 0, 2])\n', (1838, 1852), True, 'import tensorflow as tf\n'), ((1908, 1936), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, n_input]'], {}), '(x, [-1, n_input])\n', (1918, 1936), True, 'import tensorflow as tf\n'), ((2023, 2043), 'tensorflow.split', 'tf.split', (['x', 'n_steps'], {}), '(x, n_steps)\n', (2031, 2043), True, 'import tensorflow as tf\n'), ((2133, 2188), 'tensorflow.contrib.rnn.BasicLSTMCell', 'tf.contrib.rnn.BasicLSTMCell', (['n_hidden'], {'forget_bias': '(1.0)'}), '(n_hidden, forget_bias=1.0)\n', (2161, 2188), True, 'import tensorflow as tf\n'), ((2238, 2293), 'tensorflow.contrib.rnn.BasicLSTMCell', 'tf.contrib.rnn.BasicLSTMCell', (['n_hidden'], {'forget_bias': '(1.0)'}), '(n_hidden, forget_bias=1.0)\n', (2266, 2293), True, 'import tensorflow as tf\n'), ((2352, 2444), 'tensorflow.contrib.rnn.static_bidirectional_rnn', 'tf.contrib.rnn.static_bidirectional_rnn', (['lstm_fw_cell', 'lstm_bw_cell', 'x'], {'dtype': 'tf.float32'}), '(lstm_fw_cell, lstm_bw_cell, x,\n dtype=tf.float32)\n', (2391, 2444), True, 'import tensorflow as tf\n'), 
((2910, 2972), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'pred', 'labels': 'y'}), '(logits=pred, labels=y)\n', (2949, 2972), True, 'import tensorflow as tf\n'), ((3095, 3113), 'tensorflow.argmax', 'tf.argmax', (['pred', '(1)'], {}), '(pred, 1)\n', (3104, 3113), True, 'import tensorflow as tf\n'), ((3114, 3129), 'tensorflow.argmax', 'tf.argmax', (['y', '(1)'], {}), '(y, 1)\n', (3123, 3129), True, 'import tensorflow as tf\n'), ((3156, 3189), 'tensorflow.cast', 'tf.cast', (['correct_pred', 'tf.float32'], {}), '(correct_pred, tf.float32)\n', (3163, 3189), True, 'import tensorflow as tf\n'), ((3289, 3301), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3299, 3301), True, 'import tensorflow as tf\n'), ((1413, 1456), 'tensorflow.random_normal', 'tf.random_normal', (['[2 * n_hidden, n_classes]'], {}), '([2 * n_hidden, n_classes])\n', (1429, 1456), True, 'import tensorflow as tf\n'), ((1492, 1521), 'tensorflow.random_normal', 'tf.random_normal', (['[n_classes]'], {}), '([n_classes])\n', (1508, 1521), True, 'import tensorflow as tf\n'), ((2765, 2803), 'tensorflow.matmul', 'tf.matmul', (['outputs[-1]', "weights['out']"], {}), "(outputs[-1], weights['out'])\n", (2774, 2803), True, 'import tensorflow as tf\n'), ((2986, 3037), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (3008, 3037), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
"""Development entry point: run the Flask application with optional
--host/--port overrides from the command line."""
from app import app
import argparse

HOST = '127.0.0.1'
PORT = 8080
PROJECT_NAME = 'idealtrust'

if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog=PROJECT_NAME, usage='%(prog)s [options]')
    # type=int so a user-supplied --port matches the integer default
    # instead of arriving as a string.
    parser.add_argument('--port', type=int,
                        help='port (default: {0})'.format(PORT), default=PORT)
    parser.add_argument('--host',
                        help='host (default: {0})'.format(HOST), default=HOST)
    argv = parser.parse_args()
    # use_reloader=False keeps a single process even with debug=True.
    app.run(host=argv.host, port=argv.port, debug=True, use_reloader=False)
| [
"app.app.run",
"argparse.ArgumentParser"
] | [((160, 230), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': 'PROJECT_NAME', 'usage': '"""%(prog)s [options]"""'}), "(prog=PROJECT_NAME, usage='%(prog)s [options]')\n", (183, 230), False, 'import argparse\n'), ((445, 516), 'app.app.run', 'app.run', ([], {'host': 'argv.host', 'port': 'argv.port', 'debug': '(True)', 'use_reloader': '(False)'}), '(host=argv.host, port=argv.port, debug=True, use_reloader=False)\n', (452, 516), False, 'from app import app\n')] |
import json
import os
import pymongo
'''
fileService.py
Author: <NAME>
'''
mongo_client = pymongo.MongoClient()
#db = {}
'''
initialize
Takes a 'unique_id' identifier and sets up a database in MongoDB
and ensures that that database has collections associated with
the various file types that are stored.
'''
def initialize(unique_id='default'):
    """Return the MongoDB database for *unique_id*, creating the standard collections.

    Ensures the 'ontology', 'abstraction', 'commands' and 'linkograph'
    collections exist so later code can read/write them unconditionally.

    Args:
        unique_id: name of the per-session MongoDB database.

    Returns:
        The pymongo database handle.
    """
    db = mongo_client[unique_id]
    # Query the collection list once instead of once per collection.
    existing = set(db.collection_names())
    for collection in ('ontology', 'abstraction', 'commands', 'linkograph'):
        if collection not in existing:
            db.create_collection(collection)
    return db
'''
FileNotFound
Custom exception class for reporting a file not found exception.
Value should be the name of the file as a string.
'''
class FileNotFound(Exception):
    """Raised when a file with the given name is absent from the store."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "File {0} not found!".format(self.value)
'''
FileTypeNotFound
Custom exception class for reporting a file type not found.
Value should be the name of the file type as a string.
'''
class FileTypeNotFound(Exception):
    """Raised when an unknown file-type name is requested."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "File type {0} not found!".format(self.value)
'''
FileTypeMismatch
Custom exception class for reporting a conflict in a type given
by the user and a type found by the type detection system.
Given and found should both be the file types as strings.
'''
class FileTypeMismatch(Exception):
    """Raised when a user-supplied file type conflicts with the detected one."""

    def __init__(self, given, found):
        self.given = given
        self.found = found

    def __str__(self):
        return "Given {0}, but found {1}".format(self.given, self.found)
'''
loadFile
Looks for a fileName of fileType. Both arguments are strings.
Upon success returns the file, throws exceptions when either
the type or name is not found.
'''
def loadFile(fileName, fileType, unique_id='default'):
    """Load the stored file *fileName* of type *fileType*.

    Args:
        fileName: name of the file to load.
        fileType: collection/type name ('ontology', 'commands', ...).
        unique_id: per-session database name.

    Returns:
        The stored record, typically {'name': ..., 'content': ...}; the
        caller is responsible for handling this format.

    Raises:
        FileTypeNotFound: if *fileType* is not a known collection.
        FileNotFound: if no record named *fileName* exists.
    """
    db = initialize(unique_id)
    if fileType not in db.collection_names():
        raise FileTypeNotFound(fileType)
    # Single lookup instead of the previous probe-then-fetch double query.
    result = db[fileType].find_one({'name': fileName})
    if result is None:
        raise FileNotFound(fileName)
    result.pop('_id', None)  # strip MongoDB's internal object id
    return result
'''
fileList
Returns a list of all of the file names for files of
type fileType. Argument is a string. Throws error when
fileType is not found.
'''
def fileList(fileType, unique_id='default'):
    """Return the names of all stored files of type *fileType*.

    Args:
        fileType: collection/type name.
        unique_id: per-session database name.

    Raises:
        FileTypeNotFound: if *fileType* is not a known collection.
    """
    db = initialize(unique_id)
    if fileType not in db.collection_names():
        raise FileTypeNotFound(fileType)
    # Comprehension replaces the manual append loop; records without a
    # 'name' key are skipped, as before.
    return [record['name'] for record in db[fileType].find() if 'name' in record]
'''
saveLinko
Helper function for saving a linkograph.
All arguments are strings. Throws an error if the commandsName
file cannot be found.
'''
def saveLinko(fileName, fileContent, commandsName, unique_id):
    """Save a linkograph together with the name of the commands file it references.

    Args:
        fileName: name under which the linkograph is stored.
        fileContent: the linkograph payload.
        commandsName: name of the commands file this linkograph was built from.
        unique_id: per-session database name.

    Raises:
        FileNotFound: if the referenced commands file does not exist.
    """
    db = initialize(unique_id)
    try:
        # Keep the try body minimal: only the existence check can fail here.
        loadFile(commandsName, 'commands', unique_id)
    except (FileNotFound, FileTypeNotFound):
        # The missing file is the *commands* file, not the linkograph being
        # saved — the old code reported fileName, which was misleading.
        raise FileNotFound(commandsName)
    record = {'name': fileName, 'content': fileContent, 'commands': commandsName}
    db['linkograph'].insert_one(record)
    return "File " + fileName + " is saved as type linkograph"
'''
saveFile
Takes a file and the content stored in it and saves it in the file store.
If the fileType is unknown or there is a mismatch, an exception is thrown.
If fileType isn't given, the system will try and detect the file type.
Stores it in the mongo database in the format of {'name':fileName,'content':
fileContent}, except in the case of a linkograph, at which point it the commandsName is stored along with it with a key of 'commands'.
'''
def saveFile(fileName,fileContent,fileType=None,commandsName=None,unique_id='default'):
    """Store *fileContent* under *fileName*, detecting/validating its type.

    If fileType is None the type is auto-detected; otherwise the given type
    must agree with the detected one. Linkographs are delegated to saveLinko
    and additionally require commandsName. A name collision with different
    content triggers a recursive retry with "new" appended to the name.

    Raises:
        FileTypeMismatch: given type disagrees with the detected type.
        FileTypeNotFound: type could not be detected or is unknown.
        FileNotFound: linkograph save without a commands file name.
    """
    db=initialize(unique_id)
    if fileType==None:
        fileType=detectFiletype(fileContent)
    else:
        # User supplied a type: it must match what detection says.
        if not fileType == detectFiletype(fileContent):
            raise FileTypeMismatch(fileType,detectFiletype(fileContent))
    if fileType == "Unknown file":
        raise FileTypeNotFound(fileType)
    if fileType == "linkograph":
        # Linkographs carry a reference to the commands file they came from.
        if commandsName==None:
            raise FileNotFound("commands file")
        return saveLinko(fileName,fileContent,commandsName,unique_id)
    if fileType in db.collection_names():
        if not None==db[fileType].find_one({'name':fileName}):
            if fileContent==db[fileType].find_one({'name':fileName})['content']:
                # Identical content already stored under this name: no-op.
                return "We already have "+fileName
            else:
                # Name collision with different content: append "new" and retry.
                fileName=fileName+"new"
                return saveFile(fileName,fileContent,fileType,unique_id=unique_id)
        else:
            toSave = {}
            toSave['name'] = fileName
            toSave['content'] = fileContent
            db[fileType].insert_one(toSave)
            return "File "+fileName+" saved as type "+fileType
    raise FileTypeNotFound(fileType)
'''
detectFiletype
Function which takes the contents of a file and tries to detect what sort
of file it is. Currently has support for detecting commands, abstraction
and ontology files.
'''
def _detect_list_type(parsed):
    """Classify a JSON list: 'commands', 'linkograph', or 'Unknown file'."""
    if not parsed:
        return "Unknown file"
    head = parsed[0]
    if isinstance(head, dict):
        # A list of dicts with "ts"/"cmd" keys is a command log.
        if "ts" in head and "cmd" in head:
            return "commands"
        return "Unknown file"
    if isinstance(head, list):
        # A linkograph starts with a non-empty list of string labels,
        # followed by triples (3-element lists).
        if not head or not all(isinstance(label, str) for label in head):
            return "Unknown file"
        for entry in parsed[1:]:
            if not (isinstance(entry, list) and len(entry) == 3):
                return "Unknown file"
        return "linkograph"
    return "Unknown file"


def _detect_dict_type(parsed):
    """Classify a JSON dict: 'ontology', 'abstraction', or 'Unknown file'."""
    longest = []
    for value in parsed.values():
        # Every value must be a list; track the longest one as the sample.
        if not isinstance(value, list):
            return "Unknown file"
        if len(value) > len(longest):
            longest = value
    if not longest:
        return "Unknown file"
    if isinstance(longest[0], str):
        return "ontology"
    if isinstance(longest[0], dict) and "command" in longest[0]:
        return "abstraction"
    return "Unknown file"


def detectFiletype(fileContent):
    """Guess the type of *fileContent* from its JSON structure.

    Args:
        fileContent: the raw file text.

    Returns:
        One of "commands", "linkograph", "ontology", "abstraction",
        or "Unknown file" when nothing is recognized.
    """
    try:
        parsed = json.loads(fileContent)
    except (ValueError, TypeError):
        # Not valid JSON (or not str/bytes) — previously a bare except; the
        # analysis below cannot raise, so only the parse needs guarding.
        return "Unknown file"
    if isinstance(parsed, list):
        return _detect_list_type(parsed)
    if isinstance(parsed, dict):
        return _detect_dict_type(parsed)
    return "Unknown file"
#initialize()
| [
"pymongo.MongoClient",
"json.loads"
] | [((94, 115), 'pymongo.MongoClient', 'pymongo.MongoClient', ([], {}), '()\n', (113, 115), False, 'import pymongo\n'), ((5647, 5670), 'json.loads', 'json.loads', (['fileContent'], {}), '(fileContent)\n', (5657, 5670), False, 'import json\n')] |
""" Architecture for SFTMD """
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
import models.archs.arch_util as arch_util
import torch.nn.utils.spectral_norm as spectral_norm
class SFTLayer(nn.Module):
def __init__(self, nf=64, n_condition=10):
super(SFTLayer, self).__init__()
# TODO: can use shared convolution layers to save computation
self.mul_conv1 = nn.Conv2d(nf + n_condition, 32, kernel_size=3, stride=1, padding=1)
self.mul_conv2 = nn.Conv2d(32, nf, kernel_size=3, stride=1, padding=1)
self.add_conv1 = nn.Conv2d(nf + n_condition, 32, kernel_size=3, stride=1, padding=1)
self.add_conv2 = nn.Conv2d(32, nf, kernel_size=3, stride=1, padding=1)
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def forward(self, features, conditions):
cat_input = torch.cat((features, conditions), dim=1)
mul = torch.sigmoid(self.mul_conv2(self.lrelu(self.mul_conv1(cat_input))))
add = self.add_conv2(self.lrelu(self.add_conv1(cat_input)))
return features * mul + add
class SFTLayer_SN(nn.Module):
def __init__(self, nf=64, n_condition=10, n_power_iterations=1, bias_sn=False):
super(SFTLayer_SN, self).__init__()
# TODO: can use shared convolution layers to save computation
self.mul_conv1 = spectral_norm(
nn.Conv2d(nf + n_condition, 32, kernel_size=3, stride=1, padding=1), name='weight',
n_power_iterations=n_power_iterations)
self.mul_conv2 = spectral_norm(nn.Conv2d(32, nf, kernel_size=3, stride=1, padding=1),
name='weight', n_power_iterations=n_power_iterations)
self.add_conv1 = spectral_norm(
nn.Conv2d(nf + n_condition, 32, kernel_size=3, stride=1, padding=1), name='weight',
n_power_iterations=n_power_iterations)
self.add_conv2 = spectral_norm(nn.Conv2d(32, nf, kernel_size=3, stride=1, padding=1),
name='weight', n_power_iterations=n_power_iterations)
if bias_sn:
self.mul_conv1 = spectral_norm(self.mul_conv1, name='bias',
n_power_iterations=n_power_iterations)
self.mul_conv2 = spectral_norm(self.mul_conv2, name='bias',
n_power_iterations=n_power_iterations)
self.add_conv1 = spectral_norm(self.add_conv1, name='bias',
n_power_iterations=n_power_iterations)
self.add_conv2 = spectral_norm(self.add_conv2, name='bias',
n_power_iterations=n_power_iterations)
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def forward(self, features, conditions):
cat_input = torch.cat((features, conditions), dim=1)
mul = torch.sigmoid(self.mul_conv2(self.lrelu(self.mul_conv1(cat_input))))
add = self.add_conv2(self.lrelu(self.add_conv1(cat_input)))
return features * mul + add
class SFTLayer_SN_Norm(nn.Module):
    """SFT layer with spectrally-normalized convolutions, each followed by a
    batch or instance normalization layer.

    Args:
        nf: number of feature channels.
        n_condition: number of condition-map channels.
        n_power_iterations: power iterations for spectral norm.
        norm: 'batch' or 'instance'.

    Raises:
        ValueError: if *norm* is neither 'batch' nor 'instance'.
    """

    def __init__(self, nf=64, n_condition=10, n_power_iterations=1, norm='batch'):
        super(SFTLayer_SN_Norm, self).__init__()
        # TODO: can use shared convolution layers to save computation
        if norm == 'batch':
            norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
        elif norm == 'instance':
            norm_layer = functools.partial(nn.InstanceNorm2d, affine=True,
                                           track_running_stats=True)
        else:
            # Previously an unknown value fell through and left norm_layer
            # unbound, producing a confusing NameError; fail explicitly.
            raise ValueError("norm must be 'batch' or 'instance', got {0!r}".format(norm))
        self.mul_conv1 = spectral_norm(
            nn.Conv2d(nf + n_condition, 32, kernel_size=3, stride=1, padding=1), name='weight',
            n_power_iterations=n_power_iterations)
        self.mul_norm1 = norm_layer(num_features=32)
        self.mul_conv2 = spectral_norm(nn.Conv2d(32, nf, kernel_size=3, stride=1, padding=1),
                                       name='weight', n_power_iterations=n_power_iterations)
        self.mul_norm2 = norm_layer(num_features=nf)
        self.add_conv1 = spectral_norm(
            nn.Conv2d(nf + n_condition, 32, kernel_size=3, stride=1, padding=1), name='weight',
            n_power_iterations=n_power_iterations)
        self.add_norm1 = norm_layer(num_features=32)
        self.add_conv2 = spectral_norm(nn.Conv2d(32, nf, kernel_size=3, stride=1, padding=1),
                                       name='weight', n_power_iterations=n_power_iterations)
        self.add_norm2 = norm_layer(num_features=nf)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, features, conditions):
        cat_input = torch.cat((features, conditions), dim=1)
        mul = torch.sigmoid(
            self.mul_norm2(self.mul_conv2(self.lrelu(self.mul_norm1(self.mul_conv1(cat_input))))))
        add = self.add_norm2(self.add_conv2(self.lrelu(self.add_norm1(self.add_conv1(cat_input)))))
        return features * mul + add
class SFTLayer_SN_ReLU(nn.Module):
def __init__(self, nf=64, n_condition=10, n_power_iterations=1):
super(SFTLayer_SN_ReLU, self).__init__()
# TODO: can use shared convolution layers to save computation
self.mul_conv1 = spectral_norm(
nn.Conv2d(nf + n_condition, 32, kernel_size=3, stride=1, padding=1), name='weight',
n_power_iterations=n_power_iterations)
self.mul_conv2 = spectral_norm(nn.Conv2d(32, nf, kernel_size=3, stride=1, padding=1),
name='weight', n_power_iterations=n_power_iterations)
self.add_conv1 = spectral_norm(
nn.Conv2d(nf + n_condition, 32, kernel_size=3, stride=1, padding=1), name='weight',
n_power_iterations=n_power_iterations)
self.add_conv2 = spectral_norm(nn.Conv2d(32, nf, kernel_size=3, stride=1, padding=1),
name='weight', n_power_iterations=n_power_iterations)
self.relu = nn.ReLU(inplace=True)
def forward(self, features, conditions):
cat_input = torch.cat((features, conditions), dim=1)
mul = torch.sigmoid(self.mul_conv2(self.relu(self.mul_conv1(cat_input))))
add = self.add_conv2(self.relu(self.add_conv1(cat_input)))
return features * mul + add
class SFTResidualBlock(nn.Module):
    """Residual block with two SFT-modulated conv stages and an identity skip.

    Args:
        nf: number of feature channels.
        n_condition: number of condition-map channels.
    """

    def __init__(self, nf=64, n_condition=10):
        super(SFTResidualBlock, self).__init__()
        self.sft1 = SFTLayer(nf=nf, n_condition=n_condition)
        self.sft2 = SFTLayer(nf=nf, n_condition=n_condition)
        # Conv width follows nf instead of the previously hard-coded 64
        # (identical for the default nf=64; fixes a crash for nf != 64).
        self.conv1 = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)
        self.conv2 = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        # Small-scale init keeps the residual branch near identity early in training.
        arch_util.initialize_weights([self.conv1, self.conv2], 0.1)

    def forward(self, features, conditions):
        fea = self.lrelu(self.sft1(features, conditions))
        fea = self.lrelu(self.sft2(self.conv1(fea), conditions))
        fea = self.conv2(fea)
        return features + fea
class SFTResidualBlock_SN(nn.Module):
    """Residual SFT block whose convolutions are spectrally normalized.

    Args:
        nf: number of feature channels.
        n_condition: number of condition-map channels.
        n_power_iterations: power iterations for spectral norm.
        bias_sn: also spectrally normalize the conv biases.
    """

    def __init__(self, nf=64, n_condition=10, n_power_iterations=1, bias_sn=False):
        super(SFTResidualBlock_SN, self).__init__()
        # NOTE(review): n_power_iterations/bias_sn are not forwarded to the
        # SFT sub-layers (matching the original behavior) — confirm intended.
        self.sft1 = SFTLayer_SN(nf=nf, n_condition=n_condition)
        self.sft2 = SFTLayer_SN(nf=nf, n_condition=n_condition)
        # Conv width follows nf instead of the previously hard-coded 64
        # (identical for the default nf=64; fixes a crash for nf != 64).
        self.conv1 = spectral_norm(nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True),
                                   name='weight', n_power_iterations=n_power_iterations)
        self.conv2 = spectral_norm(nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True),
                                   name='weight', n_power_iterations=n_power_iterations)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        if bias_sn:
            self.conv1 = spectral_norm(self.conv1, name='bias',
                                       n_power_iterations=n_power_iterations)
            self.conv2 = spectral_norm(self.conv2, name='bias',
                                       n_power_iterations=n_power_iterations)
        arch_util.initialize_weights([self.conv1, self.conv2], 0.1)

    def forward(self, features, conditions):
        fea = self.lrelu(self.sft1(features, conditions))
        fea = self.lrelu(self.sft2(self.conv1(fea), conditions))
        fea = self.conv2(fea)
        return features + fea
class SFTResidualBlock_SN_Norm(nn.Module):
    """Residual SFT block with spectral norm and batch/instance normalization.

    Args:
        nf: number of feature channels.
        n_condition: number of condition-map channels.
        n_power_iterations: power iterations for spectral norm.
        norm: 'batch' or 'instance'.

    Raises:
        ValueError: if *norm* is neither 'batch' nor 'instance'.
    """

    def __init__(self, nf=64, n_condition=10, n_power_iterations=1, norm='batch'):
        super(SFTResidualBlock_SN_Norm, self).__init__()
        # Validate before building sub-layers; previously an unknown norm left
        # norm_layer unbound and surfaced as a confusing NameError.
        if norm == 'batch':
            norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
        elif norm == 'instance':
            norm_layer = functools.partial(nn.InstanceNorm2d, affine=True,
                                           track_running_stats=True)
        else:
            raise ValueError("norm must be 'batch' or 'instance', got {0!r}".format(norm))
        self.sft1 = SFTLayer_SN_Norm(nf=nf, n_condition=n_condition,
                                     n_power_iterations=n_power_iterations, norm=norm)
        self.sft2 = SFTLayer_SN_Norm(nf=nf, n_condition=n_condition,
                                     n_power_iterations=n_power_iterations, norm=norm)
        # Conv width follows nf instead of the previously hard-coded 64
        # (identical for the default nf=64; fixes a crash for nf != 64).
        self.conv1 = spectral_norm(nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True),
                                   name='weight', n_power_iterations=n_power_iterations)
        self.norm1 = norm_layer(num_features=nf)
        self.conv2 = spectral_norm(nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True),
                                   name='weight', n_power_iterations=n_power_iterations)
        self.norm2 = norm_layer(num_features=nf)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        arch_util.initialize_weights([self.conv1, self.conv2], 0.1)

    def forward(self, features, conditions):
        fea = self.lrelu(self.sft1(features, conditions))
        fea = self.lrelu(self.sft2(self.norm1(self.conv1(fea)), conditions))
        fea = self.norm2(self.conv2(fea))
        return features + fea
class SFTResidualBlock_SN_ReLU(nn.Module):
    """Residual SFT block with spectral norm and ReLU activations.

    Args:
        nf: number of feature channels.
        n_condition: number of condition-map channels.
        n_power_iterations: power iterations for spectral norm.
    """

    def __init__(self, nf=64, n_condition=10, n_power_iterations=1):
        super(SFTResidualBlock_SN_ReLU, self).__init__()
        self.sft1 = SFTLayer_SN_ReLU(nf=nf, n_condition=n_condition)
        self.sft2 = SFTLayer_SN_ReLU(nf=nf, n_condition=n_condition)
        # Conv width follows nf instead of the previously hard-coded 64
        # (identical for the default nf=64; fixes a crash for nf != 64).
        self.conv1 = spectral_norm(nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True),
                                   name='weight', n_power_iterations=n_power_iterations)
        self.conv2 = spectral_norm(nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True),
                                   name='weight', n_power_iterations=n_power_iterations)
        self.relu = nn.ReLU(inplace=True)
        arch_util.initialize_weights([self.conv1, self.conv2], 0.1)

    def forward(self, features, conditions):
        fea = self.relu(self.sft1(features, conditions))
        fea = self.relu(self.sft2(self.conv1(fea), conditions))
        fea = self.conv2(fea)
        return features + fea
class SFTMD(nn.Module):
    """SFTMD super-resolution network: a chain of residual SFT blocks
    conditioned on a kernel code, followed by pixel-shuffle upsampling."""

    def __init__(self, inc=3, nf=64, n_condition=10, scale=4, n_RB=16):
        super(SFTMD, self).__init__()
        self.n_RB = n_RB
        self.conv_first = nn.Conv2d(inc, nf, 3, stride=1, padding=1)
        # Residual blocks are registered as SFTRB0 .. SFTRB{n_RB-1}.
        for idx in range(n_RB):
            self.add_module('SFTRB' + str(idx), SFTResidualBlock(nf=nf, n_condition=n_condition))
        self.sft_extra = SFTLayer(nf=nf, n_condition=n_condition)
        self.conv_extra = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)
        if scale == 4:
            # x4 is realized as two pixel-shuffle x2 stages.
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale // 2),
                nn.LeakyReLU(0.1, inplace=True),
                nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale // 2),
                nn.LeakyReLU(0.1, inplace=True),
            )
        else:
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale**2, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale),
                nn.LeakyReLU(0.1, inplace=True),
            )
        self.conv_final = nn.Conv2d(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, input, kernel_code, spatial=False, extra=False):
        _, _, H, W = input.size()
        if not spatial:
            # Broadcast the per-image code over the spatial grid.
            Bk, Ck = kernel_code.size()
            kernel_code = kernel_code.view((Bk, Ck, 1, 1)).expand((Bk, Ck, H, W))
        fea = self.lrelu(self.conv_first(input))
        fea_sft = fea.clone()
        for idx in range(self.n_RB):
            fea_sft = self.__getattr__('SFTRB' + str(idx))(fea_sft, kernel_code)
        fea = fea + fea_sft  # global residual over the block chain
        fea = self.conv_extra(self.lrelu(self.sft_extra(fea, kernel_code)))
        out = self.conv_final(self.upscale(fea))
        return (out, fea) if extra else out
class SFTMD_Ushape(nn.Module):
    """U-shaped SFTMD variant: SFT residual blocks on a downsample/upsample
    pyramid, with the kernel code re-expanded at each resolution."""

    def __init__(self, inc=3, nf=64, n_condition=10, scale=4, n_RB=16):
        super(SFTMD_Ushape, self).__init__()
        self.n_RB = n_RB
        self.conv_first = nn.Conv2d(inc, nf, 3, stride=1, padding=1)
        # downsample operation
        for i in range(n_RB // 2):
            self.add_module('SFTRB_down' + str(i), SFTResidualBlock(nf=nf, n_condition=n_condition))
        self.mid_layer = SFTResidualBlock(nf=nf, n_condition=n_condition)
        # upsample operation
        for i in range(n_RB // 2):
            self.add_module('SFTRB_up' + str(i), SFTResidualBlock(nf=nf, n_condition=n_condition))
        self.sft_extra = SFTLayer(nf=nf, n_condition=n_condition)
        self.conv_extra = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)
        if scale == 4:
            # x4 is realized as two pixel-shuffle x2 stages.
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale // 2),
                nn.LeakyReLU(0.1, inplace=True),
                nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale // 2),
                nn.LeakyReLU(0.1, inplace=True),
            )
        else:
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale**2, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale),
                nn.LeakyReLU(0.1, inplace=True),
            )
        self.conv_final = nn.Conv2d(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        self.max_pool = nn.MaxPool2d(2, 2)

    def forward(self, input, kernel_code, spatial=False, extra=False):
        # NOTE(review): intermediate sizes use int(H_in * 2**-i); behavior with
        # inputs not divisible by 2**(n_RB//2) is unverified — confirm upstream.
        _, _, H_in, W_in = input.size()
        kernel_code_ori = kernel_code.clone()
        # if not spatial:
        #     Bk, Ck = kernel_code_ori.size()
        #     kernel_code = kernel_code_ori.view((Bk, Ck, 1, 1)).expand((Bk, Ck, H, W))
        Bk, Ck = kernel_code_ori.size()
        fea = self.lrelu(self.conv_first(input))
        fea_sft = fea.clone()
        # down_scale
        kernel_code_list = []
        for i in range(self.n_RB // 2):
            # Expand the code to the current resolution, run a block, then pool x2.
            H = int(H_in * 2 ** (-1 * i))
            W = int(W_in * 2 ** (-1 * i))
            kernel_code = kernel_code_ori.view((Bk, Ck, 1, 1)).expand((Bk, Ck, H, W))
            fea_sft_x2 = self.__getattr__('SFTRB_down' + str(i))(fea_sft, kernel_code)
            fea_sft = self.max_pool(fea_sft_x2)
            # insert(0, ...) so the list runs coarse -> fine for the up path.
            kernel_code_list.insert(0, kernel_code)
        H = int(H_in * 2 ** (-1 * (self.n_RB // 2)))
        W = int(W_in * 2 ** (-1 * (self.n_RB // 2)))
        kernel_code = kernel_code_ori.view((Bk, Ck, 1, 1)).expand((Bk, Ck, H, W))
        fea_sft = self.mid_layer(fea_sft, kernel_code)
        #up_scale
        for i in range(self.n_RB // 2):
            # Bilinear x2 upsample, then the block conditioned on the matching-resolution code.
            fea_sft = F.interpolate(fea_sft, scale_factor=2, mode='bilinear', align_corners=False)
            fea_sft = self.__getattr__('SFTRB_up' + str(i))(fea_sft, kernel_code_list[i])
        # Last list entry is the full-resolution code, reused for the extra SFT.
        kernel_code = kernel_code_list[self.n_RB // 2 - 1]
        fea = fea + fea_sft
        fea = self.conv_extra(self.lrelu(self.sft_extra(fea, kernel_code)))
        out = self.conv_final(self.upscale(fea))
        if extra:
            return out, fea
        else:
            return out
class SFTMD_Noise_JPEG(nn.Module):
    """SFTMD conditioned on a joint degradation code: kernel code + noise
    level + JPEG quality, concatenated along the channel dimension."""

    def __init__(self, inc=3, nf=64, n_condition=12, scale=4, n_RB=16):
        super(SFTMD_Noise_JPEG, self).__init__()
        self.n_RB = n_RB
        self.conv_first = nn.Conv2d(inc, nf, 3, stride=1, padding=1)
        for i in range(n_RB):
            self.add_module('SFTRB' + str(i), SFTResidualBlock(nf=nf, n_condition=n_condition))
        self.sft_extra = SFTLayer(nf=nf, n_condition=n_condition)
        self.conv_extra = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)
        if scale == 4:
            # x4 is realized as two pixel-shuffle x2 stages.
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale // 2),
                nn.LeakyReLU(0.1, inplace=True),
                nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale // 2),
                nn.LeakyReLU(0.1, inplace=True),
            )
        else:
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale**2, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale),
                nn.LeakyReLU(0.1, inplace=True),
            )
        self.conv_final = nn.Conv2d(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, input, kernel_code, noise, jpeg, spatial=False, extra=False):
        _, _, H, W = input.size()
        # Fix: build the joint code unconditionally. Previously it was only
        # assigned inside `if not spatial`, so spatial=True raised NameError.
        codes = torch.cat((kernel_code, noise, jpeg), dim=1)
        if not spatial:
            # Broadcast the per-image code over the spatial grid.
            Bk, Ck = codes.size()
            codes = codes.view((Bk, Ck, 1, 1)).expand((Bk, Ck, H, W))
        fea = self.lrelu(self.conv_first(input))
        fea_sft = fea.clone()
        for i in range(self.n_RB):
            fea_sft = self.__getattr__('SFTRB' + str(i))(fea_sft, codes)
        fea = fea + fea_sft
        fea = self.conv_extra(self.lrelu(self.sft_extra(fea, codes)))
        out = self.conv_final(self.upscale(fea))
        if extra:
            return out, fea
        else:
            return out
class SFTMD_SN_Noise_JPEG(nn.Module):
    """Spectrally-normalized SFTMD conditioned on kernel code + noise + JPEG
    quality. Optional batch/instance norm blocks and optional bias spectral norm.

    Args:
        inc: input/output image channels.
        nf: feature channels.
        n_condition: channels of the joint condition code.
        scale: upscaling factor (4 uses two x2 pixel-shuffle stages).
        n_RB: number of residual SFT blocks.
        n_power_iterations: power iterations for spectral norm.
        norm: None for plain SN blocks, else 'batch'/'instance' normalized blocks.
        bias_sn: also spectrally normalize biases of the trunk convs.
    """

    def __init__(self, inc=3, nf=64, n_condition=10, scale=4, n_RB=16, n_power_iterations=1,
                 norm=None, bias_sn=False):
        super(SFTMD_SN_Noise_JPEG, self).__init__()
        self.n_RB = n_RB
        if bias_sn:
            print('Bias SN')
        self.conv_first = spectral_norm(nn.Conv2d(inc, nf, 3, stride=1, padding=1), name='weight',
                                        n_power_iterations=n_power_iterations)
        if bias_sn:
            self.conv_first = spectral_norm(self.conv_first, name='bias',
                                            n_power_iterations=n_power_iterations)
        for i in range(n_RB):
            if norm is None:
                self.add_module('SFTRB' + str(i),
                                SFTResidualBlock_SN(nf=nf, n_condition=n_condition, bias_sn=False))
            else:
                self.add_module(
                    'SFTRB' + str(i),
                    SFTResidualBlock_SN_Norm(nf=nf, n_condition=n_condition,
                                             n_power_iterations=n_power_iterations, norm=norm))
        if norm is None:
            self.sft_extra = SFTLayer_SN(nf=nf, n_condition=n_condition, bias_sn=False)
        else:
            self.sft_extra = SFTLayer_SN_Norm(nf=nf, n_condition=n_condition,
                                              n_power_iterations=n_power_iterations, norm=norm)
        self.conv_extra = spectral_norm(
            nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True), name='weight',
            n_power_iterations=n_power_iterations)
        if bias_sn:
            self.conv_extra = spectral_norm(self.conv_extra, name='bias',
                                            n_power_iterations=n_power_iterations)
        if scale == 4:
            # x4 is realized as two pixel-shuffle x2 stages.
            if bias_sn:
                self.upscale = nn.Sequential(
                    spectral_norm(
                        spectral_norm(
                            nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1,
                                      bias=True), name='weight',
                            n_power_iterations=n_power_iterations), name='bias',
                        n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale // 2),
                    nn.LeakyReLU(0.1, inplace=True),
                    spectral_norm(
                        spectral_norm(
                            nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1,
                                      bias=True), name='weight',
                            n_power_iterations=n_power_iterations), name='bias',
                        n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale // 2),
                    nn.LeakyReLU(0.1, inplace=True),
                )
            else:
                self.upscale = nn.Sequential(
                    spectral_norm(
                        nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                        name='weight', n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale // 2),
                    nn.LeakyReLU(0.1, inplace=True),
                    spectral_norm(
                        nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                        name='weight', n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale // 2),
                    nn.LeakyReLU(0.1, inplace=True),
                )
        else:
            if bias_sn:
                self.upscale = nn.Sequential(
                    spectral_norm(
                        spectral_norm(
                            nn.Conv2d(nf, nf * scale**2, kernel_size=3, stride=1, padding=1,
                                      bias=True), name='weight',
                            n_power_iterations=n_power_iterations), name='bias',
                        n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale),
                    nn.LeakyReLU(0.1, inplace=True),
                )
            else:
                self.upscale = nn.Sequential(
                    spectral_norm(
                        nn.Conv2d(nf, nf * scale**2, kernel_size=3, stride=1, padding=1, bias=True),
                        name='weight', n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale),
                    nn.LeakyReLU(0.1, inplace=True),
                )
        self.conv_final = spectral_norm(
            nn.Conv2d(nf, inc, kernel_size=3, stride=1, padding=1, bias=True), name='weight',
            n_power_iterations=n_power_iterations)
        if bias_sn:
            self.conv_final = spectral_norm(self.conv_final, name='bias',
                                            n_power_iterations=n_power_iterations)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, input, kernel_code, noise, jpeg, spatial=False, extra=False):
        _, _, H, W = input.size()
        # Fix: build the joint code unconditionally. Previously it was only
        # assigned inside `if not spatial`, so spatial=True raised NameError.
        codes = torch.cat((kernel_code, noise, jpeg), dim=1)
        if not spatial:
            # Broadcast the per-image code over the spatial grid.
            Bk, Ck = codes.size()
            codes = codes.view((Bk, Ck, 1, 1)).expand((Bk, Ck, H, W))
        fea = self.lrelu(self.conv_first(input))
        fea_sft = fea.clone()
        for i in range(self.n_RB):
            fea_sft = self.__getattr__('SFTRB' + str(i))(fea_sft, codes)
        fea = fea + fea_sft
        fea = self.conv_extra(self.lrelu(self.sft_extra(fea, codes)))
        out = self.conv_final(self.upscale(fea))
        if extra:
            return out, fea
        else:
            return out
class SFTMD_SN(nn.Module):
    """Spectrally-normalized SFTMD conditioned on a kernel code.

    norm=None uses plain SN blocks; 'batch'/'instance' uses the normalized
    variants. bias_sn additionally spectrally normalizes trunk conv biases.
    """

    def __init__(self, inc=3, nf=64, n_condition=10, scale=4, n_RB=16, n_power_iterations=1,
                 norm=None, bias_sn=False):
        super(SFTMD_SN, self).__init__()
        self.n_RB = n_RB
        if bias_sn:
            print('Bias SN')
        self.conv_first = spectral_norm(nn.Conv2d(inc, nf, 3, stride=1, padding=1), name='weight',
                                        n_power_iterations=n_power_iterations)
        if bias_sn:
            self.conv_first = spectral_norm(self.conv_first, name='bias',
                                            n_power_iterations=n_power_iterations)
        for i in range(n_RB):
            if norm is None:
                self.add_module('SFTRB' + str(i), SFTResidualBlock_SN(nf=nf,
                                n_condition=n_condition, bias_sn=False))
            else:
                self.add_module(
                    'SFTRB' + str(i),
                    SFTResidualBlock_SN_Norm(nf=nf, n_condition=n_condition,
                                             n_power_iterations=n_power_iterations, norm=norm))
        if norm is None:
            self.sft_extra = SFTLayer_SN(nf=nf, n_condition=n_condition, bias_sn=False)
        else:
            self.sft_extra = SFTLayer_SN_Norm(nf=nf, n_condition=n_condition,
                                              n_power_iterations=n_power_iterations, norm=norm)
        self.conv_extra = spectral_norm(
            nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True), name='weight',
            n_power_iterations=n_power_iterations)
        if bias_sn:
            self.conv_extra = spectral_norm(self.conv_extra, name='bias',
                                            n_power_iterations=n_power_iterations)
        if scale == 4:
            # x4 is realized as two pixel-shuffle x2 stages.
            if bias_sn:
                self.upscale = nn.Sequential(
                    spectral_norm(
                        spectral_norm(
                            nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1,
                                      bias=True), name='weight',
                            n_power_iterations=n_power_iterations), name='bias',
                        n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale // 2),
                    nn.LeakyReLU(0.1, inplace=True),
                    spectral_norm(
                        spectral_norm(
                            nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1,
                                      bias=True), name='weight',
                            n_power_iterations=n_power_iterations), name='bias',
                        n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale // 2),
                    nn.LeakyReLU(0.1, inplace=True),
                )
            else:
                self.upscale = nn.Sequential(
                    spectral_norm(
                        nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                        name='weight', n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale // 2),
                    nn.LeakyReLU(0.1, inplace=True),
                    spectral_norm(
                        nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                        name='weight', n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale // 2),
                    nn.LeakyReLU(0.1, inplace=True),
                )
        else:
            if bias_sn:
                self.upscale = nn.Sequential(
                    spectral_norm(
                        spectral_norm(
                            nn.Conv2d(nf, nf * scale**2, kernel_size=3, stride=1, padding=1,
                                      bias=True), name='weight',
                            n_power_iterations=n_power_iterations), name='bias',
                        n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale),
                    nn.LeakyReLU(0.1, inplace=True),
                )
            else:
                self.upscale = nn.Sequential(
                    spectral_norm(
                        nn.Conv2d(nf, nf * scale**2, kernel_size=3, stride=1, padding=1, bias=True),
                        name='weight', n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale),
                    nn.LeakyReLU(0.1, inplace=True),
                )
        self.conv_final = spectral_norm(
            nn.Conv2d(nf, inc, kernel_size=3, stride=1, padding=1, bias=True), name='weight',
            n_power_iterations=n_power_iterations)
        if bias_sn:
            self.conv_final = spectral_norm(self.conv_final, name='bias',
                                            n_power_iterations=n_power_iterations)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, input, kernel_code, spatial=False, extra=False):
        _, _, H, W = input.size()
        if not spatial:
            # Broadcast the per-image code over the spatial grid.
            Bk, Ck = kernel_code.size()
            kernel_code = kernel_code.view((Bk, Ck, 1, 1)).expand((Bk, Ck, H, W))
        fea = self.lrelu(self.conv_first(input))
        fea_sft = fea.clone()
        for i in range(self.n_RB):
            fea_sft = self.__getattr__('SFTRB' + str(i))(fea_sft, kernel_code)
        fea = fea + fea_sft  # global residual over the block chain
        fea = self.conv_extra(self.lrelu(self.sft_extra(fea, kernel_code)))
        out = self.conv_final(self.upscale(fea))
        if extra:
            return out, fea
        else:
            return out
class SFTMD_SN_Dropout(nn.Module):
def __init__(self, inc=3, nf=64, n_condition=10, scale=4, n_RB=16, n_power_iterations=1,
norm=None, dropSN=True):
super(SFTMD_SN_Dropout, self).__init__()
self.n_RB = n_RB
self.conv_first = spectral_norm(nn.Conv2d(inc, nf, 3, stride=1, padding=1), name='weight',
n_power_iterations=n_power_iterations)
for i in range(n_RB):
if norm is None:
self.add_module('SFTRB' + str(i), SFTResidualBlock_SN(nf=nf,
n_condition=n_condition))
else:
self.add_module(
'SFTRB' + str(i),
SFTResidualBlock_SN_Norm(nf=nf, n_condition=n_condition,
n_power_iterations=n_power_iterations, norm=norm))
if norm is None:
self.sft_extra = SFTLayer_SN(nf=nf, n_condition=n_condition)
else:
self.sft_extra = SFTLayer_SN_Norm(nf=nf, n_condition=n_condition,
n_power_iterations=n_power_iterations, norm=norm)
if dropSN:
self.conv_extra = spectral_norm(
nn.Conv2d(nf, nf * 2, kernel_size=3, stride=1, padding=1, bias=True), name='weight',
n_power_iterations=n_power_iterations)
self.conv_extra2 = spectral_norm(
nn.Conv2d(nf * 2, nf, kernel_size=3, stride=1, padding=1, bias=True), name='weight',
n_power_iterations=n_power_iterations)
else:
self.conv_extra = nn.Conv2d(nf, nf * 2, kernel_size=3, stride=1, padding=1, bias=True)
self.conv_extra2 = nn.Conv2d(nf * 2, nf, kernel_size=3, stride=1, padding=1, bias=True)
self.dropout = nn.Dropout2d(p=0.5, inplace=False)
if scale == 4:
self.upscale = nn.Sequential(
spectral_norm(
nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
name='weight', n_power_iterations=n_power_iterations),
nn.PixelShuffle(scale // 2),
nn.LeakyReLU(0.1, inplace=True),
spectral_norm(
nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
name='weight', n_power_iterations=n_power_iterations),
nn.PixelShuffle(scale // 2),
nn.LeakyReLU(0.1, inplace=True),
)
else:
self.upscale = nn.Sequential(
spectral_norm(
nn.Conv2d(nf, nf * scale**2, kernel_size=3, stride=1, padding=1, bias=True),
name='weight', n_power_iterations=n_power_iterations),
nn.PixelShuffle(scale),
nn.LeakyReLU(0.1, inplace=True),
)
self.conv_final = spectral_norm(
nn.Conv2d(nf, inc, kernel_size=3, stride=1, padding=1, bias=True), name='weight',
n_power_iterations=n_power_iterations)
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def forward(self, input, kernel_code):
    """Super-resolve `input` conditioned on a per-image kernel code.

    The (B, C) code is broadcast to a (B, C, H, W) map, fed to every SFT
    residual block and to the extra SFT layer, then the feature map is
    widened/narrowed by the extra convs (with dropout in between),
    upscaled, and projected back to image space.
    """
    _, _, height, width = input.size()
    batch, channels = kernel_code.size()
    # Broadcast the code over the full spatial extent of the features.
    cond = kernel_code.view((batch, channels, 1, 1)).expand(
        (batch, channels, height, width))
    fea = self.lrelu(self.conv_first(input))
    res = fea.clone()
    for idx in range(self.n_RB):
        res = self.__getattr__('SFTRB' + str(idx))(res, cond)
    fea = fea + res  # long skip connection around the residual trunk
    fea = self.conv_extra(self.lrelu(self.sft_extra(fea, cond)))
    fea = self.conv_extra2(self.dropout(fea))
    return self.conv_final(self.upscale(fea))
class SFTMD_SN_ReLU(nn.Module):
    """SFTMD generator with spectral normalization on every conv and ReLU
    activations throughout.

    Structure: spectrally-normalized first conv -> `n_RB` SFT residual
    blocks -> SFT layer + extra conv -> pixel-shuffle upscaling -> final
    conv back to `inc` channels.
    """

    def __init__(self, inc=3, nf=64, n_condition=10, scale=4, n_RB=16):
        super(SFTMD_SN_ReLU, self).__init__()
        self.n_RB = n_RB
        n_power_iterations = 1
        self.conv_first = spectral_norm(
            nn.Conv2d(inc, nf, 3, stride=1, padding=1),
            name='weight', n_power_iterations=n_power_iterations)
        for i in range(n_RB):
            self.add_module(
                'SFTRB' + str(i),
                SFTResidualBlock_SN_ReLU(nf=nf, n_condition=n_condition))
        self.sft_extra = SFTLayer_SN_ReLU(nf=nf, n_condition=n_condition)
        self.conv_extra = spectral_norm(
            nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True),
            name='weight', n_power_iterations=n_power_iterations)
        # Pixel-shuffle upsampling: two x2 stages for scale 4, one stage otherwise.
        if scale == 4:
            stages = []
            for _ in range(2):
                stages += [
                    spectral_norm(
                        nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1,
                                  padding=1, bias=True),
                        name='weight', n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale // 2),
                    nn.ReLU(inplace=True),
                ]
            self.upscale = nn.Sequential(*stages)
        else:
            self.upscale = nn.Sequential(
                spectral_norm(
                    nn.Conv2d(nf, nf * scale ** 2, kernel_size=3, stride=1,
                              padding=1, bias=True),
                    name='weight', n_power_iterations=n_power_iterations),
                nn.PixelShuffle(scale),
                nn.ReLU(inplace=True),
            )
        self.conv_final = spectral_norm(
            nn.Conv2d(nf, inc, kernel_size=3, stride=1, padding=1, bias=True),
            name='weight', n_power_iterations=n_power_iterations)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, input, kernel_code):
        """Run the SR network on `input` conditioned on `kernel_code` (B, C)."""
        _, _, height, width = input.size()
        batch, channels = kernel_code.size()
        cond = kernel_code.view((batch, channels, 1, 1)).expand(
            (batch, channels, height, width))
        fea = self.relu(self.conv_first(input))
        res = fea.clone()
        for i in range(self.n_RB):
            res = self.__getattr__('SFTRB' + str(i))(res, cond)
        # Long skip connection, then extra SFT + conv before upscaling.
        fea = self.conv_extra(self.relu(self.sft_extra(fea + res, cond)))
        return self.conv_final(self.upscale(fea))
class SFTMD_concat(nn.Module):
    """SFTMD baseline that conditions by channel-concatenation.

    The (B, C) kernel code is broadcast spatially and concatenated with the
    input image before the first convolution; the trunk is a stack of plain
    (non-SFT) residual blocks followed by pixel-shuffle upsampling.

    Fix: `conv_first` previously hard-coded ``n_condition + 3`` input
    channels, but ``forward`` concatenates the input (``inc`` channels)
    with the code, so the layer must take ``n_condition + inc`` channels.
    Backward-compatible for the default ``inc=3``.
    """

    def __init__(self, inc=3, nf=64, n_condition=10, scale=4, n_RB=16):
        super(SFTMD_concat, self).__init__()
        self.n_RB = n_RB
        # Input is the image (inc channels) concatenated with the code map.
        self.conv_first = nn.Conv2d(n_condition + inc, nf, 3, stride=1, padding=1)
        for i in range(n_RB):
            self.add_module('SFTRB' + str(i), arch_util.ResidualBlock_noBN(nf=nf))
        self.conv_extra = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)
        # Pixel-shuffle upsampling: two x2 stages for scale 4, one stage otherwise.
        if scale == 4:
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale // 2),
                nn.LeakyReLU(0.1, inplace=True),
                nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale // 2),
                nn.LeakyReLU(0.1, inplace=True),
            )
        else:
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale ** 2, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale),
                nn.LeakyReLU(0.1, inplace=True),
            )
        self.conv_final = nn.Conv2d(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, input, kernel_code):
        """Super-resolve `input`; `kernel_code` is a (B, n_condition) tensor."""
        _, _, H, W = input.size()
        Bk, Ck = kernel_code.size()
        # Broadcast the code spatially, then concatenate along channels.
        cond = kernel_code.view((Bk, Ck, 1, 1)).expand((Bk, Ck, H, W))
        fea = self.lrelu(self.conv_first(torch.cat((input, cond), 1)))
        fea_sft = fea.clone()
        for i in range(self.n_RB):
            fea_sft = self.__getattr__('SFTRB' + str(i))(fea_sft)
        fea = fea + fea_sft  # long skip connection
        fea = self.conv_extra(self.lrelu(fea))
        return self.conv_final(self.upscale(fea))
class SFTMD_kernel(nn.Module):
    """SFTMD variant whose first two convolutions are *predicted* from the
    kernel code.

    A shared MLP maps the code to the weights of a separable (k x 1 then
    1 x k) convolution pair applied to the input image; the result then
    passes through a plain residual trunk and pixel-shuffle upsampling.
    """

    def __init__(self, inc=3, nf=64, n_condition=10, scale=4, n_RB=16, k=11):
        super(SFTMD_kernel, self).__init__()
        self.n_RB = n_RB
        # Shared trunk of the weight-predicting MLP.
        # NOTE(review): the first layer hard-codes a 32-dim code input
        # (ignores n_condition) — confirm against the caller.
        self.fc_share_1 = nn.Linear(32, 100)
        self.fc_share_2 = nn.Linear(100, 200)
        self.fc_share_3 = nn.Linear(200, 400)
        self.fc_share_4 = nn.Linear(400, 200)
        # Heads emitting the two predicted conv-weight tensors
        # (10 x 3 x k x 1 and 10 x 10 x 1 x k; input assumed 3-channel).
        self.fc_share_conv1_1 = nn.Linear(200, 200)
        self.fc_share_conv1_2 = nn.Linear(200, 10 * 3 * k * 1)
        self.fc_share_conv2_1 = nn.Linear(200, 200)
        self.fc_share_conv2_2 = nn.Linear(200, 10 * 10 * k * 1)
        self.conv_first = nn.Conv2d(10, nf, 3, stride=1, padding=1)
        for i in range(n_RB):
            self.add_module('SFTRB' + str(i), arch_util.ResidualBlock_noBN(nf=nf))
        self.conv_extra = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)
        # Pixel-shuffle upsampling: two x2 stages for scale 4, one otherwise.
        if scale == 4:
            stages = []
            for _ in range(2):
                stages += [
                    nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                    nn.PixelShuffle(scale // 2),
                    nn.LeakyReLU(0.1, inplace=True),
                ]
            self.upscale = nn.Sequential(*stages)
        else:
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale ** 2, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale),
                nn.LeakyReLU(0.1, inplace=True),
            )
        self.conv_final = nn.Conv2d(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        self.pad = (k - 1) // 2  # "same" padding along the kernel axis
        self.k = k

    def forward(self, input, kernel_code):
        """Apply the predicted separable convs, then the SR trunk."""
        B, _, H, W = input.size()
        # Predict the two conv-weight tensors from the code.
        code = kernel_code.view((B, -1))
        code = self.lrelu(self.fc_share_1(code))
        code = self.lrelu(self.fc_share_2(code))
        code = self.lrelu(self.fc_share_3(code))
        code = self.lrelu(self.fc_share_4(code))
        w1 = self.fc_share_conv1_2(self.lrelu(self.fc_share_conv1_1(code)))
        w2 = self.fc_share_conv2_2(self.lrelu(self.fc_share_conv2_1(code)))
        # NOTE(review): these views drop the batch axis, so they only
        # succeed when B == 1 — one predicted kernel for the whole batch.
        w1 = w1.view((10, 3, self.k, 1))
        w2 = w2.view((10, 10, 1, self.k))
        fea = self.lrelu(F.conv2d(input, w1, padding=(self.pad, 0)))
        fea = self.lrelu(F.conv2d(fea, w2, padding=(0, self.pad)))
        fea = self.lrelu(self.conv_first(fea))
        res = fea.clone()
        for i in range(self.n_RB):
            res = self.__getattr__('SFTRB' + str(i))(res)
        fea = self.conv_extra(self.lrelu(fea + res))
        return self.conv_final(self.upscale(fea))
class SFTMD_coderefine(nn.Module):
    """SFTMD with an internal MLP that refines the kernel code before it is
    used as the SFT condition.

    The code passes through four fully-connected layers (fc1-fc4, last one
    linear) and the refined code conditions the SFT residual trunk.
    """

    def __init__(self, inc=3, nf=64, n_condition=10, scale=4, n_RB=16):
        super(SFTMD_coderefine, self).__init__()
        self.n_RB = n_RB
        self.conv_first = nn.Conv2d(inc, nf, 3, stride=1, padding=1)
        for i in range(n_RB):
            self.add_module('SFTRB' + str(i),
                            SFTResidualBlock(nf=nf, n_condition=n_condition))
        self.sft_extra = SFTLayer(nf=nf, n_condition=n_condition)
        self.conv_extra = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)
        # Pixel-shuffle upsampling: two x2 stages for scale 4, one otherwise.
        if scale == 4:
            stages = []
            for _ in range(2):
                stages += [
                    nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                    nn.PixelShuffle(scale // 2),
                    nn.LeakyReLU(0.1, inplace=True),
                ]
            self.upscale = nn.Sequential(*stages)
        else:
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale ** 2, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale),
                nn.LeakyReLU(0.1, inplace=True),
            )
        self.conv_final = nn.Conv2d(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        # Code-refinement MLP.
        self.fc1 = nn.Linear(n_condition, 400)
        self.fc2 = nn.Linear(400, 400)
        self.fc3 = nn.Linear(400, 200)
        self.fc4 = nn.Linear(200, n_condition)

    def forward(self, input, kernel_code):
        """Refine `kernel_code`, then run the SFT-conditioned SR trunk."""
        _, _, height, width = input.size()
        # Refine the code; the final layer has no activation.
        code = self.lrelu(self.fc1(kernel_code))
        code = self.lrelu(self.fc2(code))
        code = self.lrelu(self.fc3(code))
        code = self.fc4(code)
        batch, channels = code.size()
        cond = code.view((batch, channels, 1, 1)).expand(
            (batch, channels, height, width))
        fea = self.lrelu(self.conv_first(input))
        res = fea.clone()
        for i in range(self.n_RB):
            res = self.__getattr__('SFTRB' + str(i))(res, cond)
        # Long skip connection, then extra SFT + conv before upscaling.
        fea = self.conv_extra(self.lrelu(self.sft_extra(fea + res, cond)))
        return self.conv_final(self.upscale(fea))
class Corrector(nn.Module):
    """Predict a refined degradation code from an SR image and the current code.

    A small CNN extracts image features; the current code is embedded by an
    MLP and, when `conv_merge` is set, fused with the image features via
    1x1 convolutions. The head predicts a residual that is added to the
    *input* code (``return out + code``).
    """

    def __init__(self, inc=3, n_condition=10, nf=64, conv_merge=True, use_bias=True):
        super(Corrector, self).__init__()
        # Feature extractor: seven 5x5 convs, two of them stride-2 (x4 downscale).
        body = []
        in_ch = inc
        for stride in (1, 2, 1, 2, 1, 1, 1):
            body.append(nn.Conv2d(in_ch, nf, kernel_size=5, stride=stride,
                                  padding=2, bias=use_bias))
            body.append(nn.LeakyReLU(0.1, True))
            in_ch = nf
        self.ConvNet = nn.Sequential(*body)
        # Embed the incoming code into an nf-dim vector.
        self.code_dense = nn.Sequential(
            nn.Linear(n_condition, nf, bias=use_bias),
            nn.LeakyReLU(0.1, True),
            nn.Linear(nf, nf, bias=use_bias),
        )
        if conv_merge:
            # Fuse image features with the spatially-broadcast code embedding.
            self.global_dense = nn.Sequential(
                nn.Conv2d(nf * 2, nf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
                nn.LeakyReLU(0.1, True),
                nn.Conv2d(nf * 2, nf, kernel_size=1, stride=1, padding=0, bias=use_bias),
                nn.LeakyReLU(0.1, True),
                nn.Conv2d(nf, nf, kernel_size=1, stride=1, padding=0, bias=use_bias),
                nn.LeakyReLU(0.1, True),
            )
        self.nf = nf
        self.conv_merge = conv_merge
        self.fc1 = nn.Linear(nf, nf, bias=True)
        self.fc2 = nn.Linear(nf, nf, bias=True)
        self.fc3 = nn.Linear(nf, n_condition, bias=True)
        self.globalpooling = nn.AdaptiveAvgPool2d((1, 1))
        self.lrelu = nn.LeakyReLU(0.1, True)

    def forward(self, input, code):
        """Return the corrected code: `code` plus a predicted residual."""
        feat = self.ConvNet(input)
        B, _, H_f, W_f = feat.size()  # spatial size after the CNN (LR size)
        code_emb = self.code_dense(code)
        if self.conv_merge:
            spatial_code = code_emb.view((B, self.nf, 1, 1)).expand(
                (B, self.nf, H_f, W_f))
            feat = self.global_dense(torch.cat((feat, spatial_code), dim=1))
        pooled = self.globalpooling(feat).view(feat.size(0), -1)
        pooled = self.lrelu(self.fc1(pooled))
        pooled = self.lrelu(self.fc2(pooled))
        return self.fc3(pooled) + code
class CorrectorV2(nn.Module):
def __init__(self, inc=3, n_condition=10, nf=64, conv_merge=False, use_bias=True):
super(CorrectorV2, self).__init__()
self.ConvNet = nn.Sequential(*[
nn.Conv2d(inc, nf, kernel_size=5, stride=1, padding=2, bias=use_bias),
nn.LeakyReLU(0.1, True),
nn.Conv2d(nf, nf, kernel_size=5, stride=2, padding=2, bias=use_bias),
nn.LeakyReLU(0.1, True),
nn.Conv2d(nf, nf, kernel_size=5, stride=1, padding=2, bias=use_bias),
nn.LeakyReLU(0.1, True),
nn.Conv2d(nf, nf, kernel_size=5, stride=2, padding=2, bias=use_bias),
nn.LeakyReLU(0.1, True),
nn.Conv2d(nf, nf, kernel_size=5, stride=1, padding=2, bias=use_bias),
nn.LeakyReLU(0.1, True),
nn.Conv2d(nf, nf, kernel_size=5, stride=1, padding=2, bias=use_bias),
nn.LeakyReLU(0.1, True),
nn.Conv2d(nf, nf, kernel_size=5, stride=1, padding=2, bias=use_bias),
nn.LeakyReLU(0.1, True),
])
self.code_dense = nn.Sequential(*[
nn.Linear(n_condition, nf, bias=use_bias),
nn.LeakyReLU(0.1, True),
nn.Linear(nf, nf, bias=use_bias),
])
if conv_merge:
self.global_dense = nn.Sequential(*[
nn.Conv2d(nf * 2, nf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
nn.LeakyReLU(0.1, True),
nn.Conv2d(nf * 2, nf, kernel_size=1, stride=1, padding=0, bias=use_bias),
nn.LeakyReLU(0.1, True),
nn.Conv2d(nf, nf, kernel_size=1, stride=1, padding=0, bias=use_bias),
nn.LeakyReLU(0.1, True),
])
self.nf = nf
self.conv_merge = conv_merge
self.fc1 = nn.Linear(nf, nf, bias=True)
self.fc2 = nn.Linear(nf, nf, bias=True)
self.fc3 = nn.Linear(nf, n_condition, bias=True)
self.globalpooling = nn.AdaptiveAvgPool2d((1, 1))
self.lrelu = nn.LeakyReLU(0.1, True)
def forward(self, input, code):
conv_input = self.ConvNet(input)
B, C_f, H_f, W_f = conv_input.size() # LR_size
code_ori = self.code_dense(code)
if self.conv_merge:
conv_code = code_ori.view((B, self.nf, 1, 1)).expand((B, self.nf, H_f, W_f))
conv_mid = torch.cat((conv_input, conv_code), dim=1)
conv_input = self.global_dense(conv_mid)
fea = self.globalpooling(conv_input).view(conv_input.size(0), -1)
fea = self.lrelu(self.fc1(fea))
fea = self.lrelu(self.fc2(fea))
out = self.fc3(fea)
return out + code_ori
| [
"torch.nn.functional.conv2d",
"torch.nn.ReLU",
"models.archs.arch_util.initialize_weights",
"torch.nn.LeakyReLU",
"torch.nn.PixelShuffle",
"torch.nn.Dropout2d",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.utils.spectral_norm",
"functools.partial",
"torch.nn.Linear",
"torch.nn.AdaptiveAv... | [((423, 490), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf + n_condition)', '(32)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(nf + n_condition, 32, kernel_size=3, stride=1, padding=1)\n', (432, 490), True, 'import torch.nn as nn\n'), ((516, 569), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', 'nf'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(32, nf, kernel_size=3, stride=1, padding=1)\n', (525, 569), True, 'import torch.nn as nn\n'), ((595, 662), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf + n_condition)', '(32)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(nf + n_condition, 32, kernel_size=3, stride=1, padding=1)\n', (604, 662), True, 'import torch.nn as nn\n'), ((688, 741), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', 'nf'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(32, nf, kernel_size=3, stride=1, padding=1)\n', (697, 741), True, 'import torch.nn as nn\n'), ((764, 810), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (776, 810), True, 'import torch.nn as nn\n'), ((877, 917), 'torch.cat', 'torch.cat', (['(features, conditions)'], {'dim': '(1)'}), '((features, conditions), dim=1)\n', (886, 917), False, 'import torch\n'), ((2741, 2787), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (2753, 2787), True, 'import torch.nn as nn\n'), ((2854, 2894), 'torch.cat', 'torch.cat', (['(features, conditions)'], {'dim': '(1)'}), '((features, conditions), dim=1)\n', (2863, 2894), False, 'import torch\n'), ((4607, 4653), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (4619, 4653), True, 'import torch.nn as nn\n'), ((4720, 4760), 'torch.cat', 'torch.cat', (['(features, conditions)'], {'dim': '(1)'}), '((features, conditions), dim=1)\n', 
(4729, 4760), False, 'import torch\n'), ((6019, 6040), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6026, 6040), True, 'import torch.nn as nn\n'), ((6107, 6147), 'torch.cat', 'torch.cat', (['(features, conditions)'], {'dim': '(1)'}), '((features, conditions), dim=1)\n', (6116, 6147), False, 'import torch\n'), ((6609, 6673), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(64, 64, kernel_size=3, stride=1, padding=1, bias=True)\n', (6618, 6673), True, 'import torch.nn as nn\n'), ((6695, 6759), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(64, 64, kernel_size=3, stride=1, padding=1, bias=True)\n', (6704, 6759), True, 'import torch.nn as nn\n'), ((6782, 6828), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (6794, 6828), True, 'import torch.nn as nn\n'), ((6838, 6897), 'models.archs.arch_util.initialize_weights', 'arch_util.initialize_weights', (['[self.conv1, self.conv2]', '(0.1)'], {}), '([self.conv1, self.conv2], 0.1)\n', (6866, 6897), True, 'import models.archs.arch_util as arch_util\n'), ((7832, 7878), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (7844, 7878), True, 'import torch.nn as nn\n'), ((8193, 8252), 'models.archs.arch_util.initialize_weights', 'arch_util.initialize_weights', (['[self.conv1, self.conv2]', '(0.1)'], {}), '([self.conv1, self.conv2], 0.1)\n', (8221, 8252), True, 'import models.archs.arch_util as arch_util\n'), ((9783, 9829), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (9795, 9829), True, 'import torch.nn as nn\n'), ((9839, 9898), 'models.archs.arch_util.initialize_weights', 
'arch_util.initialize_weights', (['[self.conv1, self.conv2]', '(0.1)'], {}), '([self.conv1, self.conv2], 0.1)\n', (9867, 9898), True, 'import models.archs.arch_util as arch_util\n'), ((10862, 10883), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (10869, 10883), True, 'import torch.nn as nn\n'), ((10893, 10952), 'models.archs.arch_util.initialize_weights', 'arch_util.initialize_weights', (['[self.conv1, self.conv2]', '(0.1)'], {}), '([self.conv1, self.conv2], 0.1)\n', (10921, 10952), True, 'import models.archs.arch_util as arch_util\n'), ((11368, 11410), 'torch.nn.Conv2d', 'nn.Conv2d', (['inc', 'nf', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(inc, nf, 3, stride=1, padding=1)\n', (11377, 11410), True, 'import torch.nn as nn\n'), ((11630, 11694), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)\n', (11639, 11694), True, 'import torch.nn as nn\n'), ((12422, 12487), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'inc'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)\n', (12431, 12487), True, 'import torch.nn as nn\n'), ((12510, 12556), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (12522, 12556), True, 'import torch.nn as nn\n'), ((13441, 13483), 'torch.nn.Conv2d', 'nn.Conv2d', (['inc', 'nf', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(inc, nf, 3, stride=1, padding=1)\n', (13450, 13483), True, 'import torch.nn as nn\n'), ((13983, 14047), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)\n', (13992, 14047), True, 'import torch.nn as nn\n'), ((14775, 14840), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'inc'], 
{'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)\n', (14784, 14840), True, 'import torch.nn as nn\n'), ((14863, 14909), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (14875, 14909), True, 'import torch.nn as nn\n'), ((14935, 14953), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (14947, 14953), True, 'import torch.nn as nn\n'), ((16842, 16884), 'torch.nn.Conv2d', 'nn.Conv2d', (['inc', 'nf', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(inc, nf, 3, stride=1, padding=1)\n', (16851, 16884), True, 'import torch.nn as nn\n'), ((17104, 17168), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)\n', (17113, 17168), True, 'import torch.nn as nn\n'), ((17896, 17961), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'inc'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)\n', (17905, 17961), True, 'import torch.nn as nn\n'), ((17984, 18030), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (17996, 18030), True, 'import torch.nn as nn\n'), ((23684, 23730), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (23696, 23730), True, 'import torch.nn as nn\n'), ((29400, 29446), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (29412, 29446), True, 'import torch.nn as nn\n'), ((31994, 32028), 'torch.nn.Dropout2d', 'nn.Dropout2d', ([], {'p': '(0.5)', 'inplace': '(False)'}), '(p=0.5, inplace=False)\n', (32006, 32028), True, 'import 
torch.nn as nn\n'), ((33268, 33314), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (33280, 33314), True, 'import torch.nn as nn\n'), ((35958, 35979), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (35965, 35979), True, 'import torch.nn as nn\n'), ((36739, 36793), 'torch.nn.Conv2d', 'nn.Conv2d', (['(n_condition + 3)', 'nf', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(n_condition + 3, nf, 3, stride=1, padding=1)\n', (36748, 36793), True, 'import torch.nn as nn\n'), ((36934, 36998), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)\n', (36943, 36998), True, 'import torch.nn as nn\n'), ((37726, 37791), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'inc'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)\n', (37735, 37791), True, 'import torch.nn as nn\n'), ((37814, 37860), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (37826, 37860), True, 'import torch.nn as nn\n'), ((38616, 38634), 'torch.nn.Linear', 'nn.Linear', (['(32)', '(100)'], {}), '(32, 100)\n', (38625, 38634), True, 'import torch.nn as nn\n'), ((38661, 38680), 'torch.nn.Linear', 'nn.Linear', (['(100)', '(200)'], {}), '(100, 200)\n', (38670, 38680), True, 'import torch.nn as nn\n'), ((38707, 38726), 'torch.nn.Linear', 'nn.Linear', (['(200)', '(400)'], {}), '(200, 400)\n', (38716, 38726), True, 'import torch.nn as nn\n'), ((38753, 38772), 'torch.nn.Linear', 'nn.Linear', (['(400)', '(200)'], {}), '(400, 200)\n', (38762, 38772), True, 'import torch.nn as nn\n'), ((38806, 38825), 'torch.nn.Linear', 'nn.Linear', (['(200)', '(200)'], {}), '(200, 200)\n', (38815, 38825), True, 'import torch.nn as 
nn\n'), ((38858, 38888), 'torch.nn.Linear', 'nn.Linear', (['(200)', '(10 * 3 * k * 1)'], {}), '(200, 10 * 3 * k * 1)\n', (38867, 38888), True, 'import torch.nn as nn\n'), ((38921, 38940), 'torch.nn.Linear', 'nn.Linear', (['(200)', '(200)'], {}), '(200, 200)\n', (38930, 38940), True, 'import torch.nn as nn\n'), ((38973, 39004), 'torch.nn.Linear', 'nn.Linear', (['(200)', '(10 * 10 * k * 1)'], {}), '(200, 10 * 10 * k * 1)\n', (38982, 39004), True, 'import torch.nn as nn\n'), ((39032, 39073), 'torch.nn.Conv2d', 'nn.Conv2d', (['(10)', 'nf', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(10, nf, 3, stride=1, padding=1)\n', (39041, 39073), True, 'import torch.nn as nn\n'), ((39214, 39278), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)\n', (39223, 39278), True, 'import torch.nn as nn\n'), ((40006, 40071), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'inc'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)\n', (40015, 40071), True, 'import torch.nn as nn\n'), ((40094, 40140), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (40106, 40140), True, 'import torch.nn as nn\n'), ((41600, 41642), 'torch.nn.Conv2d', 'nn.Conv2d', (['inc', 'nf', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(inc, nf, 3, stride=1, padding=1)\n', (41609, 41642), True, 'import torch.nn as nn\n'), ((41862, 41926), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)\n', (41871, 41926), True, 'import torch.nn as nn\n'), ((42654, 42719), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'inc'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, inc, 
kernel_size=3, stride=1, padding=1, bias=True)\n', (42663, 42719), True, 'import torch.nn as nn\n'), ((42742, 42788), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (42754, 42788), True, 'import torch.nn as nn\n'), ((42809, 42836), 'torch.nn.Linear', 'nn.Linear', (['n_condition', '(400)'], {}), '(n_condition, 400)\n', (42818, 42836), True, 'import torch.nn as nn\n'), ((42856, 42875), 'torch.nn.Linear', 'nn.Linear', (['(400)', '(400)'], {}), '(400, 400)\n', (42865, 42875), True, 'import torch.nn as nn\n'), ((42895, 42914), 'torch.nn.Linear', 'nn.Linear', (['(400)', '(200)'], {}), '(400, 200)\n', (42904, 42914), True, 'import torch.nn as nn\n'), ((42934, 42961), 'torch.nn.Linear', 'nn.Linear', (['(200)', 'n_condition'], {}), '(200, n_condition)\n', (42943, 42961), True, 'import torch.nn as nn\n'), ((45531, 45559), 'torch.nn.Linear', 'nn.Linear', (['nf', 'nf'], {'bias': '(True)'}), '(nf, nf, bias=True)\n', (45540, 45559), True, 'import torch.nn as nn\n'), ((45579, 45607), 'torch.nn.Linear', 'nn.Linear', (['nf', 'nf'], {'bias': '(True)'}), '(nf, nf, bias=True)\n', (45588, 45607), True, 'import torch.nn as nn\n'), ((45627, 45664), 'torch.nn.Linear', 'nn.Linear', (['nf', 'n_condition'], {'bias': '(True)'}), '(nf, n_condition, bias=True)\n', (45636, 45664), True, 'import torch.nn as nn\n'), ((45694, 45722), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1, 1)'], {}), '((1, 1))\n', (45714, 45722), True, 'import torch.nn as nn\n'), ((45744, 45767), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (45756, 45767), True, 'import torch.nn as nn\n'), ((48191, 48219), 'torch.nn.Linear', 'nn.Linear', (['nf', 'nf'], {'bias': '(True)'}), '(nf, nf, bias=True)\n', (48200, 48219), True, 'import torch.nn as nn\n'), ((48239, 48267), 'torch.nn.Linear', 'nn.Linear', (['nf', 'nf'], {'bias': '(True)'}), '(nf, nf, bias=True)\n', (48248, 48267), True, 'import 
torch.nn as nn\n'), ((48287, 48324), 'torch.nn.Linear', 'nn.Linear', (['nf', 'n_condition'], {'bias': '(True)'}), '(nf, n_condition, bias=True)\n', (48296, 48324), True, 'import torch.nn as nn\n'), ((48354, 48382), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1, 1)'], {}), '((1, 1))\n', (48374, 48382), True, 'import torch.nn as nn\n'), ((48404, 48427), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (48416, 48427), True, 'import torch.nn as nn\n'), ((1387, 1454), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf + n_condition)', '(32)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(nf + n_condition, 32, kernel_size=3, stride=1, padding=1)\n', (1396, 1454), True, 'import torch.nn as nn\n'), ((1561, 1614), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', 'nf'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(32, nf, kernel_size=3, stride=1, padding=1)\n', (1570, 1614), True, 'import torch.nn as nn\n'), ((1761, 1828), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf + n_condition)', '(32)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(nf + n_condition, 32, kernel_size=3, stride=1, padding=1)\n', (1770, 1828), True, 'import torch.nn as nn\n'), ((1935, 1988), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', 'nf'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(32, nf, kernel_size=3, stride=1, padding=1)\n', (1944, 1988), True, 'import torch.nn as nn\n'), ((2132, 2218), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.mul_conv1'], {'name': '"""bias"""', 'n_power_iterations': 'n_power_iterations'}), "(self.mul_conv1, name='bias', n_power_iterations=\n n_power_iterations)\n", (2145, 2218), True, 'import torch.nn.utils.spectral_norm as spectral_norm\n'), ((2286, 2372), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.mul_conv2'], {'name': '"""bias"""', 'n_power_iterations': 'n_power_iterations'}), "(self.mul_conv2, name='bias', n_power_iterations=\n n_power_iterations)\n", (2299, 
2372), True, 'import torch.nn.utils.spectral_norm as spectral_norm\n'), ((2440, 2526), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.add_conv1'], {'name': '"""bias"""', 'n_power_iterations': 'n_power_iterations'}), "(self.add_conv1, name='bias', n_power_iterations=\n n_power_iterations)\n", (2453, 2526), True, 'import torch.nn.utils.spectral_norm as spectral_norm\n'), ((2594, 2680), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.add_conv2'], {'name': '"""bias"""', 'n_power_iterations': 'n_power_iterations'}), "(self.add_conv2, name='bias', n_power_iterations=\n n_power_iterations)\n", (2607, 2680), True, 'import torch.nn.utils.spectral_norm as spectral_norm\n'), ((3374, 3446), 'functools.partial', 'functools.partial', (['nn.BatchNorm2d'], {'affine': '(True)', 'track_running_stats': '(True)'}), '(nn.BatchNorm2d, affine=True, track_running_stats=True)\n', (3391, 3446), False, 'import functools\n'), ((3677, 3744), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf + n_condition)', '(32)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(nf + n_condition, 32, kernel_size=3, stride=1, padding=1)\n', (3686, 3744), True, 'import torch.nn as nn\n'), ((3904, 3957), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', 'nf'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(32, nf, kernel_size=3, stride=1, padding=1)\n', (3913, 3957), True, 'import torch.nn as nn\n'), ((4157, 4224), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf + n_condition)', '(32)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(nf + n_condition, 32, kernel_size=3, stride=1, padding=1)\n', (4166, 4224), True, 'import torch.nn as nn\n'), ((4384, 4437), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', 'nf'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(32, nf, kernel_size=3, stride=1, padding=1)\n', (4393, 4437), True, 'import torch.nn as nn\n'), ((5302, 5369), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf + n_condition)', '(32)'], {'kernel_size': '(3)', 'stride': '(1)', 
'padding': '(1)'}), '(nf + n_condition, 32, kernel_size=3, stride=1, padding=1)\n', (5311, 5369), True, 'import torch.nn as nn\n'), ((5476, 5529), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', 'nf'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(32, nf, kernel_size=3, stride=1, padding=1)\n', (5485, 5529), True, 'import torch.nn as nn\n'), ((5676, 5743), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf + n_condition)', '(32)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(nf + n_condition, 32, kernel_size=3, stride=1, padding=1)\n', (5685, 5743), True, 'import torch.nn as nn\n'), ((5850, 5903), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', 'nf'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(32, nf, kernel_size=3, stride=1, padding=1)\n', (5859, 5903), True, 'import torch.nn as nn\n'), ((7465, 7529), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(64, 64, kernel_size=3, stride=1, padding=1, bias=True)\n', (7474, 7529), True, 'import torch.nn as nn\n'), ((7655, 7719), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(64, 64, kernel_size=3, stride=1, padding=1, bias=True)\n', (7664, 7719), True, 'import torch.nn as nn\n'), ((7925, 8002), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.conv1'], {'name': '"""bias"""', 'n_power_iterations': 'n_power_iterations'}), "(self.conv1, name='bias', n_power_iterations=n_power_iterations)\n", (7938, 8002), True, 'import torch.nn.utils.spectral_norm as spectral_norm\n'), ((8067, 8144), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.conv2'], {'name': '"""bias"""', 'n_power_iterations': 'n_power_iterations'}), "(self.conv2, name='bias', n_power_iterations=n_power_iterations)\n", (8080, 8144), True, 'import torch.nn.utils.spectral_norm as spectral_norm\n'), ((8720, 8792), 'functools.partial', 'functools.partial', 
(['nn.BatchNorm2d'], {'affine': '(True)', 'track_running_stats': '(True)'}), '(nn.BatchNorm2d, affine=True, track_running_stats=True)\n', (8737, 8792), False, 'import functools\n'), ((9318, 9382), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(64, 64, kernel_size=3, stride=1, padding=1, bias=True)\n', (9327, 9382), True, 'import torch.nn as nn\n'), ((9557, 9621), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(64, 64, kernel_size=3, stride=1, padding=1, bias=True)\n', (9566, 9621), True, 'import torch.nn as nn\n'), ((10496, 10560), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(64, 64, kernel_size=3, stride=1, padding=1, bias=True)\n', (10505, 10560), True, 'import torch.nn as nn\n'), ((10686, 10750), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(64, 64, kernel_size=3, stride=1, padding=1, bias=True)\n', (10695, 10750), True, 'import torch.nn as nn\n'), ((16168, 16244), 'torch.nn.functional.interpolate', 'F.interpolate', (['fea_sft'], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(fea_sft, scale_factor=2, mode='bilinear', align_corners=False)\n", (16181, 16244), True, 'import torch.nn.functional as F\n'), ((18194, 18238), 'torch.cat', 'torch.cat', (['(kernel_code, noise, jpeg)'], {'dim': '(1)'}), '((kernel_code, noise, jpeg), dim=1)\n', (18203, 18238), False, 'import torch\n'), ((19106, 19148), 'torch.nn.Conv2d', 'nn.Conv2d', (['inc', 'nf', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(inc, nf, 3, stride=1, padding=1)\n', (19115, 19148), True, 'import torch.nn as nn\n'), ((19294, 19381), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.conv_first'], {'name': '"""bias"""', 
'n_power_iterations': 'n_power_iterations'}), "(self.conv_first, name='bias', n_power_iterations=\n n_power_iterations)\n", (19307, 19381), True, 'import torch.nn.utils.spectral_norm as spectral_norm\n'), ((20246, 20310), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)\n', (20255, 20310), True, 'import torch.nn as nn\n'), ((20428, 20515), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.conv_extra'], {'name': '"""bias"""', 'n_power_iterations': 'n_power_iterations'}), "(self.conv_extra, name='bias', n_power_iterations=\n n_power_iterations)\n", (20441, 20515), True, 'import torch.nn.utils.spectral_norm as spectral_norm\n'), ((23352, 23417), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'inc'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)\n', (23361, 23417), True, 'import torch.nn as nn\n'), ((23535, 23622), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.conv_final'], {'name': '"""bias"""', 'n_power_iterations': 'n_power_iterations'}), "(self.conv_final, name='bias', n_power_iterations=\n n_power_iterations)\n", (23548, 23622), True, 'import torch.nn.utils.spectral_norm as spectral_norm\n'), ((23894, 23938), 'torch.cat', 'torch.cat', (['(kernel_code, noise, jpeg)'], {'dim': '(1)'}), '((kernel_code, noise, jpeg), dim=1)\n', (23903, 23938), False, 'import torch\n'), ((24784, 24826), 'torch.nn.Conv2d', 'nn.Conv2d', (['inc', 'nf', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(inc, nf, 3, stride=1, padding=1)\n', (24793, 24826), True, 'import torch.nn as nn\n'), ((24972, 25059), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.conv_first'], {'name': '"""bias"""', 'n_power_iterations': 'n_power_iterations'}), "(self.conv_first, name='bias', n_power_iterations=\n n_power_iterations)\n", (24985, 25059), True, 'import 
torch.nn.utils.spectral_norm as spectral_norm\n'), ((25962, 26026), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)\n', (25971, 26026), True, 'import torch.nn as nn\n'), ((26144, 26231), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.conv_extra'], {'name': '"""bias"""', 'n_power_iterations': 'n_power_iterations'}), "(self.conv_extra, name='bias', n_power_iterations=\n n_power_iterations)\n", (26157, 26231), True, 'import torch.nn.utils.spectral_norm as spectral_norm\n'), ((29068, 29133), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'inc'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)\n', (29077, 29133), True, 'import torch.nn as nn\n'), ((29251, 29338), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.conv_final'], {'name': '"""bias"""', 'n_power_iterations': 'n_power_iterations'}), "(self.conv_final, name='bias', n_power_iterations=\n n_power_iterations)\n", (29264, 29338), True, 'import torch.nn.utils.spectral_norm as spectral_norm\n'), ((30417, 30459), 'torch.nn.Conv2d', 'nn.Conv2d', (['inc', 'nf', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(inc, nf, 3, stride=1, padding=1)\n', (30426, 30459), True, 'import torch.nn as nn\n'), ((31802, 31870), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * 2, kernel_size=3, stride=1, padding=1, bias=True)\n', (31811, 31870), True, 'import torch.nn as nn\n'), ((31902, 31970), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 2)', 'nf'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf * 2, nf, kernel_size=3, stride=1, padding=1, bias=True)\n', (31911, 31970), True, 'import torch.nn as nn\n'), ((33113, 33178), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'inc'], {'kernel_size': 
'(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)\n', (33122, 33178), True, 'import torch.nn as nn\n'), ((34191, 34233), 'torch.nn.Conv2d', 'nn.Conv2d', (['inc', 'nf', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(inc, nf, 3, stride=1, padding=1)\n', (34200, 34233), True, 'import torch.nn as nn\n'), ((34619, 34683), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)\n', (34628, 34683), True, 'import torch.nn as nn\n'), ((35804, 35869), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'inc'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)\n', (35813, 35869), True, 'import torch.nn as nn\n'), ((40936, 40988), 'torch.nn.functional.conv2d', 'F.conv2d', (['input', 'conv1_weight'], {'padding': '(self.pad, 0)'}), '(input, conv1_weight, padding=(self.pad, 0))\n', (40944, 40988), True, 'import torch.nn.functional as F\n'), ((41015, 41065), 'torch.nn.functional.conv2d', 'F.conv2d', (['fea', 'conv2_weight'], {'padding': '(0, self.pad)'}), '(fea, conv2_weight, padding=(0, self.pad))\n', (41023, 41065), True, 'import torch.nn.functional as F\n'), ((46084, 46125), 'torch.cat', 'torch.cat', (['(conv_input, conv_code)'], {'dim': '(1)'}), '((conv_input, conv_code), dim=1)\n', (46093, 46125), False, 'import torch\n'), ((48744, 48785), 'torch.cat', 'torch.cat', (['(conv_input, conv_code)'], {'dim': '(1)'}), '((conv_input, conv_code), dim=1)\n', (48753, 48785), False, 'import torch\n'), ((3505, 3580), 'functools.partial', 'functools.partial', (['nn.InstanceNorm2d'], {'affine': '(True)', 'track_running_stats': '(True)'}), '(nn.InstanceNorm2d, affine=True, track_running_stats=True)\n', (3522, 3580), False, 'import functools\n'), ((8851, 8926), 'functools.partial', 'functools.partial', (['nn.InstanceNorm2d'], 
{'affine': '(True)', 'track_running_stats': '(True)'}), '(nn.InstanceNorm2d, affine=True, track_running_stats=True)\n', (8868, 8926), False, 'import functools\n'), ((11777, 11849), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (11786, 11849), True, 'import torch.nn as nn\n'), ((11867, 11894), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (11882, 11894), True, 'import torch.nn as nn\n'), ((11912, 11943), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (11924, 11943), True, 'import torch.nn as nn\n'), ((11961, 12033), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (11970, 12033), True, 'import torch.nn as nn\n'), ((12051, 12078), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (12066, 12078), True, 'import torch.nn as nn\n'), ((12096, 12127), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (12108, 12127), True, 'import torch.nn as nn\n'), ((12215, 12292), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale ** 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale ** 2, kernel_size=3, stride=1, padding=1, bias=True)\n', (12224, 12292), True, 'import torch.nn as nn\n'), ((12308, 12330), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['scale'], {}), '(scale)\n', (12323, 12330), True, 'import torch.nn as nn\n'), ((12348, 12379), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (12360, 12379), True, 'import torch.nn as nn\n'), ((14130, 14202), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * 
scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (14139, 14202), True, 'import torch.nn as nn\n'), ((14220, 14247), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (14235, 14247), True, 'import torch.nn as nn\n'), ((14265, 14296), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (14277, 14296), True, 'import torch.nn as nn\n'), ((14314, 14386), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (14323, 14386), True, 'import torch.nn as nn\n'), ((14404, 14431), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (14419, 14431), True, 'import torch.nn as nn\n'), ((14449, 14480), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (14461, 14480), True, 'import torch.nn as nn\n'), ((14568, 14645), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale ** 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale ** 2, kernel_size=3, stride=1, padding=1, bias=True)\n', (14577, 14645), True, 'import torch.nn as nn\n'), ((14661, 14683), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['scale'], {}), '(scale)\n', (14676, 14683), True, 'import torch.nn as nn\n'), ((14701, 14732), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (14713, 14732), True, 'import torch.nn as nn\n'), ((17251, 17323), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (17260, 17323), True, 'import torch.nn as nn\n'), ((17341, 17368), 
'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (17356, 17368), True, 'import torch.nn as nn\n'), ((17386, 17417), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (17398, 17417), True, 'import torch.nn as nn\n'), ((17435, 17507), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (17444, 17507), True, 'import torch.nn as nn\n'), ((17525, 17552), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (17540, 17552), True, 'import torch.nn as nn\n'), ((17570, 17601), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (17582, 17601), True, 'import torch.nn as nn\n'), ((17689, 17766), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale ** 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale ** 2, kernel_size=3, stride=1, padding=1, bias=True)\n', (17698, 17766), True, 'import torch.nn as nn\n'), ((17782, 17804), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['scale'], {}), '(scale)\n', (17797, 17804), True, 'import torch.nn as nn\n'), ((17822, 17853), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (17834, 17853), True, 'import torch.nn as nn\n'), ((31416, 31484), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * 2, kernel_size=3, stride=1, padding=1, bias=True)\n', (31425, 31484), True, 'import torch.nn as nn\n'), ((31618, 31686), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 2)', 'nf'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf * 2, nf, kernel_size=3, stride=1, padding=1, bias=True)\n', (31627, 31686), True, 'import torch.nn as 
nn\n'), ((32311, 32338), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (32326, 32338), True, 'import torch.nn as nn\n'), ((32356, 32387), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (32368, 32387), True, 'import torch.nn as nn\n'), ((32605, 32632), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (32620, 32632), True, 'import torch.nn as nn\n'), ((32650, 32681), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (32662, 32681), True, 'import torch.nn as nn\n'), ((32972, 32994), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['scale'], {}), '(scale)\n', (32987, 32994), True, 'import torch.nn as nn\n'), ((33012, 33043), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (33024, 33043), True, 'import torch.nn as nn\n'), ((35032, 35059), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (35047, 35059), True, 'import torch.nn as nn\n'), ((35077, 35098), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (35084, 35098), True, 'import torch.nn as nn\n'), ((35316, 35343), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (35331, 35343), True, 'import torch.nn as nn\n'), ((35361, 35382), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (35368, 35382), True, 'import torch.nn as nn\n'), ((35673, 35695), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['scale'], {}), '(scale)\n', (35688, 35695), True, 'import torch.nn as nn\n'), ((35713, 35734), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (35720, 35734), True, 'import torch.nn as nn\n'), ((36870, 36905), 'models.archs.arch_util.ResidualBlock_noBN', 'arch_util.ResidualBlock_noBN', ([], {'nf': 'nf'}), '(nf=nf)\n', (36898, 36905), True, 'import 
models.archs.arch_util as arch_util\n'), ((37081, 37153), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (37090, 37153), True, 'import torch.nn as nn\n'), ((37171, 37198), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (37186, 37198), True, 'import torch.nn as nn\n'), ((37216, 37247), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (37228, 37247), True, 'import torch.nn as nn\n'), ((37265, 37337), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (37274, 37337), True, 'import torch.nn as nn\n'), ((37355, 37382), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (37370, 37382), True, 'import torch.nn as nn\n'), ((37400, 37431), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (37412, 37431), True, 'import torch.nn as nn\n'), ((37519, 37596), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale ** 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale ** 2, kernel_size=3, stride=1, padding=1, bias=True)\n', (37528, 37596), True, 'import torch.nn as nn\n'), ((37612, 37634), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['scale'], {}), '(scale)\n', (37627, 37634), True, 'import torch.nn as nn\n'), ((37652, 37683), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (37664, 37683), True, 'import torch.nn as nn\n'), ((38096, 38130), 'torch.cat', 'torch.cat', (['(input, kernel_code)', '(1)'], {}), '((input, kernel_code), 1)\n', (38105, 38130), False, 'import torch\n'), ((39150, 39185), 
'models.archs.arch_util.ResidualBlock_noBN', 'arch_util.ResidualBlock_noBN', ([], {'nf': 'nf'}), '(nf=nf)\n', (39178, 39185), True, 'import models.archs.arch_util as arch_util\n'), ((39361, 39433), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (39370, 39433), True, 'import torch.nn as nn\n'), ((39451, 39478), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (39466, 39478), True, 'import torch.nn as nn\n'), ((39496, 39527), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (39508, 39527), True, 'import torch.nn as nn\n'), ((39545, 39617), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (39554, 39617), True, 'import torch.nn as nn\n'), ((39635, 39662), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (39650, 39662), True, 'import torch.nn as nn\n'), ((39680, 39711), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (39692, 39711), True, 'import torch.nn as nn\n'), ((39799, 39876), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale ** 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale ** 2, kernel_size=3, stride=1, padding=1, bias=True)\n', (39808, 39876), True, 'import torch.nn as nn\n'), ((39892, 39914), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['scale'], {}), '(scale)\n', (39907, 39914), True, 'import torch.nn as nn\n'), ((39932, 39963), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (39944, 39963), True, 'import torch.nn as nn\n'), ((42009, 42081), 'torch.nn.Conv2d', 'nn.Conv2d', 
(['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (42018, 42081), True, 'import torch.nn as nn\n'), ((42099, 42126), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (42114, 42126), True, 'import torch.nn as nn\n'), ((42144, 42175), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (42156, 42175), True, 'import torch.nn as nn\n'), ((42193, 42265), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (42202, 42265), True, 'import torch.nn as nn\n'), ((42283, 42310), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (42298, 42310), True, 'import torch.nn as nn\n'), ((42328, 42359), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (42340, 42359), True, 'import torch.nn as nn\n'), ((42447, 42524), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale ** 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale ** 2, kernel_size=3, stride=1, padding=1, bias=True)\n', (42456, 42524), True, 'import torch.nn as nn\n'), ((42540, 42562), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['scale'], {}), '(scale)\n', (42555, 42562), True, 'import torch.nn as nn\n'), ((42580, 42611), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (42592, 42611), True, 'import torch.nn as nn\n'), ((21042, 21069), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (21057, 21069), True, 'import torch.nn as nn\n'), ((21091, 21122), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (21103, 
21122), True, 'import torch.nn as nn\n'), ((21518, 21545), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (21533, 21545), True, 'import torch.nn as nn\n'), ((21567, 21598), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (21579, 21598), True, 'import torch.nn as nn\n'), ((21914, 21941), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (21929, 21941), True, 'import torch.nn as nn\n'), ((21963, 21994), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (21975, 21994), True, 'import torch.nn as nn\n'), ((22228, 22255), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (22243, 22255), True, 'import torch.nn as nn\n'), ((22277, 22308), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (22289, 22308), True, 'import torch.nn as nn\n'), ((22809, 22831), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['scale'], {}), '(scale)\n', (22824, 22831), True, 'import torch.nn as nn\n'), ((22853, 22884), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (22865, 22884), True, 'import torch.nn as nn\n'), ((23203, 23225), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['scale'], {}), '(scale)\n', (23218, 23225), True, 'import torch.nn as nn\n'), ((23247, 23278), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (23259, 23278), True, 'import torch.nn as nn\n'), ((26758, 26785), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (26773, 26785), True, 'import torch.nn as nn\n'), ((26807, 26838), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (26819, 26838), True, 'import torch.nn as nn\n'), ((27234, 27261), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', 
(['(scale // 2)'], {}), '(scale // 2)\n', (27249, 27261), True, 'import torch.nn as nn\n'), ((27283, 27314), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (27295, 27314), True, 'import torch.nn as nn\n'), ((27630, 27657), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (27645, 27657), True, 'import torch.nn as nn\n'), ((27679, 27710), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (27691, 27710), True, 'import torch.nn as nn\n'), ((27944, 27971), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(scale // 2)'], {}), '(scale // 2)\n', (27959, 27971), True, 'import torch.nn as nn\n'), ((27993, 28024), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (28005, 28024), True, 'import torch.nn as nn\n'), ((28525, 28547), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['scale'], {}), '(scale)\n', (28540, 28547), True, 'import torch.nn as nn\n'), ((28569, 28600), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (28581, 28600), True, 'import torch.nn as nn\n'), ((28919, 28941), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['scale'], {}), '(scale)\n', (28934, 28941), True, 'import torch.nn as nn\n'), ((28963, 28994), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (28975, 28994), True, 'import torch.nn as nn\n'), ((32146, 32218), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (32155, 32218), True, 'import torch.nn as nn\n'), ((32440, 32512), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, 
bias=True)\n', (32449, 32512), True, 'import torch.nn as nn\n'), ((32804, 32881), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale ** 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale ** 2, kernel_size=3, stride=1, padding=1, bias=True)\n', (32813, 32881), True, 'import torch.nn as nn\n'), ((34867, 34939), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (34876, 34939), True, 'import torch.nn as nn\n'), ((35151, 35223), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (35160, 35223), True, 'import torch.nn as nn\n'), ((35505, 35582), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale ** 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale ** 2, kernel_size=3, stride=1, padding=1, bias=True)\n', (35514, 35582), True, 'import torch.nn as nn\n'), ((43945, 44014), 'torch.nn.Conv2d', 'nn.Conv2d', (['inc', 'nf'], {'kernel_size': '(5)', 'stride': '(1)', 'padding': '(2)', 'bias': 'use_bias'}), '(inc, nf, kernel_size=5, stride=1, padding=2, bias=use_bias)\n', (43954, 44014), True, 'import torch.nn as nn\n'), ((44028, 44051), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (44040, 44051), True, 'import torch.nn as nn\n'), ((44065, 44133), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)', 'bias': 'use_bias'}), '(nf, nf, kernel_size=5, stride=2, padding=2, bias=use_bias)\n', (44074, 44133), True, 'import torch.nn as nn\n'), ((44147, 44170), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (44159, 44170), True, 'import torch.nn as nn\n'), ((44184, 
44252), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(5)', 'stride': '(1)', 'padding': '(2)', 'bias': 'use_bias'}), '(nf, nf, kernel_size=5, stride=1, padding=2, bias=use_bias)\n', (44193, 44252), True, 'import torch.nn as nn\n'), ((44266, 44289), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (44278, 44289), True, 'import torch.nn as nn\n'), ((44303, 44371), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)', 'bias': 'use_bias'}), '(nf, nf, kernel_size=5, stride=2, padding=2, bias=use_bias)\n', (44312, 44371), True, 'import torch.nn as nn\n'), ((44385, 44408), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (44397, 44408), True, 'import torch.nn as nn\n'), ((44422, 44490), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(5)', 'stride': '(1)', 'padding': '(2)', 'bias': 'use_bias'}), '(nf, nf, kernel_size=5, stride=1, padding=2, bias=use_bias)\n', (44431, 44490), True, 'import torch.nn as nn\n'), ((44504, 44527), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (44516, 44527), True, 'import torch.nn as nn\n'), ((44541, 44609), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(5)', 'stride': '(1)', 'padding': '(2)', 'bias': 'use_bias'}), '(nf, nf, kernel_size=5, stride=1, padding=2, bias=use_bias)\n', (44550, 44609), True, 'import torch.nn as nn\n'), ((44623, 44646), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (44635, 44646), True, 'import torch.nn as nn\n'), ((44660, 44728), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(5)', 'stride': '(1)', 'padding': '(2)', 'bias': 'use_bias'}), '(nf, nf, kernel_size=5, stride=1, padding=2, bias=use_bias)\n', (44669, 44728), True, 'import torch.nn as nn\n'), ((44742, 44765), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (44754, 44765), 
True, 'import torch.nn as nn\n'), ((44834, 44875), 'torch.nn.Linear', 'nn.Linear', (['n_condition', 'nf'], {'bias': 'use_bias'}), '(n_condition, nf, bias=use_bias)\n', (44843, 44875), True, 'import torch.nn as nn\n'), ((44889, 44912), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (44901, 44912), True, 'import torch.nn as nn\n'), ((44926, 44958), 'torch.nn.Linear', 'nn.Linear', (['nf', 'nf'], {'bias': 'use_bias'}), '(nf, nf, bias=use_bias)\n', (44935, 44958), True, 'import torch.nn as nn\n'), ((46605, 46674), 'torch.nn.Conv2d', 'nn.Conv2d', (['inc', 'nf'], {'kernel_size': '(5)', 'stride': '(1)', 'padding': '(2)', 'bias': 'use_bias'}), '(inc, nf, kernel_size=5, stride=1, padding=2, bias=use_bias)\n', (46614, 46674), True, 'import torch.nn as nn\n'), ((46688, 46711), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (46700, 46711), True, 'import torch.nn as nn\n'), ((46725, 46793), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)', 'bias': 'use_bias'}), '(nf, nf, kernel_size=5, stride=2, padding=2, bias=use_bias)\n', (46734, 46793), True, 'import torch.nn as nn\n'), ((46807, 46830), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (46819, 46830), True, 'import torch.nn as nn\n'), ((46844, 46912), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(5)', 'stride': '(1)', 'padding': '(2)', 'bias': 'use_bias'}), '(nf, nf, kernel_size=5, stride=1, padding=2, bias=use_bias)\n', (46853, 46912), True, 'import torch.nn as nn\n'), ((46926, 46949), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (46938, 46949), True, 'import torch.nn as nn\n'), ((46963, 47031), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)', 'bias': 'use_bias'}), '(nf, nf, kernel_size=5, stride=2, padding=2, bias=use_bias)\n', (46972, 47031), True, 'import 
torch.nn as nn\n'), ((47045, 47068), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (47057, 47068), True, 'import torch.nn as nn\n'), ((47082, 47150), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(5)', 'stride': '(1)', 'padding': '(2)', 'bias': 'use_bias'}), '(nf, nf, kernel_size=5, stride=1, padding=2, bias=use_bias)\n', (47091, 47150), True, 'import torch.nn as nn\n'), ((47164, 47187), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (47176, 47187), True, 'import torch.nn as nn\n'), ((47201, 47269), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(5)', 'stride': '(1)', 'padding': '(2)', 'bias': 'use_bias'}), '(nf, nf, kernel_size=5, stride=1, padding=2, bias=use_bias)\n', (47210, 47269), True, 'import torch.nn as nn\n'), ((47283, 47306), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (47295, 47306), True, 'import torch.nn as nn\n'), ((47320, 47388), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(5)', 'stride': '(1)', 'padding': '(2)', 'bias': 'use_bias'}), '(nf, nf, kernel_size=5, stride=1, padding=2, bias=use_bias)\n', (47329, 47388), True, 'import torch.nn as nn\n'), ((47402, 47425), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (47414, 47425), True, 'import torch.nn as nn\n'), ((47494, 47535), 'torch.nn.Linear', 'nn.Linear', (['n_condition', 'nf'], {'bias': 'use_bias'}), '(n_condition, nf, bias=use_bias)\n', (47503, 47535), True, 'import torch.nn as nn\n'), ((47549, 47572), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (47561, 47572), True, 'import torch.nn as nn\n'), ((47586, 47618), 'torch.nn.Linear', 'nn.Linear', (['nf', 'nf'], {'bias': 'use_bias'}), '(nf, nf, bias=use_bias)\n', (47595, 47618), True, 'import torch.nn as nn\n'), ((21741, 21813), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': 
'(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (21750, 21813), True, 'import torch.nn as nn\n'), ((22055, 22127), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (22064, 22127), True, 'import torch.nn as nn\n'), ((23027, 23104), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale ** 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale ** 2, kernel_size=3, stride=1, padding=1, bias=True)\n', (23036, 23104), True, 'import torch.nn as nn\n'), ((27457, 27529), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (27466, 27529), True, 'import torch.nn as nn\n'), ((27771, 27843), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (27780, 27843), True, 'import torch.nn as nn\n'), ((28743, 28820), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale ** 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale ** 2, kernel_size=3, stride=1, padding=1, bias=True)\n', (28752, 28820), True, 'import torch.nn as nn\n'), ((45060, 45136), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 2)', '(nf * 2)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': 'use_bias'}), '(nf * 2, nf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias)\n', (45069, 45136), True, 'import torch.nn as nn\n'), ((45154, 45177), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (45166, 45177), True, 'import torch.nn as nn\n'), ((45195, 45267), 
'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 2)', 'nf'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': 'use_bias'}), '(nf * 2, nf, kernel_size=1, stride=1, padding=0, bias=use_bias)\n', (45204, 45267), True, 'import torch.nn as nn\n'), ((45285, 45308), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (45297, 45308), True, 'import torch.nn as nn\n'), ((45326, 45394), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': 'use_bias'}), '(nf, nf, kernel_size=1, stride=1, padding=0, bias=use_bias)\n', (45335, 45394), True, 'import torch.nn as nn\n'), ((45412, 45435), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (45424, 45435), True, 'import torch.nn as nn\n'), ((47720, 47796), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 2)', '(nf * 2)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': 'use_bias'}), '(nf * 2, nf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias)\n', (47729, 47796), True, 'import torch.nn as nn\n'), ((47814, 47837), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (47826, 47837), True, 'import torch.nn as nn\n'), ((47855, 47927), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 2)', 'nf'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': 'use_bias'}), '(nf * 2, nf, kernel_size=1, stride=1, padding=0, bias=use_bias)\n', (47864, 47927), True, 'import torch.nn as nn\n'), ((47945, 47968), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), '(0.1, True)\n', (47957, 47968), True, 'import torch.nn as nn\n'), ((47986, 48054), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': 'use_bias'}), '(nf, nf, kernel_size=1, stride=1, padding=0, bias=use_bias)\n', (47995, 48054), True, 'import torch.nn as nn\n'), ((48072, 48095), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)', '(True)'], {}), 
'(0.1, True)\n', (48084, 48095), True, 'import torch.nn as nn\n'), ((20750, 20822), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (20759, 20822), True, 'import torch.nn as nn\n'), ((21226, 21298), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (21235, 21298), True, 'import torch.nn as nn\n'), ((22514, 22591), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale ** 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale ** 2, kernel_size=3, stride=1, padding=1, bias=True)\n', (22523, 22591), True, 'import torch.nn as nn\n'), ((26466, 26538), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (26475, 26538), True, 'import torch.nn as nn\n'), ((26942, 27014), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True)\n', (26951, 27014), True, 'import torch.nn as nn\n'), ((28230, 28307), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * scale ** 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(nf, nf * scale ** 2, kernel_size=3, stride=1, padding=1, bias=True)\n', (28239, 28307), True, 'import torch.nn as nn\n')] |
###################################################
# #
# Name : Online Hash Cracker (HASH++) #
# Created by : MomboteQ #
# Version : 1.0 #
# #
###################################################
from colorama import Fore, Style, init
import urllib3
import urllib
import re
def online_crack(hash):
    """
    Look up a hash digest on hashtoolkit.com and print the plaintext if found.

    The hash type is inferred purely from the hex-digest length
    (MD5, SHA-1, SHA-256, SHA-384 or SHA-512); unsupported lengths are
    reported and nothing is queried.

    :param str hash: the hex digest to crack.
    """
    # `import urllib` alone does not import the `urllib.parse` submodule;
    # make the dependency explicit so unquote() below cannot NameError.
    import urllib.parse
    init()
    # Digest length -> hash type lookup (replaces the if/elif chain).
    _HASH_TYPES = {32: 'MD5', 40: 'SHA-1', 64: 'SHA-256', 96: 'SHA-384', 128: 'SHA-512'}
    hash_type = _HASH_TYPES.get(len(hash))
    if hash_type is None:
        print(f'\n[{Fore.LIGHTRED_EX}✗{Style.RESET_ALL}] This hash type is not supported.\n')
        return
    print(f'\n[{Fore.LIGHTGREEN_EX}✓{Style.RESET_ALL}] Detected hash type : {Fore.LIGHTBLUE_EX + hash_type + Style.RESET_ALL}')
    http = urllib3.PoolManager()
    try:
        response = http.request('GET', f'https://hashtoolkit.com/decrypt-hash/?hash={hash}')
    except Exception:  # network failure, DNS error, timeout, ...
        # Was a bare `except:` which also swallowed KeyboardInterrupt/SystemExit.
        print(f'[{Fore.LIGHTRED_EX}✗{Style.RESET_ALL}] Check your internet connection!\n')
        return
    # A missing match means the hash is not in the database; test for that
    # explicitly instead of catching AttributeError from `None.group(1)`.
    match = re.search(r'/generate-hash/\?text=(.*?)"', response.data.decode())
    if match is None:
        print(f'[{Fore.LIGHTRED_EX}✗{Style.RESET_ALL}] {hash} : {Fore.LIGHTRED_EX}This hash was not found in the database!{Style.RESET_ALL}\n')
        return
    decrypted = urllib.parse.unquote(match.group(1))
    print(f'[{Fore.LIGHTGREEN_EX}✓{Style.RESET_ALL}] {hash} : {Fore.LIGHTGREEN_EX + decrypted + Style.RESET_ALL}\n')
"urllib3.PoolManager",
"colorama.init"
] | [((474, 480), 'colorama.init', 'init', ([], {}), '()\n', (478, 480), False, 'from colorama import Fore, Style, init\n'), ((970, 991), 'urllib3.PoolManager', 'urllib3.PoolManager', ([], {}), '()\n', (989, 991), False, 'import urllib3\n')] |
import numpy as np
import scipy.sparse as sp
import Orange.data
from Orange.statistics import distribution, basic_stats
from Orange.util import Reprable
from .transformation import Transformation, Lookup
__all__ = [
"ReplaceUnknowns",
"Average",
"DoNotImpute",
"DropInstances",
"Model",
"AsValue",
"Random",
"Default",
]
class ReplaceUnknowns(Transformation):
    """
    Column transformation substituting a fixed ``value`` for unknown (NaN)
    entries.

    Parameters
    ----------
    variable : Orange.data.Variable
        The target variable for imputation.
    value : int or float
        The value with which to replace the unknown values
    """

    def __init__(self, variable, value=0):
        super().__init__(variable)
        self.value = value

    def transform(self, c):
        """Return *c* with every NaN replaced by ``self.value``."""
        if not sp.issparse(c):
            return np.where(np.isnan(c), self.value, c)
        # Sparse input: patch only the explicitly stored entries, in place.
        missing = np.isnan(c.data)
        c.data = np.where(missing, self.value, c.data)
        return c
class BaseImputeMethod(Reprable):
    """Common interface shared by all column imputation strategies."""

    name = ""
    short_name = ""
    description = ""
    format = "{var.name} -> {self.short_name}"
    columns_only = False

    def __call__(self, data, variable):
        """Impute the *variable* column of *data*.

        Args:
            data (Table): A table to impute.
            variable (Variable): Variable for completing missing values.

        Returns:
            A new Variable instance with completed missing values or
            an array mask of rows to drop out.
        """
        raise NotImplementedError

    def format_variable(self, var):
        """Render a display string for *var* using ``self.format``."""
        return self.format.format(var=var, self=self)

    def __str__(self):
        return self.name

    def copy(self):
        """Imputers are stateless by default, so sharing self is safe."""
        return self

    @classmethod
    def supports_variable(cls, variable):
        """Every variable type is supported unless a subclass says otherwise."""
        return True
class DoNotImpute(BaseImputeMethod):
    """Imputation method that leaves missing values untouched."""
    name = "Don't impute"
    short_name = "leave"
    description = ""
    def __call__(self, data, variable):
        # Return the variable unchanged; no compute_value is attached.
        return variable
class DropInstances(BaseImputeMethod):
    """Flag the rows whose value of *variable* is unknown, so that the
    caller can drop them."""

    name = "Remove instances with unknown values"
    short_name = "drop"
    description = ""

    def __call__(self, data, variable):
        # Returns a boolean mask rather than a new variable.
        column_data = data.get_column_view(variable)[0]
        return np.isnan(column_data)
class Average(BaseImputeMethod):
    """Impute with the column mean (continuous) or mode (discrete)."""

    name = "Average/Most frequent"
    short_name = "average"
    description = "Replace with average/mode of the column"

    def __call__(self, data, variable, value=None):
        variable = data.domain[variable]
        if value is None:
            # Derive the fill value from the data when not supplied.
            if variable.is_continuous:
                value = basic_stats.BasicStats(data, variable).mean
            elif variable.is_discrete:
                value = distribution.get_distribution(data, variable).modus()
            else:
                raise TypeError("Variable must be continuous or discrete")
        imputed = variable.copy(compute_value=ReplaceUnknowns(variable, value))
        # Provide the SQL fallback for database-backed tables.
        imputed.to_sql = ImputeSql(variable, value)
        return imputed
class ImputeSql(Reprable):
    """SQL counterpart of value imputation: emit ``coalesce(column, default)``."""

    def __init__(self, var, default):
        self.var = var
        self.default = default

    def __call__(self):
        return "coalesce({}, {})".format(self.var.to_sql(), str(self.default))
class Default(BaseImputeMethod):
    """Impute with a fixed, user-chosen value."""

    name = "Value"
    short_name = "value"
    description = ""
    columns_only = True
    format = "{var} -> {self.default}"

    def __init__(self, default=0):
        self.default = default

    def __call__(self, data, variable, *, default=None):
        variable = data.domain[variable]
        # The keyword argument overrides the instance default when given.
        if default is None:
            default = self.default
        return variable.copy(compute_value=ReplaceUnknowns(variable, default))

    def copy(self):
        return Default(self.default)
class ReplaceUnknownsModel(Reprable):
    """
    Replace unknown values with predictions of a fitted `Orange.base.Model`.

    Parameters
    ----------
    variable : Orange.data.Variable
        The target variable for the imputation.
    model : Orange.base.Model
        A fitted model predicting `variable`.
    """

    def __init__(self, variable, model):
        assert model.domain.class_var == variable
        self.variable = variable
        self.model = model

    def __call__(self, data):
        # Accept either a single instance or a whole table.
        single = isinstance(data, Orange.data.Instance)
        if single:
            column = np.array([float(data[self.variable])])
        else:
            column = np.array(data.get_column_view(self.variable)[0], copy=True)
        mask = np.isnan(column)
        if np.any(mask):
            # Predict only where the value is missing.
            predicted = self.model(data) if single else self.model(data[mask])
            column[mask] = predicted
        return column
class Model(BaseImputeMethod):
    """Impute by training a learner to predict the missing column."""

    _name = "Model-based imputer"
    short_name = "model"
    description = ""
    format = BaseImputeMethod.format + " ({self.learner.name})"

    @property
    def name(self):
        learner_name = getattr(self.learner, "name", "")
        return "{} ({})".format(self._name, learner_name)

    def __init__(self, learner):
        self.learner = learner

    def __call__(self, data, variable):
        variable = data.domain[variable]
        domain = domain_with_class_var(data.domain, variable)
        # Guard clause: bail out early when the learner cannot handle the domain.
        if not self.learner.check_learner_adequacy(domain):
            raise ValueError(
                "`{}` doesn't support domain type".format(self.learner.name)
            )
        model = self.learner(data.transform(domain))
        assert model.domain.class_var == variable
        return variable.copy(compute_value=ReplaceUnknownsModel(variable, model))

    def copy(self):
        return Model(self.learner)

    def supports_variable(self, variable):
        return self.learner.check_learner_adequacy(
            Orange.data.Domain([], class_vars=variable)
        )
def domain_with_class_var(domain, class_var):
    """
    Return a domain whose class variable is *class_var*.

    The input *domain* is returned as-is when it already has *class_var* as
    its class; otherwise a new domain is built, with *class_var* removed
    from the attributes if it appears there.
    """
    if domain.class_var is class_var:
        return domain
    attrs = domain.attributes
    if class_var in attrs:
        attrs = [var for var in attrs if var is not class_var]
    return Orange.data.Domain(attrs, class_var)
class IsDefined(Transformation):
    """Boolean column transformation: True wherever the value is known."""

    def transform(self, c):
        dense = c.toarray() if sp.issparse(c) else c
        return ~np.isnan(dense)
class AsValue(BaseImputeMethod):
    """Impute by representing "missing" as an explicit value.

    For discrete variables, an extra "N/A" value is appended and unknowns
    map onto it.  For continuous variables, the column is replaced by a
    mean-imputed copy plus a discrete undef/def indicator column.
    """
    name = "As a distinct value"
    short_name = "new value"
    description = ""
    def __call__(self, data, variable):
        variable = data.domain[variable]
        if variable.is_discrete:
            fmt = "{var.name}"
            value = "N/A"
            # New variable with "N/A" appended; the Lookup maps each known
            # value index to itself and unknowns to the new last index.
            var = Orange.data.DiscreteVariable(
                fmt.format(var=variable),
                values=variable.values + [value],
                base_value=variable.base_value,
                compute_value=Lookup(
                    variable,
                    np.arange(len(variable.values), dtype=int),
                    unknown=len(variable.values),
                ),
                sparse=variable.sparse,
            )
            return var
        elif variable.is_continuous:
            fmt = "{var.name}_def"
            # Indicator column: "def" where the original value was known.
            indicator_var = Orange.data.DiscreteVariable(
                fmt.format(var=variable),
                values=("undef", "def"),
                compute_value=IsDefined(variable),
                sparse=variable.sparse,
            )
            stats = basic_stats.BasicStats(data, variable)
            # Returns a pair: (mean-imputed original, indicator variable).
            return (
                variable.copy(compute_value=ReplaceUnknowns(variable, stats.mean)),
                indicator_var,
            )
        else:
            raise TypeError(type(variable))
class ReplaceUnknownsRandom(Transformation):
    """
    A column transformation replacing unknowns with values drawn randomly from
    an empirical distribution.
    Parameters
    ----------
    variable : Orange.data.Variable
        The target variable for imputation.
    distribution : Orange.statistics.distribution.Distribution
        The corresponding sampling distribution
    """
    def __init__(self, variable, distribution):
        # NOTE(review): the parameter shadows the imported `distribution`
        # module inside this method.
        assert distribution.size > 0
        assert distribution.variable == variable
        super().__init__(variable)
        self.distribution = distribution
        # Discrete distributions are a flat vector of counts; continuous
        # ones are indexed [1, :] for counts (and [0, :] for values, see
        # transform below).
        if variable.is_discrete:
            counts = np.array(distribution)
        elif variable.is_continuous:
            counts = np.array(distribution)[1, :]
        else:
            raise TypeError("Only discrete and continuous " "variables are supported")
        csum = np.sum(counts)
        # Normalize counts to probabilities; an all-zero distribution falls
        # back to uniform sampling.
        if csum > 0:
            self.sample_prob = counts / csum
        else:
            self.sample_prob = np.ones_like(counts) / len(counts)
    def transform(self, c):
        # Work on a dense, writable copy of the column.
        if not sp.issparse(c):
            c = np.array(c, copy=True)
        else:
            c = c.toarray().ravel()
        nanindices = np.flatnonzero(np.isnan(c))
        if self.variable.is_discrete:
            # Sample value indices 0..len(values)-1 with the stored probabilities.
            sample = np.random.choice(
                len(self.variable.values),
                size=len(nanindices),
                replace=True,
                p=self.sample_prob,
            )
        else:
            # Sample actual values from the distribution's value row.
            sample = np.random.choice(
                np.asarray(self.distribution)[0, :],
                size=len(nanindices),
                replace=True,
                p=self.sample_prob,
            )
        c[nanindices] = sample
        return c
class Random(BaseImputeMethod):
    """Impute with values sampled from the column's empirical distribution."""

    name = "Random values"
    short_name = "random"
    description = "Replace with a random value"

    def __call__(self, data, variable):
        variable = data.domain[variable]
        dist = distribution.get_distribution(data, variable)
        # A distribution is invalid if a continuous variable's column does not
        # contain any known values or if a discrete variable's .values == []
        if dist.size == 0:
            if variable.is_discrete:
                assert len(variable.values) == 0
                raise ValueError("'{}' has no values".format(variable))
            if variable.is_continuous:
                raise ValueError("'{}' has an unknown distribution".format(variable))
        # Smooth an all-zero distribution into a uniform one so that
        # sampling remains well defined.
        if variable.is_discrete and np.sum(dist) == 0:
            dist += 1 / len(dist)
        elif variable.is_continuous and np.sum(dist[1, :]) == 0:
            dist[1, :] += 1 / dist.shape[1]
        return variable.copy(compute_value=ReplaceUnknownsRandom(variable, dist))
| [
"Orange.statistics.distribution.get_distribution",
"numpy.ones_like",
"Orange.statistics.basic_stats.BasicStats",
"numpy.asarray",
"numpy.any",
"scipy.sparse.issparse",
"numpy.sum",
"numpy.array",
"numpy.isnan"
] | [((833, 847), 'scipy.sparse.issparse', 'sp.issparse', (['c'], {}), '(c)\n', (844, 847), True, 'import scipy.sparse as sp\n'), ((2250, 2263), 'numpy.isnan', 'np.isnan', (['col'], {}), '(col)\n', (2258, 2263), True, 'import numpy as np\n'), ((4537, 4553), 'numpy.isnan', 'np.isnan', (['column'], {}), '(column)\n', (4545, 4553), True, 'import numpy as np\n'), ((6529, 6543), 'scipy.sparse.issparse', 'sp.issparse', (['c'], {}), '(c)\n', (6540, 6543), True, 'import scipy.sparse as sp\n'), ((8830, 8844), 'numpy.sum', 'np.sum', (['counts'], {}), '(counts)\n', (8836, 8844), True, 'import numpy as np\n'), ((9933, 9978), 'Orange.statistics.distribution.get_distribution', 'distribution.get_distribution', (['data', 'variable'], {}), '(data, variable)\n', (9962, 9978), False, 'from Orange.statistics import distribution, basic_stats\n'), ((4569, 4581), 'numpy.any', 'np.any', (['mask'], {}), '(mask)\n', (4575, 4581), True, 'import numpy as np\n'), ((6589, 6600), 'numpy.isnan', 'np.isnan', (['c'], {}), '(c)\n', (6597, 6600), True, 'import numpy as np\n'), ((8604, 8626), 'numpy.array', 'np.array', (['distribution'], {}), '(distribution)\n', (8612, 8626), True, 'import numpy as np\n'), ((9035, 9049), 'scipy.sparse.issparse', 'sp.issparse', (['c'], {}), '(c)\n', (9046, 9049), True, 'import scipy.sparse as sp\n'), ((9067, 9089), 'numpy.array', 'np.array', (['c'], {'copy': '(True)'}), '(c, copy=True)\n', (9075, 9089), True, 'import numpy as np\n'), ((9176, 9187), 'numpy.isnan', 'np.isnan', (['c'], {}), '(c)\n', (9184, 9187), True, 'import numpy as np\n'), ((879, 895), 'numpy.isnan', 'np.isnan', (['c.data'], {}), '(c.data)\n', (887, 895), True, 'import numpy as np\n'), ((980, 991), 'numpy.isnan', 'np.isnan', (['c'], {}), '(c)\n', (988, 991), True, 'import numpy as np\n'), ((2604, 2642), 'Orange.statistics.basic_stats.BasicStats', 'basic_stats.BasicStats', (['data', 'variable'], {}), '(data, variable)\n', (2626, 2642), False, 'from Orange.statistics import distribution, 
basic_stats\n'), ((7696, 7734), 'Orange.statistics.basic_stats.BasicStats', 'basic_stats.BasicStats', (['data', 'variable'], {}), '(data, variable)\n', (7718, 7734), False, 'from Orange.statistics import distribution, basic_stats\n'), ((8956, 8976), 'numpy.ones_like', 'np.ones_like', (['counts'], {}), '(counts)\n', (8968, 8976), True, 'import numpy as np\n'), ((10500, 10512), 'numpy.sum', 'np.sum', (['dist'], {}), '(dist)\n', (10506, 10512), True, 'import numpy as np\n'), ((2740, 2785), 'Orange.statistics.distribution.get_distribution', 'distribution.get_distribution', (['data', 'variable'], {}), '(data, variable)\n', (2769, 2785), False, 'from Orange.statistics import distribution, basic_stats\n'), ((8685, 8707), 'numpy.array', 'np.array', (['distribution'], {}), '(distribution)\n', (8693, 8707), True, 'import numpy as np\n'), ((9497, 9526), 'numpy.asarray', 'np.asarray', (['self.distribution'], {}), '(self.distribution)\n', (9507, 9526), True, 'import numpy as np\n'), ((10593, 10611), 'numpy.sum', 'np.sum', (['dist[1, :]'], {}), '(dist[1, :])\n', (10599, 10611), True, 'import numpy as np\n')] |
"""
Verify workchain.
-----------------
Indented to be used to verify a calculation, perform corrections in inputs files and
restart depending on physical principles etc. E.g. issues that are outside the Calculators awereness,
or not currently checked in it. This workchain does currently nothing.
"""
# pylint: disable=attribute-defined-outside-init
from aiida.common.extendeddicts import AttributeDict
from aiida.engine import WorkChain, while_, append_
from aiida.plugins import WorkflowFactory
from aiida_vasp.utils.aiida_utils import get_data_class, get_data_node
from aiida_vasp.utils.workchains import prepare_process_inputs, compose_exit_code
class VerifyWorkChain(WorkChain):
    """Verify the calculations based on basic principles from physics, chemistry and material science."""
    # Verbosity flag; may be overridden from the optional ``verbose`` input.
    _verbose = False
    # The workchain that is launched (and then verified) on each iteration.
    _next_workchain_string = 'vasp.vasp'
    _next_workchain = WorkflowFactory(_next_workchain_string)
    @classmethod
    def define(cls, spec):
        # Declare inputs, exit codes, outline and outputs of the workchain.
        super(VerifyWorkChain, cls).define(spec)
        # Pass through all inputs of the wrapped workchain.
        spec.expose_inputs(cls._next_workchain)
        spec.input('verify.max_iterations',
                   valid_type=get_data_class('int'),
                   required=False,
                   default=get_data_node('int', 1),
                   help="""
                   The maximum number of iterations to perform.
                   """)
        spec.exit_code(0, 'NO_ERROR', message='the sun is shining')
        spec.exit_code(420, 'ERROR_NO_CALLED_WORKCHAIN', message='no called workchain detected')
        spec.exit_code(500, 'ERROR_UNKNOWN', message='unknown error detected in the verify workchain')
        spec.outline(
            cls.initialize,
            while_(cls.run_next_workchains)(
                cls.init_next_workchain,
                cls.run_next_workchain,
                cls.verify_next_workchain
            ),
            cls.finalize
        ) # yapf: disable
        spec.expose_outputs(cls._next_workchain)
    def initialize(self):
        """Initialize."""
        self._init_context()
        self._init_inputs()
    def _init_context(self):
        """Initialize context variables that are used during the logical flow."""
        # Default to the unknown-error exit code until verification succeeds.
        self.ctx.exit_code = self.exit_codes.ERROR_UNKNOWN  # pylint: disable=no-member
        self.ctx.is_finished = False
        self.ctx.iteration = 0
        self.ctx.inputs = AttributeDict()
    def _init_inputs(self):
        """Initialize inputs."""
        try:
            self._verbose = self.inputs.verbose.value
            self.ctx.inputs.verbose = self.inputs.verbose
        except AttributeError:
            # ``verbose`` is an optional input; keep the class default.
            pass
    def run_next_workchains(self):
        """
        Return whether a new calculation should be run.
        This is the case as long as the last calculation has not finished successfully and the maximum number of restarts
        has not yet been exceeded.
        """
        return not self.ctx.is_finished and self.ctx.iteration <= self.inputs.verify.max_iterations.value
    def init_next_workchain(self):
        """Initialize the next workchain."""
        self.ctx.iteration += 1
        try:
            self.ctx.inputs
        except AttributeError:
            raise ValueError('No input dictionary was defined in self.ctx.inputs')
        # Add exposed inputs
        self.ctx.inputs.update(self.exposed_inputs(self._next_workchain))
        # Make sure we do not have any floating dict (convert to Dict)
        self.ctx.inputs = prepare_process_inputs(self.ctx.inputs)
    def run_next_workchain(self):
        """Run the next workchain."""
        inputs = self.ctx.inputs
        running = self.submit(self._next_workchain, **inputs)
        self.report('launching {}<{}> iteration #{}'.format(self._next_workchain.__name__, running.pk, self.ctx.iteration))
        # Block the outline on the submitted workchain before verifying it.
        return self.to_context(workchains=append_(running))
    def verify_next_workchain(self):
        """
        Correct for unexpected physics/chemistry/material science behavior.
        Here we should correct all things that voids what we expect from
        physics/chemistry/material science. I.e. things that cannot be corrected for at the
        calculation level (simple restarts etc.).
        """
        # Currently only set to finished on first go.
        self.ctx.is_finished = True
        try:
            workchain = self.ctx.workchains[-1]
        except IndexError:
            self.report('There is no {} in the called workchain list.'.format(self._next_workchain.__name__))
            return self.exit_codes.ERROR_NO_CALLED_WORKCHAIN  # pylint: disable=no-member
        # Inherit exit status from last workchain (supposed to be
        # successful)
        next_workchain_exit_status = workchain.exit_status
        next_workchain_exit_message = workchain.exit_message
        if not next_workchain_exit_status:
            self.ctx.exit_code = self.exit_codes.NO_ERROR  # pylint: disable=no-member
        else:
            self.ctx.exit_code = compose_exit_code(next_workchain_exit_status, next_workchain_exit_message)
            self.report('The called {}<{}> returned a non-zero exit status. '
                        'The exit status {} is inherited'.format(workchain.__class__.__name__, workchain.pk, self.ctx.exit_code))
        return self.ctx.exit_code
    def finalize(self):
        """Finalize the workchain."""
        # Expose the outputs of the last called workchain as our own.
        workchain = self.ctx.workchains[-1]
        self.out_many(self.exposed_outputs(workchain, self._next_workchain))
| [
"aiida.common.extendeddicts.AttributeDict",
"aiida_vasp.utils.workchains.compose_exit_code",
"aiida.engine.while_",
"aiida.engine.append_",
"aiida_vasp.utils.workchains.prepare_process_inputs",
"aiida_vasp.utils.aiida_utils.get_data_node",
"aiida.plugins.WorkflowFactory",
"aiida_vasp.utils.aiida_utils... | [((880, 919), 'aiida.plugins.WorkflowFactory', 'WorkflowFactory', (['_next_workchain_string'], {}), '(_next_workchain_string)\n', (895, 919), False, 'from aiida.plugins import WorkflowFactory\n'), ((2369, 2384), 'aiida.common.extendeddicts.AttributeDict', 'AttributeDict', ([], {}), '()\n', (2382, 2384), False, 'from aiida.common.extendeddicts import AttributeDict\n'), ((3472, 3511), 'aiida_vasp.utils.workchains.prepare_process_inputs', 'prepare_process_inputs', (['self.ctx.inputs'], {}), '(self.ctx.inputs)\n', (3494, 3511), False, 'from aiida_vasp.utils.workchains import prepare_process_inputs, compose_exit_code\n'), ((4986, 5060), 'aiida_vasp.utils.workchains.compose_exit_code', 'compose_exit_code', (['next_workchain_exit_status', 'next_workchain_exit_message'], {}), '(next_workchain_exit_status, next_workchain_exit_message)\n', (5003, 5060), False, 'from aiida_vasp.utils.workchains import prepare_process_inputs, compose_exit_code\n'), ((1136, 1157), 'aiida_vasp.utils.aiida_utils.get_data_class', 'get_data_class', (['"""int"""'], {}), "('int')\n", (1150, 1157), False, 'from aiida_vasp.utils.aiida_utils import get_data_class, get_data_node\n'), ((1221, 1244), 'aiida_vasp.utils.aiida_utils.get_data_node', 'get_data_node', (['"""int"""', '(1)'], {}), "('int', 1)\n", (1234, 1244), False, 'from aiida_vasp.utils.aiida_utils import get_data_class, get_data_node\n'), ((1692, 1723), 'aiida.engine.while_', 'while_', (['cls.run_next_workchains'], {}), '(cls.run_next_workchains)\n', (1698, 1723), False, 'from aiida.engine import WorkChain, while_, append_\n'), ((3847, 3863), 'aiida.engine.append_', 'append_', (['running'], {}), '(running)\n', (3854, 3863), False, 'from aiida.engine import WorkChain, while_, append_\n')] |
#!/usr/bin/env python
"""
Cubic spline peak finder.
Hazen 03/16
"""
import pickle
import numpy
import tifffile
import storm_analysis.sa_library.analysis_io as analysisIO
import storm_analysis.sa_library.fitting as fitting
import storm_analysis.sa_library.ia_utilities_c as utilC
import storm_analysis.sa_library.matched_filter_c as matchedFilterC
import storm_analysis.spliner.cubic_fit_c as cubicFitC
import storm_analysis.spliner.spline_to_psf as splineToPSF
def initFitter(finder, parameters, spline_fn):
"""
Initialize and return a cubicFitC.CSplineFit object.
"""
# Load variance, scale by gain.
#
# Offset is in units of ADU.
# Variance is in units of ADU*ADU.
# Gain is ADU/photo-electron.
# RQE is dimensionless, it should be around 1.0.
#
rqe = None
variance = None
if parameters.hasAttr("camera_calibration"):
[offset, variance, gain, rqe] = analysisIO.loadCMOSCalibration(parameters.getAttr("camera_calibration"))
variance = variance/(gain*gain)
# Set variance in the peak finder, this method also pads the
# variance to the correct size.
variance = finder.setVariance(variance)
# Pad relative quantum efficiency array to the correct size.
rqe = finder.padArray(rqe)
# Create C fitter object.
mfitter = None
kwds = {'rqe' : rqe,
'scmos_cal' : variance,
'spline_fn' : spline_fn}
emodel = parameters.getAttr("fit_error_model")
if (spline_fn.getType() == "2D"):
if (emodel == "MLE"):
mfitter = cubicFitC.CSpline2DFit(**kwds)
else:
if (emodel == "MLE"):
return cubicFitC.CSpline3DFit(**kwds)
elif (emodel == "ALS"):
return cubicFitC.CSpline3DFitALS(**kwds)
elif (emodel == "LS"):
return cubicFitC.CSpline3DFitLS(**kwds)
elif (emodel == "FWLS"):
return cubicFitC.CSpline3DFitFWLS(**kwds)
if mfitter is None:
raise Exception("Request error model is not available. " + emodel)
return mfitter
def initFindAndFit(parameters):
    """
    Initialize and return a SplinerFinderFitter object.
    """
    # PSF model from the spline file.
    psf_model = splineToPSF.loadSpline(parameters.getAttr("spline"))

    # Peak finder built around the PSF model.
    peak_finder = fitting.PeakFinderArbitraryPSF(parameters = parameters,
                                                 psf_object = psf_model)

    # C-level cubic spline fitter.
    c_fitter = initFitter(peak_finder, parameters, psf_model)

    # Peak fitter wrapping the C fitter.
    peak_fitter = fitting.PeakFitterArbitraryPSF(mfitter = c_fitter,
                                                parameters = parameters)

    # Localization properties to extract from the analysis.
    wanted_properties = ["background", "error", "height", "iterations",
                         "significance", "sum", "x", "y", "z"]

    return fitting.PeakFinderFitter(peak_finder = peak_finder,
                                    peak_fitter = peak_fitter,
                                    properties = wanted_properties)
| [
"storm_analysis.spliner.cubic_fit_c.CSpline2DFit",
"storm_analysis.sa_library.fitting.PeakFinderArbitraryPSF",
"storm_analysis.spliner.cubic_fit_c.CSpline3DFitFWLS",
"storm_analysis.spliner.cubic_fit_c.CSpline3DFit",
"storm_analysis.sa_library.fitting.PeakFitterArbitraryPSF",
"storm_analysis.sa_library.fi... | [((2356, 2431), 'storm_analysis.sa_library.fitting.PeakFinderArbitraryPSF', 'fitting.PeakFinderArbitraryPSF', ([], {'parameters': 'parameters', 'psf_object': 'spline_fn'}), '(parameters=parameters, psf_object=spline_fn)\n', (2386, 2431), True, 'import storm_analysis.sa_library.fitting as fitting\n'), ((2623, 2693), 'storm_analysis.sa_library.fitting.PeakFitterArbitraryPSF', 'fitting.PeakFitterArbitraryPSF', ([], {'mfitter': 'mfitter', 'parameters': 'parameters'}), '(mfitter=mfitter, parameters=parameters)\n', (2653, 2693), True, 'import storm_analysis.sa_library.fitting as fitting\n'), ((2920, 3012), 'storm_analysis.sa_library.fitting.PeakFinderFitter', 'fitting.PeakFinderFitter', ([], {'peak_finder': 'finder', 'peak_fitter': 'fitter', 'properties': 'properties'}), '(peak_finder=finder, peak_fitter=fitter, properties\n =properties)\n', (2944, 3012), True, 'import storm_analysis.sa_library.fitting as fitting\n'), ((1589, 1619), 'storm_analysis.spliner.cubic_fit_c.CSpline2DFit', 'cubicFitC.CSpline2DFit', ([], {}), '(**kwds)\n', (1611, 1619), True, 'import storm_analysis.spliner.cubic_fit_c as cubicFitC\n'), ((1679, 1709), 'storm_analysis.spliner.cubic_fit_c.CSpline3DFit', 'cubicFitC.CSpline3DFit', ([], {}), '(**kwds)\n', (1701, 1709), True, 'import storm_analysis.spliner.cubic_fit_c as cubicFitC\n'), ((1761, 1794), 'storm_analysis.spliner.cubic_fit_c.CSpline3DFitALS', 'cubicFitC.CSpline3DFitALS', ([], {}), '(**kwds)\n', (1786, 1794), True, 'import storm_analysis.spliner.cubic_fit_c as cubicFitC\n'), ((1845, 1877), 'storm_analysis.spliner.cubic_fit_c.CSpline3DFitLS', 'cubicFitC.CSpline3DFitLS', ([], {}), '(**kwds)\n', (1869, 1877), True, 'import storm_analysis.spliner.cubic_fit_c as cubicFitC\n'), ((1930, 1964), 'storm_analysis.spliner.cubic_fit_c.CSpline3DFitFWLS', 'cubicFitC.CSpline3DFitFWLS', ([], {}), '(**kwds)\n', (1956, 1964), True, 'import storm_analysis.spliner.cubic_fit_c as cubicFitC\n')] |
###
# A script to convert the Services-consumable feeSchedules.json
# into the "typed" format used by the public pricing calculator.
###
import json

# Fee data is listed separately for each of these resource providers.
providers = ['nodedata', 'networkdata', 'servicedata']

with open('hedera-node/src/main/resources/feeSchedules.json', 'r') as fin:
    cur_and_next_schedules = json.load(fin)

typed_schedules = {}
# Only the current schedule is converted; the trailing expiryTime marker
# (and anything after it) is skipped.
for entry in cur_and_next_schedules[0]['currentFeeSchedule']:
    if 'expiryTime' in entry:
        break
    schedule = entry['transactionFeeSchedule']
    typed_schedules[schedule['hederaFunctionality']] = {
        typed_prices.get('subType', 'DEFAULT'): {
            provider: typed_prices[provider] for provider in providers
        }
        for typed_prices in schedule['fees']
    }

with open('typedFeeSchedules.json', 'w') as fout:
    json.dump(typed_schedules, fout, indent=2)
| [
"json.load",
"json.dump"
] | [((334, 348), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (343, 348), False, 'import json\n'), ((1049, 1091), 'json.dump', 'json.dump', (['typed_schedules', 'fout'], {'indent': '(2)'}), '(typed_schedules, fout, indent=2)\n', (1058, 1091), False, 'import json\n')] |
import requests, os, sys, numpy, requests
from plexapi.server import PlexServer
from tqdm import tqdm
#
from howdy.core import core, return_error_raw
def get_tautulli_apikey( username, password, endpoint ):
    """
    Gets the tautulli API key with provided Tautulli_ username and password.
    :param str username: the Tautulli_ username.
    :param str password: the Tautulli_ password.
    :param str endpoint: the Tautulli_ server endpoint.
    :returns: the Tautulli_ API key.
    :rtype: str
    :raises ValueError: if the server does not answer with HTTP 200.
    .. _Tautulli: https://tautulli.com
    """
    full_url = os.path.join( endpoint, 'api', 'v2' )
    #
    ## follow this reference: https://github.com/Tautulli/Tautulli/wiki/Tautulli-API-Reference#get_apikey
    # NOTE(review): credentials are sent as URL query parameters, so they may
    # end up in server access logs -- confirm this matches the Tautulli API.
    response = requests.get( full_url,
                             params = {
                                 'username' : username,
                                 'password' : password,
                                 'cmd' : 'get_apikey' } )
    if response.status_code != 200:
        raise ValueError("Error, could not find the Tautulli API key.")
    return response.json( )[ 'response' ][ 'data' ]
def get_tautulli_activity( endpoint, apikey ):
    """
    Gets the activity on the Plex_ server (using Tautulli_).
    :param str endpoint: the Tautulli_ server endpoint.
    :param str apikey: the Tautulli_ API Key.
    :returns: a :py:class:`list` of :py:class:`dict` describing the live
      sessions (empty when nothing is streaming).
    :rtype: list
    """
    api_url = os.path.join( endpoint, 'api', 'v2' )
    #
    ## see https://github.com/Tautulli/Tautulli/wiki/Tautulli-API-Reference#get_activity
    response = requests.get(
        api_url, params = { 'apikey' : apikey, 'cmd' : 'get_activity' } )
    if response.status_code != 200:
        raise ValueError("Error, could not get the activity from the Plex server.")
    #
    ## unwrap the payload
    data = response.json( )[ 'response' ][ 'data' ]
    if data[ 'stream_count' ] == 0:
        return [ ]
    #
    ## collect the interesting fields for every live session
    sessions = [ ]
    for session_info in data[ 'sessions' ]:
        entry = {
            'title' : session_info[ 'title' ],
            'type' : session_info[ 'media_type' ].upper( ),
            'username' : session_info[ 'username' ],
            'progress' : int( session_info[ 'progress_percent' ] ) }
        if 'friendly_name' in session_info:
            entry[ 'friendly name' ] = session_info[ 'friendly_name' ]
        sessions.append( entry )
    return sessions
def plex_check_for_update( token, fullURL = 'http://localhost:32400' ):
    """
    Determines whether there are any new Plex_ server releases.
    :param str token: the Plex_ server access token.
    :param str fullURL: the Plex_ server address.
    :returns: on success, a :py:class:`tuple` of the
      :py:class:`Release <plexapi.server.Release>` and "SUCCESS"; on failure,
      the :py:class:`tuple` produced by
      :py:meth:`return_error_raw <howdy.core.return_error_raw>`.
    :rtype: tuple
    .. _Plex: https://plex.tv
    """
    try:
        plex_server = PlexServer( fullURL, token )
        return plex_server.checkForUpdate( ), "SUCCESS"
    except Exception as exc:
        return return_error_raw( str( exc ) )
def plex_download_release( release, destination_dir = None, do_progress = False ):
    """
    Downloads the Plex_ update into a specific directory, with optional progress bar.
    :param release: the :py:class:`Release <plexapi.server.Release>` containing the Plex_ update information.
    :type release: :py:class:`Release <plexapi.server.Release>`
    :param str destination_dir: the destination directory into which to download.
       If ``None`` (the default), the current working directory *at call time* is used.
    :param bool do_progress: whether to show the progress bar or not. Default is ``False``.
    :returns: If unsuccessful an error message. If successful, the full path of the downloaded file.
    :rtype: str
    """
    #
    ## resolve the default here rather than in the signature: a default of
    ## os.getcwd( ) is evaluated once at import time and goes stale if the
    ## process later changes its working directory.
    if destination_dir is None:
        destination_dir = os.getcwd( )
    downloadURL = release.downloadURL
    response = requests.get( downloadURL, stream = True )
    if not response.ok:
        return "ERROR, %s IS NOT ACCESSIBLE" % downloadURL
    #
    ## destination of the PLEX download
    r2 = requests.head( downloadURL )
    if not r2.ok:
        return "ERROR, %s IS NOT ACCESSIBLE WITH REQUESTS.HEAD" % downloadURL
    destination = os.path.join( destination_dir, os.path.basename( r2.headers['Location'] ) )
    #
    ## following instructions from https://stackoverflow.com/a/37573701/3362358
    total_size_in_bytes = int(response.headers.get('content-length', 0))
    block_size = 1 << 16  # 64 KiB chunks
    if not do_progress:
        with open( destination, 'wb' ) as openfile:
            for chunk in response.iter_content( block_size ):
                openfile.write( chunk )
        return destination
    #
    ## have a progress bar
    with tqdm( total = total_size_in_bytes, unit='iB', unit_scale=True) as progress_bar, open( destination, 'wb' ) as openfile:
        for chunk in response.iter_content( block_size ):
            progress_bar.update(len(chunk))
            openfile.write( chunk )
    if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
        return "ERROR, something went wrong"
    return destination
| [
"plexapi.server.PlexServer",
"tqdm.tqdm",
"os.path.join",
"requests.get",
"os.getcwd",
"requests.head",
"os.path.basename"
] | [((572, 607), 'os.path.join', 'os.path.join', (['endpoint', '"""api"""', '"""v2"""'], {}), "(endpoint, 'api', 'v2')\n", (584, 607), False, 'import requests, os, sys, numpy, requests\n'), ((737, 837), 'requests.get', 'requests.get', (['full_url'], {'params': "{'username': username, 'password': password, 'cmd': 'get_apikey'}"}), "(full_url, params={'username': username, 'password': password,\n 'cmd': 'get_apikey'})\n", (749, 837), False, 'import requests, os, sys, numpy, requests\n'), ((1375, 1410), 'os.path.join', 'os.path.join', (['endpoint', '"""api"""', '"""v2"""'], {}), "(endpoint, 'api', 'v2')\n", (1387, 1410), False, 'import requests, os, sys, numpy, requests\n'), ((1542, 1614), 'requests.get', 'requests.get', (['full_url'], {'params': "{'apikey': apikey, 'cmd': 'get_activity'}"}), "(full_url, params={'apikey': apikey, 'cmd': 'get_activity'})\n", (1554, 1614), False, 'import requests, os, sys, numpy, requests\n'), ((3251, 3262), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3260, 3262), False, 'import requests, os, sys, numpy, requests\n'), ((3914, 3952), 'requests.get', 'requests.get', (['downloadURL'], {'stream': '(True)'}), '(downloadURL, stream=True)\n', (3926, 3952), False, 'import requests, os, sys, numpy, requests\n'), ((4095, 4121), 'requests.head', 'requests.head', (['downloadURL'], {}), '(downloadURL)\n', (4108, 4121), False, 'import requests, os, sys, numpy, requests\n'), ((3021, 3047), 'plexapi.server.PlexServer', 'PlexServer', (['fullURL', 'token'], {}), '(fullURL, token)\n', (3031, 3047), False, 'from plexapi.server import PlexServer\n'), ((4269, 4309), 'os.path.basename', 'os.path.basename', (["r2.headers['Location']"], {}), "(r2.headers['Location'])\n", (4285, 4309), False, 'import requests, os, sys, numpy, requests\n'), ((4749, 4808), 'tqdm.tqdm', 'tqdm', ([], {'total': 'total_size_in_bytes', 'unit': '"""iB"""', 'unit_scale': '(True)'}), "(total=total_size_in_bytes, unit='iB', unit_scale=True)\n", (4753, 4808), False, 'from tqdm import 
tqdm\n')] |
from __future__ import print_function
import datetime
import hashlib
import logging
from abc import ABCMeta
from pynamodb.attributes import UnicodeAttribute
from pynamodb.models import Model
from halolib.exceptions import DbIdemError
from halolib.logs import log_json
from .settingsx import settingsx
# Project configuration object; DB settings below are read from it.
settings = settingsx()
# Commands to launch a local DynamoDB instance for development:
# java -Djava.library.path=./DynamoDBLocal_lib -jar DynamoDBLocal.jar -sharedDb -port 8600
# java -D"java.library.path"=./DynamoDBLocal_lib -jar DynamoDBLocal.jar -sharedDb -port 8600
logger = logging.getLogger(__name__)
ver = settings.DB_VER
uri = settings.DB_URL
tbl = False  # NOTE(review): module-level flag, never set elsewhere in this view -- confirm use
page_size = settings.PAGE_SIZE
class AbsDbMixin(object):
    """Mixin that transparently times method calls and logs them as telemetry.

    ``__getattribute__`` wraps *every* callable attribute on the fly (not only
    DB calls) so each invocation's wall-clock duration is emitted through
    ``log_json`` with type ``DBACCESS``.
    """
    __metaclass__ = ABCMeta  # NOTE(review): Python-2 metaclass syntax; ignored on Python 3
    # intercept db calls
    req_context = None
    def __init__(self, req_context):
        # Request context that is attached to every performance log record.
        self.req_context = req_context
    def __getattribute__(self, name):
        """Wrap callable attributes with timing + logging; pass others through."""
        attr = object.__getattribute__(self, name)
        if hasattr(attr, '__call__'):
            def newfunc(*args, **kwargs):
                now = datetime.datetime.now()
                result = attr(*args, **kwargs)
                total = datetime.datetime.now() - now
                # Report the elapsed time in milliseconds and the wrapped
                # function's name.
                logger.info("performance_data", extra=log_json(self.req_context,
                                                               {"type": "DBACCESS",
                                                                "milliseconds": int(total.total_seconds() * 1000),
                                                                "function": str(attr.__name__)}))
                return result
            return newfunc
        else:
            return attr
class AbsModel(Model):
    """Abstract PynamoDB model that enforces idempotent writes.

    ``save``/``update`` stamp each item with an idempotency id derived from
    the request id plus the item's key attributes, and condition the write on
    ``halo_request_id`` not already existing on the item.
    """
    __metaclass__ = ABCMeta  # NOTE(review): Python-2 metaclass syntax; ignored on Python 3
    # Idempotency stamp (MD5 hex digest), written on every save/update.
    halo_request_id = UnicodeAttribute(null=False)
    @classmethod
    def get_pre(cls):
        """
        Look up the attribute names of the table's hash and range keys.

        :return: tuple ``(hash_key_name, range_key_name)``; ``range_key_name``
            is ``None`` when the table defines no range key.
        """
        hash_key_name = super(AbsModel, cls)._hash_key_attribute().attr_name
        range_key_name = None
        attr = super(AbsModel, cls)._range_key_attribute()
        if attr:
            range_key_name = attr.attr_name
        logger.debug("\nhash_key_name=" + str(hash_key_name))
        logger.debug("\nrange_key_name=" + str(range_key_name))
        return hash_key_name, range_key_name
    def get_pre_val(self):
        """
        Read this instance's hash- and range-key values.

        :return: tuple ``(hash_key_val, range_key_val)``; ``range_key_val``
            is ``None`` when the table defines no range key.
        """
        hash_key_name, range_key_name = self.get_pre()
        hash_key_val = super(AbsModel, self).__getattribute__(hash_key_name)
        range_key_val = None
        if range_key_name:
            range_key_val = super(AbsModel, self).__getattribute__(range_key_name)
        logger.debug("\nhash_key_name=" + hash_key_name + "=" + str(hash_key_val))
        if range_key_val:
            logger.debug("\nrange_key_val=" + range_key_name + "=" + str(range_key_val))
        return hash_key_val, range_key_val
    def get_idempotent_id(self, halo_request_id):  # return fixed size id of 128 bit hash value
        """
        Derive a deterministic idempotency id for this item and request.

        The id is the MD5 hex digest of the request id concatenated with a
        string built from the request id and the item's key values, so the
        same request against the same item always yields the same id.

        :param halo_request_id: unique id of the originating request.
        :return: 32-character hex digest.
        :raises DbIdemError: if ``halo_request_id`` is ``None`` or empty.
        """
        if halo_request_id is None or halo_request_id == "":
            raise DbIdemError("empty request id")
        hash_key_val, range_key_val = self.get_pre_val()
        request_id = halo_request_id + "-" + str(hash_key_val)
        if range_key_val:
            request_id = request_id + "-" + str(range_key_val)
        idempotent_id = hashlib.md5(halo_request_id.encode() + request_id.encode()).hexdigest()
        return idempotent_id
    def save(self, halo_request_id, condition=None, conditional_operator=None, **expected_values):
        """
        Save the item, refusing to overwrite an already-stamped record.

        The caller's ``condition`` (if any) is AND-ed with
        ``halo_request_id.does_not_exist()`` so that replaying the same
        request cannot apply the write twice.

        :param halo_request_id: unique id of the originating request.
        :param condition: optional extra PynamoDB condition.
        :param conditional_operator: passed through to PynamoDB.
        :param expected_values: passed through to PynamoDB.
        :return: result of :meth:`pynamodb.models.Model.save`.
        """
        if condition is None:
            condition = AbsModel.halo_request_id.does_not_exist()
        else:
            condition = condition & (AbsModel.halo_request_id.does_not_exist())
        self.halo_request_id = self.get_idempotent_id(halo_request_id)
        return super(AbsModel, self).save(condition, conditional_operator, **expected_values)
    def update(self, halo_request_id, attributes=None, actions=None, condition=None, conditional_operator=None,
               **expected_values):
        """
        Update the item with the same idempotency guard as :meth:`save`.

        :param halo_request_id: unique id of the originating request.
        :param attributes: attribute dict passed through to PynamoDB.
        :param actions: update actions passed through to PynamoDB.
        :param condition: optional extra PynamoDB condition.
        :param conditional_operator: passed through to PynamoDB.
        :param expected_values: passed through to PynamoDB.
        :return: result of :meth:`pynamodb.models.Model.update`.
        """
        if condition is None:
            condition = AbsModel.halo_request_id.does_not_exist()
        else:
            condition = condition & (AbsModel.halo_request_id.does_not_exist())
        self.halo_request_id = self.get_idempotent_id(halo_request_id)
        return super(AbsModel, self).update(attributes, actions, condition, conditional_operator, **expected_values)
| [
"logging.getLogger",
"datetime.datetime.now",
"pynamodb.attributes.UnicodeAttribute",
"halolib.exceptions.DbIdemError"
] | [((523, 550), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (540, 550), False, 'import logging\n'), ((1678, 1706), 'pynamodb.attributes.UnicodeAttribute', 'UnicodeAttribute', ([], {'null': '(False)'}), '(null=False)\n', (1694, 1706), False, 'from pynamodb.attributes import UnicodeAttribute\n'), ((3019, 3050), 'halolib.exceptions.DbIdemError', 'DbIdemError', (['"""empty request id"""'], {}), "('empty request id')\n", (3030, 3050), False, 'from halolib.exceptions import DbIdemError\n'), ((1013, 1036), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1034, 1036), False, 'import datetime\n'), ((1108, 1131), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1129, 1131), False, 'import datetime\n')] |
# -*- coding: utf-8 -*-
"""Python lager brewed by a loguru"""
import asyncio
from functools import wraps
from time import time
from typing import Union
from loguru import logger
from lager.const import LOG_LEVELS
__all__ = ['loglevel', 'flog', 'handlers', 'logger', 'log', 'LOG', 'ln', 'LN']
# Single-letter shortcuts for the loguru level methods,
# e.g. logger.t(...) == logger.trace(...).
logger.t = logger.trace
logger.d = logger.debug
logger.i = logger.info
logger.s = logger.success
logger.w = logger.warning
logger.e = logger.error
logger.c = logger.critical
# commonly used dgpy aliases
log = logger
LOG = logger
# ln => natural log
ln = logger
LN = logger
def loglevel(level: Union[str, int]) -> str:
    """Normalize a log-level abbreviation into a valid loguru level name."""
    key = str(level).strip("'").strip('"').lower()
    return LOG_LEVELS[key]
def flog(funk=None, level="debug", enter=True, exit=True):
    """Log function (sync/async) enter and exit using this decorator
    Args:
        funk (Callable): Function to decorate
        level (Union[int, str]): Log level
        enter (bool): Log function entry if True
        exit (bool): Log function exit if True
    Returns:
        A wrapped function that now has logging!
    Usage:
        # SYNC
        @flog
        def add(a, b):
            return a + b
        add(1, 4)
        # ASYNC
        @flog
        async def add_async(a, b):
            return a + b
        import asyncio
        asyncio.run(add_async(1, 4))
    """

    def _flog(funk):
        name = funk.__name__

        @wraps(funk)
        def _flog_decorator(*args, **kwargs):
            # depth=1 attributes the log record to the caller of the
            # decorated function rather than to this wrapper frame.
            logger_ = logger.opt(depth=1)
            if enter:
                logger_.log(
                    loglevel(level),
                    "FLOG-ENTER > '{}' (args={}, kwargs={})",
                    name,
                    args,
                    kwargs,
                )
            ti = time()
            result = funk(*args, **kwargs)
            tf = time()
            if exit:
                logger_.log(
                    loglevel(level),
                    "FLOG-EXIT < '{}' (return={}, dt_sec={})",
                    name,
                    result,
                    tf - ti,
                )
            return result

        @wraps(funk)
        async def _flog_decorator_async(*args, **kwargs):
            # NOTE(review): the async path uses depth=7 while the sync path
            # uses depth=1 -- confirm this offset is intended.
            logger_ = logger.opt(depth=7)
            if enter:
                logger_.log(
                    loglevel(level),
                    "FLOG-ENTER > '{}' (args={}, kwargs={})",
                    name,
                    args,
                    kwargs,
                )
            ti = time()
            result = await funk(*args, **kwargs)
            tf = time()
            if exit:
                logger_.log(
                    loglevel(level),
                    "FLOG-EXIT < '{}' (return={}, dt_sec={})",
                    name,
                    result,
                    tf - ti,
                )
            return result

        # Pick the async wrapper for coroutine functions, sync otherwise.
        if asyncio.iscoroutinefunction(funk) or asyncio.iscoroutine(funk):
            return _flog_decorator_async
        return _flog_decorator

    # Support both bare '@flog' (funk given) and parameterized
    # '@flog(level=...)' (funk is None, return the decorator).
    return _flog(funk) if funk else _flog
def handlers():
    """Return the handlers registered on the global loguru logger.

    NOTE(review): reaches into the private ``logger._core`` attribute, so
    this may break across loguru versions.
    """
    return logger._core.handlers
| [
"asyncio.iscoroutinefunction",
"functools.wraps",
"loguru.logger.opt",
"time.time",
"asyncio.iscoroutine"
] | [((1463, 1474), 'functools.wraps', 'wraps', (['funk'], {}), '(funk)\n', (1468, 1474), False, 'from functools import wraps\n'), ((2189, 2200), 'functools.wraps', 'wraps', (['funk'], {}), '(funk)\n', (2194, 2200), False, 'from functools import wraps\n'), ((1543, 1562), 'loguru.logger.opt', 'logger.opt', ([], {'depth': '(1)'}), '(depth=1)\n', (1553, 1562), False, 'from loguru import logger\n'), ((1828, 1834), 'time.time', 'time', ([], {}), '()\n', (1832, 1834), False, 'from time import time\n'), ((1895, 1901), 'time.time', 'time', ([], {}), '()\n', (1899, 1901), False, 'from time import time\n'), ((2281, 2300), 'loguru.logger.opt', 'logger.opt', ([], {'depth': '(7)'}), '(depth=7)\n', (2291, 2300), False, 'from loguru import logger\n'), ((2566, 2572), 'time.time', 'time', ([], {}), '()\n', (2570, 2572), False, 'from time import time\n'), ((2639, 2645), 'time.time', 'time', ([], {}), '()\n', (2643, 2645), False, 'from time import time\n'), ((2935, 2968), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['funk'], {}), '(funk)\n', (2962, 2968), False, 'import asyncio\n'), ((2972, 2997), 'asyncio.iscoroutine', 'asyncio.iscoroutine', (['funk'], {}), '(funk)\n', (2991, 2997), False, 'import asyncio\n')] |
# -*- coding: utf-8 -*-
"""
@date Created on Wed Jan 13 17:45:15 2016
@copyright (C) 2015-2016 EOMYS ENGINEERING.
@author pierre_b
"""
from os.path import join
from unittest import TestCase
import matplotlib.pyplot as plt
from numpy import pi
from pyleecan.Classes.Frame import Frame
from pyleecan.Classes.LamHole import LamHole
from pyleecan.Classes.Lamination import Lamination
from pyleecan.Classes.Machine import Machine
from pyleecan.Classes.Magnet import Magnet
from pyleecan.Classes.Shaft import Shaft
from pyleecan.Classes.MatLamination import MatLamination
from pyleecan.Classes.HoleM54 import HoleM54
from pyleecan.Tests.Plot import save_path
class test_Hole_54_plot(TestCase):
    """unittest for Lamination with Hole plot"""

    def test_Lam_Hole_54_plot(self):
        """Test machine plot hole 54"""
        plt.close("all")
        machine = Machine()
        machine.rotor = LamHole(
            is_internal=True, Rint=0.1, Rext=0.2, is_stator=False, L1=0.7
        )
        machine.rotor.hole = [
            HoleM54(Zh=8, W0=pi / 4, H0=50e-3, H1=10e-3, R1=100e-3),
            HoleM54(Zh=8, W0=pi / 6, H0=25e-3, H1=10e-3, R1=100e-3),
        ]

        # Full rotor lamination: expect 18 patches in the figure
        machine.rotor.plot()
        fig = plt.gcf()
        fig.savefig(join(save_path, "test_Lam_Hole_s54-Rotor.png"))
        self.assertEqual(len(fig.axes[0].patches), 18)

        # A single hole renders as one patch
        machine.rotor.hole[0].plot()
        fig = plt.gcf()
        fig.savefig(join(save_path, "test_Lam_Hole_s54-Rotor hole.png"))
        self.assertEqual(len(fig.axes[0].patches), 1)
| [
"pyleecan.Classes.HoleM54.HoleM54",
"matplotlib.pyplot.gcf",
"os.path.join",
"pyleecan.Classes.LamHole.LamHole",
"matplotlib.pyplot.close",
"pyleecan.Classes.Machine.Machine"
] | [((829, 845), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (838, 845), True, 'import matplotlib.pyplot as plt\n'), ((865, 874), 'pyleecan.Classes.Machine.Machine', 'Machine', ([], {}), '()\n', (872, 874), False, 'from pyleecan.Classes.Machine import Machine\n'), ((900, 970), 'pyleecan.Classes.LamHole.LamHole', 'LamHole', ([], {'is_internal': '(True)', 'Rint': '(0.1)', 'Rext': '(0.2)', 'is_stator': '(False)', 'L1': '(0.7)'}), '(is_internal=True, Rint=0.1, Rext=0.2, is_stator=False, L1=0.7)\n', (907, 970), False, 'from pyleecan.Classes.LamHole import LamHole\n'), ((1303, 1312), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1310, 1312), True, 'import matplotlib.pyplot as plt\n'), ((1489, 1498), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1496, 1498), True, 'import matplotlib.pyplot as plt\n'), ((1078, 1128), 'pyleecan.Classes.HoleM54.HoleM54', 'HoleM54', ([], {'Zh': '(8)', 'W0': '(pi / 4)', 'H0': '(0.05)', 'H1': '(0.01)', 'R1': '(0.1)'}), '(Zh=8, W0=pi / 4, H0=0.05, H1=0.01, R1=0.1)\n', (1085, 1128), False, 'from pyleecan.Classes.HoleM54 import HoleM54\n'), ((1192, 1243), 'pyleecan.Classes.HoleM54.HoleM54', 'HoleM54', ([], {'Zh': '(8)', 'W0': '(pi / 6)', 'H0': '(0.025)', 'H1': '(0.01)', 'R1': '(0.1)'}), '(Zh=8, W0=pi / 6, H0=0.025, H1=0.01, R1=0.1)\n', (1199, 1243), False, 'from pyleecan.Classes.HoleM54 import HoleM54\n'), ((1333, 1379), 'os.path.join', 'join', (['save_path', '"""test_Lam_Hole_s54-Rotor.png"""'], {}), "(save_path, 'test_Lam_Hole_s54-Rotor.png')\n", (1337, 1379), False, 'from os.path import join\n'), ((1519, 1570), 'os.path.join', 'join', (['save_path', '"""test_Lam_Hole_s54-Rotor hole.png"""'], {}), "(save_path, 'test_Lam_Hole_s54-Rotor hole.png')\n", (1523, 1570), False, 'from os.path import join\n')] |
# Copyright (c) 2017 RedHat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutronclient.common import exceptions as n_exc
from kuryr_kubernetes.controller.drivers import public_ip\
as d_public_ip
from kuryr_kubernetes.tests import base as test_base
from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix
class TestFipPubIpDriver(test_base.TestCase):
    """Unit tests for the floating-IP based public-IP driver."""

    def test_is_ip_available_none_param(self):
        """A None address resolves to no floating-ip id."""
        cls = d_public_ip.FipPubIpDriver
        m_driver = mock.Mock(spec=cls)

        fip_ip_addr = None
        fip_id = cls.is_ip_available(m_driver, fip_ip_addr)
        self.assertIsNone(fip_id)

    def test_is_ip_available_empty_param(self):
        """An empty-string address resolves to no floating-ip id."""
        cls = d_public_ip.FipPubIpDriver
        m_driver = mock.Mock(spec=cls)

        # Fixed: this test previously duplicated the None case above;
        # it is meant to cover the empty-string input.
        fip_ip_addr = ''
        fip_id = cls.is_ip_available(m_driver, fip_ip_addr)
        self.assertIsNone(fip_id)

    def test_is_ip_available_ip_not_exist(self):
        """An address absent from the FIP listing resolves to no id."""
        cls = d_public_ip.FipPubIpDriver
        m_driver = mock.Mock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client
        floating_ip = {'floating_ip_address': '1.2.3.4', 'port_id': None,
                       'id': 'a2a62ea7-e3bf-40df-8c09-aa0c29876a6b'}
        neutron.list_floatingips.return_value = {'floatingips': [floating_ip]}

        fip_ip_addr = '1.1.1.1'
        fip_id = cls.is_ip_available(m_driver, fip_ip_addr)
        self.assertIsNone(fip_id)

    def test_is_ip_available_empty_fip_list(self):
        """An empty FIP listing resolves to no id."""
        cls = d_public_ip.FipPubIpDriver
        m_driver = mock.Mock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client
        floating_ip = None
        neutron.list_floatingips.return_value = {'floatingips': [floating_ip]}

        fip_ip_addr = '1.1.1.1'
        fip_id = cls.is_ip_available(m_driver, fip_ip_addr)
        self.assertIsNone(fip_id)

    def test_is_ip_available_occupied_fip(self):
        """A FIP already bound to a port is not available."""
        cls = d_public_ip.FipPubIpDriver
        m_driver = mock.Mock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client
        floating_ip = {'floating_ip_address': '1.2.3.4',
                       'port_id': 'ec29d641-fec4-4f67-928a-124a76b3a8e6'}
        neutron.list_floatingips.return_value = {'floatingips': [floating_ip]}

        fip_ip_addr = '1.2.3.4'
        fip_id = cls.is_ip_available(m_driver, fip_ip_addr)
        self.assertIsNone(fip_id)

    def test_is_ip_available_ip_exist_and_available(self):
        """An unbound FIP matching the address yields its id."""
        cls = d_public_ip.FipPubIpDriver
        m_driver = mock.Mock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client
        floating_ip = {'floating_ip_address': '1.2.3.4', 'port_id': None,
                       'id': 'a2a62ea7-e3bf-40df-8c09-aa0c29876a6b'}
        neutron.list_floatingips.return_value = {'floatingips': [floating_ip]}

        fip_ip_addr = '1.2.3.4'
        fip_id = cls.is_ip_available(m_driver, fip_ip_addr)
        self.assertEqual(fip_id, 'a2a62ea7-e3bf-40df-8c09-aa0c29876a6b')

    def test_allocate_ip_all_green(self):
        """Successful allocation returns the new FIP's id and address."""
        cls = d_public_ip.FipPubIpDriver
        m_driver = mock.Mock(spec=cls)

        pub_net_id = mock.sentinel.pub_net_id
        pub_subnet_id = mock.sentinel.pub_subnet_id
        project_id = mock.sentinel.project_id
        description = mock.sentinel.description

        neutron = self.useFixture(k_fix.MockNeutronClient()).client
        floating_ip = {'floating_ip_address': '1.2.3.5',
                       'id': 'ec29d641-fec4-4f67-928a-124a76b3a888'}
        neutron.create_floatingip.return_value = {'floatingip': floating_ip}

        fip_id, fip_addr = cls.allocate_ip(
            m_driver, pub_net_id, project_id, pub_subnet_id, description)
        self.assertEqual(fip_id, floating_ip['id'])
        self.assertEqual(fip_addr, floating_ip['floating_ip_address'])

    def test_allocate_ip_neutron_exception(self):
        """Neutron failures during allocation are propagated."""
        cls = d_public_ip.FipPubIpDriver
        m_driver = mock.Mock(spec=cls)

        pub_net_id = mock.sentinel.pub_net_id
        pub_subnet_id = mock.sentinel.pub_subnet_id
        project_id = mock.sentinel.project_id
        description = mock.sentinel.description

        neutron = self.useFixture(k_fix.MockNeutronClient()).client
        neutron.create_floatingip.side_effect = n_exc.NeutronClientException

        self.assertRaises(
            n_exc.NeutronClientException, cls.allocate_ip,
            m_driver, pub_net_id, project_id, pub_subnet_id, description)

    def test_free_ip_neutron_exception(self):
        """Neutron failures while releasing a FIP report False."""
        cls = d_public_ip.FipPubIpDriver
        m_driver = mock.Mock(spec=cls)
        res_id = mock.sentinel.res_id

        neutron = self.useFixture(k_fix.MockNeutronClient()).client
        neutron.delete_floatingip.side_effect = n_exc.NeutronClientException
        rc = cls.free_ip(m_driver, res_id)
        self.assertEqual(rc, False)

    def test_free_ip_succeeded(self):
        """Releasing a FIP reports True when neutron succeeds."""
        cls = d_public_ip.FipPubIpDriver
        m_driver = mock.Mock(spec=cls)
        res_id = mock.sentinel.res_id

        neutron = self.useFixture(k_fix.MockNeutronClient()).client
        neutron.delete_floatingip.return_value = None
        rc = cls.free_ip(m_driver, res_id)
        self.assertEqual(rc, True)

    def test_associate_neutron_exception(self):
        """Neutron failures during association yield None."""
        cls = d_public_ip.FipPubIpDriver
        m_driver = mock.Mock(spec=cls)
        res_id = mock.sentinel.res_id
        vip_port_id = mock.sentinel.vip_port_id

        neutron = self.useFixture(k_fix.MockNeutronClient()).client
        neutron.update_floatingip.side_effect = n_exc.NeutronClientException
        retcode = cls.associate(m_driver, res_id, vip_port_id)
        self.assertIsNone(retcode)

    def test_associate_succeeded(self):
        """Successful association yields None."""
        cls = d_public_ip.FipPubIpDriver
        m_driver = mock.Mock(spec=cls)
        res_id = mock.sentinel.res_id
        vip_port_id = mock.sentinel.vip_port_id

        neutron = self.useFixture(k_fix.MockNeutronClient()).client
        neutron.update_floatingip.return_value = None
        retcode = cls.associate(m_driver, res_id, vip_port_id)
        self.assertIsNone(retcode)

    def test_disassociate_neutron_exception(self):
        """Neutron failures during disassociation yield None."""
        cls = d_public_ip.FipPubIpDriver
        m_driver = mock.Mock(spec=cls)
        res_id = mock.sentinel.res_id

        neutron = self.useFixture(k_fix.MockNeutronClient()).client
        neutron.update_floatingip.side_effect = n_exc.NeutronClientException
        self.assertIsNone(cls.disassociate(m_driver, res_id))

    def test_disassociate_succeeded(self):
        """Successful disassociation yields None."""
        cls = d_public_ip.FipPubIpDriver
        m_driver = mock.Mock(spec=cls)
        res_id = mock.sentinel.res_id

        neutron = self.useFixture(k_fix.MockNeutronClient()).client
        neutron.update_floatingip.return_value = None
        self.assertIsNone(cls.disassociate(m_driver, res_id))
| [
"mock.Mock",
"kuryr_kubernetes.tests.unit.kuryr_fixtures.MockNeutronClient"
] | [((1047, 1066), 'mock.Mock', 'mock.Mock', ([], {'spec': 'cls'}), '(spec=cls)\n', (1056, 1066), False, 'import mock\n'), ((1298, 1317), 'mock.Mock', 'mock.Mock', ([], {'spec': 'cls'}), '(spec=cls)\n', (1307, 1317), False, 'import mock\n'), ((1550, 1569), 'mock.Mock', 'mock.Mock', ([], {'spec': 'cls'}), '(spec=cls)\n', (1559, 1569), False, 'import mock\n'), ((2100, 2119), 'mock.Mock', 'mock.Mock', ([], {'spec': 'cls'}), '(spec=cls)\n', (2109, 2119), False, 'import mock\n'), ((2532, 2551), 'mock.Mock', 'mock.Mock', ([], {'spec': 'cls'}), '(spec=cls)\n', (2541, 2551), False, 'import mock\n'), ((3076, 3095), 'mock.Mock', 'mock.Mock', ([], {'spec': 'cls'}), '(spec=cls)\n', (3085, 3095), False, 'import mock\n'), ((3656, 3675), 'mock.Mock', 'mock.Mock', ([], {'spec': 'cls'}), '(spec=cls)\n', (3665, 3675), False, 'import mock\n'), ((4493, 4512), 'mock.Mock', 'mock.Mock', ([], {'spec': 'cls'}), '(spec=cls)\n', (4502, 4512), False, 'import mock\n'), ((5119, 5138), 'mock.Mock', 'mock.Mock', ([], {'spec': 'cls'}), '(spec=cls)\n', (5128, 5138), False, 'import mock\n'), ((5501, 5520), 'mock.Mock', 'mock.Mock', ([], {'spec': 'cls'}), '(spec=cls)\n', (5510, 5520), False, 'import mock\n'), ((6018, 6037), 'mock.Mock', 'mock.Mock', ([], {'spec': 'cls'}), '(spec=cls)\n', (6027, 6037), False, 'import mock\n'), ((6469, 6488), 'mock.Mock', 'mock.Mock', ([], {'spec': 'cls'}), '(spec=cls)\n', (6478, 6488), False, 'import mock\n'), ((6909, 6928), 'mock.Mock', 'mock.Mock', ([], {'spec': 'cls'}), '(spec=cls)\n', (6918, 6928), False, 'import mock\n'), ((7306, 7325), 'mock.Mock', 'mock.Mock', ([], {'spec': 'cls'}), '(spec=cls)\n', (7315, 7325), False, 'import mock\n'), ((1604, 1629), 'kuryr_kubernetes.tests.unit.kuryr_fixtures.MockNeutronClient', 'k_fix.MockNeutronClient', ([], {}), '()\n', (1627, 1629), True, 'from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix\n'), ((2154, 2179), 'kuryr_kubernetes.tests.unit.kuryr_fixtures.MockNeutronClient', 'k_fix.MockNeutronClient', ([], 
{}), '()\n', (2177, 2179), True, 'from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix\n'), ((2586, 2611), 'kuryr_kubernetes.tests.unit.kuryr_fixtures.MockNeutronClient', 'k_fix.MockNeutronClient', ([], {}), '()\n', (2609, 2611), True, 'from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix\n'), ((3130, 3155), 'kuryr_kubernetes.tests.unit.kuryr_fixtures.MockNeutronClient', 'k_fix.MockNeutronClient', ([], {}), '()\n', (3153, 3155), True, 'from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix\n'), ((3903, 3928), 'kuryr_kubernetes.tests.unit.kuryr_fixtures.MockNeutronClient', 'k_fix.MockNeutronClient', ([], {}), '()\n', (3926, 3928), True, 'from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix\n'), ((4740, 4765), 'kuryr_kubernetes.tests.unit.kuryr_fixtures.MockNeutronClient', 'k_fix.MockNeutronClient', ([], {}), '()\n', (4763, 4765), True, 'from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix\n'), ((5212, 5237), 'kuryr_kubernetes.tests.unit.kuryr_fixtures.MockNeutronClient', 'k_fix.MockNeutronClient', ([], {}), '()\n', (5235, 5237), True, 'from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix\n'), ((5594, 5619), 'kuryr_kubernetes.tests.unit.kuryr_fixtures.MockNeutronClient', 'k_fix.MockNeutronClient', ([], {}), '()\n', (5617, 5619), True, 'from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix\n'), ((6159, 6184), 'kuryr_kubernetes.tests.unit.kuryr_fixtures.MockNeutronClient', 'k_fix.MockNeutronClient', ([], {}), '()\n', (6182, 6184), True, 'from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix\n'), ((6610, 6635), 'kuryr_kubernetes.tests.unit.kuryr_fixtures.MockNeutronClient', 'k_fix.MockNeutronClient', ([], {}), '()\n', (6633, 6635), True, 'from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix\n'), ((7002, 7027), 'kuryr_kubernetes.tests.unit.kuryr_fixtures.MockNeutronClient', 'k_fix.MockNeutronClient', ([], {}), '()\n', (7025, 7027), True, 'from kuryr_kubernetes.tests.unit import 
kuryr_fixtures as k_fix\n'), ((7399, 7424), 'kuryr_kubernetes.tests.unit.kuryr_fixtures.MockNeutronClient', 'k_fix.MockNeutronClient', ([], {}), '()\n', (7422, 7424), True, 'from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix\n')] |
import asyncio


async def f1():
    """Leaf coroutine: prints and returns its own name."""
    print("f1")
    return "f1"


async def f2():
    """Awaits f1, echoes its result, then returns its own name."""
    result = await f1()
    print(result)
    return "f2"


if __name__ == "__main__":
    # asyncio.run() replaces the deprecated get_event_loop() /
    # run_until_complete() pattern and also closes the loop for us
    # (the original never closed it).
    try:
        result = asyncio.run(f2())
        print(result)
    finally:
        print("exit")
| [
"asyncio.get_event_loop"
] | [((148, 172), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (170, 172), False, 'import asyncio\n')] |
# Copyright 2020-2021 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pybindings import RobotObs, EEObs, LinearPlanner, GMMPlanner
# Velocity clamp handed to the C++ planners (units not stated here --
# presumably m/s; confirm against the planner bindings).
MIN_PLANNER_VELOCITY = 0.001
MAX_PLANNER_VELOCITY = 0.1
# also defined in robot_env.cpp!
TIME_STEP_TRAIN = 0.1
class EEPlanner:
    """Base class for end-effector planners.

    Stores the gripper goals, head start and map shared by all planner
    implementations; subclasses implement :meth:`reset`, :meth:`step` and
    :meth:`generate_obs_step`.
    """
    def __init__(self,
                 gripper_goal_tip,
                 gripper_goal_wrist,
                 head_start,
                 map):
        # Goal poses for the gripper tip and wrist frames.
        self.gripper_goal_tip = gripper_goal_tip
        self.gripper_goal_wrist = gripper_goal_wrist
        # Head start forwarded to the underlying planner -- presumably a
        # time offset before the base follows; TODO confirm.
        self._head_start = head_start
        self._map = map
    def reset(self,
              robot_obs: RobotObs,
              slow_down_factor: float,
              is_analytic_env: bool,
              success_thres_dist: float,
              success_thres_rot: float) -> EEObs:
        """Start a new plan from the current robot state and return the initial EE observation."""
        raise NotImplementedError()
    def step(self, robot_obs: RobotObs, learned_vel_norm: float) -> EEObs:
        """Advance the plan by one step and return the next EE observation."""
        raise NotImplementedError()
    def generate_obs_step(self, robot_state: RobotObs) -> EEObs:
        """Build an EE observation for the given robot state without advancing the plan."""
        raise NotImplementedError()
class LinearPlannerWrapper(EEPlanner):
    """End-effector planner that drives the wrist on a straight line to the goal."""
    def __init__(self,
                 gripper_goal_tip,
                 gripper_goal_wrist,
                 head_start,
                 map):
        super(LinearPlannerWrapper, self).__init__(gripper_goal_tip,
                                                   gripper_goal_wrist,
                                                   head_start,
                                                   map)
        # Underlying C++ planner; constructed lazily in reset().
        self._planner = None
    def reset(self,
              robot_obs: RobotObs,
              slow_down_factor: float,
              is_analytic_env: bool,
              success_thres_dist: float,
              success_thres_rot: float) -> EEObs:
        """Construct a fresh LinearPlanner from the current robot state and return the initial observation."""
        # The 7-vector is presumably a pose as [x, y, z, qx, qy, qz, qw]
        # (origin, identity rotation) -- confirm against the LinearPlanner
        # binding signature.
        self._planner = LinearPlanner(self.gripper_goal_wrist,
                                      robot_obs.gripper_tf,
                                      [0, 0, 0, 0, 0, 0, 1],
                                      robot_obs.base_tf,
                                      success_thres_dist,
                                      success_thres_rot,
                                      MIN_PLANNER_VELOCITY,
                                      MAX_PLANNER_VELOCITY,
                                      slow_down_factor,
                                      self._head_start,
                                      TIME_STEP_TRAIN,
                                      is_analytic_env)
        return self.generate_obs_step(robot_obs)
    def step(self, robot_obs: RobotObs, learned_vel_norm: float) -> EEObs:
        """Delegate one planning step to the underlying C++ planner."""
        return self._planner.step(robot_obs, learned_vel_norm)
    def generate_obs_step(self, robot_state: RobotObs) -> EEObs:
        """Delegate observation generation to the underlying C++ planner."""
        return self._planner.generate_obs_step(robot_state)
class GMMPlannerWrapper(EEPlanner):
    """End-effector planner backed by a pretrained GMM motion model."""
    def __init__(self,
                 gripper_goal_tip,
                 gripper_goal_wrist,
                 head_start,
                 map,
                 gmm_model_path: str,
                 robot_config):
        """
        :param gripper_goal_tip: goal pose for the gripper tip frame.
        :param gripper_goal_wrist: goal pose for the gripper wrist frame.
        :param head_start: head start forwarded to the underlying planner.
        :param map: map of the environment.
        :param gmm_model_path: path to the serialized GMM model file.
        :param robot_config: mapping with robot-specific offsets
            ('tip_to_gripper_offset', 'gripper_to_base_rot_offset',
            'gmm_base_offset').
        :raises FileNotFoundError: if ``gmm_model_path`` does not exist.
        """
        super(GMMPlannerWrapper, self).__init__(gripper_goal_tip,
                                                gripper_goal_wrist,
                                                head_start,
                                                map)
        self._planner = None
        # Validate with a real exception instead of ``assert``: asserts are
        # stripped when Python runs with optimizations (-O), which would let
        # a bad path reach the C++ planner.
        if not os.path.exists(gmm_model_path):
            raise FileNotFoundError(f"Path {gmm_model_path} doesn't exist")
        self._gmm_model_path = gmm_model_path
        self._robot_config = robot_config
    def reset(self,
              robot_obs: RobotObs,
              slow_down_factor: float,
              is_analytic_env: bool,
              success_thres_dist,
              success_thres_rot) -> EEObs:
        """Construct a fresh GMMPlanner from the current robot state and return the initial observation."""
        # NOTE: planners either take in the goal for the tip or the wrist, but always output plans for the wrist!
        self._planner = GMMPlanner(self.gripper_goal_wrist,
                                   robot_obs.gripper_tf,
                                   [0, 0, 0, 0, 0, 0, 1],
                                   robot_obs.base_tf,
                                   success_thres_dist,
                                   success_thres_rot,
                                   MIN_PLANNER_VELOCITY,
                                   MAX_PLANNER_VELOCITY,
                                   slow_down_factor,
                                   self._head_start,
                                   TIME_STEP_TRAIN,
                                   is_analytic_env,
                                   self._robot_config["tip_to_gripper_offset"],
                                   self._robot_config["gripper_to_base_rot_offset"],
                                   str(self._gmm_model_path),
                                   self._robot_config["gmm_base_offset"])
        return self.generate_obs_step(robot_obs)
    def step(self, robot_obs: RobotObs, learned_vel_norm: float) -> EEObs:
        """Delegate one planning step to the underlying C++ planner."""
        return self._planner.step(robot_obs, learned_vel_norm)
    def generate_obs_step(self, robot_state: RobotObs) -> EEObs:
        """Delegate observation generation to the underlying C++ planner."""
        return self._planner.generate_obs_step(robot_state)
| [
"os.path.exists",
"pybindings.LinearPlanner"
] | [((2308, 2576), 'pybindings.LinearPlanner', 'LinearPlanner', (['self.gripper_goal_wrist', 'robot_obs.gripper_tf', '[0, 0, 0, 0, 0, 0, 1]', 'robot_obs.base_tf', 'success_thres_dist', 'success_thres_rot', 'MIN_PLANNER_VELOCITY', 'MAX_PLANNER_VELOCITY', 'slow_down_factor', 'self._head_start', 'TIME_STEP_TRAIN', 'is_analytic_env'], {}), '(self.gripper_goal_wrist, robot_obs.gripper_tf, [0, 0, 0, 0, 0,\n 0, 1], robot_obs.base_tf, success_thres_dist, success_thres_rot,\n MIN_PLANNER_VELOCITY, MAX_PLANNER_VELOCITY, slow_down_factor, self.\n _head_start, TIME_STEP_TRAIN, is_analytic_env)\n', (2321, 2576), False, 'from pybindings import RobotObs, EEObs, LinearPlanner, GMMPlanner\n'), ((3841, 3871), 'os.path.exists', 'os.path.exists', (['gmm_model_path'], {}), '(gmm_model_path)\n', (3855, 3871), False, 'import os\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
======================
Laplacian segmentation
======================
This notebook implements the laplacian segmentation method of
`McFee and Ellis, 2014 <http://bmcfee.github.io/papers/ismir2014_spectral.pdf>`_,
with a couple of minor stability improvements.
This implementation is available at https://librosa.github.io/librosa/auto_examples/plot_segmentation.html
Additional functions have been added to the core segmentation:
- unsupervised determination of the number of clusters suitable for the running task
- different feature packages: spectral, cepstral and chroma.
- a cosine distance between the different clusters that is plot together with cluster segmentation
- a set of parameters reported in params.py file necessary for tuning the segmentation model.
usage:
python3 spectral_clustering_audio.py audiofilename.wav [.mp3]
Input:
- name of audio file to be analyzed
Output:
- Segmentation and grouping of the different musical sections synchronized on user-chosen onsets
- Optional plots of similarity and recurrence matrix
- Optional timestamps text file with parameters and time boundaries
"""
# Code source by <NAME> (2018) adapted from <NAME> (2014)
# License: ISC
###################################
# Imports
# - numpy for basic functionality
# - scipy for graph Laplacian
# - matplotlib for visualization
# - sklearn.cluster for K-Means, for metrics and scaling.
# - warnings to delete warning message for scipy package
from __future__ import division
import numpy as np
import scipy
import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
import sys, os
import argparse
import matplotlib.pyplot as plt
from matplotlib import gridspec
import sklearn.cluster
from sklearn.preprocessing import scale
import sklearn.metrics
import sklearn.utils
import librosa
import librosa.display
import cluster_rotate
import params
plt.rcParams.update({'font.size': 8})
BINS_PER_OCTAVE = params.BINS_PER_OCTAVE
N_OCTAVES = params.N_OCTAVES
NFFT = int(params.NFFT)
STEP = int(params.STEP)
#######################################
def detect_onsets(y, sr, M):
    """Detect onset events on the spectrogram M and synchronize M on them.

    Parameters
    ----------
    y : waveform (not read here; the onset envelope is taken from M)
    sr : sampling rate in Hz
    M : spectrogram, shape (n_bins, n_frames)

    Returns
    -------
    onset_raw : frame indices of the detected onsets (no backtracking)
    onset_times : onset times in seconds, padded with frame 0 and the last frame
    Msync : M aggregated (median) between consecutive onsets
    """
    # detect onsets from the spectral flux envelope
    oenv = librosa.onset.onset_strength(S=M, sr=sr)
    # Detect events without backtracking
    onset_raw = librosa.onset.onset_detect(onset_envelope=oenv, backtrack=False)
    # Backtrack the events using the onset envelope (only used in the plot below)
    onset_bt = librosa.onset.onset_backtrack(onset_raw, oenv)
    # we fix_frames to include non-beat frames 0 and M.shape[1]-1 (final frame)
    onset_frames = librosa.util.fix_frames(onset_raw, x_min=0, x_max=M.shape[1]-1)
    onset_times = librosa.frames_to_time(onset_frames, sr=sr, hop_length = STEP)
    # To reduce dimensionality, aggregate the CQT between consecutive onsets.
    # NOTE(review): Msync is synchronized on onset_raw while onset_times comes
    # from the padded onset_frames, so their lengths may differ by the padding
    # frames -- confirm downstream alignment.
    Msync = librosa.util.sync(M, onset_raw, aggregate=np.median)
    if params.onset_plot:
        # Diagnostic figures: onset envelope, then raw vs. synchronized CQT.
        plt.figure(figsize=(12, 4))
        plt.plot(oenv, label='Onset strength')
        plt.vlines(onset_raw, 0, oenv.max(), label='Raw onsets')
        plt.vlines(onset_bt, 0, oenv.max(), label='Backtracked', color='r')
        plt.legend(frameon=True, framealpha=0.75)
        plt.tight_layout()
        plt.figure(figsize=(12, 4))
        plt.subplot(2,1,1)
        plt.title('CQT spectrogram')
        librosa.display.specshow(M, y_axis='cqt_hz', sr=sr, hop_length= STEP, bins_per_octave=BINS_PER_OCTAVE, x_axis='time')
        plt.tight_layout()
        plt.subplot(2,1,2)
        plt.title('CQT spectrogram synchronized on onsets')
        librosa.display.specshow(Msync, bins_per_octave=BINS_PER_OCTAVE, y_axis='cqt_hz', x_axis='time', x_coords=onset_times)
        plt.tight_layout()
    return onset_raw, onset_times, Msync
##############################################
def detect_beats(y, sr, M):
    """Track beats in the waveform and synchronize the spectrogram M on them.

    Parameters
    ----------
    y : waveform
    sr : sampling rate in Hz
    M : spectrogram, shape (n_bins, n_frames)

    Returns
    -------
    beats_frames : beat frame indices padded with frame 0 and the last frame
    beat_times : corresponding times in seconds
    Msync : M aggregated (median) between consecutive beats
    """
    tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length = STEP, trim=False)
    print('Detected tempo: {0:.2f} bpm'.format(tempo))
    beat_period = np.diff(librosa.frames_to_time(beats, sr=sr, hop_length= STEP))
    # NOTE(review): the first value printed is 60/mean(beat_period), i.e. a
    # tempo in bpm, although the label says "mean beat period" -- confirm intent.
    print('mean beat period: {0:.2f} ; std beat period: {1:.2f}'.format(60/np.mean(beat_period), np.std(beat_period)))
    # pad with frame 0 and the final frame so segments cover the whole track
    beats_frames = librosa.util.fix_frames(beats, x_min=0, x_max=M.shape[1]-1)
    beat_times = librosa.frames_to_time(beats_frames, sr=sr, hop_length = STEP)
    Msync = librosa.util.sync(M, beats_frames, aggregate=np.median)
    if params.onset_plot:
        plt.figure(figsize=(12, 4))
        plt.subplot(2,1,1)
        plt.title('CQT spectrogram')
        librosa.display.specshow(M, y_axis='cqt_hz', sr=sr, hop_length=STEP, bins_per_octave=BINS_PER_OCTAVE, x_axis='time')
        plt.tight_layout()
        # For plotting purposes, we need the timing of the padded beats.
        plt.subplot(2,1,2)
        plt.title('CQT spectrogram synchronized on beats')
        librosa.display.specshow(Msync, bins_per_octave=BINS_PER_OCTAVE, y_axis='cqt_hz', x_axis='time', x_coords=beat_times)
        plt.tight_layout()
    return beats_frames, beat_times, Msync
##############################################
def no_onsets(sr, M):
    """Treat every spectrogram frame as its own onset (no synchronization).

    Returns (frame_indices, frame_times, M) with M returned unchanged.
    """
    frame_idx = np.arange(0, M.shape[1])
    # One "onset" per hop of STEP samples.
    frame_times = librosa.samples_to_time(frame_idx, sr=sr/STEP)
    if params.onset_plot:
        # Optional visual check of the raw (un-synchronized) spectrogram.
        plt.figure(figsize=(12, 4))
        plt.title('CQT spectrogram')
        librosa.display.specshow(M, y_axis='cqt_hz', sr=sr,
                                 bins_per_octave=BINS_PER_OCTAVE,
                                 x_axis='time', x_coords=frame_times)
        plt.tight_layout()
    return frame_idx, frame_times, M
def get_manual_beats(sr, M, filename):
    """Read manual onset annotations and synchronize the spectrogram M on them.

    The annotation file's first line is skipped (header); each following line
    holds one timestamp in seconds.

    Returns
    -------
    onsets : annotation frame indices padded with frame 0 and the last frame
    onset_times : corresponding times in seconds
    Msync : M aggregated (median) between consecutive annotations
    """
    with open(filename, 'r') as f:
        data = f.readlines()
    # timestamps (seconds) -> frame indices at hop STEP
    times = np.array([float(x.strip()) for x in data[1:]])
    frames = np.array([int(x * sr / STEP) for x in times])
    onsets = librosa.util.fix_frames(frames, x_min=0, x_max=M.shape[1]-1)
    onset_times = librosa.frames_to_time(onsets, sr=sr, hop_length = STEP)
    Msync = librosa.util.sync(M, onsets, aggregate=np.median)
    if params.onset_plot:
        plt.figure(figsize=(12, 4))
        plt.subplot(2,1,1)
        plt.title('CQT spectrogram')
        librosa.display.specshow(M, y_axis='cqt_hz', sr=sr, hop_length=STEP, bins_per_octave=BINS_PER_OCTAVE, x_axis='time')
        plt.tight_layout()
        plt.subplot(2,1,2)
        plt.title('CQT spectrogram synchronized on beats')
        librosa.display.specshow(Msync, bins_per_octave=BINS_PER_OCTAVE, y_axis='cqt_hz', x_axis='time', x_coords=onset_times)
        plt.tight_layout()
    return onsets, onset_times, Msync
def extract_onsets(y, sr, manual_opt):
    """Compute the CQT of y and synchronize it according to params.onset.

    params.onset selects the strategy: 'no' (every frame), 'onset' (detected
    onsets), 'beat' (beat tracking) or 'manual' (annotation file manual_opt).
    Exits the program on an unknown value.

    Returns (onset_frames, onset_times, Csync).
    """
    # CQT magnitude in dB: np.array((N_OCTAVES*BINS_PER_OCTAVE, Tmax*sr/STEP))
    mag = librosa.core.magphase(
        librosa.cqt(y=y, sr=sr, bins_per_octave=BINS_PER_OCTAVE,
                    n_bins=N_OCTAVES * BINS_PER_OCTAVE, hop_length = STEP))[0]
    C = librosa.amplitude_to_db(mag, ref=np.max)
    # Dispatch on the configured synchronization strategy.
    method = params.onset
    if method == 'no':
        return no_onsets(sr, C)
    if method == 'onset':
        return detect_onsets(y, sr, C)
    if method == 'beat':
        return detect_beats(y, sr, C)
    if method == 'manual':
        return get_manual_beats(sr, C, manual_opt)
    print('onset parameter is not well-defined')
    sys.exit()
def build_weighted_rec_matrix(M):
    """Weighted recurrence (affinity) matrix from the onset-synchronous CQT.

    A width-3 window with majority vote suppresses linkage errors, then a
    median time-lag filter enhances diagonals and fills short gaps.
    """
    affinity = librosa.segment.recurrence_matrix(M, width=3, mode='affinity', sym=True)
    # Median-filter along time lags to smooth the diagonals.
    smooth = librosa.segment.timelag_filter(scipy.ndimage.median_filter)
    return smooth(affinity, size=(1, 7))
def build_seq_matrix(M, x):
    """Local sequence (path) affinity matrix from onset-synchronous features.

    Rpath[i, i+/-1] = exp(-|M[i] - M[i+/-1]|^2 / sigma^2), where sigma is the
    median squared distance between successive onsets (local scaling, cf.
    "A Spectral Clustering Approach to Speaker Diarization").
    """
    # Synchronize the features on the onsets, then standardize them.
    feats = scale(librosa.util.sync(M, x, aggregate=np.median))
    # Squared distance between each pair of successive onsets.
    step_dist = np.sum(np.diff(feats, axis=1)**2, axis=0)
    sigma = np.median(step_dist)
    sim = np.exp(-step_dist / sigma)
    # Place the similarities on the two first off-diagonals.
    return np.diag(sim, k=1) + np.diag(sim, k=-1)
def build_laplacian_and_evec(Rf, R_path, opt, onsets):
    """Combine the two affinity graphs and return their spectral decomposition.

    A = mu*Rf + (1-mu)*R_path, with mu chosen so the two degree profiles are
    balanced.  Returns (Cnorm, evals, evecs) of the symmetric normalized
    graph Laplacian of A; if opt is truthy the graphs are plotted first.
    """
    deg_path = np.sum(R_path, axis=1)
    deg_rec = np.sum(Rf, axis=1)
    total_deg = deg_path + deg_rec
    mu = deg_path.dot(total_deg) / np.sum(total_deg**2)
    print('Optimal weight value (mu): {0:.2f}'.format(mu))
    A = mu * Rf + (1 - mu) * R_path
    if opt:
        plot_similarity(Rf, R_path, A, onsets)
    # Symmetric normalized Laplacian and its full eigendecomposition.
    L = scipy.sparse.csgraph.laplacian(A, normed=True)
    evals, evecs = np.linalg.eigh(L)
    print('L shape:', L.shape)
    # A median filter smooths small discontinuities in the eigenvectors.
    evecs = scipy.ndimage.median_filter(evecs, size=(9, 1))
    # Cumulative normalization needed for symmetric-Laplacian eigenvectors.
    Cnorm = np.cumsum(evecs**2, axis=1)**0.5
    return Cnorm, evals, evecs
################################################
def compute_nb_clusters(method, evals, evecs, Tmax):
    """Determine candidate numbers of clusters for the segmentation.

    Parameters
    ----------
    method : 'fixed' | 'max' | 'evals' | 'silhouette' | 'davies_bouldin'
             | 'calinski_harabaz'
    evals, evecs : spectral decomposition of the Laplacian
    Tmax : total signal length (kept for interface compatibility; unused)

    Returns
    -------
    list of candidate cluster counts (the caller iterates over it).
    Exits the program on an unknown method.
    """
    if method == 'fixed':
        c = params.cluster_nb  # user-supplied list
    elif method == 'max':
        # Self-tuning estimate averaged over several rotation runs.
        # Bug fix: Cnorm was referenced but never defined here (NameError).
        Cnorm = np.cumsum(evecs**2, axis=1)**0.5
        nc = []
        for it in range(params.cluster_max):
            nc.append(cluster_rotate.cluster_rotate(evecs/Cnorm, evals, range(1, 10), 1, False))
        c = [int(np.mean(nc)) + 1]
    elif method == 'evals':
        # Count eigenvalues whose spectral gap 1-evals exceeds 0.75.
        ind = np.where(1 - evals > 0.75)[0]
        c = [len(ind) + 1]
    elif method in ['silhouette', 'davies_bouldin', 'calinski_harabaz']:
        list_k = range(2, 50, 2)
        # Bug fix: the eigenvector matrix was referenced as the undefined
        # name `e`; use evecs and derive Cnorm from it.
        Cnorm = np.cumsum(evecs**2, axis=1)**0.5
        # Bug fix: the score list was reset inside the loop, so only the
        # last k's score survived; accumulate one score per candidate k.
        score = []
        for k in list_k:
            print('nb of clusters:', k)
            X = evecs[:, :k] / Cnorm[:, k-1:k]
            # Use the first k normalized eigenvectors to cluster the onsets.
            KM = sklearn.cluster.KMeans(n_clusters=k)
            seg_ids = KM.fit_predict(X)
            if method == 'silhouette':
                score.append(sklearn.metrics.silhouette_score(X, seg_ids, metric='euclidean'))  # max (close to 1)
            elif method == 'davies_bouldin':
                score.append(davies_bouldin_score(X, seg_ids))  # min
            else:
                score.append(sklearn.metrics.calinski_harabaz_score(X, seg_ids))  # max
        if method == 'davies_bouldin':
            c = [list_k[np.argmin(score)]]
        else:
            c = [list_k[np.argmax(score)]]
    else:
        print('method for finding the right number of clusters is unknown')
        sys.exit()
    print('nb of clusters:', c)
    return c
def davies_bouldin_score(X, labels):
    """Computes the Davies-Bouldin score.

    The score is defined as the ratio of within-cluster distances to
    between-cluster distances; lower is better.

    Parameters
    ----------
    X : array-like, shape (``n_samples``, ``n_features``)
        List of ``n_features``-dimensional data points. Each row corresponds
        to a single data point.
    labels : array-like, shape (``n_samples``,)
        Predicted labels for each sample.

    Returns
    -------
    score: float
        The resulting Davies-Bouldin score.

    Raises
    ------
    ValueError
        If the number of distinct labels is not in [2, n_samples - 1].

    References
    ----------
    .. [1] `Davies, <NAME>.; Bouldin, <NAME>. (1979).
       "A Cluster Separation Measure". IEEE Transactions on
       Pattern Analysis and Machine Intelligence. PAMI-1 (2): 224-227`_
    """
    # Pure-NumPy rewrite: the original used ``dtype=np.float`` (the alias was
    # removed in NumPy 1.24) and ``sklearn.utils.safe_indexing`` (removed in
    # scikit-learn 0.24), both of which fail on current library versions.
    X = np.asarray(X, dtype=float)
    # Encode arbitrary label values as 0..n_labels-1.
    classes, labels = np.unique(np.asarray(labels), return_inverse=True)
    n_samples = X.shape[0]
    n_labels = len(classes)
    if not 1 < n_labels < n_samples:
        raise ValueError("Number of labels is %d. Valid values are 2 to n_samples - 1 (inclusive)" % n_labels)
    intra_dists = np.zeros(n_labels)
    centroids = np.zeros((n_labels, X.shape[1]), dtype=float)
    for k in range(n_labels):
        cluster_k = X[labels == k]
        centroid = cluster_k.mean(axis=0)
        centroids[k] = centroid
        # Mean Euclidean distance of the cluster's points to its centroid.
        intra_dists[k] = np.mean(np.sqrt(np.sum((cluster_k - centroid)**2, axis=1)))
    # Pairwise Euclidean distances between centroids.
    diff = centroids[:, None, :] - centroids[None, :, :]
    centroid_distances = np.sqrt(np.sum(diff**2, axis=-1))
    if np.allclose(intra_dists, 0) or np.allclose(centroid_distances, 0):
        return 0.0
    # The zero diagonal yields inf; mask it out as NaN before averaging.
    with np.errstate(divide='ignore', invalid='ignore'):
        score = (intra_dists[:, None] + intra_dists) / centroid_distances
    score[score == np.inf] = np.nan
    return np.mean(np.nanmax(score, axis=1))
def plot_similarity(Rf, R_path, A, onset_times):
    """Plot the recurrence (Rf), local path (R_path) and combined (A)
    affinity matrices side by side in a single figure."""
    plt.figure(figsize=(12, 4))
    plt.subplot(1, 3, 1)
    librosa.display.specshow(Rf, cmap='inferno_r', y_axis='time', y_coords=onset_times)
    plt.title('Long-range recurrence similarity (Rrec)')
    plt.subplot(1, 3, 2)
    librosa.display.specshow(R_path, cmap='inferno_r')
    plt.title('Local path similarity (Rloc)')
    plt.subplot(1, 3, 3)
    librosa.display.specshow(A, cmap='inferno_r')
    plt.title('Combined graph (A = m Rrec + (1-m) Rloc)')
    plt.tight_layout()
def plot_structure(Rf, X, seg_ids, k, onset_times):
    """Plot the eigenvector components, the recurrence matrix and the
    resulting k-cluster segmentation in one figure."""
    fig_s = plt.figure(figsize=(12, 4))
    colors = plt.get_cmap('Paired', k)
    ax_s1 = fig_s.add_subplot(1, 3, 2)
    librosa.display.specshow(Rf, cmap='inferno_r')
    ax_s1.set_title('Long-range recurrence similarity (Rrec)')
    ax_s2 = fig_s.add_subplot(1, 3, 1)
    librosa.display.specshow(X, y_axis='time', y_coords=onset_times)
    ax_s2.set_title('Structure components (Eigen vectors)')
    ax_s3 = fig_s.add_subplot(1, 3, 3)
    # One column of per-onset labels, colored by cluster id.
    librosa.display.specshow(np.atleast_2d(seg_ids).T, cmap=colors)
    ax_s3.set_title('Estimated segments')
    plt.colorbar(ticks=range(k))
    plt.tight_layout()
#################################################
def compute_musical_density(C, onset_times, w, alpha):
    """Per-onset musical density.

    For each column n of C, count the feature bins whose value exceeds
    alpha times the maximum of C over a sliding time window of width w
    seconds (the window is clipped near the ends of the track).

    Returns a list with one count per column of C.
    """
    t_end = onset_times[-1]
    density = []
    for col in range(C.shape[1]):
        # Window [lo, hi) around the current onset, clipped to the track.
        hi = np.min([t_end, onset_times[col] + w])
        lo = np.min([t_end - w, onset_times[col]])
        window = np.where((onset_times < hi) & (onset_times >= lo))
        local_max = np.max(C[:, window])
        # Bins of the current column above the adaptive threshold.
        active = np.where(C[:, col] > alpha * local_max)
        density.append(len(active[0]))
    return density
def plot_features(X, onsets, onset_times):
    """Plot the onset-synchronous features and their musical density.

    Only the 'chroma' and 'cepstral' feature packages have a dedicated
    figure (3 rows: features, deltas, density); any other configuration
    just prints a message.
    """
    Xsync = librosa.util.sync(X, onsets, aggregate=np.median)
    if params.feat[0] == 'chroma':
        fig_c = plt.figure(figsize=(12, 6))
        ax0_c = fig_c.add_subplot(3, 1, 1)
        ax0_c.set_title('onset-synchronous chroma (12)')
        librosa.display.specshow(Xsync[:12,:], y_axis='chroma', x_axis='time', x_coords=onset_times, cmap = 'OrRd')
        ax1_c = fig_c.add_subplot(3, 1, 2, sharex = ax0_c)
        ax1_c.set_title('onset-synchronous delta chroma (12)')
        librosa.display.specshow(np.abs(Xsync[12:,:]), y_axis='chroma', x_axis='time', x_coords=onset_times, cmap = 'OrRd')
        density = compute_musical_density(Xsync[:12,:], onset_times, params.norm_density_win, params.alpha)
        print(len(onset_times), len(density))
        ax2_c = fig_c.add_subplot(3, 1, 3, sharex = ax0_c)
        ax2_c.set_title('musical density')
        ax2_c.plot(onset_times, density)
        plt.tight_layout()
    elif params.feat[0] == 'cepstral':
        fig_s = plt.figure(figsize=(12, 6))
        ax0_s = fig_s.add_subplot(3, 1, 1)
        ax0_s.set_title('onset-synchronous MFCC (20)')
        # Bug fix: rows 0-19 hold the 20 MFCCs (deltas start at row 20), so
        # slice [:20] instead of [:21].
        librosa.display.specshow(Xsync[:20,:], x_axis='time', x_coords=onset_times)
        ax1_s = fig_s.add_subplot(3, 1, 2, sharex = ax0_s)
        ax1_s.set_title('onset-synchronous delta MFCC (20)')
        librosa.display.specshow(np.abs(Xsync[20:,:]), x_axis='time', x_coords=onset_times)
        density = compute_musical_density(Xsync[:20,:], onset_times, params.norm_density_win, params.alpha)
        # Bug fix: the density axes were added at subplot (3,1,2) again,
        # overwriting the delta-MFCC panel; put them in the third row as the
        # chroma branch does.
        ax2_s = fig_s.add_subplot(3, 1, 3, sharex = ax0_s)
        ax2_s.set_title('musical density')
        ax2_s.plot(onset_times, density)
        plt.tight_layout()
    else:
        print('these parameters can not be plot')
def load_wav_percu(filename, start, duration, opt_percussive_part):
    """Load a waveform; optionally keep only its percussive component.

    Parameters
    ----------
    filename : path of the audio file
    start, duration : offset and length (seconds) of the excerpt to load
    opt_percussive_part : if True, run HPSS, write both components next to
        the input file and return the percussive waveform.

    Returns
    -------
    (y, sr) : waveform and sampling rate
    """
    y, sr = librosa.load(filename, offset=start, duration = duration)
    if opt_percussive_part:
        # separate harmonics and percussives into two waveforms
        # Bug fix: the percussive component was bound to `yo` while the
        # write call referenced an undefined `y_percu` (NameError).
        y_harmo, y_percu = librosa.effects.hpss(y)
        librosa.output.write_wav(filename + '_harmo.wav', y_harmo, sr)
        librosa.output.write_wav(filename + '_percu.wav', y_percu, sr)
        return y_percu, sr
    else:
        return y, sr
################################################
def feature_extraction(y, sr, opt_tuning):
    """Extract the acoustic feature packages listed in params.feat.

    Supported packages: 'cepstral' (20 MFCCs + deltas), 'chroma' (12 chroma
    bins + deltas, tuning-corrected) and 'spectral' (centroid, contrast,
    flatness, five roll-offs + deltas).

    Returns
    -------
    full : feature matrix of the first computed package, shape (n_feat, n_frames)
    idx_chroma : index of the chroma package in the build order (0 if absent)
    """
    if opt_tuning:
        # estimate the deviation from A440 so chroma bins can be tuning-corrected
        A440 = librosa.estimate_tuning(y=y, sr=sr, resolution=1e-3)
        print('Deviation from A440 is : {0:.2f}'.format(A440))
    else:
        A440 = 0.0
    print('Features for local similarity: ', ' '.join(params.feat))
    full = []
    idx_chroma = 0
    if 'cepstral' in params.feat:
        mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc = 20, n_fft = NFFT, hop_length = STEP)
        mfcc_delta = librosa.feature.delta(mfcc)
        fcep = np.concatenate((mfcc, mfcc_delta), axis=0)
        full.append(fcep)
    if 'chroma' in params.feat:
        chroma = librosa.feature.chroma_cqt(y=y, sr=sr, n_chroma = 12, n_octaves = N_OCTAVES, hop_length = STEP, norm = None, tuning= A440)
        chroma_delta = librosa.feature.delta(chroma)
        fchr = np.concatenate((chroma, chroma_delta), axis=0)
        idx_chroma = len(full)
        full.append(fchr)
    if 'spectral' in params.feat:
        centroid = librosa.feature.spectral_centroid(y=y, sr=sr, n_fft = NFFT, hop_length = STEP)
        contrast = librosa.feature.spectral_contrast(y=y, sr=sr, n_fft = NFFT, n_bands=6, hop_length = STEP)
        flatness = librosa.feature.spectral_flatness(y=y, n_fft = NFFT, hop_length = STEP)
        rolloff05 = librosa.feature.spectral_rolloff(y=y, sr= sr, n_fft = NFFT, hop_length = STEP, roll_percent= 0.05)
        rolloff25 = librosa.feature.spectral_rolloff(y=y, sr= sr, n_fft = NFFT, hop_length = STEP, roll_percent= 0.25)
        rolloff50 = librosa.feature.spectral_rolloff(y=y, sr= sr, n_fft = NFFT, hop_length = STEP, roll_percent= 0.50)
        rolloff75 = librosa.feature.spectral_rolloff(y=y, sr= sr, n_fft = NFFT, hop_length = STEP, roll_percent= 0.75)
        rolloff95 = librosa.feature.spectral_rolloff(y=y, sr= sr, n_fft = NFFT, hop_length = STEP, roll_percent= 0.95)
        spec = np.concatenate((centroid, contrast, flatness, rolloff05,rolloff25,rolloff50,rolloff75,rolloff95), axis=0)
        spec_delta = librosa.feature.delta(spec)
        fspec = np.concatenate((spec, spec_delta), axis = 0)
        full.append(fspec)
    # NOTE(review): only the FIRST appended package is returned
    # (np.array(full)[0]); with several packages enabled the others are
    # silently dropped -- confirm whether concatenating them was intended.
    full = np.array(full)[0]
    print('feature shape', full.shape)
    return full, idx_chroma
def extract_time_boundaries(cluster_ids, onsets, nb_frames, sr):
    """Convert the per-onset label sequence into segment boundaries.

    Returns (boundary_times_in_seconds, label_of_each_segment); the first
    boundary is always onset 0 and the last one the final frame.
    """
    # Positions where the label changes; onset 0 always starts a segment.
    change_points = 1 + np.flatnonzero(cluster_ids[:-1] != cluster_ids[1:])
    change_points = librosa.util.fix_frames(change_points, x_min=0)
    segment_labels = list(cluster_ids[change_points])
    # Map onset indices to frames and pad up to the end of the track.
    segment_frames = librosa.util.fix_frames(onsets[change_points], x_min=None, x_max=nb_frames-1)
    segment_times = librosa.frames_to_time(segment_frames, sr=sr, hop_length = STEP)
    return segment_times, segment_labels
##################################
def extract_cosine_distance_clusters(center_clusters, distance_ref, type_dist = 'cos'):
    """Distance from each cluster centroid to a reference vector.

    type_dist selects the metric: 'cos' (cosine distance) or 'eucl'
    (Euclidean).  Any other value yields an empty list.
    """
    distances = []
    for centroid in center_clusters:
        if type_dist == 'cos':
            distances.append(scipy.spatial.distance.cosine(centroid, distance_ref))
        elif type_dist == 'eucl':
            distances.append(np.linalg.norm(centroid - distance_ref))
    return distances
def extract_distance_between_clusters(center_clusters, type_dist = 'cos'):
    """Pairwise distance matrix between cluster centroids, plus a heatmap.

    Parameters
    ----------
    center_clusters : array (n_clusters, n_features) of centroids
    type_dist : 'cos' (cosine distance) or 'eucl' (Euclidean)

    Returns
    -------
    distance : np.array (n_clusters, n_clusters)
    """
    n = len(center_clusters)
    # Bug fix: the matrix must be square (n x n); it was allocated with the
    # full centroid array shape (n_clusters, n_features), which is wrong and
    # raises IndexError whenever n_features < n_clusters.
    distance = np.zeros((n, n))
    for i, center_i in enumerate(center_clusters):
        for j, center_j in enumerate(center_clusters):
            if type_dist == 'cos':
                distance[i,j] = scipy.spatial.distance.cosine( center_i, center_j)
            elif type_dist == 'eucl':
                distance[i,j] = np.sqrt( np.sum( (center_i - center_j)**2) )
    # Tick positions/labels for the heatmap (one per cluster).
    xloc = [c + 0.5 for c in range(n)]
    cx = [str(c) for c in range(n)]
    fig_d, ax_d = plt.subplots(figsize=(5, 4))
    p_d = ax_d.pcolor(distance, cmap = 'inferno_r')
    cb = fig_d.colorbar(p_d)
    ax_d.xaxis.set_ticks(xloc)
    ax_d.xaxis.set_ticklabels(cx)
    ax_d.yaxis.set_ticks(xloc)
    ax_d.yaxis.set_ticklabels(cx)
    ax_d.set_title('Distance between clusters')
    ax_d.set_xlabel('clusters numbers')
    plt.tight_layout()
    return distance
def extract_ref_signal(X, onset_times):
    """Rows of X whose onset time lies in [params.begin_ref, params.end_ref)."""
    # Bug fix: np.where returns a tuple; indexing X with that tuple plus a
    # column slice (X[ind,:]) is invalid/deprecated in NumPy.  Use the index
    # array itself.
    ind = np.where((onset_times >= params.begin_ref) & (onset_times < params.end_ref))[0]
    return X[ind,:]
def main():
    """Command-line entry point: segment an audio file into musical sections.

    Pipeline: load audio -> extract features -> onset-synchronize the CQT ->
    build the combined affinity graph -> spectral decomposition -> K-means on
    the first k eigenvectors -> plot (and optionally record) the boundaries.
    """
    parser = argparse.ArgumentParser(description='Segmentation and clustering of musical sections with spectral clustering (Laplacian matrix and eigen values)')
    parser.add_argument('filename', type=str, help='name of audio file')
    parser.add_argument('manual_onset', nargs='?', type=str, help='name of the file containing manual annotations for onset timestamps (with method=manual)')
    args = parser.parse_args()
    #==================
    # Signal processing
    #==================
    # extract waveform from audio signal of given duration and beginning. If onset_percu is True, extract only percussive part of the signal.
    y, sr = load_wav_percu(args.filename, params.begin, params.duration, params.onset_percu)
    print('signal shape:', y.shape, ' sr=', sr, 'win duration=%.2f' %(NFFT / sr))
    # extract acoustic features from the audio signal; feat is a matrix np.array((nb features, Tmax*sr/STEP))
    feat, idx_chroma = feature_extraction(y, sr, params.opt_tuning)
    # extract onset indexes and times + onset-synchronous CQT transform on onsets.
    onsets, onset_times, Csync = extract_onsets(y, sr, args.manual_onset)
    #if 'chroma' in params.feat:
    #    compute_musical_density(Csync, onset_times, idx_chroma, params.norm_density_win, params.alpha, sr)
    if params.plot_features: plot_features(feat, onsets, onset_times)
    #================
    # Affinity matrix
    #================
    # non-negative affinity matrix from the onset-synchronous CQT:
    # local consistency of timbral (CQT) features
    Rf = build_weighted_rec_matrix(Csync)
    # non-negative affinity matrix from the onset-synchronous feature matrix:
    # long-range repeating forms of harmonic features
    R_path = build_seq_matrix(feat, onsets)
    # Laplacian (sequence-augmented affinity) as a linear combination of Rf and R_path; extract eigenvalues and vectors.
    Cnorm, evals, evecs = build_laplacian_and_evec(Rf, R_path, params.plot_simi, onset_times)
    #===========
    # Clustering
    #===========
    # determine the number of clusters; kl is a list of candidate cluster counts.
    kl = compute_nb_clusters(params.cluster_method, evals, evecs, y.shape[0]*sr)
    N_CLUST = len(kl)
    #=================
    # Start plotting
    #=================
    import matplotlib.patches as patches
    fig_f = plt.figure(figsize = (12, 3+2*N_CLUST))
    # one wide row for the CQT, then one row per candidate clustering
    hr = [1] * (N_CLUST +1)
    hr[0] = 2
    gs = gridspec.GridSpec(1 + N_CLUST,1, height_ratios=hr)
    ax_f0 = fig_f.add_subplot(gs[0])
    librosa.display.specshow(Csync, y_axis='cqt_hz', sr=sr, hop_length = STEP, bins_per_octave=BINS_PER_OCTAVE, x_axis='time', x_coords=onset_times)
    ax_f0.set_title('CQT spectrogram synchronized {0}'.format(params.onset))
    for it, k in enumerate(kl):
        # limit the number of clusters per second
        # NOTE(review): the cap compares k to cluster_nb_max*sr*samples,
        # which is huge -- a duration-based cap (e.g. /sr) seems intended;
        # confirm against params.cluster_nb_max's definition.
        if k > params.cluster_nb_max*sr*y.shape[0]:
            k = params.cluster_nb_max*sr*y.shape[0]
        print('nb of clusters: {} for it {}/{}'.format(k, it, N_CLUST))
        # Use the first k normalized eigenvectors: X is an onset-synchronous
        # matrix holding the local + long-range structure information.
        X = evecs[:, :k] / Cnorm[:, k-1:k]
        # onsets are grouped into k clusters, each with its own acoustic characteristics
        KM = sklearn.cluster.KMeans(n_clusters=k)
        # seg_ids[i] is the cluster label of onset i
        seg_ids = KM.fit_predict(X)
        # optional cosine distance between each cluster and a reference taken at the very beginning of the signal
        # KM.cluster_centers_ : array, [n_clusters, n_features]
        if params.cluster_dist:
            # NOTE(review): ref_signal is computed but never used below.
            ref_signal = extract_ref_signal(X, onset_times)
            distance_cosine_cluster = extract_cosine_distance_clusters( KM.cluster_centers_, np.mean(X[:10*NFFT,:], axis=0))
        else:
            distance_cosine_cluster = None
        if params.plot_dist:
            distance_between_clusters = extract_distance_between_clusters( KM.cluster_centers_ )
        # plot the resulting structure representation
        if params.plot_struct: plot_structure(Rf, X, seg_ids, k, onset_times)
        bound_times, bound_labels = extract_time_boundaries(seg_ids, onsets, feat.shape[1], sr)
        # NOTE(review): freqs is computed but not used in this function.
        freqs = librosa.cqt_frequencies(n_bins=Csync.shape[0], fmin=librosa.note_to_hz('C1'), bins_per_octave=BINS_PER_OCTAVE)
        timestamps_name = os.path.splitext(args.filename)[0] + '_timestamps.txt'
        #=============
        # Plot results
        #=============
        cmap = plt.get_cmap('Paired', k)
        # write header of the timestamps text file with the run parameters
        if params.timestamps:
            f = open(timestamps_name, 'a')
            f.write('WIN = {0:.2f} sec, NFFT = {1}, STEP = {2}, begin = {3}, duration = {4}\n'.format(NFFT / sr, NFFT, STEP, params.begin, params.duration))
            f.write('Nb of clusters: {0} obtained with method {1} and features {2}\n'.format(k, params.cluster_method, '-'.join(params.feat)))
        # plot segmentation and cluster grouping (+ cosine distance),
        # and write the obtained boundaries to the text file
        ax_f1 = fig_f.add_subplot(gs[it + 1], sharex = ax_f0)
        for interval, label in zip(zip(bound_times, bound_times[1:]), bound_labels):
            if params.timestamps: f.write('{0:.2f} \t {1:.2f} \t {2} \n'.format(interval[0], interval[1], label))
            if params.cluster_dist: ax_f1.plot([interval[0], interval[1]],[distance_cosine_cluster[label], distance_cosine_cluster[label]], 'k')
            ax_f1.add_patch(patches.Rectangle((interval[0], 0), interval[1] - interval[0], 1, facecolor=cmap(label), alpha=1))
            ax_f1.text(interval[0]+(interval[1]-interval[0])/2, 0.9, label, fontsize=8)
        if params.timestamps: f.close()
    plt.tight_layout()
    plt.show()
if __name__ == '__main__':
    main()
# NOTE(review): removed the trailing statements that ran unconditionally at
# import time and were broken: they called an undefined bare `load()` (only
# librosa.load exists) and then `main(audio)`, although main() takes no
# arguments -- the module raised NameError before main(audio) could run.
# (Previously: title = 'Palestrina'; audio = load('<dropbox path>/' + title
# + '.wav'); main(audio).)
| [
"librosa.feature.spectral_flatness",
"librosa.util.fix_frames",
"librosa.feature.mfcc",
"librosa.estimate_tuning",
"numpy.array",
"numpy.cumsum",
"sys.exit",
"librosa.onset.onset_backtrack",
"librosa.feature.spectral_centroid",
"librosa.feature.spectral_contrast",
"numpy.arange",
"librosa.load... | [((1598, 1686), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""', 'module': '"""scipy"""', 'message': '"""^internal gelsd"""'}), "(action='ignore', module='scipy', message=\n '^internal gelsd')\n", (1621, 1686), False, 'import warnings\n'), ((1963, 2000), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 8}"], {}), "({'font.size': 8})\n", (1982, 2000), True, 'import matplotlib.pyplot as plt\n'), ((2216, 2256), 'librosa.onset.onset_strength', 'librosa.onset.onset_strength', ([], {'S': 'M', 'sr': 'sr'}), '(S=M, sr=sr)\n', (2244, 2256), False, 'import librosa\n'), ((2308, 2372), 'librosa.onset.onset_detect', 'librosa.onset.onset_detect', ([], {'onset_envelope': 'oenv', 'backtrack': '(False)'}), '(onset_envelope=oenv, backtrack=False)\n', (2334, 2372), False, 'import librosa\n'), ((2435, 2481), 'librosa.onset.onset_backtrack', 'librosa.onset.onset_backtrack', (['onset_raw', 'oenv'], {}), '(onset_raw, oenv)\n', (2464, 2481), False, 'import librosa\n'), ((2573, 2638), 'librosa.util.fix_frames', 'librosa.util.fix_frames', (['onset_raw'], {'x_min': '(0)', 'x_max': '(M.shape[1] - 1)'}), '(onset_raw, x_min=0, x_max=M.shape[1] - 1)\n', (2596, 2638), False, 'import librosa\n'), ((2652, 2712), 'librosa.frames_to_time', 'librosa.frames_to_time', (['onset_frames'], {'sr': 'sr', 'hop_length': 'STEP'}), '(onset_frames, sr=sr, hop_length=STEP)\n', (2674, 2712), False, 'import librosa\n'), ((2784, 2836), 'librosa.util.sync', 'librosa.util.sync', (['M', 'onset_raw'], {'aggregate': 'np.median'}), '(M, onset_raw, aggregate=np.median)\n', (2801, 2836), False, 'import librosa\n'), ((3701, 3765), 'librosa.beat.beat_track', 'librosa.beat.beat_track', ([], {'y': 'y', 'sr': 'sr', 'hop_length': 'STEP', 'trim': '(False)'}), '(y=y, sr=sr, hop_length=STEP, trim=False)\n', (3724, 3765), False, 'import librosa\n'), ((4033, 4094), 'librosa.util.fix_frames', 'librosa.util.fix_frames', (['beats'], {'x_min': '(0)', 
'x_max': '(M.shape[1] - 1)'}), '(beats, x_min=0, x_max=M.shape[1] - 1)\n', (4056, 4094), False, 'import librosa\n'), ((4107, 4167), 'librosa.frames_to_time', 'librosa.frames_to_time', (['beats_frames'], {'sr': 'sr', 'hop_length': 'STEP'}), '(beats_frames, sr=sr, hop_length=STEP)\n', (4129, 4167), False, 'import librosa\n'), ((4180, 4235), 'librosa.util.sync', 'librosa.util.sync', (['M', 'beats_frames'], {'aggregate': 'np.median'}), '(M, beats_frames, aggregate=np.median)\n', (4197, 4235), False, 'import librosa\n'), ((4956, 4980), 'numpy.arange', 'np.arange', (['(0)', 'M.shape[1]'], {}), '(0, M.shape[1])\n', (4965, 4980), True, 'import numpy as np\n'), ((4996, 5041), 'librosa.samples_to_time', 'librosa.samples_to_time', (['onsets'], {'sr': '(sr / STEP)'}), '(onsets, sr=sr / STEP)\n', (5019, 5041), False, 'import librosa\n'), ((5520, 5582), 'librosa.util.fix_frames', 'librosa.util.fix_frames', (['frames'], {'x_min': '(0)', 'x_max': '(M.shape[1] - 1)'}), '(frames, x_min=0, x_max=M.shape[1] - 1)\n', (5543, 5582), False, 'import librosa\n'), ((5596, 5650), 'librosa.frames_to_time', 'librosa.frames_to_time', (['onsets'], {'sr': 'sr', 'hop_length': 'STEP'}), '(onsets, sr=sr, hop_length=STEP)\n', (5618, 5650), False, 'import librosa\n'), ((5663, 5712), 'librosa.util.sync', 'librosa.util.sync', (['M', 'onsets'], {'aggregate': 'np.median'}), '(M, onsets, aggregate=np.median)\n', (5680, 5712), False, 'import librosa\n'), ((7478, 7550), 'librosa.segment.recurrence_matrix', 'librosa.segment.recurrence_matrix', (['M'], {'width': '(3)', 'mode': '"""affinity"""', 'sym': '(True)'}), "(M, width=3, mode='affinity', sym=True)\n", (7511, 7550), False, 'import librosa\n'), ((7599, 7658), 'librosa.segment.timelag_filter', 'librosa.segment.timelag_filter', (['scipy.ndimage.median_filter'], {}), '(scipy.ndimage.median_filter)\n', (7629, 7658), False, 'import librosa\n'), ((7883, 7927), 'librosa.util.sync', 'librosa.util.sync', (['M', 'x'], {'aggregate': 'np.median'}), '(M, x, 
aggregate=np.median)\n', (7900, 7927), False, 'import librosa\n'), ((8028, 8040), 'sklearn.preprocessing.scale', 'scale', (['Msync'], {}), '(Msync)\n', (8033, 8040), False, 'from sklearn.preprocessing import scale\n'), ((8200, 8224), 'numpy.median', 'np.median', (['path_distance'], {}), '(path_distance)\n', (8209, 8224), True, 'import numpy as np\n'), ((8237, 8267), 'numpy.exp', 'np.exp', (['(-path_distance / sigma)'], {}), '(-path_distance / sigma)\n', (8243, 8267), True, 'import numpy as np\n'), ((8607, 8629), 'numpy.sum', 'np.sum', (['R_path'], {'axis': '(1)'}), '(R_path, axis=1)\n', (8613, 8629), True, 'import numpy as np\n'), ((8641, 8659), 'numpy.sum', 'np.sum', (['Rf'], {'axis': '(1)'}), '(Rf, axis=1)\n', (8647, 8659), True, 'import numpy as np\n'), ((8945, 8991), 'scipy.sparse.csgraph.laplacian', 'scipy.sparse.csgraph.laplacian', (['A'], {'normed': '(True)'}), '(A, normed=True)\n', (8975, 8991), False, 'import scipy\n'), ((9106, 9123), 'numpy.linalg.eigh', 'np.linalg.eigh', (['L'], {}), '(L)\n', (9120, 9123), True, 'import numpy as np\n'), ((9267, 9314), 'scipy.ndimage.median_filter', 'scipy.ndimage.median_filter', (['evecs'], {'size': '(9, 1)'}), '(evecs, size=(9, 1))\n', (9294, 9314), False, 'import scipy\n'), ((12099, 12117), 'numpy.zeros', 'np.zeros', (['n_labels'], {}), '(n_labels)\n', (12107, 12117), True, 'import numpy as np\n'), ((12786, 12813), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (12796, 12813), True, 'import matplotlib.pyplot as plt\n'), ((12815, 12835), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (12826, 12835), True, 'import matplotlib.pyplot as plt\n'), ((12837, 12925), 'librosa.display.specshow', 'librosa.display.specshow', (['Rf'], {'cmap': '"""inferno_r"""', 'y_axis': '"""time"""', 'y_coords': 'onset_times'}), "(Rf, cmap='inferno_r', y_axis='time', y_coords=\n onset_times)\n", (12861, 12925), False, 'import librosa\n'), ((12922, 12974), 
'matplotlib.pyplot.title', 'plt.title', (['"""Long-range recurrence similarity (Rrec)"""'], {}), "('Long-range recurrence similarity (Rrec)')\n", (12931, 12974), True, 'import matplotlib.pyplot as plt\n'), ((12976, 12996), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (12987, 12996), True, 'import matplotlib.pyplot as plt\n'), ((12998, 13048), 'librosa.display.specshow', 'librosa.display.specshow', (['R_path'], {'cmap': '"""inferno_r"""'}), "(R_path, cmap='inferno_r')\n", (13022, 13048), False, 'import librosa\n'), ((13050, 13091), 'matplotlib.pyplot.title', 'plt.title', (['"""Local path similarity (Rloc)"""'], {}), "('Local path similarity (Rloc)')\n", (13059, 13091), True, 'import matplotlib.pyplot as plt\n'), ((13093, 13113), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (13104, 13113), True, 'import matplotlib.pyplot as plt\n'), ((13115, 13160), 'librosa.display.specshow', 'librosa.display.specshow', (['A'], {'cmap': '"""inferno_r"""'}), "(A, cmap='inferno_r')\n", (13139, 13160), False, 'import librosa\n'), ((13162, 13215), 'matplotlib.pyplot.title', 'plt.title', (['"""Combined graph (A = m Rrec + (1-m) Rloc)"""'], {}), "('Combined graph (A = m Rrec + (1-m) Rloc)')\n", (13171, 13215), True, 'import matplotlib.pyplot as plt\n'), ((13217, 13235), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13233, 13235), True, 'import matplotlib.pyplot as plt\n'), ((13301, 13328), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (13311, 13328), True, 'import matplotlib.pyplot as plt\n'), ((13339, 13364), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Paired"""', 'k'], {}), "('Paired', k)\n", (13351, 13364), True, 'import matplotlib.pyplot as plt\n'), ((13403, 13449), 'librosa.display.specshow', 'librosa.display.specshow', (['Rf'], {'cmap': '"""inferno_r"""'}), "(Rf, cmap='inferno_r')\n", (13427, 13449), False, 'import 
librosa\n'), ((13546, 13610), 'librosa.display.specshow', 'librosa.display.specshow', (['X'], {'y_axis': '"""time"""', 'y_coords': 'onset_times'}), "(X, y_axis='time', y_coords=onset_times)\n", (13570, 13610), False, 'import librosa\n'), ((13839, 13857), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13855, 13857), True, 'import matplotlib.pyplot as plt\n'), ((14445, 14494), 'librosa.util.sync', 'librosa.util.sync', (['X', 'onsets'], {'aggregate': 'np.median'}), '(X, onsets, aggregate=np.median)\n', (14462, 14494), False, 'import librosa\n'), ((16273, 16328), 'librosa.load', 'librosa.load', (['filename'], {'offset': 'start', 'duration': 'duration'}), '(filename, offset=start, duration=duration)\n', (16285, 16328), False, 'import librosa\n'), ((18975, 19020), 'librosa.util.fix_frames', 'librosa.util.fix_frames', (['bound_beats'], {'x_min': '(0)'}), '(bound_beats, x_min=0)\n', (18998, 19020), False, 'import librosa\n'), ((19250, 19320), 'librosa.util.fix_frames', 'librosa.util.fix_frames', (['bound_frames'], {'x_min': 'None', 'x_max': '(nb_frames - 1)'}), '(bound_frames, x_min=None, x_max=nb_frames - 1)\n', (19273, 19320), False, 'import librosa\n'), ((19335, 19395), 'librosa.frames_to_time', 'librosa.frames_to_time', (['bound_frames'], {'sr': 'sr', 'hop_length': 'STEP'}), '(bound_frames, sr=sr, hop_length=STEP)\n', (19357, 19395), False, 'import librosa\n'), ((19909, 19940), 'numpy.zeros', 'np.zeros', (['center_clusters.shape'], {}), '(center_clusters.shape)\n', (19917, 19940), True, 'import numpy as np\n'), ((20347, 20375), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 4)'}), '(figsize=(5, 4))\n', (20359, 20375), True, 'import matplotlib.pyplot as plt\n'), ((20652, 20670), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (20668, 20670), True, 'import matplotlib.pyplot as plt\n'), ((20739, 20815), 'numpy.where', 'np.where', (['((onset_times >= params.begin_ref) & (onset_times < 
params.end_ref))'], {}), '((onset_times >= params.begin_ref) & (onset_times < params.end_ref))\n', (20747, 20815), True, 'import numpy as np\n'), ((20860, 21017), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Segmentation and clustering of musical sections with spectral clustering (Laplacian matrix and eigen values)"""'}), "(description=\n 'Segmentation and clustering of musical sections with spectral clustering (Laplacian matrix and eigen values)'\n )\n", (20883, 21017), False, 'import argparse\n'), ((23154, 23195), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 3 + 2 * N_CLUST)'}), '(figsize=(12, 3 + 2 * N_CLUST))\n', (23164, 23195), True, 'import matplotlib.pyplot as plt\n'), ((23299, 23350), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1 + N_CLUST)', '(1)'], {'height_ratios': 'hr'}), '(1 + N_CLUST, 1, height_ratios=hr)\n', (23316, 23350), False, 'from matplotlib import gridspec\n'), ((23385, 23531), 'librosa.display.specshow', 'librosa.display.specshow', (['Csync'], {'y_axis': '"""cqt_hz"""', 'sr': 'sr', 'hop_length': 'STEP', 'bins_per_octave': 'BINS_PER_OCTAVE', 'x_axis': '"""time"""', 'x_coords': 'onset_times'}), "(Csync, y_axis='cqt_hz', sr=sr, hop_length=STEP,\n bins_per_octave=BINS_PER_OCTAVE, x_axis='time', x_coords=onset_times)\n", (23409, 23531), False, 'import librosa\n'), ((26648, 26666), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (26664, 26666), True, 'import matplotlib.pyplot as plt\n'), ((26668, 26678), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26676, 26678), True, 'import matplotlib.pyplot as plt\n'), ((2863, 2890), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (2873, 2890), True, 'import matplotlib.pyplot as plt\n'), ((2893, 2931), 'matplotlib.pyplot.plot', 'plt.plot', (['oenv'], {'label': '"""Onset strength"""'}), "(oenv, label='Onset strength')\n", (2901, 2931), True, 'import 
matplotlib.pyplot as plt\n'), ((3063, 3104), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(True)', 'framealpha': '(0.75)'}), '(frameon=True, framealpha=0.75)\n', (3073, 3104), True, 'import matplotlib.pyplot as plt\n'), ((3107, 3125), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3123, 3125), True, 'import matplotlib.pyplot as plt\n'), ((3129, 3156), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (3139, 3156), True, 'import matplotlib.pyplot as plt\n'), ((3159, 3179), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (3170, 3179), True, 'import matplotlib.pyplot as plt\n'), ((3180, 3208), 'matplotlib.pyplot.title', 'plt.title', (['"""CQT spectrogram"""'], {}), "('CQT spectrogram')\n", (3189, 3208), True, 'import matplotlib.pyplot as plt\n'), ((3211, 3331), 'librosa.display.specshow', 'librosa.display.specshow', (['M'], {'y_axis': '"""cqt_hz"""', 'sr': 'sr', 'hop_length': 'STEP', 'bins_per_octave': 'BINS_PER_OCTAVE', 'x_axis': '"""time"""'}), "(M, y_axis='cqt_hz', sr=sr, hop_length=STEP,\n bins_per_octave=BINS_PER_OCTAVE, x_axis='time')\n", (3235, 3331), False, 'import librosa\n'), ((3331, 3349), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3347, 3349), True, 'import matplotlib.pyplot as plt\n'), ((3353, 3373), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (3364, 3373), True, 'import matplotlib.pyplot as plt\n'), ((3374, 3425), 'matplotlib.pyplot.title', 'plt.title', (['"""CQT spectrogram synchronized on onsets"""'], {}), "('CQT spectrogram synchronized on onsets')\n", (3383, 3425), True, 'import matplotlib.pyplot as plt\n'), ((3428, 3551), 'librosa.display.specshow', 'librosa.display.specshow', (['Msync'], {'bins_per_octave': 'BINS_PER_OCTAVE', 'y_axis': '"""cqt_hz"""', 'x_axis': '"""time"""', 'x_coords': 'onset_times'}), "(Msync, bins_per_octave=BINS_PER_OCTAVE, 
y_axis=\n 'cqt_hz', x_axis='time', x_coords=onset_times)\n", (3452, 3551), False, 'import librosa\n'), ((3549, 3567), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3565, 3567), True, 'import matplotlib.pyplot as plt\n'), ((3844, 3897), 'librosa.frames_to_time', 'librosa.frames_to_time', (['beats'], {'sr': 'sr', 'hop_length': 'STEP'}), '(beats, sr=sr, hop_length=STEP)\n', (3866, 3897), False, 'import librosa\n'), ((4261, 4288), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (4271, 4288), True, 'import matplotlib.pyplot as plt\n'), ((4291, 4311), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (4302, 4311), True, 'import matplotlib.pyplot as plt\n'), ((4312, 4340), 'matplotlib.pyplot.title', 'plt.title', (['"""CQT spectrogram"""'], {}), "('CQT spectrogram')\n", (4321, 4340), True, 'import matplotlib.pyplot as plt\n'), ((4343, 4463), 'librosa.display.specshow', 'librosa.display.specshow', (['M'], {'y_axis': '"""cqt_hz"""', 'sr': 'sr', 'hop_length': 'STEP', 'bins_per_octave': 'BINS_PER_OCTAVE', 'x_axis': '"""time"""'}), "(M, y_axis='cqt_hz', sr=sr, hop_length=STEP,\n bins_per_octave=BINS_PER_OCTAVE, x_axis='time')\n", (4367, 4463), False, 'import librosa\n'), ((4462, 4480), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4478, 4480), True, 'import matplotlib.pyplot as plt\n'), ((4622, 4642), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (4633, 4642), True, 'import matplotlib.pyplot as plt\n'), ((4643, 4693), 'matplotlib.pyplot.title', 'plt.title', (['"""CQT spectrogram synchronized on beats"""'], {}), "('CQT spectrogram synchronized on beats')\n", (4652, 4693), True, 'import matplotlib.pyplot as plt\n'), ((4696, 4818), 'librosa.display.specshow', 'librosa.display.specshow', (['Msync'], {'bins_per_octave': 'BINS_PER_OCTAVE', 'y_axis': '"""cqt_hz"""', 'x_axis': '"""time"""', 'x_coords': 
'beat_times'}), "(Msync, bins_per_octave=BINS_PER_OCTAVE, y_axis=\n 'cqt_hz', x_axis='time', x_coords=beat_times)\n", (4720, 4818), False, 'import librosa\n'), ((4816, 4834), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4832, 4834), True, 'import matplotlib.pyplot as plt\n'), ((5066, 5093), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (5076, 5093), True, 'import matplotlib.pyplot as plt\n'), ((5096, 5124), 'matplotlib.pyplot.title', 'plt.title', (['"""CQT spectrogram"""'], {}), "('CQT spectrogram')\n", (5105, 5124), True, 'import matplotlib.pyplot as plt\n'), ((5127, 5253), 'librosa.display.specshow', 'librosa.display.specshow', (['M'], {'y_axis': '"""cqt_hz"""', 'sr': 'sr', 'bins_per_octave': 'BINS_PER_OCTAVE', 'x_axis': '"""time"""', 'x_coords': 'onset_times'}), "(M, y_axis='cqt_hz', sr=sr, bins_per_octave=\n BINS_PER_OCTAVE, x_axis='time', x_coords=onset_times)\n", (5151, 5253), False, 'import librosa\n'), ((5251, 5269), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5267, 5269), True, 'import matplotlib.pyplot as plt\n'), ((5739, 5766), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (5749, 5766), True, 'import matplotlib.pyplot as plt\n'), ((5769, 5789), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (5780, 5789), True, 'import matplotlib.pyplot as plt\n'), ((5790, 5818), 'matplotlib.pyplot.title', 'plt.title', (['"""CQT spectrogram"""'], {}), "('CQT spectrogram')\n", (5799, 5818), True, 'import matplotlib.pyplot as plt\n'), ((5821, 5941), 'librosa.display.specshow', 'librosa.display.specshow', (['M'], {'y_axis': '"""cqt_hz"""', 'sr': 'sr', 'hop_length': 'STEP', 'bins_per_octave': 'BINS_PER_OCTAVE', 'x_axis': '"""time"""'}), "(M, y_axis='cqt_hz', sr=sr, hop_length=STEP,\n bins_per_octave=BINS_PER_OCTAVE, x_axis='time')\n", (5845, 5941), False, 'import librosa\n'), ((5940, 
5958), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5956, 5958), True, 'import matplotlib.pyplot as plt\n'), ((5962, 5982), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (5973, 5982), True, 'import matplotlib.pyplot as plt\n'), ((5983, 6033), 'matplotlib.pyplot.title', 'plt.title', (['"""CQT spectrogram synchronized on beats"""'], {}), "('CQT spectrogram synchronized on beats')\n", (5992, 6033), True, 'import matplotlib.pyplot as plt\n'), ((6036, 6159), 'librosa.display.specshow', 'librosa.display.specshow', (['Msync'], {'bins_per_octave': 'BINS_PER_OCTAVE', 'y_axis': '"""cqt_hz"""', 'x_axis': '"""time"""', 'x_coords': 'onset_times'}), "(Msync, bins_per_octave=BINS_PER_OCTAVE, y_axis=\n 'cqt_hz', x_axis='time', x_coords=onset_times)\n", (6060, 6159), False, 'import librosa\n'), ((6157, 6175), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6173, 6175), True, 'import matplotlib.pyplot as plt\n'), ((8386, 8408), 'numpy.diag', 'np.diag', (['path_sim'], {'k': '(1)'}), '(path_sim, k=1)\n', (8393, 8408), True, 'import numpy as np\n'), ((8411, 8434), 'numpy.diag', 'np.diag', (['path_sim'], {'k': '(-1)'}), '(path_sim, k=-1)\n', (8418, 8434), True, 'import numpy as np\n'), ((8701, 8734), 'numpy.sum', 'np.sum', (['((deg_path + deg_rec) ** 2)'], {}), '((deg_path + deg_rec) ** 2)\n', (8707, 8734), True, 'import numpy as np\n'), ((9410, 9439), 'numpy.cumsum', 'np.cumsum', (['(evecs ** 2)'], {'axis': '(1)'}), '(evecs ** 2, axis=1)\n', (9419, 9439), True, 'import numpy as np\n'), ((12508, 12535), 'numpy.allclose', 'np.allclose', (['intra_dists', '(0)'], {}), '(intra_dists, 0)\n', (12519, 12535), True, 'import numpy as np\n'), ((12539, 12573), 'numpy.allclose', 'np.allclose', (['centroid_distances', '(0)'], {}), '(centroid_distances, 0)\n', (12550, 12573), True, 'import numpy as np\n'), ((12706, 12730), 'numpy.nanmax', 'np.nanmax', (['score'], {'axis': '(1)'}), '(score, axis=1)\n', 
(12715, 12730), True, 'import numpy as np\n'), ((14022, 14067), 'numpy.min', 'np.min', (['[onset_times[-1], onset_times[n] + w]'], {}), '([onset_times[-1], onset_times[n] + w])\n', (14028, 14067), True, 'import numpy as np\n'), ((14075, 14120), 'numpy.min', 'np.min', (['[onset_times[-1] - w, onset_times[n]]'], {}), '([onset_times[-1] - w, onset_times[n]])\n', (14081, 14120), True, 'import numpy as np\n'), ((14128, 14178), 'numpy.where', 'np.where', (['((onset_times < t1) & (onset_times >= t2))'], {}), '((onset_times < t1) & (onset_times >= t2))\n', (14136, 14178), True, 'import numpy as np\n'), ((14216, 14233), 'numpy.max', 'np.max', (['C[:, idw]'], {}), '(C[:, idw])\n', (14222, 14233), True, 'import numpy as np\n'), ((14298, 14342), 'numpy.where', 'np.where', (['(C[:, n] > alpha * threshold_chroma)'], {}), '(C[:, n] > alpha * threshold_chroma)\n', (14306, 14342), True, 'import numpy as np\n'), ((14590, 14617), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (14600, 14617), True, 'import matplotlib.pyplot as plt\n'), ((14749, 14859), 'librosa.display.specshow', 'librosa.display.specshow', (['Xsync[:12, :]'], {'y_axis': '"""chroma"""', 'x_axis': '"""time"""', 'x_coords': 'onset_times', 'cmap': '"""OrRd"""'}), "(Xsync[:12, :], y_axis='chroma', x_axis='time',\n x_coords=onset_times, cmap='OrRd')\n", (14773, 14859), False, 'import librosa\n'), ((15388, 15406), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15404, 15406), True, 'import matplotlib.pyplot as plt\n'), ((16428, 16451), 'librosa.effects.hpss', 'librosa.effects.hpss', (['y'], {}), '(y)\n', (16448, 16451), False, 'import librosa\n'), ((16454, 16516), 'librosa.output.write_wav', 'librosa.output.write_wav', (["(filename + '_harmo.wav')", 'y_harmo', 'sr'], {}), "(filename + '_harmo.wav', y_harmo, sr)\n", (16478, 16516), False, 'import librosa\n'), ((16519, 16581), 'librosa.output.write_wav', 'librosa.output.write_wav', (["(filename + 
'_percu.wav')", 'y_percu', 'sr'], {}), "(filename + '_percu.wav', y_percu, sr)\n", (16543, 16581), False, 'import librosa\n'), ((16768, 16821), 'librosa.estimate_tuning', 'librosa.estimate_tuning', ([], {'y': 'y', 'sr': 'sr', 'resolution': '(0.001)'}), '(y=y, sr=sr, resolution=0.001)\n', (16791, 16821), False, 'import librosa\n'), ((17032, 17104), 'librosa.feature.mfcc', 'librosa.feature.mfcc', ([], {'y': 'y', 'sr': 'sr', 'n_mfcc': '(20)', 'n_fft': 'NFFT', 'hop_length': 'STEP'}), '(y=y, sr=sr, n_mfcc=20, n_fft=NFFT, hop_length=STEP)\n', (17052, 17104), False, 'import librosa\n'), ((17126, 17153), 'librosa.feature.delta', 'librosa.feature.delta', (['mfcc'], {}), '(mfcc)\n', (17147, 17153), False, 'import librosa\n'), ((17163, 17205), 'numpy.concatenate', 'np.concatenate', (['(mfcc, mfcc_delta)'], {'axis': '(0)'}), '((mfcc, mfcc_delta), axis=0)\n', (17177, 17205), True, 'import numpy as np\n'), ((17267, 17384), 'librosa.feature.chroma_cqt', 'librosa.feature.chroma_cqt', ([], {'y': 'y', 'sr': 'sr', 'n_chroma': '(12)', 'n_octaves': 'N_OCTAVES', 'hop_length': 'STEP', 'norm': 'None', 'tuning': 'A440'}), '(y=y, sr=sr, n_chroma=12, n_octaves=N_OCTAVES,\n hop_length=STEP, norm=None, tuning=A440)\n', (17293, 17384), False, 'import librosa\n'), ((17407, 17436), 'librosa.feature.delta', 'librosa.feature.delta', (['chroma'], {}), '(chroma)\n', (17428, 17436), False, 'import librosa\n'), ((17446, 17492), 'numpy.concatenate', 'np.concatenate', (['(chroma, chroma_delta)'], {'axis': '(0)'}), '((chroma, chroma_delta), axis=0)\n', (17460, 17492), True, 'import numpy as np\n'), ((17583, 17657), 'librosa.feature.spectral_centroid', 'librosa.feature.spectral_centroid', ([], {'y': 'y', 'sr': 'sr', 'n_fft': 'NFFT', 'hop_length': 'STEP'}), '(y=y, sr=sr, n_fft=NFFT, hop_length=STEP)\n', (17616, 17657), False, 'import librosa\n'), ((17675, 17764), 'librosa.feature.spectral_contrast', 'librosa.feature.spectral_contrast', ([], {'y': 'y', 'sr': 'sr', 'n_fft': 'NFFT', 'n_bands': '(6)', 
'hop_length': 'STEP'}), '(y=y, sr=sr, n_fft=NFFT, n_bands=6,\n hop_length=STEP)\n', (17708, 17764), False, 'import librosa\n'), ((17778, 17845), 'librosa.feature.spectral_flatness', 'librosa.feature.spectral_flatness', ([], {'y': 'y', 'n_fft': 'NFFT', 'hop_length': 'STEP'}), '(y=y, n_fft=NFFT, hop_length=STEP)\n', (17811, 17845), False, 'import librosa\n'), ((17864, 17960), 'librosa.feature.spectral_rolloff', 'librosa.feature.spectral_rolloff', ([], {'y': 'y', 'sr': 'sr', 'n_fft': 'NFFT', 'hop_length': 'STEP', 'roll_percent': '(0.05)'}), '(y=y, sr=sr, n_fft=NFFT, hop_length=STEP,\n roll_percent=0.05)\n', (17896, 17960), False, 'import librosa\n'), ((17977, 18073), 'librosa.feature.spectral_rolloff', 'librosa.feature.spectral_rolloff', ([], {'y': 'y', 'sr': 'sr', 'n_fft': 'NFFT', 'hop_length': 'STEP', 'roll_percent': '(0.25)'}), '(y=y, sr=sr, n_fft=NFFT, hop_length=STEP,\n roll_percent=0.25)\n', (18009, 18073), False, 'import librosa\n'), ((18090, 18185), 'librosa.feature.spectral_rolloff', 'librosa.feature.spectral_rolloff', ([], {'y': 'y', 'sr': 'sr', 'n_fft': 'NFFT', 'hop_length': 'STEP', 'roll_percent': '(0.5)'}), '(y=y, sr=sr, n_fft=NFFT, hop_length=STEP,\n roll_percent=0.5)\n', (18122, 18185), False, 'import librosa\n'), ((18203, 18299), 'librosa.feature.spectral_rolloff', 'librosa.feature.spectral_rolloff', ([], {'y': 'y', 'sr': 'sr', 'n_fft': 'NFFT', 'hop_length': 'STEP', 'roll_percent': '(0.75)'}), '(y=y, sr=sr, n_fft=NFFT, hop_length=STEP,\n roll_percent=0.75)\n', (18235, 18299), False, 'import librosa\n'), ((18316, 18412), 'librosa.feature.spectral_rolloff', 'librosa.feature.spectral_rolloff', ([], {'y': 'y', 'sr': 'sr', 'n_fft': 'NFFT', 'hop_length': 'STEP', 'roll_percent': '(0.95)'}), '(y=y, sr=sr, n_fft=NFFT, hop_length=STEP,\n roll_percent=0.95)\n', (18348, 18412), False, 'import librosa\n'), ((18424, 18537), 'numpy.concatenate', 'np.concatenate', (['(centroid, contrast, flatness, rolloff05, rolloff25, rolloff50, rolloff75,\n rolloff95)'], {'axis': 
'(0)'}), '((centroid, contrast, flatness, rolloff05, rolloff25,\n rolloff50, rolloff75, rolloff95), axis=0)\n', (18438, 18537), True, 'import numpy as np\n'), ((18545, 18572), 'librosa.feature.delta', 'librosa.feature.delta', (['spec'], {}), '(spec)\n', (18566, 18572), False, 'import librosa\n'), ((18583, 18625), 'numpy.concatenate', 'np.concatenate', (['(spec, spec_delta)'], {'axis': '(0)'}), '((spec, spec_delta), axis=0)\n', (18597, 18625), True, 'import numpy as np\n'), ((18658, 18672), 'numpy.array', 'np.array', (['full'], {}), '(full)\n', (18666, 18672), True, 'import numpy as np\n'), ((18877, 18928), 'numpy.flatnonzero', 'np.flatnonzero', (['(cluster_ids[:-1] != cluster_ids[1:])'], {}), '(cluster_ids[:-1] != cluster_ids[1:])\n', (18891, 18928), True, 'import numpy as np\n'), ((25429, 25454), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Paired"""', 'k'], {}), "('Paired', k)\n", (25441, 25454), True, 'import matplotlib.pyplot as plt\n'), ((3994, 4013), 'numpy.std', 'np.std', (['beat_period'], {}), '(beat_period)\n', (4000, 4013), True, 'import numpy as np\n'), ((8085, 8114), 'numpy.diff', 'np.diff', (['Msync_normed'], {'axis': '(1)'}), '(Msync_normed, axis=1)\n', (8092, 8114), True, 'import numpy as np\n'), ((13730, 13752), 'numpy.atleast_2d', 'np.atleast_2d', (['seg_ids'], {}), '(seg_ids)\n', (13743, 13752), True, 'import numpy as np\n'), ((15011, 15032), 'numpy.abs', 'np.abs', (['Xsync[12:, :]'], {}), '(Xsync[12:, :])\n', (15017, 15032), True, 'import numpy as np\n'), ((15454, 15481), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (15464, 15481), True, 'import matplotlib.pyplot as plt\n'), ((15568, 15644), 'librosa.display.specshow', 'librosa.display.specshow', (['Xsync[:21, :]'], {'x_axis': '"""time"""', 'x_coords': 'onset_times'}), "(Xsync[:21, :], x_axis='time', x_coords=onset_times)\n", (15592, 15644), False, 'import librosa\n'), ((16123, 16141), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', 
([], {}), '()\n', (16139, 16141), True, 'import matplotlib.pyplot as plt\n'), ((3972, 3992), 'numpy.mean', 'np.mean', (['beat_period'], {}), '(beat_period)\n', (3979, 3992), True, 'import numpy as np\n'), ((6388, 6501), 'librosa.cqt', 'librosa.cqt', ([], {'y': 'y', 'sr': 'sr', 'bins_per_octave': 'BINS_PER_OCTAVE', 'n_bins': '(N_OCTAVES * BINS_PER_OCTAVE)', 'hop_length': 'STEP'}), '(y=y, sr=sr, bins_per_octave=BINS_PER_OCTAVE, n_bins=N_OCTAVES *\n BINS_PER_OCTAVE, hop_length=STEP)\n', (6399, 6501), False, 'import librosa\n'), ((15818, 15839), 'numpy.abs', 'np.abs', (['Xsync[20:, :]'], {}), '(Xsync[20:, :])\n', (15824, 15839), True, 'import numpy as np\n'), ((19652, 19703), 'scipy.spatial.distance.cosine', 'scipy.spatial.distance.cosine', (['center', 'distance_ref'], {}), '(center, distance_ref)\n', (19681, 19703), False, 'import scipy\n'), ((20087, 20136), 'scipy.spatial.distance.cosine', 'scipy.spatial.distance.cosine', (['center_i', 'center_j'], {}), '(center_i, center_j)\n', (20116, 20136), False, 'import scipy\n'), ((24766, 24799), 'numpy.mean', 'np.mean', (['X[:10 * NFFT, :]'], {'axis': '(0)'}), '(X[:10 * NFFT, :], axis=0)\n', (24773, 24799), True, 'import numpy as np\n'), ((25231, 25255), 'librosa.note_to_hz', 'librosa.note_to_hz', (['"""C1"""'], {}), "('C1')\n", (25249, 25255), False, 'import librosa\n'), ((25311, 25342), 'os.path.splitext', 'os.path.splitext', (['args.filename'], {}), '(args.filename)\n', (25327, 25342), False, 'import sys, os\n'), ((7155, 7165), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7163, 7165), False, 'import sys, os\n'), ((9853, 9879), 'numpy.where', 'np.where', (['(1 - evals > 0.75)'], {}), '(1 - evals > 0.75)\n', (9861, 9879), True, 'import numpy as np\n'), ((10957, 10967), 'sys.exit', 'sys.exit', ([], {}), '()\n', (10965, 10967), False, 'import sys, os\n'), ((9804, 9815), 'numpy.mean', 'np.mean', (['nc'], {}), '(nc)\n', (9811, 9815), True, 'import numpy as np\n'), ((10024, 10049), 'numpy.cumsum', 'np.cumsum', (['(e ** 2)'], 
{'axis': '(1)'}), '(e ** 2, axis=1)\n', (10033, 10049), True, 'import numpy as np\n'), ((19763, 19799), 'numpy.sum', 'np.sum', (['((center - distance_ref) ** 2)'], {}), '((center - distance_ref) ** 2)\n', (19769, 19799), True, 'import numpy as np\n'), ((20196, 20230), 'numpy.sum', 'np.sum', (['((center_i - center_j) ** 2)'], {}), '((center_i - center_j) ** 2)\n', (20202, 20230), True, 'import numpy as np\n'), ((10717, 10733), 'numpy.argmax', 'np.argmax', (['score'], {}), '(score)\n', (10726, 10733), True, 'import numpy as np\n'), ((10787, 10803), 'numpy.argmin', 'np.argmin', (['score'], {}), '(score)\n', (10796, 10803), True, 'import numpy as np\n'), ((10859, 10875), 'numpy.argmax', 'np.argmax', (['score'], {}), '(score)\n', (10868, 10875), True, 'import numpy as np\n')] |
import subprocess
import pg8000
from agnostic import AbstractBackend
class PostgresBackend(AbstractBackend):

    ''' Support for PostgreSQL. '''

    def backup_db(self, backup_file):
        '''
        Start ``pg_dump`` streaming the database contents into the
        ``backup_file`` handle and return the ``Popen`` instance.
        '''

        args = ['pg_dump', '-h', self._host, '-U', self._user]

        if self._port is not None:
            args += ['-p', str(self._port)]

        for schema in self._split_schema():
            args += ['-n', schema]

        args.append(self._database)

        return subprocess.Popen(
            args,
            env={'PGPASSWORD': self._password},
            stdout=backup_file,
            stderr=subprocess.PIPE
        )

    def clear_db(self, cursor):
        ''' Remove all objects from the database. '''

        # Tables first: everything owned by our user outside the
        # system catalogs.
        cursor.execute('''
            SELECT schemaname, tablename FROM pg_tables
            WHERE tableowner = %s
            AND schemaname != 'pg_catalog'
            AND schemaname != 'information_schema'
        ''', (self._user,))

        tables = ['"{}"."{}"'.format(schema, table)
                  for schema, table in cursor.fetchall()]

        if tables:
            cursor.execute('DROP TABLE {} CASCADE'.format(', '.join(tables)))

        # Then sequences.
        cursor.execute('''
            SELECT relname FROM pg_class
            WHERE relkind = 'S'
        ''')

        sequences = ['"{}"'.format(row[0]) for row in cursor.fetchall()]

        if sequences:
            cursor.execute('DROP SEQUENCE {} CASCADE'.format(','.join(sequences)))

        # Then custom types, e.g. ENUM types.
        cursor.execute('''
            SELECT typname FROM pg_type
            WHERE typtype = 'e'
        ''')

        types = ['"{}"'.format(row[0]) for row in cursor.fetchall()]

        if types:
            cursor.execute('DROP TYPE {} CASCADE'.format(','.join(types)))

        # Finally, any configured schemas other than the default one.
        for schema in self._split_schema():
            if schema != 'public':
                cursor.execute('DROP SCHEMA IF EXISTS {} CASCADE'.format(schema))

    def connect_db(self):
        ''' Connect to PostgreSQL. '''

        kwargs = {
            'host': self._host,
            'user': self._user,
            'password': self._password,
            'database': self._database,
        }

        if self._port is not None:
            kwargs['port'] = self._port

        connection = pg8000.connect(**kwargs)
        connection.autocommit = True

        if self._schema is not None:
            connection.cursor().execute(
                "SET SCHEMA '{}'".format(self._schema))

        return connection

    def get_schema_command(self):
        ''' Return a command that will set the current schema. '''

        if self._schema is not None:
            return 'SET search_path = {};\n'.format(self._schema)

        return 'SET search_path = "$user",public;\n'

    def restore_db(self, backup_file):
        '''
        Start ``psql`` reading SQL statements from the ``backup_file``
        handle and return the ``Popen`` instance.
        '''

        args = [
            'psql',
            '-h', self._host,
            '-U', self._user,
            '-v', 'ON_ERROR_STOP=1',  # Fail fast if an error occurs.
        ]

        if self._port is not None:
            args += ['-p', str(self._port)]

        args.append(self._database)

        return subprocess.Popen(
            args,
            env={'PGPASSWORD': self._password},
            stdin=backup_file,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.PIPE
        )

    def snapshot_db(self, snapshot_file):
        '''
        Start ``pg_dump`` in schema-only mode, writing the snapshot to
        ``snapshot_file``, and return the ``Popen`` instance.
        '''

        args = [
            'pg_dump',
            '-h', self._host,
            '-U', self._user,
            '-s',  # dump schema only
            '-x',  # don't dump grant/revoke statements
            '-O',  # don't dump ownership commands
            '--no-tablespaces',
        ]

        if self._port is not None:
            args += ['-p', str(self._port)]

        # _split_schema() yields nothing when no schema is configured.
        for schema in self._split_schema():
            args += ['-n', schema]

        args.append(self._database)

        return subprocess.Popen(
            args,
            env={'PGPASSWORD': self._password},
            stdout=snapshot_file,
            stderr=subprocess.PIPE
        )

    def _split_schema(self):
        '''
        Split the schema string into separate schema names.

        PostgreSQL allows specifying the schema as a comma separated
        search path that looks for objects in more than one schema; this
        breaks that path into individual names.

        The special name ``"$user"`` (quotes included) is swapped for the
        connection user, mimicking the ``SET SEARCH PATH TO ...``
        behavior in PostgreSQL.
        '''

        if self._schema is None:
            return []

        return [
            self._user if name == '"$user"' else name
            for name in (part.strip() for part in self._schema.split(','))
        ]
| [
"subprocess.Popen",
"pg8000.connect"
] | [((762, 840), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'env': 'env', 'stdout': 'backup_file', 'stderr': 'subprocess.PIPE'}), '(command, env=env, stdout=backup_file, stderr=subprocess.PIPE)\n', (778, 840), False, 'import subprocess\n'), ((2755, 2785), 'pg8000.connect', 'pg8000.connect', ([], {}), '(**connect_args)\n', (2769, 2785), False, 'import pg8000\n'), ((3813, 3922), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'env': 'env', 'stdin': 'backup_file', 'stdout': 'subprocess.DEVNULL', 'stderr': 'subprocess.PIPE'}), '(command, env=env, stdin=backup_file, stdout=subprocess.\n DEVNULL, stderr=subprocess.PIPE)\n', (3829, 3922), False, 'import subprocess\n'), ((4799, 4884), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'env': 'env', 'stdout': 'snapshot_file', 'stderr': 'subprocess.PIPE'}), '(command, env=env, stdout=snapshot_file, stderr=subprocess.PIPE\n )\n', (4815, 4884), False, 'import subprocess\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import common_fn as cf
import seaborn as sns
plt.rcParams["svg.hashsalt"] = 0

# Output locations and parameter-sweep naming.
pre_path = 'EnvEq/All3/'
parm_format = '{:.2e}'
parm_name = 'therapy_abi-Tneg_initratio-Totcell'
parm_name_array = ['Tneg_initratio', 'Totcell']
post_path1 = 'o2-Null_test-HE/'
parm_name1 = parm_name + '/' + post_path1
cf.mkdirs(pre_path=pre_path, parm_name=parm_name1)

# Parameter grid: initial T-negative ratio x total cell count.
ir_arr = np.logspace(-1, -3, 5)
tot_cell_arr = np.array([1000, 2000, 4000])
cases = ['No', 'AT', 'AT_nn', 'MT', 'SOC']

# Cartesian product of the two sweep axes, one (ratio, total) row per combination.
parms_array = np.array([[ir, tc] for ir in ir_arr for tc in tot_cell_arr])

for case in cases:
    post_path = post_path1 + case + '-'
    cf.timeseries(pre_path=pre_path, parm_name=parm_name, parm_array=parms_array,
                  parm_format=parm_format, post_path=post_path)
    df = cf.eq_values(pre_path=pre_path, parm_name=parm_name, parm_array=parms_array,
                      parm_format=parm_format, parm_name_array=parm_name_array,
                      post_path=post_path, ttp=True, limit=9000)
| [
"common_fn.mkdirs",
"common_fn.eq_values",
"common_fn.timeseries",
"numpy.array",
"numpy.append",
"numpy.empty",
"numpy.logspace"
] | [((350, 400), 'common_fn.mkdirs', 'cf.mkdirs', ([], {'pre_path': 'pre_path', 'parm_name': 'parm_name1'}), '(pre_path=pre_path, parm_name=parm_name1)\n', (359, 400), True, 'import common_fn as cf\n'), ((429, 451), 'numpy.logspace', 'np.logspace', (['(-1)', '(-3)', '(5)'], {}), '(-1, -3, 5)\n', (440, 451), True, 'import numpy as np\n'), ((463, 491), 'numpy.array', 'np.array', (['[1000, 2000, 4000]'], {}), '([1000, 2000, 4000])\n', (471, 491), True, 'import numpy as np\n'), ((540, 556), 'numpy.empty', 'np.empty', (['[0, 2]'], {}), '([0, 2])\n', (548, 556), True, 'import numpy as np\n'), ((728, 856), 'common_fn.timeseries', 'cf.timeseries', ([], {'pre_path': 'pre_path', 'parm_name': 'parm_name', 'parm_array': 'parms_array', 'parm_format': 'parm_format', 'post_path': 'post_path'}), '(pre_path=pre_path, parm_name=parm_name, parm_array=\n parms_array, parm_format=parm_format, post_path=post_path)\n', (741, 856), True, 'import common_fn as cf\n'), ((855, 1041), 'common_fn.eq_values', 'cf.eq_values', ([], {'pre_path': 'pre_path', 'parm_name': 'parm_name', 'parm_array': 'parms_array', 'parm_format': 'parm_format', 'parm_name_array': 'parm_name_array', 'post_path': 'post_path', 'ttp': '(True)', 'limit': '(9000)'}), '(pre_path=pre_path, parm_name=parm_name, parm_array=parms_array,\n parm_format=parm_format, parm_name_array=parm_name_array, post_path=\n post_path, ttp=True, limit=9000)\n', (867, 1041), True, 'import common_fn as cf\n'), ((622, 664), 'numpy.append', 'np.append', (['parms_array', '[[ir, tc]]'], {'axis': '(0)'}), '(parms_array, [[ir, tc]], axis=0)\n', (631, 664), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*
"""
tools module
"""
__author__ = 'Dr. <NAME>, University of Bristol, UK'
__maintainer__ = 'Dr. <NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
import sys
import os
import copy
import numpy as np
try:
import opt_einsum as oe
OE_AVAILABLE = True
except ImportError:
OE_AVAILABLE = False
from subprocess import Popen, PIPE
from pyscf import gto, scf, dft, symm, lib
from pyscf import tools as pyscf_tools
from typing import Tuple, List, Dict, Union
MAX_CYCLE = 100
NATORB_THRES = 1.e-12
class Logger(object):
    """
    this class pipes all write statements to both stdout and output_file
    """

    def __init__(self, output_file, both=True) -> None:
        """
        init Logger

        :param output_file: path of the log file (opened in append mode)
        :param both: if True, echo every message to stdout as well
        """
        self.terminal = sys.stdout
        self.log = open(output_file, 'a')
        self.both = both

    def write(self, message) -> None:
        """
        write message to the log file and (optionally) to the terminal
        """
        self.log.write(message)
        if self.both:
            self.terminal.write(message)

    def flush(self) -> None:
        """
        flush the underlying streams

        Previously a no-op; when this object replaces sys.stdout a no-op
        flush can lose buffered output, so forward the flush for real.
        """
        self.log.flush()
        if self.both:
            self.terminal.flush()
def git_version() -> str:
    """
    this function returns the git revision as a string
    ("Unknown" when git cannot be executed)
    """
    # Minimal, locale-neutral environment for the git subprocess.
    env = {}
    for key in ('SYSTEMROOT', 'PATH', 'HOME'):
        value = os.environ.get(key)
        if value is not None:
            env[key] = value
    # LANGUAGE is used on win32
    env['LANGUAGE'] = 'C'
    env['LANG'] = 'C'
    env['LC_ALL'] = 'C'
    try:
        proc = Popen(['git', 'rev-parse', 'HEAD'], stdout=PIPE, env=env,
                     cwd=os.path.dirname(__file__))
        revision = proc.communicate()[0].strip().decode('ascii')
    except OSError:
        revision = "Unknown"
    return revision
def dim(mo_occ: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """
    determine molecular dimensions: per-spin indices of orbitals with
    non-zero absolute occupation
    """
    alpha = np.flatnonzero(np.abs(mo_occ[0]) > 0.)
    beta = np.flatnonzero(np.abs(mo_occ[1]) > 0.)
    return alpha, beta
def mf_info(mf: Union[scf.hf.SCF, dft.rks.KohnShamDFT]) -> Tuple[Tuple[np.ndarray, np.ndarray], \
                                                                 Tuple[np.ndarray, np.ndarray]]:
    """
    retrieve mf information (mo coefficients & occupations) for the
    alpha and beta spin channels
    """
    # occupations: a 1d mo_occ array is expanded into per-spin occupation
    # vectors (>0 -> occupied alpha, >1 -> also occupied beta)
    if np.asarray(mf.mo_occ).ndim == 1:
        mo_occ = (np.ones(np.count_nonzero(0. < mf.mo_occ)),
                  np.ones(np.count_nonzero(1. < mf.mo_occ)))
    else:
        mo_occ = tuple(mf.mo_occ[s][np.nonzero(mf.mo_occ[s])] for s in (0, 1))
    # indices of occupied orbitals per spin
    alpha, beta = dim(mo_occ)
    # coefficients: a single 2d matrix is shared between both spins
    if np.asarray(mf.mo_coeff).ndim == 2:
        mo_coeff = (mf.mo_coeff[:, alpha], mf.mo_coeff[:, beta])
    else:
        mo_coeff = (mf.mo_coeff[0][:, alpha], mf.mo_coeff[1][:, beta])
    return mo_coeff, mo_occ
def orbsym(mol, mo_coeff):
    """
    this function returns orbital symmetries

    Falls back to the trivial label 'A' for every orbital whenever
    symmetry labelling fails.

    Fixes: the original used bare ``except:`` (which also swallows
    KeyboardInterrupt/SystemExit) and duplicated the multi-set branch
    verbatim for ndarray and non-ndarray inputs.
    """
    # a single 2d ndarray is one set of coefficients; everything else
    # (3d ndarray, tuple/list of matrices) is handled per spin/set
    single = isinstance(mo_coeff, np.ndarray) and mo_coeff.ndim == 2
    try:
        if single:
            orbsymm = symm.label_orb_symm(mol, mol.irrep_name, mol.symm_orb, mo_coeff)
        else:
            orbsymm = np.array([symm.label_orb_symm(mol, mol.irrep_name, mol.symm_orb, c)
                                for c in mo_coeff])
    except Exception:
        # labelling failed (e.g. no symmetry information): label all as 'A'
        if single:
            orbsymm = np.array(['A'] * mo_coeff.shape[1])
        else:
            orbsymm = np.array([['A'] * c.shape[1] for c in mo_coeff])
    return orbsymm
def make_rdm1(mo: np.ndarray, occup: np.ndarray) -> np.ndarray:
    """
    this function returns an 1-RDM (in ao basis) corresponding to given mo(s)
    """
    weighted_mo = occup * mo
    return contract('ip,jp->ij', weighted_mo, mo)
def make_natorb(mol: gto.Mole, mo_coeff: np.ndarray, \
        rdm1: np.ndarray, thres: float = NATORB_THRES) -> Tuple[Tuple[np.ndarray, np.ndarray], \
                                                                Tuple[np.ndarray, np.ndarray]]:
    """
    this function returns no coefficients and occupations corresponding
    to given mo coefficients and rdm1; nos with |occupation| < thres
    are dropped
    """
    # promote spin-restricted input to the common (2, ...) layout
    c = np.asarray((mo_coeff,) * 2) if mo_coeff.ndim == 2 else mo_coeff
    d = np.array([rdm1, rdm1]) * .5 if rdm1.ndim == 2 else rdm1
    # overlap matrix
    s = mol.intor_symmetric('int1e_ovlp')
    # ao to mo transformation of dm
    rdm1_mo = contract('xpi,pq,xqr,rs,xsj->xij', c, s, d, s, c)
    # natural occupations and the mo -> no rotation
    occ_no, u = np.linalg.eigh(rdm1_mo)
    # transform to no basis
    mo_no = contract('xip,xpj->xij', c, u)
    # boolean masks selecting significant nos per spin
    keep_a = np.abs(occ_no[0]) >= thres
    keep_b = np.abs(occ_no[1]) >= thres
    return (mo_no[0][:, keep_a], mo_no[1][:, keep_b]), \
           (occ_no[0][keep_a], occ_no[1][keep_b])
def write_rdm1(mol: gto.Mole, part: str, \
        mo_coeff: np.ndarray, mo_occ: np.ndarray, fmt: str, \
        weights: List[np.ndarray], \
        suffix: str = '') -> None:
    """
    this function writes a 1-RDM as a numpy or cube (default) file

    :param mol: pyscf molecule object (one output file written per atom)
    :param part: partitioning scheme; only 'atoms' is supported
    :param mo_coeff: alpha/beta mo coefficients
    :param mo_occ: alpha/beta mo occupations
    :param fmt: output format, 'cube' or 'numpy'
    :param weights: per-spin atomic weights, indexed as weights[spin][orbital][atom]
    :param suffix: optional suffix appended to every output file name
    """
    # assertion
    assert part == 'atoms', '`write_rdm1` function only implemented for `atoms` partitioning'
    assert fmt in ['cube', 'numpy'], 'fmt arg to `write_rdm1` must be `cube` or `numpy`'
    # molecular dimensions
    alpha, beta = dim(mo_occ)
    # compute total 1-RDM (AO basis)
    rdm1_tot = np.array([make_rdm1(mo_coeff[0], mo_occ[0]), make_rdm1(mo_coeff[1], mo_occ[1])])
    # loop over atoms
    for a in range(mol.natm):
        # atom-specific rdm1
        rdm1_atom = np.zeros_like(rdm1_tot)
        # loop over spins
        for i, spin_mo in enumerate((alpha, beta)):
            # loop over spin-orbitals
            for m, j in enumerate(spin_mo):
                # get orbital(s)
                orb = mo_coeff[i][:, j].reshape(mo_coeff[i].shape[0], -1)
                # orbital-specific rdm1
                rdm1_orb = make_rdm1(orb, mo_occ[i][j])
                # weighted contribution to rdm1_atom
                rdm1_atom[i] += rdm1_orb * weights[i][m][a]
        if fmt == 'cube':
            # write rdm1_atom as cube file (summed over both spins)
            pyscf_tools.cubegen.density(mol, f'atom_{mol.atom_symbol(a).upper():s}{a:d}_rdm1{suffix:}.cube', \
                                        np.sum(rdm1_atom, axis=0))
        else:
            # write rdm1_atom as numpy file (summed over both spins)
            np.save(f'atom_{mol.atom_symbol(a).upper():s}{a:d}_rdm1{suffix:}.npy', np.sum(rdm1_atom, axis=0))
def res_add(res_a, res_b):
    """
    this function adds two result dictionaries (key-wise, over res_a's keys)
    """
    return {key: value + res_b[key] for key, value in res_a.items()}
def res_sub(res_a, res_b):
    """
    this function subtracts two result dictionaries (key-wise, over res_a's keys)
    """
    return {key: value - res_b[key] for key, value in res_a.items()}
def contract(eqn, *tensors):
    """
    interface to optimized einsum operation; falls back to numpy's
    optimized einsum when opt_einsum is not installed
    """
    if not OE_AVAILABLE:
        return np.einsum(eqn, *tensors, optimize=True)
    return oe.contract(eqn, *tensors)
| [
"opt_einsum.contract",
"numpy.abs",
"numpy.asarray",
"os.environ.get",
"numpy.count_nonzero",
"numpy.array",
"numpy.sum",
"os.path.dirname",
"numpy.einsum",
"numpy.nonzero",
"numpy.linalg.eigh",
"pyscf.symm.label_orb_symm",
"numpy.zeros_like"
] | [((5197, 5220), 'numpy.linalg.eigh', 'np.linalg.eigh', (['rdm1_mo'], {}), '(rdm1_mo)\n', (5211, 5220), True, 'import numpy as np\n'), ((4795, 4822), 'numpy.asarray', 'np.asarray', (['((mo_coeff,) * 2)'], {}), '((mo_coeff,) * 2)\n', (4805, 4822), True, 'import numpy as np\n'), ((6408, 6431), 'numpy.zeros_like', 'np.zeros_like', (['rdm1_tot'], {}), '(rdm1_tot)\n', (6421, 6431), True, 'import numpy as np\n'), ((7891, 7917), 'opt_einsum.contract', 'oe.contract', (['eqn', '*tensors'], {}), '(eqn, *tensors)\n', (7902, 7917), True, 'import opt_einsum as oe\n'), ((7951, 7990), 'numpy.einsum', 'np.einsum', (['eqn', '*tensors'], {'optimize': '(True)'}), '(eqn, *tensors, optimize=True)\n', (7960, 7990), True, 'import numpy as np\n'), ((1464, 1481), 'os.environ.get', 'os.environ.get', (['k'], {}), '(k)\n', (1478, 1481), False, 'import os\n'), ((2591, 2612), 'numpy.asarray', 'np.asarray', (['mf.mo_occ'], {}), '(mf.mo_occ)\n', (2601, 2612), True, 'import numpy as np\n'), ((2940, 2963), 'numpy.asarray', 'np.asarray', (['mf.mo_coeff'], {}), '(mf.mo_coeff)\n', (2950, 2963), True, 'import numpy as np\n'), ((4905, 4927), 'numpy.array', 'np.array', (['[rdm1, rdm1]'], {}), '([rdm1, rdm1])\n', (4913, 4927), True, 'import numpy as np\n'), ((2654, 2687), 'numpy.count_nonzero', 'np.count_nonzero', (['(0.0 < mf.mo_occ)'], {}), '(0.0 < mf.mo_occ)\n', (2670, 2687), True, 'import numpy as np\n'), ((2697, 2730), 'numpy.count_nonzero', 'np.count_nonzero', (['(1.0 < mf.mo_occ)'], {}), '(1.0 < mf.mo_occ)\n', (2713, 2730), True, 'import numpy as np\n'), ((2781, 2805), 'numpy.nonzero', 'np.nonzero', (['mf.mo_occ[0]'], {}), '(mf.mo_occ[0])\n', (2791, 2805), True, 'import numpy as np\n'), ((2821, 2845), 'numpy.nonzero', 'np.nonzero', (['mf.mo_occ[1]'], {}), '(mf.mo_occ[1])\n', (2831, 2845), True, 'import numpy as np\n'), ((3400, 3464), 'pyscf.symm.label_orb_symm', 'symm.label_orb_symm', (['mol', 'mol.irrep_name', 'mol.symm_orb', 'mo_coeff'], {}), '(mol, mol.irrep_name, mol.symm_orb, mo_coeff)\n', 
(3419, 3464), False, 'from pyscf import gto, scf, dft, symm, lib\n'), ((4006, 4056), 'numpy.array', 'np.array', (["[(['A'] * c.shape[1]) for c in mo_coeff]"], {}), "([(['A'] * c.shape[1]) for c in mo_coeff])\n", (4014, 4056), True, 'import numpy as np\n'), ((7184, 7209), 'numpy.sum', 'np.sum', (['rdm1_atom'], {'axis': '(0)'}), '(rdm1_atom, axis=0)\n', (7190, 7209), True, 'import numpy as np\n'), ((7364, 7389), 'numpy.sum', 'np.sum', (['rdm1_atom'], {'axis': '(0)'}), '(rdm1_atom, axis=0)\n', (7370, 7389), True, 'import numpy as np\n'), ((2206, 2223), 'numpy.abs', 'np.abs', (['mo_occ[0]'], {}), '(mo_occ[0])\n', (2212, 2223), True, 'import numpy as np\n'), ((2243, 2260), 'numpy.abs', 'np.abs', (['mo_occ[1]'], {}), '(mo_occ[1])\n', (2249, 2260), True, 'import numpy as np\n'), ((3519, 3554), 'numpy.array', 'np.array', (["(['A'] * mo_coeff.shape[1])"], {}), "(['A'] * mo_coeff.shape[1])\n", (3527, 3554), True, 'import numpy as np\n'), ((3766, 3816), 'numpy.array', 'np.array', (["[(['A'] * c.shape[1]) for c in mo_coeff]"], {}), "([(['A'] * c.shape[1]) for c in mo_coeff])\n", (3774, 3816), True, 'import numpy as np\n'), ((3882, 3939), 'pyscf.symm.label_orb_symm', 'symm.label_orb_symm', (['mol', 'mol.irrep_name', 'mol.symm_orb', 'c'], {}), '(mol, mol.irrep_name, mol.symm_orb, c)\n', (3901, 3939), False, 'from pyscf import gto, scf, dft, symm, lib\n'), ((3634, 3691), 'pyscf.symm.label_orb_symm', 'symm.label_orb_symm', (['mol', 'mol.irrep_name', 'mol.symm_orb', 'c'], {}), '(mol, mol.irrep_name, mol.symm_orb, c)\n', (3653, 3691), False, 'from pyscf import gto, scf, dft, symm, lib\n'), ((5500, 5517), 'numpy.abs', 'np.abs', (['occ_no[0]'], {}), '(occ_no[0])\n', (5506, 5517), True, 'import numpy as np\n'), ((5549, 5566), 'numpy.abs', 'np.abs', (['occ_no[1]'], {}), '(occ_no[1])\n', (5555, 5566), True, 'import numpy as np\n'), ((1764, 1789), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1779, 1789), False, 'import os\n'), ((5375, 5392), 'numpy.abs', 
'np.abs', (['occ_no[0]'], {}), '(occ_no[0])\n', (5381, 5392), True, 'import numpy as np\n'), ((5429, 5446), 'numpy.abs', 'np.abs', (['occ_no[1]'], {}), '(occ_no[1])\n', (5435, 5446), True, 'import numpy as np\n')] |
import numpy as np
from ._base import FilterAlgorithmBase
class WhiteTophat(FilterAlgorithmBase):
    """
    Performs "white top hat" filtering of an image to enhance spots. "White top hat filtering" finds spots that are both
    smaller and brighter than their surroundings.

    See Also
    --------
    https://en.wikipedia.org/wiki/Top-hat_transform
    """

    def __init__(self, disk_size, **kwargs):
        """Instance of a white top hat morphological masking filter which masks objects larger than `disk_size`

        Parameters
        ----------
        disk_size : int
            diameter of the morphological masking disk in pixels
        """
        self.disk_size = disk_size

    @classmethod
    def add_arguments(cls, group_parser):
        group_parser.add_argument(
            "--disk-size", default=15, type=int, help="diameter of morphological masking disk in pixels")

    def filter(self, stack) -> None:
        """Perform in-place filtering of an image stack and all contained aux images

        Parameters
        ----------
        stack : starfish.Stack
            Stack to be filtered
        """
        from scipy.ndimage.filters import maximum_filter, minimum_filter
        from skimage.morphology import disk

        # the structuring element is fixed for the whole call, so build it once
        structuring_element = disk(self.disk_size)

        def white_tophat(image):
            if image.dtype.kind != "u":
                raise TypeError("images should be stored in an unsigned integer array")
            # morphological opening: erosion followed by dilation
            opened = maximum_filter(minimum_filter(image, footprint=structuring_element),
                                    footprint=structuring_element)
            return image - np.minimum(image, opened)

        stack.image.apply(white_tophat)

        # apply to aux dict too.
        for auxiliary_image in stack.auxiliary_images.values():
            auxiliary_image.apply(white_tophat)
| [
"numpy.minimum",
"skimage.morphology.disk",
"scipy.ndimage.filters.maximum_filter",
"scipy.ndimage.filters.minimum_filter"
] | [((1454, 1474), 'skimage.morphology.disk', 'disk', (['self.disk_size'], {}), '(self.disk_size)\n', (1458, 1474), False, 'from skimage.morphology import disk\n'), ((1502, 1554), 'scipy.ndimage.filters.minimum_filter', 'minimum_filter', (['image'], {'footprint': 'structuring_element'}), '(image, footprint=structuring_element)\n', (1516, 1554), False, 'from scipy.ndimage.filters import maximum_filter, minimum_filter\n'), ((1582, 1641), 'scipy.ndimage.filters.maximum_filter', 'maximum_filter', (['min_filtered'], {'footprint': 'structuring_element'}), '(min_filtered, footprint=structuring_element)\n', (1596, 1641), False, 'from scipy.ndimage.filters import maximum_filter, minimum_filter\n'), ((1679, 1710), 'numpy.minimum', 'np.minimum', (['image', 'max_filtered'], {}), '(image, max_filtered)\n', (1689, 1710), True, 'import numpy as np\n')] |
import datetime
import cv2
import numpy as np
from artsci2019.lib.frame_checker import FrameChecker
from artsci2019.lib.util import scale_frame, scale_point, is_in_frame
from artsci2019.lib.face_recog import get_faces
from artsci2019.lib.sound import SoundPlayer
def draw_checked_frame(frame, checked_frame, factor):
    """Draw the eye line, centre-offset line and height line of a checked
    frame onto ``frame`` — green where the corresponding check passed,
    red otherwise."""
    ok_color = (100, 255, 100)
    bad_color = (100, 100, 255)

    def color_for(passed):
        return ok_color if passed else bad_color

    cv2.line(frame,
             scale_point(checked_frame.left_eye, factor),
             scale_point(checked_frame.right_eye, factor),
             color_for(checked_frame.width_ok),
             thickness=2)
    cv2.line(frame,
             scale_point(checked_frame.centre, factor),
             scale_point(checked_frame.centre_target, factor),
             color_for(checked_frame.centre_ok),
             thickness=4)
    cv2.line(frame,
             scale_point(checked_frame.h_min_point, factor),
             scale_point(checked_frame.h_max_point, factor),
             color_for(checked_frame.height_ok),
             thickness=2)
def draw_triangles(frame, checked_frame, factor):
    """Overlay the Delaunay triangulation of the recognized face landmarks
    onto ``frame`` (scaled by ``factor``) as thin white lines."""
    f_h, f_w, _ = checked_frame.recognized_frame.frame.shape
    # prep delaunay over the full frame rectangle
    subdiv = cv2.Subdiv2D((0, 0, f_w, f_h))
    for landmark in checked_frame.recognized_frame.face_landmarks:
        if is_in_frame(f_w, f_h, landmark):
            subdiv.insert(landmark)
    triangles = subdiv.getTriangleList()
    print("triangles: {}".format(len(triangles)))
    white = (255, 255, 255)
    for tri in triangles:
        corners = np.reshape(tri, (3, 2)).astype(np.int32)
        pts = [scale_point(tuple(p), factor) for p in corners]
        for start, end in ((pts[0], pts[1]), (pts[1], pts[2]), (pts[2], pts[0])):
            cv2.line(frame, start, end, white, 1, 8, 0)
def my_get_frame(video_capture, rotate):
    """Grab one frame from the capture; when ``rotate`` is set, rotate it
    90 degrees (transpose + horizontal flip). Returns (success, frame)."""
    rval, frame = video_capture.read()
    if rotate:
        frame = cv2.flip(cv2.transpose(frame), flipCode=1)
    return rval, frame
class InteractiveDisplay:
    """Webcam-driven portrait installation.

    Shows a live camera preview window plus a generated-portrait window and,
    at most once per cooldown period, blends well-posed detected faces into
    the portrait via the processing backend.
    """

    def __init__(self, camera_number, rotate, fullscreen, processing_backend):
        self.camera_number = camera_number
        # rotate camera frames 90 degrees (portrait orientation)
        self.rotate = rotate
        self.fullscreen = fullscreen
        self.debug_scaling = 1/2
        if fullscreen:
            self.debug_scaling = 1
        # face detection runs on frames downscaled by this factor
        self.scaling_factor = 4
        self.preview_window = "preview"
        self.genimage_window = "genimage"
        self.genimage = None
        self.video_capture = None
        self.collected_frames = []
        self.pb = processing_backend
        self.current_checked_frames = []
        # earliest time at which the portrait may next be updated
        self.checkpoint_time = datetime.datetime.now() + datetime.timedelta(seconds=10)
        self.frame_checker = None
        self.sound_player = SoundPlayer("bing.wav")

    def init(self):
        """Open both windows and the webcam; returns True once a first frame was read."""
        # initialize window
        cv2.namedWindow(self.preview_window, cv2.WINDOW_NORMAL)
        cv2.namedWindow(self.genimage_window, cv2.WINDOW_NORMAL)  # WINDOW_NORMAL required for fullscreen to work
        if self.fullscreen:
            cv2.setWindowProperty(self.genimage_window, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        # get webcam
        self.video_capture = cv2.VideoCapture(self.camera_number)
        # 3/4 are CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT
        self.video_capture.set(3, 1920)
        self.video_capture.set(4, 1080)
        rval = False
        frame = None
        if self.video_capture.isOpened():  # try to get the first frame
            rval, frame = my_get_frame(self.video_capture, self.rotate)
        if frame is not None:
            # seed the portrait window with the first camera frame
            self.genimage = scale_frame(frame, self.debug_scaling)
            cv2.imshow(self.genimage_window, self.genimage)
        # frame checker dimensions follow the (possibly rotated) frame shape
        if self.rotate:
            self.frame_checker = FrameChecker(1080, 1920)
        else:
            self.frame_checker = FrameChecker(1920, 1080)
        return rval

    def teardown(self):
        """Close both windows and release the camera."""
        cv2.destroyWindow(self.preview_window)
        cv2.destroyWindow(self.genimage_window)
        self.video_capture.release()

    def portrait_update(self, checked_frames):
        """Blend all fully-valid frames into the portrait, at most once per
        cooldown period; returns the backend's "changed" flag (or None when
        the cooldown has not elapsed)."""
        current_time = datetime.datetime.now()
        if current_time < self.checkpoint_time:
            print("too early")
            return  # too early for an update
        # update portrait
        ok_frames = [cf.recognized_frame
                     for cf in checked_frames
                     if cf.all_ok]
        changed = False
        if ok_frames:
            print("Updating")
            self.sound_player.play()
            changed = self.pb.update(ok_frames)
        if changed:
            print("Updated")
            portrait_frame = self.pb.get_portrait()
            f = scale_frame(portrait_frame, self.debug_scaling)
            self.genimage = f
            cv2.imshow(self.genimage_window, self.genimage)
            # NOTE(review): cooldown restarts only after a successful update — confirm intended
            self.checkpoint_time = current_time + datetime.timedelta(seconds=10)
        return changed

    def loop_update(self, frame):
        """Per-frame rendering: show the preview and, once the cooldown has
        elapsed, cross-fade the best-scoring detected face into the portrait
        window; finally attempt a portrait update."""
        frame = scale_frame(frame, self.debug_scaling)
        new_preview = frame
        new_genimage = self.genimage
        current_time = datetime.datetime.now()
        if current_time > self.checkpoint_time and self.current_checked_frames:
            # draw face lines
            score = max([cf.total_score for cf in self.current_checked_frames])
            for cf in self.current_checked_frames:
                print("Score: {}".format(cf.total_score))
            # cross-fade the live frame in, proportional to the best score
            new_genimage = cv2.addWeighted(self.genimage, 1 - score, frame, score, 0)
            # draw_triangles(new_genimage, self.current_checked_frames[0], self.debug_scaling)
            # draw_triangles(new_preview, self.current_checked_frames[0], self.debug_scaling)
            if score > 0.5:
                print("YO")
                draw_triangles(new_genimage, self.current_checked_frames[0], self.debug_scaling)
        # Display the resulting image
        cv2.imshow(self.preview_window, new_preview)
        cv2.imshow(self.genimage_window, new_genimage)
        cv2.waitKey(50)
        changed = self.portrait_update(self.current_checked_frames)

    def start(self):
        """Main loop: grab frames, run face detection on every other frame,
        refresh the display; exits when 'q' is pressed or the camera fails."""
        process_this_frame = True
        rval = True
        while rval:
            # get a single frame
            rval, frame = my_get_frame(self.video_capture, self.rotate)
            # TODO drop frames while processing
            # get the faces
            if process_this_frame:
                rfs = get_faces(frame, self.scaling_factor)
                self.current_checked_frames = [self.frame_checker.check(rf) for rf in rfs]
            process_this_frame = not process_this_frame
            self.loop_update(frame)
            # exit on ESC
            key = cv2.waitKey(20)
            if key == 113:  # exit on q
                break
| [
"artsci2019.lib.util.scale_point",
"cv2.transpose",
"artsci2019.lib.sound.SoundPlayer",
"cv2.imshow",
"datetime.timedelta",
"artsci2019.lib.frame_checker.FrameChecker",
"numpy.reshape",
"cv2.line",
"cv2.addWeighted",
"cv2.waitKey",
"cv2.Subdiv2D",
"cv2.namedWindow",
"artsci2019.lib.util.is_i... | [((1334, 1352), 'cv2.Subdiv2D', 'cv2.Subdiv2D', (['rect'], {}), '(rect)\n', (1346, 1352), False, 'import cv2\n'), ((470, 513), 'artsci2019.lib.util.scale_point', 'scale_point', (['checked_frame.left_eye', 'factor'], {}), '(checked_frame.left_eye, factor)\n', (481, 513), False, 'from artsci2019.lib.util import scale_frame, scale_point, is_in_frame\n'), ((528, 572), 'artsci2019.lib.util.scale_point', 'scale_point', (['checked_frame.right_eye', 'factor'], {}), '(checked_frame.right_eye, factor)\n', (539, 572), False, 'from artsci2019.lib.util import scale_frame, scale_point, is_in_frame\n'), ((729, 770), 'artsci2019.lib.util.scale_point', 'scale_point', (['checked_frame.centre', 'factor'], {}), '(checked_frame.centre, factor)\n', (740, 770), False, 'from artsci2019.lib.util import scale_frame, scale_point, is_in_frame\n'), ((785, 833), 'artsci2019.lib.util.scale_point', 'scale_point', (['checked_frame.centre_target', 'factor'], {}), '(checked_frame.centre_target, factor)\n', (796, 833), False, 'from artsci2019.lib.util import scale_frame, scale_point, is_in_frame\n'), ((993, 1039), 'artsci2019.lib.util.scale_point', 'scale_point', (['checked_frame.h_min_point', 'factor'], {}), '(checked_frame.h_min_point, factor)\n', (1004, 1039), False, 'from artsci2019.lib.util import scale_frame, scale_point, is_in_frame\n'), ((1054, 1100), 'artsci2019.lib.util.scale_point', 'scale_point', (['checked_frame.h_max_point', 'factor'], {}), '(checked_frame.h_max_point, factor)\n', (1065, 1100), False, 'from artsci2019.lib.util import scale_frame, scale_point, is_in_frame\n'), ((1425, 1450), 'artsci2019.lib.util.is_in_frame', 'is_in_frame', (['f_w', 'f_h', 'lm'], {}), '(f_w, f_h, lm)\n', (1436, 1450), False, 'from artsci2019.lib.util import scale_frame, scale_point, is_in_frame\n'), ((1786, 1837), 'cv2.line', 'cv2.line', (['frame', 'pt1', 'pt2', '(255, 255, 255)', '(1)', '(8)', '(0)'], {}), '(frame, pt1, pt2, (255, 255, 255), 1, 8, 0)\n', (1794, 1837), 
False, 'import cv2\n'), ((1846, 1897), 'cv2.line', 'cv2.line', (['frame', 'pt2', 'pt3', '(255, 255, 255)', '(1)', '(8)', '(0)'], {}), '(frame, pt2, pt3, (255, 255, 255), 1, 8, 0)\n', (1854, 1897), False, 'import cv2\n'), ((1906, 1957), 'cv2.line', 'cv2.line', (['frame', 'pt3', 'pt1', '(255, 255, 255)', '(1)', '(8)', '(0)'], {}), '(frame, pt3, pt1, (255, 255, 255), 1, 8, 0)\n', (1914, 1957), False, 'import cv2\n'), ((2097, 2117), 'cv2.transpose', 'cv2.transpose', (['frame'], {}), '(frame)\n', (2110, 2117), False, 'import cv2\n'), ((2134, 2161), 'cv2.flip', 'cv2.flip', (['frame'], {'flipCode': '(1)'}), '(frame, flipCode=1)\n', (2142, 2161), False, 'import cv2\n'), ((2933, 2956), 'artsci2019.lib.sound.SoundPlayer', 'SoundPlayer', (['"""bing.wav"""'], {}), "('bing.wav')\n", (2944, 2956), False, 'from artsci2019.lib.sound import SoundPlayer\n'), ((3014, 3069), 'cv2.namedWindow', 'cv2.namedWindow', (['self.preview_window', 'cv2.WINDOW_NORMAL'], {}), '(self.preview_window, cv2.WINDOW_NORMAL)\n', (3029, 3069), False, 'import cv2\n'), ((3078, 3134), 'cv2.namedWindow', 'cv2.namedWindow', (['self.genimage_window', 'cv2.WINDOW_NORMAL'], {}), '(self.genimage_window, cv2.WINDOW_NORMAL)\n', (3093, 3134), False, 'import cv2\n'), ((3367, 3403), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.camera_number'], {}), '(self.camera_number)\n', (3383, 3403), False, 'import cv2\n'), ((4039, 4077), 'cv2.destroyWindow', 'cv2.destroyWindow', (['self.preview_window'], {}), '(self.preview_window)\n', (4056, 4077), False, 'import cv2\n'), ((4086, 4125), 'cv2.destroyWindow', 'cv2.destroyWindow', (['self.genimage_window'], {}), '(self.genimage_window)\n', (4103, 4125), False, 'import cv2\n'), ((4235, 4258), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4256, 4258), False, 'import datetime\n'), ((5103, 5141), 'artsci2019.lib.util.scale_frame', 'scale_frame', (['frame', 'self.debug_scaling'], {}), '(frame, self.debug_scaling)\n', (5114, 5141), False, 'from artsci2019.lib.util 
import scale_frame, scale_point, is_in_frame\n'), ((5231, 5254), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5252, 5254), False, 'import datetime\n'), ((6029, 6073), 'cv2.imshow', 'cv2.imshow', (['self.preview_window', 'new_preview'], {}), '(self.preview_window, new_preview)\n', (6039, 6073), False, 'import cv2\n'), ((6082, 6128), 'cv2.imshow', 'cv2.imshow', (['self.genimage_window', 'new_genimage'], {}), '(self.genimage_window, new_genimage)\n', (6092, 6128), False, 'import cv2\n'), ((6137, 6152), 'cv2.waitKey', 'cv2.waitKey', (['(50)'], {}), '(50)\n', (6148, 6152), False, 'import cv2\n'), ((2814, 2837), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2835, 2837), False, 'import datetime\n'), ((2840, 2870), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (2858, 2870), False, 'import datetime\n'), ((3224, 3320), 'cv2.setWindowProperty', 'cv2.setWindowProperty', (['self.genimage_window', 'cv2.WND_PROP_FULLSCREEN', 'cv2.WINDOW_FULLSCREEN'], {}), '(self.genimage_window, cv2.WND_PROP_FULLSCREEN, cv2.\n WINDOW_FULLSCREEN)\n', (3245, 3320), False, 'import cv2\n'), ((3731, 3769), 'artsci2019.lib.util.scale_frame', 'scale_frame', (['frame', 'self.debug_scaling'], {}), '(frame, self.debug_scaling)\n', (3742, 3769), False, 'from artsci2019.lib.util import scale_frame, scale_point, is_in_frame\n'), ((3782, 3829), 'cv2.imshow', 'cv2.imshow', (['self.genimage_window', 'self.genimage'], {}), '(self.genimage_window, self.genimage)\n', (3792, 3829), False, 'import cv2\n'), ((3888, 3912), 'artsci2019.lib.frame_checker.FrameChecker', 'FrameChecker', (['(1080)', '(1920)'], {}), '(1080, 1920)\n', (3900, 3912), False, 'from artsci2019.lib.frame_checker import FrameChecker\n'), ((3960, 3984), 'artsci2019.lib.frame_checker.FrameChecker', 'FrameChecker', (['(1920)', '(1080)'], {}), '(1920, 1080)\n', (3972, 3984), False, 'from artsci2019.lib.frame_checker import FrameChecker\n'), ((4810, 4857), 
'artsci2019.lib.util.scale_frame', 'scale_frame', (['portrait_frame', 'self.debug_scaling'], {}), '(portrait_frame, self.debug_scaling)\n', (4821, 4857), False, 'from artsci2019.lib.util import scale_frame, scale_point, is_in_frame\n'), ((4900, 4947), 'cv2.imshow', 'cv2.imshow', (['self.genimage_window', 'self.genimage'], {}), '(self.genimage_window, self.genimage)\n', (4910, 4947), False, 'import cv2\n'), ((5581, 5639), 'cv2.addWeighted', 'cv2.addWeighted', (['self.genimage', '(1 - score)', 'frame', 'score', '(0)'], {}), '(self.genimage, 1 - score, frame, score, 0)\n', (5596, 5639), False, 'import cv2\n'), ((6826, 6841), 'cv2.waitKey', 'cv2.waitKey', (['(20)'], {}), '(20)\n', (6837, 6841), False, 'import cv2\n'), ((1598, 1619), 'numpy.reshape', 'np.reshape', (['t', '(3, 2)'], {}), '(t, (3, 2))\n', (1608, 1619), True, 'import numpy as np\n'), ((4998, 5028), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (5016, 5028), False, 'import datetime\n'), ((6558, 6595), 'artsci2019.lib.face_recog.get_faces', 'get_faces', (['frame', 'self.scaling_factor'], {}), '(frame, self.scaling_factor)\n', (6567, 6595), False, 'from artsci2019.lib.face_recog import get_faces\n')] |
from unittest.mock import patch, MagicMock, call
import json
from datetime import datetime
from copy import deepcopy
import pytest
from PIL import Image
from sm.engine import DB, ESExporter, QueuePublisher
from sm.engine.dataset_manager import SMapiDatasetManager, SMDaemonDatasetManager
from sm.engine.dataset_manager import Dataset, DatasetActionPriority, DatasetAction, DatasetStatus
from sm.engine.errors import DSIDExists
from sm.engine.queue import SM_ANNOTATE, SM_DS_STATUS
from sm.engine.tests.util import pysparkling_context, sm_config, ds_config, test_db
from sm.engine.png_generator import ImageStoreServiceWrapper
@pytest.fixture()
def fill_db(test_db, sm_config, ds_config):
    """Seed the test database with one finished dataset plus its job,
    sum formula and iso-image metrics rows."""
    upload_dt = '2000-01-01 00:00:00'
    ds_id = '2000-01-01'
    meta = {"meta": "data"}
    db = DB(sm_config['db'])
    dataset_row = (ds_id, 'ds_name', 'input_path', upload_dt,
                   json.dumps(meta), json.dumps(ds_config), DatasetStatus.FINISHED,
                   True, ['HMDB-v4'], ['+H'])
    db.insert('INSERT INTO dataset (id, name, input_path, upload_dt, metadata, config, '
              'status, is_public, mol_dbs, adducts) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)',
              rows=[dataset_row])
    db.insert("INSERT INTO job (id, db_id, ds_id) VALUES (%s, %s, %s)",
              rows=[(0, 0, ds_id)])
    db.insert("INSERT INTO sum_formula (id, db_id, sf) VALUES (%s, %s, %s)",
              rows=[(1, 0, 'H2O')])
    db.insert(("INSERT INTO iso_image_metrics (job_id, db_id, sf, adduct, iso_image_ids) "
               "VALUES (%s, %s, %s, %s, %s)"),
              rows=[(0, 0, 'H2O', '+H', ['iso_image_1_id', 'iso_image_2_id'])])
    db.close()
def create_ds_man(sm_config, db=None, es=None, img_store=None,
                  action_queue=None, status_queue=None, sm_api=False):
    """Build an API-side or daemon-side dataset manager, substituting mocks
    for any collaborator the caller did not supply."""
    database = db or DB(sm_config['db'])
    es_exporter = es or MagicMock(spec=ESExporter)
    action_q = action_queue or MagicMock(QueuePublisher)
    status_q = status_queue or MagicMock(QueuePublisher)
    image_store = img_store or MagicMock(spec=ImageStoreServiceWrapper)
    if sm_api:
        return SMapiDatasetManager(db=database, es=es_exporter,
                                   mode='queue', image_store=image_store,
                                   action_queue=action_q, status_queue=status_q)
    return SMDaemonDatasetManager(db=database, es=es_exporter,
                                  img_store=image_store, mode=None,
                                  status_queue=status_q)
def create_ds(ds_id='2000-01-01', ds_name='ds_name', input_path='input_path', upload_dt=None,
              metadata=None, ds_config=None, status=DatasetStatus.NEW, mol_dbs=None, adducts=None):
    """Construct a Dataset for tests, filling sensible defaults for every
    argument that was left unset."""
    return Dataset(ds_id, ds_name, input_path,
                   upload_dt or datetime.now(),
                   metadata or {}, ds_config or {},
                   status=status,
                   mol_dbs=mol_dbs or ['HMDB-v4'],
                   adducts=adducts or ['+H', '+Na', '+K'],
                   img_storage_type='fs')
class TestSMapiDatasetManager:
def test_add_new_ds(self, test_db, sm_config, ds_config):
    """Adding a dataset publishes an ADD action at high priority."""
    queue = MagicMock(spec=QueuePublisher)
    ds_man = create_ds_man(sm_config, action_queue=queue, sm_api=True)
    ds = create_ds(ds_id='2000-01-01', ds_config=ds_config)

    ds_man.add(ds, priority=DatasetActionPriority.HIGH)

    expected = {'ds_id': '2000-01-01', 'ds_name': 'ds_name', 'input_path': 'input_path',
                'action': DatasetAction.ADD, 'del_first': False}
    queue.publish.assert_has_calls([call(expected, DatasetActionPriority.HIGH)])
def test_delete_ds(self, test_db, sm_config, ds_config):
action_queue_mock = MagicMock(spec=QueuePublisher)
ds_man = create_ds_man(sm_config, action_queue=action_queue_mock, sm_api=True)
ds_id = '2000-01-01'
ds = create_ds(ds_id=ds_id, ds_config=ds_config)
ds_man.delete(ds)
msg = {'ds_id': ds_id, 'ds_name': 'ds_name', 'input_path': 'input_path', 'action': DatasetAction.DELETE}
action_queue_mock.publish.assert_has_calls([call(msg, DatasetActionPriority.HIGH)])
def test_update_ds__configs_equal_metadata_diff(self, fill_db, sm_config, ds_config):
action_queue_mock = MagicMock(spec=QueuePublisher)
ds_man = create_ds_man(sm_config, action_queue=action_queue_mock, sm_api=True)
ds_id = '2000-01-01'
ds = create_ds(ds_id=ds_id, ds_config=ds_config)
ds.metadata = {'new': 'metadata'}
ds_man.update(ds)
msg = {'ds_id': ds_id, 'ds_name': 'ds_name', 'input_path': 'input_path',
'action': DatasetAction.UPDATE}
action_queue_mock.publish.assert_has_calls([call(msg, DatasetActionPriority.HIGH)])
def test_update_ds__configs_metadata_equal__do_nothing(self, fill_db, sm_config, ds_config):
action_queue_mock = MagicMock(spec=QueuePublisher)
ds_man = create_ds_man(sm_config, action_queue=action_queue_mock, sm_api=True)
ds_id = '2000-01-01'
ds = create_ds(ds_id=ds_id, ds_config=ds_config)
ds_man.update(ds)
action_queue_mock.assert_not_called()
def test_add_ds__new_mol_db(self, fill_db, sm_config, ds_config):
action_queue_mock = MagicMock(spec=QueuePublisher)
ds_man = create_ds_man(sm_config, action_queue=action_queue_mock, sm_api=True)
ds_id = '2000-01-01'
ds = create_ds(ds_id=ds_id, ds_config=ds_config)
ds.config['databases'] = [{'name': 'HMDB'}, {'name': 'ChEBI'}]
ds_man.add(ds)
msg = {'ds_id': ds_id, 'ds_name': 'ds_name', 'input_path': 'input_path',
'action': DatasetAction.ADD, 'del_first': False}
action_queue_mock.publish.assert_has_calls([call(msg, DatasetActionPriority.DEFAULT)])
def test_add_optical_image(self, fill_db, sm_config, ds_config):
db = DB(sm_config['db'])
action_queue_mock = MagicMock(spec=QueuePublisher)
es_mock = MagicMock(spec=ESExporter)
img_store_mock = MagicMock(ImageStoreServiceWrapper)
img_store_mock.post_image.side_effect = ['opt_img_id1', 'opt_img_id2', 'opt_img_id3', 'thumbnail_id']
img_store_mock.get_image_by_id.return_value = Image.new('RGB', (100, 100))
ds_man = create_ds_man(sm_config=sm_config, db=db, es=es_mock,
img_store=img_store_mock, action_queue=action_queue_mock, sm_api=True)
ds_man._annotation_image_shape = MagicMock(return_value=(100, 100))
ds_id = '2000-01-01'
ds = create_ds(ds_id=ds_id, ds_config=ds_config)
zoom_levels = [1, 2, 3]
raw_img_id = 'raw_opt_img_id'
ds_man.add_optical_image(ds, raw_img_id, [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
zoom_levels=zoom_levels)
assert db.select('SELECT * FROM optical_image') == [
('opt_img_id{}'.format(i + 1), ds.id, zoom)
for i, zoom in enumerate(zoom_levels)]
assert db.select('SELECT optical_image FROM dataset where id = %s', params=(ds_id,)) == [(raw_img_id,)]
assert db.select('SELECT thumbnail FROM dataset where id = %s', params=(ds_id,)) == [('thumbnail_id',)]
class TestSMDaemonDatasetManager:
class SearchJob:
def __init__(self, *args, **kwargs):
pass
def run(self, *args, **kwargs):
pass
def test_add_ds(self, test_db, sm_config, ds_config):
action_queue_mock = MagicMock(spec=QueuePublisher)
es_mock = MagicMock(spec=ESExporter)
db = DB(sm_config['db'])
try:
ds_man = create_ds_man(sm_config, db=db, es=es_mock, action_queue=action_queue_mock, sm_api=False)
ds_id = '2000-01-01'
ds_name = 'ds_name'
input_path = 'input_path'
upload_dt = datetime.now()
metadata = {}
ds = create_ds(ds_id=ds_id, ds_name=ds_name, input_path=input_path, upload_dt=upload_dt,
metadata=metadata, ds_config=ds_config)
ds_man.add(ds, search_job_factory=self.SearchJob)
DS_SEL = 'select name, input_path, upload_dt, metadata, config from dataset where id=%s'
assert db.select_one(DS_SEL, params=(ds_id,)) == (ds_name, input_path, upload_dt, metadata, ds_config)
finally:
db.close()
def test_update_ds(self, fill_db, sm_config, ds_config):
action_queue_mock = MagicMock(spec=QueuePublisher)
es_mock = MagicMock(spec=ESExporter)
ds_man = create_ds_man(sm_config, es=es_mock, action_queue=action_queue_mock, sm_api=False)
ds_id = '2000-01-01'
ds = create_ds(ds_id=ds_id, ds_config=ds_config)
with patch('sm.engine.dataset_manager.MolecularDB') as MolecularDB:
mol_db_mock = MolecularDB.return_value
mol_db_mock.name = 'HMDB'
with patch('sm.engine.dataset_manager.MolDBServiceWrapper') as MolDBServiceWrapper:
moldb_service_wrapper_mock = MolDBServiceWrapper.return_value
moldb_service_wrapper_mock.find_db_by_id.return_value = {'name': 'HMDB-v4'}
ds_man.update(ds)
es_mock.delete_ds.assert_called_with(ds_id)
call_args = es_mock.index_ds.call_args[1].values()
assert ds_id in call_args and mol_db_mock in call_args
def test_delete_ds(self, fill_db, sm_config, ds_config):
db = DB(sm_config['db'])
action_queue_mock = MagicMock(spec=QueuePublisher)
es_mock = MagicMock(spec=ESExporter)
img_store_service_mock = MagicMock(spec=ImageStoreServiceWrapper)
ds_man = create_ds_man(sm_config, db=db, es=es_mock, img_store=img_store_service_mock,
action_queue=action_queue_mock, sm_api=False)
ds_id = '2000-01-01'
ds = create_ds(ds_id=ds_id, ds_config=ds_config)
ds_man.delete(ds)
ids = ['iso_image_{}_id'.format(id) for id in range(1, 3)]
img_store_service_mock.delete_image_by_id.assert_has_calls(
[call('fs', 'iso_image', ids[0]), call('fs', 'iso_image', ids[1])])
es_mock.delete_ds.assert_called_with(ds_id)
assert db.select_one('SELECT * FROM dataset WHERE id = %s', params=(ds_id,)) == []
| [
"sm.engine.dataset_manager.Dataset",
"sm.engine.DB",
"sm.engine.dataset_manager.SMapiDatasetManager",
"unittest.mock.MagicMock",
"PIL.Image.new",
"unittest.mock.call",
"json.dumps",
"datetime.datetime.now",
"pytest.fixture",
"unittest.mock.patch",
"sm.engine.dataset_manager.SMDaemonDatasetManage... | [((630, 646), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (644, 646), False, 'import pytest\n'), ((791, 810), 'sm.engine.DB', 'DB', (["sm_config['db']"], {}), "(sm_config['db'])\n", (793, 810), False, 'from sm.engine import DB, ESExporter, QueuePublisher\n'), ((2897, 3052), 'sm.engine.dataset_manager.Dataset', 'Dataset', (['ds_id', 'ds_name', 'input_path', 'upload_dt', '(metadata or {})', '(ds_config or {})'], {'status': 'status', 'mol_dbs': 'mol_dbs', 'adducts': 'adducts', 'img_storage_type': '"""fs"""'}), "(ds_id, ds_name, input_path, upload_dt, metadata or {}, ds_config or\n {}, status=status, mol_dbs=mol_dbs, adducts=adducts, img_storage_type='fs')\n", (2904, 3052), False, 'from sm.engine.dataset_manager import Dataset, DatasetActionPriority, DatasetAction, DatasetStatus\n'), ((1807, 1826), 'sm.engine.DB', 'DB', (["sm_config['db']"], {}), "(sm_config['db'])\n", (1809, 1826), False, 'from sm.engine import DB, ESExporter, QueuePublisher\n'), ((1847, 1873), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'ESExporter'}), '(spec=ESExporter)\n', (1856, 1873), False, 'from unittest.mock import patch, MagicMock, call\n'), ((1914, 1939), 'unittest.mock.MagicMock', 'MagicMock', (['QueuePublisher'], {}), '(QueuePublisher)\n', (1923, 1939), False, 'from unittest.mock import patch, MagicMock, call\n'), ((1980, 2005), 'unittest.mock.MagicMock', 'MagicMock', (['QueuePublisher'], {}), '(QueuePublisher)\n', (1989, 2005), False, 'from unittest.mock import patch, MagicMock, call\n'), ((2040, 2080), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'ImageStoreServiceWrapper'}), '(spec=ImageStoreServiceWrapper)\n', (2049, 2080), False, 'from unittest.mock import patch, MagicMock, call\n'), ((2111, 2265), 'sm.engine.dataset_manager.SMapiDatasetManager', 'SMapiDatasetManager', ([], {'db': 'db', 'es': 'es_mock', 'mode': '"""queue"""', 'image_store': 'img_store_mock', 'action_queue': 'action_queue_mock', 
'status_queue': 'status_queue_mock'}), "(db=db, es=es_mock, mode='queue', image_store=\n img_store_mock, action_queue=action_queue_mock, status_queue=\n status_queue_mock)\n", (2130, 2265), False, 'from sm.engine.dataset_manager import SMapiDatasetManager, SMDaemonDatasetManager\n'), ((2351, 2466), 'sm.engine.dataset_manager.SMDaemonDatasetManager', 'SMDaemonDatasetManager', ([], {'db': 'db', 'es': 'es_mock', 'img_store': 'img_store_mock', 'mode': 'None', 'status_queue': 'status_queue_mock'}), '(db=db, es=es_mock, img_store=img_store_mock, mode=\n None, status_queue=status_queue_mock)\n', (2373, 2466), False, 'from sm.engine.dataset_manager import SMapiDatasetManager, SMDaemonDatasetManager\n'), ((2763, 2777), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2775, 2777), False, 'from datetime import datetime\n'), ((3192, 3222), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'QueuePublisher'}), '(spec=QueuePublisher)\n', (3201, 3222), False, 'from unittest.mock import patch, MagicMock, call\n'), ((3786, 3816), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'QueuePublisher'}), '(spec=QueuePublisher)\n', (3795, 3816), False, 'from unittest.mock import patch, MagicMock, call\n'), ((4343, 4373), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'QueuePublisher'}), '(spec=QueuePublisher)\n', (4352, 4373), False, 'from unittest.mock import patch, MagicMock, call\n'), ((4964, 4994), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'QueuePublisher'}), '(spec=QueuePublisher)\n', (4973, 4994), False, 'from unittest.mock import patch, MagicMock, call\n'), ((5342, 5372), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'QueuePublisher'}), '(spec=QueuePublisher)\n', (5351, 5372), False, 'from unittest.mock import patch, MagicMock, call\n'), ((5966, 5985), 'sm.engine.DB', 'DB', (["sm_config['db']"], {}), "(sm_config['db'])\n", (5968, 5985), False, 'from sm.engine import DB, ESExporter, QueuePublisher\n'), ((6014, 6044), 
'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'QueuePublisher'}), '(spec=QueuePublisher)\n', (6023, 6044), False, 'from unittest.mock import patch, MagicMock, call\n'), ((6063, 6089), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'ESExporter'}), '(spec=ESExporter)\n', (6072, 6089), False, 'from unittest.mock import patch, MagicMock, call\n'), ((6115, 6150), 'unittest.mock.MagicMock', 'MagicMock', (['ImageStoreServiceWrapper'], {}), '(ImageStoreServiceWrapper)\n', (6124, 6150), False, 'from unittest.mock import patch, MagicMock, call\n'), ((6315, 6343), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(100, 100)'], {}), "('RGB', (100, 100))\n", (6324, 6343), False, 'from PIL import Image\n'), ((6559, 6593), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '(100, 100)'}), '(return_value=(100, 100))\n', (6568, 6593), False, 'from unittest.mock import patch, MagicMock, call\n'), ((7559, 7589), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'QueuePublisher'}), '(spec=QueuePublisher)\n', (7568, 7589), False, 'from unittest.mock import patch, MagicMock, call\n'), ((7608, 7634), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'ESExporter'}), '(spec=ESExporter)\n', (7617, 7634), False, 'from unittest.mock import patch, MagicMock, call\n'), ((7648, 7667), 'sm.engine.DB', 'DB', (["sm_config['db']"], {}), "(sm_config['db'])\n", (7650, 7667), False, 'from sm.engine import DB, ESExporter, QueuePublisher\n'), ((8539, 8569), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'QueuePublisher'}), '(spec=QueuePublisher)\n', (8548, 8569), False, 'from unittest.mock import patch, MagicMock, call\n'), ((8588, 8614), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'ESExporter'}), '(spec=ESExporter)\n', (8597, 8614), False, 'from unittest.mock import patch, MagicMock, call\n'), ((9544, 9563), 'sm.engine.DB', 'DB', (["sm_config['db']"], {}), "(sm_config['db'])\n", (9546, 9563), False, 'from sm.engine import DB, ESExporter, QueuePublisher\n'), 
((9592, 9622), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'QueuePublisher'}), '(spec=QueuePublisher)\n', (9601, 9622), False, 'from unittest.mock import patch, MagicMock, call\n'), ((9641, 9667), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'ESExporter'}), '(spec=ESExporter)\n', (9650, 9667), False, 'from unittest.mock import patch, MagicMock, call\n'), ((9701, 9741), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'ImageStoreServiceWrapper'}), '(spec=ImageStoreServiceWrapper)\n', (9710, 9741), False, 'from unittest.mock import patch, MagicMock, call\n'), ((7920, 7934), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7932, 7934), False, 'from datetime import datetime\n'), ((8816, 8862), 'unittest.mock.patch', 'patch', (['"""sm.engine.dataset_manager.MolecularDB"""'], {}), "('sm.engine.dataset_manager.MolecularDB')\n", (8821, 8862), False, 'from unittest.mock import patch, MagicMock, call\n'), ((3656, 3693), 'unittest.mock.call', 'call', (['msg', 'DatasetActionPriority.HIGH'], {}), '(msg, DatasetActionPriority.HIGH)\n', (3660, 3693), False, 'from unittest.mock import patch, MagicMock, call\n'), ((4184, 4221), 'unittest.mock.call', 'call', (['msg', 'DatasetActionPriority.HIGH'], {}), '(msg, DatasetActionPriority.HIGH)\n', (4188, 4221), False, 'from unittest.mock import patch, MagicMock, call\n'), ((4798, 4835), 'unittest.mock.call', 'call', (['msg', 'DatasetActionPriority.HIGH'], {}), '(msg, DatasetActionPriority.HIGH)\n', (4802, 4835), False, 'from unittest.mock import patch, MagicMock, call\n'), ((5840, 5880), 'unittest.mock.call', 'call', (['msg', 'DatasetActionPriority.DEFAULT'], {}), '(msg, DatasetActionPriority.DEFAULT)\n', (5844, 5880), False, 'from unittest.mock import patch, MagicMock, call\n'), ((8986, 9040), 'unittest.mock.patch', 'patch', (['"""sm.engine.dataset_manager.MolDBServiceWrapper"""'], {}), "('sm.engine.dataset_manager.MolDBServiceWrapper')\n", (8991, 9040), False, 'from unittest.mock import patch, 
MagicMock, call\n'), ((10177, 10208), 'unittest.mock.call', 'call', (['"""fs"""', '"""iso_image"""', 'ids[0]'], {}), "('fs', 'iso_image', ids[0])\n", (10181, 10208), False, 'from unittest.mock import patch, MagicMock, call\n'), ((10210, 10241), 'unittest.mock.call', 'call', (['"""fs"""', '"""iso_image"""', 'ids[1]'], {}), "('fs', 'iso_image', ids[1])\n", (10214, 10241), False, 'from unittest.mock import patch, MagicMock, call\n'), ((1087, 1103), 'json.dumps', 'json.dumps', (['meta'], {}), '(meta)\n', (1097, 1103), False, 'import json\n'), ((1105, 1126), 'json.dumps', 'json.dumps', (['ds_config'], {}), '(ds_config)\n', (1115, 1126), False, 'import json\n')] |
import gym
from dqn_tf import DeepQNetwork, Agent
import numpy as np
from gym import wrappers
def preprocess(observation):
return np.mean(observation[30:, :], axis=2).reshape(180, 160, 1)
def stack_frames(stacked_frames, frame, buffer_size):
if stacked_frames is None:
stacked_frames = np.zeros((buffer_size, *frame.reshape))
for idx, _ in enumerate(stacked_frame):
stacked_frames[idx, :] = frame[0]
else:
stacked_frames[0:buffer_size-1, :] = stacked_frames[1:, :]
stacked_frames[buffer_size-1, :] = frame[0]
stacked_frames = stacked_frames.reshape(1, *frames.shape[0:2], buffer_size)
return stacked_frames
if __name__ == "__main__":
env = gym.make("Breakout-v0")
load_checkpoint = False
agent = Agent(gamma=0.99, epsilon=1.0, alpha=0.00025, input_dims=(180, 160, 4),
n_actions=3, mem_size=3000, batch_size=32)
if load_checkpoint:
agent.load_models()
scores = []
numGames = 200
stack_size = 400
score = 0
while agent.mem_cntr < 3000:
done = False
observation = env.reset()
observation = preprocess(observation)
stacked_frames = None
observation = stack_frames(stacked_frames, observation, stack_size)
while not done:
action = np.random.choice([0, 1, 2])
action += 1
observation_, reward, done, info = env.step(action)
observation_ = stack_frames(stacked_frames, preprocess(observation_),
stack_size)
action -= 1
agent.store_transition(observation, action,
reward, observation_, int(done))
observation = observation_
print("Done with random gameplay, game on")
for i in range(numGames):
done = False
if i % 10 == 0 and i > 0:
avg_score = np.mean(score[max(0, i-10):(i+1)])
print('episode', i, 'score', score, 'average_score %.3f' % avg_score,
'epsilon %.3f' % agent.epsilon)
agent.save_models()
else:
print('episode: ', i, 'score ', score)
observation = env.reset()
observation = preprocess(observation)
stacked_frames = None
observation = stack_frames(stacked_frames, observation, stack_size)
while not done:
action = agent.choose_action(observation)
action += 1
observation_, reward, done, info = env.step(action)
observation_ = stack_frames(stacked_frames, preprocess(observation_),
stack_size)
action -= 1
agent.store_transition(observation, action,
reward, observation_, int(done))
observation = observation_
agent.learn()
score += reward
scores.append(score)
| [
"numpy.mean",
"numpy.random.choice",
"numpy.zeros",
"dqn_tf.Agent",
"gym.make"
] | [((717, 740), 'gym.make', 'gym.make', (['"""Breakout-v0"""'], {}), "('Breakout-v0')\n", (725, 740), False, 'import gym\n'), ((781, 899), 'dqn_tf.Agent', 'Agent', ([], {'gamma': '(0.99)', 'epsilon': '(1.0)', 'alpha': '(0.00025)', 'input_dims': '(180, 160, 4)', 'n_actions': '(3)', 'mem_size': '(3000)', 'batch_size': '(32)'}), '(gamma=0.99, epsilon=1.0, alpha=0.00025, input_dims=(180, 160, 4),\n n_actions=3, mem_size=3000, batch_size=32)\n', (786, 899), False, 'from dqn_tf import DeepQNetwork, Agent\n'), ((306, 345), 'numpy.zeros', 'np.zeros', (['(buffer_size, *frame.reshape)'], {}), '((buffer_size, *frame.reshape))\n', (314, 345), True, 'import numpy as np\n'), ((136, 172), 'numpy.mean', 'np.mean', (['observation[30:, :]'], {'axis': '(2)'}), '(observation[30:, :], axis=2)\n', (143, 172), True, 'import numpy as np\n'), ((1322, 1349), 'numpy.random.choice', 'np.random.choice', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (1338, 1349), True, 'import numpy as np\n')] |
from bs4 import BeautifulSoup
import requests
import csv
import sys
from urllib.error import HTTPError
sys.path.append("..")
import mytemp
import time
import json
url='https://gz.17zwd.com/api/shop/get-list/73'
resp=requests.get(url)
f=open('17wang.txt','w+',encoding='utf-8')
f.write(resp.text)
print(resp.text) | [
"sys.path.append",
"requests.get"
] | [((108, 129), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (123, 129), False, 'import sys\n'), ((228, 245), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (240, 245), False, 'import requests\n')] |
#!/usr/bin/env python
"""
Example model with strong correlations between the fitted parameters.
We use a*x = y + N(0,1) made complicated by defining a=p1+p2.
The expected distribution for p1 and p2 will be uniform, with p2 = a-p1 in
each sample. Because this distribution is inherently unbounded, artificial
bounds are required on a least one of the parameters for finite duration
simulations.
The expected distribution for p1+p2 can be determined from the linear model
y = a*x. This is reported along with the values estimated from MCMC.
"""
from __future__ import print_function
from pylab import * # Numeric functions and plotting
from dream import * # sampler functions
# Create the correlation function and generate some fake data
x = linspace(-1., 1, 40)
fn = lambda p: sum(p)*x
bounds=(-20,-inf),(40,inf)
sigma = 1
data = fn((1,1)) + randn(*x.shape)*sigma # Fake data
# Sample from the posterior density function
n=2
model = Simulation(f=fn, data=data, sigma=sigma, bounds=bounds,
labels=["x","y"])
sampler = Dream(model=model,
population=randn(5*n,4,n),
thinning=1,
draws=20000,
)
mc = sampler.sample()
mc.title = 'Strong anti-correlation'
# Create a derived parameter without the correlation
mc.derive_vars(lambda p: (p[0]+p[1]), labels=['x+y'])
# Compare the MCMC estimate for the derived parameter to a least squares fit
from bumps.wsolve import wpolyfit
poly = wpolyfit(x,data,degree=1,origin=True)
print("x+y from linear fit", poly.coeff[0], poly.std[0])
points,logp = mc.sample(portion=0.5)
print("x+y from MCMC",mean(points[:,2]), std(points[:,2],ddof=1))
# Plot the samples
plot_all(mc, portion=0.5)
show()
| [
"bumps.wsolve.wpolyfit"
] | [((1471, 1511), 'bumps.wsolve.wpolyfit', 'wpolyfit', (['x', 'data'], {'degree': '(1)', 'origin': '(True)'}), '(x, data, degree=1, origin=True)\n', (1479, 1511), False, 'from bumps.wsolve import wpolyfit\n')] |
"""Data analyzation metrics
Each algorithm works on a set of handwritings. They have to be applied like
this:
>>> import hwrt.data_analyzation_metrics
>>> from hwrt.handwritten_data import HandwrittenData
>>> data_json = '[[{"time": 123, "x": 45, "y": 67}]]'
>>> a = [{'is_in_testset': 0,
... 'formula_id': "31L",
... 'handwriting': HandwrittenData(raw_data_id=2953, raw_data_json=data_json),
... 'formula_in_latex': 'A',
... 'id': "2953L"},
... {'is_in_testset': 0,
... 'formula_id': "31L",
... 'handwriting': HandwrittenData(raw_data_id=4037, raw_data_json=data_json),
... 'formula_in_latex': 'A',
... 'id': "4037L"},
... {'is_in_testset': 0,
... 'formula_id': "31L",
... 'handwriting': HandwrittenData(raw_data_id=4056, raw_data_json=data_json),
... 'formula_in_latex': 'A',
... 'id': "4056L"}]
>>> creator_metric = Creator('creator.csv')
>>> creator_metric(a)
100%
"""
# Core Library modules
import logging
import math
import os
import sys
import time
from collections import defaultdict
# Third party modules
import numpy
# Local modules
# HandwrittenData and preprocessing are needed because of pickle
from . import handwritten_data # pylint: disable=W0611
from . import preprocessing # pylint: disable=W0611
from . import utils
logger = logging.getLogger(__name__)
sys.modules["hwrt.HandwrittenData"] = handwritten_data
def get_metrics(metrics_description):
"""Get metrics from a list of dictionaries. """
return utils.get_objectlist(
metrics_description,
config_key="data_analyzation_plugins",
module=sys.modules[__name__],
)
# Helper functions that are useful for some metrics
def prepare_file(filename):
"""Truncate the file and return the filename."""
directory = os.path.join(utils.get_project_root(), "analyzation/")
if not os.path.exists(directory):
os.makedirs(directory)
workfilename = os.path.join(directory, filename)
with open(workfilename, "w") as fp:
pass # Truncate the file
return workfilename
def sort_by_formula_id(raw_datasets):
"""
Sort a list of formulas by `id`, where `id` represents the accepted
formula id.
Parameters
----------
raw_datasets : list of dictionaries
A list of raw datasets.
Examples
--------
The parameter `raw_datasets` has to be of the format
>>> from hwrt.handwritten_data import HandwrittenData
>>> data = '[[{"time": 123, "x": 45, "y": 67}]]'
>>> rd = [{'is_in_testset': 0,
... 'formula_id': 31,
... 'handwriting': HandwrittenData(raw_data_id=2953, raw_data_json=data),
... 'formula_in_latex': 'A',
... 'id': 2953},
... {'is_in_testset': 0,
... 'formula_id': 31,
... 'handwriting': HandwrittenData(raw_data_id=4037, raw_data_json=data),
... 'formula_in_latex': 'A',
... 'id': 4037},
... {'is_in_testset': 0,
... 'formula_id': 31,
... 'handwriting': HandwrittenData(raw_data_id=4056, raw_data_json=data),
... 'formula_in_latex': 'A',
... 'id': 4056}]
>>> _ = sort_by_formula_id(rd)
"""
by_formula_id = defaultdict(list)
for el in raw_datasets:
by_formula_id[el["handwriting"].formula_id].append(el["handwriting"])
return by_formula_id
# Only data analyzation calculation classes follow
# Every class must have a __str__, __repr__ and __call__ function where
# __call__ must take exactly one argument of type list of dictionaries
# Every class must have a constructor which takes the filename as a parameter.
# This filename has to be used to write the evaluation results
# (preferably in CSV format) to this file.
# prepare_file should be applied to every file in the constructor
class Creator:
"""Analyze who created most of the data."""
def __init__(self, filename="creator.csv"):
self.filename = prepare_file(filename)
def __repr__(self):
return "AnalyzeCreator(%s)" % self.filename
def __str__(self):
return "AnalyzeCreator(%s)" % self.filename
def __call__(self, raw_datasets):
with open(self.filename, "a") as write_file:
write_file.write("creatorid,nr of recordings\n") # heading
print_data = defaultdict(int)
start_time = time.time()
for i, raw_dataset in enumerate(raw_datasets):
if i % 100 == 0 and i > 0:
utils.print_status(len(raw_datasets), i, start_time)
print_data[raw_dataset["handwriting"].user_id] += 1
print("100%")
# Sort the data by highest value, descending
print_data = sorted(print_data.items(), key=lambda n: n[1], reverse=True)
# Write data to file
write_file.write(f"total,{sum(value for _, value in print_data)}\n")
for userid, value in print_data:
write_file.write(f"{userid},{value}\n")
class InstrokeSpeed:
"""Analyze how fast the points were in pixel/ms."""
def __init__(self, filename="instroke_speed.csv"):
self.filename = prepare_file(filename)
def __repr__(self):
return "InstrokeSpeed(%s)" % self.filename
def __str__(self):
return "InstrokeSpeed(%s)" % self.filename
def __call__(self, raw_datasets):
with open(self.filename, "a") as write_file:
write_file.write("speed\n") # heading
print_data = []
start_time = time.time()
for i, raw_dataset in enumerate(raw_datasets):
if i % 100 == 0 and i > 0:
utils.print_status(len(raw_datasets), i, start_time)
pointlist = raw_dataset["handwriting"].get_sorted_pointlist()
for stroke in pointlist:
for last_point, point in zip(stroke, stroke[1:]):
space_dist = math.hypot(
last_point["x"] - point["x"], last_point["y"] - point["y"]
)
time_delta = point["time"] - last_point["time"]
if time_delta == 0:
continue
print_data.append(space_dist / time_delta)
print("100%")
# Sort the data by highest value, descending
print_data = sorted(print_data, reverse=True)
# Write data to file
for value in print_data:
write_file.write("%0.8f\n" % (value))
logger.info("instroke speed mean: %0.8f", numpy.mean(print_data))
logger.info("instroke speed std: %0.8f", numpy.std(print_data))
class InterStrokeDistance:
"""Analyze how much distance in px is between strokes."""
def __init__(self, filename="dist_between_strokes.csv"):
self.filename = prepare_file(filename)
def __repr__(self):
return "InterStrokeDistance(%s)" % self.filename
def __str__(self):
return "InterStrokeDistance(%s)" % self.filename
def __call__(self, raw_datasets):
with open(self.filename, "a") as write_file:
write_file.write("speed\n") # heading
print_data = []
start_time = time.time()
for i, raw_dataset in enumerate(raw_datasets):
if i % 100 == 0 and i > 0:
utils.print_status(len(raw_datasets), i, start_time)
pointlist = raw_dataset["handwriting"].get_sorted_pointlist()
for last_stroke, stroke in zip(pointlist, pointlist[1:]):
point1 = last_stroke[-1]
point2 = stroke[0]
space_dist = math.hypot(
point1["x"] - point2["x"], point1["y"] - point2["y"]
)
print_data.append(space_dist)
print("100%")
# Sort the data by highest value, descending
print_data = sorted(print_data, reverse=True)
# Write data to file
for value in print_data:
write_file.write("%0.8f\n" % (value))
logger.info("dist_between_strokes mean:\t%0.8fpx", numpy.mean(print_data))
logger.info("dist_between_strokes std: \t%0.8fpx", numpy.std(print_data))
class TimeBetweenPointsAndStrokes:
"""For each recording: Store the average time between controll points of
one stroke / controll points of two different strokes.
"""
def __init__(
self,
filename="average_time_between_points.txt",
filename_strokes="average_time_between_strokes.txt",
):
self.filename_points = prepare_file(filename)
self.filename_strokes = prepare_file(filename_strokes)
def __repr__(self):
return "TimeBetweenPointsAndStrokes({points}, {strokes})".format(
points=self.filename_points,
strokes=self.filename_strokes,
)
__str__ = __repr__
def __call__(self, raw_datasets):
average_between_points = open(self.filename_points, "a") # noqa
average_between_strokes = open(self.filename_strokes, "a") # noqa
start_time = time.time()
for i, raw_dataset in enumerate(raw_datasets):
if i % 100 == 0 and i > 0:
utils.print_status(len(raw_datasets), i, start_time)
# Do the work
times_between_points, times_between_strokes = [], []
last_stroke_end = None
for stroke in raw_dataset["handwriting"].get_sorted_pointlist():
if last_stroke_end is not None:
times_between_strokes.append(stroke[-1]["time"] - last_stroke_end)
last_stroke_end = stroke[-1]["time"]
for point1, point2 in zip(stroke, stroke[1:]):
delta = point2["time"] - point1["time"]
times_between_points.append(delta)
# The recording might only have one point
if len(times_between_points) > 0:
tmp = times_between_points
average_between_points.write("%0.2f\n" % numpy.average(tmp))
# The recording might only have one stroke
if len(times_between_strokes) > 0:
tmp = times_between_strokes
average_between_strokes.write("%0.2f\n" % numpy.average(tmp))
print("100%")
average_between_points.close()
average_between_strokes.close()
class AnalyzeErrors:
"""Analyze the number of errors in the dataset."""
def __init__(self, filename="errors.txt", time_max_threshold=30 * 1000):
self.filename = prepare_file(filename)
self.time_max_threshold = time_max_threshold # in ms
self.dot_symbols = [
"i",
"j",
r"\cdot",
r"\div",
"\\because",
"\\therefore",
] # TODO: Use the tags!
def __repr__(self):
return "AnalyzeErrors"
def __str__(self):
return "AnalyzeErrors"
def _write_data(
self,
symbols,
err_recs,
nr_recordings,
total_error_count,
percentages,
time_max_list,
):
"""Write all obtained data to a file.
Parameters
----------
symbols : list of tuples (String, non-negative int)
List of all symbols with the count of recordings
err_recs : dictionary
count of recordings by error type
nr_recordings : non-negative int
number of recordings
total_error_count : dictionary
Count of all error that have happened by type
percentages : list
List of all recordings where removing the dots changed the size of
the bounding box.
time_max_list : list
List of all recordings where the recording time is above a
threshold.
"""
write_file = open(self.filename, "a") # noqa
s = ""
for symbol, count in sorted(symbols.items(), key=lambda n: n[0]):
if symbol in ["a", "0", "A"]:
s += "\n%s (%i), " % (symbol, count)
elif symbol in ["z", "9", "Z"]:
s += "%s (%i) \n" % (symbol, count)
else:
s += "%s (%i), " % (symbol, count)
print("## Data", file=write_file)
print("Symbols: %i" % len(symbols), file=write_file)
print("Recordings: %i" % sum(symbols.values()), file=write_file)
print("```", file=write_file)
print(s[:-1], file=write_file)
print("```", file=write_file)
# Show errors
print(
"Recordings with wild points: %i (%0.2f%%)"
% (
err_recs["wild_points"],
float(err_recs["wild_points"]) / nr_recordings * 100,
),
file=write_file,
)
print("wild points: %i" % total_error_count["wild_points"], file=write_file)
print(
"Recordings with missing stroke: %i (%0.2f%%)"
% (
err_recs["missing_stroke"],
float(err_recs["missing_stroke"]) / nr_recordings * 100,
),
file=write_file,
)
print(
"Recordings with errors: %i (%0.2f%%)"
% (err_recs["total"], float(err_recs["total"]) / nr_recordings * 100),
file=write_file,
)
print(
"Recordings with dots: %i (%0.2f%%)"
% (
err_recs["single_dots"],
float(err_recs["single_dots"]) / nr_recordings * 100,
),
file=write_file,
)
print("dots: %i" % total_error_count["single_dots"], file=write_file)
print(
"size changing removal: %i (%0.2f%%)"
% (len(percentages), float(len(percentages)) / nr_recordings * 100),
file=write_file,
)
print(
"%i recordings took more than %i ms. That were: "
% (len(time_max_list), self.time_max_threshold),
file=write_file,
)
for recording in time_max_list:
print(
"* %ims: %s: %s"
% (
recording.get_time(),
utils.get_readable_time(recording.get_time()),
recording,
),
file=write_file,
)
write_file.close()
    def __call__(self, raw_datasets):
        """Collect per-symbol counts and error statistics over ``raw_datasets``
        and write the resulting report via ``self._write_data``.

        Args:
            raw_datasets: iterable of dicts with at least the keys
                ``"handwriting"`` (a recording object) and ``"formula_in_latex"``.
        """
        # Initialize variables
        symbols = defaultdict(int)  # LaTeX symbol -> number of recordings
        # Count erroneous recordings (each recording counted at most once per key)
        err_recs = {
            "wild_points": 0,
            "missing_stroke": 0,
            "single_dots": 0,  # except symbols_with_dots
            "total": 0,
        }
        # Count errors (one type of error might occur multiple times in
        # a single recording)
        total_error_count = {"wild_points": 0, "single_dots": 0}
        # Bounding-box area ratios (< 1.0) observed after removing dots.
        percentages = []
        # List with recordings that are over the time maximum
        time_max_list = []
        for raw_dataset in raw_datasets:
            recording = raw_dataset["handwriting"]
            symbols[recording.formula_in_latex] += 1
            if recording.get_time() > self.time_max_threshold:
                time_max_list.append(recording)
            if recording.wild_point_count > 0:
                err_recs["wild_points"] += 1
            total_error_count["wild_points"] += recording.wild_point_count
            err_recs["missing_stroke"] += recording.missing_stroke
            if recording.wild_point_count > 0 or recording.missing_stroke:
                err_recs["total"] += 1
            # Single dots only count as an error for symbols that are not
            # expected to contain dots.
            if (
                recording.count_single_dots() > 0
                and raw_dataset["formula_in_latex"] not in self.dot_symbols
                and "dots" not in raw_dataset["formula_in_latex"]
            ):
                err_recs["single_dots"] += 1
                # Measure how much removing the dots shrinks the bounding box.
                old_area = recording.get_area()
                tmp = [preprocessing.RemoveDots()]
                recording.preprocessing(tmp)  # NOTE(review): mutates the recording in place
                new_area = recording.get_area()
                percentage = float(new_area) / float(old_area)
                if percentage < 1.0:
                    percentages.append(percentage)
            total_error_count["single_dots"] += recording.count_single_dots()
        # Longest recordings first.
        time_max_list = sorted(time_max_list, key=lambda n: n.get_time(), reverse=True)
        self._write_data(
            symbols,
            err_recs,
            len(raw_datasets),
            total_error_count,
            percentages,
            time_max_list,
        )
| [
"logging.getLogger",
"os.path.exists",
"numpy.mean",
"os.makedirs",
"numpy.average",
"os.path.join",
"collections.defaultdict",
"numpy.std",
"math.hypot",
"time.time"
] | [((1294, 1321), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1311, 1321), False, 'import logging\n'), ((1918, 1951), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (1930, 1951), False, 'import os\n'), ((3211, 3228), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3222, 3228), False, 'from collections import defaultdict\n'), ((1841, 1866), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (1855, 1866), False, 'import os\n'), ((1876, 1898), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (1887, 1898), False, 'import os\n'), ((9196, 9207), 'time.time', 'time.time', ([], {}), '()\n', (9205, 9207), False, 'import time\n'), ((14570, 14586), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (14581, 14586), False, 'from collections import defaultdict\n'), ((4311, 4327), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (4322, 4327), False, 'from collections import defaultdict\n'), ((4353, 4364), 'time.time', 'time.time', ([], {}), '()\n', (4362, 4364), False, 'import time\n'), ((5524, 5535), 'time.time', 'time.time', ([], {}), '()\n', (5533, 5535), False, 'import time\n'), ((7263, 7274), 'time.time', 'time.time', ([], {}), '()\n', (7272, 7274), False, 'import time\n'), ((6603, 6625), 'numpy.mean', 'numpy.mean', (['print_data'], {}), '(print_data)\n', (6613, 6625), False, 'import numpy\n'), ((6680, 6701), 'numpy.std', 'numpy.std', (['print_data'], {}), '(print_data)\n', (6689, 6701), False, 'import numpy\n'), ((8210, 8232), 'numpy.mean', 'numpy.mean', (['print_data'], {}), '(print_data)\n', (8220, 8232), False, 'import numpy\n'), ((8297, 8318), 'numpy.std', 'numpy.std', (['print_data'], {}), '(print_data)\n', (8306, 8318), False, 'import numpy\n'), ((7720, 7784), 'math.hypot', 'math.hypot', (["(point1['x'] - point2['x'])", "(point1['y'] - point2['y'])"], {}), "(point1['x'] - point2['x'], 
point1['y'] - point2['y'])\n", (7730, 7784), False, 'import math\n'), ((5938, 6008), 'math.hypot', 'math.hypot', (["(last_point['x'] - point['x'])", "(last_point['y'] - point['y'])"], {}), "(last_point['x'] - point['x'], last_point['y'] - point['y'])\n", (5948, 6008), False, 'import math\n'), ((10141, 10159), 'numpy.average', 'numpy.average', (['tmp'], {}), '(tmp)\n', (10154, 10159), False, 'import numpy\n'), ((10365, 10383), 'numpy.average', 'numpy.average', (['tmp'], {}), '(tmp)\n', (10378, 10383), False, 'import numpy\n')] |
import conftest # Add root path to sys.path
import os
import matplotlib.pyplot as plt
from PathPlanning.SpiralSpanningTreeCPP \
import spiral_spanning_tree_coverage_path_planner
# Enable the planner's animation whenever these tests run.
spiral_spanning_tree_coverage_path_planner.do_animation = True
def spiral_stc_cpp(img, start):
    """Plan a coverage path on ``img`` from ``start`` and assert full coverage.

    Args:
        img: 2-D occupancy array (assumed binary, free cells summing to the
             free-cell count — TODO confirm against the map images).
        start: (row, col) start position for the planner.
    """
    # Total free cells; a vectorized sum replaces the former O(H*W) Python loop.
    num_free = img.sum()
    STC_planner = spiral_spanning_tree_coverage_path_planner.\
        SpiralSpanningTreeCoveragePlanner(img)
    edge, route, path = STC_planner.plan(start)
    # Every endpoint of a spanning-tree edge is a covered node.
    covered_nodes = {node for p, q in edge for node in (p, q)}
    # assert complete coverage (the planner works on merged 2x2 cells,
    # hence the division by 4)
    assert len(covered_nodes) == num_free / 4
def test_spiral_stc_cpp_1():
    """Coverage test on map/test.png starting at the origin."""
    here = os.path.abspath(__file__)
    img_dir = os.path.dirname(here) + "/../PathPlanning/SpiralSpanningTreeCPP"
    map_path = os.path.join(img_dir, 'map', 'test.png')
    spiral_stc_cpp(plt.imread(map_path), (0, 0))
def test_spiral_stc_cpp_2():
    """Coverage test on map/test_2.png starting at (10, 0)."""
    here = os.path.abspath(__file__)
    img_dir = os.path.dirname(here) + "/../PathPlanning/SpiralSpanningTreeCPP"
    map_path = os.path.join(img_dir, 'map', 'test_2.png')
    spiral_stc_cpp(plt.imread(map_path), (10, 0))
def test_spiral_stc_cpp_3():
    """Coverage test on map/test_3.png starting at the origin."""
    here = os.path.abspath(__file__)
    img_dir = os.path.dirname(here) + "/../PathPlanning/SpiralSpanningTreeCPP"
    map_path = os.path.join(img_dir, 'map', 'test_3.png')
    spiral_stc_cpp(plt.imread(map_path), (0, 0))
if __name__ == '__main__':
    # Allow running this test module directly via the project's pytest helper.
    conftest.run_this_test(__file__)
| [
"os.path.join",
"PathPlanning.SpiralSpanningTreeCPP.spiral_spanning_tree_coverage_path_planner.SpiralSpanningTreeCoveragePlanner",
"conftest.run_this_test",
"os.path.abspath"
] | [((423, 509), 'PathPlanning.SpiralSpanningTreeCPP.spiral_spanning_tree_coverage_path_planner.SpiralSpanningTreeCoveragePlanner', 'spiral_spanning_tree_coverage_path_planner.SpiralSpanningTreeCoveragePlanner', (['img'], {}), '(\n img)\n', (499, 509), False, 'from PathPlanning.SpiralSpanningTreeCPP import spiral_spanning_tree_coverage_path_planner\n'), ((1576, 1608), 'conftest.run_this_test', 'conftest.run_this_test', (['__file__'], {}), '(__file__)\n', (1598, 1608), False, 'import conftest\n'), ((920, 960), 'os.path.join', 'os.path.join', (['img_dir', '"""map"""', '"""test.png"""'], {}), "(img_dir, 'map', 'test.png')\n", (932, 960), False, 'import os\n'), ((1183, 1225), 'os.path.join', 'os.path.join', (['img_dir', '"""map"""', '"""test_2.png"""'], {}), "(img_dir, 'map', 'test_2.png')\n", (1195, 1225), False, 'import os\n'), ((1449, 1491), 'os.path.join', 'os.path.join', (['img_dir', '"""map"""', '"""test_3.png"""'], {}), "(img_dir, 'map', 'test_3.png')\n", (1461, 1491), False, 'import os\n'), ((819, 844), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (834, 844), False, 'import os\n'), ((1082, 1107), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1097, 1107), False, 'import os\n'), ((1348, 1373), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1363, 1373), False, 'import os\n')] |
from __future__ import print_function
import numpy as np
from kernel_tuner import run_kernel
from .context import skip_if_no_cuda_device, create_plot
from km3net.util import get_kernel_path, generate_correlations_table
def test_degrees_kernel():
    """Verify the 'degrees_dense' CUDA kernel against a pure-Python reference.

    For each hit the kernel should produce the sum of its in- and out-degree
    in the sliding-window correlation table.
    """
    skip_if_no_cuda_device()

    def in_degrees(correlations):
        """Reference in-degree: correlations[j, i-j-1] contributes to hit i."""
        degrees = np.zeros(correlations.shape[1])
        for i in range(correlations.shape[1]):
            in_degree = 0
            for j in range(correlations.shape[0]):
                col = i - j - 1
                if col >= 0:
                    in_degree += correlations[j, col]
            degrees[i] = in_degree
        return degrees

    with open(get_kernel_path() + 'degrees.cu', 'r') as f:
        kernel_string = f.read()

    N = np.int32(400)
    sliding_window_width = np.int32(150)
    problem_size = (N, 1)

    # generate input data with an expected density of correlated hits
    correlations = generate_correlations_table(N, sliding_window_width, cutoff=2.87)

    # compute reference answer
    in_degree = in_degrees(correlations)
    out_degree = np.sum(correlations, axis=0).astype(np.int32)
    reference = in_degree + out_degree

    # call the CUDA kernel
    args = [out_degree, correlations, N]
    params = {"block_size_x": 256, 'window_width': sliding_window_width}
    answer = run_kernel("degrees_dense", kernel_string, problem_size, args, params)

    print("answer", answer[0])
    print("reference", reference)

    # verify element-wise: the previous check np.sum(answer[0] - reference) == 0
    # could pass when positive and negative errors cancel out
    test_result = np.array_equal(answer[0], reference)
    if not test_result:
        print("test degrees_dense FAILED, attempting to create a plot for visual comparison")
        create_plot(reference.reshape(20, 20), answer[0].reshape(20, 20))
    assert test_result
| [
"km3net.util.generate_correlations_table",
"km3net.util.get_kernel_path",
"numpy.int32",
"numpy.sum",
"numpy.zeros",
"kernel_tuner.run_kernel"
] | [((754, 767), 'numpy.int32', 'np.int32', (['(400)'], {}), '(400)\n', (762, 767), True, 'import numpy as np\n'), ((795, 808), 'numpy.int32', 'np.int32', (['(150)'], {}), '(150)\n', (803, 808), True, 'import numpy as np\n'), ((924, 989), 'km3net.util.generate_correlations_table', 'generate_correlations_table', (['N', 'sliding_window_width'], {'cutoff': '(2.87)'}), '(N, sliding_window_width, cutoff=2.87)\n', (951, 989), False, 'from km3net.util import get_kernel_path, generate_correlations_table\n'), ((1320, 1390), 'kernel_tuner.run_kernel', 'run_kernel', (['"""degrees_dense"""', 'kernel_string', 'problem_size', 'args', 'params'], {}), "('degrees_dense', kernel_string, problem_size, args, params)\n", (1330, 1390), False, 'from kernel_tuner import run_kernel\n'), ((331, 362), 'numpy.zeros', 'np.zeros', (['correlations.shape[1]'], {}), '(correlations.shape[1])\n', (339, 362), True, 'import numpy as np\n'), ((1488, 1517), 'numpy.sum', 'np.sum', (['(answer[0] - reference)'], {}), '(answer[0] - reference)\n', (1494, 1517), True, 'import numpy as np\n'), ((1079, 1107), 'numpy.sum', 'np.sum', (['correlations'], {'axis': '(0)'}), '(correlations, axis=0)\n', (1085, 1107), True, 'import numpy as np\n'), ((669, 686), 'km3net.util.get_kernel_path', 'get_kernel_path', ([], {}), '()\n', (684, 686), False, 'from km3net.util import get_kernel_path, generate_correlations_table\n')] |
import numpy as np
import torch
import torch.nn.functional as F
from scipy.sparse import coo_matrix
from sklearn.preprocessing import StandardScaler
from torch.utils.data import Dataset
from torch_geometric.data import InMemoryDataset, Data, Batch
from tqdm.auto import tqdm
from utils.data_utils import window_data_sorted, add_age_gender
class GraphDataset(InMemoryDataset):
    """
    Dataset to use for graph neural networks.

    Each patient becomes one fully connected graph over the medical codes
    active for that patient (see ``process_patient``); the processed,
    collated dataset is cached on disk by ``process``.
    """
    def __init__(self, root='/data/home/efridgeirsson/projects/dementia/data/sequence_dementia'):
        # InMemoryDataset triggers process() when the processed file is absent.
        super(GraphDataset, self).__init__(root)
        self.data, self.slices = torch.load(self.processed_paths[0])
        self.labels = self.data.y
    @property
    def num_features(self):
        # Number of distinct node feature values (code indices) in the data.
        return len(self.data.x.unique())
    @property
    def raw_file_names(self):
        return ['python_data']
    @property
    def processed_file_names(self):
        return ['dementia.dataset']
    def download(self):
        # Raw data is expected to already exist locally; nothing to download.
        pass
    def process(self):
        """Build one graph per patient from the raw tensors and cache the result."""
        data = torch.load(self.raw_paths[0])
        # Map the windowed covariate ids back to human-readable names.
        old_covariate_ids = data['map'].oldCovariateId
        covariate_ref = data['covariateRef']
        feature_names = covariate_ref[covariate_ref.covariateId.isin(old_covariate_ids)].covariateName.values
        # Aggregate temporal features into 30/180/365-day windows.
        window_lengths = (30, 180, 365)
        feature_matrix_counts, windowed_feature_names = window_data_sorted(
            window_lengths=list(window_lengths),
            feature_matrix=data['data'].coalesce(),
            all_feature_names=feature_names)
        feature_matrix_counts = feature_matrix_counts.T
        feature_matrix_counts.data = np.clip(feature_matrix_counts.data, 0, 1)  # counts to binary
        feature_matrix_counts, windowed_feature_names = add_age_gender(feature_matrix_counts,
                                                                       data['nonTemporalData'],
                                                                       windowed_feature_names,
                                                                       age_normalized=False)
        # A negative population 'index' marks the test split.
        train_index = data['population'][data['population']['index'] >= 0].index.values
        test_index = data['population'][data['population']['index'] < 0.0].index.values
        # The last four columns are the demographics added by add_age_gender.
        encounter_data = feature_matrix_counts[:, :-4]
        demographic_data = feature_matrix_counts[:, -4:].toarray()
        scaler = StandardScaler()
        # Fit scaling on training rows only to avoid test-set leakage; the
        # final demographic column (presumably gender — verify) stays unscaled.
        demographic_data[train_index, :-1] = scaler.fit_transform(demographic_data[train_index, :-1])
        demographic_data[test_index, :-1] = scaler.transform(demographic_data[test_index, :-1])
        outcomes = torch.as_tensor(data['population'].outcomeCount.values, dtype=torch.float32)
        demographic_data = torch.as_tensor(demographic_data, dtype=torch.float32)
        patients = [p for p in range(encounter_data.shape[0])]
        data_list = self.process_patient(patients, demographic_data, encounter_data, outcomes)
        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
    @staticmethod
    def process_patient(patient_idxs, demographic_data=None, encounter_data=None, outcomes=None):
        """Build a fully connected graph per patient over its active codes.

        Returns a list of torch_geometric ``Data`` objects; each graph also
        carries an extra node and a second (output) edge index used for
        classification read-out.
        """
        data = []
        for patient_idx in tqdm(patient_idxs):
            patient_data = encounter_data[patient_idx, :].toarray()
            # Column indices of the patient's nonzero (active) code features.
            source_nodes = torch.as_tensor(patient_data.nonzero()[1], dtype=torch.long)
            num_nodes = len(source_nodes)
            source_nodes = source_nodes[None, :]
            # Renumber nodes to 0..num_nodes-1 for the edge index.
            normalized_source_nodes = torch.as_tensor((range(len(source_nodes.unique()))))
            # Cartesian product of nodes -> dense (fully connected) edge index.
            edge_index = torch.cat((normalized_source_nodes.repeat(1, num_nodes),
                                   normalized_source_nodes.repeat(num_nodes, 1).transpose(0, 1).contiguous().view(
                                       (1, num_nodes ** 2))), dim=0)
            # add extra node for classification
            output_nodes = torch.cat((source_nodes[0, :], torch.as_tensor([patient_data.shape[1]])))
            output_nodes = output_nodes[None, :]
            normalized_output_nodes = torch.as_tensor((range(len(output_nodes.unique()))))
            # Fully connected edge index including the extra output node.
            output_edge_index = torch.cat((normalized_output_nodes.repeat(1, num_nodes + 1),
                                          normalized_output_nodes.repeat(num_nodes + 1, 1).transpose(0,
                                                                                                     1).contiguous().view(
                                              (1, (num_nodes + 1) ** 2))), dim=0)
            dem_data = demographic_data[patient_idx, :]
            y = outcomes[patient_idx]
            data.append(Data(x=output_nodes.transpose(0, 1), edge_index=edge_index.long(),
                             output_edge_index=output_edge_index.long(), y=y,
                             demographic=dem_data[None, :]))
        return data
def graph_collate(batch):
    """Collate a list of graph samples into a single torch_geometric Batch.

    Returns ``(batch, batch.y)`` so the caller receives the inputs and the
    labels together.
    """
    if isinstance(batch[0], Data):
        batch = Batch.from_data_list(batch)
    return batch, batch.y
class SARDData(Dataset):
    """
    Dataset class used for the original SARD implementation.
    """

    def __init__(self, indices, non_temporal, train_indices, outcomes, linear_predictions=None,
                 distill=True):
        """
        Parameters
        ----------
        indices : dict with train, val and test indices
        non_temporal : dataframe with the non-temporal covariates
            (columns rowIdPython, covariateId, covariateValue)
        train_indices : indices of the training set, used to fit the scaler
            so no information leaks from validation/test rows
        outcomes : outcome labels
        linear_predictions : predictions from previous model to distill
        distill : if run for distillation or not, if distillation then get_item returns also predictions
            of already fit model
        """
        self.distill = distill
        self.outcomes = outcomes
        self.linear_predictions = linear_predictions
        self.indices = indices
        # fix r to py (R row ids are 1-based)
        non_temporal.rowIdPython = non_temporal.rowIdPython - 1
        # extract age and other covariates
        age_id = 1002
        age_df = non_temporal[non_temporal.covariateId == age_id]
        age_df = age_df.sort_values(by='rowIdPython')
        age = torch.as_tensor(age_df.covariateValue.values, dtype=torch.float32)
        age_squared = age ** 2
        age_sqrt = torch.sqrt(age)
        ages = torch.stack([age, age_squared, age_sqrt]).T
        scaler = StandardScaler()
        scaler.fit(ages[train_indices])
        # BUG FIX: StandardScaler.transform returns a numpy ndarray; convert
        # back to a tensor, otherwise torch.cat below raises a TypeError.
        # (Same handling as in VisitSequenceWithLabelDataset.)
        ages = torch.as_tensor(scaler.transform(ages), dtype=torch.float32)
        # other covariates; rows without an entry keep the default value 0
        other_df = non_temporal[non_temporal.covariateId != age_id].sort_values(by='rowIdPython')
        not_age = torch.zeros((len(ages)))
        not_age[other_df.rowIdPython.values] = torch.as_tensor(other_df.covariateValue.values, dtype=torch.float32)
        self.num = torch.cat([ages, not_age[:, None]], dim=1)

    def __len__(self):
        return len(self.indices)

    def __getitem__(self, item):
        # Returns ((row id, numeric covariates), target); with distillation the
        # target is the pair (label, teacher prediction).
        if self.distill:
            return (self.indices[item], self.num[item]), (
                self.outcomes[self.indices[item]], self.linear_predictions[self.indices[item]])
        else:
            return (self.indices[item], self.num[item]), self.outcomes[self.indices[item]]
class VisitSequenceWithLabelDataset(Dataset):
    """
    Dataset class that uses lists of lists.

    Each patient's visit sequence is stored as a sparse multi-hot matrix of
    shape (max_visits, num_features), padded/truncated to the 99th
    percentile of visit counts.
    """
    def __init__(self, seqs, labels, num_features, non_temporal_data, visits, train_indices, reverse=False):
        """
        Args:
            seqs (list): list of patients (list) of visits (list) of codes (int) that contains visit sequences
            labels (list): list of labels (int)
            num_features (int): number of total features available
            non_temporal_data (dataframe): dataframe with nonTemporalData such as age or gender.
            visits (list): list of patients with timeId of visits
            train_indices (): indices of training set, used for operations that should only use info from training set
            reverse (bool): If true, reverse the order of sequence (for RETAIN)
        """
        if len(seqs) != len(labels):
            raise ValueError("Sequences and Labels have different lengths")
        # fix r to py (R row ids are 1-based)
        non_temporal_data.rowIdPython = non_temporal_data.rowIdPython - 1
        # extract age and other covariates
        age_id = 1002
        age_df = non_temporal_data[non_temporal_data.covariateId == age_id]
        age_df = age_df.sort_values(by='rowIdPython')
        age = torch.as_tensor(age_df.covariateValue.values, dtype=torch.float32)
        # Derived age features: age, age^2 and sqrt(age).
        age_squared = age ** 2
        age_sqrt = torch.sqrt(age)
        ages = torch.stack([age, age_squared, age_sqrt]).T
        scaler = StandardScaler()
        # Fit on training rows only to avoid information leakage.
        scaler.fit(ages[train_indices])
        ages = torch.as_tensor(scaler.transform(ages), dtype=torch.float32)
        # other covariates (not standardized); rows without an entry keep 0
        other_df = non_temporal_data[non_temporal_data.covariateId != age_id].sort_values(by='rowIdPython')
        not_age = torch.zeros((len(seqs)))
        not_age[other_df.rowIdPython.values] = torch.as_tensor(other_df.covariateValue.values, dtype=torch.float32)
        self.train_indices = train_indices
        self.num = torch.cat([ages, not_age[:, None]], dim=1)
        # Cap the sequence length at the 99th percentile of visit counts.
        n_visits = [len(v) for v in visits]
        self.max_visits = np.percentile(n_visits, 99).astype(int)
        self.num_features = num_features
        # Pad (or, via negative padding, crop) visit time ids to max_visits.
        self.visits = torch.vstack(
            [F.pad(torch.as_tensor(v, dtype=torch.long), (0, self.max_visits - len(v))) for v in visits])
        self.seqs = []
        self.lengths = []
        for i, (seq, label) in tqdm(enumerate(zip(seqs, labels))):
            if reverse:
                sequence = list(reversed(seq))
            else:
                sequence = seq
            # Build COO triplets: row j gets a 1.0 for every code of visit j.
            row = []
            col = []
            val = []
            for j, visit in enumerate(sequence):
                for code in visit:
                    if code < num_features:
                        row.append(j)
                        col.append(code)
                        val.append(1.0)
            if len(sequence) < self.max_visits:
                self.seqs.append(coo_matrix((np.array(val, dtype=np.float32), (np.array(row), np.array(col))),
                                            shape=(self.max_visits, num_features)))
                self.lengths.append(len(sequence))
            else:
                ix = np.array(row) < self.max_visits  # truncate to max visits
                self.seqs.append(
                    coo_matrix((np.array(val, dtype=np.float32)[ix], (np.array(row)[ix], np.array(col)[ix])),
                               shape=(self.max_visits, num_features)))
                self.lengths.append(self.max_visits)
        self.labels = torch.as_tensor(labels, dtype=torch.float32)
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, index):
        # (dense visit matrix, numeric covariates, label, true length, visit times)
        return torch.as_tensor(self.seqs[index].todense()), self.num[index, ...], self.labels[index], \
               self.lengths[index], self.visits[index]
class DistillDataset(VisitSequenceWithLabelDataset):
    """
    Sequence dataset that can additionally serve a teacher model's
    predictions alongside the labels, for knowledge distillation.
    """
    def __init__(self, linear_predictions=None, distill=True, **kwargs):
        super().__init__(**kwargs)
        self.distill = distill
        self.linear_predictions = torch.as_tensor(linear_predictions.values, dtype=torch.float32)
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, index):
        dense_seq = torch.as_tensor(self.seqs[index].todense())
        tail = (self.labels[index], self.lengths[index], self.visits[index])
        if not self.distill:
            return (dense_seq, self.num[index, ...]) + tail
        # Insert the teacher prediction between the covariates and the label.
        return (dense_seq, self.num[index, ...], self.linear_predictions[index]) + tail
class RETAIN_dataset(Dataset):
    """
    RETAIN is an RNN and so doesn't need to pad the input but can work with variable length sequences so I used
    this class that doesn't pad the input.
    """
    def __init__(self, seqs, labels, num_features, non_temporal_data, visits, train_indices, reverse=True):
        """
        Args:
            seqs (list): list of patients (list) of visits (list) of codes (int) that contains visit sequences
            labels (list): list of labels (int)
            num_features (int): number of total features available
            non_temporal_data (dataframe): dataframe with nonTemporalData such as age or gender.
            visits (list): list of patients with timeId of visits
            train_indices (): indices of training set, used for operations that should only use info from training set
            reverse (bool): If true, reverse the order of sequence (for RETAIN)
        """
        if len(seqs) != len(labels):
            raise ValueError("Sequences and Labels have different lengths")
        # fix r to py (R row ids are 1-based)
        non_temporal_data.rowIdPython = non_temporal_data.rowIdPython - 1
        # extract age and other covariates
        age_id = 1002
        age_df = non_temporal_data[non_temporal_data.covariateId == age_id]
        age_df = age_df.sort_values(by='rowIdPython')
        age = torch.as_tensor(age_df.covariateValue.values, dtype=torch.float32)
        age_squared = age ** 2
        age_sqrt = torch.sqrt(age)
        ages = torch.stack([age, age_squared, age_sqrt]).T
        # Scale by the training-set maximum (unlike the StandardScaler used
        # in the other dataset classes).
        age_maxes = torch.max(ages[train_indices], dim=0).values
        ages = ages / age_maxes
        # other covariates; rows without an entry keep the default value 0
        other_df = non_temporal_data[non_temporal_data.covariateId != age_id].sort_values(by='rowIdPython')
        not_age = torch.zeros((len(seqs)))
        not_age[other_df.rowIdPython.values] = torch.as_tensor(other_df.covariateValue.values, dtype=torch.float32)
        self.num = torch.cat([ages, not_age[:, None]], dim=1)
        self.visits = visits
        self.seqs = []
        self.lengths = []
        for i, (seq, label) in enumerate(zip(seqs, labels)):
            if reverse:
                sequence = list(reversed(seq))
            else:
                sequence = seq
            # Sparse multi-hot matrix (n_visits, num_features); no padding here.
            row = []
            col = []
            val = []
            for j, visit in enumerate(sequence):
                for code in visit:
                    if code < num_features:
                        row.append(j)
                        col.append(code)
                        val.append(1.0)
            self.seqs.append(coo_matrix((np.array(val, dtype=np.float32), (np.array(row), np.array(col))),
                                        shape=(len(sequence), num_features)))
            self.lengths.append(len(sequence))
        # Long labels here (presumably class indices for the loss — verify),
        # unlike the float labels in the other dataset classes.
        self.labels = torch.as_tensor(labels, dtype=torch.long)
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, index):
        # (dense visit matrix, numeric covariates, label, length, visit times)
        return torch.as_tensor(self.seqs[index].todense()), self.num[index, ...], self.labels[index], \
               self.lengths[index], self.visits[index]
def pad(batch):
    """Collate function used with RETAIN and the vanilla Transformer.

    Each sample is (seq, numeric covariates, target, length, visits). The
    sequences are returned untouched as a list; the numeric covariates,
    lengths, visits and targets are converted to tensors.
    """
    seqs, num, targs, lengths, visits = zip(*batch)
    # Stack covariates feature-wise, then transpose to (batch, features).
    numeric = torch.vstack([torch.as_tensor(col, dtype=torch.float32) for col in zip(*num)]).T
    visit_tensors = [torch.as_tensor(v, dtype=torch.long) for v in visits]
    inputs = [list(seqs), numeric, torch.as_tensor(lengths, dtype=torch.long), visit_tensors]
    return inputs, torch.as_tensor(targs, dtype=torch.float32)
def distill_pad(batch):
    """Collate function used when distilling.

    Like ``pad`` but each sample also carries the teacher model's
    prediction, which is returned alongside the label tensor.
    """
    seqs, num, preds, targs, lengths, visits = zip(*batch)
    # Stack covariates feature-wise, then transpose to (batch, features).
    numeric = torch.vstack([torch.as_tensor(col, dtype=torch.float32) for col in zip(*num)]).T
    visit_tensors = [torch.as_tensor(v, dtype=torch.long) for v in visits]
    inputs = [list(seqs), numeric, torch.as_tensor(lengths, dtype=torch.long), visit_tensors]
    targets = [torch.as_tensor(targs, dtype=torch.float32), torch.as_tensor(preds, dtype=torch.float32)]
    return inputs, targets
| [
"numpy.clip",
"torch.as_tensor",
"torch.load",
"torch.stack",
"torch.sqrt",
"torch.max",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"torch_geometric.data.Batch.from_data_list",
"torch.save",
"tqdm.auto.tqdm",
"numpy.percentile",
"utils.data_utils.add_age_gender",
"torch.cat"
] | [((622, 657), 'torch.load', 'torch.load', (['self.processed_paths[0]'], {}), '(self.processed_paths[0])\n', (632, 657), False, 'import torch\n'), ((1016, 1045), 'torch.load', 'torch.load', (['self.raw_paths[0]'], {}), '(self.raw_paths[0])\n', (1026, 1045), False, 'import torch\n'), ((1611, 1652), 'numpy.clip', 'np.clip', (['feature_matrix_counts.data', '(0)', '(1)'], {}), '(feature_matrix_counts.data, 0, 1)\n', (1618, 1652), True, 'import numpy as np\n'), ((1729, 1841), 'utils.data_utils.add_age_gender', 'add_age_gender', (['feature_matrix_counts', "data['nonTemporalData']", 'windowed_feature_names'], {'age_normalized': '(False)'}), "(feature_matrix_counts, data['nonTemporalData'],\n windowed_feature_names, age_normalized=False)\n", (1743, 1841), False, 'from utils.data_utils import window_data_sorted, add_age_gender\n'), ((2368, 2384), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2382, 2384), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2602, 2678), 'torch.as_tensor', 'torch.as_tensor', (["data['population'].outcomeCount.values"], {'dtype': 'torch.float32'}), "(data['population'].outcomeCount.values, dtype=torch.float32)\n", (2617, 2678), False, 'import torch\n'), ((2706, 2760), 'torch.as_tensor', 'torch.as_tensor', (['demographic_data'], {'dtype': 'torch.float32'}), '(demographic_data, dtype=torch.float32)\n', (2721, 2760), False, 'import torch\n'), ((2976, 3027), 'torch.save', 'torch.save', (['(data, slices)', 'self.processed_paths[0]'], {}), '((data, slices), self.processed_paths[0])\n', (2986, 3027), False, 'import torch\n'), ((3190, 3208), 'tqdm.auto.tqdm', 'tqdm', (['patient_idxs'], {}), '(patient_idxs)\n', (3194, 3208), False, 'from tqdm.auto import tqdm\n'), ((5087, 5114), 'torch_geometric.data.Batch.from_data_list', 'Batch.from_data_list', (['batch'], {}), '(batch)\n', (5107, 5114), False, 'from torch_geometric.data import InMemoryDataset, Data, Batch\n'), ((6232, 6298), 'torch.as_tensor', 
'torch.as_tensor', (['age_df.covariateValue.values'], {'dtype': 'torch.float32'}), '(age_df.covariateValue.values, dtype=torch.float32)\n', (6247, 6298), False, 'import torch\n'), ((6349, 6364), 'torch.sqrt', 'torch.sqrt', (['age'], {}), '(age)\n', (6359, 6364), False, 'import torch\n'), ((6441, 6457), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (6455, 6457), False, 'from sklearn.preprocessing import StandardScaler\n'), ((6752, 6820), 'torch.as_tensor', 'torch.as_tensor', (['other_df.covariateValue.values'], {'dtype': 'torch.float32'}), '(other_df.covariateValue.values, dtype=torch.float32)\n', (6767, 6820), False, 'import torch\n'), ((6841, 6883), 'torch.cat', 'torch.cat', (['[ages, not_age[:, None]]'], {'dim': '(1)'}), '([ages, not_age[:, None]], dim=1)\n', (6850, 6883), False, 'import torch\n'), ((8497, 8563), 'torch.as_tensor', 'torch.as_tensor', (['age_df.covariateValue.values'], {'dtype': 'torch.float32'}), '(age_df.covariateValue.values, dtype=torch.float32)\n', (8512, 8563), False, 'import torch\n'), ((8614, 8629), 'torch.sqrt', 'torch.sqrt', (['age'], {}), '(age)\n', (8624, 8629), False, 'import torch\n'), ((8706, 8722), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (8720, 8722), False, 'from sklearn.preprocessing import StandardScaler\n'), ((9065, 9133), 'torch.as_tensor', 'torch.as_tensor', (['other_df.covariateValue.values'], {'dtype': 'torch.float32'}), '(other_df.covariateValue.values, dtype=torch.float32)\n', (9080, 9133), False, 'import torch\n'), ((9197, 9239), 'torch.cat', 'torch.cat', (['[ages, not_age[:, None]]'], {'dim': '(1)'}), '([ages, not_age[:, None]], dim=1)\n', (9206, 9239), False, 'import torch\n'), ((10765, 10809), 'torch.as_tensor', 'torch.as_tensor', (['labels'], {'dtype': 'torch.float32'}), '(labels, dtype=torch.float32)\n', (10780, 10809), False, 'import torch\n'), ((11426, 11489), 'torch.as_tensor', 'torch.as_tensor', (['linear_predictions.values'], {'dtype': 
'torch.float32'}), '(linear_predictions.values, dtype=torch.float32)\n', (11441, 11489), False, 'import torch\n'), ((13340, 13406), 'torch.as_tensor', 'torch.as_tensor', (['age_df.covariateValue.values'], {'dtype': 'torch.float32'}), '(age_df.covariateValue.values, dtype=torch.float32)\n', (13355, 13406), False, 'import torch\n'), ((13457, 13472), 'torch.sqrt', 'torch.sqrt', (['age'], {}), '(age)\n', (13467, 13472), False, 'import torch\n'), ((13855, 13923), 'torch.as_tensor', 'torch.as_tensor', (['other_df.covariateValue.values'], {'dtype': 'torch.float32'}), '(other_df.covariateValue.values, dtype=torch.float32)\n', (13870, 13923), False, 'import torch\n'), ((13944, 13986), 'torch.cat', 'torch.cat', (['[ages, not_age[:, None]]'], {'dim': '(1)'}), '([ages, not_age[:, None]], dim=1)\n', (13953, 13986), False, 'import torch\n'), ((14814, 14855), 'torch.as_tensor', 'torch.as_tensor', (['labels'], {'dtype': 'torch.long'}), '(labels, dtype=torch.long)\n', (14829, 14855), False, 'import torch\n'), ((15548, 15584), 'torch.as_tensor', 'torch.as_tensor', (['s'], {'dtype': 'torch.long'}), '(s, dtype=torch.long)\n', (15563, 15584), False, 'import torch\n'), ((15697, 15740), 'torch.as_tensor', 'torch.as_tensor', (['targs'], {'dtype': 'torch.float32'}), '(targs, dtype=torch.float32)\n', (15712, 15740), False, 'import torch\n'), ((16233, 16269), 'torch.as_tensor', 'torch.as_tensor', (['s'], {'dtype': 'torch.long'}), '(s, dtype=torch.long)\n', (16248, 16269), False, 'import torch\n'), ((6380, 6421), 'torch.stack', 'torch.stack', (['[age, age_squared, age_sqrt]'], {}), '([age, age_squared, age_sqrt])\n', (6391, 6421), False, 'import torch\n'), ((8645, 8686), 'torch.stack', 'torch.stack', (['[age, age_squared, age_sqrt]'], {}), '([age, age_squared, age_sqrt])\n', (8656, 8686), False, 'import torch\n'), ((13488, 13529), 'torch.stack', 'torch.stack', (['[age, age_squared, age_sqrt]'], {}), '([age, age_squared, age_sqrt])\n', (13499, 13529), False, 'import torch\n'), ((13552, 13589), 
'torch.max', 'torch.max', (['ages[train_indices]'], {'dim': '(0)'}), '(ages[train_indices], dim=0)\n', (13561, 13589), False, 'import torch\n'), ((15631, 15673), 'torch.as_tensor', 'torch.as_tensor', (['lengths'], {'dtype': 'torch.long'}), '(lengths, dtype=torch.long)\n', (15646, 15673), False, 'import torch\n'), ((16316, 16358), 'torch.as_tensor', 'torch.as_tensor', (['lengths'], {'dtype': 'torch.long'}), '(lengths, dtype=torch.long)\n', (16331, 16358), False, 'import torch\n'), ((16383, 16426), 'torch.as_tensor', 'torch.as_tensor', (['targs'], {'dtype': 'torch.float32'}), '(targs, dtype=torch.float32)\n', (16398, 16426), False, 'import torch\n'), ((16428, 16471), 'torch.as_tensor', 'torch.as_tensor', (['preds'], {'dtype': 'torch.float32'}), '(preds, dtype=torch.float32)\n', (16443, 16471), False, 'import torch\n'), ((9310, 9337), 'numpy.percentile', 'np.percentile', (['n_visits', '(99)'], {}), '(n_visits, 99)\n', (9323, 9337), True, 'import numpy as np\n'), ((15461, 15505), 'torch.as_tensor', 'torch.as_tensor', (['sample'], {'dtype': 'torch.float32'}), '(sample, dtype=torch.float32)\n', (15476, 15505), False, 'import torch\n'), ((16146, 16190), 'torch.as_tensor', 'torch.as_tensor', (['sample'], {'dtype': 'torch.float32'}), '(sample, dtype=torch.float32)\n', (16161, 16190), False, 'import torch\n'), ((3924, 3964), 'torch.as_tensor', 'torch.as_tensor', (['[patient_data.shape[1]]'], {}), '([patient_data.shape[1]])\n', (3939, 3964), False, 'import torch\n'), ((9446, 9482), 'torch.as_tensor', 'torch.as_tensor', (['v'], {'dtype': 'torch.long'}), '(v, dtype=torch.long)\n', (9461, 9482), False, 'import torch\n'), ((10416, 10429), 'numpy.array', 'np.array', (['row'], {}), '(row)\n', (10424, 10429), True, 'import numpy as np\n'), ((14601, 14632), 'numpy.array', 'np.array', (['val'], {'dtype': 'np.float32'}), '(val, dtype=np.float32)\n', (14609, 14632), True, 'import numpy as np\n'), ((10176, 10207), 'numpy.array', 'np.array', (['val'], {'dtype': 'np.float32'}), '(val, 
dtype=np.float32)\n', (10184, 10207), True, 'import numpy as np\n'), ((14635, 14648), 'numpy.array', 'np.array', (['row'], {}), '(row)\n', (14643, 14648), True, 'import numpy as np\n'), ((14650, 14663), 'numpy.array', 'np.array', (['col'], {}), '(col)\n', (14658, 14663), True, 'import numpy as np\n'), ((10210, 10223), 'numpy.array', 'np.array', (['row'], {}), '(row)\n', (10218, 10223), True, 'import numpy as np\n'), ((10225, 10238), 'numpy.array', 'np.array', (['col'], {}), '(col)\n', (10233, 10238), True, 'import numpy as np\n'), ((10540, 10571), 'numpy.array', 'np.array', (['val'], {'dtype': 'np.float32'}), '(val, dtype=np.float32)\n', (10548, 10571), True, 'import numpy as np\n'), ((10578, 10591), 'numpy.array', 'np.array', (['row'], {}), '(row)\n', (10586, 10591), True, 'import numpy as np\n'), ((10597, 10610), 'numpy.array', 'np.array', (['col'], {}), '(col)\n', (10605, 10610), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# -*- coding: utf8 -*-
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint # use Runge-Kutta 4
def pend(y, t, b, c):  # right-hand side for odeint
    """Return dy/dt = [omega, -b*omega - c*sin(theta)] for state y = [theta, omega]."""
    theta, omega = y
    return np.array([omega, -b * omega - c * np.sin(theta)])
# Damped pendulum parameters: b = damping coefficient, c = restoring strength.
b, c = 0.25, 5.0 # tuple assignment
# Initial state: angle just below pi, zero angular velocity.
y0 = np.array([np.pi - 0.1, 0.0])
t = np.linspace(0, 10, 101) # on [0,10] with 101 points
# Integrate; sol[:, 0] is theta(t), sol[:, 1] is omega(t).
sol = odeint(pend, y0, t, args=(b, c))
plt.plot(t, sol[:, 0], 'b', label=r'$\theta(t)$') # blue: angle
plt.plot(t, sol[:, 1], 'g', label=r'$\omega(t)$') # green: angular velocity
plt.legend(loc='best')
plt.xlabel('t')
plt.grid()
# NOTE(review): assumes a "figures/" directory already exists — savefig raises
# FileNotFoundError otherwise; confirm or create it beforehand.
plt.savefig("figures/Pendulum_solution.png")
plt.show()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"scipy.integrate.odeint",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.linspace",
"numpy.sin",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((380, 408), 'numpy.array', 'np.array', (['[np.pi - 0.1, 0.0]'], {}), '([np.pi - 0.1, 0.0])\n', (388, 408), True, 'import numpy as np\n'), ((413, 436), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(101)'], {}), '(0, 10, 101)\n', (424, 436), True, 'import numpy as np\n'), ((473, 505), 'scipy.integrate.odeint', 'odeint', (['pend', 'y0', 't'], {'args': '(b, c)'}), '(pend, y0, t, args=(b, c))\n', (479, 505), False, 'from scipy.integrate import odeint\n'), ((507, 556), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'sol[:, 0]', '"""b"""'], {'label': '"""$\\\\theta(t)$"""'}), "(t, sol[:, 0], 'b', label='$\\\\theta(t)$')\n", (515, 556), True, 'import matplotlib.pyplot as plt\n'), ((565, 614), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'sol[:, 1]', '"""g"""'], {'label': '"""$\\\\omega(t)$"""'}), "(t, sol[:, 1], 'g', label='$\\\\omega(t)$')\n", (573, 614), True, 'import matplotlib.pyplot as plt\n'), ((624, 646), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (634, 646), True, 'import matplotlib.pyplot as plt\n'), ((647, 662), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (657, 662), True, 'import matplotlib.pyplot as plt\n'), ((663, 673), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (671, 673), True, 'import matplotlib.pyplot as plt\n'), ((674, 718), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/Pendulum_solution.png"""'], {}), "('figures/Pendulum_solution.png')\n", (685, 718), True, 'import matplotlib.pyplot as plt\n'), ((719, 729), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (727, 729), True, 'import matplotlib.pyplot as plt\n'), ((321, 333), 'numpy.sin', 'np.sin', (['y[0]'], {}), '(y[0])\n', (327, 333), True, 'import numpy as np\n')] |
from PySide2.QtWidgets import QWidget
from SciDataTool.GUI.WVectorSelector.Ui_WVectorSelector import Ui_WVectorSelector
from PySide2.QtCore import Signal
from PySide2.QtGui import QStandardItem
# Maps the human-readable combobox labels to the component keys stored
# in the VectorField data object.
COMP_DICT = {
    "radial": "radial",
    "circumferential": "tangential",
    "axial": "axial",
    "x-axis component": "comp_x",
    "y-axis component": "comp_y",
    "z-axis component": "comp_z",
}
# Inverse mapping: component keys -> combobox labels.
REV_COMP_DICT = {
    "radial": "radial",
    "tangential": "circumferential",
    "axial": "axial",
    "comp_x": "x-axis component",
    "comp_y": "y-axis component",
    "comp_z": "z-axis component",
}
class WVectorSelector(Ui_WVectorSelector, QWidget):
    """Widget to select which component of a VectorField to plot/export."""

    # Emitted whenever the component selection changes and the plot must refresh.
    refreshComponent = Signal()
    def __init__(self, parent=None):
        """Initialize the UI and link widgets to their methods

        Parameters
        ----------
        self : WVectorSelector
            a WVectorSelector object
        parent : QWidget
            The parent widget
        """
        # Build the interface according to the .ui file
        QWidget.__init__(self, parent=parent)
        self.setupUi(self)
        self.c_component.currentTextChanged.connect(self.update_needed)
        # Referential selection is not implemented yet: hide its widgets.
        self.c_referential.hide()
        self.in_referential.hide()
        # self.c_referential.currentTextChanged.connect(self.update_needed)
        self.component_selected = None
        self.component_list = list()
    def get_component_selected(self):
        """Return the component key (COMP_DICT value) for the current selection

        Parameters
        ----------
        self : WVectorSelector
            a WVectorSelector object
        """
        return COMP_DICT[self.c_component.currentText()]
    def set_component(self, component_selected):
        """Set the selected component according to the input of the user (auto-plot)

        Parameters
        ----------
        self : WVectorSelector
            a WVectorSelector object
        component_selected : str
            Component to select (combobox label)
        """
        # Setting the combobox with the right component
        if component_selected in self.component_list:
            self.c_component.setCurrentIndex(
                self.component_list.index(component_selected)
            )
        else:
            print(
                "WARNING : Trying to set the vector to "
                + component_selected
                + " a component which is not available. Setting to default component"
            )
            # Index 0 is a disabled group header, so index 1 is the first
            # selectable component.
            self.c_component.setCurrentIndex(1)
    def update(self, data):
        """Update the combobox according to the components stored in the VectorField

        NOTE(review): this shadows QWidget.update(); presumably intentional,
        but verify no caller relies on the Qt repaint semantics.

        Parameters
        ----------
        self : WVectorSelector
            a WVectorSelector object
        data : VectorField
            the object that we want to plot
        """
        comp_stored = data.components.keys()
        # Block signals while rebuilding the list so update_needed is not fired.
        self.blockSignals(True)
        self.c_component.clear()
        self.c_component.addItems([REV_COMP_DICT[comp] for comp in comp_stored])
        model = self.c_component.model()
        if "radial" in comp_stored or "tangential" in comp_stored:
            # Insert a bold, disabled "Polar coordinates" group header on top.
            item = QStandardItem("Polar coordinates")
            font = item.font()
            font.setBold(True)
            item.setFont(font)
            item.setEnabled(False)
            model.insertRow(0, item)
            try:
                # If the data can be converted, also offer cartesian components.
                data.to_xyz()
                item = QStandardItem("Cartesian coordinates")
                font = item.font()
                font.setBold(True)
                item.setFont(font)
                item.setEnabled(False)
                model.insertRow(self.c_component.count(), item)
                self.c_component.addItem("x-axis component")
                self.c_component.addItem("y-axis component")
                if "axial" in comp_stored:
                    self.c_component.addItem("z-axis component")
            except:
                # Conversion not possible: keep polar components only.
                pass
        elif "comp_x" in comp_stored or "comp_y" in comp_stored:
            # Same logic, starting from cartesian components.
            item = QStandardItem("Cartesian coordinates")
            font = item.font()
            font.setBold(True)
            item.setFont(font)
            item.setEnabled(False)
            model.insertRow(0, item)
            try:
                data.to_rphiz()
                item = QStandardItem("Polar coordinates")
                font = item.font()
                font.setBold(True)
                item.setFont(font)
                item.setEnabled(False)
                model.insertRow(self.c_component.count(), item)
                self.c_component.addItem("radial")
                self.c_component.addItem("circumferential")
                if "comp_z" in comp_stored:
                    self.c_component.addItem("axial")
            except:
                pass
        # Recovering all the components available after the update
        # (group headers are excluded).
        self.component_list = [
            self.c_component.itemText(i)
            for i in range(self.c_component.count())
            if self.c_component.itemText(i)
            not in ["Polar coordinates", "Cartesian coordinates"]
        ]
        # Modifying the width of the dropdown list to make sure that all the element are readable
        component_list = [
            self.c_component.itemText(i) for i in range(self.c_component.count())
        ]
        width_drop_down = max([len(ac) for ac in component_list]) * 6
        self.c_component.view().setMinimumWidth(width_drop_down)
        self.c_component.setCurrentIndex(1)
        self.blockSignals(False)
    def update_needed(self):
        """Emit a signal when the component must be changed

        Parameters
        ----------
        self : WVectorSelector
            a WVectorSelector object
        """
        # if self.c_component.currentText() in [
        #     "Polar coordinates",
        #     "Cartesian coordinates",
        # ]:
        #     self.c_component.setCurrentIndex(self.c_component.currentIndex() + 1)
        self.refreshComponent.emit()
| [
"PySide2.QtWidgets.QWidget.__init__",
"PySide2.QtGui.QStandardItem",
"PySide2.QtCore.Signal"
] | [((731, 739), 'PySide2.QtCore.Signal', 'Signal', ([], {}), '()\n', (737, 739), False, 'from PySide2.QtCore import Signal\n'), ((1075, 1112), 'PySide2.QtWidgets.QWidget.__init__', 'QWidget.__init__', (['self'], {'parent': 'parent'}), '(self, parent=parent)\n', (1091, 1112), False, 'from PySide2.QtWidgets import QWidget\n'), ((3135, 3169), 'PySide2.QtGui.QStandardItem', 'QStandardItem', (['"""Polar coordinates"""'], {}), "('Polar coordinates')\n", (3148, 3169), False, 'from PySide2.QtGui import QStandardItem\n'), ((3405, 3443), 'PySide2.QtGui.QStandardItem', 'QStandardItem', (['"""Cartesian coordinates"""'], {}), "('Cartesian coordinates')\n", (3418, 3443), False, 'from PySide2.QtGui import QStandardItem\n'), ((4007, 4045), 'PySide2.QtGui.QStandardItem', 'QStandardItem', (['"""Cartesian coordinates"""'], {}), "('Cartesian coordinates')\n", (4020, 4045), False, 'from PySide2.QtGui import QStandardItem\n'), ((4283, 4317), 'PySide2.QtGui.QStandardItem', 'QStandardItem', (['"""Polar coordinates"""'], {}), "('Polar coordinates')\n", (4296, 4317), False, 'from PySide2.QtGui import QStandardItem\n')] |
#!/usr/bin/env python3
"""Defines a status route for the HolbertonBnB API."""
from flask import jsonify
from flasgger import swag_from
from models import storage
from api.v1.views import app_views
@app_views.route("/status")
@swag_from("../apidocs/status/status.yml")
def status():
"""Returns the server status.
Returns:
JSON object with the current server status.
"""
return jsonify({"status": "OK"})
@app_views.route("/stats")
@swag_from("../apidocs/stats/stats.yml")
def stats():
"""Retrives the count of each object type.
Returns:
JSON object with the number of objects by type."""
return jsonify({
"amenities": storage.count("Amenity"),
"cities": storage.count("City"),
"places": storage.count("Place"),
"reviews": storage.count("Review"),
"states": storage.count("State"),
"users": storage.count("User")
})
| [
"models.storage.count",
"flasgger.swag_from",
"api.v1.views.app_views.route",
"flask.jsonify"
] | [((200, 226), 'api.v1.views.app_views.route', 'app_views.route', (['"""/status"""'], {}), "('/status')\n", (215, 226), False, 'from api.v1.views import app_views\n'), ((228, 269), 'flasgger.swag_from', 'swag_from', (['"""../apidocs/status/status.yml"""'], {}), "('../apidocs/status/status.yml')\n", (237, 269), False, 'from flasgger import swag_from\n'), ((432, 457), 'api.v1.views.app_views.route', 'app_views.route', (['"""/stats"""'], {}), "('/stats')\n", (447, 457), False, 'from api.v1.views import app_views\n'), ((459, 498), 'flasgger.swag_from', 'swag_from', (['"""../apidocs/stats/stats.yml"""'], {}), "('../apidocs/stats/stats.yml')\n", (468, 498), False, 'from flasgger import swag_from\n'), ((403, 428), 'flask.jsonify', 'jsonify', (["{'status': 'OK'}"], {}), "({'status': 'OK'})\n", (410, 428), False, 'from flask import jsonify\n'), ((674, 698), 'models.storage.count', 'storage.count', (['"""Amenity"""'], {}), "('Amenity')\n", (687, 698), False, 'from models import storage\n'), ((718, 739), 'models.storage.count', 'storage.count', (['"""City"""'], {}), "('City')\n", (731, 739), False, 'from models import storage\n'), ((759, 781), 'models.storage.count', 'storage.count', (['"""Place"""'], {}), "('Place')\n", (772, 781), False, 'from models import storage\n'), ((802, 825), 'models.storage.count', 'storage.count', (['"""Review"""'], {}), "('Review')\n", (815, 825), False, 'from models import storage\n'), ((845, 867), 'models.storage.count', 'storage.count', (['"""State"""'], {}), "('State')\n", (858, 867), False, 'from models import storage\n'), ((886, 907), 'models.storage.count', 'storage.count', (['"""User"""'], {}), "('User')\n", (899, 907), False, 'from models import storage\n')] |
import inspect
class Queue(object):
    '''
    Queue data structure FIFO - First In First Out

    Elements are appended at the rear and removed from the front.
    '''
    def __init__(self, capacity=10):
        '''
        :param capacity: max capacity of the queue, default is 10
        '''
        self.queue = []
        self.front = None  # index of the first element, None when empty
        self.rear = None   # index of the last element, None when empty
        self.size = 0
        self.capacity = capacity
    def __str__(self):
        '''
        :return: space-separated items from front to rear
        '''
        return ' '.join(str(i) for i in self.queue)
    def get_size(self):
        '''
        :return: current size of the queue
        '''
        return self.size
    def is_empty(self):
        '''
        :return: true if queue is empty, false otherwise
        '''
        return self.size == 0
    def enqueue(self, value):
        '''
        Add a value at the rear of the queue.

        :param value: value to be enqueued
        :return: -1 if queue is full, None otherwise
        '''
        if self.size >= self.capacity:
            return -1
        self.queue.append(value)
        if self.front is None:
            self.front = 0
        self.rear = self.size  # index of the element just appended
        self.size += 1
    # Backward-compatible alias for the original (misspelled) method name.
    enequeue = enqueue
    def dequeue(self):
        '''
        Remove and return the element at the front of the queue.

        :return: the element removed from the queue, None if queue is empty
        '''
        if self.is_empty():
            return None
        self.size -= 1
        if self.size == 0:
            # Bug fix: reset indices to their initial (None) state instead of 0
            # so that enqueue() on an emptied queue re-initializes correctly.
            self.front = self.rear = None
        else:
            self.rear = self.size - 1
        return self.queue.pop(0)
    @staticmethod
    def get_code():
        '''
        :return: source code for the current class
        '''
        return inspect.getsource(Queue)
class Deque(object):
    '''
    Deque -> doubly ended queue

    Supports insertion and removal at both ends.
    '''
    def __init__(self, capacity=10):
        '''
        :param capacity: max capacity of the deque
        '''
        self.queue = []
        self.capacity = capacity
    def __str__(self):
        return ' '.join(str(i) for i in self.queue)
    def is_full(self):
        '''
        to check whether deque is full or not
        :return: true if deque is full, false otherwise
        '''
        return len(self.queue) == self.capacity
    def is_empty(self):
        '''
        to check whether deque is empty or not
        :return: true if deque is empty, false otherwise
        '''
        return len(self.queue) == 0
    def insert_right(self, info):
        '''
        :param info: data to be added at the right end
        :return: None if deque is full
        '''
        if self.is_full():
            return None
        self.queue.append(info)
    def insert_left(self, info):
        '''
        :param info: data to be added at the left end
        :return: None if deque is full
        '''
        if self.is_full():
            return None
        self.queue.insert(0, info)
    def remove_left(self):
        '''
        :return: element removed from the left end, None if deque is empty
        '''
        if self.is_empty():
            return None
        return self.queue.pop(0)
    def remove_right(self):
        '''
        :return: element removed from the right end, None if deque is empty
        '''
        if self.is_empty():
            return None
        # Bug fix: the popped value was previously discarded instead of
        # returned, inconsistent with remove_left() and the docstring.
        return self.queue.pop()
    @staticmethod
    def get_code():
        '''
        :return: source code for the current class
        '''
        return inspect.getsource(Deque)
# TODO -> add priority queue and circuler queue for concept purpose
| [
"inspect.getsource"
] | [((1659, 1683), 'inspect.getsource', 'inspect.getsource', (['Queue'], {}), '(Queue)\n', (1676, 1683), False, 'import inspect\n'), ((3428, 3452), 'inspect.getsource', 'inspect.getsource', (['Deque'], {}), '(Deque)\n', (3445, 3452), False, 'import inspect\n')] |
# Copyright 2021 (David) <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
def _get_lib_root():
root = os.path.join(os.path.dirname(__file__), "..")
return os.path.abspath(root)
class _PrefixPathDict(dict):
def __init__(self, prefix):
super(_PrefixPathDict, self).__init__()
self._prefix = prefix
def __setitem__(self, key, value):
if isinstance(value, str):
value = os.path.join(self._prefix, value)
super(_PrefixPathDict, self).__setitem__(key, value)
def _PackageDataMeta(prefix):
    """Build a metaclass whose class namespace is a _PrefixPathDict rooted at
    <lib_root>/<prefix>, so string class attributes become absolute paths."""
    class _Meta(type):
        @classmethod
        def __prepare__(metacls, name, bases):
            # NOTE(review): type.__prepare__ ignores its arguments and returns
            # an empty dict, so `origin` is presumably always falsy here; the
            # odd `__name`/`__bases` keywords only work because of that — verify.
            origin = super(_Meta, _Meta).__prepare__(metacls=metacls,
                                                     __name=name,
                                                     __bases=bases)
            # Namespace that rewrites string values to paths under the prefix.
            pfx_path_dict = _PrefixPathDict(
                os.path.join(_get_lib_root(), prefix))
            if origin: pfx_path_dict.update(origin)
            return pfx_path_dict
    return _Meta
class Template(metaclass=_PackageDataMeta("data")):
    # Thanks to the metaclass, these resolve to absolute paths under
    # <lib_root>/data at class-creation time.
    PY_INIT = "__init__.tmpl"
    PY_SETUP = "setup.tmpl"
| [
"os.path.abspath",
"os.path.dirname",
"os.path.join"
] | [((888, 909), 'os.path.abspath', 'os.path.abspath', (['root'], {}), '(root)\n', (903, 909), False, 'import os\n'), ((846, 871), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (861, 871), False, 'import os\n'), ((1124, 1157), 'os.path.join', 'os.path.join', (['self._prefix', 'value'], {}), '(self._prefix, value)\n', (1136, 1157), False, 'import os\n')] |
# Time: O(nlogk)
# Space: O(k)
import heapq
class KthLargest(object):
    def __init__(self, k, nums):
        """
        :type k: int
        :type nums: List[int]
        """
        # Min-heap holding at most the k largest values seen so far;
        # its root is the k-th largest element of the stream.
        self.__k = k
        self.__min_heap = []
        for num in nums:
            self.add(num)

    def add(self, val):
        """
        :type val: int
        :rtype: int
        """
        heapq.heappush(self.__min_heap, val)
        # Drop the smallest value once the heap exceeds k entries.
        while len(self.__min_heap) > self.__k:
            heapq.heappop(self.__min_heap)
        return self.__min_heap[0]
| [
"heapq.heappush",
"heapq.heappop"
] | [((379, 415), 'heapq.heappush', 'heapq.heappush', (['self.__min_heap', 'val'], {}), '(self.__min_heap, val)\n', (393, 415), False, 'import heapq\n'), ((472, 502), 'heapq.heappop', 'heapq.heappop', (['self.__min_heap'], {}), '(self.__min_heap)\n', (485, 502), False, 'import heapq\n')] |
import os
import platform
from configparser import ExtendedInterpolation
from pathlib import Path
from subprocess import run
from sys import exit
from typing import List
from blulib.config_parser import ConfigParser
from tealprint import TealPrint
from tealprint.teallevel import TealLevel
from youtube_series_downloader.config import General, config
from youtube_series_downloader.core.channel import Channel
class ConfigGateway:
    """Reads the application configuration file (~/.<app_name>.cfg) and maps
    its sections onto General and Channel objects."""

    def __init__(self) -> None:
        # Config file lives in the user's home directory.
        self.path = Path.home().joinpath(f".{config.app_name}.cfg")
        self.parser = ConfigParser(interpolation=ExtendedInterpolation())
    def check_config_exists(self) -> None:
        """If no config file exists, offer to copy the example config and open
        it in the user's editor; otherwise exit."""
        if not self.path.exists():
            TealPrint.info(f"Could not find any configuration file in {self.path}")
            user_input = input("Do you want to copy the example config and edit it (y/n)?")
            if user_input.lower() == "y":
                self.parser.copy_example_if_conf_not_exists(config.app_name)
                # Pick the editor: $EDITOR, else notepad on Windows, else vim.
                editor = ""
                if "EDITOR" in os.environ:
                    editor = os.environ["EDITOR"]
                if editor == "" and platform.system() == "Windows":
                    editor = "notepad.exe"
                elif editor == "":
                    editor = "vim"
                run([editor, self.path])
            else:
                exit(0)
    def read(self):
        """Parse the configuration file at self.path."""
        self.parser.read(self.path)
    def get_general(self) -> General:
        """Build a General object from the [General] section.

        Exits (via TealPrint.warning) when 'series_dir' is missing; falls back
        to TealLevel.info for an invalid log_level string."""
        general = General()
        self.parser.to_object(
            general,
            "General",
            "series_dir",
            "int:threads",
            "float:speed_up_default",
            "int:max_days_back",
            "log_level",
        )
        if not general.series_dir:
            TealPrint.warning(f"Missing 'series_dir' in [General] in your configuration. Please add it.", exit=True)
        # Convert string to LogLevel
        if isinstance(general.log_level, str):
            try:
                general.log_level = TealLevel[general.log_level]
            except KeyError:
                TealPrint.warning(
                    f"Failed to set log_level from config, invalid level: {general.log_level}. Setting log_level to info"
                )
                general.log_level = TealLevel.info
        return general
    def get_channels(self) -> List[Channel]:
        """Build a Channel for every non-reserved section of the config.

        Exits (via TealPrint.warning) when a channel section lacks an 'id'."""
        channels: List[Channel] = []
        for section in self.parser.sections():
            if ConfigGateway.is_channel_section(section):
                channel = Channel()
                channel.name = section
                self.parser.to_object(
                    channel,
                    section,
                    "id",
                    "name",
                    "dir->collection_dir",
                    "float:speed",
                    "str_list:includes",
                    "str_list:excludes",
                )
                if not channel.id:
                    TealPrint.warning(
                        f"Missing 'id' for channel [{section}] in your configuration. Please add it.", exit=True
                    )
                channels.append(channel)
        return channels
    @staticmethod
    def is_channel_section(section: str) -> bool:
        """Return True unless the section is one of the reserved names."""
        return section != "General" and section != "DEFAULT" and section != "vars"
| [
"tealprint.TealPrint.info",
"pathlib.Path.home",
"subprocess.run",
"platform.system",
"youtube_series_downloader.core.channel.Channel",
"tealprint.TealPrint.warning",
"sys.exit",
"youtube_series_downloader.config.General",
"configparser.ExtendedInterpolation"
] | [((1482, 1491), 'youtube_series_downloader.config.General', 'General', ([], {}), '()\n', (1489, 1491), False, 'from youtube_series_downloader.config import General, config\n'), ((699, 770), 'tealprint.TealPrint.info', 'TealPrint.info', (['f"""Could not find any configuration file in {self.path}"""'], {}), "(f'Could not find any configuration file in {self.path}')\n", (713, 770), False, 'from tealprint import TealPrint\n'), ((1773, 1886), 'tealprint.TealPrint.warning', 'TealPrint.warning', (['f"""Missing \'series_dir\' in [General] in your configuration. Please add it."""'], {'exit': '(True)'}), '(\n f"Missing \'series_dir\' in [General] in your configuration. Please add it.",\n exit=True)\n', (1790, 1886), False, 'from tealprint import TealPrint\n'), ((486, 497), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (495, 497), False, 'from pathlib import Path\n'), ((583, 606), 'configparser.ExtendedInterpolation', 'ExtendedInterpolation', ([], {}), '()\n', (604, 606), False, 'from configparser import ExtendedInterpolation\n'), ((1300, 1324), 'subprocess.run', 'run', (['[editor, self.path]'], {}), '([editor, self.path])\n', (1303, 1324), False, 'from subprocess import run\n'), ((1360, 1367), 'sys.exit', 'exit', (['(0)'], {}), '(0)\n', (1364, 1367), False, 'from sys import exit\n'), ((2538, 2547), 'youtube_series_downloader.core.channel.Channel', 'Channel', ([], {}), '()\n', (2545, 2547), False, 'from youtube_series_downloader.core.channel import Channel\n'), ((2090, 2220), 'tealprint.TealPrint.warning', 'TealPrint.warning', (['f"""Failed to set log_level from config, invalid level: {general.log_level}. Setting log_level to info"""'], {}), "(\n f'Failed to set log_level from config, invalid level: {general.log_level}. Setting log_level to info'\n )\n", (2107, 2220), False, 'from tealprint import TealPrint\n'), ((2972, 3089), 'tealprint.TealPrint.warning', 'TealPrint.warning', (['f"""Missing \'id\' for channel [{section}] in your configuration. 
Please add it."""'], {'exit': '(True)'}), '(\n f"Missing \'id\' for channel [{section}] in your configuration. Please add it."\n , exit=True)\n', (2989, 3089), False, 'from tealprint import TealPrint\n'), ((1139, 1156), 'platform.system', 'platform.system', ([], {}), '()\n', (1154, 1156), False, 'import platform\n')] |
"""
@author: <NAME> (University of Sydney)
-------------------------------------------------------------------------
AMICAL: Aperture Masking Interferometry Calibration and Analysis Library
-------------------------------------------------------------------------
Function related to data cleaning (ghost, background correction,
centering, etc.) and data selection (sigma-clipping, centered flux,).
--------------------------------------------------------------------
"""
import numpy as np
from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans
from astropy.io import fits
from matplotlib import pyplot as plt
from matplotlib.colors import PowerNorm
from termcolor import cprint
from tqdm import tqdm
from amical.tools import apply_windowing, crop_max
def _apply_patch_ghost(cube, xc, yc, radius=20, dx=0, dy=-200, method='bg'):
"""Apply a patch on an eventual artifacts/ghosts on the spectral filter (i.e.
K1 filter of SPHERE presents an artifact/ghost at (392, 360)).
Arguments:
----------
`cube` {array} -- Data cube,\n
`xc` {int} -- x-axis position of the artifact,\n
`yc` {int} -- y-axis position of the artifact.
Keyword Arguments:
----------
`radius` {int} -- Radius to apply the patch in a circle (default: {10}),\n
`dy` {int} -- Offset pixel number to compute background values (default: {0}),\n
`dx` {int} -- Same along y-axis (default: {0}),\n
`method` {str} -- If 'bg', the replacement values are the background computed at
xc+dx, yx+dy, else zero is apply (default: {'bg'}).
"""
cube_corrected = []
for i in range(len(cube)):
imA = cube[i].copy()
isz = imA.shape[0]
xc_off, yc_off = xc+dx, yc+dy
xx, yy = np.arange(isz), np.arange(isz)
xx_c = (xx-xc)
yy_c = (yc-yy)
xx_off = (xx-xc_off)
yy_off = (yc_off-yy)
distance = np.sqrt(xx_c**2 + yy_c[:, np.newaxis]**2)
distance_off = np.sqrt(xx_off**2 + yy_off[:, np.newaxis]**2)
cond_patch = (distance <= radius)
cond_bg = (distance_off <= radius)
if method == 'bg':
imA[cond_patch] = imA[cond_bg]
elif method == 'zero':
imA[cond_patch] = 0
cube_corrected.append(imA)
cube_corrected = np.array(cube_corrected)
return cube_corrected
def select_data(cube, clip_fact=0.5, clip=False, verbose=True, display=True):
    """ Check the cleaned data cube using the position of the maximum in the
    fft image (supposed to be zero). If not in zero position, the frame is
    rejected. It can apply a sigma-clipping to select only the frames with the
    highest total fluxes.

    Parameters:
    -----------
    `cube` {array} -- Data cube,\n
    `clip_fact` {float} -- Relative sigma if rejecting frames by
    sigma-clipping (default=False),\n
    `clip` {bool} -- If True, sigma-clipping is used,\n
    `verbose` {bool} -- If True, print informations in the terminal,\n
    `display` {bool} -- If True, plot figures.
    """
    # 2D FFT of each frame; the DC term [0, 0] is the total flux.
    fft_fram = abs(np.fft.fft2(cube))
    # flag_fram, cube_flagged, cube_cleaned_checked = [], [], []
    fluxes, flag_fram, good_fram = [], [], []
    for i in range(len(fft_fram)):
        fluxes.append(fft_fram[i][0, 0])
        # A well-centered frame has its FFT maximum at the flat index 0;
        # anything else flags the frame as badly centered.
        pos_max = np.argmax(fft_fram[i])
        if pos_max != 0:
            flag_fram.append(i)
        else:
            good_fram.append(cube[i])
    fluxes = np.array(fluxes)
    flag_fram = np.array(flag_fram)
    best_fr = np.argmax(fluxes)
    worst_fr = np.argmin(fluxes)
    std_flux = np.std(fluxes)
    med_flux = np.median(fluxes)
    if verbose:
        # Large relative scatter in flux suggests enabling sigma-clipping.
        if (med_flux/std_flux) <= 5.:
            cprint('\nStd of the fluxes along the cube < 5 (%2.1f):\n -> sigma clipping is suggested (clip=True).' % (
                (med_flux/std_flux)), 'cyan')
    # Frames below this flux level are rejected when clip=True.
    limit_flux = med_flux - clip_fact*std_flux
    if clip:
        cond_clip = (fluxes > limit_flux)
        cube_cleaned_checked = cube[cond_clip]
        ind_clip = np.where(fluxes <= limit_flux)[0]
    else:
        ind_clip = []
        cube_cleaned_checked = np.array(good_fram)
    ind_clip2 = np.where(fluxes <= limit_flux)[0]
    # Annotate the worst frame's title when it was actually rejected.
    if ((worst_fr in ind_clip2) and clip) or (worst_fr in flag_fram):
        ext = '(rejected)'
    else:
        ext = ''
    # Peak-to-peak flux variation relative to the median, in percent.
    diffmm = 100*abs(np.max(fluxes) - np.min(fluxes))/med_flux
    if display:
        plt.figure()
        plt.plot(fluxes, label=r'|$\Delta F$|/$\sigma_F$=%2.0f (%2.2f %%)' %
                 (med_flux/std_flux, diffmm))
        if len(flag_fram) > 0:
            plt.scatter(flag_fram, fluxes[flag_fram],
                        s=52, facecolors='none', edgecolors='r', label='Rejected frames (maximum fluxes)')
        if clip:
            if len(ind_clip) > 0:
                plt.plot(ind_clip, fluxes[ind_clip], 'rx',
                         label='Rejected frames (clipping)')
            else:
                print('0')
            plt.hlines(limit_flux, 0, len(fluxes), lw=1,
                       ls='--', label='Clipping limit', zorder=10)
        plt.legend(loc='best', fontsize=9)
        plt.ylabel('Flux [counts]')
        plt.xlabel('# frames')
        plt.grid(alpha=.2)
        plt.tight_layout()
        # Second figure: best/worst frames and their (shifted) FFT amplitudes.
        plt.figure(figsize=(7, 7))
        plt.subplot(2, 2, 1)
        plt.title('Best fram (%i)' % best_fr)
        plt.imshow(cube[best_fr], norm=PowerNorm(.5), cmap='afmhot', vmin=0)
        plt.subplot(2, 2, 2)
        plt.imshow(np.fft.fftshift(fft_fram[best_fr]), cmap='gist_stern')
        plt.subplot(2, 2, 3)
        plt.title('Worst fram (%i) %s' % (worst_fr, ext))
        plt.imshow(cube[worst_fr], norm=PowerNorm(.5), cmap='afmhot', vmin=0)
        plt.subplot(2, 2, 4)
        plt.imshow(np.fft.fftshift(fft_fram[worst_fr]), cmap='gist_stern')
        plt.tight_layout()
        plt.show(block=False)
    if verbose:
        n_good = len(cube_cleaned_checked)
        n_bad = len(cube) - n_good
        if clip:
            cprint('\n---- σ-clip + centered fluxes selection ---', 'cyan')
        else:
            cprint('\n---- centered fluxes selection ---', 'cyan')
        print('%i/%i (%2.1f%%) are flagged as bad frames' %
              (n_bad, len(cube), 100*float(n_bad)/len(cube)))
    return cube_cleaned_checked
def sky_correction(imA, r1=100, dr=20, verbose=False):
    """Subtract the mean sky background so the image level is as close to
    zero as possible.

    The background is measured on an annulus centered on the image,
    spanning radii [r1, r1 + dr]. Returns the background-subtracted image
    and the residual background measured on the same annulus.
    """
    size = imA.shape[0]
    center = size // 2
    axis = np.arange(size)
    # Radial distance of every pixel from the image center.
    radial_dist = np.sqrt((axis - center) ** 2
                          + (center - axis)[:, np.newaxis] ** 2)
    annulus = (radial_dist >= r1) & (radial_dist <= r1 + dr)
    try:
        # Shift the image to strictly positive values before estimating
        # the background, then subtract the annulus mean.
        shifted = imA + 1.01 * abs(imA.min())
        corrected = shifted - np.mean(shifted[annulus])
        residual = np.mean(corrected[annulus])
    except IndexError:
        corrected = imA.copy()
        residual = 0
        if verbose:
            cprint('Warning: Background not computed', 'green')
            cprint(
                '-> check the inner and outer radius rings (checkrad option).', 'green')
    return corrected, residual
def fix_bad_pixels(image, bad_map, add_bad=None, x_stddev=1):
    """ Replace bad pixels with values interpolated from their neighbors
    (interpolation is made with a gaussian kernel convolution).

    Parameters:
    -----------
    `image` {array} -- 2D image to fix,\n
    `bad_map` {array} -- Bad pixel map with 1 marking a bad pixel,\n
    `add_bad` {list} -- Extra (x, y) coordinates of bad pixels/cosmic rays
    to add to the map (default: None),\n
    `x_stddev` {float} -- Stddev of the gaussian interpolation kernel.
    """
    # Bug fix: avoid the shared mutable default (add_bad=[]) and do not
    # mutate the caller's bad_map in place when extra bad pixels are given.
    if add_bad is None:
        add_bad = []
    if len(add_bad) != 0:
        bad_map = bad_map.copy()
        for j in range(len(add_bad)):
            bad_map[add_bad[j][0], add_bad[j][1]] = 1
    img_nan = image.copy()
    img_nan[bad_map == 1] = np.nan
    kernel = Gaussian2DKernel(x_stddev=x_stddev)
    fixed_image = interpolate_replace_nans(img_nan, kernel)
    return fixed_image
return fixed_image
def check_data_params(filename, isz, r1, dr, bad_map=None, add_bad=[],
                      edge=0, remove_bad=True, nframe=0, ihdu=0, f_kernel=3,
                      offx=0, offy=0, apod=False, window=None):
    """ Check the input parameters for the cleaning.

    Parameters:
    -----------

    `filename` {str}: filename containing the datacube,\n
    `isz` {int}: Size of the cropped image (default: 256)\n
    `r1` {int}: Radius of the rings to compute background sky (default: 100)\n
    `dr` {int}: Outer radius to compute sky (default: 10)\n
    `bad_map` {array}: Bad pixel map with 0 and 1 where 1 set for a bad pixel (default: None),\n
    `add_bad` {list}: List of 2d coordinates of bad pixels/cosmic rays (default: []),\n
    `edge` {int}: Number of pixel to be removed on the edge of the image (SPHERE),\n
    `remove_bad` {bool}: If True, the bad pixels are removed using a gaussian interpolation,\n
    `nframe` {int}: Frame number to be shown (default: 0),\n
    `ihdu` {int}: Hdu number of the fits file. Normally 1 for NIRISS and 0 for SPHERE (default: 0).

    Returns the matplotlib figure showing the selected frame with the sky
    annulus, crop box and bad pixels overlaid.
    """
    data = fits.open(filename)[ihdu].data
    img0 = data[nframe]
    # Zero the image borders (e.g. VLT/SPHERE edge artifacts).
    if edge != 0:
        img0[:, 0:edge] = 0
        img0[:, -edge:-1] = 0
        img0[0:edge, :] = 0
        img0[-edge:-1, :] = 0
    # NOTE: bitwise & on booleans; works here because both operands are bool.
    if (bad_map is not None) & (remove_bad):
        img1 = fix_bad_pixels(img0, bad_map, add_bad=add_bad)
    else:
        img1 = img0.copy()
    cropped_infos = crop_max(img1, isz, offx=offx, offy=offy, f=f_kernel)
    pos = cropped_infos[1]
    noBadPixel = False
    bad_pix_x, bad_pix_y = [], []
    if (bad_map is not None) or (len(add_bad) != 0):
        # NOTE(review): this mutates the caller's bad_map in place — confirm
        # callers do not reuse the array expecting it unchanged.
        for j in range(len(add_bad)):
            bad_map[add_bad[j][0], add_bad[j][1]] = 1
        bad_pix = np.where(bad_map == 1)
        bad_pix_x = bad_pix[0]
        bad_pix_y = bad_pix[1]
    else:
        noBadPixel = True
    r2 = r1 + dr
    # Circles (inner/outer sky radii) centered on the detected maximum.
    theta = np.linspace(0, 2*np.pi, 100)
    x0 = pos[0]
    y0 = pos[1]
    x1 = r1 * np.cos(theta) + x0
    y1 = r1 * np.sin(theta) + y0
    x2 = r2 * np.cos(theta) + x0
    y2 = r2 * np.sin(theta) + y0
    if window is not None:
        r3 = window
        x3 = r3 * np.cos(theta) + x0
        y3 = r3 * np.sin(theta) + y0
    # Corners of the isz x isz crop box around the maximum.
    xs1, ys1 = x0 + isz//2, y0 + isz//2
    xs2, ys2 = x0 - isz//2, y0 + isz//2
    xs3, ys3 = x0 - isz//2, y0 - isz//2
    xs4, ys4 = x0 + isz//2, y0 - isz//2
    max_val = img1[y0, x0]
    fig = plt.figure(figsize=(6, 6))
    plt.imshow(img1, norm=PowerNorm(.5), cmap='afmhot', vmin=0, vmax=max_val)
    plt.plot(x1, y1, label='Inner radius for sky subtraction')
    plt.plot(x2, y2, label='Outer radius for sky subtraction')
    if apod:
        if window is not None:
            plt.plot(x3, y3, label='Super-gaussian windowing')
    plt.plot(x0, y0, '+', color='g', ms=10, label='Centering position')
    plt.plot([xs1, xs2, xs3, xs4, xs1], [ys1, ys2, ys3, ys4, ys1], 'w--',
             label='Resized image')
    if not noBadPixel:
        if remove_bad:
            label = 'Fixed hot/bad pixels'
        else:
            label = 'Hot/bad pixels'
        # NOTE(review): color='' may be rejected by recent matplotlib
        # versions — verify against the pinned matplotlib release.
        plt.scatter(bad_pix_y, bad_pix_x, color='', marker='s',
                    edgecolors='r', s=20, label=label)
    plt.legend(fontsize=7, loc=1)
    plt.tight_layout()
    return fig
def clean_data(data, isz=None, r1=None, dr=None, edge=0,
               r2=None, bad_map=None, add_bad=[], apod=True,
               offx=0, offy=0, sky=True, window=None,
               f_kernel=3, verbose=False):
    """ Clean data.

    Parameters:
    -----------

    `data` {np.array} -- datacube containing the NRM data\n
    `isz` {int} -- Size of the cropped image (default: {None})\n
    `r1` {int} -- Radius of the rings to compute background sky (default: {None})\n
    `dr` {int} -- Outer radius to compute sky (default: {None})\n
    `edge` {int} -- Patch the edges of the image (VLT/SPHERE artifact, default: {200}),\n

    Returns:
    --------
    `cube` {np.array} -- Cleaned datacube, or None if the crop produced a
    non-square image.
    """
    # print(data.shape[1])
    # if data.shape[1] % 2 == 1:
    #     data = np.array([im[:-1, :-1] for im in data])
    n_im = data.shape[0]
    cube_cleaned = np.zeros([n_im, isz, isz])
    for i in tqdm(range(n_im), ncols=100, desc='Cleaning', leave=False):
        img0 = data[i]
        # Zero the image borders (e.g. VLT/SPHERE edge artifacts).
        if edge != 0:
            img0[:, 0:edge] = 0
            img0[:, -edge:-1] = 0
            img0[0:edge, :] = 0
            img0[-edge:-1, :] = 0
        if bad_map is not None:
            img1 = fix_bad_pixels(img0, bad_map, add_bad=add_bad)
        else:
            img1 = img0.copy()
        # Crop an isz x isz window centered on the (smoothed) maximum.
        im_rec_max = crop_max(img1, isz, offx=offx, offy=offy, f=f_kernel)[0]
        if sky:
            img_biased = sky_correction(im_rec_max, r1=r1, dr=dr,
                                        verbose=verbose)[0]
        else:
            img_biased = im_rec_max.copy()
        img_biased[img_biased < 0] = 0  # Remove negative pixels
        if img_biased.shape[0] != img_biased.shape[1]:
            cprint(
                '\nCropped image do not have same X, Y dimensions -> check isz', 'red')
            return None
        if apod:
            # NOTE(review): r2 is set here but never used afterwards —
            # apply_windowing only receives `window`; confirm intent.
            if r2 is None:
                r2 = isz//3
            img = apply_windowing(img_biased, window=window)
        else:
            img = img_biased.copy()
        cube_cleaned[i] = img
    return cube_cleaned
def select_clean_data(filename, isz=256, r1=100, r2=None, dr=10, edge=0,
                      clip=True, bad_map=None, add_bad=None, offx=0, offy=0,
                      clip_fact=0.5, apod=True, sky=True, window=None,
                      f_kernel=3, verbose=False, ihdu=0, display=False):
    """ Clean and select good datacube (sigma-clipping using fluxes variations).

    Parameters:
    -----------
    `filename` {str}: filename containing the datacube,\n
    `isz` {int}: Size of the cropped image (default: 256)\n
    `r1` {int}: Radius of the rings to compute background sky (default: 100)\n
    `dr` {int}: Outer radius to compute sky (default: 10)\n
    `edge` {int}: Patch the edges of the image (VLT/SPHERE artifact, default: {100}),\n
    `clip` {bool}: If True, sigma-clipping is used to reject frames with low integrated flux,\n
    `clip_fact` {float}: Relative sigma if rejecting frames by sigma-clipping
    (default=0.5),\n
    `add_bad` {list}: Extra bad-pixel coordinates forwarded to clean_data (default: None),\n

    Returns:
    --------
    `cube_final` {np.array}: Cleaned and selected datacube (or None when cleaning failed).
    """
    # BUG FIX: `add_bad` used a mutable default argument ([]); replaced with
    # the None-sentinel idiom so every call gets its own list.
    if add_bad is None:
        add_bad = []
    # NOTE(review): the HDU list is never closed; closing before using
    # `.data` could break lazy/memmapped loading, so left as-is — confirm.
    hdu = fits.open(filename)
    cube = hdu[ihdu].data
    hdr = hdu[0].header
    if hdr['INSTRUME'] == 'SPHERE':
        # SPHERE headers carry seeing telemetry worth reporting.
        seeing_start = float(hdr['HIERARCH ESO TEL AMBI FWHM START'])
        seeing = float(hdr['HIERARCH ESO TEL IA FWHM'])
        seeing_end = float(hdr['HIERARCH ESO TEL AMBI FWHM END'])
        if verbose:
            print('\n----- Seeing conditions -----')
            print("%2.2f (start), %2.2f (end), %2.2f (Corrected AirMass)" %
                  (seeing_start, seeing_end, seeing))
    raw_size = cube.shape[1]
    if isz > raw_size:
        raise ValueError(
            'Reshape factor is larger than the data size (choose a smaller isz).')
    cube_cleaned = clean_data(cube, isz=isz, r1=r1, edge=edge,
                              r2=r2, bad_map=bad_map, add_bad=add_bad,
                              dr=dr, sky=sky, apod=apod, window=window,
                              f_kernel=f_kernel, offx=offx, offy=offy,
                              verbose=verbose)
    if cube_cleaned is None:
        return None
    cube_final = select_data(cube_cleaned, clip=clip, clip_fact=clip_fact,
                             verbose=verbose, display=display)
    return cube_final
| [
"matplotlib.pyplot.grid",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.array",
"astropy.io.fits.open",
"numpy.sin",
"numpy.arange",
"numpy.mean",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"amical.tools.apply_windowing",
"numpy.fft.fft2",
"numpy.max",
"numpy... | [((2288, 2312), 'numpy.array', 'np.array', (['cube_corrected'], {}), '(cube_corrected)\n', (2296, 2312), True, 'import numpy as np\n'), ((3422, 3438), 'numpy.array', 'np.array', (['fluxes'], {}), '(fluxes)\n', (3430, 3438), True, 'import numpy as np\n'), ((3455, 3474), 'numpy.array', 'np.array', (['flag_fram'], {}), '(flag_fram)\n', (3463, 3474), True, 'import numpy as np\n'), ((3490, 3507), 'numpy.argmax', 'np.argmax', (['fluxes'], {}), '(fluxes)\n', (3499, 3507), True, 'import numpy as np\n'), ((3523, 3540), 'numpy.argmin', 'np.argmin', (['fluxes'], {}), '(fluxes)\n', (3532, 3540), True, 'import numpy as np\n'), ((3557, 3571), 'numpy.std', 'np.std', (['fluxes'], {}), '(fluxes)\n', (3563, 3571), True, 'import numpy as np\n'), ((3587, 3604), 'numpy.median', 'np.median', (['fluxes'], {}), '(fluxes)\n', (3596, 3604), True, 'import numpy as np\n'), ((6549, 6592), 'numpy.sqrt', 'np.sqrt', (['(xx2 ** 2 + yy2[:, np.newaxis] ** 2)'], {}), '(xx2 ** 2 + yy2[:, np.newaxis] ** 2)\n', (6556, 6592), True, 'import numpy as np\n'), ((7518, 7553), 'astropy.convolution.Gaussian2DKernel', 'Gaussian2DKernel', ([], {'x_stddev': 'x_stddev'}), '(x_stddev=x_stddev)\n', (7534, 7553), False, 'from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans\n'), ((7572, 7613), 'astropy.convolution.interpolate_replace_nans', 'interpolate_replace_nans', (['img_nan', 'kernel'], {}), '(img_nan, kernel)\n', (7596, 7613), False, 'from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans\n'), ((9093, 9146), 'amical.tools.crop_max', 'crop_max', (['img1', 'isz'], {'offx': 'offx', 'offy': 'offy', 'f': 'f_kernel'}), '(img1, isz, offx=offx, offy=offy, f=f_kernel)\n', (9101, 9146), False, 'from amical.tools import apply_windowing, crop_max\n'), ((9546, 9576), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(100)'], {}), '(0, 2 * np.pi, 100)\n', (9557, 9576), True, 'import numpy as np\n'), ((10060, 10086), 'matplotlib.pyplot.figure', 'plt.figure', ([], 
{'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (10070, 10086), True, 'from matplotlib import pyplot as plt\n'), ((10169, 10227), 'matplotlib.pyplot.plot', 'plt.plot', (['x1', 'y1'], {'label': '"""Inner radius for sky subtraction"""'}), "(x1, y1, label='Inner radius for sky subtraction')\n", (10177, 10227), True, 'from matplotlib import pyplot as plt\n'), ((10232, 10290), 'matplotlib.pyplot.plot', 'plt.plot', (['x2', 'y2'], {'label': '"""Outer radius for sky subtraction"""'}), "(x2, y2, label='Outer radius for sky subtraction')\n", (10240, 10290), True, 'from matplotlib import pyplot as plt\n'), ((10402, 10469), 'matplotlib.pyplot.plot', 'plt.plot', (['x0', 'y0', '"""+"""'], {'color': '"""g"""', 'ms': '(10)', 'label': '"""Centering position"""'}), "(x0, y0, '+', color='g', ms=10, label='Centering position')\n", (10410, 10469), True, 'from matplotlib import pyplot as plt\n'), ((10474, 10571), 'matplotlib.pyplot.plot', 'plt.plot', (['[xs1, xs2, xs3, xs4, xs1]', '[ys1, ys2, ys3, ys4, ys1]', '"""w--"""'], {'label': '"""Resized image"""'}), "([xs1, xs2, xs3, xs4, xs1], [ys1, ys2, ys3, ys4, ys1], 'w--', label\n ='Resized image')\n", (10482, 10571), True, 'from matplotlib import pyplot as plt\n'), ((10844, 10873), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(7)', 'loc': '(1)'}), '(fontsize=7, loc=1)\n', (10854, 10873), True, 'from matplotlib import pyplot as plt\n'), ((10878, 10896), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10894, 10896), True, 'from matplotlib import pyplot as plt\n'), ((11894, 11920), 'numpy.zeros', 'np.zeros', (['[n_im, isz, isz]'], {}), '([n_im, isz, isz])\n', (11902, 11920), True, 'import numpy as np\n'), ((14138, 14157), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (14147, 14157), False, 'from astropy.io import fits\n'), ((1903, 1948), 'numpy.sqrt', 'np.sqrt', (['(xx_c ** 2 + yy_c[:, np.newaxis] ** 2)'], {}), '(xx_c ** 2 + yy_c[:, np.newaxis] ** 2)\n', (1910, 1948), True, 
'import numpy as np\n'), ((1968, 2017), 'numpy.sqrt', 'np.sqrt', (['(xx_off ** 2 + yy_off[:, np.newaxis] ** 2)'], {}), '(xx_off ** 2 + yy_off[:, np.newaxis] ** 2)\n', (1975, 2017), True, 'import numpy as np\n'), ((3051, 3068), 'numpy.fft.fft2', 'np.fft.fft2', (['cube'], {}), '(cube)\n', (3062, 3068), True, 'import numpy as np\n'), ((3276, 3298), 'numpy.argmax', 'np.argmax', (['fft_fram[i]'], {}), '(fft_fram[i])\n', (3285, 3298), True, 'import numpy as np\n'), ((4092, 4111), 'numpy.array', 'np.array', (['good_fram'], {}), '(good_fram)\n', (4100, 4111), True, 'import numpy as np\n'), ((4129, 4159), 'numpy.where', 'np.where', (['(fluxes <= limit_flux)'], {}), '(fluxes <= limit_flux)\n', (4137, 4159), True, 'import numpy as np\n'), ((4375, 4387), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4385, 4387), True, 'from matplotlib import pyplot as plt\n'), ((4396, 4501), 'matplotlib.pyplot.plot', 'plt.plot', (['fluxes'], {'label': "('|$\\\\Delta F$|/$\\\\sigma_F$=%2.0f (%2.2f %%)' % (med_flux / std_flux, diffmm))"}), "(fluxes, label='|$\\\\Delta F$|/$\\\\sigma_F$=%2.0f (%2.2f %%)' % (\n med_flux / std_flux, diffmm))\n", (4404, 4501), True, 'from matplotlib import pyplot as plt\n'), ((5043, 5077), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'fontsize': '(9)'}), "(loc='best', fontsize=9)\n", (5053, 5077), True, 'from matplotlib import pyplot as plt\n'), ((5086, 5113), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Flux [counts]"""'], {}), "('Flux [counts]')\n", (5096, 5113), True, 'from matplotlib import pyplot as plt\n'), ((5122, 5144), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""# frames"""'], {}), "('# frames')\n", (5132, 5144), True, 'from matplotlib import pyplot as plt\n'), ((5153, 5172), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (5161, 5172), True, 'from matplotlib import pyplot as plt\n'), ((5180, 5198), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5196, 
5198), True, 'from matplotlib import pyplot as plt\n'), ((5208, 5234), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (5218, 5234), True, 'from matplotlib import pyplot as plt\n'), ((5243, 5263), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (5254, 5263), True, 'from matplotlib import pyplot as plt\n'), ((5272, 5309), 'matplotlib.pyplot.title', 'plt.title', (["('Best fram (%i)' % best_fr)"], {}), "('Best fram (%i)' % best_fr)\n", (5281, 5309), True, 'from matplotlib import pyplot as plt\n'), ((5395, 5415), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (5406, 5415), True, 'from matplotlib import pyplot as plt\n'), ((5498, 5518), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (5509, 5518), True, 'from matplotlib import pyplot as plt\n'), ((5527, 5576), 'matplotlib.pyplot.title', 'plt.title', (["('Worst fram (%i) %s' % (worst_fr, ext))"], {}), "('Worst fram (%i) %s' % (worst_fr, ext))\n", (5536, 5576), True, 'from matplotlib import pyplot as plt\n'), ((5663, 5683), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (5674, 5683), True, 'from matplotlib import pyplot as plt\n'), ((5767, 5785), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5783, 5785), True, 'from matplotlib import pyplot as plt\n'), ((5794, 5815), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (5802, 5815), True, 'from matplotlib import pyplot as plt\n'), ((6449, 6463), 'numpy.arange', 'np.arange', (['isz'], {}), '(isz)\n', (6458, 6463), True, 'import numpy as np\n'), ((6465, 6479), 'numpy.arange', 'np.arange', (['isz'], {}), '(isz)\n', (6474, 6479), True, 'import numpy as np\n'), ((6731, 6752), 'numpy.mean', 'np.mean', (['imB[cond_bg]'], {}), '(imB[cond_bg])\n', (6738, 6752), True, 'import numpy as np\n'), ((6807, 6828), 'numpy.mean', 
'np.mean', (['imC[cond_bg]'], {}), '(imC[cond_bg])\n', (6814, 6828), True, 'import numpy as np\n'), ((9395, 9417), 'numpy.where', 'np.where', (['(bad_map == 1)'], {}), '(bad_map == 1)\n', (9403, 9417), True, 'import numpy as np\n'), ((10728, 10823), 'matplotlib.pyplot.scatter', 'plt.scatter', (['bad_pix_y', 'bad_pix_x'], {'color': '""""""', 'marker': '"""s"""', 'edgecolors': '"""r"""', 's': '(20)', 'label': 'label'}), "(bad_pix_y, bad_pix_x, color='', marker='s', edgecolors='r', s=\n 20, label=label)\n", (10739, 10823), True, 'from matplotlib import pyplot as plt\n'), ((1749, 1763), 'numpy.arange', 'np.arange', (['isz'], {}), '(isz)\n', (1758, 1763), True, 'import numpy as np\n'), ((1765, 1779), 'numpy.arange', 'np.arange', (['isz'], {}), '(isz)\n', (1774, 1779), True, 'import numpy as np\n'), ((3672, 3819), 'termcolor.cprint', 'cprint', (['("""\nStd of the fluxes along the cube < 5 (%2.1f):\n -> sigma clipping is suggested (clip=True)."""\n % (med_flux / std_flux))', '"""cyan"""'], {}), '(\n """\nStd of the fluxes along the cube < 5 (%2.1f):\n -> sigma clipping is suggested (clip=True)."""\n % (med_flux / std_flux), \'cyan\')\n', (3678, 3819), False, 'from termcolor import cprint\n'), ((3995, 4025), 'numpy.where', 'np.where', (['(fluxes <= limit_flux)'], {}), '(fluxes <= limit_flux)\n', (4003, 4025), True, 'import numpy as np\n'), ((4554, 4682), 'matplotlib.pyplot.scatter', 'plt.scatter', (['flag_fram', 'fluxes[flag_fram]'], {'s': '(52)', 'facecolors': '"""none"""', 'edgecolors': '"""r"""', 'label': '"""Rejected frames (maximum fluxes)"""'}), "(flag_fram, fluxes[flag_fram], s=52, facecolors='none',\n edgecolors='r', label='Rejected frames (maximum fluxes)')\n", (4565, 4682), True, 'from matplotlib import pyplot as plt\n'), ((5435, 5469), 'numpy.fft.fftshift', 'np.fft.fftshift', (['fft_fram[best_fr]'], {}), '(fft_fram[best_fr])\n', (5450, 5469), True, 'import numpy as np\n'), ((5703, 5738), 'numpy.fft.fftshift', 'np.fft.fftshift', (['fft_fram[worst_fr]'], {}), 
'(fft_fram[worst_fr])\n', (5718, 5738), True, 'import numpy as np\n'), ((5939, 6005), 'termcolor.cprint', 'cprint', (['"""\n---- σ-clip + centered fluxes selection ---"""', '"""cyan"""'], {}), '("""\n---- σ-clip + centered fluxes selection ---""", \'cyan\')\n', (5945, 6005), False, 'from termcolor import cprint\n'), ((6029, 6086), 'termcolor.cprint', 'cprint', (['"""\n---- centered fluxes selection ---"""', '"""cyan"""'], {}), '("""\n---- centered fluxes selection ---""", \'cyan\')\n', (6035, 6086), False, 'from termcolor import cprint\n'), ((8740, 8759), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (8749, 8759), False, 'from astropy.io import fits\n'), ((9622, 9635), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (9628, 9635), True, 'import numpy as np\n'), ((9655, 9668), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (9661, 9668), True, 'import numpy as np\n'), ((9688, 9701), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (9694, 9701), True, 'import numpy as np\n'), ((9721, 9734), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (9727, 9734), True, 'import numpy as np\n'), ((10113, 10127), 'matplotlib.colors.PowerNorm', 'PowerNorm', (['(0.5)'], {}), '(0.5)\n', (10122, 10127), False, 'from matplotlib.colors import PowerNorm\n'), ((10347, 10397), 'matplotlib.pyplot.plot', 'plt.plot', (['x3', 'y3'], {'label': '"""Super-gaussian windowing"""'}), "(x3, y3, label='Super-gaussian windowing')\n", (10355, 10397), True, 'from matplotlib import pyplot as plt\n'), ((12335, 12388), 'amical.tools.crop_max', 'crop_max', (['img1', 'isz'], {'offx': 'offx', 'offy': 'offy', 'f': 'f_kernel'}), '(img1, isz, offx=offx, offy=offy, f=f_kernel)\n', (12343, 12388), False, 'from amical.tools import apply_windowing, crop_max\n'), ((12724, 12809), 'termcolor.cprint', 'cprint', (['"""\nCropped image do not have same X, Y dimensions -> check isz"""', '"""red"""'], {}), '("""\nCropped image do not have same X, Y dimensions -> check isz""",\n 
\'red\')\n', (12730, 12809), False, 'from termcolor import cprint\n'), ((12935, 12977), 'amical.tools.apply_windowing', 'apply_windowing', (['img_biased'], {'window': 'window'}), '(img_biased, window=window)\n', (12950, 12977), False, 'from amical.tools import apply_windowing, crop_max\n'), ((4770, 4848), 'matplotlib.pyplot.plot', 'plt.plot', (['ind_clip', 'fluxes[ind_clip]', '"""rx"""'], {'label': '"""Rejected frames (clipping)"""'}), "(ind_clip, fluxes[ind_clip], 'rx', label='Rejected frames (clipping)')\n", (4778, 4848), True, 'from matplotlib import pyplot as plt\n'), ((5349, 5363), 'matplotlib.colors.PowerNorm', 'PowerNorm', (['(0.5)'], {}), '(0.5)\n', (5358, 5363), False, 'from matplotlib.colors import PowerNorm\n'), ((5617, 5631), 'matplotlib.colors.PowerNorm', 'PowerNorm', (['(0.5)'], {}), '(0.5)\n', (5626, 5631), False, 'from matplotlib.colors import PowerNorm\n'), ((6933, 6984), 'termcolor.cprint', 'cprint', (['"""Warning: Background not computed"""', '"""green"""'], {}), "('Warning: Background not computed', 'green')\n", (6939, 6984), False, 'from termcolor import cprint\n'), ((6997, 7076), 'termcolor.cprint', 'cprint', (['"""-> check the inner and outer radius rings (checkrad option)."""', '"""green"""'], {}), "('-> check the inner and outer radius rings (checkrad option).', 'green')\n", (7003, 7076), False, 'from termcolor import cprint\n'), ((9805, 9818), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (9811, 9818), True, 'import numpy as np\n'), ((9842, 9855), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (9848, 9855), True, 'import numpy as np\n'), ((4309, 4323), 'numpy.max', 'np.max', (['fluxes'], {}), '(fluxes)\n', (4315, 4323), True, 'import numpy as np\n'), ((4326, 4340), 'numpy.min', 'np.min', (['fluxes'], {}), '(fluxes)\n', (4332, 4340), True, 'import numpy as np\n')] |
from requests import get
import time
import gi
# Module-level defaults. NOTE(review): main() rebinds ip_starting and
# recon_command as locals, so only `rate` (polling interval in seconds)
# is actually read from module scope.
ip_starting = ""
recon_command = ""
rate = 60
def main():
    """Watch the public IP and re-run the VPN reconnect command whenever
    the IP reverts to the original (non-VPN) address.

    Blocks forever; polls every `rate` seconds.
    """
    print("Example: nordvpn disconnect && nordvpn connect")
    recon_command = input("Enter the command used to reconnect to VPN: ")
    ip_starting = get('https://api.ipify.org').text
    print("Starting ip:", ip_starting)
    print("Connect to your VPN now!")
    import subprocess
    while True:
        time.sleep(rate)
        print("checking ip")
        # IP matching the pre-VPN address means the VPN dropped.
        if ip_starting == get('https://api.ipify.org').text:
            print("Snakeswitch activated")
            subprocess.run('notify-send Snakeswitch activated', shell=True)
            # BUG FIX: recon_command is a full shell command line (e.g. with
            # '&&'), so it must run through the shell; without shell=True,
            # subprocess.run would try to exec the entire string as a single
            # program name and raise FileNotFoundError.
            subprocess.run(recon_command, shell=True)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C: exit quietly instead of dumping a traceback.
        print('Interrupted')
        try:
            import sys
            sys.exit(0)
        except SystemExit:
            # sys.exit raises SystemExit, which this handler immediately
            # catches, so in practice the hard os._exit(0) path always runs.
            import os
            os._exit(0)
| [
"subprocess.run",
"time.sleep",
"requests.get",
"os._exit",
"sys.exit"
] | [((253, 281), 'requests.get', 'get', (['"""https://api.ipify.org"""'], {}), "('https://api.ipify.org')\n", (256, 281), False, 'from requests import get\n'), ((393, 409), 'time.sleep', 'time.sleep', (['rate'], {}), '(rate)\n', (403, 409), False, 'import time\n'), ((525, 588), 'subprocess.run', 'subprocess.run', (['"""notify-send Snakeswitch activated"""'], {'shell': '(True)'}), "('notify-send Snakeswitch activated', shell=True)\n", (539, 588), False, 'import subprocess\n'), ((592, 621), 'subprocess.run', 'subprocess.run', (['recon_command'], {}), '(recon_command)\n', (606, 621), False, 'import subprocess\n'), ((453, 481), 'requests.get', 'get', (['"""https://api.ipify.org"""'], {}), "('https://api.ipify.org')\n", (456, 481), False, 'from requests import get\n'), ((740, 751), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (748, 751), False, 'import sys\n'), ((789, 800), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (797, 800), False, 'import os\n')] |
import sys
import torch
from torch import nn
from torch.autograd import Variable
from view import *
from holder import *
from util import *
from join_table import *
from trilinear_prod import *
from fusion import *
# fused bidir attention
class FusedBiAttention(torch.nn.Module):
    """Fused bidirectional attention between a context encoding C and a
    query encoding Q (BiDAF-style), followed by fusion of the attended
    vectors with the originals.

    Results are also stashed on the shared holder (att_soft1, att_soft2,
    G, P) for downstream modules to read.
    """
    def __init__(self, opt, shared):
        super(FusedBiAttention, self).__init__()
        self.opt = opt
        self.shared = shared
        # When ELMo is part of the encoder, its vectors are concatenated
        # onto the hidden states, enlarging the encoding size.
        enc_size = opt.hidden_size if 'elmo' not in opt.enc else opt.hidden_size + opt.elmo_size
        self.trilinear_prod = TrilinearProd(opt, enc_size)
        self.fusion = Fusion(opt, enc_size)
        self.softmax2 = nn.Softmax(2)
        self.phi_joiner = JoinTable(2)

    def biattention(self, scores, C, Q):
        """Softmax the similarity scores in both directions and attend."""
        # context->query: each context token attends over query tokens
        c2q = self.softmax2(scores)                      # (batch_l, context_l, max_query_l)
        # query->context: each query token attends over context tokens
        q2c = self.softmax2(scores.transpose(1, 2))      # (batch_l, max_query_l, context_l)
        attended_q = c2q.bmm(Q)                          # (batch_l, context_l, enc_size)
        attended_c = self.masked_fill_query(q2c.bmm(C))  # (batch_l, max_query_l, enc_size)
        return c2q, q2c, attended_q, attended_c

    def masked_fill_scores(self, scores):
        """Push masked-out score positions to -inf so softmax ignores them."""
        keep = self.shared.score_mask
        return scores * keep + (self.shared.one - keep) * self.shared.neg_inf

    def masked_fill_query(self, query):
        """Zero out rows that correspond to query padding."""
        return query * self.shared.query_mask.unsqueeze(-1)

    # input encodings of context (C) and query (Q)
    # C of shape (batch_l, context_l, hidden_size)
    # Q of shape (batch_l, query_l, hidden_size)
    def forward(self, C, Q):
        self.update_context()
        # trilinear similarity between every (context, query) token pair
        scores = self.masked_fill_scores(self.trilinear_prod(C, Q))
        att1, att2, agg1, agg2 = self.biattention(scores, C, Q)
        # fuse the attended representations back into the originals
        G = self.fusion(C, agg1)
        P = self.masked_fill_query(self.fusion(Q, agg2))
        # bookkeeping for downstream layers / inspection
        self.shared.att_soft1 = att1
        self.shared.att_soft2 = att2
        self.shared.G = G
        self.shared.P = P
        return att1, att2, G

    def update_context(self):
        # No per-batch state to refresh currently; kept as a hook for
        # parity with sibling modules.
        pass

    def begin_pass(self):
        pass

    def end_pass(self):
        pass
| [
"torch.nn.Softmax"
] | [((602, 615), 'torch.nn.Softmax', 'nn.Softmax', (['(2)'], {}), '(2)\n', (612, 615), False, 'from torch import nn\n')] |
import random
import string
def generate() -> str:
    """Generate a 12-character alphanumeric secret.

    Direct reimplementation of secretID.js from account.neosvr.com.
    """
    token_length = 12
    alphabet = string.ascii_letters + string.digits
    return "".join(random.choices(alphabet, k=token_length))
| [
"random.choices"
] | [((216, 253), 'random.choices', 'random.choices', (['valid_chars'], {'k': 'length'}), '(valid_chars, k=length)\n', (230, 253), False, 'import random\n')] |
# Simplified Bres Maker
# Version: 1.0
#Python Version: 2.0
# IMPORTS
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from numpy import asarray
from numpy import savetxt
import sys
import os
# DEFINITIONS
def find(s, ch):
    """Return the list of indices at which character `ch` occurs in `s`."""
    positions = []
    for index, letter in enumerate(s):
        if letter == ch:
            positions.append(index)
    return positions
# DATALOAD
# NOTE(review): CLI argument parsing is commented out; paths and K-means
# settings are hard-coded below for this run.
#user_input = str(sys.argv[1])
#ranking = str(sys.argv[2])
#working = str(sys.argv[3])
#iterations = int(sys.argv[4])
#trys = int(sys.argv[5])
user_input = "D:/Proseeker/exampledeets.csv"  # input library: sequence, ':'-separated sites
ranking = "D:/Proseeker/ranking.csv"  # per-position amino-acid property table
working = "D:/Proseeker"  # output directory for the .bres CSV files
iterations = 1000000  # KMeans max_iter
trys = 1000  # KMeans n_init (random restarts)
# One column per amino-acid one-letter code.
aavals = pd.read_csv(ranking, usecols=['A','R','N','D','C','Q','E','G','H','I','L','K','M','F','P','S','T','W','Y','V'],
                     sep =',')
# Per-residue property columns will be exposed through this dict.
d = {}
# Row-wise min-max normalization of the property table.
# BUG FIX: the original nested loop called `aavals.replace(...)` and threw
# the result away (DataFrame.replace is not in-place by default), so the
# table was never actually normalized; `replace` would also have substituted
# matching values across the whole frame, not just at (i, j). The vectorized
# form below performs the intended per-row scaling in one pass, and works
# for any number of rows instead of the hard-coded 544.
row_min = aavals.min(axis=1)
row_max = aavals.max(axis=1)
aavals = aavals.sub(row_min, axis=0).div(row_max - row_min, axis=0)
# Expose each amino-acid property column as a plain list, keyed by its
# one-letter code (insertion order matches the original assignments).
for residue in ('A', 'R', 'D', 'N', 'C', 'E', 'Q', 'G', 'H', 'I',
                'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V'):
    d[residue] = list(aavals[residue])

# Input library: column 0 = sequence, column 1 = ':'-separated site positions.
library = pd.read_csv(user_input, header=None, sep=',')
seqs = library[0]
sites = library[1]
# PROCESSING
for x in range(0, len(seqs)):
    subjectstd = list(seqs[x])
    # Append a second copy of the sequence so that windows reaching past
    # the end of the sequence still index valid positions.
    subject = list.copy(subjectstd)
    for p in range(0, len(subjectstd)):
        subject.append(subjectstd[p])
    # Replace each residue letter with its property vector.
    # IDIOM FIX: the original 80-branch if/elif chain duplicated a simple
    # dictionary lookup. `d` is keyed by upper-case one-letter codes, so a
    # case-folded d.get() covers both cases; characters that are not one of
    # the 20 amino acids are left untouched, exactly as before.
    for z in range(0, len(subject)):
        subject[z] = d.get(subject[z].upper(), subject[z])
    subjectsites = str(sites[x])
    splits = find(subjectsites, ':')
    splits.append(len(subjectsites))
    if sum(splits) > 0:
        for q in range(len(splits)):
            # Parse the q-th ':'-separated site position.
            if q == 0:
                subpos = int(subjectsites[0:splits[q]])
            else:
                subpos = int(subjectsites[splits[q-1]+1:splits[q]])
            # 13-residue window of property vectors centered on the site
            # (+/- 6 positions), one column per position.
            breswindow = [subject[subpos + offset] for offset in range(-6, 7)]
            breswindow = np.column_stack(breswindow)
            # Cluster the window's property rows; 'full' selects the
            # classic EM-style Lloyd algorithm.
            kmeans = KMeans(n_clusters=50, n_init=trys, max_iter=iterations, algorithm="full")
            kmeans.fit(breswindow)
            clusters = kmeans.labels_
            # Append the cluster label as a 14th column and dump to CSV.
            breswindow = np.insert(breswindow, 13, clusters, axis=1)
            savetxt(os.path.join(working, 'p{}.bres{}.csv'.format(x+1, q+1)), breswindow, delimiter=',', fmt='%f')
| [
"sklearn.cluster.KMeans",
"numpy.insert",
"numpy.column_stack",
"pandas.read_csv"
] | [((616, 759), 'pandas.read_csv', 'pd.read_csv', (['ranking'], {'usecols': "['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P',\n 'S', 'T', 'W', 'Y', 'V']", 'sep': '""","""'}), "(ranking, usecols=['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H',\n 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V'], sep=',')\n", (627, 759), True, 'import pandas as pd\n'), ((1554, 1599), 'pandas.read_csv', 'pd.read_csv', (['user_input'], {'header': 'None', 'sep': '""","""'}), "(user_input, header=None, sep=',')\n", (1565, 1599), True, 'import pandas as pd\n'), ((5185, 5212), 'numpy.column_stack', 'np.column_stack', (['breswindow'], {}), '(breswindow)\n', (5200, 5212), True, 'import numpy as np\n'), ((5234, 5307), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(50)', 'n_init': 'trys', 'max_iter': 'iterations', 'algorithm': '"""full"""'}), "(n_clusters=50, n_init=trys, max_iter=iterations, algorithm='full')\n", (5240, 5307), False, 'from sklearn.cluster import KMeans\n'), ((5406, 5449), 'numpy.insert', 'np.insert', (['breswindow', '(13)', 'clusters'], {'axis': '(1)'}), '(breswindow, 13, clusters, axis=1)\n', (5415, 5449), True, 'import numpy as np\n')] |
import requests
"""
Delete a project version.
If there are no more versions available for a given project, that project will be deleted too.
"""
def delete_version(server, project, version):
    """Delete a project version on `server`.

    If there are no more versions available for a given project, that
    project will be deleted too (server-side behavior).

    Returns the parsed JSON response, or None when the HTTP request fails.
    """
    url = "http://{}/delversion.json".format(server)
    data = {
        "project": project,
        "version": version
    }
    with requests.Session() as session:
        try:
            r = session.post(url, data=data)
        except requests.RequestException:
            # BUG FIX: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit; only network/HTTP-level errors
            # should map to the None "request failed" result.
            return None
    return r.json()
| [
"requests.Session"
] | [((307, 325), 'requests.Session', 'requests.Session', ([], {}), '()\n', (323, 325), False, 'import requests\n')] |
import utils
import os
import json
def getjsondata(path):
    """Load and parse a JSON file.

    Relative paths are resolved against this module's directory, so the
    result does not depend on the process working directory.
    """
    if not os.path.isabs(path):
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)), path)
    # BUG FIX: the original opened the file without ever closing it,
    # leaking the handle; a context manager guarantees it is released.
    with open(path) as f:
        return json.load(f)
def getconfig():
    """Load the package configuration from ./conf.json (resolved relative
    to this module by getjsondata)."""
    config_file = './conf.json'
    return getjsondata(config_file)
"os.path.realpath",
"os.path.isabs"
] | [((70, 89), 'os.path.isabs', 'os.path.isabs', (['path'], {}), '(path)\n', (83, 89), False, 'import os\n'), ((135, 161), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (151, 161), False, 'import os\n')] |
import cv2
import time
import logging
class Sentry:
    """Face-detection watchdog.

    Runs a Haar-cascade face detector on frames passed to is_alert() and
    decides whether an alert should fire: on any change in the detected
    face count, and then at most once per __ALERT_INTERVAL seconds while
    the count stays nonzero.
    """
    #__face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
    # Frontal-face "alt" cascade is the active detector model.
    __face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')
    #__face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_upperbody.xml')
    # Eye cascade is loaded but currently unused by this class.
    __eye_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_eye.xml')
    # Minimum seconds between repeated alerts for a stable face count.
    __ALERT_INTERVAL = 5
    def __init__(self):
        # Number of faces found in the most recent frame.
        self.face_count = 0
        # Most recent frame (rectangles drawn on it when indicate=True).
        self.image = None#cv2.imread('notfound.jpg',0)
        # Human-readable explanation of the last positive alert decision.
        self.alert_reason = None
        # time.perf_counter() timestamp of the last alert.
        self.__alert_time = 0.0
        # Face count recorded at the last alert (for change detection).
        self.__alert_face_count = 0
        # Raw detection rectangles from the last positive detection.
        self.__faces = None
    def is_alert(self, image, indicate=False):
        """Detect faces in `image` and return True when an alert should fire.

        Note: __should_alert() is evaluated unconditionally because it
        updates the alert bookkeeping state as a side effect.
        """
        is_detected = self.__is_face_detected(image, indicate)
        should_alert = self.__should_alert()
        #logging.debug("Detected: %r alert: %r"%(is_detected, should_alert))
        return is_detected and should_alert
    def __is_face_detected(self, image, indicate):
        # Haar cascades operate on grayscale input.
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # scaleFactor=1.2, minNeighbors=2: fairly permissive detection.
        faces = self.__face_cascade.detectMultiScale(gray, 1.2, 2)
        self.face_count = len(faces)
        self.image = image
        if self.face_count > 0:
            self.__faces = faces
            if indicate:
                # Draw a rectangle around each detected face.
                for (x,y,w,h) in self.__faces:
                    self.image = cv2.rectangle(self.image,(x,y),(x+w,y+h),(200,10,10),2)
        return self.face_count > 0
    # Alert Requirements
    # - No alert - when no faces were detected currently and previously
    # - alert - first time face_count is different than previous
    # - - If face_count is same as before and not 0, then alert every 5 seconds
    def __should_alert(self):
        """Decide whether to alert and update the alert bookkeeping state."""
        if self.face_count == 0 and self.__alert_face_count == 0: return False
        # Face count changed since the last alert: alert immediately.
        if self.face_count != self.__alert_face_count :
            self.alert_reason = "Face count was %s but is now %s"%(self.__alert_face_count, self.face_count)
            self.__alert_face_count = self.face_count
            self.__alert_time = time.perf_counter()
            return True
        # Same nonzero count as before: re-alert only after the interval.
        duration = int(time.perf_counter() - self.__alert_time)
        if duration > self.__ALERT_INTERVAL:
            self.alert_reason = "Duration is greater than %s"%self.__ALERT_INTERVAL
            self.__alert_time = time.perf_counter()
        return duration > self.__ALERT_INTERVAL
| [
"cv2.rectangle",
"cv2.CascadeClassifier",
"time.perf_counter",
"cv2.cvtColor"
] | [((166, 235), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascades/haarcascade_frontalface_alt.xml"""'], {}), "('haarcascades/haarcascade_frontalface_alt.xml')\n", (187, 235), False, 'import cv2\n'), ((338, 395), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascades/haarcascade_eye.xml"""'], {}), "('haarcascades/haarcascade_eye.xml')\n", (359, 395), False, 'import cv2\n'), ((952, 991), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (964, 991), False, 'import cv2\n'), ((1911, 1930), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1928, 1930), False, 'import time\n'), ((2155, 2174), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2172, 2174), False, 'import time\n'), ((1969, 1988), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1986, 1988), False, 'import time\n'), ((1248, 1315), 'cv2.rectangle', 'cv2.rectangle', (['self.image', '(x, y)', '(x + w, y + h)', '(200, 10, 10)', '(2)'], {}), '(self.image, (x, y), (x + w, y + h), (200, 10, 10), 2)\n', (1261, 1315), False, 'import cv2\n')] |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
import torch
from transformers import TrainingArguments, Trainer
from transformers import BertTokenizer, BertForSequenceClassification
from transformers import EarlyStoppingCallback
import pdb
import argparse
import traceback
import sys
# Defaults for the fine-tuning CLI.
DEFAULT_MODEL_PATH="./model"  # pretrained model name or local checkpoint dir
DEFAULT_OUTPUT_DIR="./output"  # where Trainer checkpoints are written
DEFAULT_SEQUENCE_LENGTH=512  # tokenizer truncation/padding length
class Dataset(torch.utils.data.Dataset):
    """Minimal torch Dataset over HuggingFace tokenizer output.

    `encodings` maps field name -> per-example sequences (input_ids,
    attention_mask, ...); `labels` is an optional parallel list of
    integer class labels.
    """
    def __init__(self, encodings, labels=None):
        self.encodings = encodings
        self.labels = labels
    def __getitem__(self, idx):
        # Materialize one example as tensors, field by field.
        sample = {}
        for name, column in self.encodings.items():
            sample[name] = torch.tensor(column[idx])
        if self.labels:
            sample["labels"] = torch.tensor(self.labels[idx])
        return sample
    def __len__(self):
        # All encoding fields are parallel; input_ids sets the length.
        return len(self.encodings["input_ids"])
# Define Trainer parameters
def compute_metrics(p):
    """Turn a (logits, labels) evaluation pair into the binary
    classification metrics reported by the HF Trainer."""
    logits, labels = p
    preds = np.argmax(logits, axis=1)
    return {
        "accuracy": accuracy_score(y_true=labels, y_pred=preds),
        "precision": precision_score(y_true=labels, y_pred=preds),
        "recall": recall_score(y_true=labels, y_pred=preds),
        "f1": f1_score(y_true=labels, y_pred=preds),
    }
def _split_tab_pairs(texts):
    """Split tab-joined "text1<TAB>text2" strings back into two parallel lists.

    Raises ValueError when an entry does not contain exactly one tab.
    """
    first, second = [], []
    for joined in texts:
        parts = joined.split('\t')
        if len(parts) != 2:
            # Explicit raise instead of assert: asserts vanish under `python -O`.
            raise ValueError("expected exactly one tab in paired input, got: %r" % joined)
        first.append(parts[0])
        second.append(parts[1])
    return first, second


def fine_tune(params):
    """Fine-tune a BERT sequence classifier on a tab-separated train file.

    Fields read from params (argparse.Namespace):
        input:      path to TSV train data (columns: label + text, or text1/text2)
        model:      pretrained model name or local model path
        output:     directory where checkpoints and the final model are saved
        paired:     True when each example is a **pair** of sentences
        seq_length: maximum tokenized sequence length
    """
    input_file = params.input
    model_name_or_path = params.model
    output_dir = params.output
    paired = params.paired
    seq_length = params.seq_length

    # Read data (tab-separated)
    data = pd.read_csv(input_file, sep='\t')

    # Define pretrained tokenizer and model (binary classification head).
    tokenizer = BertTokenizer.from_pretrained(model_name_or_path)
    model = BertForSequenceClassification.from_pretrained(model_name_or_path, num_labels=2)
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model.to(device)

    # ----- 1. Preprocess data -----#
    if paired:
        X1 = list(data["text1"])
        X2 = list(data["text2"])
        if len(X1) != len(X2):
            raise ValueError("paired input requires text1/text2 columns of equal length")
        # Join each pair with a tab so train_test_split keeps pairs together;
        # _tokenize below re-splits them before tokenization.
        X = [X1[i] + '\t' + X2[i] for i in range(len(X1))]
    else:
        X = list(data["text"])
    y = list(data["label"])
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.01)

    def _tokenize(texts):
        # Tokenize a list of (possibly tab-joined paired) examples.
        if paired:
            first, second = _split_tab_pairs(texts)
            return tokenizer(text=first, text_pair=second, padding=True,
                             truncation=True, max_length=seq_length)
        return tokenizer(texts, padding=True, truncation=True, max_length=seq_length)

    X_train_tokenized = _tokenize(X_train)
    X_val_tokenized = _tokenize(X_val)

    # Create torch datasets for the Trainer.
    train_dataset = Dataset(X_train_tokenized, y_train)
    val_dataset = Dataset(X_val_tokenized, y_val)

    # ----- 2. Fine-tune pretrained model -----#
    # Define Trainer
    args = TrainingArguments(
        output_dir=output_dir,
        evaluation_strategy="steps",
        eval_steps=100,
        save_steps=100,
        warmup_steps=500,
        per_device_train_batch_size=32,
        per_device_eval_batch_size=32,
        #learning_rate = 1e-5,
        num_train_epochs=5,
        #weight_decay=0.01,
        seed=0,
        load_best_model_at_end=True,
        logging_dir='./logs',  # directory for storing logs
        logging_steps=10,
        metric_for_best_model="accuracy"
    )
    trainer = Trainer(
        model=model,
        args=args,
        train_dataset=train_dataset,
        eval_dataset=val_dataset,
        compute_metrics=compute_metrics,
        #callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],
    )

    # Train and persist the best checkpoint to output_dir.
    trainer.train()
    trainer.save_model(output_dir)
    print("Model saved. Training complete")
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Fine tune model ',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-model', action="store", dest="model", default=DEFAULT_MODEL_PATH,help='BERT pretrained models, or custom model path')
    parser.add_argument('-input', action="store", dest="input",required=True, help='Input train data file')
    parser.add_argument('-output', action="store", dest="output",default=DEFAULT_OUTPUT_DIR, help='Output directory where model is saved')
    parser.add_argument('-seq_length', action="store", dest="seq_length",type=int,default=DEFAULT_SEQUENCE_LENGTH, help='Default max sequence length of input')
    parser.add_argument('-paired', dest="paired", action='store_true',help='Input is expected to be **pairs** of sentences')
    parser.add_argument('-no-paired', dest="paired", action='store_false',help='Input is expected to be **single** sentence - not pairs of sentences')
    parser.set_defaults(paired=False)
    results = parser.parse_args()
    try:
        # Release any cached GPU memory before training starts.
        torch.cuda.empty_cache()
        fine_tune(results)
    except Exception:
        # BUGFIX: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit; catch Exception so Ctrl-C still works.
        print("Unexpected error:", sys.exc_info()[0])
        traceback.print_exc(file=sys.stdout)
| [
"sklearn.metrics.f1_score",
"pandas.read_csv",
"transformers.TrainingArguments",
"sklearn.model_selection.train_test_split",
"argparse.ArgumentParser",
"transformers.BertTokenizer.from_pretrained",
"numpy.argmax",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"torch.tensor",
... | [((1036, 1059), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (1045, 1059), True, 'import numpy as np\n'), ((1076, 1118), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'labels', 'y_pred': 'pred'}), '(y_true=labels, y_pred=pred)\n', (1090, 1118), False, 'from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score\n'), ((1132, 1172), 'sklearn.metrics.recall_score', 'recall_score', ([], {'y_true': 'labels', 'y_pred': 'pred'}), '(y_true=labels, y_pred=pred)\n', (1144, 1172), False, 'from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score\n'), ((1189, 1232), 'sklearn.metrics.precision_score', 'precision_score', ([], {'y_true': 'labels', 'y_pred': 'pred'}), '(y_true=labels, y_pred=pred)\n', (1204, 1232), False, 'from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score\n'), ((1242, 1278), 'sklearn.metrics.f1_score', 'f1_score', ([], {'y_true': 'labels', 'y_pred': 'pred'}), '(y_true=labels, y_pred=pred)\n', (1250, 1278), False, 'from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score\n'), ((1640, 1673), 'pandas.read_csv', 'pd.read_csv', (['input_file'], {'sep': '"""\t"""'}), "(input_file, sep='\\t')\n", (1651, 1673), True, 'import pandas as pd\n'), ((1807, 1848), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['model_name'], {}), '(model_name)\n', (1836, 1848), False, 'from transformers import BertTokenizer, BertForSequenceClassification\n'), ((1861, 1932), 'transformers.BertForSequenceClassification.from_pretrained', 'BertForSequenceClassification.from_pretrained', (['model_name'], {'num_labels': '(2)'}), '(model_name, num_labels=2)\n', (1906, 1932), False, 'from transformers import BertTokenizer, BertForSequenceClassification\n'), ((2422, 2460), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.01)'}), '(X, y, test_size=0.01)\n', 
(2438, 2460), False, 'from sklearn.model_selection import train_test_split\n'), ((3594, 3924), 'transformers.TrainingArguments', 'TrainingArguments', ([], {'output_dir': 'output_dir', 'evaluation_strategy': '"""steps"""', 'eval_steps': '(100)', 'save_steps': '(100)', 'warmup_steps': '(500)', 'per_device_train_batch_size': '(32)', 'per_device_eval_batch_size': '(32)', 'num_train_epochs': '(5)', 'seed': '(0)', 'load_best_model_at_end': '(True)', 'logging_dir': '"""./logs"""', 'logging_steps': '(10)', 'metric_for_best_model': '"""accuracy"""'}), "(output_dir=output_dir, evaluation_strategy='steps',\n eval_steps=100, save_steps=100, warmup_steps=500,\n per_device_train_batch_size=32, per_device_eval_batch_size=32,\n num_train_epochs=5, seed=0, load_best_model_at_end=True, logging_dir=\n './logs', logging_steps=10, metric_for_best_model='accuracy')\n", (3611, 3924), False, 'from transformers import TrainingArguments, Trainer\n'), ((4149, 4273), 'transformers.Trainer', 'Trainer', ([], {'model': 'model', 'args': 'args', 'train_dataset': 'train_dataset', 'eval_dataset': 'val_dataset', 'compute_metrics': 'compute_metrics'}), '(model=model, args=args, train_dataset=train_dataset, eval_dataset=\n val_dataset, compute_metrics=compute_metrics)\n', (4156, 4273), False, 'from transformers import TrainingArguments, Trainer\n'), ((4562, 4678), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Fine tune model """', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Fine tune model ', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (4585, 4678), False, 'import argparse\n'), ((1970, 1995), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1993, 1995), False, 'import torch\n'), ((1946, 1966), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1958, 1966), False, 'import torch\n'), ((2001, 2020), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2013, 2020), 
False, 'import torch\n'), ((5590, 5614), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (5612, 5614), False, 'import torch\n'), ((711, 733), 'torch.tensor', 'torch.tensor', (['val[idx]'], {}), '(val[idx])\n', (723, 733), False, 'import torch\n'), ((827, 857), 'torch.tensor', 'torch.tensor', (['self.labels[idx]'], {}), '(self.labels[idx])\n', (839, 857), False, 'import torch\n'), ((5716, 5752), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stdout'}), '(file=sys.stdout)\n', (5735, 5752), False, 'import traceback\n'), ((5689, 5703), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (5701, 5703), False, 'import sys\n')] |
# ==============================================================================
# Copyright 2019 - <NAME>
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" OpenAI Gym integration modules """
import warnings
from gym.envs.registration import register
from diplomacy_research.models.gym.wrappers import AutoDraw, LimitNumberYears, LoopDetection, SetInitialState, \
AssignPlayers, RandomizePlayers, SetPlayerSeed, SaveGame
# Suppress a known deprecation warning emitted while loading model parameters.
warnings.filterwarnings('ignore', message='Parameters to load are deprecated')
# Register the environment with Gym so gym.make('DiplomacyEnv-v0') resolves it.
register(
    id='DiplomacyEnv-v0',
    entry_point='diplomacy_research.models.gym.environment:DiplomacyEnv')
| [
"gym.envs.registration.register",
"warnings.filterwarnings"
] | [((1091, 1169), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '"""Parameters to load are deprecated"""'}), "('ignore', message='Parameters to load are deprecated')\n", (1114, 1169), False, 'import warnings\n'), ((1171, 1276), 'gym.envs.registration.register', 'register', ([], {'id': '"""DiplomacyEnv-v0"""', 'entry_point': '"""diplomacy_research.models.gym.environment:DiplomacyEnv"""'}), "(id='DiplomacyEnv-v0', entry_point=\n 'diplomacy_research.models.gym.environment:DiplomacyEnv')\n", (1179, 1276), False, 'from gym.envs.registration import register\n')] |
from bs4 import BeautifulSoup
import time
from kik_unofficial.datatypes.xmpp.base_elements import XMPPElement, XMPPResponse
class Struct:
    """Ad-hoc record: turns keyword arguments into instance attributes."""

    def __init__(self, **fields):
        for name, value in fields.items():
            setattr(self, name, value)
class OutgoingAcknowledgement(XMPPElement):
    """
    Represents an outgoing acknowledgement for a message ID
    """
    def __init__(self, sender_jid, is_receipt, ack_id, group_jid):
        """
        :param sender_jid: JID of the user whose message is acknowledged.
        :param is_receipt: True when acknowledging a receipt rather than a message.
        :param ack_id: ID of the message being acknowledged.
        :param group_jid: group JID the message came from, or None for 1-on-1 chats.
        """
        super().__init__()
        self.sender_jid = sender_jid
        self.group_jid = group_jid
        self.is_receipt = is_receipt
        self.ack_id = ack_id

    def serialize(self):
        """Build and return the raw acknowledgement stanza as bytes."""
        # Kik timestamps are milliseconds since the epoch.
        timestamp = str(int(round(time.time() * 1000)))
        user_ack_data = (
            '<sender jid="{}">'
            '<ack-id receipt="{}">{}</ack-id>'
            '</sender>'
        ).format(self.sender_jid, str(self.is_receipt).lower(), self.ack_id)

        group_ack_data = (
            '<sender jid="{}" g="{}">'
            '<ack-id receipt="{}">{}</ack-id>'
            '</sender>'
        ).format(self.sender_jid, self.group_jid, str(self.is_receipt).lower(), self.ack_id)

        # BUGFIX: the original condition was inverted — group messages were
        # serialized with the non-group stanza, and 1-on-1 messages with a
        # stanza containing g="None". Pick the group form only when a group
        # JID is actually present.
        ack_data = group_ack_data if self.group_jid is not None else user_ack_data

        data = ('<iq type="set" id="{}" cts="{}">'
                '<query xmlns="kik:iq:QoS">'
                '<msg-acks>'
                '{}'
                '</msg-acks>'
                '<history attach="false" />'
                '</query>'
                '</iq>'
                ).format(self.message_id, timestamp, ack_data)

        return data.encode()
class OutgoingHistoryRequest(XMPPElement):
    """
    Represents an outgoing request for the account's messaging history
    """

    def __init__(self):
        super().__init__()

    def serialize(self):
        """Build and return the raw history-request stanza as bytes."""
        # Kik timestamps are milliseconds since the epoch.
        timestamp = str(int(round(time.time() * 1000)))
        stanza = (
            '<iq type="set" id="{}" cts="{}">'
            '<query xmlns="kik:iq:QoS">'
            '<msg-acks />'
            '<history attach="true" />'
            '</query>'
            '</iq>'
        ).format(self.message_id, timestamp)
        return stanza.encode()
class HistoryResponse(XMPPResponse):
    """
    Represents a Kik messaging history response.

    Parses the history payload into a list of lightweight Struct records,
    one per receipt / chat / group-chat entry.
    """
    def __init__(self, data: BeautifulSoup):
        super().__init__(data)
        self.id = data["id"]
        # NOTE(review): self.more, self.from_jid and self.messages are only
        # set when a <history> element is present — callers presumably check
        # for that before accessing them; confirm against call sites.
        if data.query.history:
            # "more" attribute signals that further history pages can be fetched.
            self.more = data.query.history.has_attr("more")
            self.from_jid = data["from"]
            self.messages = []
            # Each child of <history> is one message element; dispatch on its
            # "type" attribute.
            for message in data.query.history:
                if message["type"] == "receipt":
                    # Delivery/read receipt for a previously sent message.
                    args = {
                        'type':'receipt',
                        'from_jid': message["from"],
                        'receipt_type':message.receipt["type"],
                        'id':message.receipt.msgid["id"]
                    }
                    self.messages.append(Struct(**args))
                elif message["type"] == "chat":
                    # 1-on-1 chat message; body/preview may be absent.
                    args = {
                        'type':'chat',
                        'id':message["id"],
                        'from_jid':message["from"],
                        'body': message.body.text if message.body else None,
                        'preview': message.preview.text if message.preview else None,
                        'timestamp': message.kik["timestamp"]
                    }
                    self.messages.append(Struct(**args))
                elif message["type"] == "groupchat":
                    # Group chat message; identical to "chat" plus the group JID.
                    args = {
                        'type': 'groupchat',
                        'id': message["id"],
                        'from_jid': message["from"],
                        'body': message.body.text if message.body else None,
                        'preview': message.preview.text if message.preview else None,
                        'timestamp': message.kik["timestamp"],
                        'group_jid': message.g["jid"]
                    }
                    self.messages.append(Struct(**args))
| [
"time.time"
] | [((647, 658), 'time.time', 'time.time', ([], {}), '()\n', (656, 658), False, 'import time\n'), ((1906, 1917), 'time.time', 'time.time', ([], {}), '()\n', (1915, 1917), False, 'import time\n')] |
import responses
from cloudscale import (
CLOUDSCALE_API_URL,
Cloudscale,
CloudscaleApiException,
CloudscaleException,
)
# Canned API payload describing a single flavor; used to stub HTTP responses in tests.
FLAVOR_RESP = {
    "slug": "flex-2",
    "name": "Flex-2",
    "vcpu_count": 1,
    "memory_gb": 2,
    "zones": [{"slug": "rma1"}, {"slug": "lpg1"}],
}
@responses.activate
def test_flavor_get_all():
    """Listing flavors returns the stubbed flavor payload."""
    # Queue two identical 200 responses followed by a 500; registration
    # order matters, and the client consumes the first match.
    for _ in range(2):
        responses.add(
            responses.GET,
            CLOUDSCALE_API_URL + "/flavors",
            json=[FLAVOR_RESP],
            status=200,
        )
    responses.add(
        responses.GET,
        CLOUDSCALE_API_URL + "/flavors",
        json={},
        status=500,
    )

    api = Cloudscale(api_token="token")
    listing = api.flavor.get_all()
    first = listing[0]
    assert first["slug"] == "flex-2"
    assert first["name"] == "Flex-2"
| [
"responses.add",
"cloudscale.Cloudscale"
] | [((345, 443), 'responses.add', 'responses.add', (['responses.GET', "(CLOUDSCALE_API_URL + '/flavors')"], {'json': '[FLAVOR_RESP]', 'status': '(200)'}), "(responses.GET, CLOUDSCALE_API_URL + '/flavors', json=[\n FLAVOR_RESP], status=200)\n", (358, 443), False, 'import responses\n'), ((457, 555), 'responses.add', 'responses.add', (['responses.GET', "(CLOUDSCALE_API_URL + '/flavors')"], {'json': '[FLAVOR_RESP]', 'status': '(200)'}), "(responses.GET, CLOUDSCALE_API_URL + '/flavors', json=[\n FLAVOR_RESP], status=200)\n", (470, 555), False, 'import responses\n'), ((569, 655), 'responses.add', 'responses.add', (['responses.GET', "(CLOUDSCALE_API_URL + '/flavors')"], {'json': '{}', 'status': '(500)'}), "(responses.GET, CLOUDSCALE_API_URL + '/flavors', json={},\n status=500)\n", (582, 655), False, 'import responses\n'), ((670, 699), 'cloudscale.Cloudscale', 'Cloudscale', ([], {'api_token': '"""token"""'}), "(api_token='token')\n", (680, 699), False, 'from cloudscale import CLOUDSCALE_API_URL, Cloudscale, CloudscaleApiException, CloudscaleException\n')] |