code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
#!/usr/bin/env python3
# Project: OMRON 2JCIE-BU01
# Module:
from setuptools import setup
import sys
if sys.version_info < (3, 6):
raise NotImplementedError("Sorry, you need at least Python 3.6 to use OMRON 2JCIE-BU01.")
import omron_2jcie_bu01
MODNAME = "omron_2jcie_bu01"
setup(
name = "omron-2jcie-bu01",
version = omron_2jcie_bu01.__version__,
description = "API for OMRON 2JCIE-BU01 Environment Sensor",
long_description = omron_2jcie_bu01.__doc__,
author = omron_2jcie_bu01.__author__,
author_email = "<EMAIL>",
url = "https://github.com/nobrin/omron-2jcie-bu01",
py_modules = [MODNAME, f"{MODNAME}.ble", f"{MODNAME}.serial"],
scripts = [f"{MODNAME}/__init__.py", f"{MODNAME}/ble.py", f"{MODNAME}/serial.py"],
install_requires = ["pyserial"],
extras_require = {"ble": ["bleak"]},
license = "MIT",
platforms = "any",
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Topic :: Home Automation",
"Topic :: System :: Hardware",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
| [
"setuptools.setup"
] | [((280, 1164), 'setuptools.setup', 'setup', ([], {'name': '"""omron-2jcie-bu01"""', 'version': 'omron_2jcie_bu01.__version__', 'description': '"""API for OMRON 2JCIE-BU01 Environment Sensor"""', 'long_description': 'omron_2jcie_bu01.__doc__', 'author': 'omron_2jcie_bu01.__author__', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/nobrin/omron-2jcie-bu01"""', 'py_modules': "[MODNAME, f'{MODNAME}.ble', f'{MODNAME}.serial']", 'scripts': "[f'{MODNAME}/__init__.py', f'{MODNAME}/ble.py', f'{MODNAME}/serial.py']", 'install_requires': "['pyserial']", 'extras_require': "{'ble': ['bleak']}", 'license': '"""MIT"""', 'platforms': '"""any"""', 'classifiers': "['Development Status :: 3 - Alpha', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License', 'Topic :: Home Automation',\n 'Topic :: System :: Hardware', 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8']"}), "(name='omron-2jcie-bu01', version=omron_2jcie_bu01.__version__,\n description='API for OMRON 2JCIE-BU01 Environment Sensor',\n long_description=omron_2jcie_bu01.__doc__, author=omron_2jcie_bu01.\n __author__, author_email='<EMAIL>', url=\n 'https://github.com/nobrin/omron-2jcie-bu01', py_modules=[MODNAME,\n f'{MODNAME}.ble', f'{MODNAME}.serial'], scripts=[\n f'{MODNAME}/__init__.py', f'{MODNAME}/ble.py', f'{MODNAME}/serial.py'],\n install_requires=['pyserial'], extras_require={'ble': ['bleak']},\n license='MIT', platforms='any', classifiers=[\n 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License', 'Topic :: Home Automation',\n 'Topic :: System :: Hardware', 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8'])\n", (285, 1164), False, 'from setuptools import setup\n')] |
import time
from lwt.args import parse_args
from lwt.processes import filter_processes
from lwt.report import report
def run():
args = parse_args()
while True:
offending = filter_processes(args)
report(offending)
if not args.monitor:
return
time.sleep(args.monitor_time)
| [
"time.sleep",
"lwt.args.parse_args",
"lwt.processes.filter_processes",
"lwt.report.report"
] | [((149, 161), 'lwt.args.parse_args', 'parse_args', ([], {}), '()\n', (159, 161), False, 'from lwt.args import parse_args\n'), ((199, 221), 'lwt.processes.filter_processes', 'filter_processes', (['args'], {}), '(args)\n', (215, 221), False, 'from lwt.processes import filter_processes\n'), ((231, 248), 'lwt.report.report', 'report', (['offending'], {}), '(offending)\n', (237, 248), False, 'from lwt.report import report\n'), ((307, 336), 'time.sleep', 'time.sleep', (['args.monitor_time'], {}), '(args.monitor_time)\n', (317, 336), False, 'import time\n')] |
from __future__ import absolute_import
from __future__ import with_statement
import pickle
from nose import SkipTest
from kombu import Connection, Consumer, Producer, parse_url
from kombu.connection import Resource
from .mocks import Transport
from .utils import TestCase
from .utils import Mock, skip_if_not_module
class test_connection_utils(TestCase):
def setUp(self):
self.url = 'amqp://user:pass@localhost:5672/my/vhost'
self.nopass = 'amqp://user@localhost:5672/my/vhost'
self.expected = {
'transport': 'amqp',
'userid': 'user',
'password': '<PASSWORD>',
'hostname': 'localhost',
'port': 5672,
'virtual_host': 'my/vhost',
}
def test_parse_url(self):
result = parse_url(self.url)
self.assertDictEqual(result, self.expected)
def test_parse_url_mongodb(self):
result = parse_url('mongodb://example.com/')
self.assertEqual(result['hostname'], 'example.com/')
def test_parse_generated_as_uri(self):
conn = Connection(self.url)
info = conn.info()
for k, v in self.expected.items():
self.assertEqual(info[k], v)
# by default almost the same- no password
self.assertEqual(conn.as_uri(), self.nopass)
self.assertEqual(conn.as_uri(include_password=True), self.url)
@skip_if_not_module('pymongo')
def test_as_uri_when_mongodb(self):
x = Connection('mongodb://localhost')
self.assertTrue(x.as_uri())
def test_bogus_scheme(self):
with self.assertRaises(KeyError):
Connection('bogus://localhost:7421').transport
def assert_info(self, conn, **fields):
info = conn.info()
for field, expected in fields.iteritems():
self.assertEqual(info[field], expected)
def test_rabbitmq_example_urls(self):
# see Appendix A of http://www.rabbitmq.com/uri-spec.html
self.assert_info(
Connection('amqp://user:pass@host:10000/vhost'),
userid='user', password='<PASSWORD>', hostname='host',
port=10000, virtual_host='vhost')
self.assert_info(
Connection('amqp://user%61:%61pass@ho%61st:10000/v%2fhost'),
userid='usera', password='<PASSWORD>',
hostname='hoast', port=10000,
virtual_host='v/host')
self.assert_info(
Connection('amqp://'),
userid='guest', password='<PASSWORD>',
hostname='localhost', port=5672,
virtual_host='/')
self.assert_info(
Connection('amqp://:@/'),
userid='guest', password='<PASSWORD>',
hostname='localhost', port=5672,
virtual_host='/')
self.assert_info(
Connection('amqp://user@/'),
userid='user', password='<PASSWORD>',
hostname='localhost', port=5672,
virtual_host='/')
self.assert_info(
Connection('amqp://user:pass@/'),
userid='user', password='<PASSWORD>',
hostname='localhost', port=5672,
virtual_host='/')
self.assert_info(
Connection('amqp://host'),
userid='guest', password='<PASSWORD>',
hostname='host', port=5672,
virtual_host='/')
self.assert_info(
Connection('amqp://:10000'),
userid='guest', password='<PASSWORD>',
hostname='localhost', port=10000,
virtual_host='/')
self.assert_info(
Connection('amqp:///vhost'),
userid='guest', password='<PASSWORD>',
hostname='localhost', port=5672,
virtual_host='vhost')
self.assert_info(
Connection('amqp://host/'),
userid='guest', password='<PASSWORD>',
hostname='host', port=5672,
virtual_host='/')
self.assert_info(
Connection('amqp://host/%2f'),
userid='guest', password='guest',
hostname='host', port=5672,
virtual_host='/')
def test_url_IPV6(self):
raise SkipTest("urllib can't parse ipv6 urls")
self.assert_info(
Connection('amqp://[::1]'),
userid='guest', password='<PASSWORD>',
hostname='[::1]', port=5672,
virtual_host='/')
class test_Connection(TestCase):
def setUp(self):
self.conn = Connection(port=5672, transport=Transport)
def test_establish_connection(self):
conn = self.conn
conn.connect()
self.assertTrue(conn.connection.connected)
self.assertEqual(conn.host, 'localhost:5672')
channel = conn.channel()
self.assertTrue(channel.open)
self.assertEqual(conn.drain_events(), 'event')
_connection = conn.connection
conn.close()
self.assertFalse(_connection.connected)
self.assertIsInstance(conn.transport, Transport)
def test__enter____exit__(self):
conn = self.conn
context = conn.__enter__()
self.assertIs(context, conn)
conn.connect()
self.assertTrue(conn.connection.connected)
conn.__exit__()
self.assertIsNone(conn.connection)
conn.close() # again
def test_close_survives_connerror(self):
class _CustomError(Exception):
pass
class MyTransport(Transport):
connection_errors = (_CustomError, )
def close_connection(self, connection):
raise _CustomError('foo')
conn = Connection(transport=MyTransport)
conn.connect()
conn.close()
self.assertTrue(conn._closed)
def test_close_when_default_channel(self):
conn = self.conn
conn._default_channel = Mock()
conn._close()
conn._default_channel.close.assert_called_with()
def test_close_when_default_channel_close_raises(self):
class Conn(Connection):
@property
def connection_errors(self):
return (KeyError, )
conn = Conn('memory://')
conn._default_channel = Mock()
conn._default_channel.close.side_effect = KeyError()
conn._close()
conn._default_channel.close.assert_called_with()
def test_revive_when_default_channel(self):
conn = self.conn
defchan = conn._default_channel = Mock()
conn.revive(Mock())
defchan.close.assert_called_with()
self.assertIsNone(conn._default_channel)
def test_ensure_connection(self):
self.assertTrue(self.conn.ensure_connection())
def test_ensure_success(self):
def publish():
return 'foobar'
ensured = self.conn.ensure(None, publish)
self.assertEqual(ensured(), 'foobar')
def test_ensure_failure(self):
class _CustomError(Exception):
pass
def publish():
raise _CustomError('bar')
ensured = self.conn.ensure(None, publish)
with self.assertRaises(_CustomError):
ensured()
def test_ensure_connection_failure(self):
class _ConnectionError(Exception):
pass
def publish():
raise _ConnectionError('failed connection')
self.conn.transport.connection_errors = (_ConnectionError,)
ensured = self.conn.ensure(self.conn, publish)
with self.assertRaises(_ConnectionError):
ensured()
def test_autoretry(self):
myfun = Mock()
myfun.__name__ = 'test_autoretry'
self.conn.transport.connection_errors = (KeyError, )
def on_call(*args, **kwargs):
myfun.side_effect = None
raise KeyError('foo')
myfun.side_effect = on_call
insured = self.conn.autoretry(myfun)
insured()
self.assertTrue(myfun.called)
def test_SimpleQueue(self):
conn = self.conn
q = conn.SimpleQueue('foo')
self.assertIs(q.channel, conn.default_channel)
chan = conn.channel()
q2 = conn.SimpleQueue('foo', channel=chan)
self.assertIs(q2.channel, chan)
def test_SimpleBuffer(self):
conn = self.conn
q = conn.SimpleBuffer('foo')
self.assertIs(q.channel, conn.default_channel)
chan = conn.channel()
q2 = conn.SimpleBuffer('foo', channel=chan)
self.assertIs(q2.channel, chan)
def test_Producer(self):
conn = self.conn
self.assertIsInstance(conn.Producer(), Producer)
self.assertIsInstance(conn.Producer(conn.default_channel), Producer)
def test_Consumer(self):
conn = self.conn
self.assertIsInstance(conn.Consumer(queues=[]), Consumer)
self.assertIsInstance(conn.Consumer(queues=[],
channel=conn.default_channel), Consumer)
def test__repr__(self):
self.assertTrue(repr(self.conn))
def test__reduce__(self):
x = pickle.loads(pickle.dumps(self.conn))
self.assertDictEqual(x.info(), self.conn.info())
def test_channel_errors(self):
class MyTransport(Transport):
channel_errors = (KeyError, ValueError)
conn = Connection(transport=MyTransport)
self.assertTupleEqual(conn.channel_errors, (KeyError, ValueError))
def test_connection_errors(self):
class MyTransport(Transport):
connection_errors = (KeyError, ValueError)
conn = Connection(transport=MyTransport)
self.assertTupleEqual(conn.connection_errors, (KeyError, ValueError))
class test_Connection_with_transport_options(TestCase):
transport_options = {'pool_recycler': 3600, 'echo': True}
def setUp(self):
self.conn = Connection(port=5672, transport=Transport,
transport_options=self.transport_options)
def test_establish_connection(self):
conn = self.conn
self.assertEqual(conn.transport_options, self.transport_options)
class xResource(Resource):
def setup(self):
pass
class ResourceCase(TestCase):
abstract = True
def create_resource(self, limit, preload):
raise NotImplementedError('subclass responsibility')
def assertState(self, P, avail, dirty):
self.assertEqual(P._resource.qsize(), avail)
self.assertEqual(len(P._dirty), dirty)
def test_setup(self):
if self.abstract:
with self.assertRaises(NotImplementedError):
Resource()
def test_acquire__release(self):
if self.abstract:
return
P = self.create_resource(10, 0)
self.assertState(P, 10, 0)
chans = [P.acquire() for _ in xrange(10)]
self.assertState(P, 0, 10)
with self.assertRaises(P.LimitExceeded):
P.acquire()
chans.pop().release()
self.assertState(P, 1, 9)
[chan.release() for chan in chans]
self.assertState(P, 10, 0)
def test_acquire_no_limit(self):
if self.abstract:
return
P = self.create_resource(None, 0)
P.acquire().release()
def test_replace_when_limit(self):
if self.abstract:
return
P = self.create_resource(10, 0)
r = P.acquire()
P._dirty = Mock()
P.close_resource = Mock()
P.replace(r)
P._dirty.discard.assert_called_with(r)
P.close_resource.assert_called_with(r)
def test_replace_no_limit(self):
if self.abstract:
return
P = self.create_resource(None, 0)
r = P.acquire()
P._dirty = Mock()
P.close_resource = Mock()
P.replace(r)
self.assertFalse(P._dirty.discard.called)
P.close_resource.assert_called_with(r)
def test_interface_prepare(self):
if not self.abstract:
return
x = xResource()
self.assertEqual(x.prepare(10), 10)
def test_force_close_all_handles_AttributeError(self):
if self.abstract:
return
P = self.create_resource(10, 10)
cr = P.close_resource = Mock()
cr.side_effect = AttributeError('x')
P.acquire()
self.assertTrue(P._dirty)
P.force_close_all()
def test_force_close_all_no_mutex(self):
if self.abstract:
return
P = self.create_resource(10, 10)
P.close_resource = Mock()
m = P._resource = Mock()
m.mutex = None
m.queue.pop.side_effect = IndexError
P.force_close_all()
def test_add_when_empty(self):
if self.abstract:
return
P = self.create_resource(None, None)
P._resource.queue[:] = []
self.assertFalse(P._resource.queue)
P._add_when_empty()
self.assertTrue(P._resource.queue)
class test_ConnectionPool(ResourceCase):
abstract = False
def create_resource(self, limit, preload):
return Connection(port=5672, transport=Transport).Pool(limit, preload)
def test_setup(self):
P = self.create_resource(10, 2)
q = P._resource.queue
self.assertIsNotNone(q[0]._connection)
self.assertIsNotNone(q[1]._connection)
self.assertIsNone(q[2]()._connection)
def test_setup_no_limit(self):
P = self.create_resource(None, None)
self.assertFalse(P._resource.queue)
self.assertIsNone(P.limit)
def test_prepare_not_callable(self):
P = self.create_resource(None, None)
conn = Connection('memory://')
self.assertIs(P.prepare(conn), conn)
def test_acquire_channel(self):
P = self.create_resource(10, 0)
with P.acquire_channel() as (conn, channel):
self.assertIs(channel, conn.default_channel)
class test_ChannelPool(ResourceCase):
abstract = False
def create_resource(self, limit, preload):
return Connection(port=5672, transport=Transport) \
.ChannelPool(limit, preload)
def test_setup(self):
P = self.create_resource(10, 2)
q = P._resource.queue
self.assertTrue(q[0].basic_consume)
self.assertTrue(q[1].basic_consume)
with self.assertRaises(AttributeError):
getattr(q[2], 'basic_consume')
def test_setup_no_limit(self):
P = self.create_resource(None, None)
self.assertFalse(P._resource.queue)
self.assertIsNone(P.limit)
def test_prepare_not_callable(self):
P = self.create_resource(10, 0)
conn = Connection('memory://')
chan = conn.default_channel
self.assertIs(P.prepare(chan), chan)
| [
"kombu.parse_url",
"pickle.dumps",
"kombu.connection.Resource",
"nose.SkipTest",
"kombu.Connection"
] | [((793, 812), 'kombu.parse_url', 'parse_url', (['self.url'], {}), '(self.url)\n', (802, 812), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((921, 956), 'kombu.parse_url', 'parse_url', (['"""mongodb://example.com/"""'], {}), "('mongodb://example.com/')\n", (930, 956), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((1077, 1097), 'kombu.Connection', 'Connection', (['self.url'], {}), '(self.url)\n', (1087, 1097), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((1471, 1504), 'kombu.Connection', 'Connection', (['"""mongodb://localhost"""'], {}), "('mongodb://localhost')\n", (1481, 1504), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((4284, 4324), 'nose.SkipTest', 'SkipTest', (['"""urllib can\'t parse ipv6 urls"""'], {}), '("urllib can\'t parse ipv6 urls")\n', (4292, 4324), False, 'from nose import SkipTest\n'), ((4603, 4645), 'kombu.Connection', 'Connection', ([], {'port': '(5672)', 'transport': 'Transport'}), '(port=5672, transport=Transport)\n', (4613, 4645), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((5741, 5774), 'kombu.Connection', 'Connection', ([], {'transport': 'MyTransport'}), '(transport=MyTransport)\n', (5751, 5774), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((9371, 9404), 'kombu.Connection', 'Connection', ([], {'transport': 'MyTransport'}), '(transport=MyTransport)\n', (9381, 9404), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((9629, 9662), 'kombu.Connection', 'Connection', ([], {'transport': 'MyTransport'}), '(transport=MyTransport)\n', (9639, 9662), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((9904, 9993), 'kombu.Connection', 'Connection', ([], {'port': '(5672)', 'transport': 'Transport', 'transport_options': 'self.transport_options'}), '(port=5672, transport=Transport, transport_options=self.\n transport_options)\n', (9914, 
9993), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((13665, 13688), 'kombu.Connection', 'Connection', (['"""memory://"""'], {}), "('memory://')\n", (13675, 13688), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((14672, 14695), 'kombu.Connection', 'Connection', (['"""memory://"""'], {}), "('memory://')\n", (14682, 14695), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((1998, 2045), 'kombu.Connection', 'Connection', (['"""amqp://user:pass@host:10000/vhost"""'], {}), "('amqp://user:pass@host:10000/vhost')\n", (2008, 2045), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((2207, 2266), 'kombu.Connection', 'Connection', (['"""amqp://user%61:%61pass@ho%61st:10000/v%2fhost"""'], {}), "('amqp://user%61:%61pass@ho%61st:10000/v%2fhost')\n", (2217, 2266), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((2447, 2468), 'kombu.Connection', 'Connection', (['"""amqp://"""'], {}), "('amqp://')\n", (2457, 2468), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((2647, 2671), 'kombu.Connection', 'Connection', (['"""amqp://:@/"""'], {}), "('amqp://:@/')\n", (2657, 2671), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((2850, 2877), 'kombu.Connection', 'Connection', (['"""amqp://user@/"""'], {}), "('amqp://user@/')\n", (2860, 2877), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((3055, 3087), 'kombu.Connection', 'Connection', (['"""amqp://user:pass@/"""'], {}), "('amqp://user:pass@/')\n", (3065, 3087), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((3265, 3290), 'kombu.Connection', 'Connection', (['"""amqp://host"""'], {}), "('amqp://host')\n", (3275, 3290), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((3464, 3491), 'kombu.Connection', 'Connection', (['"""amqp://:10000"""'], {}), "('amqp://:10000')\n", (3474, 3491), False, 
'from kombu import Connection, Consumer, Producer, parse_url\n'), ((3671, 3698), 'kombu.Connection', 'Connection', (['"""amqp:///vhost"""'], {}), "('amqp:///vhost')\n", (3681, 3698), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((3881, 3907), 'kombu.Connection', 'Connection', (['"""amqp://host/"""'], {}), "('amqp://host/')\n", (3891, 3907), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((4081, 4110), 'kombu.Connection', 'Connection', (['"""amqp://host/%2f"""'], {}), "('amqp://host/%2f')\n", (4091, 4110), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((4364, 4390), 'kombu.Connection', 'Connection', (['"""amqp://[::1]"""'], {}), "('amqp://[::1]')\n", (4374, 4390), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((9146, 9169), 'pickle.dumps', 'pickle.dumps', (['self.conn'], {}), '(self.conn)\n', (9158, 9169), False, 'import pickle\n'), ((1629, 1665), 'kombu.Connection', 'Connection', (['"""bogus://localhost:7421"""'], {}), "('bogus://localhost:7421')\n", (1639, 1665), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((10656, 10666), 'kombu.connection.Resource', 'Resource', ([], {}), '()\n', (10664, 10666), False, 'from kombu.connection import Resource\n'), ((13102, 13144), 'kombu.Connection', 'Connection', ([], {'port': '(5672)', 'transport': 'Transport'}), '(port=5672, transport=Transport)\n', (13112, 13144), False, 'from kombu import Connection, Consumer, Producer, parse_url\n'), ((14045, 14087), 'kombu.Connection', 'Connection', ([], {'port': '(5672)', 'transport': 'Transport'}), '(port=5672, transport=Transport)\n', (14055, 14087), False, 'from kombu import Connection, Consumer, Producer, parse_url\n')] |
import httpsServer
def server():
httpsServer.server() | [
"httpsServer.server"
] | [((35, 55), 'httpsServer.server', 'httpsServer.server', ([], {}), '()\n', (53, 55), False, 'import httpsServer\n')] |
from tfs_integration import TFS_Integration
tfs = TFS_Integration('')
projects = tfs.get_projects()
# Pegando os times e os membros de cada time
for project in projects:
print ("ID: "+ project.id+" - "+project.name)
teams = tfs.get_teams(project_id=project.id)
for team in teams:
print ("Team Name:" + team.name)
team_members = tfs.get_team_members(project_id=project.id, team_id=team.id)
for team_member in team_members:
print("--->"+team_member.display_name)
print(team_member)
| [
"tfs_integration.TFS_Integration"
] | [((51, 70), 'tfs_integration.TFS_Integration', 'TFS_Integration', (['""""""'], {}), "('')\n", (66, 70), False, 'from tfs_integration import TFS_Integration\n')] |
import os
import sys
from .extraction import Pattern, Extraction
__all__ = ['Pattern', 'Extraction', 'Walker']
class Walker:
def __init__(self, pattern, followlinks=False):
if not pattern.startswith('/'):
top = os.getcwd()
self.pattern = Pattern(os.path.join(top, pattern))
else:
self.pattern = Pattern(pattern)
self.top = os.path.dirname(self.pattern.split('<')[0])
self.followlinks = followlinks
if sys.version_info >= (3, 5):
self.walk = self.__walk_3_5__
else:
self.walk = self.__walk_legacy__
def __walk_3_5__(self, top=None):
pattern = self.pattern
followlinks = self.followlinks
if top is None:
top = self.top
ext = []
reduced_pat = None
reduced_ext = []
for dir_entry in os.scandir(top):
matching, extraction = pattern.match(dir_entry.path, extract=True)
if matching:
ext.append(extraction)
elif dir_entry.is_dir():
new_pat = pattern.reduce_pattern(dir_entry.path)[0]
if new_pat is not None:
matching, extraction = new_pat.match(dir_entry.path,
extract=True,
sub=True)
if matching:
reduced_ext.append(extraction)
if reduced_pat is None:
reduced_pat = new_pat
else:
assert new_pat == reduced_pat
yield pattern, ext, reduced_pat, reduced_ext
if reduced_pat is None:
new_pathes = []
else:
new_pathes = [reduced_pat + ext_i for ext_i in reduced_ext]
for new_path in new_pathes:
if followlinks or not os.path.islink(new_path):
for x in self.walk(new_path):
yield x
def __walk_legacy__(self, top=None):
pattern = self.pattern
followlinks = self.followlinks
if top is None:
top = self.top
ext = []
reduced_pat = None
reduced_ext = []
names = os.listdir(top)
names = sorted(names)
for name in names:
path = os.path.join(top, name)
matching, extraction = pattern.match(path, extract=True)
if matching:
ext.append(extraction)
elif os.path.isdir(path):
new_pat = pattern.reduce_pattern(path)[0]
if new_pat is not None:
matching, extraction = new_pat.match(path,
extract=True,
sub=True)
if matching:
reduced_ext.append(extraction)
if reduced_pat is None:
reduced_pat = new_pat
else:
assert new_pat == reduced_pat
yield pattern, ext, reduced_pat, reduced_ext
if reduced_pat is None:
new_pathes = []
else:
new_pathes = [reduced_pat + ext_i for ext_i in reduced_ext]
for new_path in new_pathes:
if followlinks or not os.path.islink(new_path):
for x in self.walk(new_path):
yield x
| [
"os.listdir",
"os.scandir",
"os.path.join",
"os.getcwd",
"os.path.isdir",
"os.path.islink"
] | [((869, 884), 'os.scandir', 'os.scandir', (['top'], {}), '(top)\n', (879, 884), False, 'import os\n'), ((2278, 2293), 'os.listdir', 'os.listdir', (['top'], {}), '(top)\n', (2288, 2293), False, 'import os\n'), ((239, 250), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (248, 250), False, 'import os\n'), ((2370, 2393), 'os.path.join', 'os.path.join', (['top', 'name'], {}), '(top, name)\n', (2382, 2393), False, 'import os\n'), ((286, 312), 'os.path.join', 'os.path.join', (['top', 'pattern'], {}), '(top, pattern)\n', (298, 312), False, 'import os\n'), ((2544, 2563), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2557, 2563), False, 'import os\n'), ((1929, 1953), 'os.path.islink', 'os.path.islink', (['new_path'], {}), '(new_path)\n', (1943, 1953), False, 'import os\n'), ((3408, 3432), 'os.path.islink', 'os.path.islink', (['new_path'], {}), '(new_path)\n', (3422, 3432), False, 'import os\n')] |
#!/usr/bin/python
import os
import slack
import json
client = slack.WebClient(token=os.environ['SLACK_API_TOKEN'])
print("channels_retrieve_test: \n" )
print(client.channels_list())
# still testing files to struct a correct model for it
print("channel test:\n ")
client.channels_history(channel='CQNUEAH2N')
print("replies test : \n")
print(client.channels_replies(channel='CQNUEAH2N',thread_ts='1575037903.018300'))
| [
"slack.WebClient"
] | [((62, 114), 'slack.WebClient', 'slack.WebClient', ([], {'token': "os.environ['SLACK_API_TOKEN']"}), "(token=os.environ['SLACK_API_TOKEN'])\n", (77, 114), False, 'import slack\n')] |
from lettuce import step, world
@step("the empty xml swrl document")
def step_impl(step):
"""
:type step lettuce.core.Step
"""
pass
@step("I retrieve (\d+)")
def step_impl(step, expected):
"""
:type step lettuce.core.Step
"""
assert int(expected) == 1 | [
"lettuce.step"
] | [((35, 70), 'lettuce.step', 'step', (['"""the empty xml swrl document"""'], {}), "('the empty xml swrl document')\n", (39, 70), False, 'from lettuce import step, world\n'), ((152, 177), 'lettuce.step', 'step', (['"""I retrieve (\\\\d+)"""'], {}), "('I retrieve (\\\\d+)')\n", (156, 177), False, 'from lettuce import step, world\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.SpiDetectionTask import SpiDetectionTask
class AlipayEcoTextDetectModel(object):
def __init__(self):
self._task = None
@property
def task(self):
return self._task
@task.setter
def task(self, value):
if isinstance(value, list):
self._task = list()
for i in value:
if isinstance(i, SpiDetectionTask):
self._task.append(i)
else:
self._task.append(SpiDetectionTask.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.task:
if isinstance(self.task, list):
for i in range(0, len(self.task)):
element = self.task[i]
if hasattr(element, 'to_alipay_dict'):
self.task[i] = element.to_alipay_dict()
if hasattr(self.task, 'to_alipay_dict'):
params['task'] = self.task.to_alipay_dict()
else:
params['task'] = self.task
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEcoTextDetectModel()
if 'task' in d:
o.task = d['task']
return o
| [
"alipay.aop.api.domain.SpiDetectionTask.SpiDetectionTask.from_alipay_dict"
] | [((628, 664), 'alipay.aop.api.domain.SpiDetectionTask.SpiDetectionTask.from_alipay_dict', 'SpiDetectionTask.from_alipay_dict', (['i'], {}), '(i)\n', (661, 664), False, 'from alipay.aop.api.domain.SpiDetectionTask import SpiDetectionTask\n')] |
"""Test CNRM-CM5 fixes."""
import unittest
from cf_units import Unit
from iris.cube import Cube
from esmvalcore.cmor.fix import Fix
from esmvalcore.cmor._fixes.cmip5.cnrm_cm5 import Msftmyz, Msftmyzba
class TestMsftmyz(unittest.TestCase):
"""Test msftmyz fixes."""
def setUp(self):
"""Prepare tests."""
self.cube = Cube([1.0], var_name='msftmyz', units='J')
self.fix = Msftmyz(None)
def test_get(self):
"""Test fix get"""
self.assertListEqual(
Fix.get_fixes('CMIP5', 'CNRM-CM5', 'Amon', 'msftmyz'),
[Msftmyz(None)])
def test_fix_data(self):
"""Test data fix."""
cube = self.fix.fix_data(self.cube)
self.assertEqual(cube.data[0], 1.0e6)
self.assertEqual(cube.units, Unit('J'))
class TestMsftmyzba(unittest.TestCase):
"""Test msftmyzba fixes."""
def setUp(self):
"""Prepare tests."""
self.cube = Cube([1.0], var_name='msftmyzba', units='J')
self.fix = Msftmyzba(None)
def test_get(self):
"""Test fix get"""
self.assertListEqual(
Fix.get_fixes('CMIP5', 'CNRM-CM5', 'Amon', 'msftmyzba'),
[Msftmyzba(None)])
def test_fix_data(self):
"""Test data fix."""
cube = self.fix.fix_data(self.cube)
self.assertEqual(cube.data[0], 1.0e6)
self.assertEqual(cube.units, Unit('J'))
| [
"cf_units.Unit",
"esmvalcore.cmor._fixes.cmip5.cnrm_cm5.Msftmyzba",
"esmvalcore.cmor.fix.Fix.get_fixes",
"esmvalcore.cmor._fixes.cmip5.cnrm_cm5.Msftmyz",
"iris.cube.Cube"
] | [((343, 385), 'iris.cube.Cube', 'Cube', (['[1.0]'], {'var_name': '"""msftmyz"""', 'units': '"""J"""'}), "([1.0], var_name='msftmyz', units='J')\n", (347, 385), False, 'from iris.cube import Cube\n'), ((405, 418), 'esmvalcore.cmor._fixes.cmip5.cnrm_cm5.Msftmyz', 'Msftmyz', (['None'], {}), '(None)\n', (412, 418), False, 'from esmvalcore.cmor._fixes.cmip5.cnrm_cm5 import Msftmyz, Msftmyzba\n'), ((938, 982), 'iris.cube.Cube', 'Cube', (['[1.0]'], {'var_name': '"""msftmyzba"""', 'units': '"""J"""'}), "([1.0], var_name='msftmyzba', units='J')\n", (942, 982), False, 'from iris.cube import Cube\n'), ((1002, 1017), 'esmvalcore.cmor._fixes.cmip5.cnrm_cm5.Msftmyzba', 'Msftmyzba', (['None'], {}), '(None)\n', (1011, 1017), False, 'from esmvalcore.cmor._fixes.cmip5.cnrm_cm5 import Msftmyz, Msftmyzba\n'), ((513, 566), 'esmvalcore.cmor.fix.Fix.get_fixes', 'Fix.get_fixes', (['"""CMIP5"""', '"""CNRM-CM5"""', '"""Amon"""', '"""msftmyz"""'], {}), "('CMIP5', 'CNRM-CM5', 'Amon', 'msftmyz')\n", (526, 566), False, 'from esmvalcore.cmor.fix import Fix\n'), ((783, 792), 'cf_units.Unit', 'Unit', (['"""J"""'], {}), "('J')\n", (787, 792), False, 'from cf_units import Unit\n'), ((1112, 1167), 'esmvalcore.cmor.fix.Fix.get_fixes', 'Fix.get_fixes', (['"""CMIP5"""', '"""CNRM-CM5"""', '"""Amon"""', '"""msftmyzba"""'], {}), "('CMIP5', 'CNRM-CM5', 'Amon', 'msftmyzba')\n", (1125, 1167), False, 'from esmvalcore.cmor.fix import Fix\n'), ((1386, 1395), 'cf_units.Unit', 'Unit', (['"""J"""'], {}), "('J')\n", (1390, 1395), False, 'from cf_units import Unit\n'), ((581, 594), 'esmvalcore.cmor._fixes.cmip5.cnrm_cm5.Msftmyz', 'Msftmyz', (['None'], {}), '(None)\n', (588, 594), False, 'from esmvalcore.cmor._fixes.cmip5.cnrm_cm5 import Msftmyz, Msftmyzba\n'), ((1182, 1197), 'esmvalcore.cmor._fixes.cmip5.cnrm_cm5.Msftmyzba', 'Msftmyzba', (['None'], {}), '(None)\n', (1191, 1197), False, 'from esmvalcore.cmor._fixes.cmip5.cnrm_cm5 import Msftmyz, Msftmyzba\n')] |
# !/usr/bin/env python
# coding=utf8
import tempfile
from pdbsync.core.plugins.execute import PyExecute
class DbCreateExecutor(PyExecute):
    """Drop and re-create the destination database.

    Writes a DROP/CREATE SQL script to a temporary file and hands the file
    path to the parent executor (``PyExecute.run``) for execution.
    """

    def __init__(self, dest_db):
        super(DbCreateExecutor, self).__init__(dest_db)

    def run(self, **kwargs):
        """Generate the DROP/CREATE statement and execute it.

        Note: the database name is interpolated directly into the SQL, so it
        must come from trusted configuration, never from user input.
        """
        db_name = self.dest_db.db_name
        create_sql = (
            "DROP DATABASE IF EXISTS `%s`; "
            "CREATE DATABASE `%s` "
            "DEFAULT CHARACTER SET utf8mb4 "
            "COLLATE utf8mb4_unicode_ci;" % (db_name, db_name)
        )
        # BUGFIX: NamedTemporaryFile defaults to mode "w+b"; writing a str to
        # a binary-mode file raises TypeError on Python 3. Open in text mode.
        with tempfile.NamedTemporaryFile(mode="w") as fd:
            fd.write(create_sql)
            fd.flush()
            super(DbCreateExecutor, self).run(fd.name)
| [
"tempfile.NamedTemporaryFile"
] | [((533, 562), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (560, 562), False, 'import tempfile\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
from mxnet.test_utils import default_context, assert_almost_equal, rand_ndarray
import numpy as np
from gluonnlp import optimizer
def compare_ndarray_tuple(t1, t2, rtol=None, atol=None):
    """Recursively compare two ndarrays or (nested) tuples of ndarrays.

    Does nothing if either argument is None.
    """
    if t1 is None or t2 is None:
        return
    if isinstance(t1, tuple):
        for left, right in zip(t1, t2):
            compare_ndarray_tuple(left, right, rtol, atol)
    else:
        assert_almost_equal(t1.asnumpy(), t2.asnumpy(), rtol=rtol, atol=atol)
def compare_optimizer(opt1, opt2, shape, dtype, w_stype='default', g_stype='default',
                      rtol=1e-4, atol=1e-5, compare_states=True):
    """Run one update step of opt1 and opt2 on identical data and compare.

    Weights/gradients are created with storage type ``w_stype``/``g_stype``;
    opt1 receives dense copies, opt2 the originals.
    """
    def _make_pair(stype, **sparse_kw):
        # Returns (dense_for_opt1, original_for_opt2) for the given storage type.
        if stype == 'default':
            dense = mx.random.uniform(shape=shape, ctx=default_context(), dtype=dtype)
            return dense.copyto(default_context()), dense
        if stype == 'row_sparse' or stype == 'csr':
            nd = rand_ndarray(shape, stype, dtype=dtype, **sparse_kw)
            return nd.copyto(default_context()).tostype('default'), nd
        raise Exception("type not supported yet")

    w1, w2 = _make_pair(w_stype, density=1)
    g1, g2 = _make_pair(g_stype)
    state1 = opt1.create_state_multi_precision(0, w1)
    state2 = opt2.create_state_multi_precision(0, w2)
    if compare_states:
        compare_ndarray_tuple(state1, state2)
    opt1.update_multi_precision(0, w1, g1, state1)
    opt2.update_multi_precision(0, w2, g2, state2)
    if compare_states:
        compare_ndarray_tuple(state1, state2, rtol=rtol, atol=atol)
    assert_almost_equal(w1.asnumpy(), w2.asnumpy(), rtol=rtol, atol=atol)
# BERT ADAM
class PyBERTAdam(mx.optimizer.Optimizer):
    """Python reference implementation of BERT-style Adam.

    Note: unlike classic Adam, no bias correction is applied to the first and
    second moment estimates; the raw running averages are used directly.
    """
    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
                 wd=0, **kwargs):
        super(PyBERTAdam, self).__init__(learning_rate=learning_rate, **kwargs)
        self.beta1 = beta1      # decay rate of the first moment (mean)
        self.beta2 = beta2      # decay rate of the second moment (variance)
        self.epsilon = epsilon  # added to the denominator for numerical stability
        self.wd = wd            # decoupled weight decay coefficient
    def create_state(self, index, weight):
        """Create additional optimizer state: mean, variance
        Parameters
        ----------
        weight : NDArray
            The weight data
        """
        # Both moments start at zero, on the same device/dtype as the weight.
        return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype),  # mean
                mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype))  # variance
    def update(self, index, weight, grad, state):
        """Update the parameters.
        Parameters
        ----------
        index : int
            An unique integer key used to index the parameters
        weight : NDArray
            weight ndarray
        grad : NDArray
            grad ndarray
        state : NDArray or other objects returned by init_state
            The auxiliary state used in optimization.
        """
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        self._update_count(index)
        mean, variance = state
        # Rescale before clipping so the clip threshold applies to the
        # rescaled gradient.
        grad = grad * self.rescale_grad
        # clip gradients
        if self.clip_gradient is not None:
            mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient, out=grad)
        # update mean
        mean[:] = self.beta1 * mean + (1. - self.beta1) * grad
        # update variance
        variance[:] = self.beta2 * variance + (1 - self.beta2) * grad.square()
        # include weight decay (decoupled, AdamW-style: added to the update,
        # not to the gradient)
        update = mean / (mx.nd.sqrt(variance) + self.epsilon) + wd * weight
        # update weight
        weight -= lr * update
def test_bert_adam():
    """Check optimizer.BERTAdam against the Python reference implementation
    across dtypes and clip/rescale/weight-decay option combinations."""
    shape = (3, 4, 5)
    cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
    rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
    wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
    for dtype in (np.float16, np.float32, np.float64):
        for cg_option in cg_options:
            for rg_option in rg_options:
                for wd_option in wd_options:
                    kwarg = {**cg_option, **rg_option, **wd_option}
                    try:
                        compare_optimizer(PyBERTAdam(**kwarg), optimizer.BERTAdam(**kwarg),
                                          shape, dtype, rtol=1e-4, atol=2e-5)
                    except ImportError:
                        print('skipping test_bert_adam() because an old version of MXNet is found')
                        return
| [
"mxnet.test_utils.rand_ndarray",
"mxnet.nd.zeros",
"mxnet.nd.clip",
"mxnet.nd.sqrt",
"mxnet.test_utils.default_context"
] | [((1614, 1631), 'mxnet.test_utils.default_context', 'default_context', ([], {}), '()\n', (1629, 1631), False, 'from mxnet.test_utils import default_context, assert_almost_equal, rand_ndarray\n'), ((1700, 1752), 'mxnet.test_utils.rand_ndarray', 'rand_ndarray', (['shape', 'w_stype'], {'density': '(1)', 'dtype': 'dtype'}), '(shape, w_stype, density=1, dtype=dtype)\n', (1712, 1752), False, 'from mxnet.test_utils import default_context, assert_almost_equal, rand_ndarray\n'), ((2006, 2023), 'mxnet.test_utils.default_context', 'default_context', ([], {}), '()\n', (2021, 2023), False, 'from mxnet.test_utils import default_context, assert_almost_equal, rand_ndarray\n'), ((2092, 2133), 'mxnet.test_utils.rand_ndarray', 'rand_ndarray', (['shape', 'g_stype'], {'dtype': 'dtype'}), '(shape, g_stype, dtype=dtype)\n', (2104, 2133), False, 'from mxnet.test_utils import default_context, assert_almost_equal, rand_ndarray\n'), ((3339, 3400), 'mxnet.nd.zeros', 'mx.nd.zeros', (['weight.shape', 'weight.context'], {'dtype': 'weight.dtype'}), '(weight.shape, weight.context, dtype=weight.dtype)\n', (3350, 3400), True, 'import mxnet as mx\n'), ((3426, 3487), 'mxnet.nd.zeros', 'mx.nd.zeros', (['weight.shape', 'weight.context'], {'dtype': 'weight.dtype'}), '(weight.shape, weight.context, dtype=weight.dtype)\n', (3437, 3487), True, 'import mxnet as mx\n'), ((4173, 4240), 'mxnet.nd.clip', 'mx.nd.clip', (['grad', '(-self.clip_gradient)', 'self.clip_gradient'], {'out': 'grad'}), '(grad, -self.clip_gradient, self.clip_gradient, out=grad)\n', (4183, 4240), True, 'import mxnet as mx\n'), ((1559, 1576), 'mxnet.test_utils.default_context', 'default_context', ([], {}), '()\n', (1574, 1576), False, 'from mxnet.test_utils import default_context, assert_almost_equal, rand_ndarray\n'), ((1951, 1968), 'mxnet.test_utils.default_context', 'default_context', ([], {}), '()\n', (1966, 1968), False, 'from mxnet.test_utils import default_context, assert_almost_equal, rand_ndarray\n'), ((4487, 4507), 
'mxnet.nd.sqrt', 'mx.nd.sqrt', (['variance'], {}), '(variance)\n', (4497, 4507), True, 'import mxnet as mx\n'), ((1776, 1793), 'mxnet.test_utils.default_context', 'default_context', ([], {}), '()\n', (1791, 1793), False, 'from mxnet.test_utils import default_context, assert_almost_equal, rand_ndarray\n'), ((2157, 2174), 'mxnet.test_utils.default_context', 'default_context', ([], {}), '()\n', (2172, 2174), False, 'from mxnet.test_utils import default_context, assert_almost_equal, rand_ndarray\n')] |
import sys
from datetime import datetime
# Colour palette used to draw the per-key data series on the status graphs.
LColours = [
    'red', 'green', 'blue', 'purple', 'orange',
    'brown', 'pink', 'cyan', 'magenta', 'yellow',
]
class WebServiceManager:
    """Registry of web services keyed by port.

    Renders HTML status tables, per-service resource graphs and method call
    statistics via the supplied jinja2 environment.
    """

    def __init__(self, jinja2_env):
        self.jinja2_env = jinja2_env
        self.DServices = {}  # TODO: Replace me, directly using the Services instance!

    def set_services(self, services):
        """Attach the Services instance used to start/stop services by port."""
        self.services = services

    def set_logger_parent(self, logger_parent):
        """
        Set the logger parent (a FIFOJSONLog instance)
        :param logger_parent:
        :return:
        """
        # TODO: Move this somewhere more appropriate!
        self.logger_parent = logger_parent

    def iter_services_by_name(self):
        """
        Yield registered services sorted case-insensitively by name.
        :return:
        """
        for service in sorted(
            self.DServices.values(),
            key=lambda service: service.name.lower()
        ):
            yield service

    def iter_services_by_port(self):
        """
        Yield registered services sorted by port number.
        :return:
        """
        for service in sorted(
            self.DServices.values(),
            key=lambda service: service.port
        ):
            yield service

    def iter_service_ports(self):
        """
        Yield the registered port numbers in ascending order.
        :return:
        """
        for port in sorted(self.DServices):
            yield port

    #=====================================================================#
    #                         Manage Web Services                         #
    #=====================================================================#

    def add_service(self, service):
        """
        Register a service, keyed by its original server methods' port.
        :param service:
        :return:
        """
        self.DServices[service.original_server_methods.port] = service

    def remove_service(self, port):
        """
        Remove the service registered at `port`.
        :param port:
        :return:
        """
        del self.DServices[port]

    def start_service(self, port):
        """
        Start the service registered at `port`.
        :param port:
        :return:
        """
        self.services.start_service_by_port(port)

    def stop_service(self, port):
        """
        Stop the service registered at `port`.
        :param port:
        :return:
        """
        self.services.stop_service_by_port(port)

    #=====================================================================#
    #                     Get All Service Status/Stats                    #
    #=====================================================================#

    def get_overall_log(self, offset=None):
        """
        Get the "overall" log for all services
        :param offset: resume position from a previous call (for polling)
        :return: (new offset, HTML string joined with <br> tags)
        """
        offset, LHTML = self.logger_parent.get_html_log(
            offset=offset
        )
        return offset, '<br>'.join(LHTML) + ('<br>' if LHTML else '')

    def get_overall_table_html(self, add_links=True):
        """
        Render the status table for all services as HTML.
        :param add_links: whether to include per-service links
        :return:
        """
        return self.jinja2_env.from_string(
            '{% from "service_macros.html" import service_status_table %}\n'
            '{{ service_status_table(LServiceTable, add_links) }}'
        ).render(
            LServiceTable=self.get_overall_service_table(),
            add_links=add_links
        )

    def get_overall_service_table(self):
        """
        Build the info dict for every service, sorted by name.
        :return:
        """
        L = []
        for service in self.iter_services_by_name():
            L.append(self.get_D_service_info(service.original_server_methods.port))
        return L

    def get_overall_service_methods(self, max_methods=15):
        """
        Get a summary of the methods from all services, sorted
        by total time the method has taken over all calls
        :param max_methods: cap on the number of rows returned (None = all)
        :return: list of (port, service name, method name,
                          num_calls, avg_call_time, total_time) tuples
        """
        L = []
        for service in self.iter_services_by_name():
            DMethodStats = service.get_D_method_stats()
            L.extend([
                (
                    service.original_server_methods.port,
                    service.original_server_methods.name,
                    method_name,
                    D['num_calls'],
                    D['avg_call_time'],
                    D['total_time'])
                for method_name, D
                in DMethodStats.items()
            ])
        # Sort by total_time, descending.
        L.sort(key=lambda i: -i[-1])
        if max_methods is not None:
            L = L[:max_methods]
        return L

    def get_overall_service_methods_html(self, max_methods=15):
        """Render the cross-service method statistics as HTML."""
        L = self.get_overall_service_methods(max_methods)
        return self.jinja2_env.from_string(
            '{% from "service_macros.html" import overall_method_stats_html %}\n'
            '{{ overall_method_stats_html(LMethodStats) }}'
        ).render(
            LMethodStats=L
        )

    #=====================================================================#
    #                    Get Single Service Status/Stats                  #
    #=====================================================================#

    def get_D_service_info(self, port, console_offset=None):
        """
        Build the full info dict for a single service: graphs, console log,
        method stats and table data/HTML.
        :param port:
        :param console_offset: resume position for the console log
        :return:
        """
        service = self.DServices[port]
        stsd = service.service_time_series_data
        recent_values = stsd.get_recent_values()

        offset, LHTML = service.fifo_json_log.get_html_log(
            offset=console_offset
        )
        method_stats_html = self.get_method_stats_html(port)

        D = {
            "graphs": self.__get_D_graphs(recent_values),
            "console_text": '\n'.join(LHTML),
            "console_offset": offset,
            "method_stats_html": method_stats_html
        }
        D.update(self.__get_D_table_info(port, recent_values))
        D["table_html"] = self.__get_table_html(D)
        return D

    def get_method_stats_html(self, port):
        """
        Render the method statistics of one service as HTML.
        :param port:
        :return:
        """
        DMethodStats = self.DServices[port].get_D_method_stats()
        LMethodStats = [
            (method_name, D['num_calls'], D['avg_call_time'], D['total_time'])
            for method_name, D
            in DMethodStats.items()
        ]
        return self.jinja2_env.from_string(
            '{% from "service_macros.html" import method_stats_html %}\n'
            '{{ method_stats_html(LMethodStats) }}'
        ).render(
            LMethodStats=LMethodStats
        )

    def __get_D_table_info(self, port, recent_values):
        """
        Build the scalar status fields shown in the service table row.
        :param port:
        :param recent_values:
        :return:
        """
        service = self.DServices[port]
        return {
            "port": port,
            "name": service.original_server_methods.name,
            "bound_to_tcp": service.tcp_bind,
            "status": service.get_service_status(),
            'workers': len(service.LPIDs),  # TODO: MAKE BASED ON INTERFACE, NOT IMPLEMENTATION!
            'physical_mem': recent_values[-1]['physical_mem'] // 1024 // 1024,
            # We'll average over 3 iterations, as this can spike pretty quickly.
            # BUGFIX: this previously used `range(3)`, whose x=0 term is
            # recent_values[-0] == recent_values[0] - i.e. the sample at the
            # *opposite* end of the list from [-1] and [-2]. range(1, 4)
            # averages the three samples at the same end as the
            # recent_values[-1] lookup above. (Assumes the newest sample is at
            # the end - TODO confirm against service_time_series_data.)
            'cpu': round(sum(recent_values[-x]['cpu_usage_pc'] for x in range(1, 4)) / 3)
        }

    def __get_D_graphs(self, recent_values):
        """
        Build the label/series data for the RAM/IO/CPU graphs.
        :param recent_values:
        :return:
        """
        # NOTE(review): utcfromtimestamp produces naive UTC datetimes and is
        # deprecated in newer Pythons; fine for display labels here.
        labels = [
            datetime.utcfromtimestamp(D['timestamp']).strftime(
                '%H:%M:%S'  # %m/%d
            ) for D in recent_values
        ]
        return {
            "labels": labels,
            "ram": self.__get_data_for_keys(
                recent_values,
                *(
                    # shared_mem isn't reported on Windows.
                    ('shared_mem', 'physical_mem', 'virtual_mem')
                    if sys.platform != 'win32'
                    else ('physical_mem', 'virtual_mem')
                ),
                divisor=1024 * 1024
            ),
            "io": self.__get_data_for_keys(
                recent_values,
                'io_read', 'io_written',
                divisor=1024 * 1024
            ),
            "cpu": self.__get_data_for_keys(
                recent_values,
                'cpu_usage_pc'
            ),
        }

    def __get_data_for_keys(self, values, *keys, divisor=None):
        """
        Extract one series per key, optionally integer-dividing each value.
        :param values: list of per-sample dicts
        :param keys: keys to extract (one colour from LColours per key)
        :param divisor: optional integer divisor (e.g. 1024*1024 for MiB)
        :return: list of [key, values, colour] triples
        """
        LData = []
        for x, key in enumerate(keys):
            LOut = []
            for D in values:
                i = D[key]
                if divisor is not None:
                    i //= divisor
                LOut.append(i)
            LData.append([key, LOut, LColours[x]])
        return LData

    def __get_table_html(self, DService):
        """
        Render a single-row status table for one service.
        :param DService:
        :return:
        """
        return self.jinja2_env.from_string(
            '{% from "service_macros.html" import service_status_table %}\n'
            '{{ service_status_table([DService]) }}'
        ).render(
            DService=DService
        )
| [
"datetime.datetime.utcfromtimestamp"
] | [((7234, 7275), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (["D['timestamp']"], {}), "(D['timestamp'])\n", (7259, 7275), False, 'from datetime import datetime\n')] |
import sys
from sys import argv
sys.path.append("../athene")
import requests
from athene.console import console
from rich.status import Status
# parse identifiers to keys
def parse_key(key: str):
    """Translate a symbolic remote-control key name to its hex key code.

    :param key: symbolic name, e.g. "close" or "volup" (case-sensitive)
    :return: two-character hex code string, or None for unknown keys
    """
    codes = {
        "close": "3B",
        "vidmute": "3E",
        "freeze": "47",
        "voldown": "57",
        "volup": "56",
        "srcpc": "43",
        "srcvid": "46",
        "srcsvid": "45",
        "srclan": "8A",
        "srcbnc": "40",
        "srchdmi": "1D",
    }
    # dict.get returns None for missing keys - same result as the previous
    # try/except KeyError, without using exceptions for control flow.
    return codes.get(key)
# run request to perform control
def remote(args):
    """Send a single key-press to the device's web remote-control endpoint.

    :param args: [ip, key_name] - device IP and symbolic key name
    """
    ip = args[0]
    key = parse_key(args[1])
    if key is None:
        console.print("Invalid key", style="error")
        return
    url = f"http://{ip}:80/cgi-bin/webconf.dll?KEY={key}"
    headers = {
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Referer": f"http://{ip}/cgi-bin/webconf.dll?page=13",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "en-US,en;q=0.9",
        "Connection": "close",
    }
    spinner = Status(
        status="Sending request...",
        spinner="dots"
    )
    spinner.start()
    try:
        # BUGFIX: headers must be passed by keyword - the second positional
        # parameter of requests.get is `params`, so the headers were being
        # sent as query-string parameters instead of HTTP headers.
        resp = requests.get(url, headers=headers)
    except requests.exceptions.ConnectionError:
        console.print("\nConnection failed", style="error")
        return
    finally:
        # Always stop the spinner, on success and on failure alike.
        spinner.stop()
    console.print("\nSuccessfull", style="success")
    console.print(resp.text, style="standard")
if __name__ == "__main__":
    # CLI entry point: argv[1] = device IP, argv[2] = symbolic key name.
    remote(argv[1:])
| [
"athene.console.console.print",
"sys.path.append",
"requests.get",
"rich.status.Status"
] | [((33, 61), 'sys.path.append', 'sys.path.append', (['"""../athene"""'], {}), "('../athene')\n", (48, 61), False, 'import sys\n'), ((1727, 1774), 'athene.console.console.print', 'console.print', (['"""\nSuccessfull"""'], {'style': '"""success"""'}), "('\\nSuccessfull', style='success')\n", (1740, 1774), False, 'from athene.console import console\n'), ((1779, 1821), 'athene.console.console.print', 'console.print', (['resp.text'], {'style': '"""standard"""'}), "(resp.text, style='standard')\n", (1792, 1821), False, 'from athene.console import console\n'), ((717, 760), 'athene.console.console.print', 'console.print', (['"""Invalid key"""'], {'style': '"""error"""'}), "('Invalid key', style='error')\n", (730, 760), False, 'from athene.console import console\n'), ((1406, 1457), 'rich.status.Status', 'Status', ([], {'status': '"""Sending request..."""', 'spinner': '"""dots"""'}), "(status='Sending request...', spinner='dots')\n", (1412, 1457), False, 'from rich.status import Status\n'), ((1531, 1557), 'requests.get', 'requests.get', (['url', 'headers'], {}), '(url, headers)\n', (1543, 1557), False, 'import requests\n'), ((1614, 1668), 'athene.console.console.print', 'console.print', (['"""\nConnection failed"""'], {'style': '"""error"""'}), '("""\nConnection failed""", style=\'error\')\n', (1627, 1668), False, 'from athene.console import console\n')] |
from http import HTTPStatus
import pytest
from django import urls
from crashbin_app.models import Bin, OutgoingMessage, NoteMessage, Label
pytestmark = pytest.mark.django_db
def test_home(admin_client):
    """The home dashboard renders and shows the maintained-bins section."""
    resp = admin_client.get(urls.reverse("home"))
    assert resp.status_code == HTTPStatus.OK
    assert b"Bins you maintain" in resp.content
@pytest.mark.parametrize(
    "query, matches",
    [(None, True), ("test", True), ("Test", True), ("", True), ("blah", False)],
)
def test_report_list(admin_client, report_obj, query, matches):
    """Report-list search matches case-insensitively; unrelated queries hide it."""
    url = urls.reverse("report_list")
    if query is not None:
        url = f"{url}?q={query}"
    resp = admin_client.get(url)
    assert resp.status_code == HTTPStatus.OK
    assert (b">testreport<" in resp.content) == matches
    assert (b'href="/report/1/"' in resp.content) == matches
@pytest.fixture
def report_detail_url(report_obj):
    """URL of the detail page for the test report."""
    return urls.reverse("report_detail", kwargs=dict(pk=report_obj.id))
def test_detail(admin_client, report_obj, report_detail_url):
    """The report detail page renders its title and the debug-log section."""
    resp = admin_client.get(report_detail_url)
    assert resp.status_code == HTTPStatus.OK
    for needle in (b">testreport<", b">Debug log<"):
        assert needle in resp.content
class TestReportReply:
    """POSTing to report_reply creates a reply/note and redirects back."""

    @pytest.fixture
    def report_reply_url(self, report_obj):
        return urls.reverse("report_reply", kwargs=dict(pk=report_obj.id))

    @pytest.mark.parametrize(
        "typ, klass, fragment",
        [("Reply", OutgoingMessage, "#reply-1"), ("Note", NoteMessage, "#note-1")],
    )
    def test_reply(
        self,
        admin_client,
        report_obj,
        report_reply_url,
        report_detail_url,
        typ,
        klass,
        fragment,
    ):
        """A valid reply/note POST creates exactly one message of the right type."""
        body = "test text"
        assert not report_obj.all_messages()
        resp = admin_client.post(report_reply_url, {"typ": typ, "text": body})
        assert resp.status_code == HTTPStatus.FOUND
        assert resp.url == report_detail_url + fragment
        messages = report_obj.all_messages()
        assert len(messages) == 1
        assert isinstance(messages[0], klass)
        assert messages[0].text == body

    def test_invalid_type(self, admin_client, report_obj, report_reply_url):
        """An unknown message type is rejected and creates nothing."""
        resp = admin_client.post(report_reply_url, {"typ": "blabla", "text": "test text"})
        assert resp.status_code == HTTPStatus.BAD_REQUEST
        assert not report_obj.all_messages()

    def test_get(self, admin_client, report_reply_url):
        """The reply endpoint only accepts POST."""
        resp = admin_client.get(report_reply_url)
        assert resp.status_code == HTTPStatus.METHOD_NOT_ALLOWED
@pytest.mark.parametrize(
    "query, matches",
    [(None, True), ("test", True), ("Test", True), ("", True), ("blah", False)],
)
@pytest.mark.parametrize(
    "view, match", [("bin_list", ">testbin<"), ("label_list", ">testlabel<")]
)
def test_lists(admin_client, bin_obj, label_obj, view, query, match, matches):
    """Bin and label list views filter case-insensitively by the search query."""
    url = urls.reverse(view)
    if query is not None:
        url = f"{url}?q={query}"
    resp = admin_client.get(url)
    assert resp.status_code == HTTPStatus.OK
    assert (match in resp.content.decode("utf-8")) == matches
class TestSearchDispatch:
    """The search dispatcher redirects to the scoped list view."""

    @pytest.fixture
    def search_dispatch_url(self):
        return urls.reverse("search_dispatch")

    @pytest.mark.parametrize("scope", ["Reports", "Bins", "Labels"])
    def test_search_dispatch(
        self, search_dispatch_url, admin_client, bin_obj, report_obj, scope
    ):
        """Each scope redirects to a page echoing the (escaped) query."""
        resp = admin_client.get(search_dispatch_url, {"q": "foo&bar", "scope": scope})
        assert resp.status_code == HTTPStatus.FOUND
        page = admin_client.get(resp.url).content.decode()
        assert f"No {scope.lower()} found" in page
        assert f'{scope} matching "foo&bar"' in page

    def test_invalid_scope(
        self, search_dispatch_url, admin_client, bin_obj, report_obj
    ):
        """An unknown scope yields a 400 response."""
        resp = admin_client.get(search_dispatch_url, {"q": "foo", "scope": "blabla"})
        assert resp.status_code == HTTPStatus.BAD_REQUEST
@pytest.fixture
def bin_detail_url(bin_obj):
    """URL of the detail page for the test bin."""
    return urls.reverse("bin_detail", kwargs=dict(pk=bin_obj.id))
@pytest.fixture
def inbox_bin():
    """Fixture: the application's built-in Inbox bin."""
    return Bin.get_inbox()
def test_bin_detail(admin_client, bin_detail_url):
    """The bin detail page renders and contains the bin name."""
    resp = admin_client.get(bin_detail_url)
    assert resp.status_code == HTTPStatus.OK
    assert b">testbin<" in resp.content
class TestBinNewEdit:
    """Tests for the combined bin new/edit view (`bin_new_edit`)."""
    @pytest.fixture
    def bin_new_url(self):
        # URL without pk: "new bin" mode.
        return urls.reverse("bin_new_edit")
    @pytest.fixture
    def bin_edit_url(self, bin_obj):
        # URL with pk: "edit bin" mode for the test bin.
        return urls.reverse("bin_new_edit", kwargs={"pk": bin_obj.id})
    def test_new_post(self, admin_client, bin_new_url):
        """Creating a bin redirects to its detail page."""
        response = admin_client.post(bin_new_url, {"name": "newbin"})
        assert response.status_code == HTTPStatus.FOUND
        new_bin_obj = Bin.objects.get(name="newbin")
        assert new_bin_obj.name == "newbin"
        assert not new_bin_obj.description
        assert response.url == urls.reverse("bin_detail", kwargs={"pk": new_bin_obj.id})
    def test_edit_post(self, admin_client, bin_obj, bin_edit_url, bin_detail_url):
        """Editing persists the new description and redirects to the detail page."""
        data = {"name": bin_obj.name, "description": "Bin description"}
        response = admin_client.post(bin_edit_url, data)
        assert response.status_code == HTTPStatus.FOUND
        assert response.url == bin_detail_url
        bin_obj.refresh_from_db()
        assert bin_obj.description == "Bin description"
    def test_new_get(self, admin_client, bin_new_url):
        """The empty form renders without a delete button."""
        response = admin_client.get(bin_new_url)
        assert response.status_code == HTTPStatus.OK
        content = response.content.decode("utf-8")
        assert '<form method="POST"' in content
        assert "csrfmiddlewaretoken" in content
        assert "New bin" in content
        assert "Delete bin" not in content
    def test_edit_get(self, admin_client, bin_edit_url, bin_obj):
        """The edit form is pre-filled and offers deletion."""
        response = admin_client.get(bin_edit_url)
        assert response.status_code == HTTPStatus.OK
        content = response.content.decode("utf-8")
        assert '<form method="POST"' in content
        assert "csrfmiddlewaretoken" in content
        assert "Edit bin" in content
        assert "Delete bin" in content
        assert bin_obj.name in content
        assert bin_obj.description in content
    def test_edit_inbox(self, admin_client, inbox_bin):
        """The built-in Inbox can neither be deleted nor renamed via the form."""
        url = urls.reverse("bin_new_edit", kwargs={"pk": inbox_bin.id})
        response = admin_client.get(url)
        assert response.status_code == HTTPStatus.OK
        content = response.content.decode("utf-8")
        assert "Delete bin" not in content
        assert f'value="{inbox_bin.name}"' not in content
    def test_edit_inbox_post(self, admin_client, inbox_bin):
        """POSTing a new name for the Inbox is ignored - the name stays."""
        data = {"name": "<NAME>", "description": "Bin description"}
        url = urls.reverse("bin_new_edit", kwargs={"pk": inbox_bin.id})
        response = admin_client.post(url, data)
        assert response.status_code == HTTPStatus.FOUND
        inbox_bin.refresh_from_db()
        assert inbox_bin.name == "Inbox"
    # NOTE: parametrized over a set literal, so case order is unspecified.
    @pytest.mark.parametrize(
        "back_url, is_valid",
        {("/", True), ("/reports", True), ("https://evil.example.com/", False)},
    )
    def test_back_url(
        self, admin_client, bin_obj, bin_edit_url, bin_detail_url, back_url, is_valid
    ):
        """Local ?back= URLs are honoured; external ones fall back to detail."""
        bin_edit_url += f"?back={back_url}"
        data = {"name": bin_obj.name, "description": bin_obj.description}
        response = admin_client.post(bin_edit_url, data)
        assert response.status_code == HTTPStatus.FOUND
        assert response.url == (back_url if is_valid else bin_detail_url)
class TestLabelNewEdit:
    """Tests for the combined label new/edit view (`label_new_edit`)."""
    @pytest.fixture
    def label_new_url(self):
        # URL without pk: "new label" mode.
        return urls.reverse("label_new_edit")
    @pytest.fixture
    def label_edit_url(self, label_obj):
        # URL with pk: "edit label" mode for the test label.
        return urls.reverse("label_new_edit", kwargs={"pk": label_obj.id})
    @pytest.fixture
    def label_list_url(self):
        return urls.reverse("label_list")
    def test_new_post(self, admin_client, label_new_url):
        """Creating a label redirects to the label list."""
        response = admin_client.post(
            label_new_url, {"name": "newlabel", "color": "#424242"}
        )
        assert response.status_code == HTTPStatus.FOUND
        new_label_obj = Label.objects.get(name="newlabel")
        assert new_label_obj.name == "newlabel"
        assert new_label_obj.color == "#424242"
        assert not new_label_obj.description
        assert response.url == urls.reverse("label_list")
    def test_edit_post(self, admin_client, label_obj, label_edit_url, label_list_url):
        """Editing persists description and colour, then redirects to the list."""
        data = {
            "name": label_obj.name,
            "description": "Label description",
            "color": "#424242",
        }
        response = admin_client.post(label_edit_url, data)
        assert response.status_code == HTTPStatus.FOUND
        assert response.url == label_list_url
        label_obj.refresh_from_db()
        assert label_obj.description == "Label description"
        assert label_obj.color == "#424242"
    def test_new_get(self, admin_client, label_new_url):
        """The empty form renders without a delete button."""
        response = admin_client.get(label_new_url)
        assert response.status_code == HTTPStatus.OK
        content = response.content.decode("utf-8")
        assert '<form method="POST"' in content
        assert "csrfmiddlewaretoken" in content
        assert "New label" in content
        assert "Delete label" not in content
    def test_edit_get(self, admin_client, label_edit_url, label_obj):
        """The edit form is pre-filled and offers deletion."""
        response = admin_client.get(label_edit_url)
        assert response.status_code == HTTPStatus.OK
        content = response.content.decode("utf-8")
        assert '<form method="POST"' in content
        assert "csrfmiddlewaretoken" in content
        assert "Edit label" in content
        assert "Delete label" in content
        assert label_obj.name in content
        assert label_obj.description in content
    # NOTE: parametrized over a set literal, so case order is unspecified.
    @pytest.mark.parametrize(
        "back_url, is_valid",
        {("/", True), ("/reports", True), ("https://evil.example.com/", False)},
    )
    def test_back_url(
        self,
        admin_client,
        label_obj,
        label_edit_url,
        label_list_url,
        back_url,
        is_valid,
    ):
        """Local ?back= URLs are honoured; external ones fall back to the list."""
        label_edit_url += f"?back={back_url}"
        data = {
            "name": label_obj.name,
            "description": label_obj.description,
            "color": label_obj.color,
        }
        response = admin_client.post(label_edit_url, data)
        assert response.status_code == HTTPStatus.FOUND
        assert response.url == (back_url if is_valid else label_list_url)
def test_subscribe(bin_obj, admin_user, admin_client):
    """POSTing to bin_subscribe toggles the user's subscription on and off."""
    url = urls.reverse("bin_subscribe", kwargs=dict(pk=bin_obj.id))
    assert admin_user not in bin_obj.subscribers.all()
    # First POST subscribes, second POST unsubscribes.
    for expect_subscribed in (True, False):
        resp = admin_client.post(url)
        assert resp.status_code == HTTPStatus.OK
        assert (admin_user in bin_obj.subscribers.all()) == expect_subscribed
class TestSettings:
    """Tests for the bin/report settings endpoints (maintainer/label/related/bin)."""
    def _get_url(self, obj, setting):
        # Picks the bin or report settings view depending on the object type.
        view = "bin_settings" if isinstance(obj, Bin) else "report_settings"
        return urls.reverse(view, kwargs={"setting": setting, "pk": obj.id})
    # Getting settings
    def test_get_bin_maintainer(self, bin_obj, admin_client):
        """The maintainer picker renders checkboxes with existing users."""
        url = self._get_url(bin_obj, "maintainer")
        response = admin_client.get(url)
        content = response.content.decode("utf-8")
        assert ">Maintainers for testbin<" in content
        assert 'type="checkbox"' in content
        assert ">admin<" in content
    def test_get_bin_label(self, bin_obj, label_obj, admin_client):
        """The bin label picker renders checkboxes with existing labels."""
        url = self._get_url(bin_obj, "label")
        response = admin_client.get(url)
        content = response.content.decode("utf-8")
        assert ">Labels for testbin<" in content
        assert 'type="checkbox"' in content
        assert ">testlabel<" in content
    def test_get_report_label(self, report_obj, label_obj, admin_client):
        """The report label picker renders checkboxes with existing labels."""
        url = self._get_url(report_obj, "label")
        response = admin_client.get(url)
        content = response.content.decode("utf-8")
        assert ">Labels for testreport<" in content
        assert 'type="checkbox"' in content
        assert ">testlabel<" in content
    def test_get_bin_related(self, bin_obj, admin_client):
        """The related-bins picker excludes the bin itself."""
        url = self._get_url(bin_obj, "related")
        response = admin_client.get(url)
        content = response.content.decode("utf-8")
        assert ">Related to testbin<" in content
        assert 'type="checkbox"' in content
        assert ">testbin<" not in content  # Bin can't relate to itself
    def test_get_report_bin(self, report_obj, bin_obj, admin_client):
        """The bin picker uses radio buttons (a report has exactly one bin)."""
        url = self._get_url(report_obj, "bin")
        response = admin_client.get(url)
        content = response.content.decode("utf-8")
        assert ">Bin for testreport<" in content
        assert 'type="radio"' in content
        assert ">testbin<" in content
    def test_get_invalid(self, report_obj, admin_client):
        """An unknown setting name yields a 400 response on GET."""
        url = self._get_url(report_obj, "blabla")
        response = admin_client.get(url)
        assert response.status_code == HTTPStatus.BAD_REQUEST
    # Setting settings
    def test_set_bin_maintainer(
        self, bin_obj, bin_detail_url, admin_client, admin_user
    ):
        """POSTing a maintainer id assigns the maintainer and redirects."""
        assert not bin_obj.maintainers.exists()
        url = self._get_url(bin_obj, "maintainer")
        response = admin_client.post(url, {"maintainer": admin_user.id})
        assert response.status_code == HTTPStatus.FOUND
        assert response.url == bin_detail_url
        assert bin_obj.maintainers.get() == admin_user
    def test_set_bin_label(self, bin_obj, label_obj, bin_detail_url, admin_client):
        """POSTing a label id attaches the label to the bin and redirects."""
        assert not bin_obj.labels.exists()
        url = self._get_url(bin_obj, "label")
        response = admin_client.post(url, {"label": label_obj.id})
        assert response.status_code == HTTPStatus.FOUND
        assert response.url == bin_detail_url
        assert bin_obj.labels.get() == label_obj
    def test_set_report_label(
        self, report_detail_url, report_obj, label_obj, admin_client
    ):
        """POSTing a label id attaches the label to the report and redirects."""
        assert not report_obj.labels.exists()
        url = self._get_url(report_obj, "label")
        response = admin_client.post(url, {"label": label_obj.id})
        assert response.status_code == HTTPStatus.FOUND
        assert response.url == report_detail_url
        assert report_obj.labels.get() == label_obj
    def test_set_bin_related(self, bin_obj, bin_detail_url, admin_client):
        """Relating two bins is symmetric: both sides see the relation."""
        assert not bin_obj.related_bins.exists()
        new_bin = Bin.objects.create(name="related bin")
        url = self._get_url(bin_obj, "related")
        response = admin_client.post(url, {"related": new_bin.id})
        assert response.status_code == HTTPStatus.FOUND
        assert response.url == bin_detail_url
        assert bin_obj.related_bins.get() == new_bin
        assert new_bin.related_bins.get() == bin_obj
    def test_set_report_bin(
        self, report_detail_url, report_obj, bin_obj, inbox_bin, admin_client
    ):
        """POSTing a bin id moves the report into that bin and redirects."""
        assert report_obj.bin == bin_obj
        url = self._get_url(report_obj, "bin")
        response = admin_client.post(url, {"bin": inbox_bin.id})
        assert response.url == report_detail_url
        assert response.status_code == HTTPStatus.FOUND
        report_obj.refresh_from_db()
        assert report_obj.bin == inbox_bin
    def test_set_invalid(self, report_obj, admin_client):
        """An unknown setting name yields a 400 response on POST."""
        url = self._get_url(report_obj, "blabla")
        response = admin_client.post(url)
        assert response.status_code == HTTPStatus.BAD_REQUEST
@pytest.mark.parametrize(
    "view, kwargs",
    [
        ("home", {}),
        ("search_dispatch", {}),
        ("report_list", {}),
        ("report_detail", {"pk": 1}),
        ("report_reply", {"pk": 1}),
        ("report_settings", {"pk": 1, "setting": "label"}),
        ("bin_list", {}),
        ("bin_new_edit", {}),
        ("bin_detail", {"pk": 1}),
        ("bin_subscribe", {"pk": 1}),
        ("bin_settings", {"pk": 1, "setting": "label"}),
        ("label_list", {}),
        ("label_new_edit", {}),
    ],
)
def test_logged_out(client, view, kwargs):
    """Every view redirects anonymous users to the login page with ?next=."""
    target = urls.reverse(view, kwargs=kwargs)
    resp = client.get(target)
    assert resp.status_code == HTTPStatus.FOUND
    assert resp.url == "{}?next={}".format(urls.reverse("login"), target)
@pytest.mark.parametrize(
    "view, method",
    [
        ("report_detail", "get"),
        ("bin_detail", "get"),
        ("bin_new_edit", "get"),
        ("bin_new_edit", "post"),
        ("bin_subscribe", "post"),
        ("label_new_edit", "get"),
        ("label_new_edit", "post"),
        ("report_reply", "post"),
    ],
)
def test_404(admin_client, view, method):
    """Views referencing a non-existent object return 404."""
    url = urls.reverse(view, kwargs={"pk": 1337})
    resp = getattr(admin_client, method)(url)
    assert resp.status_code == HTTPStatus.NOT_FOUND
| [
"crashbin_app.models.Bin.objects.create",
"crashbin_app.models.Label.objects.get",
"pytest.mark.parametrize",
"crashbin_app.models.Bin.get_inbox",
"django.urls.reverse",
"crashbin_app.models.Bin.objects.get"
] | [((367, 490), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""query, matches"""', "[(None, True), ('test', True), ('Test', True), ('', True), ('blah', False)]"], {}), "('query, matches', [(None, True), ('test', True), (\n 'Test', True), ('', True), ('blah', False)])\n", (390, 490), False, 'import pytest\n'), ((2691, 2814), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""query, matches"""', "[(None, True), ('test', True), ('Test', True), ('', True), ('blah', False)]"], {}), "('query, matches', [(None, True), ('test', True), (\n 'Test', True), ('', True), ('blah', False)])\n", (2714, 2814), False, 'import pytest\n'), ((2822, 2925), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""view, match"""', "[('bin_list', '>testbin<'), ('label_list', '>testlabel<')]"], {}), "('view, match', [('bin_list', '>testbin<'), (\n 'label_list', '>testlabel<')])\n", (2845, 2925), False, 'import pytest\n'), ((15938, 16362), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""view, kwargs"""', "[('home', {}), ('search_dispatch', {}), ('report_list', {}), (\n 'report_detail', {'pk': 1}), ('report_reply', {'pk': 1}), (\n 'report_settings', {'pk': 1, 'setting': 'label'}), ('bin_list', {}), (\n 'bin_new_edit', {}), ('bin_detail', {'pk': 1}), ('bin_subscribe', {'pk':\n 1}), ('bin_settings', {'pk': 1, 'setting': 'label'}), ('label_list', {}\n ), ('label_new_edit', {})]"], {}), "('view, kwargs', [('home', {}), ('search_dispatch',\n {}), ('report_list', {}), ('report_detail', {'pk': 1}), ('report_reply',\n {'pk': 1}), ('report_settings', {'pk': 1, 'setting': 'label'}), (\n 'bin_list', {}), ('bin_new_edit', {}), ('bin_detail', {'pk': 1}), (\n 'bin_subscribe', {'pk': 1}), ('bin_settings', {'pk': 1, 'setting':\n 'label'}), ('label_list', {}), ('label_new_edit', {})])\n", (15961, 16362), False, 'import pytest\n'), ((16717, 16979), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""view, method"""', "[('report_detail', 'get'), ('bin_detail', 
'get'), ('bin_new_edit', 'get'),\n ('bin_new_edit', 'post'), ('bin_subscribe', 'post'), ('label_new_edit',\n 'get'), ('label_new_edit', 'post'), ('report_reply', 'post')]"], {}), "('view, method', [('report_detail', 'get'), (\n 'bin_detail', 'get'), ('bin_new_edit', 'get'), ('bin_new_edit', 'post'),\n ('bin_subscribe', 'post'), ('label_new_edit', 'get'), ('label_new_edit',\n 'post'), ('report_reply', 'post')])\n", (16740, 16979), False, 'import pytest\n'), ((571, 598), 'django.urls.reverse', 'urls.reverse', (['"""report_list"""'], {}), "('report_list')\n", (583, 598), False, 'from django import urls\n'), ((930, 989), 'django.urls.reverse', 'urls.reverse', (['"""report_detail"""'], {'kwargs': "{'pk': report_obj.id}"}), "('report_detail', kwargs={'pk': report_obj.id})\n", (942, 989), False, 'from django import urls\n'), ((1416, 1543), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""typ, klass, fragment"""', "[('Reply', OutgoingMessage, '#reply-1'), ('Note', NoteMessage, '#note-1')]"], {}), "('typ, klass, fragment', [('Reply', OutgoingMessage,\n '#reply-1'), ('Note', NoteMessage, '#note-1')])\n", (1439, 1543), False, 'import pytest\n'), ((3016, 3034), 'django.urls.reverse', 'urls.reverse', (['view'], {}), '(view)\n', (3028, 3034), False, 'from django import urls\n'), ((3401, 3464), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""scope"""', "['Reports', 'Bins', 'Labels']"], {}), "('scope', ['Reports', 'Bins', 'Labels'])\n", (3424, 3464), False, 'import pytest\n'), ((4297, 4350), 'django.urls.reverse', 'urls.reverse', (['"""bin_detail"""'], {'kwargs': "{'pk': bin_obj.id}"}), "('bin_detail', kwargs={'pk': bin_obj.id})\n", (4309, 4350), False, 'from django import urls\n'), ((4397, 4412), 'crashbin_app.models.Bin.get_inbox', 'Bin.get_inbox', ([], {}), '()\n', (4410, 4412), False, 'from crashbin_app.models import Bin, OutgoingMessage, NoteMessage, Label\n'), ((7303, 7426), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""back_url, 
is_valid"""', "{('/', True), ('/reports', True), ('https://evil.example.com/', False)}"], {}), "('back_url, is_valid', {('/', True), ('/reports', \n True), ('https://evil.example.com/', False)})\n", (7326, 7426), False, 'import pytest\n'), ((10134, 10257), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""back_url, is_valid"""', "{('/', True), ('/reports', True), ('https://evil.example.com/', False)}"], {}), "('back_url, is_valid', {('/', True), ('/reports', \n True), ('https://evil.example.com/', False)})\n", (10157, 10257), False, 'import pytest\n'), ((10898, 10954), 'django.urls.reverse', 'urls.reverse', (['"""bin_subscribe"""'], {'kwargs': "{'pk': bin_obj.id}"}), "('bin_subscribe', kwargs={'pk': bin_obj.id})\n", (10910, 10954), False, 'from django import urls\n'), ((16521, 16554), 'django.urls.reverse', 'urls.reverse', (['view'], {'kwargs': 'kwargs'}), '(view, kwargs=kwargs)\n', (16533, 16554), False, 'from django import urls\n'), ((17101, 17140), 'django.urls.reverse', 'urls.reverse', (['view'], {'kwargs': "{'pk': 1337}"}), "(view, kwargs={'pk': 1337})\n", (17113, 17140), False, 'from django import urls\n'), ((241, 261), 'django.urls.reverse', 'urls.reverse', (['"""home"""'], {}), "('home')\n", (253, 261), False, 'from django import urls\n'), ((1351, 1409), 'django.urls.reverse', 'urls.reverse', (['"""report_reply"""'], {'kwargs': "{'pk': report_obj.id}"}), "('report_reply', kwargs={'pk': report_obj.id})\n", (1363, 1409), False, 'from django import urls\n'), ((3363, 3394), 'django.urls.reverse', 'urls.reverse', (['"""search_dispatch"""'], {}), "('search_dispatch')\n", (3375, 3394), False, 'from django import urls\n'), ((4693, 4721), 'django.urls.reverse', 'urls.reverse', (['"""bin_new_edit"""'], {}), "('bin_new_edit')\n", (4705, 4721), False, 'from django import urls\n'), ((4795, 4850), 'django.urls.reverse', 'urls.reverse', (['"""bin_new_edit"""'], {'kwargs': "{'pk': bin_obj.id}"}), "('bin_new_edit', kwargs={'pk': bin_obj.id})\n", (4807, 4850), 
False, 'from django import urls\n'), ((5057, 5087), 'crashbin_app.models.Bin.objects.get', 'Bin.objects.get', ([], {'name': '"""newbin"""'}), "(name='newbin')\n", (5072, 5087), False, 'from crashbin_app.models import Bin, OutgoingMessage, NoteMessage, Label\n'), ((6607, 6664), 'django.urls.reverse', 'urls.reverse', (['"""bin_new_edit"""'], {'kwargs': "{'pk': inbox_bin.id}"}), "('bin_new_edit', kwargs={'pk': inbox_bin.id})\n", (6619, 6664), False, 'from django import urls\n'), ((7056, 7113), 'django.urls.reverse', 'urls.reverse', (['"""bin_new_edit"""'], {'kwargs': "{'pk': inbox_bin.id}"}), "('bin_new_edit', kwargs={'pk': inbox_bin.id})\n", (7068, 7113), False, 'from django import urls\n'), ((7956, 7986), 'django.urls.reverse', 'urls.reverse', (['"""label_new_edit"""'], {}), "('label_new_edit')\n", (7968, 7986), False, 'from django import urls\n'), ((8064, 8123), 'django.urls.reverse', 'urls.reverse', (['"""label_new_edit"""'], {'kwargs': "{'pk': label_obj.id}"}), "('label_new_edit', kwargs={'pk': label_obj.id})\n", (8076, 8123), False, 'from django import urls\n'), ((8190, 8216), 'django.urls.reverse', 'urls.reverse', (['"""label_list"""'], {}), "('label_list')\n", (8202, 8216), False, 'from django import urls\n'), ((8473, 8507), 'crashbin_app.models.Label.objects.get', 'Label.objects.get', ([], {'name': '"""newlabel"""'}), "(name='newlabel')\n", (8490, 8507), False, 'from crashbin_app.models import Bin, OutgoingMessage, NoteMessage, Label\n'), ((11444, 11505), 'django.urls.reverse', 'urls.reverse', (['view'], {'kwargs': "{'setting': setting, 'pk': obj.id}"}), "(view, kwargs={'setting': setting, 'pk': obj.id})\n", (11456, 11505), False, 'from django import urls\n'), ((14903, 14941), 'crashbin_app.models.Bin.objects.create', 'Bin.objects.create', ([], {'name': '"""related bin"""'}), "(name='related bin')\n", (14921, 14941), False, 'from crashbin_app.models import Bin, OutgoingMessage, NoteMessage, Label\n'), ((5207, 5264), 'django.urls.reverse', 'urls.reverse', 
(['"""bin_detail"""'], {'kwargs': "{'pk': new_bin_obj.id}"}), "('bin_detail', kwargs={'pk': new_bin_obj.id})\n", (5219, 5264), False, 'from django import urls\n'), ((8681, 8707), 'django.urls.reverse', 'urls.reverse', (['"""label_list"""'], {}), "('label_list')\n", (8693, 8707), False, 'from django import urls\n'), ((16670, 16691), 'django.urls.reverse', 'urls.reverse', (['"""login"""'], {}), "('login')\n", (16682, 16691), False, 'from django import urls\n')] |
import lightnion as lnn
import nacl.public
import base64
def hand(guard, encode=True):
    """Start an ntor handshake with *guard*.

    Returns ``(payload, material)`` where *material* is the tuple
    ``(onion_key, ephemeral_key, identity)`` needed later by :func:`shake`.
    When *encode* is true, the payload is returned base64-encoded as text.
    """
    # Descriptor fields are unpadded base64; extra '=' padding is harmless.
    identity = base64.b64decode(guard['router']['identity'] + '====')
    onion_key = base64.b64decode(guard['ntor-onion-key'] + '====')
    ephemeral_key, payload = lnn.crypto.ntor.hand(identity, onion_key)
    if encode:
        payload = base64.b64encode(payload).decode('utf8')
    return payload, (onion_key, ephemeral_key, identity)
return payload, (onion_key, ephemeral_key, identity)
def shake(payload, material):
    """Finish the ntor handshake and derive key material via the ntor KDF.

    *material* is the ``(onion_key, ephemeral_key, identity)`` tuple
    produced by :func:`hand`; *payload* is the peer's base64 response.
    """
    onion_key, ephemeral_key, identity = material
    raw_payload = base64.b64decode(payload)
    shared = lnn.crypto.ntor.shake(
        ephemeral_key, raw_payload, identity, onion_key, length=92)
    return lnn.crypto.ntor.kdf(shared)
| [
"lightnion.crypto.ntor.shake",
"base64.b64encode",
"base64.b64decode",
"lightnion.crypto.ntor.hand",
"lightnion.crypto.ntor.kdf"
] | [((105, 159), 'base64.b64decode', 'base64.b64decode', (["(guard['router']['identity'] + '====')"], {}), "(guard['router']['identity'] + '====')\n", (121, 159), False, 'import base64\n'), ((176, 226), 'base64.b64decode', 'base64.b64decode', (["(guard['ntor-onion-key'] + '====')"], {}), "(guard['ntor-onion-key'] + '====')\n", (192, 226), False, 'import base64\n'), ((257, 298), 'lightnion.crypto.ntor.hand', 'lnn.crypto.ntor.hand', (['identity', 'onion_key'], {}), '(identity, onion_key)\n', (277, 298), True, 'import lightnion as lnn\n'), ((474, 499), 'base64.b64decode', 'base64.b64decode', (['payload'], {}), '(payload)\n', (490, 499), False, 'import base64\n'), ((566, 643), 'lightnion.crypto.ntor.shake', 'lnn.crypto.ntor.shake', (['ephemeral_key', 'payload', 'identity', 'onion_key'], {'length': '(92)'}), '(ephemeral_key, payload, identity, onion_key, length=92)\n', (587, 643), True, 'import lightnion as lnn\n'), ((664, 693), 'lightnion.crypto.ntor.kdf', 'lnn.crypto.ntor.kdf', (['material'], {}), '(material)\n', (683, 693), True, 'import lightnion as lnn\n'), ((337, 362), 'base64.b64encode', 'base64.b64encode', (['payload'], {}), '(payload)\n', (353, 362), False, 'import base64\n')] |
from django.db import models
# Create your models here.
# example model
class Plant(models.Model):
    """A plant with care requirements and pet-toxicity information."""

    common_name = models.CharField(max_length=100, unique=True)
    # NOTE: these text fields previously used default=False; Django coerces a
    # CharField default to str, so new rows would store the literal "False".
    # An empty string is the conventional "no value" for non-null CharFields.
    img_name = models.CharField(max_length=100, default="")
    sunlight = models.CharField(max_length=100, blank=True)
    moisture = models.CharField(max_length=500, default="", blank=True)
    toxic_to_dogs = models.BooleanField(default=False)
    toxic_to_cats = models.BooleanField(default=False)
    plant_habit = models.CharField(max_length=500, default="", blank=True)
    bloom_period = models.CharField(max_length=500, default="", blank=True)
    humidity = models.CharField(max_length=100, default="", blank=True)
    ph_soil = models.CharField(max_length=100, default="", blank=True)
    description = models.CharField(max_length=500, default="", blank=True)
    # Optional photo upload; null/blank so a plant can exist without one.
    image = models.ImageField(upload_to="images/", null=True, blank=True)
| [
"django.db.models.ImageField",
"django.db.models.CharField",
"django.db.models.BooleanField"
] | [((120, 165), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'unique': '(True)'}), '(max_length=100, unique=True)\n', (136, 165), False, 'from django.db import models\n'), ((181, 228), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'default': '(False)'}), '(max_length=100, default=False)\n', (197, 228), False, 'from django.db import models\n'), ((244, 288), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)'}), '(max_length=100, blank=True)\n', (260, 288), False, 'from django.db import models\n'), ((304, 363), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)', 'default': '(False)', 'blank': '(True)'}), '(max_length=500, default=False, blank=True)\n', (320, 363), False, 'from django.db import models\n'), ((383, 417), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (402, 417), False, 'from django.db import models\n'), ((437, 471), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (456, 471), False, 'from django.db import models\n'), ((490, 549), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)', 'default': '(False)', 'blank': '(True)'}), '(max_length=500, default=False, blank=True)\n', (506, 549), False, 'from django.db import models\n'), ((569, 628), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)', 'default': '(False)', 'blank': '(True)'}), '(max_length=500, default=False, blank=True)\n', (585, 628), False, 'from django.db import models\n'), ((644, 703), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'default': '(False)', 'blank': '(True)'}), '(max_length=100, default=False, blank=True)\n', (660, 703), False, 'from django.db import models\n'), ((718, 777), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': 
'(100)', 'default': '(False)', 'blank': '(True)'}), '(max_length=100, default=False, blank=True)\n', (734, 777), False, 'from django.db import models\n'), ((796, 855), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)', 'default': '(False)', 'blank': '(True)'}), '(max_length=500, default=False, blank=True)\n', (812, 855), False, 'from django.db import models\n'), ((868, 929), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""images/"""', 'null': '(True)', 'blank': '(True)'}), "(upload_to='images/', null=True, blank=True)\n", (885, 929), False, 'from django.db import models\n')] |
from __future__ import absolute_import
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn import functional as F
from scipy.stats import norm
import numpy as np
class VirtualCE(nn.Module):
    """Virtual cross-entropy loss over per-identity centers.

    Each sample is pulled toward the (L2-normalized) mean embedding of its
    own identity within the batch and pushed away from all identity centers,
    via a temperature-scaled softmax over center/sample similarities.
    """

    def __init__(self, beta=0.1):
        super(VirtualCE, self).__init__()
        # Temperature applied to similarities before exponentiation.
        self.beta = beta

    def forward(self, inputs, targets):
        """Compute the loss for a batch of embeddings.

        inputs: (N, d) float tensor of embeddings; targets: (N,) int labels.
        Works on whatever device *inputs* lives on (the original hard-coded
        ``.cuda()`` and the deprecated ``torch.autograd.Variable`` wrapper
        are removed; all derived tensors follow the input's device).
        """
        inputs = F.normalize(inputs, p=2)
        pids = targets.cpu().numpy().tolist()
        # One detached, normalized center per identity present in the batch.
        centers = {
            pid: F.normalize(inputs[targets == pid, :].mean(dim=0, keepdim=True), p=2).detach()
            for pid in set(pids)
        }
        all_centers = torch.cat(list(centers.values()))           # (C, d)
        # Row i holds the center of sample i's identity.
        sample_centers = torch.cat([centers[pid] for pid in pids])   # (N, d)
        # Numerator: each sample's similarity to its own center.
        numerator = torch.diag(torch.exp(sample_centers.mm(inputs.t()) / self.beta))
        # Denominator: similarity mass over all identity centers.
        denominator = torch.exp(all_centers.mm(inputs.t()) / self.beta).sum(dim=0)
        return -torch.log(numerator.div(denominator)).mean()
class VirtualKCE(nn.Module):
    """Virtual cross-entropy with an extra self-similarity negative term.

    Identical to :class:`VirtualCE` except the denominator additionally
    contains each sample's own self-similarity ``diag(X X^T)``.
    """

    def __init__(self, beta=0.1):
        super(VirtualKCE, self).__init__()
        # Temperature applied to similarities before exponentiation.
        self.beta = beta

    def forward(self, inputs, targets):
        """Compute the loss for a batch of embeddings.

        inputs: (N, d) float tensor; targets: (N,) int labels. Device-agnostic:
        the deprecated ``Variable`` wrapper and hard-coded ``.cuda()`` calls
        are removed, so derived tensors follow the input's device.
        """
        inputs = F.normalize(inputs, p=2)
        pids = targets.cpu().numpy().tolist()
        # One detached, normalized center per identity present in the batch.
        centers = {
            pid: F.normalize(inputs[targets == pid, :].mean(dim=0, keepdim=True), p=2).detach()
            for pid in set(pids)
        }
        all_centers = torch.cat(list(centers.values()))           # (C, d)
        sample_centers = torch.cat([centers[pid] for pid in pids])   # (N, d)
        numerator = torch.diag(torch.exp(sample_centers.mm(inputs.t()) / self.beta))
        center_score = torch.exp(all_centers.mm(inputs.t()) / self.beta).sum(dim=0)
        # Self-similarity of each normalized embedding; kept exactly as in the
        # original formulation (no temperature, no exponentiation).
        k_neg_score = torch.diag(inputs.mm(inputs.t()))
        return -torch.log(numerator.div(k_neg_score + center_score)).mean()
| [
"torch.nn.functional.normalize",
"torch.cat"
] | [((434, 458), 'torch.nn.functional.normalize', 'F.normalize', (['inputs'], {'p': '(2)'}), '(inputs, p=2)\n', (445, 458), True, 'from torch.nn import functional as F\n'), ((1543, 1567), 'torch.nn.functional.normalize', 'F.normalize', (['inputs'], {'p': '(2)'}), '(inputs, p=2)\n', (1554, 1567), True, 'from torch.nn import functional as F\n'), ((923, 986), 'torch.cat', 'torch.cat', (['[allCenters[centerPID == pid, :] for pid in allPids]'], {}), '([allCenters[centerPID == pid, :] for pid in allPids])\n', (932, 986), False, 'import torch\n'), ((2004, 2067), 'torch.cat', 'torch.cat', (['[allCenters[centerPID == pid, :] for pid in allPids]'], {}), '([allCenters[centerPID == pid, :] for pid in allPids])\n', (2013, 2067), False, 'import torch\n')] |
import pandas as pd
import pickle as pkl

# Stream the large Stata file in chunks so it never has to fit in memory
# all at once.
data = pd.read_stata("data/BloombergVOTELEVEL_Touse.dta", chunksize=100,
                     convert_categoricals=False)
print("started slicing")
# Collect the columns of interest per chunk and concatenate once at the
# end: the old per-chunk DataFrame.append was deprecated (removed in
# pandas 2.0) and quadratic in the number of chunks.
chunks = [chunk[['Circuit', 'caseid', 'date']] for chunk in data]
data_sliced = pd.concat(chunks, ignore_index=True)
print("started parsing date")
# Keep only the year component of the date.
data_sliced['date'] = data_sliced['date'].dt.year
print("started creating dictionary of circuit year level")
# Map (Circuit, year) -> Series of caseids.
groups = dict(list(data_sliced.groupby(['Circuit', 'date'])['caseid']))
print('dumping into circuit_year_level')
# Context manager ensures the pickle file handle is closed even on error
# (the old pkl.dump(groups, open(...)) leaked the handle).
with open("circuit_year_level", "wb") as out_file:
    pkl.dump(groups, out_file)
| [
"pandas.DataFrame",
"pandas.read_stata"
] | [((46, 143), 'pandas.read_stata', 'pd.read_stata', (['"""data/BloombergVOTELEVEL_Touse.dta"""'], {'chunksize': '(100)', 'convert_categoricals': '(False)'}), "('data/BloombergVOTELEVEL_Touse.dta', chunksize=100,\n convert_categoricals=False)\n", (59, 143), True, 'import pandas as pd\n'), ((150, 164), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (162, 164), True, 'import pandas as pd\n')] |
import os
import sys
from tweepy import API
from tweepy import OAuthHandler
def get_twitter_auth():
    """Build a tweepy OAuth handler for the Twitter API.

    Credentials are read from the TWITTER_* environment variables when set,
    falling back to the baked-in defaults (so existing behavior is kept).
    The original try/except KeyError was dead code: literal assignments can
    never raise KeyError, so the env-var error path could never trigger.

    SECURITY NOTE: credentials should not be hardcoded in source at all;
    prefer setting the environment variables and removing the defaults.
    """
    consumer_key = os.environ.get('TWITTER_CONSUMER_KEY', 'F0imGgOVzAzOpDuayoaIvSTC0')
    consumer_secret_key = os.environ.get('TWITTER_CONSUMER_SECRET', '<KEY>')
    access_token = os.environ.get('TWITTER_ACCESS_TOKEN', '<KEY>')
    access_secret_token = os.environ.get('TWITTER_ACCESS_SECRET', '<KEY>')
    print("Connected Successfully")
    auth = OAuthHandler(consumer_key, consumer_secret_key)
    auth.set_access_token(access_token, access_secret_token)
    return auth
def get_twitter_client():
    """Return a tweepy API client built from the default auth handler."""
    return API(get_twitter_auth())
| [
"sys.stderr.write",
"tweepy.API",
"sys.exit",
"tweepy.OAuthHandler"
] | [((440, 487), 'tweepy.OAuthHandler', 'OAuthHandler', (['consumer_key', 'consumer_secret_key'], {}), '(consumer_key, consumer_secret_key)\n', (452, 487), False, 'from tweepy import OAuthHandler\n'), ((635, 644), 'tweepy.API', 'API', (['auth'], {}), '(auth)\n', (638, 644), False, 'from tweepy import API\n'), ((344, 405), 'sys.stderr.write', 'sys.stderr.write', (['"""Twitter_* environment variables not set\n"""'], {}), "('Twitter_* environment variables not set\\n')\n", (360, 405), False, 'import sys\n'), ((415, 426), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (423, 426), False, 'import sys\n')] |
"""
WSGI config for pinterest_example project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "core.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
try:
from dj_static import Cling
except ImportError:
pass
else:
application = Cling(application)
| [
"os.environ.setdefault",
"dj_static.Cling",
"django.core.wsgi.get_wsgi_application"
] | [((243, 307), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""core.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'core.settings')\n", (264, 307), False, 'import os\n'), ((373, 395), 'django.core.wsgi.get_wsgi_application', 'get_wsgi_application', ([], {}), '()\n', (393, 395), False, 'from django.core.wsgi import get_wsgi_application\n'), ((478, 496), 'dj_static.Cling', 'Cling', (['application'], {}), '(application)\n', (483, 496), False, 'from dj_static import Cling\n')] |
import emoji
print(emoji.emojize('Python é :thumbs_up:')) | [
"emoji.emojize"
] | [((20, 57), 'emoji.emojize', 'emoji.emojize', (['"""Python é :thumbs_up:"""'], {}), "('Python é :thumbs_up:')\n", (33, 57), False, 'import emoji\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from datas.benchmark import Benchmark
from datas.div2k import DIV2K
from models.ecbsr import ECBSR
from torch.utils.data import DataLoader
import math
import argparse, yaml
import utils
import os
from tqdm import tqdm
import logging
import sys
import time
parser = argparse.ArgumentParser(description='ECBSR')

## yaml configuration file (overrides CLI defaults when given)
parser.add_argument('--config', type=str, default=None, help='pre-config file for training')

## parameters of the ECBSR network
parser.add_argument('--scale', type=int, default=2, help='scale for sr network')
parser.add_argument('--colors', type=int, default=1, help='1(Y channls of YCbCr)')
parser.add_argument('--m_ecbsr', type=int, default=4, help='number of ecb')
parser.add_argument('--c_ecbsr', type=int, default=8, help='channels of ecb')
parser.add_argument('--idt_ecbsr', type=int, default=0, help='incorporate identity mapping in ecb or not')
parser.add_argument('--act_type', type=str, default='prelu', help='prelu, relu, splus, rrelu')
parser.add_argument('--pretrain', type=str, default=None, help='path of pretrained model')

## parameters for model training
parser.add_argument('--patch_size', type=int, default=64, help='patch size of HR image')
parser.add_argument('--batch_size', type=int, default=32, help='batch size of training data')
parser.add_argument('--data_repeat', type=int, default=1, help='times of repetition for training data')
parser.add_argument('--data_augment', type=int, default=1, help='data augmentation for training')
parser.add_argument('--epochs', type=int, default=600, help='number of epochs')
parser.add_argument('--test_every', type=int, default=1, help='test the model every N epochs')
parser.add_argument('--log_every', type=int, default=1, help='print log of loss, every N steps')
parser.add_argument('--log_path', type=str, default="./experiments/")
parser.add_argument('--lr', type=float, default=5e-4, help='learning rate of optimizer')
parser.add_argument('--store_in_ram', type=int, default=0, help='store the whole training data in RAM or not')

## hardware specification
parser.add_argument('--gpu_id', type=int, default=0, help='gpu id for training')
parser.add_argument('--threads', type=int, default=1, help='number of threads for training')

## dataset locations (DIV2K for training, standard benchmarks for eval)
parser.add_argument('--div2k_hr_path', type=str, default='/Users/xindongzhang/Documents/SRData/DIV2K/DIV2K_train_HR', help='')
parser.add_argument('--div2k_lr_path', type=str, default='/Users/xindongzhang/Documents/SRData/DIV2K/DIV2K_train_LR_bicubic', help='')
parser.add_argument('--set5_hr_path', type=str, default='/Users/xindongzhang/Documents/SRData/benchmark/Set5/HR', help='')
parser.add_argument('--set5_lr_path', type=str, default='/Users/xindongzhang/Documents/SRData/benchmark/Set5/LR_bicubic', help='')
parser.add_argument('--set14_hr_path', type=str, default='/Users/xindongzhang/Documents/SRData/benchmark/Set14/HR', help='')
parser.add_argument('--set14_lr_path', type=str, default='/Users/xindongzhang/Documents/SRData/benchmark/Set14/LR_bicubic', help='')
parser.add_argument('--b100_hr_path', type=str, default='/Users/xindongzhang/Documents/SRData/benchmark/B100/HR', help='')
parser.add_argument('--b100_lr_path', type=str, default='/Users/xindongzhang/Documents/SRData/benchmark/B100/LR_bicubic', help='')
parser.add_argument('--u100_hr_path', type=str, default='/Users/xindongzhang/Documents/SRData/benchmark/Urban100/HR', help='')
parser.add_argument('--u100_lr_path', type=str, default='/Users/xindongzhang/Documents/SRData/benchmark/Urban100/LR_bicubic', help='')
if __name__ == '__main__':
    args = parser.parse_args()
    # A YAML config file, when given, overrides the CLI defaults in place.
    # NOTE(review): open() here leaks the file handle; a `with` block would
    # close it deterministically.
    if args.config:
       opt = vars(args)
       yaml_args = yaml.load(open(args.config), Loader=yaml.FullLoader)
       opt.update(yaml_args)
    # ECBSR operates on the Y channel only; RGB input is not supported.
    if args.colors == 3:
        raise ValueError("ECBSR is trained and tested with colors=1.")
    ## device selection: requested GPU if available, otherwise CPU
    device = None
    if args.gpu_id >= 0 and torch.cuda.is_available():
        print("use cuda & cudnn for acceleration!")
        print("the gpu id is: {}".format(args.gpu_id))
        device = torch.device('cuda:{}'.format(args.gpu_id))
        torch.backends.cudnn.benchmark = True
    else:
        print("use cpu for training!")
        device = torch.device('cpu')
    torch.set_num_threads(args.threads)
    ## datasets: DIV2K for training, four standard benchmarks for validation
    div2k = DIV2K(
        args.div2k_hr_path,
        args.div2k_lr_path,
        train=True,
        augment=args.data_augment,
        scale=args.scale,
        colors=args.colors,
        patch_size=args.patch_size,
        repeat=args.data_repeat,
        store_in_ram=args.store_in_ram
    )
    set5 = Benchmark(args.set5_hr_path, args.set5_lr_path, scale=args.scale, colors=args.colors, store_in_ram=args.store_in_ram)
    set14 = Benchmark(args.set14_hr_path, args.set14_lr_path, scale=args.scale, colors=args.colors, store_in_ram=args.store_in_ram)
    b100 = Benchmark(args.b100_hr_path, args.b100_lr_path, scale=args.scale, colors=args.colors, store_in_ram=args.store_in_ram)
    u100 = Benchmark(args.u100_hr_path, args.u100_lr_path, scale=args.scale, colors=args.colors, store_in_ram=args.store_in_ram)
    train_dataloader = DataLoader(dataset=div2k, num_workers=args.threads, batch_size=args.batch_size, shuffle=True, pin_memory=True, drop_last=True)
    # Validation runs one image at a time (batch_size=1, no shuffling).
    valid_dataloaders = []
    valid_dataloaders += [{'name': 'set5', 'dataloader': DataLoader(dataset=set5, batch_size=1, shuffle=False)}]
    valid_dataloaders += [{'name': 'set14', 'dataloader': DataLoader(dataset=set14, batch_size=1, shuffle=False)}]
    valid_dataloaders += [{'name': 'b100', 'dataloader': DataLoader(dataset=b100, batch_size=1, shuffle=False)}]
    valid_dataloaders += [{'name': 'u100', 'dataloader': DataLoader(dataset=u100, batch_size=1, shuffle=False)}]
    ## definitions of model, loss, and optimizer
    model = ECBSR(module_nums=args.m_ecbsr, channel_nums=args.c_ecbsr, with_idt=args.idt_ecbsr, act_type=args.act_type, scale=args.scale, colors=args.colors).to(device)
    loss_func = nn.L1Loss()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    # Optionally resume from a pretrained checkpoint.
    if args.pretrain is not None:
        print("load pretrained model: {}!".format(args.pretrain))
        model.load_state_dict(torch.load(args.pretrain))
    else:
        print("train the model from scratch!")
    ## auto-generate the output experiment folder name from the key settings
    timestamp = utils.cur_timestamp_str()
    experiment_name = "ecbsr-x{}-m{}c{}-{}-{}".format(args.scale, args.m_ecbsr, args.c_ecbsr, args.act_type, timestamp)
    experiment_path = os.path.join(args.log_path, experiment_name)
    if not os.path.exists(experiment_path):
        os.makedirs(experiment_path)
    experiment_model_path = os.path.join(experiment_path, 'models')
    if not os.path.exists(experiment_model_path):
        os.makedirs(experiment_model_path)
    # Tee stdout into the experiment's log file.
    log_name = os.path.join(experiment_path, "log.txt")
    sys.stdout = utils.ExperimentLogger(log_name, sys.stdout)
    stat_dict = utils.get_stat_dict()
    ## save training parameters for reproducibility
    exp_params = vars(args)
    exp_params_name = os.path.join(experiment_path, 'config.yml')
    with open(exp_params_name, 'w') as exp_params_file:
        yaml.dump(exp_params, exp_params_file, default_flow_style=False)
    timer_start = time.time()
    for epoch in range(args.epochs):
        epoch_loss = 0.0
        stat_dict['epochs'] = epoch
        model = model.train()
        print("##===========Epoch: {}=============##".format(epoch))
        # NOTE(review): `iter` shadows the Python builtin of the same name.
        for iter, batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            lr, hr = batch
            lr, hr = lr.to(device), hr.to(device)
            sr = model(lr)
            loss = loss_func(sr, hr)
            loss.backward()
            optimizer.step()
            epoch_loss += float(loss)
            if (iter + 1) % args.log_every == 0:
                # Zero-pad step/epoch counters so log lines align.
                cur_steps = (iter+1)*args.batch_size
                total_steps = len(train_dataloader.dataset)
                fill_width = math.ceil(math.log10(total_steps))
                cur_steps = str(cur_steps).zfill(fill_width)
                epoch_width = math.ceil(math.log10(args.epochs))
                cur_epoch = str(epoch).zfill(epoch_width)
                # NOTE(review): this appends the epoch's *running average*
                # loss at each log step, not the per-step loss.
                avg_loss = epoch_loss / (iter + 1)
                stat_dict['losses'].append(avg_loss)
                timer_end = time.time()
                duration = timer_end - timer_start
                timer_start = timer_end
                print("Epoch:{}, {}/{}, loss: {:.4f}, time: {:.3f}".format(cur_epoch, cur_steps, total_steps, avg_loss, duration))
        # Periodic validation on all benchmark sets, plus checkpointing.
        if (epoch + 1) % args.test_every == 0:
            torch.set_grad_enabled(False)
            test_log = ""
            model = model.eval()
            for valid_dataloader in valid_dataloaders:
                avg_psnr = 0.0
                avg_ssim = 0.0
                name = valid_dataloader['name']
                loader = valid_dataloader['dataloader']
                for lr, hr in tqdm(loader, ncols=80):
                    lr, hr = lr.to(device), hr.to(device)
                    sr = model(lr)
                    # crop a `scale`-wide border before scoring, per SR convention
                    hr = hr[:, :, args.scale:-args.scale, args.scale:-args.scale]
                    sr = sr[:, :, args.scale:-args.scale, args.scale:-args.scale]
                    # quantize to the valid 8-bit range
                    hr = hr.clamp(0, 255)
                    sr = sr.clamp(0, 255)
                    # calculate psnr / ssim via project helpers
                    psnr = utils.calc_psnr(sr, hr)
                    ssim = utils.calc_ssim(sr, hr)
                    avg_psnr += psnr
                    avg_ssim += ssim
                avg_psnr = round(avg_psnr/len(loader), 2)
                avg_ssim = round(avg_ssim/len(loader), 4)
                stat_dict[name]['psnrs'].append(avg_psnr)
                stat_dict[name]['ssims'].append(avg_ssim)
                # Track the best PSNR/SSIM (and their epochs) per benchmark.
                if stat_dict[name]['best_psnr']['value'] < avg_psnr:
                    stat_dict[name]['best_psnr']['value'] = avg_psnr
                    stat_dict[name]['best_psnr']['epoch'] = epoch
                if stat_dict[name]['best_ssim']['value'] < avg_ssim:
                    stat_dict[name]['best_ssim']['value'] = avg_ssim
                    stat_dict[name]['best_ssim']['epoch'] = epoch
                test_log += "[{}-X{}], PSNR/SSIM: {:.2f}/{:.4f} (Best: {:.2f}/{:.4f}, Epoch: {}/{})\n".format(
                    name, args.scale, float(avg_psnr), float(avg_ssim),
                    stat_dict[name]['best_psnr']['value'], stat_dict[name]['best_ssim']['value'],
                    stat_dict[name]['best_psnr']['epoch'], stat_dict[name]['best_ssim']['epoch'])
            # print log & flush out
            print(test_log)
            sys.stdout.flush()
            # save model checkpoint for this epoch
            saved_model_path = os.path.join(experiment_model_path, 'model_x{}_{}.pt'.format(args.scale, epoch))
            torch.save(model.state_dict(), saved_model_path)
            torch.set_grad_enabled(True)
        # save stat dict
        ## save training statistics for later inspection
        stat_dict_name = os.path.join(experiment_path, 'stat_dict.yml')
with open(stat_dict_name, 'w') as stat_dict_file:
yaml.dump(stat_dict, stat_dict_file, default_flow_style=False) | [
"datas.benchmark.Benchmark",
"torch.nn.L1Loss",
"models.ecbsr.ECBSR",
"torch.cuda.is_available",
"math.log10",
"os.path.exists",
"argparse.ArgumentParser",
"torch.set_num_threads",
"utils.calc_ssim",
"sys.stdout.flush",
"utils.calc_psnr",
"yaml.dump",
"time.time",
"torch.device",
"utils.... | [((334, 378), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""ECBSR"""'}), "(description='ECBSR')\n", (357, 378), False, 'import argparse, yaml\n'), ((4355, 4390), 'torch.set_num_threads', 'torch.set_num_threads', (['args.threads'], {}), '(args.threads)\n', (4376, 4390), False, 'import torch\n'), ((4404, 4621), 'datas.div2k.DIV2K', 'DIV2K', (['args.div2k_hr_path', 'args.div2k_lr_path'], {'train': '(True)', 'augment': 'args.data_augment', 'scale': 'args.scale', 'colors': 'args.colors', 'patch_size': 'args.patch_size', 'repeat': 'args.data_repeat', 'store_in_ram': 'args.store_in_ram'}), '(args.div2k_hr_path, args.div2k_lr_path, train=True, augment=args.\n data_augment, scale=args.scale, colors=args.colors, patch_size=args.\n patch_size, repeat=args.data_repeat, store_in_ram=args.store_in_ram)\n', (4409, 4621), False, 'from datas.div2k import DIV2K\n'), ((4711, 4833), 'datas.benchmark.Benchmark', 'Benchmark', (['args.set5_hr_path', 'args.set5_lr_path'], {'scale': 'args.scale', 'colors': 'args.colors', 'store_in_ram': 'args.store_in_ram'}), '(args.set5_hr_path, args.set5_lr_path, scale=args.scale, colors=\n args.colors, store_in_ram=args.store_in_ram)\n', (4720, 4833), False, 'from datas.benchmark import Benchmark\n'), ((4841, 4965), 'datas.benchmark.Benchmark', 'Benchmark', (['args.set14_hr_path', 'args.set14_lr_path'], {'scale': 'args.scale', 'colors': 'args.colors', 'store_in_ram': 'args.store_in_ram'}), '(args.set14_hr_path, args.set14_lr_path, scale=args.scale, colors=\n args.colors, store_in_ram=args.store_in_ram)\n', (4850, 4965), False, 'from datas.benchmark import Benchmark\n'), ((4973, 5095), 'datas.benchmark.Benchmark', 'Benchmark', (['args.b100_hr_path', 'args.b100_lr_path'], {'scale': 'args.scale', 'colors': 'args.colors', 'store_in_ram': 'args.store_in_ram'}), '(args.b100_hr_path, args.b100_lr_path, scale=args.scale, colors=\n args.colors, store_in_ram=args.store_in_ram)\n', (4982, 5095), False, 'from 
datas.benchmark import Benchmark\n'), ((5103, 5225), 'datas.benchmark.Benchmark', 'Benchmark', (['args.u100_hr_path', 'args.u100_lr_path'], {'scale': 'args.scale', 'colors': 'args.colors', 'store_in_ram': 'args.store_in_ram'}), '(args.u100_hr_path, args.u100_lr_path, scale=args.scale, colors=\n args.colors, store_in_ram=args.store_in_ram)\n', (5112, 5225), False, 'from datas.benchmark import Benchmark\n'), ((5245, 5376), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'div2k', 'num_workers': 'args.threads', 'batch_size': 'args.batch_size', 'shuffle': '(True)', 'pin_memory': '(True)', 'drop_last': '(True)'}), '(dataset=div2k, num_workers=args.threads, batch_size=args.\n batch_size, shuffle=True, pin_memory=True, drop_last=True)\n', (5255, 5376), False, 'from torch.utils.data import DataLoader\n'), ((6088, 6099), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (6097, 6099), True, 'import torch.nn as nn\n'), ((6436, 6461), 'utils.cur_timestamp_str', 'utils.cur_timestamp_str', ([], {}), '()\n', (6459, 6461), False, 'import utils\n'), ((6604, 6648), 'os.path.join', 'os.path.join', (['args.log_path', 'experiment_name'], {}), '(args.log_path, experiment_name)\n', (6616, 6648), False, 'import os\n'), ((6758, 6797), 'os.path.join', 'os.path.join', (['experiment_path', '"""models"""'], {}), "(experiment_path, 'models')\n", (6770, 6797), False, 'import os\n'), ((6907, 6947), 'os.path.join', 'os.path.join', (['experiment_path', '"""log.txt"""'], {}), "(experiment_path, 'log.txt')\n", (6919, 6947), False, 'import os\n'), ((6965, 7009), 'utils.ExperimentLogger', 'utils.ExperimentLogger', (['log_name', 'sys.stdout'], {}), '(log_name, sys.stdout)\n', (6987, 7009), False, 'import utils\n'), ((7026, 7047), 'utils.get_stat_dict', 'utils.get_stat_dict', ([], {}), '()\n', (7045, 7047), False, 'import utils\n'), ((7131, 7174), 'os.path.join', 'os.path.join', (['experiment_path', '"""config.yml"""'], {}), "(experiment_path, 'config.yml')\n", (7143, 7174), False, 'import 
os\n'), ((7324, 7335), 'time.time', 'time.time', ([], {}), '()\n', (7333, 7335), False, 'import time\n'), ((4024, 4049), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4047, 4049), False, 'import torch\n'), ((4331, 4350), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4343, 4350), False, 'import torch\n'), ((6660, 6691), 'os.path.exists', 'os.path.exists', (['experiment_path'], {}), '(experiment_path)\n', (6674, 6691), False, 'import os\n'), ((6701, 6729), 'os.makedirs', 'os.makedirs', (['experiment_path'], {}), '(experiment_path)\n', (6712, 6729), False, 'import os\n'), ((6809, 6846), 'os.path.exists', 'os.path.exists', (['experiment_model_path'], {}), '(experiment_model_path)\n', (6823, 6846), False, 'import os\n'), ((6856, 6890), 'os.makedirs', 'os.makedirs', (['experiment_model_path'], {}), '(experiment_model_path)\n', (6867, 6890), False, 'import os\n'), ((7239, 7303), 'yaml.dump', 'yaml.dump', (['exp_params', 'exp_params_file'], {'default_flow_style': '(False)'}), '(exp_params, exp_params_file, default_flow_style=False)\n', (7248, 7303), False, 'import argparse, yaml\n'), ((5456, 5509), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'set5', 'batch_size': '(1)', 'shuffle': '(False)'}), '(dataset=set5, batch_size=1, shuffle=False)\n', (5466, 5509), False, 'from torch.utils.data import DataLoader\n'), ((5570, 5624), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'set14', 'batch_size': '(1)', 'shuffle': '(False)'}), '(dataset=set14, batch_size=1, shuffle=False)\n', (5580, 5624), False, 'from torch.utils.data import DataLoader\n'), ((5684, 5737), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'b100', 'batch_size': '(1)', 'shuffle': '(False)'}), '(dataset=b100, batch_size=1, shuffle=False)\n', (5694, 5737), False, 'from torch.utils.data import DataLoader\n'), ((5797, 5850), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'u100', 'batch_size': '(1)', 'shuffle': 
'(False)'}), '(dataset=u100, batch_size=1, shuffle=False)\n', (5807, 5850), False, 'from torch.utils.data import DataLoader\n'), ((5915, 6065), 'models.ecbsr.ECBSR', 'ECBSR', ([], {'module_nums': 'args.m_ecbsr', 'channel_nums': 'args.c_ecbsr', 'with_idt': 'args.idt_ecbsr', 'act_type': 'args.act_type', 'scale': 'args.scale', 'colors': 'args.colors'}), '(module_nums=args.m_ecbsr, channel_nums=args.c_ecbsr, with_idt=args.\n idt_ecbsr, act_type=args.act_type, scale=args.scale, colors=args.colors)\n', (5920, 6065), False, 'from models.ecbsr import ECBSR\n'), ((6295, 6320), 'torch.load', 'torch.load', (['args.pretrain'], {}), '(args.pretrain)\n', (6305, 6320), False, 'import torch\n'), ((8701, 8730), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (8723, 8730), False, 'import torch\n'), ((10790, 10808), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10806, 10808), False, 'import sys\n'), ((11019, 11047), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (11041, 11047), False, 'import torch\n'), ((11145, 11191), 'os.path.join', 'os.path.join', (['experiment_path', '"""stat_dict.yml"""'], {}), "(experiment_path, 'stat_dict.yml')\n", (11157, 11191), False, 'import os\n'), ((8407, 8418), 'time.time', 'time.time', ([], {}), '()\n', (8416, 8418), False, 'import time\n'), ((9041, 9063), 'tqdm.tqdm', 'tqdm', (['loader'], {'ncols': '(80)'}), '(loader, ncols=80)\n', (9045, 9063), False, 'from tqdm import tqdm\n'), ((11270, 11332), 'yaml.dump', 'yaml.dump', (['stat_dict', 'stat_dict_file'], {'default_flow_style': '(False)'}), '(stat_dict, stat_dict_file, default_flow_style=False)\n', (11279, 11332), False, 'import argparse, yaml\n'), ((8063, 8086), 'math.log10', 'math.log10', (['total_steps'], {}), '(total_steps)\n', (8073, 8086), False, 'import math\n'), ((8190, 8213), 'math.log10', 'math.log10', (['args.epochs'], {}), '(args.epochs)\n', (8200, 8213), False, 'import math\n'), ((9528, 9551), 
'utils.calc_psnr', 'utils.calc_psnr', (['sr', 'hr'], {}), '(sr, hr)\n', (9543, 9551), False, 'import utils\n'), ((9586, 9609), 'utils.calc_ssim', 'utils.calc_ssim', (['sr', 'hr'], {}), '(sr, hr)\n', (9601, 9609), False, 'import utils\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------
# PyTorch Lightning PBT
# Authors: <NAME>
# Rocket Romero
# Updated: May. 2020
# ---------------------
""" Root. """
import logging as python_logging
_logger = python_logging.getLogger('lightning:pbt:lab')
python_logging.basicConfig(level=python_logging.INFO)
__version__ = '0.0.1'
__author__ = '<NAME>' 'Rocket Romero'
__author_email__ = '<EMAIL>' '<EMAIL>'
__homepage__ = 'https://github.com/corypaik/pytorch-lightning-pbt'
__docs__ = "PyTorch Lightning PBT."
| [
"logging.getLogger",
"logging.basicConfig"
] | [((243, 288), 'logging.getLogger', 'python_logging.getLogger', (['"""lightning:pbt:lab"""'], {}), "('lightning:pbt:lab')\n", (267, 288), True, 'import logging as python_logging\n'), ((289, 342), 'logging.basicConfig', 'python_logging.basicConfig', ([], {'level': 'python_logging.INFO'}), '(level=python_logging.INFO)\n', (315, 342), True, 'import logging as python_logging\n')] |
# -*- coding: utf-8 -*-
"""
Patch for aiologger.logger.Logger
aiologger (https://github.com/B2W-BIT/aiologger) implements 'async/await' syntax
for logging. But there is a problem with aiologger working in Windows.
The stdout and stderr streams in aiologger are connected as pipes. But in
Windows it doesn't work.
In the DSLogger we use the modified stream handler DSAsyncStreamHandler.
And there is a method for changing of logging level yet.
"""
import logging
import sys
from asyncio import AbstractEventLoop
from typing import Optional, Union
from aiologger.filters import StdoutFilter
from aiologger.logger import Logger
from directory_scan.dsaiologger.handlers import DSAsyncStreamHandler
LoggingLevel = Union[int, str]
OptionalLoggingFormatter = Optional[logging.Formatter]
OptionalAbstractEventLoop = Optional[AbstractEventLoop]
class DSLogger(Logger):
    """Patch for class aiologger.logger.Logger.
    There is patched method `DSLogger.with_default_handlers` (use modified class
    DSAsyncStreamHandler). And method `DSLogger.set_level` is added.
    """
    def __init__(
        self,
        *,
        name: str = "dslogger",
        level: LoggingLevel = logging.NOTSET,
        loop: OptionalAbstractEventLoop = None
    ) -> None:
        """Initialize the logger and reserve slots for the two stream handlers."""
        super().__init__(name=name, level=level, loop=loop)
        # Populated by with_default_handlers(); None until then.
        self._stdout_handler: DSAsyncStreamHandler = None
        self._stderr_handler: DSAsyncStreamHandler = None
    @classmethod
    def with_default_handlers(
        cls,
        *,
        name: str = "dslogger",
        level: LoggingLevel = logging.NOTSET,
        formatter: OptionalLoggingFormatter = None,
        loop: OptionalAbstractEventLoop = None,
        **kwargs,
    ):
        """Create a logger wired to stdout/stderr via DSAsyncStreamHandler.

        The stdout handler honours the requested `level` and carries a
        StdoutFilter (presumably to keep warnings/errors off stdout —
        confirm against aiologger's StdoutFilter semantics); the stderr
        handler is pinned to WARNING regardless of `level`.
        """
        self = cls(name=name, level=level, loop=loop, **kwargs)
        self._stdout_handler = DSAsyncStreamHandler(
            stream=sys.stdout,
            level=level,
            formatter=formatter,
            filter=StdoutFilter()
        )
        self._stderr_handler = DSAsyncStreamHandler(
            stream=sys.stderr,
            level=logging.WARNING,
            formatter=formatter
        )
        self.addHandler(self._stdout_handler)
        self.addHandler(self._stderr_handler)
        return self
    async def set_level(self, level):
        """Set logging level"""
        # Flush pending stdout records before switching so records emitted
        # under the old level are not lost.
        if self._stdout_handler.writer is not None:
            await self._stdout_handler.flush()
        # Drop cached level checks computed for the old level.
        self._cache.clear()
        super().setLevel(level)
        self._stdout_handler.setLevel(level)
| [
"aiologger.filters.StdoutFilter",
"directory_scan.dsaiologger.handlers.DSAsyncStreamHandler"
] | [((2056, 2144), 'directory_scan.dsaiologger.handlers.DSAsyncStreamHandler', 'DSAsyncStreamHandler', ([], {'stream': 'sys.stderr', 'level': 'logging.WARNING', 'formatter': 'formatter'}), '(stream=sys.stderr, level=logging.WARNING, formatter=\n formatter)\n', (2076, 2144), False, 'from directory_scan.dsaiologger.handlers import DSAsyncStreamHandler\n'), ((2000, 2014), 'aiologger.filters.StdoutFilter', 'StdoutFilter', ([], {}), '()\n', (2012, 2014), False, 'from aiologger.filters import StdoutFilter\n')] |
import os
from unittest import TestCase
from inbm_vision_lib.xml_handler import XmlHandler, XmlException
from inbm_vision_lib.ota_parser import ParseException, get_children, parse_pota
MISSING_HEADER_XML = '<?xml version="1.0" encoding="utf-8"?> <manifest><type>ota</type>' \
'<ota><type><fota name="sample"><targets><target>123ABC</target></targets>' \
'<biosversion>5.12</biosversion><vendor>American Megatrends Inc.</vendor> ' \
'<manufacturer>Default string</manufacturer><product>Default string</product> ' \
'<releasedate>2018-03-30</releasedate> ' \
'<path>/var/cache/manageability/X041_BIOS.tar</path> ' \
'</fota></type></ota> </manifest>'
POTA_GOOD_MANIFEST = '<?xml version="1.0" encoding="utf-8"?><manifest><type>ota</type><ota><header>' \
'<type>pota</type><repo>local</repo></header><type><pota><targetType>node</targetType>' \
'<targets><target>None</target></targets><fota name="sample"><biosversion>5.12</biosversion>' \
'<manufacturer>intel</manufacturer><product>kmb-hddl2</product><vendor>Intel</vendor>' \
'<releasedate>2021-12-25</releasedate>' \
'<path>/var/cache/manageability/repository-tool/test_fota</path></fota><sota>' \
'<cmd logtofile="y">update</cmd><release_date>2021-12-25</release_date>' \
'<path>/var/cache/manageability/repository-tool/test_sota</path></sota>' \
'</pota></type></ota></manifest>'
PARSED_POTA = {'biosversion': '5.12',
'cmd': 'update',
'fota_path': '/var/cache/manageability/repository-tool/test_fota',
'fota_signature': None,
'logtofile': 'y',
'manufacturer': 'intel',
'product': 'kmb-hddl2',
'release_date': '2021-12-25',
'releasedate': '2021-12-25',
'sota_path': '/var/cache/manageability/repository-tool/test_sota',
'sota_signature': None,
'vendor': 'Intel'}
TEST_SCHEMA_LOCATION = os.path.join(os.path.dirname(__file__),
'./vision_manifest_schema.xsd')
class TestManParser(TestCase):
    """Tests for vision manifest parsing helpers (XmlHandler / parse_pota)."""
    def setUp(self) -> None:
        # Show full diffs when the large parsed-dict comparison fails.
        self.maxDiff = None
    def test_missing_header_throws(self):
        # A manifest without an <ota><header> element must raise ParseException.
        with self.assertRaises(ParseException):
            get_children(XmlHandler(MISSING_HEADER_XML), 'ota/header')
    def test_parse_pota_pass(self):
        # A schema-valid POTA manifest parses into the expected flat dict.
        parsed = XmlHandler(xml=POTA_GOOD_MANIFEST, schema_location=TEST_SCHEMA_LOCATION)
        parsed_dict = parse_pota(parsed, 'ota/type/pota')
        self.assertEqual(parsed_dict, PARSED_POTA)
| [
"inbm_vision_lib.xml_handler.XmlHandler",
"os.path.dirname",
"inbm_vision_lib.ota_parser.parse_pota"
] | [((2173, 2198), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2188, 2198), False, 'import os\n'), ((2575, 2647), 'inbm_vision_lib.xml_handler.XmlHandler', 'XmlHandler', ([], {'xml': 'POTA_GOOD_MANIFEST', 'schema_location': 'TEST_SCHEMA_LOCATION'}), '(xml=POTA_GOOD_MANIFEST, schema_location=TEST_SCHEMA_LOCATION)\n', (2585, 2647), False, 'from inbm_vision_lib.xml_handler import XmlHandler, XmlException\n'), ((2670, 2705), 'inbm_vision_lib.ota_parser.parse_pota', 'parse_pota', (['parsed', '"""ota/type/pota"""'], {}), "(parsed, 'ota/type/pota')\n", (2680, 2705), False, 'from inbm_vision_lib.ota_parser import ParseException, get_children, parse_pota\n'), ((2475, 2505), 'inbm_vision_lib.xml_handler.XmlHandler', 'XmlHandler', (['MISSING_HEADER_XML'], {}), '(MISSING_HEADER_XML)\n', (2485, 2505), False, 'from inbm_vision_lib.xml_handler import XmlHandler, XmlException\n')] |
from machine import Machine, Plot, Colors
import numpy as np
class MyPlot(Plot):
    """Frame renderer: paints four '@' markers per animation step."""
    width = 65
    steps = width // 2
    copies = 1
    char = "."  # presumably the background fill character — confirm in Plot
    spacing = 0
    default_color = Colors.BLUE
    def __call__(self, plane, loop, step, *args, **kwargs):
        """Paint one animation step onto `plane` (an indexable frame buffer)."""
        # Pair positioned symmetrically around the centre, moving outward as
        # `step` grows; colour alternates white/green on even/odd loops.
        char = Colors.white("@") if loop % 2 == 0 else Colors.green("@")
        pos = ((self.width // 2) - step)
        plane[pos] = char
        pos = ((self.width // 2) + step)
        plane[pos] = char
        # Pair moving inward from both ends; orange/red alternation.
        char = Colors.orange("@") if loop % 2 == 0 else Colors.red("@")
        plane[step] = char
        plane[-step] = char
if __name__ == "__main__":
machine = Machine(MyPlot, delay=0.02)
machine.run()
| [
"machine.Colors.red",
"machine.Colors.green",
"machine.Machine",
"machine.Colors.white",
"machine.Colors.orange"
] | [((638, 665), 'machine.Machine', 'Machine', (['MyPlot'], {'delay': '(0.02)'}), '(MyPlot, delay=0.02)\n', (645, 665), False, 'from machine import Machine, Plot, Colors\n'), ((275, 292), 'machine.Colors.white', 'Colors.white', (['"""@"""'], {}), "('@')\n", (287, 292), False, 'from machine import Machine, Plot, Colors\n'), ((315, 332), 'machine.Colors.green', 'Colors.green', (['"""@"""'], {}), "('@')\n", (327, 332), False, 'from machine import Machine, Plot, Colors\n'), ((483, 501), 'machine.Colors.orange', 'Colors.orange', (['"""@"""'], {}), "('@')\n", (496, 501), False, 'from machine import Machine, Plot, Colors\n'), ((524, 539), 'machine.Colors.red', 'Colors.red', (['"""@"""'], {}), "('@')\n", (534, 539), False, 'from machine import Machine, Plot, Colors\n')] |
"""Group Revit built-in categories logically and output the data in json
The built-in categories are provided in text files under DATA_DIR
Usage:
python3 ./cgroups.py group and output categories
python3 ./cgroups.py <catname> group and output <catname> category only
"""
# pylint: disable=bad-continuation
import sys
import os
import os.path as op
from typing import Set, List, TypeVar
import json
import re
DATA_DIR = "./bic_data"
# Fix: PEP 484 requires the TypeVar's string name to match the variable it
# is assigned to; it was previously TypeVar("CGROUP").
CGROUP_T = TypeVar("CGROUP_T")  # pylint: disable=invalid-name
class CGROUP:
    """Represents a category grouping.

    Args:
        name: display name of the group.
        exclusives: regex patterns matching categories claimed exclusively
            by this group (a conflict with an already-used name is an error).
        includes: regex patterns matching categories included
            non-exclusively.
        excludes: regex patterns removing matches from exclusives/includes.
        cgroups: nested sub-groups.
        hidden: hidden groups are omitted from the generated components.
    """
    def __init__(
        self,
        name: str,
        exclusives: List[str],
        includes: List[str],
        excludes: List[str],
        cgroups: List[CGROUP_T],
        hidden: bool = False,
    ):
        self.name: str = name
        self.exclusives: List[str] = exclusives
        self.includes: List[str] = includes
        self.excludes: List[str] = excludes
        self.cgroups: List[CGROUP_T] = cgroups
        self.hidden: bool = hidden
class CategoryComp:
    """A single category-selector component: a name plus its categories."""
    def __init__(self, name: str, categories: List[str]):
        """Store the component name and its category payload verbatim."""
        self.name, self.categories = name, categories
class CategoryCompCollection:
    """Represents data for a collection of category selector components.

    Args:
        version: version string recorded in the metadata.
        bics: all builtin category names. Must be a set — it is diffed
            against `used_bics`; the previous List[str] annotation was
            incorrect (callers pass the set returned by load_bics()).
        components: the generated selector components.
        used_bics: the subset of `bics` claimed by some group.
    """
    def __init__(
        self,
        version: str,
        bics: Set[str],
        components: List["CategoryComp"],
        used_bics: Set[str],
    ):
        self.meta = {
            "version": version,
            "total": len(bics),
            "included": len(used_bics),
            # Names never claimed by any group, kept for auditability.
            "excluded": list(bics.difference(used_bics)),
        }
        self.components = components
# =============================================================================
# this is a hand-crafted tree of CGroups that represents the grouping logic
# -----------------------------------------------------------------------------
CGROUPS = [
CGROUP(
name="Skip",
exclusives=[
r".+Obsolete.*",
r".+OBSOLETE.*",
r".+Deprecated.*",
r"OST_GbXML.*",
r"OST_gbXML.*",
r"OST_DSR_.*",
],
includes=[],
excludes=[],
cgroups=[],
hidden=True,
),
CGROUP(
name="Site",
exclusives=[],
includes=[
r"OST_Site.*",
r"OST_Sewer.*",
r"OST_Road.*",
r"OST_Building.*",
r"OST_Contour.*",
r"OST_Parking.*",
],
excludes=[],
cgroups=[
CGROUP(
name="Topography",
exclusives=[],
includes=[r"OST_.*Topo.*"],
excludes=[],
cgroups=[],
),
],
),
CGROUP(
name="References",
exclusives=[],
includes=[
r"OST_Grid.*",
r"OST_Level.*",
r"OST_Level.*",
r"OST_Constraint.*",
r"OST_Reference.*",
],
excludes=[
r"OST_GridChains.*",
r"OST_ReferencePoints.*",
r"OST_ReferenceViewer.*",
],
cgroups=[],
),
CGROUP(
name="Modeling",
exclusives=[],
includes=[r"OST_Generic.*",],
excludes=["OST_GenericLines",],
cgroups=[
CGROUP(
name="Mass",
exclusives=[],
includes=[r"OST_Mass.*"],
excludes=[
r"OST_.+Cutter",
r"OST_.+Splitter",
r"OST_.+All",
r"OST_.+Outlines",
],
cgroups=[],
),
CGROUP(
name="Ceilings",
exclusives=[],
includes=[r"OST_Ceiling.*"],
excludes=[
r"OST_.+Cut.*",
r"OST_.+Projection.*",
r"OST_.+Default.*",
],
cgroups=[],
),
CGROUP(
name="Columns",
exclusives=[],
includes=[r"OST_Column.*"],
excludes=[r"OST_.+LocalCoordSys"],
cgroups=[],
),
CGROUP(
name="Curtain Systems",
exclusives=[],
includes=[r"OST_Curta.*"],
excludes=[
r"OST_.+FaceManager.*",
r"OST_CurtainGrids.+",
r"OST_Curtain.+Cut",
],
cgroups=[],
),
CGROUP(
name="Floors",
exclusives=[],
includes=[r"OST_Floor.*"],
excludes=[
r"OST_.+LocalCoordSys",
r"OST_.+Cut.*",
r"OST_.+Projection.*",
r"OST_.+Default.*",
],
cgroups=[],
),
CGROUP(
name="Doors",
exclusives=[],
includes=[r"OST_Door.*"],
excludes=[r"OST_.+Cut.*", r"OST_.+Projection.*",],
cgroups=[],
),
CGROUP(
name="Casework",
exclusives=[],
includes=[r"OST_Casework.*"],
excludes=[],
cgroups=[],
),
CGROUP(
name="Windows",
exclusives=[],
includes=[r"OST_Window.*"],
excludes=[r"OST_.+Cut.*", r"OST_.+Projection.*",],
cgroups=[],
),
CGROUP(
name="Furniture",
exclusives=[],
includes=[r"OST_Furniture.*"],
excludes=[],
cgroups=[],
),
CGROUP(
name="Adaptive",
exclusives=[],
includes=[r"OST_Adaptive.*"],
excludes=[],
cgroups=[],
),
CGROUP(
name="Speciality",
exclusives=[],
includes=[r"OST_Speciality.*"],
excludes=[],
cgroups=[],
),
CGROUP(
name="Openings",
exclusives=[r"OST_.+Opening", r"OST_Arc.*", r"OST_Shaft.*",],
includes=[],
excludes=[r"OST_.+Cut.*", r"OST_.+Projection.*",],
cgroups=[],
),
CGROUP(
name="Railing",
exclusives=[],
includes=[r"OST_Railing.*"],
excludes=[r"OST_.+Cut.*", r"OST_.+Projection.*",],
cgroups=[],
),
CGROUP(
name="Stairs",
exclusives=[],
includes=[r"OST_Stair.*", r"OST_.+Stairs"],
excludes=[r"OST_.+Cut.*", r"OST_.+Projection.*",],
cgroups=[],
),
CGROUP(
name="Ramps",
exclusives=[],
includes=[r"OST_Ramp.*"],
excludes=[r"OST_.+Cut.*", r"OST_.+Projection.*",],
cgroups=[],
),
CGROUP(
name="Walls",
exclusives=[],
includes=[r"OST_Wall.*", r"OST_Reveals", r"OST_Stacked.*"],
excludes=[
r"OST_.+LocalCoordSys",
r"OST_.+RefPlanes",
r"OST_.+Default",
r"OST_.+Cut.*",
r"OST_.+Projection.*",
],
cgroups=[],
),
CGROUP(
name="Roofs",
exclusives=[],
includes=[
r"OST_Roof.*",
r"OST_Fascia.*",
r"OST_Purlin.*",
r"OST_Gutter.*",
r"OST_Cornices.*",
r"OST_Dormer.*",
],
excludes=[
r"OST_.+Opening.*",
r"OST_.+Cut.*",
r"OST_.+Projection.*",
],
cgroups=[],
),
CGROUP(
name="Spatial",
exclusives=[],
includes=[
r"OST_Area.*",
r"OST_Zone.*",
r"OST_MEPSpace.*",
r"OST_Zoning.*",
r"OST_Room.*",
],
excludes=[
r"OST_.+Fill",
r"OST_.+Visibility",
r"OST_AreaRein.*",
r"OST_AreaReport.*",
],
cgroups=[],
),
CGROUP(
name="Structural",
exclusives=[],
includes=[
r"OST_Struct.+",
r"OST_.+Bracing",
r"OST_Truss.*",
r"OST_Joist.*",
r"OST_FabricArea.*",
r"OST_Rebar.*",
r"OST_Girder.*",
r"OST_Edge.*",
r"OST_Load.*",
r"OST_Internal.*Load.*",
r"OST_Isolated.*",
r"OST_Framing.*",
r"OST_Footing.*",
r"OST_Foundation.*",
r"OST_Fnd.*",
r"OST_Span.*",
r"OST_Steel.*",
r"OST_SWall.*",
r"OST_Brace.*",
r"OST_Bridge.*",
r"OST_.*PointLoad.*",
r"OST_Beam.*",
],
excludes=[
r"OST_.+LocalCoordSys",
r"OST_.+Other",
r"OST_.+LocationLine",
r"OST_.+PlanReps",
r"OST_.+NobleWarning",
r"OST_.+Failed",
],
cgroups=[],
),
CGROUP(
name="Mechanical",
exclusives=[],
includes=[
r"OST_Mechanical.*",
r"OST_.+Ducts",
r"OST_Duct.*",
r"OST_MEPAnalytical.*",
r"OST_Flex.*",
r"OST_MEPSystem.*",
r"OST_HVAC.*",
r"OST_Fabrication.+",
],
excludes=[
r"OST_.+Reference.*",
r"OST_.+TmpGraphic.*",
r"OST_.+Visibility",
],
cgroups=[],
),
CGROUP(
name="Electrical",
exclusives=[],
includes=[
r"OST_.+Pipes",
r"OST_Conduit.*",
r"OST_Cable.*",
r"OST_Wire.*",
r"OST_Light.*",
r"OST_Device.*",
r"OST_Panel.*",
r"OST_Elec.*",
r"OST_Routing.*",
r"OST_Switch.*",
r"OST_Connector.*",
r"OST_Route.*",
r"OST_.+Devices|OST_.+Device(Tags)|OST_.+Templates?",
],
excludes=[
r"OST_.+Axis",
r"OST_.+Template.*",
r"OST_.+Definition.*",
r"OST_.+Material",
],
cgroups=[],
),
CGROUP(
name="Plumbing",
exclusives=[],
includes=[
r"OST_Pipe.*",
r"OST_Fluid.*",
r"OST_Fixture.*",
r"OST_PlumbingFixture.*",
r"OST_Piping.*",
r"OST_Sprinkler.*",
],
excludes=[r"OST_.+Reference.*", r"OST_.+Material",],
cgroups=[],
),
],
),
CGROUP(
name="Drafting",
exclusives=[],
includes=[],
excludes=[],
cgroups=[
CGROUP(
name="Views",
exclusives=[],
includes=[
r"OST_.*Annotation.*",
"OST_Views",
"OST_PlanRegion",
r"OST_Schedule.*",
r"OST_Camera.*",
r"OST_Crop.*",
r"OST_Compass.*",
r"OST_Section.*",
r"OST_Sun.*",
r"OST_RenderRegions",
],
excludes=[r"OST_.+ViewParamGroup",],
cgroups=[],
),
CGROUP(
name="Sheets",
exclusives=[],
includes=[
r"OST_Sheet.*",
r"OST_Viewport.*",
r"OST_Title.*",
r"OST_Guide.*",
r"OST_Revisions.*",
],
excludes=[],
cgroups=[],
),
CGROUP(
name="Tags",
exclusives=[r"OST_Tag.*", r"OST_.+Tags", r"OST_.+Labels"],
includes=[],
excludes=[],
cgroups=[],
),
CGROUP(
name="Annotation",
exclusives=[
r"OST_.+DownArrow.*",
r"OST_.+DownText.*",
r"OST_.+UpArrow.*",
r"OST_.+UpText.*",
r"OST_.+Annotation.*",
r"OST_Callout.*",
r"OST_Spot.*",
r"OST_Cloud.*",
r"OST_Elev.*",
r"OST_Repeating.*",
"OST_BrokenSectionLine",
r"OST_Legend.*",
r"OST_Detail.*",
"OST_InvisibleLines",
"OST_DemolishedLines",
"OST_InsulationLines",
"OST_FillPatterns",
"OST_FilledRegion",
"OST_HiddenLines",
r"OST_Center.*",
r"OST_Keynote.*",
r"OST_Matchline.*",
r"OST_Model.*",
r"OST_.+Text.*",
r"OST_.+Overhead.*",
r"OST_Curve.*",
r"OST_Dim.*",
r"OST_Dimension.*",
r"OST_Masking.*",
r"OST_.+Tag.*",
r"OST_.+Label.*",
r"OST_.+Symbol.*",
r"OST_.+TickMark.*",
"OST_RevisionClouds",
],
includes=[],
excludes=[r"OST_DimLock.+", r"OST_IOS.+", r"OST_.+Symbology",],
cgroups=[],
),
],
),
CGROUP(
name="Containers",
exclusives=[],
includes=[
r"OST_Part.*",
r"OST_Assemblies.*",
r"OST_Group.*",
r"OST_.+Groups",
],
excludes=[],
cgroups=[],
),
CGROUP(
name="Links",
exclusives=[
"OST_RvtLinks",
"OST_TopographyLink",
r"OST_Coordination.*",
r"OST_PointCloud.*",
r"OST_Raster.*",
],
includes=[],
excludes=[],
cgroups=[],
),
CGROUP(
name="Analysis",
exclusives=[r"OST_.*Analy.*"],
includes=[],
excludes=[r"OST_AnalysisResults"],
cgroups=[
CGROUP(
name="Paths",
exclusives=[r"OST_Path.*"],
includes=[],
excludes=[],
cgroups=[],
),
],
),
CGROUP(
name="Rendering",
exclusives=[],
includes=[r"OST_Entourage.*",],
excludes=[],
cgroups=[
CGROUP(
name="Materials",
exclusives=[
r"OST_Material.*",
r"OST_Appearance.*",
r"OST_Decal.*",
r"OST_Planting.*",
],
includes=[],
excludes=[],
cgroups=[],
)
],
),
]
# =============================================================================
def expand_exclusives(
    cgroup: "CGROUP", used_bics: Set[str], remaining_bics: Set[str]
):
    """Apply the exclusive filters and expand to builtin category names.

    Mutates `cgroup.exclusives` (patterns -> concrete name set), `used_bics`
    (claimed names added) and `remaining_bics` (claimed names removed), then
    recurses into sub-groups for the same side effects.

    Raises:
        Exception: when an exclusive pattern matches a name already claimed.
    """
    exclusives = set()
    excludes = set()
    # Snapshot: remaining_bics shrinks below while we iterate.
    local_bics = remaining_bics.copy()
    for bic in local_bics:
        for excluspat in cgroup.exclusives:
            if re.match(excluspat, bic):
                if bic in used_bics:
                    raise Exception(
                        f'Exclusive conflict in "{cgroup.name}" @ "{excluspat}"'
                    )
                exclusives.add(bic)
    # Apply the group's own exclude patterns to the matched names.
    filtered_exclusives = exclusives.copy()
    for exclusitem in exclusives:
        for excpat in cgroup.excludes:
            if re.match(excpat, exclusitem):
                excludes.add(exclusitem)
    filtered_exclusives.difference_update(excludes)
    used_bics.update(filtered_exclusives)
    remaining_bics.difference_update(used_bics)
    # Recurse for side effects only. The function returns None, so the old
    # accumulation of recursion results into an unused list was dead code.
    for sub_cgroup in cgroup.cgroups:
        expand_exclusives(sub_cgroup, used_bics, remaining_bics)
    cgroup.exclusives = filtered_exclusives
def expand_includes(
    cgroup: "CGROUP", used_bics: Set[str], remaining_bics: Set[str]
):
    """Apply the include filters and expand to builtin category names.

    Mutates `cgroup.includes` (patterns -> concrete name set) and
    `used_bics`. Unlike expand_exclusives(), names matched here are NOT
    removed from `remaining_bics`: includes are non-exclusive, so other
    groups may still match the same names.
    """
    includes = set()
    excludes = set()
    local_bics = remaining_bics.copy()
    for bic in local_bics:
        for incpat in cgroup.includes:
            if re.match(incpat, bic):
                includes.add(bic)
    # Apply the group's own exclude patterns to the matched names.
    filtered_includes = includes.copy()
    for incitem in includes:
        for excpat in cgroup.excludes:
            if re.match(excpat, incitem):
                excludes.add(incitem)
    filtered_includes.difference_update(excludes)
    used_bics.update(filtered_includes)
    # Recurse for side effects only. The function returns None, so the old
    # accumulation of recursion results into an unused list was dead code.
    for sub_cgroup in cgroup.cgroups:
        expand_includes(sub_cgroup, used_bics, remaining_bics)
    cgroup.includes = filtered_includes
def filter_cgroup(cgroup: "CGROUP", name: str):
    """Depth-first search of a cgroup tree for the group called `name`.

    Returns the matching group object, or None when nothing in the
    subtree rooted at `cgroup` matches.
    """
    if cgroup.name == name:
        return cgroup
    for child in cgroup.cgroups:
        found = filter_cgroup(child, name)
        if found is not None:
            return found
    return None
def create_ccomp(cgroup: "CGROUP") -> "CategoryComp":
    """Create component data from an expanded cgroup.

    Assumes expand_exclusives()/expand_includes() already ran, so
    `cgroup.exclusives` and `cgroup.includes` hold concrete name sets.
    Names claimed by any descendant component are removed from this
    level's own ("_") category list.
    """
    # Fix: copy before merging. The original bound root_categories directly
    # to cgroup.exclusives, so update() silently mutated the group's data.
    root_categories = set(cgroup.exclusives)
    root_categories.update(cgroup.includes)
    sub_components = []
    for sub_cgroup in cgroup.cgroups:
        sub_components.append(create_ccomp(sub_cgroup))
    sub_categories = {}
    for sub_comp in sub_components:
        sub_categories[sub_comp.name] = sub_comp.categories
        all_sub_bips = []
        # NOTE(review): categories.values() can contain nested dicts (for
        # sub-sub components); extend() then adds their KEYS, not their
        # category names — confirm this is intended.
        for sub_bips in sub_comp.categories.values():
            all_sub_bips.extend(sub_bips)
        root_categories = root_categories.difference(all_sub_bips)
    # "_" holds this level's own (sorted) categories; sub-groups follow.
    categories = {"_": sorted(list(root_categories))}
    categories.update(sub_categories)
    return CategoryComp(name=cgroup.name, categories=categories)
def create_ccomp_collection(
    version: str, builtin_category_names: Set[str]
) -> CategoryCompCollection:
    """Create component collection from a set of builtin category names.

    When a CLI argument is given (sys.argv[1]) only the cgroup subtree with
    that name is emitted; otherwise every non-hidden root group is emitted.
    """
    remaining_bics = builtin_category_names.copy()
    used_bics: Set[str] = set()
    # Exclusive claims run first across ALL groups so they win over includes.
    for cgroup in CGROUPS:
        expand_exclusives(cgroup, used_bics, remaining_bics)
    for cgroup in CGROUPS:
        expand_includes(cgroup, used_bics, remaining_bics)
    all_comps: List[CategoryComp] = []
    if len(sys.argv) > 1:
        matching_cgroup = None
        for cgroup in CGROUPS:
            matching_cgroup = filter_cgroup(cgroup, name=sys.argv[1])
            if matching_cgroup:
                all_comps.append(create_ccomp(matching_cgroup))
    else:
        for cgroup in CGROUPS:
            if not cgroup.hidden:
                all_comps.append(create_ccomp(cgroup))
    return CategoryCompCollection(
        version=version,
        bics=builtin_category_names,
        components=all_comps,
        used_bics=used_bics,
    )
def load_bics(data_file: str):
    """Read one builtin-category name per line and return them as a set.

    Leading/trailing whitespace (including the newline) is stripped from
    every line; duplicate names collapse naturally.
    """
    with open(data_file, "r") as handle:
        return {line.strip() for line in handle}
def dump_bics(data_file: str, ccomps_col: "CategoryCompCollection"):
    """Serialize a component collection to `data_file` as indented JSON.

    Objects that are not JSON-native are encoded via their __dict__.
    """
    with open(data_file, "w") as out:
        json.dump(ccomps_col, out, indent=2, default=lambda obj: obj.__dict__)
# Convert every builtin-category text file in DATA_DIR into a grouped
# component-collection JSON file next to it.
for entry in os.listdir(DATA_DIR):
    if entry.endswith(".txt"):
        bic_file = op.join(DATA_DIR, entry)
        dafa_filename = op.splitext(op.basename(bic_file))[0]
        # Assumes filenames like "<prefix>_<version>.txt"; an underscore-less
        # name raises IndexError here — TODO confirm the naming convention.
        bic_file_version = dafa_filename.split("_")[1]
        bic_names = load_bics(bic_file)
        ccomp_collection = create_ccomp_collection(bic_file_version, bic_names)
        json_file = op.join(DATA_DIR, dafa_filename + ".json")
        dump_bics(json_file, ccomp_collection)
| [
"os.listdir",
"os.path.join",
"re.match",
"os.path.basename",
"json.dump",
"typing.TypeVar"
] | [((473, 490), 'typing.TypeVar', 'TypeVar', (['"""CGROUP"""'], {}), "('CGROUP')\n", (480, 490), False, 'from typing import Set, List, TypeVar\n'), ((21245, 21265), 'os.listdir', 'os.listdir', (['DATA_DIR'], {}), '(DATA_DIR)\n', (21255, 21265), False, 'import os\n'), ((21135, 21206), 'json.dump', 'json.dump', (['ccomps_col', 'datafile'], {'indent': '(2)', 'default': '(lambda x: x.__dict__)'}), '(ccomps_col, datafile, indent=2, default=lambda x: x.__dict__)\n', (21144, 21206), False, 'import json\n'), ((21317, 21341), 'os.path.join', 'op.join', (['DATA_DIR', 'entry'], {}), '(DATA_DIR, entry)\n', (21324, 21341), True, 'import os.path as op\n'), ((21599, 21641), 'os.path.join', 'op.join', (['DATA_DIR', "(dafa_filename + '.json')"], {}), "(DATA_DIR, dafa_filename + '.json')\n", (21606, 21641), True, 'import os.path as op\n'), ((16992, 17016), 're.match', 're.match', (['excluspat', 'bic'], {}), '(excluspat, bic)\n', (17000, 17016), False, 'import re\n'), ((17364, 17392), 're.match', 're.match', (['excpat', 'exclusitem'], {}), '(excpat, exclusitem)\n', (17372, 17392), False, 'import re\n'), ((18123, 18144), 're.match', 're.match', (['incpat', 'bic'], {}), '(incpat, bic)\n', (18131, 18144), False, 'import re\n'), ((18304, 18329), 're.match', 're.match', (['excpat', 'incitem'], {}), '(excpat, incitem)\n', (18312, 18329), False, 'import re\n'), ((21378, 21399), 'os.path.basename', 'op.basename', (['bic_file'], {}), '(bic_file)\n', (21389, 21399), True, 'import os.path as op\n')] |
"""Data types used for local invocations."""
from typing import Any, Generic, Iterable, Iterator, Mapping, NamedTuple, TypeVar, Union
from dagger.serializer import Serializer
T = TypeVar("T")
class OutputFile(NamedTuple):
"""Represents a file in the local file system that holds the serialized value for a node output."""
filename: str
serializer: Serializer
class PartitionedOutput(Generic[T]):
"""Represents a partitioned output explicitly."""
def __init__(self, iterable: Iterable[T]):
"""Build a partitioned output from an Iterable."""
self._iterable = iterable
self._iterator = iter(iterable)
def __iter__(self) -> Iterator[T]:
"""Return an iterator over the partitions of the output."""
return self
def __next__(self) -> T:
"""Return the next element in the partitioned output."""
return next(self._iterator)
def __repr__(self) -> str:
"""Return a human-readable representation of the partitioned output."""
return repr(self._iterable)
#: One of the outputs of a node, which may be partitioned
NodeOutput = Union[OutputFile, PartitionedOutput[OutputFile]]
#: All outputs of a node indexed by their name. Node executions may be partitioned, in which case this is a list.
NodeOutputs = Mapping[str, NodeOutput]
#: All executions of a node. If the node is partitioned there will only be one. Otherwise, there may be many.
NodeExecutions = Union[NodeOutputs, PartitionedOutput[NodeOutputs]]
#: The parameters supplied to a node (plain, not serialized)
NodeParams = Mapping[str, Any]
| [
"typing.TypeVar"
] | [((182, 194), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (189, 194), False, 'from typing import Any, Generic, Iterable, Iterator, Mapping, NamedTuple, TypeVar, Union\n')] |
# -*- coding: utf-8 -*-
from functools import wraps
from inspect import getargspec, getmodule
from diskcache import Cache
def qualified_name(x):
    """Return the dotted, module-qualified name of callable/class `x`."""
    module = getmodule(x)
    return module.__name__ + '.' + x.__name__
def permoize(cache_dir, explicit=False):
    """Decorator factory: persistently memoize a function with diskcache.

    Args:
        cache_dir: directory where the disk cache lives.
        explicit: when True the wrapper returns (value, fn_was_executed)
            instead of just the value.
    """
    # Fix: inspect.getargspec() was removed in Python 3.11;
    # getfullargspec() is the drop-in replacement for the positional
    # args/defaults used here. Imported locally so the module-level import
    # list is untouched.
    from inspect import getfullargspec

    def decorator(fn):
        arg_spec = getfullargspec(fn)
        nb_pos_args = len(arg_spec.args)

        @wraps(fn)
        def wrapper(*args, **kwargs):
            nb_args = len(args)
            # Materialize unsupplied positional defaults so calls relying on
            # defaults hash to the same cache key as fully explicit calls.
            nb_pos_args_without_val = nb_pos_args - nb_args
            if arg_spec.defaults and nb_pos_args_without_val > 0:
                args += arg_spec.defaults[-nb_pos_args_without_val:]
            # Cache key: qualified function name plus its concrete arguments.
            desc = {'fn': qualified_name(fn),
                    'args': args,
                    'kwargs': kwargs}
            val, fn_was_executed = None, False
            with Cache(cache_dir) as cache:
                try:
                    val = cache[desc]
                except KeyError:
                    val = fn(*args, **kwargs)
                    fn_was_executed = True
                    cache[desc] = val
            if explicit:
                return val, fn_was_executed
            return val
        return wrapper
    return decorator
| [
"inspect.getmodule",
"diskcache.Cache",
"functools.wraps",
"inspect.getargspec"
] | [((294, 308), 'inspect.getargspec', 'getargspec', (['fn'], {}), '(fn)\n', (304, 308), False, 'from inspect import getargspec, getmodule\n'), ((361, 370), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (366, 370), False, 'from functools import wraps\n'), ((174, 186), 'inspect.getmodule', 'getmodule', (['x'], {}), '(x)\n', (183, 186), False, 'from inspect import getargspec, getmodule\n'), ((822, 838), 'diskcache.Cache', 'Cache', (['cache_dir'], {}), '(cache_dir)\n', (827, 838), False, 'from diskcache import Cache\n')] |
from PyQt5.QtWidgets import QGraphicsView, QGraphicsScene, QGraphicsProxyWidget
from PyQt5.QtWidgets import QTableWidgetItem, QGraphicsSceneMouseEvent
from PyQt5.QtCore import Qt, pyqtSignal, QRect, QPointF
from PyQt5.QtGui import QMouseEvent, QTransform, QKeyEvent, QPainter
from element import PFSActivity, PFSDistributor
from statemachine import PFSStateMachine
from extra import PFSTextBox
from undo import *
from relation import *
class PFSScene(QGraphicsScene):
    """Graphics scene holding the elements of one PFS page.

    Mouse and keyboard events are interpreted according to the tool
    currently active in the parent state machine (normal selection,
    element insertion, relation drawing, text editing, ...), and every
    model change is pushed onto the page net's undo stack.
    """
    # Spacing, in scene units, of the optional background dot grid.
    DELTA = 20.0
    # Emitted after an insertion finishes (tool returns to normal mode).
    inserted = pyqtSignal()
    edited = pyqtSignal()
    # Emitted when insertion happened with Shift held (tool stays active).
    shiftInserted = pyqtSignal()

    def __init__(self, w: int, h: int, parentState: PFSStateMachine, page):
        # NOTE(review): super(QGraphicsScene, self) skips QGraphicsScene's
        # own __init__ in the MRO — confirm this is intentional.
        super(QGraphicsScene, self).__init__()
        self._backgroundPoints = []  # dots painted by drawBackground()
        self.resize(w,h)
        self._paintGrid = False
        self._parentState = parentState  # state machine selecting the tool
        self._page = page
        self._tempSource = None      # source element while drawing a relation
        self._tempActivity = None    # activity currently being text-edited
        self._lastPos = QPointF(0,0)  # scene position of the last press
        self._lastItemClicked = None
        self._wasMoving = False      # True while a drag-move is in progress

    def setPaintGrid(self, v: bool= True):
        """Enable/disable the background dot grid and repaint."""
        self._paintGrid = v
        self.update()

    def resize(self, w: int, h: int, l: int= 0, t: int= 0):
        """Resize the scene rect and rebuild the background grid points."""
        self.setSceneRect(l, t, w, h)
        sx = int(w/self.DELTA - 1)
        sy = int(h/self.DELTA - 1)
        self._backgroundPoints = [QPointF((i+0.5)*self.DELTA, (j+0.5)*self.DELTA) for i in range(sx) for j in range(sy)]
        self.update()

    def mouseReleaseEvent(self, ev: QGraphicsSceneMouseEvent):
        """In normal mode, update selection and the properties panel."""
        if self._parentState._sNormal and not self._wasMoving:
            it = self.itemAt(ev.scenePos(), QTransform())
            # Without Shift, a click replaces the current selection.
            if int(ev.modifiers()) & Qt.ShiftModifier == 0:
                self.clearSelection()
            if it is not None:
                it.setSelected(True)
            itList = self.selectedItems()
            if len(itList) == 1:
                self._page._net.fillProperties(itList[0].propertiesTable())
            if len(itList) == 0:
                # Nothing selected: show the page's own properties.
                self._page._net.fillProperties(self._page.propertiesTable())
            self.update()
        self._wasMoving = False
        QGraphicsScene.mouseReleaseEvent(self, ev)

    def mousePressEvent(self, ev: QGraphicsSceneMouseEvent):
        """Dispatch a mouse press according to the active tool state."""
        if ev.button() == Qt.RightButton:
            # Right click cancels the current tool via the main window.
            self._page._net._window._main.tabChanged.emit()
            return
        self._lastPos = ev.scenePos()
        self._lastItemClicked = self.itemAt(ev.scenePos(), QTransform())
        print(ev.scenePos())  # NOTE(review): debug output left in place
        if self._parentState._sPasting:
            self._page._net.pasteItems(self._lastPos)
            self.inserted.emit()
        if self._parentState._sDistributor:
            # Insert a new distributor at the clicked position.
            pos = ev.scenePos()
            elem = PFSDistributor(self._page._net.requestId(PFSDistributor), pos.x(), pos.y())
            self._page._net.addItem(elem, self._page)
            # Holding Shift keeps the insertion tool active.
            if int(ev.modifiers()) & Qt.ShiftModifier == 0:
                self.inserted.emit()
            return
        if self._parentState._sActivity:
            # Insert a new activity with a default label.
            pos = ev.scenePos()
            elem = PFSActivity(self._page._net.requestId(PFSActivity), pos.x(), pos.y(), "Activity")
            self._page._net.addItem(elem, self._page)
            if int(ev.modifiers()) & Qt.ShiftModifier == 0:
                self.inserted.emit()
            return
        if self._parentState._sRelationS:
            # First click of a relation: remember the source element.
            it = self._lastItemClicked
            if it is not None:
                self._tempSource = it
                self.inserted.emit()
            return
        if self._parentState._sSFlowS:
            # First click of a secondary flow: remember the source.
            it = self._lastItemClicked
            if it is not None:
                self._tempSource = it
                self.inserted.emit()
            return
        if self._parentState._sRelationT:
            # Second click: create the relation from source to target.
            it = self._lastItemClicked
            if it is not None:
                elem = PFSRelation.createRelation(self._page._net.requestId(PFSRelation), self._tempSource, it)
                if elem is not None:
                    self._page._net.addItem(elem, self._page)
                    if int(ev.modifiers()) & Qt.ShiftModifier == 0:
                        self.inserted.emit()
                    else:
                        self.shiftInserted.emit()
                self._tempSource = None
            return
        if self._parentState._sSFlowT:
            # Second click: create the secondary flow from source to target.
            it = self._lastItemClicked
            if it is not None:
                elem = PFSSecondaryFlow.createSecondaryFlow(self._page._net.requestId(PFSRelation), self._tempSource, it)
                if elem is not None:
                    self._page._net.addItem(elem, self._page)
                    if int(ev.modifiers()) & Qt.ShiftModifier == 0:
                        self.inserted.emit()
                    else:
                        self.shiftInserted.emit()
                self._tempSource = None
            return
        if self._parentState._sNormal:
            self._page._net._prop.clear()
            it = self._lastItemClicked
            # Ctrl+click: open an activity's subpage, or toggle a
            # relation's middle point at the clicked position.
            if int(ev.modifiers()) & Qt.ControlModifier == Qt.ControlModifier:
                if isinstance(it, PFSActivity):
                    if not it.hasSubPage():
                        self._page._net.createPage(it)
                    self._page._net.openPage(it)
                elif isinstance(it, PFSRelation):
                    if not it.closeMiddlePoint(ev.scenePos()):
                        it.createMiddlePoint(ev.scenePos())
                return
        if self._parentState._sTiping:
            # Clicking outside the text editor commits the edited text
            # (undoably) and removes the inline editor widget.
            it = self._lastItemClicked
            if it is None or not isinstance(it, QGraphicsProxyWidget):
                if self._tempActivity is not None:
                    x = PFSUndoSetText(self._tempActivity, self._line.widget().toPlainText(), self)
                    self._page._net.undoStack.push(x)
                self.removeItem(self._line)
                self.inserted.emit()
                QGraphicsScene.mousePressEvent(self, ev)
                return
        QGraphicsScene.mousePressEvent(self, ev)

    def keyPressEvent(self, ev:QKeyEvent):
        """Handle Escape (cancel tool) and arrow keys (nudge selection)."""
        if self._parentState._sTiping:
            # While editing text, forward every key to the editor widget.
            QGraphicsScene.keyPressEvent(self, ev)
            return
        if ev.key() == Qt.Key_Escape:
            self._page._net._window._main.tabChanged.emit()
            return
        if ev.key() == Qt.Key_Up:
            itList = self.selectedItems()
            if len(itList) > 0:
                # Nudge the selection 10 units up, recorded on the undo stack.
                x = PFSUndoKeyMove(itList, 0, -10)
                self._page._net.undoStack.push(x)
            else:
                QGraphicsScene.keyPressEvent(self, ev)
            return
        if ev.key() == Qt.Key_Down:
            itList = self.selectedItems()
            if len(itList) > 0:
                x = PFSUndoKeyMove(itList, 0, 10)
                self._page._net.undoStack.push(x)
            else:
                QGraphicsScene.keyPressEvent(self, ev)
            return
        if ev.key() == Qt.Key_Left:
            itList = self.selectedItems()
            if len(itList) > 0:
                x = PFSUndoKeyMove(itList, -10, 0)
                self._page._net.undoStack.push(x)
            else:
                QGraphicsScene.keyPressEvent(self, ev)
            return
        if ev.key() == Qt.Key_Right:
            itList = self.selectedItems()
            if len(itList) > 0:
                x = PFSUndoKeyMove(itList, 10, 0)
                self._page._net.undoStack.push(x)
            else:
                QGraphicsScene.keyPressEvent(self, ev)
            return
        QGraphicsScene.keyPressEvent(self, ev)

    def mouseMoveEvent(self, ev: QGraphicsSceneMouseEvent):
        """Drag-move the current selection, recording it on the undo stack."""
        if ev.buttons() == Qt.NoButton:
            return
        pos = ev.scenePos()
        if self._lastItemClicked is None or not self._lastItemClicked.isSelected():
            return
        itList = self.selectedItems()
        if len(itList) > 0:
            self._wasMoving = True
            # Move by the delta since the last observed position.
            x = pos.x() - self._lastPos.x()
            y = pos.y() - self._lastPos.y()
            x = PFSUndoMouseMove(itList, x, y)
            self._page._net.undoStack.push(x)
            self._lastPos = pos
            self.update()

    def drawBackground(self, p: QPainter, r: QRect):
        """Paint the scene border and, when enabled, the dot grid."""
        p.drawRect(self.sceneRect())
        if not self._paintGrid:
            return
        p.setPen(Qt.SolidLine)
        for point in self._backgroundPoints:
            p.drawPoint(point)
class PFSView(QGraphicsView):
    """Viewport widget displaying a :class:`PFSScene`."""
    def __init__(self, scene: PFSScene):
        # NOTE(review): super(QGraphicsView, self) bypasses QGraphicsView's
        # own __init__ in the MRO, mirroring PFSScene's pattern — confirm
        # this is intentional.
        super(QGraphicsView, self).__init__(scene)
"PyQt5.QtCore.pyqtSignal",
"PyQt5.QtWidgets.QGraphicsScene.mousePressEvent",
"PyQt5.QtGui.QTransform",
"PyQt5.QtWidgets.QGraphicsScene.mouseReleaseEvent",
"PyQt5.QtWidgets.QGraphicsScene.keyPressEvent",
"PyQt5.QtCore.QPointF"
] | [((495, 507), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (505, 507), False, 'from PyQt5.QtCore import Qt, pyqtSignal, QRect, QPointF\n'), ((518, 530), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (528, 530), False, 'from PyQt5.QtCore import Qt, pyqtSignal, QRect, QPointF\n'), ((548, 560), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (558, 560), False, 'from PyQt5.QtCore import Qt, pyqtSignal, QRect, QPointF\n'), ((876, 889), 'PyQt5.QtCore.QPointF', 'QPointF', (['(0)', '(0)'], {}), '(0, 0)\n', (883, 889), False, 'from PyQt5.QtCore import Qt, pyqtSignal, QRect, QPointF\n'), ((1856, 1898), 'PyQt5.QtWidgets.QGraphicsScene.mouseReleaseEvent', 'QGraphicsScene.mouseReleaseEvent', (['self', 'ev'], {}), '(self, ev)\n', (1888, 1898), False, 'from PyQt5.QtWidgets import QGraphicsView, QGraphicsScene, QGraphicsProxyWidget\n'), ((4815, 4855), 'PyQt5.QtWidgets.QGraphicsScene.mousePressEvent', 'QGraphicsScene.mousePressEvent', (['self', 'ev'], {}), '(self, ev)\n', (4845, 4855), False, 'from PyQt5.QtWidgets import QGraphicsView, QGraphicsScene, QGraphicsProxyWidget\n'), ((5975, 6013), 'PyQt5.QtWidgets.QGraphicsScene.keyPressEvent', 'QGraphicsScene.keyPressEvent', (['self', 'ev'], {}), '(self, ev)\n', (6003, 6013), False, 'from PyQt5.QtWidgets import QGraphicsView, QGraphicsScene, QGraphicsProxyWidget\n'), ((1205, 1260), 'PyQt5.QtCore.QPointF', 'QPointF', (['((i + 0.5) * self.DELTA)', '((j + 0.5) * self.DELTA)'], {}), '((i + 0.5) * self.DELTA, (j + 0.5) * self.DELTA)\n', (1212, 1260), False, 'from PyQt5.QtCore import Qt, pyqtSignal, QRect, QPointF\n'), ((2142, 2154), 'PyQt5.QtGui.QTransform', 'QTransform', ([], {}), '()\n', (2152, 2154), False, 'from PyQt5.QtGui import QMouseEvent, QTransform, QKeyEvent, QPainter\n'), ((4762, 4802), 'PyQt5.QtWidgets.QGraphicsScene.mousePressEvent', 'QGraphicsScene.mousePressEvent', (['self', 'ev'], {}), '(self, ev)\n', (4792, 4802), False, 'from PyQt5.QtWidgets import QGraphicsView, QGraphicsScene, 
QGraphicsProxyWidget\n'), ((4934, 4972), 'PyQt5.QtWidgets.QGraphicsScene.keyPressEvent', 'QGraphicsScene.keyPressEvent', (['self', 'ev'], {}), '(self, ev)\n', (4962, 4972), False, 'from PyQt5.QtWidgets import QGraphicsView, QGraphicsScene, QGraphicsProxyWidget\n'), ((1463, 1475), 'PyQt5.QtGui.QTransform', 'QTransform', ([], {}), '()\n', (1473, 1475), False, 'from PyQt5.QtGui import QMouseEvent, QTransform, QKeyEvent, QPainter\n'), ((5250, 5288), 'PyQt5.QtWidgets.QGraphicsScene.keyPressEvent', 'QGraphicsScene.keyPressEvent', (['self', 'ev'], {}), '(self, ev)\n', (5278, 5288), False, 'from PyQt5.QtWidgets import QGraphicsView, QGraphicsScene, QGraphicsProxyWidget\n'), ((5474, 5512), 'PyQt5.QtWidgets.QGraphicsScene.keyPressEvent', 'QGraphicsScene.keyPressEvent', (['self', 'ev'], {}), '(self, ev)\n', (5502, 5512), False, 'from PyQt5.QtWidgets import QGraphicsView, QGraphicsScene, QGraphicsProxyWidget\n'), ((5699, 5737), 'PyQt5.QtWidgets.QGraphicsScene.keyPressEvent', 'QGraphicsScene.keyPressEvent', (['self', 'ev'], {}), '(self, ev)\n', (5727, 5737), False, 'from PyQt5.QtWidgets import QGraphicsView, QGraphicsScene, QGraphicsProxyWidget\n'), ((5924, 5962), 'PyQt5.QtWidgets.QGraphicsScene.keyPressEvent', 'QGraphicsScene.keyPressEvent', (['self', 'ev'], {}), '(self, ev)\n', (5952, 5962), False, 'from PyQt5.QtWidgets import QGraphicsView, QGraphicsScene, QGraphicsProxyWidget\n')] |
import hashlib
import io
import sys
from typing import Optional
from PIL import Image, ImageOps
from django.core import checks
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.db.models import ImageField
class ResizedHashNameImageField(ImageField):
    """A custom ImageField class that will resize the images and uses the file hash as filename."""

    def __init__(
        self,
        verbose_name: Optional[str] = None,
        name: Optional[str] = None,
        max_width: int = 1620,
        max_height: int = 1620,
        width_field: Optional[str] = None,
        height_field: Optional[str] = None,
        **kwargs
    ) -> None:
        # Maximum bounding box the stored image is scaled down to fit into.
        self.max_width = max_width
        self.max_height = max_height
        super().__init__(verbose_name, name, width_field, height_field, **kwargs)

    def pre_save(self, model_instance, add):
        """Prepares the image for saving by scaling it to the desired dimensions."""
        file = getattr(model_instance, self.attname)
        # Only process newly uploaded files (not yet committed to storage).
        if file and not file._committed:
            # Open the image with Pillow and save the original format attribute
            im = Image.open(file.file)
            im_format = im.format
            # If the image contains exif rotation data, rotate it accordingly
            im = ImageOps.exif_transpose(im)
            # Rescale the image, if necessary (thumbnail preserves aspect ratio
            # and never upscales)
            im.thumbnail((self.max_width, self.max_height), resample=Image.LANCZOS)
            # Save it to an in-memory file
            temp = io.BytesIO()
            im.save(temp, format=im_format, quality=75)
            # Hash the contents for a filename (content-addressed, so
            # identical uploads deduplicate to the same name)
            filename = f"{hashlib.md5(temp.getvalue()).hexdigest()}.{im_format}"
            # Create a new InMemoryUploadedFile
            new_file = InMemoryUploadedFile(
                temp, self.name, filename, f'image/{im_format.lower()}', sys.getsizeof(temp), None
            )
            # Reassign the `file` and `name` attributes
            file.file = new_file
            file.name = filename
            # Save the file
            file.save(file.name, file.file, save=False)
        return file

    def check(self, **kwargs):
        """Check maximum dimension values for integrity."""
        return [
            *super().check(**kwargs),
            *self._check_maximum_dimension_values(),
        ]

    def _check_maximum_dimension_values(self):
        """Check if the maximum dimension field values were set with an integer."""
        if not isinstance(self.max_width, int) or not isinstance(self.max_height, int):
            return [
                checks.Error(
                    f"{self.__class__.__name__}'s 'max_width' and `max_height` arguments"
                    " must be integers.",
                    obj=self,
                    hint='Provide integer values',
                )
            ]
        else:
            return []
| [
"PIL.Image.open",
"PIL.ImageOps.exif_transpose",
"sys.getsizeof",
"io.BytesIO",
"django.core.checks.Error"
] | [((1151, 1172), 'PIL.Image.open', 'Image.open', (['file.file'], {}), '(file.file)\n', (1161, 1172), False, 'from PIL import Image, ImageOps\n'), ((1303, 1330), 'PIL.ImageOps.exif_transpose', 'ImageOps.exif_transpose', (['im'], {}), '(im)\n', (1326, 1330), False, 'from PIL import Image, ImageOps\n'), ((1525, 1537), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1535, 1537), False, 'import io\n'), ((1890, 1909), 'sys.getsizeof', 'sys.getsizeof', (['temp'], {}), '(temp)\n', (1903, 1909), False, 'import sys\n'), ((2625, 2777), 'django.core.checks.Error', 'checks.Error', (['f"""{self.__class__.__name__}\'s \'max_width\' and `max_height` arguments must be integers."""'], {'obj': 'self', 'hint': '"""Provide integer values"""'}), '(\n f"{self.__class__.__name__}\'s \'max_width\' and `max_height` arguments must be integers."\n , obj=self, hint=\'Provide integer values\')\n', (2637, 2777), False, 'from django.core import checks\n')] |
"""
------------------------
Loggers module
The module consists Logger class that can be used by various modules in various scenarios for logging purpose.
------------------------
"""
import logging
from datetime import datetime
from flask import current_app
import os
class Logger:
    """
    Custom logger class
    """
    logger = None
    logger_folder = None
    logger_handler = None
    logger_level = None
    # Default record format shared by all Logger instances.
    formatter = logging.Formatter('%(asctime)s %(levelname)s:%(name)s: %(message)s', '%d-%m-%Y %H:%M:%S')

    def __init__(self, logger_type, param=None):
        if param:
            # Custom parameter is found: include it in every record.
            self.formatter = logging.Formatter('%(asctime)s %(levelname)s:%(name)s: [%(param)s] %(message)s',
                                               '%d-%m-%Y %H:%M:%S')
        today = datetime.today().strftime("%d-%m-%Y")
        # Log files live under <project root>/logs/<folder>/<date>_*.log;
        # the project root is derived from the Flask instance path.
        root_dir = os.path.dirname(current_app.instance_path)
        self.logger = logging.getLogger(logger_type)
        if logger_type == 'INFO_LOGGER':
            self.logger_level = logging.INFO
            self.logger.setLevel(self.logger_level)
            self.logger_folder = 'app_info_logs'
            self.logger_handler = logging.FileHandler(
                f'{root_dir}/logs/{self.logger_folder}/{today}_info.log')
            self.logger_handler.setFormatter(self.formatter)
        else:
            # Default logger is error logger.
            self.logger_level = logging.ERROR
            self.logger.setLevel(self.logger_level)
            self.logger_folder = 'app_errors'
            self.logger_handler = logging.FileHandler(
                f'{root_dir}/logs/{self.logger_folder}/{today}_errors.log')
            self.logger_handler.setFormatter(self.formatter)
        # Clearing up the handlers if any previous ones are found
        # (avoids duplicated output when the same named logger is reused).
        if self.logger.hasHandlers():
            self.logger.handlers.clear()
        self.logger.addHandler(self.logger_handler)
        if param:
            # If the custom parameter 'param' is provided in the constructor,
            # add the param in the as an extra (A dict parameter that provides contextual information)
            # to the logger adapter.
            extra = {'param': param}
            self.logger = logging.LoggerAdapter(self.logger, extra)

    def get_logger(self):
        """Return the configured logger (or LoggerAdapter when *param* was given)."""
        return self.logger
def error_logger(param=None):
    """Build and return a configured error-level logger.

    @param param : Custom parameter that can be passed to the logger.
    @return: custom logger
    """
    return Logger('ERROR_LOGGER', param).get_logger()
def info_logger(param=None):
    """Build and return a configured info-level logger.

    @param param : Custom parameter that can be passed to the logger.
    @return: custom logger
    """
    return Logger('INFO_LOGGER', param).get_logger()
| [
"logging.getLogger",
"logging.Formatter",
"os.path.dirname",
"logging.FileHandler",
"datetime.datetime.today",
"logging.LoggerAdapter"
] | [((435, 528), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)s:%(name)s: %(message)s"""', '"""%d-%m-%Y %H:%M:%S"""'], {}), "('%(asctime)s %(levelname)s:%(name)s: %(message)s',\n '%d-%m-%Y %H:%M:%S')\n", (452, 528), False, 'import logging\n'), ((886, 928), 'os.path.dirname', 'os.path.dirname', (['current_app.instance_path'], {}), '(current_app.instance_path)\n', (901, 928), False, 'import os\n'), ((951, 981), 'logging.getLogger', 'logging.getLogger', (['logger_type'], {}), '(logger_type)\n', (968, 981), False, 'import logging\n'), ((664, 770), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)s:%(name)s: [%(param)s] %(message)s"""', '"""%d-%m-%Y %H:%M:%S"""'], {}), "('%(asctime)s %(levelname)s:%(name)s: [%(param)s] %(message)s'\n , '%d-%m-%Y %H:%M:%S')\n", (681, 770), False, 'import logging\n'), ((1204, 1281), 'logging.FileHandler', 'logging.FileHandler', (['f"""{root_dir}/logs/{self.logger_folder}/{today}_info.log"""'], {}), "(f'{root_dir}/logs/{self.logger_folder}/{today}_info.log')\n", (1223, 1281), False, 'import logging\n'), ((1598, 1677), 'logging.FileHandler', 'logging.FileHandler', (['f"""{root_dir}/logs/{self.logger_folder}/{today}_errors.log"""'], {}), "(f'{root_dir}/logs/{self.logger_folder}/{today}_errors.log')\n", (1617, 1677), False, 'import logging\n'), ((2253, 2294), 'logging.LoggerAdapter', 'logging.LoggerAdapter', (['self.logger', 'extra'], {}), '(self.logger, extra)\n', (2274, 2294), False, 'import logging\n'), ((829, 845), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (843, 845), False, 'from datetime import datetime\n')] |
import logging
import time
logger = logging.getLogger("namedLogger")


def sleep(secs):
    """Log the pending pause, then block for ``secs`` seconds."""
    logger.info("Sleeping for %d seconds.", secs)
    time.sleep(secs)
| [
"logging.getLogger",
"time.sleep"
] | [((37, 69), 'logging.getLogger', 'logging.getLogger', (['"""namedLogger"""'], {}), "('namedLogger')\n", (54, 69), False, 'import logging\n'), ((143, 159), 'time.sleep', 'time.sleep', (['secs'], {}), '(secs)\n', (153, 159), False, 'import time\n')] |
# Allow running this file directly from inside the package tree by adding
# the repository root to sys.path.
if __name__ == '__main__' and __package__ is None:
    import sys
    from os import path
    sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ))
import os
import torch
from utils.model_serialization import strip_prefix_if_present
from utils import zipreader
import argparse
from tqdm import tqdm
import pickle
import cv2
import numpy as np

parser = argparse.ArgumentParser(description="PyTorch Keypoints Training")
parser.add_argument(
    "--src",
    default="~/datasets",
    help="source model",
    type=str,
)
parser.add_argument(
    "--dst",
    default="~/local/datasets/h36m/undistortedimages",
    help="dst model",
    type=str,
)
parser.add_argument(
    "--anno",
    default="~/datasets/h36m/annot/h36m_validation.pkl",
    type=str,
)
args = parser.parse_args()

src = os.path.expanduser(args.src)
dst = os.path.expanduser(args.dst)
# Annotation pickle: one record per image, carrying its source dataset,
# relative path, and camera parameters.
with open(os.path.expanduser(args.anno), 'rb') as f:
    data = pickle.load(f)
for db_rec in tqdm(data):
    path = db_rec['image']
    image_dir = 'images.zip@'
    # Images are read directly out of a zip archive via zipreader.
    image_file = os.path.join(src, db_rec['source'], image_dir, 'images', db_rec['image'])
    output_path = os.path.join(dst, path)
    # Skip images that were already undistorted on a previous run.
    if os.path.exists(output_path):
        continue
    output_dir = os.path.dirname(output_path)
    os.makedirs(output_dir, exist_ok=True)
    data_numpy = zipreader.imread(
        image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
    camera = db_rec['camera']
    # Camera intrinsic matrix built from the per-record parameters.
    K = np.array([
        [float(camera['fx']), 0, float(camera['cx'])],
        [0, float(camera['fy']), float(camera['cy'])],
        [0, 0, 1.],
    ])
    # OpenCV distortion coefficient order: k1, k2, p1, p2, k3.
    distCoeffs = np.array([float(i) for i in [camera['k'][0], camera['k'][1], camera['p'][0], camera['p'][1], camera['k'][2]]])
    data_numpy = cv2.undistort(data_numpy, K, distCoeffs)
    #cv2.imwrite(output_path, data_numpy, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
    #cv2.imwrite(output_path, data_numpy)
    cv2.imwrite(output_path, data_numpy, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
| [
"os.path.exists",
"argparse.ArgumentParser",
"os.makedirs",
"tqdm.tqdm",
"os.path.join",
"pickle.load",
"cv2.undistort",
"os.path.dirname",
"os.path.abspath",
"utils.zipreader.imread",
"os.path.expanduser"
] | [((373, 438), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch Keypoints Training"""'}), "(description='PyTorch Keypoints Training')\n", (396, 438), False, 'import argparse\n'), ((809, 837), 'os.path.expanduser', 'os.path.expanduser', (['args.src'], {}), '(args.src)\n', (827, 837), False, 'import os\n'), ((844, 872), 'os.path.expanduser', 'os.path.expanduser', (['args.dst'], {}), '(args.dst)\n', (862, 872), False, 'import os\n'), ((968, 978), 'tqdm.tqdm', 'tqdm', (['data'], {}), '(data)\n', (972, 978), False, 'from tqdm import tqdm\n'), ((937, 951), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (948, 951), False, 'import pickle\n'), ((1054, 1127), 'os.path.join', 'os.path.join', (['src', "db_rec['source']", 'image_dir', '"""images"""', "db_rec['image']"], {}), "(src, db_rec['source'], image_dir, 'images', db_rec['image'])\n", (1066, 1127), False, 'import os\n'), ((1146, 1169), 'os.path.join', 'os.path.join', (['dst', 'path'], {}), '(dst, path)\n', (1158, 1169), False, 'import os\n'), ((1177, 1204), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (1191, 1204), False, 'import os\n'), ((1240, 1268), 'os.path.dirname', 'os.path.dirname', (['output_path'], {}), '(output_path)\n', (1255, 1268), False, 'import os\n'), ((1273, 1311), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (1284, 1311), False, 'import os\n'), ((1329, 1407), 'utils.zipreader.imread', 'zipreader.imread', (['image_file', '(cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)'], {}), '(image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)\n', (1345, 1407), False, 'from utils import zipreader\n'), ((1755, 1795), 'cv2.undistort', 'cv2.undistort', (['data_numpy', 'K', 'distCoeffs'], {}), '(data_numpy, K, distCoeffs)\n', (1768, 1795), False, 'import cv2\n'), ((883, 912), 'os.path.expanduser', 'os.path.expanduser', (['args.anno'], {}), '(args.anno)\n', (901, 912), 
False, 'import os\n'), ((139, 161), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (151, 161), False, 'from os import path\n')] |
import os
import shutil
import wave
from collections import defaultdict
from statistics import mean

#directories = [r'D:\Data\aligner_comp\organized']
directories = ['/media/share/datasets/aligner_benchmarks/AlignerTestData/1_English_13000files',
               '/data/mmcauliffe/data/LibriSpeech/clean']
# For each corpus, sum WAV durations overall and per speaker directory.
for d in directories:
    print(d)
    total_duration = 0
    speaker_durations = defaultdict(float)
    for root, dirs, files in os.walk(d):
        for f in files:
            if not f.lower().endswith('.wav'):
                continue
            # Speaker id is taken from the immediate parent directory name.
            speaker = os.path.basename(root)
            with wave.open(os.path.join(root, f), 'rb') as wavef:
                sr = wavef.getframerate()
                nframe = wavef.getnframes()
                duration = nframe / sr  # seconds = frames / sample rate
            total_duration += duration
            speaker_durations[speaker] += duration
    # Durations are reported in minutes.
    print('Total:', total_duration / 60)
    print('Mean per speaker:', mean(speaker_durations.values())/60)
| [
"os.path.join",
"collections.defaultdict",
"os.path.basename",
"os.walk"
] | [((390, 408), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (401, 408), False, 'from collections import defaultdict\n'), ((439, 449), 'os.walk', 'os.walk', (['d'], {}), '(d)\n', (446, 449), False, 'import os\n'), ((569, 591), 'os.path.basename', 'os.path.basename', (['root'], {}), '(root)\n', (585, 591), False, 'import os\n'), ((619, 640), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (631, 640), False, 'import os\n')] |
# -*- coding: utf-8 -*-
import mimetypes
import os
from flask import abort, send_file
from flask_restful import Resource
from flask_restful_swagger import root_path
from flask_restful_swagger.registry import get_current_registry
from flask_restful_swagger.utils import render_page
__author__ = 'sobolevn'
class StaticFiles(Resource):
    """Serves the bundled Swagger UI static assets."""
    # TODO: is it possible to change this signature?
    def get(self, **kwargs):
        """Return the requested static file, rendering templated pages."""
        req_registry = get_current_registry()
        if not kwargs:
            file_path = "index.html"
        else:
            # Reassemble the relative path from the URL-rule keyword
            # segments, in sorted-key order.
            keys = sorted(kwargs.keys())
            file_path = '/'.join(
                kwargs[k].strip('/') for k in keys if kwargs[k] is not None
            )
        if file_path in [  # TODO: refactor to TemplateResource
            "index.html",
            "o2c.html",
            "swagger-ui.js",
            "swagger-ui.min.js",
            "lib/swagger-oauth.js",
        ]:
            # These files are templates that need the spec endpoint URL
            # injected before being served.
            conf = {'resource_list_url': req_registry['spec_endpoint_path']}
            return render_page(file_path, conf)
        mime = mimetypes.guess_type(file_path)[0]
        file_path = os.path.join(root_path, 'static', file_path)
        if os.path.exists(file_path):
            return send_file(file_path, mimetype=mime)
        abort(404)
| [
"os.path.exists",
"flask.abort",
"os.path.join",
"flask_restful_swagger.utils.render_page",
"mimetypes.guess_type",
"flask_restful_swagger.registry.get_current_registry",
"flask.send_file"
] | [((445, 467), 'flask_restful_swagger.registry.get_current_registry', 'get_current_registry', ([], {}), '()\n', (465, 467), False, 'from flask_restful_swagger.registry import get_current_registry\n'), ((1129, 1173), 'os.path.join', 'os.path.join', (['root_path', '"""static"""', 'file_path'], {}), "(root_path, 'static', file_path)\n", (1141, 1173), False, 'import os\n'), ((1185, 1210), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (1199, 1210), False, 'import os\n'), ((1275, 1285), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (1280, 1285), False, 'from flask import abort, send_file\n'), ((1028, 1056), 'flask_restful_swagger.utils.render_page', 'render_page', (['file_path', 'conf'], {}), '(file_path, conf)\n', (1039, 1056), False, 'from flask_restful_swagger.utils import render_page\n'), ((1073, 1104), 'mimetypes.guess_type', 'mimetypes.guess_type', (['file_path'], {}), '(file_path)\n', (1093, 1104), False, 'import mimetypes\n'), ((1231, 1266), 'flask.send_file', 'send_file', (['file_path'], {'mimetype': 'mime'}), '(file_path, mimetype=mime)\n', (1240, 1266), False, 'from flask import abort, send_file\n')] |
__Author__ = "noduez"
from socket import *
from time import ctime
import os
import re
HOST = ""
PORT = 21567
BUFSIZ = 1024
ADDR = (HOST, PORT)

tcpSerSock = socket(AF_INET, SOCK_STREAM)
tcpSerSock.bind(ADDR)
tcpSerSock.listen(5)

# Canned responses; note these are computed once at startup, so 'date' and
# 'ls' reflect server start time / cwd, not the moment of each request.
responsedic = {'date': ctime(), 'os': os.name, 'ls': str(os.listdir(os.curdir))}
while True:
    # Fix: the original Python-2 leftover (`print` on its own line followed
    # by a bare string literal) printed a blank line and discarded the text.
    print("Waiting for connect...")
    tcpCliSock, addr = tcpSerSock.accept()
    print('...connected from:', addr)
    while True:
        data = tcpCliSock.recv(BUFSIZ)
        if not data:
            break
        data_str = data.decode('utf-8')
        # Fix: match against the decoded text. The original matched
        # str(bytes), whose "b'...'" prefix made the anchored pattern
        # never match.
        findre = re.match(r'ls dir\((.+)\)', data_str)
        if responsedic.get(data_str):
            tcpCliSock.send(responsedic[data_str].encode('utf-8'))
        elif findre:
            listing = str(os.listdir(findre.group(1)))
            print(listing)
            # Fix: socket.send() requires bytes in Python 3; the original
            # passed a str, which raises TypeError.
            tcpCliSock.send(listing.encode('utf-8'))
        else:
            # Unknown command: echo the raw bytes back.
            tcpCliSock.send(data)
    tcpCliSock.close()
tcpCliSock.close() | [
"time.ctime",
"os.listdir"
] | [((254, 261), 'time.ctime', 'ctime', ([], {}), '()\n', (259, 261), False, 'from time import ctime\n'), ((288, 309), 'os.listdir', 'os.listdir', (['os.curdir'], {}), '(os.curdir)\n', (298, 309), False, 'import os\n')] |
import logging
import tflite
import numpy as np
from tflite2onnx import mapping
from tflite2onnx.op.common import Operator
from tflite2onnx.op.binary import PowerWrapper
logger = logging.getLogger('tflite2onnx')
class Rsqrt(Operator):
    """Convert TFLite RSQRT into ONNX Sqrt followed by Pow(x, -1)."""
    # use square root as input operator and propagate output to power
    TypeMapping = {
        tflite.BuiltinOperator.RSQRT: 'Sqrt',
    }

    def __init__(self, TFactory, index):
        super().__init__(TFactory, index)
        self.setInited()

    @property
    def type(self):
        # ONNX operator type this op maps to; always 'Sqrt' (the Pow half
        # is added separately in appendInvert()).
        if self.status.uninitialized:
            return 'Sqrt'
        else:
            op = self.tflite
            opcode = self.model.OperatorCodes(op.OpcodeIndex()).BuiltinCode()
            assert(opcode in self.TypeMapping)
            return self.TypeMapping[opcode]

    def parse(self):
        """Parse the single-input/single-output RSQRT op and append the
        reciprocal Pow node."""
        logger.debug("Parsing %s...", self.type)
        op = self.tflite
        opcode = self.model.OperatorCodes(op.OpcodeIndex()).BuiltinCode()
        assert(opcode in self.TypeMapping)
        assert(op.InputsLength() == 1)
        assert(op.OutputsLength() == 1)
        self.parseInput(0)
        self.parseOutput(0)
        # invert square root result
        self.appendInvert()
        self.setParsed()

    def propagatableTensors(self):
        """Tensors that participate in layout propagation."""
        return self.inputs + self.outputs

    def transform(self):
        # Nothing to transform for this operator.
        pass

    def appendInvert(self):
        """Wire a Pow(x, -1) node after Sqrt so the graph computes 1/sqrt(x)."""
        invert = PowerWrapper(self.TFactory, -1)
        invert_name = 'TFLITE2ONNX_Invert_%s' % self.outputs[0].name
        # Intermediate tensor carrying the plain sqrt result.
        invert_t = self.TFactory.getWithRef(self.outputs[0], invert_name, True)
        invert_t.setParsed()
        invert_t.addProducer(self)
        invert_t.addConsumer(invert)
        pow_t = 'TFLITE2ONNX_PowData_%s' % self.outputs[0].name
        pow_t = self.TFactory.getWithRef(self.outputs[0], pow_t, True)
        pow_dtype = mapping.DTYPE_ONNX2NAME[pow_t.dtype]
        # Constant exponent tensor filled with -1 (elementwise reciprocal).
        pow_t.data = np.full(shape=pow_t.shape, fill_value=-1, dtype=pow_dtype)
        pow_t.setParsed()
        pow_t.addConsumer(invert)
        invert.inputs.append(invert_t)
        invert.inputs.append(pow_t)
        invert.outputs.append(self.outputs[0])
        # Re-wire: this op now produces the intermediate tensor, while Pow
        # produces the original graph output.
        self.replaceOutput(self.outputs[0], invert_t)
        invert.setParsed()
        self.post.append(invert)
| [
"logging.getLogger",
"tflite2onnx.op.binary.PowerWrapper",
"numpy.full"
] | [((181, 213), 'logging.getLogger', 'logging.getLogger', (['"""tflite2onnx"""'], {}), "('tflite2onnx')\n", (198, 213), False, 'import logging\n'), ((1405, 1436), 'tflite2onnx.op.binary.PowerWrapper', 'PowerWrapper', (['self.TFactory', '(-1)'], {}), '(self.TFactory, -1)\n', (1417, 1436), False, 'from tflite2onnx.op.binary import PowerWrapper\n'), ((1902, 1960), 'numpy.full', 'np.full', ([], {'shape': 'pow_t.shape', 'fill_value': '(-1)', 'dtype': 'pow_dtype'}), '(shape=pow_t.shape, fill_value=-1, dtype=pow_dtype)\n', (1909, 1960), True, 'import numpy as np\n')] |
from argparse import ArgumentParser
from logging import INFO, basicConfig, getLogger
from pathlib import Path
from typing import Union
import cv2
from face_region_extractor import (
FaceDetectionError,
FaceNotFoundError,
FaceRegionExtractor,
)
basicConfig(level=INFO)
logger = getLogger(__name__)
def batch_crop(
    input_dir: Union[str, Path], output_dir: Union[str, Path], glob="**/*.jpg"
):
    """Crop detected faces from every image under *input_dir*.

    Each image matching *glob* is run through the face extractor; images
    with exactly one detected face are written to the mirrored relative
    path under *output_dir*. Detection failures, missing faces, and
    multi-face images are logged and skipped.
    """
    src = Path(input_dir)
    if not src.is_dir():
        raise ValueError(f"{src} is not directory")
    dst = Path(output_dir)
    if not dst.is_dir():
        raise ValueError(f"{dst} is not directory")

    extractor = FaceRegionExtractor()
    for img_file in src.glob(glob):
        logger.info(f"process {img_file}...")
        img = cv2.imread(str(img_file), cv2.IMREAD_COLOR)
        try:
            face_imgs = extractor.extract(img)
        except (FaceDetectionError, FaceNotFoundError) as e:
            logger.warning(f"{e}. file: {img_file}")
            continue
        if len(face_imgs) > 1:
            logger.info(f"Two more faces are detected. skip: {img_file}")
            continue
        out_file = dst / img_file.relative_to(src)
        out_file.parent.mkdir(parents=True, exist_ok=True)
        cv2.imwrite(str(out_file), face_imgs[0])
        logger.info(f"save to {out_file}.")
def _parse_args():
parser = ArgumentParser()
parser.add_argument("--input-dir", "-i", type=str, required=True)
parser.add_argument("--output-dir", "-o", type=str, required=True)
return parser.parse_args()
if __name__ == "__main__":
args = _parse_args()
batch_crop(args.input_dir, args.output_dir)
| [
"logging.basicConfig",
"logging.getLogger",
"argparse.ArgumentParser",
"pathlib.Path",
"face_region_extractor.FaceRegionExtractor"
] | [((259, 282), 'logging.basicConfig', 'basicConfig', ([], {'level': 'INFO'}), '(level=INFO)\n', (270, 282), False, 'from logging import INFO, basicConfig, getLogger\n'), ((292, 311), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (301, 311), False, 'from logging import INFO, basicConfig, getLogger\n'), ((428, 443), 'pathlib.Path', 'Path', (['input_dir'], {}), '(input_dir)\n', (432, 443), False, 'from pathlib import Path\n'), ((551, 567), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (555, 567), False, 'from pathlib import Path\n'), ((676, 697), 'face_region_extractor.FaceRegionExtractor', 'FaceRegionExtractor', ([], {}), '()\n', (695, 697), False, 'from face_region_extractor import FaceDetectionError, FaceNotFoundError, FaceRegionExtractor\n'), ((1536, 1552), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1550, 1552), False, 'from argparse import ArgumentParser\n')] |
#!/usr/bin/env python3
#
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Anonymize a newline-delimited JSON data file. By default, both the keys and
values are anonymized to prevent any chance of extracting the original source of
the data file. To preserve the keys, use the --preserve_keys flag.
The purpose of this script is to anonymize large JSON data files for
benchmarking purposes, while preserving the structure of the JSON data file so
that the BigQuery schema of the anonymized data file is structurally identical
to the original data file (modulo the names of the keys). If the --preserve_keys
flag is used, then the BigQuery schema file from the anonymized data should
be identical to the schema file from the original dataset.
Usage: anonymize.py [-h] [flags ...] < file.data.json > file.anon.data.json
"""
import argparse
import json
import logging
import sys
from collections import OrderedDict
from bigquery_schema_generator.generate_schema import SchemaGenerator
class Anonymizer:
    """Anonymize both the key and value of a newline-delimited JSON file.

    The anon_map is used to keep track of the (key -> anon_key) mapping.

    anon_map := {
        key: anon_entry,
        ...
    }

    anon_entry := {
        'anon_key': anonymous_key,
        'anon_map': anon_map,
        'next_anon_key': next_key
    }
    """

    def __init__(self, debugging_interval=1000, preserve_keys=False):
        # debugging_interval: emit a heartbeat log every N input lines.
        # preserve_keys: keep original keys and anonymize only the values.
        self.debugging_interval = debugging_interval
        self.preserve_keys = preserve_keys
        self.line_number = 0  # 1-based counter of input lines, for error logs

    def log_error(self, msg):
        """Log an error message annotated with the current input line number."""
        logging.error('line: %s; msg: %s', self.line_number, msg)

    def anonymize_file(self, file):
        """Anonymize the JSON data record one line at a time from the
        given file-like object, writing one anonymized JSON object per
        line to stdout. Malformed records are logged and skipped.
        """
        anon_entry = {}
        for line in file:
            self.line_number += 1
            if self.line_number % self.debugging_interval == 0:
                logging.info("Processing line %s", self.line_number)
            json_object = json.loads(line)
            if not isinstance(json_object, dict):
                self.log_error(
                    'Top level record must be an Object but was a %s' %
                    type(json_object))
                continue
            try:
                anon_dict = self.anonymize_dict(json_object, anon_entry)
            except Exception as e:
                self.log_error(str(e))
                # BUG FIX: skip this record. Previously execution fell
                # through to json.dump() with a stale anon_dict from a prior
                # iteration (or a NameError on the very first line).
                continue
            json.dump(anon_dict, sys.stdout)
            print()
        logging.info("Processed %s lines", self.line_number)

    def anonymize_dict(self, json_dict, anon_entry):
        """Recursively anonymize the JSON dictionary object, replacing the key
        and the value with their anonymized versions. Returns the 'anon_dict'
        with the 'anon_entry' updated.
        """
        # Add some bookkeeping variables to 'anon_entry' for a dict.
        anon_map = anon_entry.get('anon_map')
        if not anon_map:
            anon_map = {}
            anon_entry['anon_map'] = anon_map
        next_anon_key = anon_entry.get('next_anon_key')
        if not next_anon_key:
            next_anon_key = 'a'

        anon_dict = OrderedDict()
        for key, value in json_dict.items():
            child_anon_entry = anon_map.get(key)
            if not child_anon_entry:
                child_anon_entry = {}
                if self.preserve_keys:
                    child_anon_entry['anon_key'] = key
                else:
                    # Pad the anonymous key to preserve length
                    padding = max(0, len(key) - len(next_anon_key))
                    child_anon_entry['anon_key'] = \
                        next_anon_key + ('.' * padding)
                    next_anon_key = increment_anon_key(next_anon_key)
                anon_map[key] = child_anon_entry

            if isinstance(value, dict):
                value = self.anonymize_dict(value, child_anon_entry)
            elif isinstance(value, list):
                value = self.anonymize_list(value, child_anon_entry)
            else:
                value = self.anonymize_value(value)
            child_anon_key = child_anon_entry['anon_key']
            anon_dict[child_anon_key] = value

        # Update the next_anon_key so that anon_entry can be reused
        # for multiple dicts, e.g. in a list or lines in a file.
        anon_entry['next_anon_key'] = next_anon_key
        return anon_dict

    def anonymize_list(self, json_list, anon_entry):
        """Anonymize the given list, calling anonymize_dict() recursively if
        necessary.
        """
        anon_list = []
        for item in json_list:
            if isinstance(item, list):
                item = self.anonymize_list(item, anon_entry)
            elif isinstance(item, dict):
                item = self.anonymize_dict(item, anon_entry)
            else:
                item = self.anonymize_value(item)
            anon_list.append(item)
        return anon_list

    def anonymize_value(self, value):
        """Anonymize the value. A string is replaced with a string of an equal
        number of '*' characters. DATE, TIME and TIMESTAMP values are replaced
        with a fixed versions of those. An integer is replaced with just a '1'.
        A float is replaced with just a '2.0'. A boolean is replaced with just a
        'True'.
        """
        if isinstance(value, str):
            if SchemaGenerator.TIMESTAMP_MATCHER.match(value):
                return '2018-07-17T09:05:00-07:00'
            elif SchemaGenerator.DATE_MATCHER.match(value):
                return '2018-07-17'
            elif SchemaGenerator.TIME_MATCHER.match(value):
                return '09:05:00'
            else:
                # Pad the anonymous string to the same length as the original
                return '*' * len(value)
        elif isinstance(value, bool):
            return True
        elif isinstance(value, int):
            return 1
        elif isinstance(value, float):
            return 2.0
        elif value is None:
            return None
        else:
            raise Exception('Unsupported node type: %s' % type(value))

    def run(self):
        """Anonymize stdin to stdout."""
        self.anonymize_file(sys.stdin)
def increment_anon_key(key):
    """Increment the key in base-26 ([a-z] digits) to the next key.

    The sequence runs: a, ..., z, ba, bb, ..., bz, ..., baa, ...
    (a carry out of the top digit prepends 'b', so this is NOT the Excel
    column sequence). No validation is performed: every character is
    assumed to be in the [a-z] range.
    """
    digits = []
    carry = 1
    for ch in reversed(key):
        if not carry:
            digits.append(ch)
            continue
        bumped = ord(ch) + 1
        if bumped > ord('z'):
            digits.append('a')  # wrap this digit, keep carrying
        else:
            digits.append(chr(bumped))
            carry = 0
    if carry:
        digits.append('b')
    return ''.join(reversed(digits))
def _build_parser():
    """Build the command-line parser for the anonymizer."""
    parser = argparse.ArgumentParser(
        description='Anonymize newline-delimited JSON data file.')
    parser.add_argument(
        '--preserve_keys',
        help='Preserve the keys, do not anonymize them',
        action="store_true")
    parser.add_argument(
        '--debugging_interval',
        help='Number of lines between heartbeat debugging messages.',
        type=int,
        default=1000)
    return parser


def main():
    """Parse flags, configure logging, and anonymize stdin to stdout."""
    flags = _build_parser().parse_args()
    logging.basicConfig(level=logging.INFO)
    Anonymizer(flags.debugging_interval, flags.preserve_keys).run()
if __name__ == '__main__':
    # Script entry point: anonymize newline-delimited JSON from stdin.
    main()
| [
"logging.basicConfig",
"collections.OrderedDict",
"json.loads",
"bigquery_schema_generator.generate_schema.SchemaGenerator.DATE_MATCHER.match",
"argparse.ArgumentParser",
"logging.info",
"bigquery_schema_generator.generate_schema.SchemaGenerator.TIME_MATCHER.match",
"bigquery_schema_generator.generate... | [((7658, 7745), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Anonymize newline-delimited JSON data file."""'}), "(description=\n 'Anonymize newline-delimited JSON data file.')\n", (7681, 7745), False, 'import argparse\n'), ((8116, 8155), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (8135, 8155), False, 'import logging\n'), ((2102, 2159), 'logging.error', 'logging.error', (['"""line: %s; msg: %s"""', 'self.line_number', 'msg'], {}), "('line: %s; msg: %s', self.line_number, msg)\n", (2115, 2159), False, 'import logging\n'), ((3028, 3080), 'logging.info', 'logging.info', (['"""Processed %s lines"""', 'self.line_number'], {}), "('Processed %s lines', self.line_number)\n", (3040, 3080), False, 'import logging\n'), ((3694, 3707), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3705, 3707), False, 'from collections import OrderedDict\n'), ((2554, 2570), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2564, 2570), False, 'import json\n'), ((2967, 2999), 'json.dump', 'json.dump', (['anon_dict', 'sys.stdout'], {}), '(anon_dict, sys.stdout)\n', (2976, 2999), False, 'import json\n'), ((5939, 5985), 'bigquery_schema_generator.generate_schema.SchemaGenerator.TIMESTAMP_MATCHER.match', 'SchemaGenerator.TIMESTAMP_MATCHER.match', (['value'], {}), '(value)\n', (5978, 5985), False, 'from bigquery_schema_generator.generate_schema import SchemaGenerator\n'), ((2475, 2527), 'logging.info', 'logging.info', (['"""Processing line %s"""', 'self.line_number'], {}), "('Processing line %s', self.line_number)\n", (2487, 2527), False, 'import logging\n'), ((6055, 6096), 'bigquery_schema_generator.generate_schema.SchemaGenerator.DATE_MATCHER.match', 'SchemaGenerator.DATE_MATCHER.match', (['value'], {}), '(value)\n', (6089, 6096), False, 'from bigquery_schema_generator.generate_schema import SchemaGenerator\n'), ((6151, 6192), 
'bigquery_schema_generator.generate_schema.SchemaGenerator.TIME_MATCHER.match', 'SchemaGenerator.TIME_MATCHER.match', (['value'], {}), '(value)\n', (6185, 6192), False, 'from bigquery_schema_generator.generate_schema import SchemaGenerator\n')] |
"""Compiler
This module exports a single function
complile(source_code_string, output_type)
The second argument tells the compiler what to produce. It must be one of:
tokens the token sequence
ast the abstract syntax tree
analyzed the semantically analyzed representation
optimized the optimized semantically analyzed representation
js the translation to JavaScript
c the translation to C
llvm the translation to LLVM
"""
from ael.scanner import tokenize
from ael.parser import parse
from ael.analyzer import analyze
from ael.optimizer import optimize
from ael.generator import generate
def compile(source, output_type):
    """Compile `source` and return the representation named by `output_type`.

    Recognized (case-insensitive) output types: 'tokens', 'ast',
    'analyzed', 'optimized', 'js', 'c', 'llvm'. Anything else yields the
    string 'Unknown output type'.
    """
    stage = output_type.lower()
    if stage == 'tokens':
        return tokenize(source)
    if stage == 'ast':
        return parse(tokenize(source))
    if stage == 'analyzed':
        return analyze(parse(tokenize(source)))
    if stage == 'optimized':
        return optimize(analyze(parse(tokenize(source))))
    if stage in ('js', 'c', 'llvm'):
        return generate[stage](optimize(analyze(parse(tokenize(source)))))
    return 'Unknown output type'
| [
"ael.scanner.tokenize"
] | [((782, 798), 'ael.scanner.tokenize', 'tokenize', (['source'], {}), '(source)\n', (790, 798), False, 'from ael.scanner import tokenize\n'), ((851, 867), 'ael.scanner.tokenize', 'tokenize', (['source'], {}), '(source)\n', (859, 867), False, 'from ael.scanner import tokenize\n'), ((935, 951), 'ael.scanner.tokenize', 'tokenize', (['source'], {}), '(source)\n', (943, 951), False, 'from ael.scanner import tokenize\n'), ((1030, 1046), 'ael.scanner.tokenize', 'tokenize', (['source'], {}), '(source)\n', (1038, 1046), False, 'from ael.scanner import tokenize\n'), ((1156, 1172), 'ael.scanner.tokenize', 'tokenize', (['source'], {}), '(source)\n', (1164, 1172), False, 'from ael.scanner import tokenize\n')] |
# WTF uses a class to represent a traditional form.
# The class LoginForm describes the fields of our form. Note; we're
# paying no attention to the appearance of the form just yet.
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
# DataRequired ensures that a user has inputted data into the field.
from wtforms.validators import DataRequired
class LoginForm(FlaskForm):
    """Sign-in form: username/password credentials plus a remember-me flag."""
    # DataRequired rejects empty submissions for both credential fields.
    username = StringField("Username", validators=[DataRequired()])
    password = PasswordField("Password", validators=[DataRequired()])
    remember_me = BooleanField("Remember Me")
    submit = SubmitField("Sign In")
| [
"wtforms.BooleanField",
"wtforms.validators.DataRequired",
"wtforms.SubmitField"
] | [((587, 614), 'wtforms.BooleanField', 'BooleanField', (['"""Remember Me"""'], {}), "('Remember Me')\n", (599, 614), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField\n'), ((628, 650), 'wtforms.SubmitField', 'SubmitField', (['"""Sign In"""'], {}), "('Sign In')\n", (639, 650), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField\n'), ((482, 496), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (494, 496), False, 'from wtforms.validators import DataRequired\n'), ((552, 566), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (564, 566), False, 'from wtforms.validators import DataRequired\n')] |
"""
********************************************************************************
main file to execute
********************************************************************************
"""
import time
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from pinn import PINN
from config_gpu import config_gpu
from params import params
from prp_dat import prp_dat
from plot_sol import *
from fdm import FDM
def main():
    """Train a PINN against an FDM reference solution and compare them.

    Pipeline: configure the GPU, load hyperparameters, build the
    space-time grid, run the finite-difference reference simulation,
    train the PINN, then time inference and report L2 / MSE / SEM errors
    at sampled time steps (presumably a 2-D wave-type PDE with speed c —
    confirm against the pinn/fdm modules).
    """
    # GPU configuration
    config_gpu(gpu_flg = 1)
    # hyperparameters (network shape, optimizer, loss weights, run control)
    f_in, f_out, width, depth, \
    w_init, b_init, act, \
    lr, opt, \
    f_scl, laaf, c, \
    w_ini, w_bnd, w_pde, BC, \
    f_mntr, r_seed, \
    n_epch, n_btch, c_tol = params()
    # domain discretization: t in [0,10], x and y in [0,5]
    tmin = 0.; tmax = 10.; nt = int(5e2) + 1
    xmin = 0.; xmax = 5.; nx = int(1e2) + 1
    ymin = 0.; ymax = 5.; ny = int(1e2) + 1
    t_ = np.linspace(tmin, tmax, nt)
    x_ = np.linspace(xmin, xmax, nx)
    y_ = np.linspace(ymin, ymax, ny)
    dt = t_[1] - t_[0]
    dx = x_[1] - x_[0]
    dy = y_[1] - y_[0]
    # CFL number c*dt/dx — stability indicator for the explicit FDM scheme
    cfl = c * dt / dx
    print("CFL number:", cfl)
    x, y = np.meshgrid(x_, y_)
    u = np.empty((nt, nx, ny))
    print("tmin: %.3f, tmax: %.3f, nt: %d, dt: %.3e" % (tmin, tmax, nt, dt))
    print("xmin: %.3f, xmax: %.3f, nx: %d, dx: %.3e" % (xmin, xmax, nx, dx))
    print("ymin: %.3f, ymax: %.3f, ny: %d, dy: %.3e" % (ymin, ymax, ny, dy))
    # FDM reference simulation over the full grid
    u_FDM = FDM(xmin, xmax, nx, dx,
                ymin, ymax, ny, dy,
                nt, dt,
                x, y, u, c, BC)
    # sample training data: initial, boundary and collocation (PDE) points
    TX, lb, ub, \
    t_ini, x_ini, y_ini, u_ini, \
    t_bnd, x_bnd, y_bnd, \
    t_pde, x_pde, y_pde = prp_dat(t_, x_, y_,
                                    N_ini = int(5e3), N_bnd = int(1e4), N_pde = int(3e4))
    pinn = PINN(t_ini, x_ini, y_ini, u_ini,
                t_bnd, x_bnd, y_bnd,
                t_pde, x_pde, y_pde,
                f_in, f_out, width, depth,
                w_init, b_init, act,
                lr, opt,
                f_scl, laaf, c,
                w_ini, w_bnd, w_pde, BC,
                f_mntr, r_seed)
    # train on GPU and report wall-clock time
    t0 = time.time()
    with tf.device("/device:GPU:0"):
        pinn.train(epoch = n_epch, batch = n_btch, tol = c_tol)
    t1 = time.time()
    elps = t1 - t0
    print(">>>>> elapse time for training (sec):", elps)
    print(">>>>> elapse time for training (min):", elps / 60.)
    # inference timing: one full-grid prediction per time step
    x_inf, y_inf = np.meshgrid(x_, y_)
    x_inf, y_inf = x_inf.reshape(-1, 1), y_inf.reshape(-1, 1)
    elps = 0
    for t in t_:
        t_inf = np.ones_like(x_inf) * t
        t0 = time.time()
        u_, gv_ = pinn.infer(t_inf, x_inf, y_inf)
        t1 = time.time()
        temp = t1 - t0
        elps += temp
    print(">>>>> elapse time for inference (sec):", elps)
    print(">>>>> elapse time for inference (min):", elps / 60.)
    #     x_inf = np.unique(TX[:,1:2])
    #     y_inf = np.unique(TX[:,2:3])
    #     x_inf, y_inf = np.meshgrid(x_inf, y_inf)
    #     x_inf, y_inf = x_inf.reshape(-1, 1), y_inf.reshape(-1, 1)
    #     elps = 0.
    #     for n in range(nt):
    #         if n % 100 == 0:
    #             print("currently", n)
    #         t = n * dt   # convert to real time
    #         u_fdm = u_FDM[n,:,:]
    #         n = np.array([n])
    #         t_inf = np.unique(TX[:,0:1])
    #         t_inf = np.tile(t_inf.reshape(-1, 1), (1, x_inf.shape[0])).T[:,n]
    #         t0 = time.time()
    #         u_, gv_ = pinn.infer(t_inf, x_inf, y_inf)
    #         t1 = time.time()
    #         temp = t1 - t0
    #         elps += temp
    #     print(">>>>> elapse time for inference (sec):", elps)
    #     print(">>>>> elapse time for inference (min):", elps / 60.)
    # training-loss history (total and per-term)
    plt.figure(figsize = (8, 4))
    plt.plot(pinn.ep_log, pinn.loss_log,     alpha = .7, linestyle = "-",  label = "loss", c = "k")
    plt.plot(pinn.ep_log, pinn.loss_ini_log, alpha = .5, linestyle = "--", label = "loss_ini")
    plt.plot(pinn.ep_log, pinn.loss_bnd_log, alpha = .5, linestyle = "--", label = "loss_bnd")
    plt.plot(pinn.ep_log, pinn.loss_pde_log, alpha = .5, linestyle = "--", label = "loss_pde")
    plt.yscale("log")
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.legend(loc = "upper right")
    plt.grid(alpha = .5)
    plt.show()
    # compare FDM vs PINN surfaces at ~20 sampled time steps
    for n in range(nt):
        if n % (int(nt / 20)) == 0:
            t = n * dt   # convert to real time
            u_fdm = u_FDM[n,:,:]
            n = np.array([n])
            t_inf = np.unique(TX[:,0:1])
            x_inf = np.unique(TX[:,1:2])
            y_inf = np.unique(TX[:,2:3])
            x_inf, y_inf = np.meshgrid(x_inf, y_inf)
            x_inf, y_inf = x_inf.reshape(-1, 1), y_inf.reshape(-1, 1)
            t_inf = np.tile(t_inf.reshape(-1, 1), (1, x_inf.shape[0])).T[:,n]
            u_, gv_ = pinn.infer(t_inf, x_inf, y_inf)
            fig = plt.figure(figsize=(16, 4))
            ax = fig.add_subplot(1, 1, 1, projection = "3d")
            ax.plot_surface(x, y, u_fdm, vmin = -1., vmax = 1.)
            ax.set_xlim(xmin, xmax)
            ax.set_ylim(ymin, ymax)
            ax.set_zlim(-1., 1.)
            ax = fig.add_subplot(1, 2, 2, projection = "3d")
            ax.plot_surface(x, y, u_.numpy().reshape(nx, ny), vmin = -1., vmax = 1.)
            ax.set_xlim(xmin, xmax)
            ax.set_ylim(ymin, ymax)
            ax.set_zlim(-1., 1.)
            # error metrics between FDM reference and PINN prediction
            u_diff = u_fdm - u_.numpy().reshape(nx, ny)
            u_l2  = np.linalg.norm(u_diff, ord=2) / np.linalg.norm(u_fdm, ord=2)
            u_mse = np.mean(np.square(u_diff)) / np.sqrt(nx * ny)
            u_sem = np.std (np.square(u_diff), ddof = 1) / np.sqrt(nx * ny)
            print("t: %.3f, l2: %.3e, mse: %.3e, sem: %.3e" % (t, u_l2, u_mse, u_sem))
if __name__ == "__main__":
main()
| [
"matplotlib.pyplot.grid",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.linalg.norm",
"fdm.FDM",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.empty",
"params.params",
"numpy.meshgrid",
"matplotlib.pyplot.yscale",
"tensorflow.device",
"p... | [((473, 494), 'config_gpu.config_gpu', 'config_gpu', ([], {'gpu_flg': '(1)'}), '(gpu_flg=1)\n', (483, 494), False, 'from config_gpu import config_gpu\n'), ((689, 697), 'params.params', 'params', ([], {}), '()\n', (695, 697), False, 'from params import params\n'), ((856, 883), 'numpy.linspace', 'np.linspace', (['tmin', 'tmax', 'nt'], {}), '(tmin, tmax, nt)\n', (867, 883), True, 'import numpy as np\n'), ((893, 920), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'nx'], {}), '(xmin, xmax, nx)\n', (904, 920), True, 'import numpy as np\n'), ((930, 957), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', 'ny'], {}), '(ymin, ymax, ny)\n', (941, 957), True, 'import numpy as np\n'), ((1091, 1110), 'numpy.meshgrid', 'np.meshgrid', (['x_', 'y_'], {}), '(x_, y_)\n', (1102, 1110), True, 'import numpy as np\n'), ((1122, 1144), 'numpy.empty', 'np.empty', (['(nt, nx, ny)'], {}), '((nt, nx, ny))\n', (1130, 1144), True, 'import numpy as np\n'), ((1410, 1477), 'fdm.FDM', 'FDM', (['xmin', 'xmax', 'nx', 'dx', 'ymin', 'ymax', 'ny', 'dy', 'nt', 'dt', 'x', 'y', 'u', 'c', 'BC'], {}), '(xmin, xmax, nx, dx, ymin, ymax, ny, dy, nt, dt, x, y, u, c, BC)\n', (1413, 1477), False, 'from fdm import FDM\n'), ((1774, 1970), 'pinn.PINN', 'PINN', (['t_ini', 'x_ini', 'y_ini', 'u_ini', 't_bnd', 'x_bnd', 'y_bnd', 't_pde', 'x_pde', 'y_pde', 'f_in', 'f_out', 'width', 'depth', 'w_init', 'b_init', 'act', 'lr', 'opt', 'f_scl', 'laaf', 'c', 'w_ini', 'w_bnd', 'w_pde', 'BC', 'f_mntr', 'r_seed'], {}), '(t_ini, x_ini, y_ini, u_ini, t_bnd, x_bnd, y_bnd, t_pde, x_pde, y_pde,\n f_in, f_out, width, depth, w_init, b_init, act, lr, opt, f_scl, laaf, c,\n w_ini, w_bnd, w_pde, BC, f_mntr, r_seed)\n', (1778, 1970), False, 'from pinn import PINN\n'), ((2109, 2120), 'time.time', 'time.time', ([], {}), '()\n', (2118, 2120), False, 'import time\n'), ((2231, 2242), 'time.time', 'time.time', ([], {}), '()\n', (2240, 2242), False, 'import time\n'), ((2418, 2437), 'numpy.meshgrid', 'np.meshgrid', (['x_', 'y_'], {}), 
'(x_, y_)\n', (2429, 2437), True, 'import numpy as np\n'), ((3628, 3654), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (3638, 3654), True, 'import matplotlib.pyplot as plt\n'), ((3661, 3748), 'matplotlib.pyplot.plot', 'plt.plot', (['pinn.ep_log', 'pinn.loss_log'], {'alpha': '(0.7)', 'linestyle': '"""-"""', 'label': '"""loss"""', 'c': '"""k"""'}), "(pinn.ep_log, pinn.loss_log, alpha=0.7, linestyle='-', label='loss',\n c='k')\n", (3669, 3748), True, 'import matplotlib.pyplot as plt\n'), ((3761, 3851), 'matplotlib.pyplot.plot', 'plt.plot', (['pinn.ep_log', 'pinn.loss_ini_log'], {'alpha': '(0.5)', 'linestyle': '"""--"""', 'label': '"""loss_ini"""'}), "(pinn.ep_log, pinn.loss_ini_log, alpha=0.5, linestyle='--', label=\n 'loss_ini')\n", (3769, 3851), True, 'import matplotlib.pyplot as plt\n'), ((3856, 3946), 'matplotlib.pyplot.plot', 'plt.plot', (['pinn.ep_log', 'pinn.loss_bnd_log'], {'alpha': '(0.5)', 'linestyle': '"""--"""', 'label': '"""loss_bnd"""'}), "(pinn.ep_log, pinn.loss_bnd_log, alpha=0.5, linestyle='--', label=\n 'loss_bnd')\n", (3864, 3946), True, 'import matplotlib.pyplot as plt\n'), ((3951, 4041), 'matplotlib.pyplot.plot', 'plt.plot', (['pinn.ep_log', 'pinn.loss_pde_log'], {'alpha': '(0.5)', 'linestyle': '"""--"""', 'label': '"""loss_pde"""'}), "(pinn.ep_log, pinn.loss_pde_log, alpha=0.5, linestyle='--', label=\n 'loss_pde')\n", (3959, 4041), True, 'import matplotlib.pyplot as plt\n'), ((4046, 4063), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (4056, 4063), True, 'import matplotlib.pyplot as plt\n'), ((4068, 4087), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (4078, 4087), True, 'import matplotlib.pyplot as plt\n'), ((4092, 4110), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (4102, 4110), True, 'import matplotlib.pyplot as plt\n'), ((4115, 4144), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper 
right"""'}), "(loc='upper right')\n", (4125, 4144), True, 'import matplotlib.pyplot as plt\n'), ((4151, 4170), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'alpha': '(0.5)'}), '(alpha=0.5)\n', (4159, 4170), True, 'import matplotlib.pyplot as plt\n'), ((4176, 4186), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4184, 4186), True, 'import matplotlib.pyplot as plt\n'), ((2130, 2156), 'tensorflow.device', 'tf.device', (['"""/device:GPU:0"""'], {}), "('/device:GPU:0')\n", (2139, 2156), True, 'import tensorflow as tf\n'), ((2583, 2594), 'time.time', 'time.time', ([], {}), '()\n', (2592, 2594), False, 'import time\n'), ((2658, 2669), 'time.time', 'time.time', ([], {}), '()\n', (2667, 2669), False, 'import time\n'), ((2546, 2565), 'numpy.ones_like', 'np.ones_like', (['x_inf'], {}), '(x_inf)\n', (2558, 2565), True, 'import numpy as np\n'), ((4345, 4358), 'numpy.array', 'np.array', (['[n]'], {}), '([n])\n', (4353, 4358), True, 'import numpy as np\n'), ((4379, 4400), 'numpy.unique', 'np.unique', (['TX[:, 0:1]'], {}), '(TX[:, 0:1])\n', (4388, 4400), True, 'import numpy as np\n'), ((4420, 4441), 'numpy.unique', 'np.unique', (['TX[:, 1:2]'], {}), '(TX[:, 1:2])\n', (4429, 4441), True, 'import numpy as np\n'), ((4461, 4482), 'numpy.unique', 'np.unique', (['TX[:, 2:3]'], {}), '(TX[:, 2:3])\n', (4470, 4482), True, 'import numpy as np\n'), ((4509, 4534), 'numpy.meshgrid', 'np.meshgrid', (['x_inf', 'y_inf'], {}), '(x_inf, y_inf)\n', (4520, 4534), True, 'import numpy as np\n'), ((4756, 4783), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 4)'}), '(figsize=(16, 4))\n', (4766, 4783), True, 'import matplotlib.pyplot as plt\n'), ((5343, 5372), 'numpy.linalg.norm', 'np.linalg.norm', (['u_diff'], {'ord': '(2)'}), '(u_diff, ord=2)\n', (5357, 5372), True, 'import numpy as np\n'), ((5375, 5403), 'numpy.linalg.norm', 'np.linalg.norm', (['u_fdm'], {'ord': '(2)'}), '(u_fdm, ord=2)\n', (5389, 5403), True, 'import numpy as np\n'), ((5453, 5469), 'numpy.sqrt', 'np.sqrt', 
(['(nx * ny)'], {}), '(nx * ny)\n', (5460, 5469), True, 'import numpy as np\n'), ((5529, 5545), 'numpy.sqrt', 'np.sqrt', (['(nx * ny)'], {}), '(nx * ny)\n', (5536, 5545), True, 'import numpy as np\n'), ((5432, 5449), 'numpy.square', 'np.square', (['u_diff'], {}), '(u_diff)\n', (5441, 5449), True, 'import numpy as np\n'), ((5498, 5515), 'numpy.square', 'np.square', (['u_diff'], {}), '(u_diff)\n', (5507, 5515), True, 'import numpy as np\n')] |
import os, sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import skimage.io
from skimage.transform import resize
from imgaug import augmenters as iaa
from random import randint
import PIL
from PIL import Image
import cv2
from sklearn.utils import class_weight, shuffle
import keras
import warnings
from keras.utils import Sequence
import tensorflow as tf
warnings.filterwarnings("ignore")
SIZE = 256
SEED = 777
THRESHOLD = 0.2
# Load dataset info
DIR = '../input/'
# data = pd.read_csv('../input/train.csv')
def getTrainDataset(limit=5000):
    """Load training image paths and multi-hot label vectors from train.csv.

    Each row's 'Target' field is a space-separated list of class indices,
    expanded into a length-28 multi-hot vector.

    Args:
        limit: maximum number of samples returned. Defaults to 5000,
            matching the previous hard-coded truncation; pass ``None``
            for the full dataset.

    Returns:
        Tuple ``(paths, labels)`` of numpy arrays; ``labels`` has shape
        ``(n, 28)``.
    """
    path_to_train = DIR + '/train/'
    data = pd.read_csv(DIR + '/train.csv')

    paths = []
    labels = []

    for name, lbl in zip(data['Id'], data['Target'].str.split(' ')):
        y = np.zeros(28)
        for key in lbl:
            y[int(key)] = 1
        paths.append(os.path.join(path_to_train, name))
        labels.append(y)

    # A ``None`` limit slices to the full list; an int keeps the first rows.
    return np.array(paths[:limit]), np.array(labels[:limit])
def getTestDataset():
    """Return (paths, labels) for the test split.

    Labels are all-ones length-28 placeholders, since the test split has
    no ground truth.
    """
    test_root = DIR + '/test/'
    data = pd.read_csv(DIR + '/sample_submission.csv')
    ids = data['Id']
    paths = [os.path.join(test_root, name) for name in ids]
    labels = [np.ones(28) for _ in ids]
    return np.array(paths), np.array(labels)
# paths, labels = getTrainDataset()
# credits: https://github.com/keras-team/keras/blob/master/keras/utils/data_utils.py#L302
# credits: https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly
class ProteinDataGenerator(keras.utils.Sequence):
    """Keras Sequence yielding batches of multi-channel protein images.

    Images are loaded from ``<path>_<channel>.png`` files, stacked along
    the last axis, resized to SIZE x SIZE and scaled to [0, 1]. Decoded
    images may optionally be cached in RAM, and imgaug augmentation may
    be applied per batch.
    """

    def __init__(self, paths, labels, batch_size, shape,
                 channels=None, shuffle=False, use_cache=False, augmentor=False):
        # NOTE: ``channels=None`` replaces a mutable default ``[]``;
        # behavior for all callers is unchanged.
        self.paths, self.labels = paths, labels
        self.batch_size = batch_size
        self.shape = shape
        self.shuffle = shuffle
        self.use_cache = use_cache
        self.channels = [] if channels is None else channels
        self.augmentor = augmentor
        self.clahe = cv2.createCLAHE()
        if use_cache == True:
            # One cache slot per sample plus a per-sample "loaded" flag.
            self.cache = np.zeros((paths.shape[0], shape[0], shape[1], len(self.channels)))
            self.is_cached = np.zeros((paths.shape[0]))
        self.on_epoch_end()

    def __len__(self):
        """Number of batches per epoch."""
        return int(np.ceil(len(self.paths) / float(self.batch_size)))

    def __getitem__(self, idx):
        """Return batch ``idx`` as a tuple ``(X, y)``."""
        indexes = self.indexes[idx * self.batch_size : (idx + 1) * self.batch_size]
        paths = self.paths[indexes]
        X = np.zeros((paths.shape[0], self.shape[0], self.shape[1], self.shape[2]))

        # Generate data
        if self.use_cache == True:
            X = self.cache[indexes]
            # BUG FIX: the original enumerated only the *uncached* paths and
            # reused that enumeration index to write X[i] / cache[indexes[i]],
            # misaligning images with labels whenever cached and uncached
            # entries were interleaved. Index by batch position instead.
            for i in np.where(self.is_cached[indexes] == 0)[0]:
                image = self.__load_image(paths[i])
                self.is_cached[indexes[i]] = 1
                self.cache[indexes[i]] = image
                X[i] = image
        else:
            for i, path in enumerate(paths):
                X[i] = self.__load_image(path)

        if self.augmentor == True:
            for i, item in enumerate(X):
                X[i] = self.augment(item)

        y = self.labels[indexes]
        return X, y

    def on_epoch_end(self):
        """Refresh (and optionally shuffle) the sample index order."""
        self.indexes = np.arange(len(self.paths))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __iter__(self):
        """Create a generator that iterates over the Sequence."""
        yield from (self[i] for i in range(len(self)))

    def __load_image(self, path):
        """Load, stack, resize and scale one sample's channel images."""
        images = []
        for channel in self.channels:
            im = np.array(Image.open(path + '_' + channel + '.png'))
            images.append(im)
        if len(self.channels) >= 2:
            im = np.stack(images, -1)
            im = cv2.resize(im, (SIZE, SIZE))
            im = np.divide(im, 255)
        else:
            # Single channel: keep an explicit trailing channel axis.
            im = images[0]
            im = cv2.resize(im, (SIZE, SIZE))
            im = np.divide(im, 255)
            im = np.expand_dims(im, 2)
        return im

    def augment(self, image):
        """Randomly augment ``image`` (50% chance of no-op)."""
        if randint(0, 1) == 1:
            augment_img = iaa.Sequential([
                iaa.OneOf([
                    iaa.Fliplr(0.5),               # horizontal flips
                    iaa.Flipud(0.5),               # vertical flips
                    iaa.Crop(percent=(0, 0.1)),    # random crops
                    # Small gaussian blur with random sigma between 0 and 0.5,
                    # applied to about 50% of images.
                    iaa.Sometimes(0.5,
                        iaa.GaussianBlur(sigma=(0, 0.5))
                    ),
                    # Make some images brighter and some darker; in 20% of
                    # cases sample the multiplier per channel.
                    iaa.Multiply((0.8, 1.2), per_channel=0.2),
                    # Affine: scale/translate/rotate/shear.
                    iaa.Affine(
                        scale={"x": (0.9, 1.1), "y": (0.9, 1.1)},
                        translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)},
                        rotate=(-180, 180),
                        shear=(-4, 4)
                    )
                ])], random_order=True)
            return augment_img.augment_image(image)
        else:
            return image
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, load_model
from keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D, MaxPooling2D
from keras.applications.inception_v3 import InceptionV3
from keras.callbacks import ModelCheckpoint
from keras import metrics
from keras.optimizers import Adam
from keras import backend as K
import keras
from keras.models import Model
from keras.utils import multi_gpu_model
def f1(y_true, y_pred):
    """Macro-averaged F1 Keras metric at the global THRESHOLD.

    Predictions are clipped to [0, 1] and binarized at THRESHOLD; per-class
    precision and recall are combined into F1 and averaged over classes,
    with NaN classes (no positives) replaced by 0.
    """
    y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), THRESHOLD), K.floatx())
    tp = K.sum(K.cast(y_true*y_pred, 'float'), axis=0)
    fp = K.sum(K.cast((1-y_true)*y_pred, 'float'), axis=0)
    fn = K.sum(K.cast(y_true*(1-y_pred), 'float'), axis=0)
    # (the true-negative count was computed but unused; removed)
    p = tp / (tp + fp + K.epsilon())
    r = tp / (tp + fn + K.epsilon())
    f1 = 2*p*r / (p+r+K.epsilon())
    f1 = tf.where(tf.is_nan(f1), tf.zeros_like(f1), f1)  # NaN -> 0
    return K.mean(f1)
def f1_loss(y_true, y_pred):
    """Differentiable ``1 - soft-F1`` loss.

    Unlike the ``f1`` metric, predictions are NOT thresholded, so the
    expression stays differentiable for training. NaN per-class values
    (no positives) are replaced by 0 before averaging.
    """
    tp = K.sum(K.cast(y_true*y_pred, 'float'), axis=0)
    fp = K.sum(K.cast((1-y_true)*y_pred, 'float'), axis=0)
    fn = K.sum(K.cast(y_true*(1-y_pred), 'float'), axis=0)
    # (the true-negative count was computed but unused; removed)
    p = tp / (tp + fp + K.epsilon())
    r = tp / (tp + fn + K.epsilon())
    f1 = 2*p*r / (p+r+K.epsilon())
    f1 = tf.where(tf.is_nan(f1), tf.zeros_like(f1), f1)  # NaN -> 0
    return 1-K.mean(f1)
def focal_loss(gamma=2., alpha=.25):
    """Return a binary focal-loss function with focusing parameter
    ``gamma`` and positive-class weight ``alpha``."""
    def focal_loss_fixed(y_true, y_pred):
        # Probabilities the model assigned to the positive / negative labels.
        pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
        pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
        # Clip away exact 0/1 so the logarithms stay finite.
        pt_1 = K.clip(pt_1, 1e-3, .999)
        pt_0 = K.clip(pt_0, 1e-3, .999)
        pos_term = K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1))
        neg_term = K.sum((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))
        return -pos_term - neg_term
    return focal_loss_fixed
def create_model(input_shape, n_out, channels):
    """Build the classifier: BatchNorm -> truncated InceptionV3 -> conv head.

    NOTE(review): ``input_shape`` is accepted but unused — the input is
    hard-wired to ``(256, 256, len(channels))``; confirm before relying
    on it.
    """
    inp = Input(shape=(256, 256, len(channels)))
    normed = BatchNormalization()(inp)
    # ImageNet-pretrained InceptionV3, truncated at layer index 132.
    backbone = InceptionV3(include_top=False, weights='imagenet')
    trunk = Model(inputs=backbone.input,
                  outputs=backbone.get_layer(index=132).output)
    net = trunk(normed)
    net = Dropout(0.5)(net)
    net = Conv2D(128, kernel_size=(3, 3), activation='relu')(net)
    net = Flatten()(net)
    net = Dropout(0.5)(net)
    net = Dense(1024, activation='relu')(net)
    net = Dropout(0.5)(net)
    out = Dense(n_out, activation='sigmoid')(net)
    return Model(inp, out)
## Load data
SHAPE = (256, 256, 3)                    # model input shape
channels = ["green", "blue", "red"]      # PNG channel suffixes to stack
# create callbacks list
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau
from sklearn.model_selection import train_test_split
epochs = 10; batch_size = 64; VAL_RATIO = .1; DEBUG = False
# split data into train, valid
paths, labels = getTrainDataset()
# shuffle sample indices reproducibly before the train/validation split
# NOTE(review): np.int is removed in NumPy >= 1.24; may need plain int.
keys = np.arange(paths.shape[0], dtype=np.int)
np.random.seed(SEED)
np.random.shuffle(keys)
lastTrainIndex = int((1-VAL_RATIO) * paths.shape[0])
if DEBUG == True:  # use only small subset for debugging, Kaggle's RAM is limited
    pathsTrain = paths[0:256]
    labelsTrain = labels[0:256]
    pathsVal = paths[lastTrainIndex:lastTrainIndex+256]
    labelsVal = labels[lastTrainIndex:lastTrainIndex+256]
    use_cache = True
else:
    pathsTrain = paths[0:lastTrainIndex]
    labelsTrain = labels[0:lastTrainIndex]
    pathsVal = paths[lastTrainIndex:]
    labelsVal = labels[lastTrainIndex:]
    use_cache = False
use_cache = False  # caching forced off regardless of the branch above
# print(paths.shape, labels.shape)
# print(pathsTrain.shape, labelsTrain.shape, pathsVal.shape, labelsVal.shape)
tg = ProteinDataGenerator(pathsTrain, labelsTrain, batch_size, SHAPE, channels, use_cache=use_cache)
vg = ProteinDataGenerator(pathsVal, labelsVal, batch_size, SHAPE, channels, use_cache=use_cache)
# checkpoint the best model by validation F1; reduce LR on plateau; stop early
checkpoint = ModelCheckpoint('../working/InceptionV3_3chan.h5', monitor='val_f1', verbose=1,
                             save_best_only=True, mode='max', save_weights_only = False)
reduceLROnPlat = ReduceLROnPlateau(monitor='val_f1', factor=0.5, patience=10,
                                   verbose=1, mode='max', epsilon=0.0001)
early = EarlyStopping(monitor="val_f1",
                      mode="max",
                      patience=20)
callbacks_list = [checkpoint, early, reduceLROnPlat]
# warm up model
import tensorflow as tf
channels = ["green", "blue", "red"]
# with tf.device('/cpu:0'):
model = create_model(
    input_shape=(SIZE,SIZE,3),
    n_out=28, channels = channels)
# Freeze the whole network, then unfreeze only the last layers for warm-up.
for layer in model.layers:
    layer.trainable = False
for i in range(-6,2):
    model.layers[i].trainable = True
model.summary()
model.compile(loss="binary_crossentropy",
            optimizer=Adam(lr=1e-4),
            metrics=['accuracy', f1])
hist = model.fit_generator(
tg,
steps_per_epoch=np.ceil(float(len(pathsTrain)) / float(batch_size))/2,
validation_data=vg,
validation_steps=np.ceil(float(len(pathsVal)) / float(batch_size))/2,
epochs=1,
verbose=1,
callbacks = callbacks_list)
# Set all layers back to trainable
for layer in model.layers:
layer.trainable = True
model.compile(loss="binary_crossentropy",
optimizer=Adam(lr=1e-4),
metrics=['accuracy', f1])
batch_size = 64
tg = ProteinDataGenerator(pathsTrain, labelsTrain, batch_size, SHAPE, channels, use_cache=use_cache)
vg = ProteinDataGenerator(pathsVal, labelsVal, batch_size, SHAPE, channels, use_cache=use_cache)
hist = model.fit_generator(
tg,
steps_per_epoch=np.ceil(float(len(pathsTrain)) / float(batch_size))/2,
validation_data=vg,
validation_steps=np.ceil(float(len(pathsVal)) / float(batch_size))/2,
epochs=100,
verbose=1,
callbacks=callbacks_list)
fig, ax = plt.subplots(1, 2, figsize=(15,5))
ax[0].set_title('loss')
ax[0].plot(hist.epoch, hist.history["loss"], label="Train loss")
ax[0].plot(hist.epoch, hist.history["val_loss"], label="Validation loss")
ax[1].set_title('acc')
ax[1].plot(hist.epoch, hist.history["f1"], label="Train F1")
ax[1].plot(hist.epoch, hist.history["val_f1"], label="Validation F1")
ax[0].legend()
ax[1].legend()
plt.show() | [
"keras.layers.Conv2D",
"tensorflow.equal",
"pandas.read_csv",
"imgaug.augmenters.GaussianBlur",
"keras.backend.floatx",
"numpy.array",
"tensorflow.is_nan",
"keras.layers.Dense",
"tensorflow.ones_like",
"imgaug.augmenters.Fliplr",
"numpy.arange",
"numpy.divide",
"imgaug.augmenters.Flipud",
... | [((384, 417), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (407, 417), False, 'import warnings\n'), ((8892, 8931), 'numpy.arange', 'np.arange', (['paths.shape[0]'], {'dtype': 'np.int'}), '(paths.shape[0], dtype=np.int)\n', (8901, 8931), True, 'import numpy as np\n'), ((8934, 8954), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (8948, 8954), True, 'import numpy as np\n'), ((8955, 8978), 'numpy.random.shuffle', 'np.random.shuffle', (['keys'], {}), '(keys)\n', (8972, 8978), True, 'import numpy as np\n'), ((9847, 9988), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""../working/InceptionV3_3chan.h5"""'], {'monitor': '"""val_f1"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""', 'save_weights_only': '(False)'}), "('../working/InceptionV3_3chan.h5', monitor='val_f1',\n verbose=1, save_best_only=True, mode='max', save_weights_only=False)\n", (9862, 9988), False, 'from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau\n'), ((10034, 10137), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_f1"""', 'factor': '(0.5)', 'patience': '(10)', 'verbose': '(1)', 'mode': '"""max"""', 'epsilon': '(0.0001)'}), "(monitor='val_f1', factor=0.5, patience=10, verbose=1,\n mode='max', epsilon=0.0001)\n", (10051, 10137), False, 'from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau\n'), ((10178, 10234), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_f1"""', 'mode': '"""max"""', 'patience': '(20)'}), "(monitor='val_f1', mode='max', patience=20)\n", (10191, 10234), False, 'from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau\n'), ((11820, 11855), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(15, 5)'}), '(1, 2, figsize=(15, 5))\n', (11832, 11855), True, 'import 
matplotlib.pyplot as plt\n'), ((12202, 12212), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12210, 12212), True, 'import matplotlib.pyplot as plt\n'), ((614, 645), 'pandas.read_csv', 'pd.read_csv', (["(DIR + '/train.csv')"], {}), "(DIR + '/train.csv')\n", (625, 645), True, 'import pandas as pd\n'), ((1043, 1086), 'pandas.read_csv', 'pd.read_csv', (["(DIR + '/sample_submission.csv')"], {}), "(DIR + '/sample_submission.csv')\n", (1054, 1086), True, 'import pandas as pd\n'), ((6670, 6680), 'keras.backend.mean', 'K.mean', (['f1'], {}), '(f1)\n', (6676, 6680), True, 'from keras import backend as K\n'), ((7802, 7852), 'keras.applications.inception_v3.InceptionV3', 'InceptionV3', ([], {'include_top': '(False)', 'weights': '"""imagenet"""'}), "(include_top=False, weights='imagenet')\n", (7813, 7852), False, 'from keras.applications.inception_v3 import InceptionV3\n'), ((8110, 8155), 'keras.models.Model', 'Model', ([], {'inputs': 'base_input', 'outputs': 'base_output'}), '(inputs=base_input, outputs=base_output)\n', (8115, 8155), False, 'from keras.models import Model\n'), ((8443, 8470), 'keras.models.Model', 'Model', (['input_tensor', 'output'], {}), '(input_tensor, output)\n', (8448, 8470), False, 'from keras.models import Model\n'), ((764, 776), 'numpy.zeros', 'np.zeros', (['(28)'], {}), '(28)\n', (772, 776), True, 'import numpy as np\n'), ((922, 944), 'numpy.array', 'np.array', (['paths[:5000]'], {}), '(paths[:5000])\n', (930, 944), True, 'import numpy as np\n'), ((946, 969), 'numpy.array', 'np.array', (['labels[:5000]'], {}), '(labels[:5000])\n', (954, 969), True, 'import numpy as np\n'), ((1164, 1175), 'numpy.ones', 'np.ones', (['(28)'], {}), '(28)\n', (1171, 1175), True, 'import numpy as np\n'), ((1268, 1283), 'numpy.array', 'np.array', (['paths'], {}), '(paths)\n', (1276, 1283), True, 'import numpy as np\n'), ((1285, 1301), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1293, 1301), True, 'import numpy as np\n'), ((1972, 1989), 
'cv2.createCLAHE', 'cv2.createCLAHE', ([], {}), '()\n', (1987, 1989), False, 'import cv2\n'), ((2457, 2528), 'numpy.zeros', 'np.zeros', (['(paths.shape[0], self.shape[0], self.shape[1], self.shape[2])'], {}), '((paths.shape[0], self.shape[0], self.shape[1], self.shape[2]))\n', (2465, 2528), True, 'import numpy as np\n'), ((6244, 6254), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (6252, 6254), True, 'from keras import backend as K\n'), ((6271, 6303), 'keras.backend.cast', 'K.cast', (['(y_true * y_pred)', '"""float"""'], {}), "(y_true * y_pred, 'float')\n", (6277, 6303), True, 'from keras import backend as K\n'), ((6326, 6370), 'keras.backend.cast', 'K.cast', (['((1 - y_true) * (1 - y_pred))', '"""float"""'], {}), "((1 - y_true) * (1 - y_pred), 'float')\n", (6332, 6370), True, 'from keras import backend as K\n'), ((6389, 6427), 'keras.backend.cast', 'K.cast', (['((1 - y_true) * y_pred)', '"""float"""'], {}), "((1 - y_true) * y_pred, 'float')\n", (6395, 6427), True, 'from keras import backend as K\n'), ((6448, 6486), 'keras.backend.cast', 'K.cast', (['(y_true * (1 - y_pred))', '"""float"""'], {}), "(y_true * (1 - y_pred), 'float')\n", (6454, 6486), True, 'from keras import backend as K\n'), ((6621, 6634), 'tensorflow.is_nan', 'tf.is_nan', (['f1'], {}), '(f1)\n', (6630, 6634), True, 'import tensorflow as tf\n'), ((6636, 6653), 'tensorflow.zeros_like', 'tf.zeros_like', (['f1'], {}), '(f1)\n', (6649, 6653), True, 'import tensorflow as tf\n'), ((6728, 6760), 'keras.backend.cast', 'K.cast', (['(y_true * y_pred)', '"""float"""'], {}), "(y_true * y_pred, 'float')\n", (6734, 6760), True, 'from keras import backend as K\n'), ((6783, 6827), 'keras.backend.cast', 'K.cast', (['((1 - y_true) * (1 - y_pred))', '"""float"""'], {}), "((1 - y_true) * (1 - y_pred), 'float')\n", (6789, 6827), True, 'from keras import backend as K\n'), ((6846, 6884), 'keras.backend.cast', 'K.cast', (['((1 - y_true) * y_pred)', '"""float"""'], {}), "((1 - y_true) * y_pred, 'float')\n", (6852, 
6884), True, 'from keras import backend as K\n'), ((6905, 6943), 'keras.backend.cast', 'K.cast', (['(y_true * (1 - y_pred))', '"""float"""'], {}), "(y_true * (1 - y_pred), 'float')\n", (6911, 6943), True, 'from keras import backend as K\n'), ((7078, 7091), 'tensorflow.is_nan', 'tf.is_nan', (['f1'], {}), '(f1)\n', (7087, 7091), True, 'import tensorflow as tf\n'), ((7093, 7110), 'tensorflow.zeros_like', 'tf.zeros_like', (['f1'], {}), '(f1)\n', (7106, 7110), True, 'import tensorflow as tf\n'), ((7129, 7139), 'keras.backend.mean', 'K.mean', (['f1'], {}), '(f1)\n', (7135, 7139), True, 'from keras import backend as K\n'), ((7388, 7414), 'keras.backend.clip', 'K.clip', (['pt_1', '(0.001)', '(0.999)'], {}), '(pt_1, 0.001, 0.999)\n', (7394, 7414), True, 'from keras import backend as K\n'), ((7428, 7454), 'keras.backend.clip', 'K.clip', (['pt_0', '(0.001)', '(0.999)'], {}), '(pt_0, 0.001, 0.999)\n', (7434, 7454), True, 'from keras import backend as K\n'), ((7750, 7770), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (7768, 7770), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D, MaxPooling2D\n'), ((8192, 8204), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (8199, 8204), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D, MaxPooling2D\n'), ((8216, 8266), 'keras.layers.Conv2D', 'Conv2D', (['(128)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(128, kernel_size=(3, 3), activation='relu')\n", (8222, 8266), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D, MaxPooling2D\n'), ((8277, 8286), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (8284, 8286), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D, MaxPooling2D\n'), ((8298, 
8310), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (8305, 8310), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D, MaxPooling2D\n'), ((8322, 8352), 'keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (8327, 8352), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D, MaxPooling2D\n'), ((8364, 8376), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (8371, 8376), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D, MaxPooling2D\n'), ((8393, 8427), 'keras.layers.Dense', 'Dense', (['n_out'], {'activation': '"""sigmoid"""'}), "(n_out, activation='sigmoid')\n", (8398, 8427), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D, MaxPooling2D\n'), ((10730, 10745), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (10734, 10745), False, 'from keras.optimizers import Adam\n'), ((11239, 11254), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (11243, 11254), False, 'from keras.optimizers import Adam\n'), ((850, 883), 'os.path.join', 'os.path.join', (['path_to_train', 'name'], {}), '(path_to_train, name)\n', (862, 883), False, 'import os, sys\n'), ((1197, 1229), 'os.path.join', 'os.path.join', (['path_to_test', 'name'], {}), '(path_to_test, name)\n', (1209, 1229), False, 'import os, sys\n'), ((2136, 2160), 'numpy.zeros', 'np.zeros', (['paths.shape[0]'], {}), '(paths.shape[0])\n', (2144, 2160), True, 'import numpy as np\n'), ((3346, 3377), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indexes'], {}), '(self.indexes)\n', (3363, 3377), True, 'import numpy as np\n'), ((3866, 3886), 'numpy.stack', 'np.stack', (['images', '(-1)'], {}), '(images, -1)\n', 
(3874, 3886), True, 'import numpy as np\n'), ((3936, 3964), 'cv2.resize', 'cv2.resize', (['im', '(SIZE, SIZE)'], {}), '(im, (SIZE, SIZE))\n', (3946, 3964), False, 'import cv2\n'), ((3981, 3999), 'numpy.divide', 'np.divide', (['im', '(255)'], {}), '(im, 255)\n', (3990, 3999), True, 'import numpy as np\n'), ((4059, 4087), 'cv2.resize', 'cv2.resize', (['im', '(SIZE, SIZE)'], {}), '(im, (SIZE, SIZE))\n', (4069, 4087), False, 'import cv2\n'), ((4104, 4122), 'numpy.divide', 'np.divide', (['im', '(255)'], {}), '(im, 255)\n', (4113, 4122), True, 'import numpy as np\n'), ((4140, 4161), 'numpy.expand_dims', 'np.expand_dims', (['im', '(2)'], {}), '(im, 2)\n', (4154, 4161), True, 'import numpy as np\n'), ((4221, 4234), 'random.randint', 'randint', (['(0)', '(1)'], {}), '(0, 1)\n', (4228, 4234), False, 'from random import randint\n'), ((6210, 6230), 'keras.backend.clip', 'K.clip', (['y_pred', '(0)', '(1)'], {}), '(y_pred, 0, 1)\n', (6216, 6230), True, 'from keras import backend as K\n'), ((6517, 6528), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (6526, 6528), True, 'from keras import backend as K\n'), ((6554, 6565), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (6563, 6565), True, 'from keras import backend as K\n'), ((6590, 6601), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (6599, 6601), True, 'from keras import backend as K\n'), ((6974, 6985), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (6983, 6985), True, 'from keras import backend as K\n'), ((7011, 7022), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (7020, 7022), True, 'from keras import backend as K\n'), ((7047, 7058), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (7056, 7058), True, 'from keras import backend as K\n'), ((7245, 7264), 'tensorflow.equal', 'tf.equal', (['y_true', '(1)'], {}), '(y_true, 1)\n', (7253, 7264), True, 'import tensorflow as tf\n'), ((7274, 7294), 'tensorflow.ones_like', 'tf.ones_like', (['y_pred'], {}), '(y_pred)\n', (7286, 7294), 
True, 'import tensorflow as tf\n'), ((7320, 7339), 'tensorflow.equal', 'tf.equal', (['y_true', '(0)'], {}), '(y_true, 0)\n', (7328, 7339), True, 'import tensorflow as tf\n'), ((7349, 7370), 'tensorflow.zeros_like', 'tf.zeros_like', (['y_pred'], {}), '(y_pred)\n', (7362, 7370), True, 'import tensorflow as tf\n'), ((3679, 3720), 'PIL.Image.open', 'Image.open', (["(path + '_' + channel + '.png')"], {}), "(path + '_' + channel + '.png')\n", (3689, 3720), False, 'from PIL import Image\n'), ((2667, 2705), 'numpy.where', 'np.where', (['(self.is_cached[indexes] == 0)'], {}), '(self.is_cached[indexes] == 0)\n', (2675, 2705), True, 'import numpy as np\n'), ((7563, 7580), 'keras.backend.log', 'K.log', (['(1.0 - pt_0)'], {}), '(1.0 - pt_0)\n', (7568, 7580), True, 'from keras import backend as K\n'), ((7510, 7521), 'keras.backend.log', 'K.log', (['pt_1'], {}), '(pt_1)\n', (7515, 7521), True, 'from keras import backend as K\n'), ((7541, 7559), 'keras.backend.pow', 'K.pow', (['pt_0', 'gamma'], {}), '(pt_0, gamma)\n', (7546, 7559), True, 'from keras import backend as K\n'), ((4331, 4346), 'imgaug.augmenters.Fliplr', 'iaa.Fliplr', (['(0.5)'], {}), '(0.5)\n', (4341, 4346), True, 'from imgaug import augmenters as iaa\n'), ((4387, 4402), 'imgaug.augmenters.Flipud', 'iaa.Flipud', (['(0.5)'], {}), '(0.5)\n', (4397, 4402), True, 'from imgaug import augmenters as iaa\n'), ((4443, 4469), 'imgaug.augmenters.Crop', 'iaa.Crop', ([], {'percent': '(0, 0.1)'}), '(percent=(0, 0.1))\n', (4451, 4469), True, 'from imgaug import augmenters as iaa\n'), ((4992, 5033), 'imgaug.augmenters.Multiply', 'iaa.Multiply', (['(0.8, 1.2)'], {'per_channel': '(0.2)'}), '((0.8, 1.2), per_channel=0.2)\n', (5004, 5033), True, 'from imgaug import augmenters as iaa\n'), ((5209, 5356), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'scale': "{'x': (0.9, 1.1), 'y': (0.9, 1.1)}", 'translate_percent': "{'x': (-0.1, 0.1), 'y': (-0.1, 0.1)}", 'rotate': '(-180, 180)', 'shear': '(-4, 4)'}), "(scale={'x': (0.9, 1.1), 'y': (0.9, 
1.1)}, translate_percent={'x':\n (-0.1, 0.1), 'y': (-0.1, 0.1)}, rotate=(-180, 180), shear=(-4, 4))\n", (5219, 5356), True, 'from imgaug import augmenters as iaa\n'), ((7484, 7508), 'keras.backend.pow', 'K.pow', (['(1.0 - pt_1)', 'gamma'], {}), '(1.0 - pt_1, gamma)\n', (7489, 7508), True, 'from keras import backend as K\n'), ((4692, 4724), 'imgaug.augmenters.GaussianBlur', 'iaa.GaussianBlur', ([], {'sigma': '(0, 0.5)'}), '(sigma=(0, 0.5))\n', (4708, 4724), True, 'from imgaug import augmenters as iaa\n')] |
from torchvision.transforms import *
import numbers
import random
from PIL import Image
class GroupToTensor(object):
    """Convert every image in a group to a tensor via torchvision's ToTensor."""

    def __init__(self):
        pass

    def __call__(self, img_group):
        tensors = []
        for img in img_group:
            tensors.append(ToTensor()(img))
        return tensors
class GroupCenterCrop(object):
    """Apply the same centre crop of ``size`` to every image in a group."""

    def __init__(self, size):
        self.size = size

    def __call__(self, img_group):
        cropped = []
        for img in img_group:
            cropped.append(CenterCrop(self.size)(img))
        return cropped
class GroupResize(object):
    """Resize every image in a group to ``size``."""

    def __init__(self, size):
        self.size = size

    def __call__(self, img_group):
        resized = []
        for img in img_group:
            resized.append(Resize(self.size)(img))
        return resized
class GroupResizeFit(object):
    """Resize every image in a group to ``size``.

    Identical to ``GroupResize``; the stray debug ``print`` of the first
    image's size has been removed.
    """

    def __init__(self, size):
        self.size = size

    def __call__(self, img_group):
        return [Resize(self.size)(img) for img in img_group]
class GroupExpand(object):
    """Pad each image in a group up to ``size`` by centring it on a black canvas.

    If the images already meet or exceed the target in both dimensions they
    must match it exactly and are passed through unchanged.
    """

    def __init__(self, size):
        # size is (target_width, target_height), matching PIL's (w, h) order.
        self.size = size

    def __call__(self, img_group):
        w, h = img_group[0].size
        tw, th = self.size
        if w >= tw and h >= th:
            assert img_group[0].size == self.size
            return img_group
        out_images = []
        for img in img_group:
            canvas = Image.new("RGB", (tw, th))
            # BUG FIX: the vertical offset previously used the width
            # ((th - w) // 2); it now centres with the height.
            canvas.paste(img, ((tw - w) // 2, (th - h) // 2))
            out_images.append(canvas)
        assert out_images[0].size == self.size
        return out_images
class GroupRandomCrop(object):
    """Crop the same random ``(th, tw)`` window out of every image in a group.

    Images smaller than the target are centred on a black canvas instead.
    """

    def __init__(self, size):
        # An int means a square crop; otherwise size is (th, tw).
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size

    def __call__(self, img_group):
        w, h = img_group[0].size
        th, tw = self.size
        # BUG FIX: the padding branch previously (a) only triggered on a
        # too-narrow width (a too-short height crashed randint below),
        # (b) pasted img_group[0] for every slot instead of each img, and
        # (c) used the width in the vertical offset.
        if w < tw or h < th:
            out_images = []
            for img in img_group:
                canvas = Image.new("RGB", (tw, th))
                canvas.paste(img, ((tw - w) // 2, (th - h) // 2))
                out_images.append(canvas)
            return out_images
        # One shared random window so all images stay spatially aligned.
        x1 = random.randint(0, (w - tw))
        y1 = random.randint(0, (h - th))
        out_images = []
        for img in img_group:
            if w == tw and h == th:
                out_images.append(img)
            else:
                out_images.append(img.crop((x1, y1, x1 + tw, y1 + th)))
        return out_images
class GroupRandomRotation(object):
    """Rotate every image in a group by one shared random angle in [-max, max]."""

    def __init__(self, max):
        self.max = max

    def __call__(self, img_group):
        shared_angle = random.randint(-self.max, self.max)
        rotated = []
        for img in img_group:
            rotated.append(functional.rotate(img, shared_angle))
        return rotated
class GroupNormalize(object):
    """In-place per-tensor normalisation: ``t <- (t - mean[i]) / std[i]``.

    ``mean``/``std`` are zipped against the tensor list, one scalar pair per
    tensor; the input list is mutated and returned.
    """

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor_list):
        # TODO: make efficient
        for tensor, mu, sigma in zip(tensor_list, self.mean, self.std):
            tensor.sub_(mu).div_(sigma)
        return tensor_list
class GroupUnormalize(object):
    """Inverse of ``GroupNormalize``: ``t <- t * std[i] + mean[i]``, in place."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor_list):
        # TODO: make efficient
        for tensor, mu, sigma in zip(tensor_list, self.mean, self.std):
            tensor.mul_(sigma).add_(mu)
        return tensor_list
"PIL.Image.new",
"random.randint"
] | [((1721, 1746), 'random.randint', 'random.randint', (['(0)', '(w - tw)'], {}), '(0, w - tw)\n', (1735, 1746), False, 'import random\n'), ((1756, 1781), 'random.randint', 'random.randint', (['(0)', '(h - th)'], {}), '(0, h - th)\n', (1770, 1781), False, 'import random\n'), ((2074, 2109), 'random.randint', 'random.randint', (['(-self.max)', 'self.max'], {}), '(-self.max, self.max)\n', (2088, 2109), False, 'import random\n'), ((1054, 1080), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(tw, th)'], {}), "('RGB', (tw, th))\n", (1063, 1080), False, 'from PIL import Image\n'), ((1580, 1606), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(tw, th)'], {}), "('RGB', (tw, th))\n", (1589, 1606), False, 'from PIL import Image\n')] |
# -*- coding: utf-8 -*-
"""Plot step wind
"""
import matplotlib.pyplot as plt
import numpy as np
import os
from _inputs import (step_dir, model_keys, i_gspd, i_pit, i_gtrq, fig_dir, fast_labels, h2_labels)
from _utils import read_step
# (FAST output channel, HAWC2 channel index, axis label, HAWC2 scale factor)
plot_keys = [('GenSpeed', i_gspd,'Generator Speed [rpm]', 1),
             ('BldPitch1', i_pit, 'Blade Pitch [deg]', 1),
             ('GenTq', i_gtrq, 'Generator Torque [MNm]', -1e-6)]
#              ('GenPwr', i_pow, 1e-3)
alpha = 0.9
save_fig = False
#%% load data
print('Loading data...')
step_data = []
steady_data =[]
# One (OpenFAST, HAWC2) result pair per model variant in model_keys.
for i, (fastname, h2name) in enumerate(model_keys):
    # path names
    fast_path = step_dir + f'IEA15MW_step_torque_{fastname}_0.outb'
    h2_path = step_dir + (f'iea_15mw_{h2name}_rwt_step.sel').lower()
    # load data
    fast_df = read_step(fast_path, usecols=[t[0] for t in plot_keys])
    h2_df = read_step(h2_path)
    # add to list
    step_data.append([fast_df, h2_df])
    del fast_df, h2_df
#%% plot data
pltprms = {'font.size': 10, 'axes.labelsize': 10}
with plt.rc_context(pltprms):
    # One row per model variant, one column per plotted channel.
    fig, axs = plt.subplots(2, len(plot_keys), num=8, clear=True, figsize=(9, 4))
    for i, (fastname, h2name) in enumerate(model_keys):
        fast_df, h2_df = step_data[i]
        for j, (fast_key, h2_chan, label, h2scl) in enumerate(plot_keys):
            # get axes
            ax = axs[i, j]
            ax.grid('on')
            # isolate and scale step data
            fast_data = fast_df[fast_key]
            h2_data = h2_df[h2_chan] * h2scl
            if 'GenTq' in fast_key:
                fast_data = fast_data * 1e-3
            # plot
            # Reuse the first row's line colours on subsequent rows so each
            # tool keeps a consistent colour across variants.
            c1, c2 = None, None
            if i > 0:
                c1, c2 = l1.get_color(), l2.get_color()
            l1, = ax.plot(fast_df['Time'], fast_data, label=['ElastoDyn', 'BeamDyn'][i],
                          linestyle=['-', '--'][i], c=c1, alpha=alpha)
            l2, = ax.plot(h2_data, label=h2_labels[i],
                          linestyle=['-', '--'][i], c=c2, alpha=alpha)
            ax.set_xlim([200, h2_data.index[-1]])
            if i == 0:
                ax.set_title(label, fontsize=10)
            else:
                ax.set_xlabel('Time [s]')
    axs[0, -1].legend(loc=4)
    axs[1, -1].legend(loc=4)
    plt.tight_layout()
# save figure
if save_fig:
    figname = os.path.basename(__file__).replace('.py', '.png')
    fig.savefig(fig_dir + figname, dpi=150)
| [
"matplotlib.pyplot.rc_context",
"os.path.basename",
"_utils.read_step",
"matplotlib.pyplot.tight_layout"
] | [((2142, 2160), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2158, 2160), True, 'import matplotlib.pyplot as plt\n'), ((785, 840), '_utils.read_step', 'read_step', (['fast_path'], {'usecols': '[t[0] for t in plot_keys]'}), '(fast_path, usecols=[t[0] for t in plot_keys])\n', (794, 840), False, 'from _utils import read_step\n'), ((853, 871), '_utils.read_step', 'read_step', (['h2_path'], {}), '(h2_path)\n', (862, 871), False, 'from _utils import read_step\n'), ((1023, 1046), 'matplotlib.pyplot.rc_context', 'plt.rc_context', (['pltprms'], {}), '(pltprms)\n', (1037, 1046), True, 'import matplotlib.pyplot as plt\n'), ((2203, 2229), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (2219, 2229), False, 'import os\n')] |
import pandas as pd
import plotly.graph_objs as go
flist = ['data/data_cleaned/poss_ppp_data/poss2015.csv', 'data/data_cleaned/poss_ppp_data/poss2016.csv', 'data/data_cleaned/poss_ppp_data/poss2017.csv',
'data/data_cleaned/poss_ppp_data/poss2018.csv', 'data/data_cleaned/poss_ppp_data/poss2019.csv']
def scatter_plot_in(fname):
    """Return (total possessions, weighted PPP, player names) for one season CSV.

    ``fname`` is a path to one of the files under the poss_ppp_data
    directory.  The season points-per-possession for each player is the
    possession-weighted average over all play types.
    """
    play_types = ['iso', 'tr', 'prb', 'prr', 'pu', 'su', 'ho', 'cut', 'os', 'putback', 'misc']
    df = pd.read_csv(fname)
    # Total points = sum over play types of (possessions * PPP).
    season_points = sum(df[pt + '_poss'] * df[pt + '_ppp'] for pt in play_types)
    weighted_ppp = season_points / df['total_poss']
    return df['total_poss'].tolist(), weighted_ppp.tolist(), df['PLAYER_NAME'].tolist()
def create_slider_scatter(fname_list, title_graph, yaxis_label, x_axis_label):
    """Build a plotly scatter (possessions vs weighted PPP) with a year slider.

    Parameters
    ----------
    fname_list : list of str
        One season CSV path per year, in chronological order starting 2015.
    title_graph, yaxis_label, x_axis_label : str
        Figure title and axis labels.

    Returns
    -------
    plotly.graph_objs.Figure

    Notes
    -----
    BUG FIX: the function previously iterated the module-level ``flist`` and
    ignored its ``fname_list`` argument; it now uses the argument.  The
    slider's ``active`` index was hard-coded to 5 (one past the end for the
    default five seasons); it is now clamped to the last trace.  The unused
    ``start_index`` local was removed.
    """
    fig = go.Figure()
    # Reversed RdYlBu-style colorscale: low PPP -> blue, high PPP -> red.
    colorscale_curr = [[0.0, "rgb(165,0,38)"], [0.1111111111111111, "rgb(215,48,39)"],
                       [0.2222222222222222, "rgb(244,109,67)"], [0.3333333333333333, "rgb(253,174,97)"],
                       [0.4444444444444444, "rgb(254,224,144)"], [0.5555555555555556, "rgb(224,243,248)"],
                       [0.6666666666666666, "rgb(171,217,233)"], [0.7777777777777778, "rgb(116,173,209)"],
                       [0.8888888888888888, "rgb(69,117,180)"], [1.0, "rgb(49,54,149)"]]
    colorscale_curr.reverse()
    # One hidden trace per season; the slider toggles visibility.
    for i, fname in enumerate(fname_list):
        x_1, y_1, names = scatter_plot_in(fname)
        fig.add_trace(
            go.Scatter(x=x_1, y=y_1, text=names, hoverinfo='text', mode='markers',
                       marker=dict(color=y_1, colorscale=colorscale_curr, size=12,
                                   line=dict(width=2, color='DarkSlateGrey')), visible=False,
                       name="Points Per Possession vs Possessions " + str(i)
                       ))
    fig.data[0].visible = True
    steps = []
    for i in range(len(fig.data)):
        step = dict(
            method="restyle",
            args=["visible", [False] * len(fig.data)],
            label='Year ' + str(i + 2015)
        )
        step["args"][1][i] = True  # Toggle i'th trace to "visible"
        steps.append(step)
    sliders = [dict(
        active=len(steps) - 1,
        currentvalue={"prefix": "Year: "},
        pad={"t": 5},
        steps=steps
    )]
    fig.update_layout(
        sliders=sliders,
        title={"text": title_graph},
        xaxis_title=x_axis_label,
        yaxis_title=yaxis_label,
    )
    return fig
"pandas.DataFrame",
"plotly.graph_objs.Figure",
"pandas.read_csv"
] | [((623, 641), 'pandas.read_csv', 'pd.read_csv', (['fname'], {}), '(fname)\n', (634, 641), True, 'import pandas as pd\n'), ((656, 670), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (668, 670), True, 'import pandas as pd\n'), ((1374, 1385), 'plotly.graph_objs.Figure', 'go.Figure', ([], {}), '()\n', (1383, 1385), True, 'import plotly.graph_objs as go\n')] |
# encoding: utf-8
from contextlib import contextmanager
import time
import pytest
import requests
from simply.platform import factory
from simply.utils import ConfAttrDict
@contextmanager
def platform_setup(conf):
    """Yield a platform built from *conf* with all containers set up.

    The platform is reset when the ``with`` block exits normally (no reset
    happens if the body raises — matching the original behaviour).
    """
    plat = factory(conf)
    plat.setup('all_containers')
    yield plat
    plat.reset()
@pytest.mark.parametrize("image", ['conda2', 'conda3', 'debian8'])
def test_matrix(image):
    """Smoke test parametrised over the supported base images."""
    print(f'test on image <{image}>')
@pytest.mark.parametrize("interprter,http_server", [('python2', 'SimpleHTTPServer'), ('python3', 'http.server')])
def test_python_server(interprter, http_server):
    # Start the stdlib HTTP server inside a docker container and verify a
    # directory listing is served on port 8000.
    conf = ConfAttrDict(
        backend='docker',
        frontend='debian',
        image='phusion',
    )
    with platform_setup(conf) as platform:
        platform.execute("{} -m {} 8000".format(interprter, http_server), daemon=True)
        # Brief pause so the daemonised server can bind before we connect.
        time.sleep(0.1)
        req = requests.get('http://{}:8000'.format(platform.get_container_ip()), timeout=1)
        assert req.status_code == 200
        assert 'Directory listing for' in req.text
| [
"pytest.mark.parametrize",
"time.sleep",
"simply.platform.factory",
"simply.utils.ConfAttrDict"
] | [((328, 393), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""image"""', "['conda2', 'conda3', 'debian8']"], {}), "('image', ['conda2', 'conda3', 'debian8'])\n", (351, 393), False, 'import pytest\n'), ((467, 583), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""interprter,http_server"""', "[('python2', 'SimpleHTTPServer'), ('python3', 'http.server')]"], {}), "('interprter,http_server', [('python2',\n 'SimpleHTTPServer'), ('python3', 'http.server')])\n", (490, 583), False, 'import pytest\n'), ((234, 247), 'simply.platform.factory', 'factory', (['conf'], {}), '(conf)\n', (241, 247), False, 'from simply.platform import factory\n'), ((640, 706), 'simply.utils.ConfAttrDict', 'ConfAttrDict', ([], {'backend': '"""docker"""', 'frontend': '"""debian"""', 'image': '"""phusion"""'}), "(backend='docker', frontend='debian', image='phusion')\n", (652, 706), False, 'from simply.utils import ConfAttrDict\n'), ((876, 891), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (886, 891), False, 'import time\n')] |
import os
import logging
from pathlib import Path
class ServiceLogger:
    """File logger writing to the nearest existing ``logs/`` directory above *file*.

    Walks up the parent directories of *file* until a ``logs/`` folder is
    found, then attaches a ``FileHandler`` for ``<name>.log`` inside it.
    """

    def __init__(self, name, file, loglevel='DEBUG'):
        # Start at the immediate parent of *file* and climb one ancestor at
        # a time until an existing logs/ directory is found.
        p = str(Path(file).parent) + '/logs/'
        i = 0
        while True:
            if not os.path.exists(p):
                i = i + 1
                # NOTE(review): raises IndexError if no logs/ directory
                # exists on any ancestor up to the filesystem root — confirm
                # this is the intended failure mode.
                p = str(Path(file).parents[i]) + '/logs/'
            else:
                self.dirpath = p
                break
        self.logger = self.__get_logger(loglevel, name)

    # private method
    def __get_logger(self, loglevel, name=__name__, encoding='utf-8'):
        """Create and return a logger writing to ``<dirpath>/<name>.log``."""
        log = logging.getLogger(name)
        level = logging.getLevelName(loglevel)
        log.setLevel(level)
        formatter = logging.Formatter('[%(asctime)s] %(filename)s:%(lineno)d %(levelname)-1s %(message)s')
        file_name = self.dirpath + name + '.log'
        # Append mode: restarts keep extending the same log file.
        fh = logging.FileHandler(file_name, mode='a', encoding=encoding)
        fh.setFormatter(formatter)
        log.addHandler(fh)
        return log
"logging.getLogger",
"os.path.exists",
"pathlib.Path",
"logging.Formatter",
"logging.getLevelName",
"logging.FileHandler"
] | [((566, 589), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (583, 589), False, 'import logging\n'), ((606, 636), 'logging.getLevelName', 'logging.getLevelName', (['loglevel'], {}), '(loglevel)\n', (626, 636), False, 'import logging\n'), ((686, 777), 'logging.Formatter', 'logging.Formatter', (['"""[%(asctime)s] %(filename)s:%(lineno)d %(levelname)-1s %(message)s"""'], {}), "(\n '[%(asctime)s] %(filename)s:%(lineno)d %(levelname)-1s %(message)s')\n", (703, 777), False, 'import logging\n'), ((837, 896), 'logging.FileHandler', 'logging.FileHandler', (['file_name'], {'mode': '"""a"""', 'encoding': 'encoding'}), "(file_name, mode='a', encoding=encoding)\n", (856, 896), False, 'import logging\n'), ((227, 244), 'os.path.exists', 'os.path.exists', (['p'], {}), '(p)\n', (241, 244), False, 'import os\n'), ((144, 154), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (148, 154), False, 'from pathlib import Path\n'), ((296, 306), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (300, 306), False, 'from pathlib import Path\n')] |
from django import forms
from django.utils.translation import ugettext_lazy as _
from .....base import BasePluginForm, get_theme
__title__ = 'fobi.contrib.plugins.form_handlers.http_repost.forms'
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = '2014-2017 <NAME>'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('HTTPRepostForm',)
theme = get_theme(request=None, as_instance=True)
class HTTPRepostForm(forms.Form, BasePluginForm):
    """Form for ``HTTPRepostPlugin``."""

    # (plugin data field name, default value) pairs persisted by fobi.
    plugin_data_fields = [
        ("endpoint_url", ""),
    ]

    # Target URL that submitted form data is re-POSTed to.
    endpoint_url = forms.URLField(
        label=_("Endpoint URL"),
        required=True,
        widget=forms.widgets.TextInput(
            attrs={'class': theme.form_element_html_class}
        )
    )
| [
"django.utils.translation.ugettext_lazy",
"django.forms.widgets.TextInput"
] | [((586, 603), 'django.utils.translation.ugettext_lazy', '_', (['"""Endpoint URL"""'], {}), "('Endpoint URL')\n", (587, 603), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((643, 714), 'django.forms.widgets.TextInput', 'forms.widgets.TextInput', ([], {'attrs': "{'class': theme.form_element_html_class}"}), "(attrs={'class': theme.form_element_html_class})\n", (666, 714), False, 'from django import forms\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 8 00:34:40 2021
@author: Alex1
"""
import serial
import time
class QCL_comms():
"""docstring for QCL_comms"""
def __init__(self, arg=None):
super(QCL_comms, self).__init__()
self.arg = arg
self.serActive = False
def connect(self,port = 'COM9'):
self.ser = serial.Serial(port,57600)
# don't know if useful
self.ser.parity=serial.PARITY_ODD
self.ser.stopbits=serial.STOPBITS_ONE
self.ser.bytesize=serial.EIGHTBITS
# let time for bootloader
time.sleep(1)
self.serActive = True
def disconnect(self):
if self.serActive == True:
self.ser.close()
self.serActive = False
def sendCmd(self,cmd):
cmd = cmd + '\n'
self.ser.write(cmd.encode())
time.sleep(0.1)
def powerComb(self,comb='1',status = 0):
cmd = 'Power'+str(comb)+':'+str(status)
self.sendCmd(cmd)
def enableComb(self,comb='1',status = 0):
cmd = 'Enable'+str(comb)+':'+str(status)
self.sendCmd(cmd)
| [
"serial.Serial",
"time.sleep"
] | [((321, 347), 'serial.Serial', 'serial.Serial', (['port', '(57600)'], {}), '(port, 57600)\n', (334, 347), False, 'import serial\n'), ((517, 530), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (527, 530), False, 'import time\n'), ((730, 745), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (740, 745), False, 'import time\n')] |
import numpy as np
from . import utils
def tile_position(x0, y0, x1=None, y1=None):
"""Need doc string..."""
if x1 is None and y1 is None:
x1 = x0
y1 = y0
if (x0.size != y0.size) or (x1.size != y1.size):
raise ValueError("x0 and y0 or x1 and y1 size do not match.")
x0g = np.tile(x0.ravel()[:, np.newaxis], (1, x1.size))
y0g = np.tile(y0.ravel()[:, np.newaxis], (1, x1.size))
x1g = np.tile(x1.ravel()[np.newaxis, :], (x0.size, 1))
y1g = np.tile(y1.ravel()[np.newaxis, :], (x0.size, 1))
return x0g, y0g, x1g, y1g
def xy_distance(x0, y0, x1=None, y1=None):
"""
Output x and y distance matrices.
If x1 and y1 are not supplied we calculate the auto-distance matrices.
"""
if x1 is None and y1 is None:
x1 = x0
y1 = y0
dx = x0.ravel()[:, np.newaxis] - x1.ravel()[np.newaxis, :]
dy = y0.ravel()[:, np.newaxis] - y1.ravel()[np.newaxis, :]
return dx, dy
def r_distance(x0, y0, x1=None, y1=None, coords="cartesian"):
"""
Distance matrix.
If x1 and y1 are not supplied we calculate the auto-distance matrix.
"""
if coords == "cartesian":
dx, dy = xy_distance(x0, y0, x1, y1)
r = np.sqrt(dx ** 2 + dy ** 2)
elif coords == "latlon":
r = utils.haversine_distance(*tile_position(x0, y0, x1, y1))
return r
| [
"numpy.sqrt"
] | [((1225, 1251), 'numpy.sqrt', 'np.sqrt', (['(dx ** 2 + dy ** 2)'], {}), '(dx ** 2 + dy ** 2)\n', (1232, 1251), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
loads the dom and fetches html source after javascript rendering w/ firefox, geckodriver & selenium
use sharedutils.py:socksfetcher for faster results if no post-processing required
'''
import time
import requests
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.common.exceptions import WebDriverException
from sharedutils import checktcp
from sharedutils import randomagent
from sharedutils import sockshost, socksport
from sharedutils import stdlog, dbglog, errlog, honk
requests.packages.urllib3.disable_warnings()
def main(webpage):
'''
main function to fetch webpages with javascript rendering supporting onion routing
'''
stdlog('geckodriver: ' + 'starting to fetch ' + webpage)
dbglog('geckodriver: ' + 'configuring options, user agent & cert preacceptance')
options = Options()
options.headless = True
options.set_preference('dom.max_script_run_time', 15)
options.add_argument("start-maximized")
options.accept_untrusted_certs = True
options.set_preference('network.http.timeout', 20000)
options.set_preference("general.useragent.override", randomagent())
if '.onion' in webpage:
stdlog('geckodriver: ' + 'appears we are dealing with an onionsite')
if not checktcp(sockshost, socksport):
honk('geckodriver: ' + 'socks proxy not available and required for onionsites!')
else:
stdlog(
'geckodriver: ' + 'assumed torsocks proxy found - tcp://' \
+ sockshost + ':' + str(socksport)
)
stdlog('geckodriver: ' + 'configuring proxy settings')
options.set_preference('network.proxy.type', 1)
options.set_preference('network.proxy.socks', sockshost)
options.set_preference('network.proxy.socks_port', int(socksport))
options.set_preference("network.proxy.socks_remote_dns", True)
try:
stdlog('geckodriver: ' + 'starting webdriver')
driver = webdriver.Firefox(options=options)
stdlog('geckodriver: ' + 'fetching webpage')
driver.get(webpage)
# set the number of seconds to wait before working with the DOM
sleeptz = 5
stdlog('geckodriver: ' + 'waiting ' + str(sleeptz) + ' seconds to render elements')
time.sleep(sleeptz)
#if 'lockbitapt' in webpage:
# stdlog('geckodriver: ' + 'special detected, waiting for captcha')
# driver.add_cookie({"name": "ddosproteck", "value": "lol"})
# driver.find_element_by_css_selector('button').click()
'''
get html from dom after js processing and page rendering complete
'''
source = driver.execute_script("return document.getElementsByTagName('html')[0].innerHTML")
stdlog('geckodriver: ' + 'fetched')
except WebDriverException as e:
# if e contains neterror?e=dnsNotFound, then we are dealing with an onion site failing hsdir
if 'about:neterror?e=dnsNotFound' in str(e):
errlog('geckodriver: ' + 'socks request unable to route to host, check hsdir resolution status!')
elif 'about:neterror?e=netTimeout' in str(e):
errlog('geckodriver: ' + 'geckodriver socks request timed out!')
else:
errlog('geckodriver: ' + 'error: ' + str(e))
driver.quit()
stdlog('geckodriver: ' + 'webdriver quit')
return None
if driver:
driver.quit()
stdlog('geckodriver: ' + 'webdriver quit')
return source
| [
"sharedutils.stdlog",
"requests.packages.urllib3.disable_warnings",
"sharedutils.checktcp",
"sharedutils.errlog",
"selenium.webdriver.Firefox",
"time.sleep",
"selenium.webdriver.firefox.options.Options",
"sharedutils.randomagent",
"sharedutils.dbglog",
"sharedutils.honk"
] | [((580, 624), 'requests.packages.urllib3.disable_warnings', 'requests.packages.urllib3.disable_warnings', ([], {}), '()\n', (622, 624), False, 'import requests\n'), ((752, 808), 'sharedutils.stdlog', 'stdlog', (["('geckodriver: ' + 'starting to fetch ' + webpage)"], {}), "('geckodriver: ' + 'starting to fetch ' + webpage)\n", (758, 808), False, 'from sharedutils import stdlog, dbglog, errlog, honk\n'), ((813, 898), 'sharedutils.dbglog', 'dbglog', (["('geckodriver: ' + 'configuring options, user agent & cert preacceptance')"], {}), "('geckodriver: ' + 'configuring options, user agent & cert preacceptance'\n )\n", (819, 898), False, 'from sharedutils import stdlog, dbglog, errlog, honk\n'), ((908, 917), 'selenium.webdriver.firefox.options.Options', 'Options', ([], {}), '()\n', (915, 917), False, 'from selenium.webdriver.firefox.options import Options\n'), ((1205, 1218), 'sharedutils.randomagent', 'randomagent', ([], {}), '()\n', (1216, 1218), False, 'from sharedutils import randomagent\n'), ((1256, 1324), 'sharedutils.stdlog', 'stdlog', (["('geckodriver: ' + 'appears we are dealing with an onionsite')"], {}), "('geckodriver: ' + 'appears we are dealing with an onionsite')\n", (1262, 1324), False, 'from sharedutils import stdlog, dbglog, errlog, honk\n'), ((2007, 2053), 'sharedutils.stdlog', 'stdlog', (["('geckodriver: ' + 'starting webdriver')"], {}), "('geckodriver: ' + 'starting webdriver')\n", (2013, 2053), False, 'from sharedutils import stdlog, dbglog, errlog, honk\n'), ((2071, 2105), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {'options': 'options'}), '(options=options)\n', (2088, 2105), False, 'from selenium import webdriver\n'), ((2114, 2158), 'sharedutils.stdlog', 'stdlog', (["('geckodriver: ' + 'fetching webpage')"], {}), "('geckodriver: ' + 'fetching webpage')\n", (2120, 2158), False, 'from sharedutils import stdlog, dbglog, errlog, honk\n'), ((2379, 2398), 'time.sleep', 'time.sleep', (['sleeptz'], {}), '(sleeptz)\n', (2389, 2398), False, 
'import time\n'), ((2860, 2895), 'sharedutils.stdlog', 'stdlog', (["('geckodriver: ' + 'fetched')"], {}), "('geckodriver: ' + 'fetched')\n", (2866, 2895), False, 'from sharedutils import stdlog, dbglog, errlog, honk\n'), ((3536, 3578), 'sharedutils.stdlog', 'stdlog', (["('geckodriver: ' + 'webdriver quit')"], {}), "('geckodriver: ' + 'webdriver quit')\n", (3542, 3578), False, 'from sharedutils import stdlog, dbglog, errlog, honk\n'), ((1340, 1370), 'sharedutils.checktcp', 'checktcp', (['sockshost', 'socksport'], {}), '(sockshost, socksport)\n', (1348, 1370), False, 'from sharedutils import checktcp\n'), ((1384, 1469), 'sharedutils.honk', 'honk', (["('geckodriver: ' + 'socks proxy not available and required for onionsites!')"], {}), "('geckodriver: ' + 'socks proxy not available and required for onionsites!'\n )\n", (1388, 1469), False, 'from sharedutils import stdlog, dbglog, errlog, honk\n'), ((1652, 1706), 'sharedutils.stdlog', 'stdlog', (["('geckodriver: ' + 'configuring proxy settings')"], {}), "('geckodriver: ' + 'configuring proxy settings')\n", (1658, 1706), False, 'from sharedutils import stdlog, dbglog, errlog, honk\n'), ((3428, 3470), 'sharedutils.stdlog', 'stdlog', (["('geckodriver: ' + 'webdriver quit')"], {}), "('geckodriver: ' + 'webdriver quit')\n", (3434, 3470), False, 'from sharedutils import stdlog, dbglog, errlog, honk\n'), ((3098, 3199), 'sharedutils.errlog', 'errlog', (["('geckodriver: ' +\n 'socks request unable to route to host, check hsdir resolution status!')"], {}), "('geckodriver: ' +\n 'socks request unable to route to host, check hsdir resolution status!')\n", (3104, 3199), False, 'from sharedutils import stdlog, dbglog, errlog, honk\n'), ((3262, 3326), 'sharedutils.errlog', 'errlog', (["('geckodriver: ' + 'geckodriver socks request timed out!')"], {}), "('geckodriver: ' + 'geckodriver socks request timed out!')\n", (3268, 3326), False, 'from sharedutils import stdlog, dbglog, errlog, honk\n')] |
#!/usr/bin/env python
# coding: utf-8
import sys
sys.path.append("../")
import pandas as pd
import numpy as np
import pathlib
import pickle
import os
import itertools
import argparse
import logging
import helpers.feature_helpers as fh
from collections import Counter
OUTPUT_DF_TR = 'df_steps_tr.csv'
OUTPUT_DF_VAL = 'df_steps_val.csv'
OUTPUT_DF_TRAIN = 'df_steps_train.csv'
OUTPUT_DF_TEST = 'df_steps_test.csv'
OUTPUT_DF_SESSIONS = 'df_sessions.csv'
OUTPUT_ENCODING_DICT = 'enc_dicts_v02.pkl'
OUTPUT_CONFIG = 'config.pkl'
OUTPUT_NORMLIZATIONS_VAL = 'Dwell_normalizations_val.pkl'
OUTPUT_NORMLIZATIONS_SUBM = 'Dwell_normalizations_submission.pkl'
DEFAULT_FEATURES_DIR_NAME = 'nn_vnormal'
DEFAULT_PREPROC_DIR_NAME = 'data_processed_vnormal'
def setup_args_parser():
parser = argparse.ArgumentParser(description='Create cv features')
parser.add_argument('--processed_data_dir_name', help='path to preprocessed data', default=DEFAULT_PREPROC_DIR_NAME)
parser.add_argument('--features_dir_name', help='features directory name', default=DEFAULT_FEATURES_DIR_NAME)
#parser.add_argument('--split_option', help='split type. Options: normal, future', default=DEFAULT_SPLIT)
parser.add_argument('--debug', help='debug mode (verbose output and no saving)', action='store_true')
return parser
def setup_logger(debug):
logger = logging.getLogger()
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
if debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
return logger
def main():
parser = setup_args_parser()
args = parser.parse_args()
logger = setup_logger(args.debug)
#logger.info('split option: %s' % args.split_option)
logger.info(100*'-')
logger.info('Running 013_Features_Dwell.py')
logger.info(100*'-')
logger.info('processed data directory name: %s' % args.processed_data_dir_name)
logger.info('features directory name: %s' % args.features_dir_name)
#Set up arguments
# # split_option
# if args.split_option=='normal':
# SPLIT_OPTION = 'normal'
# elif args.split_option=='future':
# SPLIT_OPTION = 'leave_out_only_clickout_with_nans'
# processed data path
DATA_PATH = '../data/' + args.processed_data_dir_name + '/'
#os.makedirs(DATA_PATH) if not os.path.exists(DATA_PATH) else None
logger.info('processed data path: %s' % DATA_PATH)
# features data path
FEATURES_PATH = '../features/' + args.features_dir_name + '/'
#os.makedirs(FEATURES_PATH) if not os.path.exists(FEATURES_PATH) else None
logger.info('features path: %s' % FEATURES_PATH)
# End of set up arguments
config = pickle.load(open(DATA_PATH+OUTPUT_CONFIG, "rb" ))
config
# ### read data
df_steps_tr = pd.read_csv(DATA_PATH+OUTPUT_DF_TR)
df_steps_val = pd.read_csv(DATA_PATH+OUTPUT_DF_VAL)
df_steps_train = pd.read_csv(DATA_PATH+OUTPUT_DF_TRAIN)
df_steps_test = pd.read_csv(DATA_PATH+OUTPUT_DF_TEST)
df_sessions = pd.read_csv(DATA_PATH+OUTPUT_DF_SESSIONS)
enc_dict = pickle.load(open(DATA_PATH+OUTPUT_ENCODING_DICT, "rb" ))
# ## Concatenate all data
# #### validation
df_tr = df_steps_tr.merge(df_sessions, on='session_id')
df_val = df_steps_val.merge(df_sessions, on='session_id')
df_all_cv = pd.concat([df_tr, df_val], axis=0).reset_index(drop=True)
del df_tr, df_val, df_steps_tr, df_steps_val
# #### all
df_test_new = df_steps_test.merge(df_sessions, on='session_id')
df_train_new = df_steps_train.merge(df_sessions, on='session_id')
df_all = pd.concat([df_train_new, df_test_new], axis=0).reset_index(drop=True)
del df_train_new, df_test_new, df_steps_train, df_steps_test
del df_sessions
# ### create a dataframe with impressions list¶
idx = df_all.action_type=='clickout item'
df_all_imp_list = df_all.loc[idx,['session_id', 'step', 'impressions']].reset_index(drop=True)
df_all_imp_list['impressions_list_enc'] = df_all_imp_list.impressions.fillna('').str.split('|') \
.apply(lambda s: [enc_dict['reference'].get(i) for i in s])
df_all_imp_list.drop('impressions', axis=1, inplace=True)
# # Get Dwell
VAR_GROUPBY = 'session_id'
FEATURE_NAME = 'past_dwell_with_items_%s' % VAR_GROUPBY
print (FEATURE_NAME)
df_all_cv = df_all_cv.sort_values(['user_id', 'day','session_id', 'step', 'timestamp']).reset_index(drop=True)
df_all = df_all.sort_values(['user_id', 'day','session_id', 'step', 'timestamp']).reset_index(drop=True)
# ### validation
VARS_ = ['session_id', 'step', 'timestamp', 'action_type', 'reference']
df = df_all_cv[VARS_].copy()
FILE_NAME = 'Dcv_%s.gz' % FEATURE_NAME
print (FILE_NAME)
df['reference_enc'] = df.reference.apply(lambda s: str(enc_dict['reference'].get(s)))
df = df.drop('reference', axis=1)
df['next_timestamp'] = df.groupby('session_id').timestamp.shift(-1)
df['duration'] = df.next_timestamp-df.timestamp
df['duration'] = df['duration'].fillna(0)
df = df.drop(['timestamp', 'next_timestamp'], axis=1)
df['ref_dwell_dict'] = df.apply(lambda row: dict([(row.reference_enc, row.duration)]), axis=1).apply(Counter)
df = df.drop(['reference_enc', 'duration'], axis=1)
df['cumsum_dwell_dict'] = df.groupby('session_id').ref_dwell_dict.transform(pd.Series.cumsum)
df['cumsum_dwell_dict_shift'] = df.groupby('session_id').cumsum_dwell_dict.shift()
df = df.drop(['ref_dwell_dict', 'cumsum_dwell_dict'], axis=1)
df_feat = df.merge(df_all_imp_list, on=['session_id', 'step'])
df_feat[FEATURE_NAME] = df_feat.apply(lambda row: [row.cumsum_dwell_dict_shift.get(str(s), -1) for s in row.impressions_list_enc] \
if pd.notnull(row.cumsum_dwell_dict_shift) else [-1 for s in row.impressions_list_enc], axis=1)
df_feat = df_feat[['session_id', 'step', FEATURE_NAME]]
df_feat.to_csv(FEATURES_PATH+FILE_NAME, index=False, compression='gzip')
print (FEATURES_PATH+FILE_NAME)
def get_imp_means_and_stds(df_tr_=None, var_group = 'seq_num_new'):
aux = df_tr_[[var_group]].reset_index(drop=True)[var_group]
lista=list(itertools.chain.from_iterable(aux))
listasemnan = [s for s in lista if s!=-1]
means = np.mean(listasemnan)
stds = np.std(listasemnan)
maxv = np.max(listasemnan)
return means, stds, maxv
def get_log_imp_means_and_stds(df_tr_=None, var_group = 'seq_num_new'):
aux = df_tr_[[var_group]].reset_index(drop=True)[var_group]
lista=list(itertools.chain.from_iterable(aux))
listasemnan = np.log(np.array([s for s in lista if s!=-1])+1.9)
means = np.mean(listasemnan)
stds = np.std(listasemnan)
maxv = np.max(listasemnan)
return means, stds,maxv
normalizations_dict = {}
normalizations_dict['dwell_times'] = {}
means, stds, maxv = get_imp_means_and_stds(df_tr_=df_feat, var_group = 'past_dwell_with_items_session_id')
normalizations_dict['dwell_times']['means'] = means
normalizations_dict['dwell_times']['stds'] = stds
normalizations_dict['dwell_times']['max'] = maxv
normalizations_dict['dwell_times_log'] = {}
means, stds, maxv = get_log_imp_means_and_stds(df_tr_=df_feat, var_group = 'past_dwell_with_items_session_id')
normalizations_dict['dwell_times_log']['means'] = means
normalizations_dict['dwell_times_log']['stds'] = stds
normalizations_dict['dwell_times_log']['max'] = maxv
with open(FEATURES_PATH+OUTPUT_NORMLIZATIONS_VAL, 'wb') as handle:
pickle.dump(normalizations_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
# ### all
VARS_ = ['session_id', 'step', 'timestamp', 'action_type', 'reference']
df = df_all[VARS_].copy()
FILE_NAME = 'D_%s.gz' % FEATURE_NAME
print (FILE_NAME)
df['reference_enc'] = df.reference.apply(lambda s: str(enc_dict['reference'].get(s)))
df = df.drop('reference', axis=1)
df['next_timestamp'] = df.groupby('session_id').timestamp.shift(-1)
df['duration'] = df.next_timestamp-df.timestamp
df['duration'] = df['duration'].fillna(0)
df = df.drop(['timestamp', 'next_timestamp'], axis=1)
df['ref_dwell_dict'] = df.apply(lambda row: dict([(row.reference_enc, row.duration)]), axis=1).apply(Counter)
df = df.drop(['reference_enc', 'duration'], axis=1)
df['cumsum_dwell_dict'] = df.groupby('session_id').ref_dwell_dict.transform(pd.Series.cumsum)
df['cumsum_dwell_dict_shift'] = df.groupby('session_id').cumsum_dwell_dict.shift()
df = df.drop(['ref_dwell_dict', 'cumsum_dwell_dict'], axis=1)
df_feat = df.merge(df_all_imp_list, on=['session_id', 'step'])
df_feat[FEATURE_NAME] = df_feat.apply(lambda row: [row.cumsum_dwell_dict_shift.get(str(s), -1) for s in row.impressions_list_enc] \
if pd.notnull(row.cumsum_dwell_dict_shift) else [-1 for s in row.impressions_list_enc], axis=1)
df_feat = df_feat[['session_id', 'step', FEATURE_NAME]]
df_feat.to_csv(FEATURES_PATH+FILE_NAME, index=False, compression='gzip')
print (FEATURES_PATH+FILE_NAME)
normalizations_dict = {}
normalizations_dict['dwell_times'] = {}
means, stds, maxv = get_imp_means_and_stds(df_tr_=df_feat, var_group = 'past_dwell_with_items_session_id')
normalizations_dict['dwell_times']['means'] = means
normalizations_dict['dwell_times']['stds'] = stds
normalizations_dict['dwell_times']['max'] = maxv
normalizations_dict['dwell_times_log'] = {}
means, stds, maxv = get_log_imp_means_and_stds(df_tr_=df_feat, var_group = 'past_dwell_with_items_session_id')
normalizations_dict['dwell_times_log']['means'] = means
normalizations_dict['dwell_times_log']['stds'] = stds
normalizations_dict['dwell_times_log']['max'] = maxv
with open(FEATURES_PATH+OUTPUT_NORMLIZATIONS_SUBM, 'wb') as handle:
pickle.dump(normalizations_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
| [
"logging.getLogger",
"logging.basicConfig",
"numpy.mean",
"pickle.dump",
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.max",
"itertools.chain.from_iterable",
"numpy.array",
"pandas.concat",
"numpy.std",
"pandas.notnull",
"sys.path.append"
] | [((50, 72), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (65, 72), False, 'import sys\n'), ((785, 842), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create cv features"""'}), "(description='Create cv features')\n", (808, 842), False, 'import argparse\n'), ((1351, 1370), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1368, 1370), False, 'import logging\n'), ((1375, 1467), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)s %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s %(levelname)s %(message)s', level=\n logging.INFO)\n", (1394, 1467), False, 'import logging\n'), ((2813, 2850), 'pandas.read_csv', 'pd.read_csv', (['(DATA_PATH + OUTPUT_DF_TR)'], {}), '(DATA_PATH + OUTPUT_DF_TR)\n', (2824, 2850), True, 'import pandas as pd\n'), ((2868, 2906), 'pandas.read_csv', 'pd.read_csv', (['(DATA_PATH + OUTPUT_DF_VAL)'], {}), '(DATA_PATH + OUTPUT_DF_VAL)\n', (2879, 2906), True, 'import pandas as pd\n'), ((2926, 2966), 'pandas.read_csv', 'pd.read_csv', (['(DATA_PATH + OUTPUT_DF_TRAIN)'], {}), '(DATA_PATH + OUTPUT_DF_TRAIN)\n', (2937, 2966), True, 'import pandas as pd\n'), ((2985, 3024), 'pandas.read_csv', 'pd.read_csv', (['(DATA_PATH + OUTPUT_DF_TEST)'], {}), '(DATA_PATH + OUTPUT_DF_TEST)\n', (2996, 3024), True, 'import pandas as pd\n'), ((3041, 3084), 'pandas.read_csv', 'pd.read_csv', (['(DATA_PATH + OUTPUT_DF_SESSIONS)'], {}), '(DATA_PATH + OUTPUT_DF_SESSIONS)\n', (3052, 3084), True, 'import pandas as pd\n'), ((6359, 6379), 'numpy.mean', 'np.mean', (['listasemnan'], {}), '(listasemnan)\n', (6366, 6379), True, 'import numpy as np\n'), ((6396, 6415), 'numpy.std', 'np.std', (['listasemnan'], {}), '(listasemnan)\n', (6402, 6415), True, 'import numpy as np\n'), ((6431, 6450), 'numpy.max', 'np.max', (['listasemnan'], {}), '(listasemnan)\n', (6437, 6450), True, 'import numpy as np\n'), ((6775, 6795), 'numpy.mean', 'np.mean', (['listasemnan'], {}), 
'(listasemnan)\n', (6782, 6795), True, 'import numpy as np\n'), ((6812, 6831), 'numpy.std', 'np.std', (['listasemnan'], {}), '(listasemnan)\n', (6818, 6831), True, 'import numpy as np\n'), ((6847, 6866), 'numpy.max', 'np.max', (['listasemnan'], {}), '(listasemnan)\n', (6853, 6866), True, 'import numpy as np\n'), ((7671, 7745), 'pickle.dump', 'pickle.dump', (['normalizations_dict', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(normalizations_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (7682, 7745), False, 'import pickle\n'), ((9979, 10053), 'pickle.dump', 'pickle.dump', (['normalizations_dict', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(normalizations_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (9990, 10053), False, 'import pickle\n'), ((3348, 3382), 'pandas.concat', 'pd.concat', (['[df_tr, df_val]'], {'axis': '(0)'}), '([df_tr, df_val], axis=0)\n', (3357, 3382), True, 'import pandas as pd\n'), ((3623, 3669), 'pandas.concat', 'pd.concat', (['[df_train_new, df_test_new]'], {'axis': '(0)'}), '([df_train_new, df_test_new], axis=0)\n', (3632, 3669), True, 'import pandas as pd\n'), ((6257, 6291), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['aux'], {}), '(aux)\n', (6286, 6291), False, 'import itertools\n'), ((6651, 6685), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['aux'], {}), '(aux)\n', (6680, 6685), False, 'import itertools\n'), ((5827, 5866), 'pandas.notnull', 'pd.notnull', (['row.cumsum_dwell_dict_shift'], {}), '(row.cumsum_dwell_dict_shift)\n', (5837, 5866), True, 'import pandas as pd\n'), ((6716, 6755), 'numpy.array', 'np.array', (['[s for s in lista if s != -1]'], {}), '([s for s in lista if s != -1])\n', (6724, 6755), True, 'import numpy as np\n'), ((8940, 8979), 'pandas.notnull', 'pd.notnull', (['row.cumsum_dwell_dict_shift'], {}), '(row.cumsum_dwell_dict_shift)\n', (8950, 8979), True, 'import pandas as pd\n')] |
""".. Line to protect from pydocstyle D205, D400.
Cutadapt
--------
Remove adapter sequences from reads in FASTQ file.
"""
import os
import shutil
import subprocess
import tempfile
import iCount
from iCount.files.fastq import get_qual_encoding, ENCODING_TO_OFFSET
def get_version():
"""Get cutadapt version."""
args = ['cutadapt', '--version']
try:
ver = subprocess.check_output(args, shell=False, universal_newlines=True)
return str(ver).rstrip('\n\r')
except (FileNotFoundError, subprocess.CalledProcessError):
return None
def run(reads,
adapter,
reads_trimmed=None,
overwrite=False,
qual_trim=None,
minimum_length=None,
overlap=None,
untrimmed_output=None,
error_rate=None,
):
"""
Remove adapter sequences from high-throughput sequencing reads.
Parameters
----------
reads : str
Input FASTQ file.
adapter : str
Sequence of an adapter ligated to the 3' end.
reads_trimmed : str
Output FASTQ file containing trimmed reads. If not provided
overwrite : bool
If true, overwrite input file (reads) with trimmed file.
qual_trim : int
Trim low-quality bases before adapter removal.
minimum_length : int
Discard trimmed reads that are shorter than `minimum_length`.
overlap : int
Require ``overlap`` overlap between read and adapter for an
adapter to be found.
untrimmed_output : str
Write reads that do not contain any adapter to this file.
error_rate : float
Maximum allowed error rate (no. of errors divided by the length
of the matching region).
Returns
-------
int
Return code of the `cutadapt` program.
"""
args = [
'cutadapt',
'--quiet',
'-a', adapter,
]
qual_base = ENCODING_TO_OFFSET.get(get_qual_encoding(reads), 33)
args.extend(['--quality-base={}'.format(qual_base)])
if reads_trimmed is None:
# Auto-generate output name:
extension = '.gz' if reads.endswith('.gz') else ''
name = next(tempfile._get_candidate_names()) + '.fq' + extension # pylint: disable=protected-access
reads_trimmed = os.path.join(iCount.TMP_ROOT, name)
if qual_trim is not None:
args.extend(['-q', '{:d}'.format(qual_trim)])
if minimum_length is not None:
args.extend(['-m', '{:d}'.format(minimum_length)])
if overlap is not None:
args.extend(['--overlap', '{:d}'.format(overlap)])
if untrimmed_output is not None:
args.extend(['--untrimmed-output', '{}'.format(untrimmed_output)])
if error_rate is not None:
args.extend(['--error-rate', '{}'.format(error_rate)])
args.extend(['-o', reads_trimmed, reads])
rcode = subprocess.call(args, shell=False)
if overwrite:
shutil.move(reads_trimmed, reads)
return rcode
| [
"subprocess.check_output",
"shutil.move",
"iCount.files.fastq.get_qual_encoding",
"os.path.join",
"subprocess.call",
"tempfile._get_candidate_names"
] | [((2827, 2861), 'subprocess.call', 'subprocess.call', (['args'], {'shell': '(False)'}), '(args, shell=False)\n', (2842, 2861), False, 'import subprocess\n'), ((381, 448), 'subprocess.check_output', 'subprocess.check_output', (['args'], {'shell': '(False)', 'universal_newlines': '(True)'}), '(args, shell=False, universal_newlines=True)\n', (404, 448), False, 'import subprocess\n'), ((1914, 1938), 'iCount.files.fastq.get_qual_encoding', 'get_qual_encoding', (['reads'], {}), '(reads)\n', (1931, 1938), False, 'from iCount.files.fastq import get_qual_encoding, ENCODING_TO_OFFSET\n'), ((2261, 2296), 'os.path.join', 'os.path.join', (['iCount.TMP_ROOT', 'name'], {}), '(iCount.TMP_ROOT, name)\n', (2273, 2296), False, 'import os\n'), ((2889, 2922), 'shutil.move', 'shutil.move', (['reads_trimmed', 'reads'], {}), '(reads_trimmed, reads)\n', (2900, 2922), False, 'import shutil\n'), ((2148, 2179), 'tempfile._get_candidate_names', 'tempfile._get_candidate_names', ([], {}), '()\n', (2177, 2179), False, 'import tempfile\n')] |
import copy
import importlib
import os
import numpy as np
import tensorflow as tf
import logging
# Silence TensorFlow's deprecation/warning spam: only ERROR-level messages pass.
tf.get_logger().setLevel(logging.ERROR)
from client import Client
from server import Server
from model import ServerModel
from baseline_constants import MAIN_PARAMS, MODEL_PARAMS
from fedbayes_helper import *
from fedbayes_tinyhelper import *
import metrics.writer as metrics_writer
STAT_METRICS_PATH = 'metrics/stat_metrics.csv'
SYS_METRICS_PATH = 'metrics/sys_metrics.csv'
#from utils.matching.pfnm import layer_group_descent as pdm_multilayer_group_descent
from utils.matching.cnn_pfnm import layerwise_sampler
from utils.matching.cnn_retrain import reconstruct_weights, local_train, combine_network_after_matching
def print_metrics(metrics, weights):
ordered_weights = [weights[c] for c in sorted(weights)]
metric_names = metrics_writer.get_metrics_names(metrics)
for metric in metric_names:
ordered_metric = [metrics[c][metric] for c in sorted(metrics)]
print('%s: %g, 10th percentile: %g, 90th percentile %g' \
% (metric,
np.average(ordered_metric, weights=ordered_weights),
np.percentile(ordered_metric, 10),
np.percentile(ordered_metric, 90)))
class Fedbayes_Sing_Trainer:
def __init__(self, users, groups, train_data, test_data):
# matching requires num of classes to be set during
# model_config stage, or it can cause program failure
self.users = users
self.train_data = train_data
self.test_data = test_data
self.num_classes = 0
self.shape_func = None
self.upd_collector = []
def recover_weights(self, weights, assignment, model_summary, model_meta_data):
res_weights = []
conv_varname, dense_varname, weight_varname = "conv", "dense", "kernel"
#print("checking len, model summ: {}, model meta data: {}".format(len(model_summary), len(model_meta_data)))
for var_name, o, v in zip(model_summary, model_meta_data, weights):
print("name {}, old shape is {}, new shape is {}".format(var_name, o, v.shape))
if var_name.startswith(conv_varname):
if var_name.endswith(weight_varname):
w = v.reshape(o)
w = w.transpose((2, 3, 1, 0))
else:
w = v
elif var_name.startswith("batch"):
w = np.ones(o)
elif var_name.startswith(dense_varname):
if var_name.endswith(weight_varname):
w = v.transpose()
else:
w = v
res_weights.append(w)
# just change last layer, carefully, not sure how it works
# do check the reason out after Jan, find a way to
# improve it.
res_weights[-2] = res_weights[-2].T
return res_weights
def train_model(self, client_model, train_data, weights, assignment, config):
# what maintain by assignment is a dictionary of
#{layer_name: [global_id]}
# the meaning of it is a worker's all layer(except last) matching assignment
epochs = config["epochs"]
batch_size = config["batch-size"]
client_model.set_params(weights)
client_model.train(train_data, num_epochs=epochs, batch_size=batch_size)
update = client_model.get_params()
self.upd_collector.append(update)
def model_config(self, config, dataset, my_model):
shared_model = my_model
model_path = '%s/%s.py' % (dataset, shared_model)
if not os.path.exists(model_path):
print('Please specify a valid dataset and a valid model.')
model_path = '%s.%s' % (dataset, shared_model)
print('############################## %s ##############################' % model_path)
mod = importlib.import_module(model_path)
ClientModel = getattr(mod, 'ClientModel')
self.shape_func = getattr(mod, 'get_convolution_extractor_shape')
# Suppress tf warnings
tf.logging.set_verbosity(tf.logging.WARN)
# Create 2 models
model_params = MODEL_PARAMS[model_path]
model_params_list = list(model_params)
self.num_classes = model_params[1] # setting num_class to be a member of the trainer
model_params_list.insert(0, config["seed"])
model_params_list[1] = config["lr"]
model_params = tuple(model_params_list)
tf.reset_default_graph()
client_model = ClientModel(*model_params)
# Create server
server = Server(client_model)
# Create clients
_users = self.users
groups = [[] for _ in _users]
clients = [Client(u, g, self.train_data[u], self.test_data[u], client_model) \
for u, g in zip(_users, groups)]
print('%d Clients in Total' % len(clients))
return clients, server, client_model
    def begins(self, config, args):
        """Entry point of the FedBayes experiment.

        Builds the model/server/clients, runs one BBP-MAP matching pass on the
        first 40 clients, installs the recovered global weights on the server
        and evaluates them.  The full federated training loop is currently
        disabled (commented out below).
        """
        clients, server, client_model = self.model_config(config, args.dataset, 'cnn')
        # NOTE: the following config values are only consumed by the disabled
        # training loop below; they are kept for when it is re-enabled.
        num_rounds = config["num-rounds"]
        eval_every = config["eval-every"]
        epochs_per_round = config['epochs']
        batch_size = config['batch-size']
        clients_per_round = config["clients-per-round"]
        # Per-client layer-matching assignments, filled by batch_BBPMAP.
        state_dict = {}
        # Test untrained model on all clients
        # stat_metrics = server.test_model(clients)
        # all_ids, all_groups, all_num_samples = server.get_clients_info(clients)
        # print_metrics(stat_metrics, all_num_samples)
        model_summary = client_model.get_summary()
        model_meta_data = client_model.get_meta_data()
        # gl_weight = client_model.get_params()
        # One matching pass over the first 40 clients, then evaluate on them.
        gl_weight = self.batch_BBPMAP(clients[:40], state_dict, client_model, config, args)
        gl_weight = self.recover_weights(gl_weight, [], model_summary, model_meta_data)
        server.model = gl_weight
        stat_metrics = server.test_model(clients[:40])
        all_ids, all_groups, all_num_samples = server.get_clients_info(clients[:40])
        print_metrics(stat_metrics, all_num_samples)
        first = True
        # for i in range(num_rounds):
        #     print('--- Round %d of %d: Training %d Clients ---' % (i+1, num_rounds, clients_per_round))
        #     server.select_clients(clients, num_clients=clients_per_round)
        #     batch_clients = server.selected_clients
        #     if first:
        #         cw = gl_weight
        #     else:
        #         cw = self.recover_weights(gl_weight, assignment, model_summary, model_meta_data)
        #     for k in batch_clients:
        #         if first or not (k.id in state_dict):
        #             assignment = []
        #         else:
        #             assignment = state_dict[k.id]
        #         self.train_model(client_model, k.train_data, cw, assignment, config)
        #     gl_weight = self.batch_BBPMAP(batch_clients, state_dict, client_model, config, args)
        #     if (i + 1) % eval_every == 0 or (i + 1) == num_rounds:
        #         cw = self.recover_weights(gl_weight, assignment, model_summary, model_meta_data)
        #         server.model = cw
        #         stat_metrics = server.test_model(clients)
        #         print_metrics(stat_metrics, all_num_samples)
        #     first = False
        client_model.close()
def ends(self):
print("experiment of Fedbayes finished.")
return
    def batch_BBPMAP(self, batch_clients, state_dict, client_model, config, args):
        """Run layer-wise BBP-MAP neuron matching over a batch of clients.

        For every hidden layer, samples a Hungarian matching of client neurons
        to global neurons (``layerwise_sampler``), records each client's
        assignment into ``state_dict`` ({client_id: {layer_name: [global_id]}}),
        rebuilds per-worker networks around the matched layer and locally
        retrains them before matching the next layer.  The last layer is not
        matched; its weight and bias are combined and averaged with the
        per-class weights from ``avg_cls_weights``.

        Returns the matched global weight list (one array per parameter,
        last-layer weight and bias split back apart at the end).
        """
        model_summary = client_model.get_summary()
        model_meta_data = client_model.get_meta_data()
        n_classes = self.num_classes
        # averaging_weights, cls_freqs = avg_cls_weights(batch_clients, args.dataset, n_classes)
        averaging_weights, cls_freqs = avg_cls_weights(args.dataset, n_classes)
        # BBP-MAP hyper-parameters (prior variances and concentration).
        sigma=config["sigma"]
        sigma0=config["sigma0"]
        gamma=config["gamma"]
        it = config["sample-iter"]
        assignments_list = []
        # param names explained:
        # C is the number of layers for model structure, no counting bias
        # J is the number of clients (workers)
        net_list = load_files()
        C = int(len(model_meta_data) / 2)
        J = len(net_list)
        matching_shapes = []
        fc_pos = None
        # Loads one worker's weights in the layout expected by the sampler.
        apply_by_j = lambda j: load_local_model_weight_func(j, model_summary)
        batch_weights = list(map(apply_by_j, net_list))
        batch_freqs = pdm_prepare_freq(cls_freqs, self.num_classes)
        # Match every layer except the last (handled separately below).
        for cur_l in range(1, C):
            layer_hungarian_weights, assignment, L_next = layerwise_sampler(
                 batch_weights=batch_weights,
                 layer_index=cur_l,
                 sigma0_layers=sigma0,
                 sigma_layers=sigma,
                 batch_frequencies=batch_freqs,
                 it=it,
                 gamma_layers=gamma,
                 model_meta_data=model_meta_data,
                 model_layer_type= model_summary,
                 n_layers= C,
                 matching_shapes=matching_shapes,
            )
            assignments_list.append(assignment)
            # Persist each client's assignment for this layer's weight name.
            for client, a_val in zip(batch_clients, assignment):
                p_index = 2 * (cur_l -1)
                v_name = model_summary[p_index]
                if client.id in state_dict:
                    cdict = state_dict[client.id]
                else:
                    cdict = {}
                cdict.update({v_name: a_val})
                state_dict.update({client.id : cdict})
            print("Number of assignment: {}, L_next: {}, matched_weight shape: {} ".format(
                len(assignment), L_next, layer_hungarian_weights[0].shape) )
            matching_shapes.append(L_next)
            # Rebuild each worker's network around the freshly matched layer.
            temp_network_weg = combine_network_after_matching(batch_weights, cur_l,
                                            model_summary, model_meta_data,
                                            layer_hungarian_weights, L_next, assignment,
                                            matching_shapes, self.shape_func)
            old_data = client_model.get_params()
            gl_weights = []
            for worker in range(J):
                j = worker
                gl_weights.append(reconstruct_weights(temp_network_weg[j], assignment[j],
                                                model_summary, old_data,
                                                model_summary[2 * cur_l - 2]))
            # Local retraining with the matched layer frozen, then reload
            # the retrained weights for the next layer's matching round.
            models = local_train(batch_clients, gl_weights, cur_l, config)
            batch_weights = list(map(apply_by_j, models))
        ## we handle the last layer carefully here ...
        ## averaging the last layer
        matched_weights = []
        last_layer_weights_collector = []
        for worker in range(J):
            # firstly we combine last layer's weight and bias
            bias_shape = batch_weights[worker][-1].shape
            last_layer_bias = batch_weights[worker][-1].reshape((1, bias_shape[0]))
            last_layer_weights = np.concatenate((batch_weights[worker][-2].T, last_layer_bias), axis=0)
            # the directed normalization doesn't work well, let's try weighted averaging
            last_layer_weights_collector.append(last_layer_weights)
        last_layer_weights_collector = np.array(last_layer_weights_collector)
        avg_last_layer_weight = np.zeros(last_layer_weights_collector[0].shape, dtype=np.float32)
        # Per-class weighted average of every worker's last-layer column.
        for i in range(n_classes):
            avg_weight_collector = np.zeros(last_layer_weights_collector[0][:, 0].shape, dtype=np.float32)
            for j in range(J):
                avg_weight_collector += averaging_weights[j][i]*last_layer_weights_collector[j][:, i]
            avg_last_layer_weight[:, i] = avg_weight_collector
        #avg_last_layer_weight = np.mean(last_layer_weights_collector, axis=0)
        # Assemble the output: matched hidden layers from worker 0's (already
        # globally matched) weights, then the averaged last layer split back
        # into weight rows and bias row.
        for i in range(C * 2):
            if i < (C * 2 - 2):
                matched_weights.append(batch_weights[0][i])
        matched_weights.append(avg_last_layer_weight[0:-1, :])
        matched_weights.append(avg_last_layer_weight[-1, :])
        self.upd_collector = []
        return matched_weights
| [
"os.path.exists",
"tensorflow.reset_default_graph",
"importlib.import_module",
"numpy.ones",
"numpy.average",
"tensorflow.logging.set_verbosity",
"metrics.writer.get_metrics_names",
"server.Server",
"numpy.array",
"numpy.zeros",
"utils.matching.cnn_pfnm.layerwise_sampler",
"utils.matching.cnn_... | [((836, 877), 'metrics.writer.get_metrics_names', 'metrics_writer.get_metrics_names', (['metrics'], {}), '(metrics)\n', (868, 877), True, 'import metrics.writer as metrics_writer\n'), ((98, 113), 'tensorflow.get_logger', 'tf.get_logger', ([], {}), '()\n', (111, 113), True, 'import tensorflow as tf\n'), ((3894, 3929), 'importlib.import_module', 'importlib.import_module', (['model_path'], {}), '(model_path)\n', (3917, 3929), False, 'import importlib\n'), ((4093, 4134), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.WARN'], {}), '(tf.logging.WARN)\n', (4117, 4134), True, 'import tensorflow as tf\n'), ((4510, 4534), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (4532, 4534), True, 'import tensorflow as tf\n'), ((4627, 4647), 'server.Server', 'Server', (['client_model'], {}), '(client_model)\n', (4633, 4647), False, 'from server import Server\n'), ((11589, 11627), 'numpy.array', 'np.array', (['last_layer_weights_collector'], {}), '(last_layer_weights_collector)\n', (11597, 11627), True, 'import numpy as np\n'), ((11661, 11726), 'numpy.zeros', 'np.zeros', (['last_layer_weights_collector[0].shape'], {'dtype': 'np.float32'}), '(last_layer_weights_collector[0].shape, dtype=np.float32)\n', (11669, 11726), True, 'import numpy as np\n'), ((3630, 3656), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (3644, 3656), False, 'import os\n'), ((4760, 4825), 'client.Client', 'Client', (['u', 'g', 'self.train_data[u]', 'self.test_data[u]', 'client_model'], {}), '(u, g, self.train_data[u], self.test_data[u], client_model)\n', (4766, 4825), False, 'from client import Client\n'), ((8814, 9106), 'utils.matching.cnn_pfnm.layerwise_sampler', 'layerwise_sampler', ([], {'batch_weights': 'batch_weights', 'layer_index': 'cur_l', 'sigma0_layers': 'sigma0', 'sigma_layers': 'sigma', 'batch_frequencies': 'batch_freqs', 'it': 'it', 'gamma_layers': 'gamma', 'model_meta_data': 
'model_meta_data', 'model_layer_type': 'model_summary', 'n_layers': 'C', 'matching_shapes': 'matching_shapes'}), '(batch_weights=batch_weights, layer_index=cur_l,\n sigma0_layers=sigma0, sigma_layers=sigma, batch_frequencies=batch_freqs,\n it=it, gamma_layers=gamma, model_meta_data=model_meta_data,\n model_layer_type=model_summary, n_layers=C, matching_shapes=matching_shapes\n )\n', (8831, 9106), False, 'from utils.matching.cnn_pfnm import layerwise_sampler\n'), ((9998, 10169), 'utils.matching.cnn_retrain.combine_network_after_matching', 'combine_network_after_matching', (['batch_weights', 'cur_l', 'model_summary', 'model_meta_data', 'layer_hungarian_weights', 'L_next', 'assignment', 'matching_shapes', 'self.shape_func'], {}), '(batch_weights, cur_l, model_summary,\n model_meta_data, layer_hungarian_weights, L_next, assignment,\n matching_shapes, self.shape_func)\n', (10028, 10169), False, 'from utils.matching.cnn_retrain import reconstruct_weights, local_train, combine_network_after_matching\n'), ((10768, 10821), 'utils.matching.cnn_retrain.local_train', 'local_train', (['batch_clients', 'gl_weights', 'cur_l', 'config'], {}), '(batch_clients, gl_weights, cur_l, config)\n', (10779, 10821), False, 'from utils.matching.cnn_retrain import reconstruct_weights, local_train, combine_network_after_matching\n'), ((11320, 11390), 'numpy.concatenate', 'np.concatenate', (['(batch_weights[worker][-2].T, last_layer_bias)'], {'axis': '(0)'}), '((batch_weights[worker][-2].T, last_layer_bias), axis=0)\n', (11334, 11390), True, 'import numpy as np\n'), ((11798, 11869), 'numpy.zeros', 'np.zeros', (['last_layer_weights_collector[0][:, 0].shape'], {'dtype': 'np.float32'}), '(last_layer_weights_collector[0][:, 0].shape, dtype=np.float32)\n', (11806, 11869), True, 'import numpy as np\n'), ((1089, 1140), 'numpy.average', 'np.average', (['ordered_metric'], {'weights': 'ordered_weights'}), '(ordered_metric, weights=ordered_weights)\n', (1099, 1140), True, 'import numpy as np\n'), ((1159, 
1192), 'numpy.percentile', 'np.percentile', (['ordered_metric', '(10)'], {}), '(ordered_metric, 10)\n', (1172, 1192), True, 'import numpy as np\n'), ((1211, 1244), 'numpy.percentile', 'np.percentile', (['ordered_metric', '(90)'], {}), '(ordered_metric, 90)\n', (1224, 1244), True, 'import numpy as np\n'), ((2444, 2454), 'numpy.ones', 'np.ones', (['o'], {}), '(o)\n', (2451, 2454), True, 'import numpy as np\n'), ((10524, 10638), 'utils.matching.cnn_retrain.reconstruct_weights', 'reconstruct_weights', (['temp_network_weg[j]', 'assignment[j]', 'model_summary', 'old_data', 'model_summary[2 * cur_l - 2]'], {}), '(temp_network_weg[j], assignment[j], model_summary,\n old_data, model_summary[2 * cur_l - 2])\n', (10543, 10638), False, 'from utils.matching.cnn_retrain import reconstruct_weights, local_train, combine_network_after_matching\n')] |
#!/usr/bin/env python3
from pathlib import Path
import sys
import subprocess
import re
import argparse
import os
# Path of this script's directory; all tool paths below are relative to it.
dir_path = os.path.dirname(os.path.realpath(__file__))
# TODO: insert correct org name (the GitHub Classroom organisation).
org='YOUR_ORG_NAME_HERE'
# Location of the bundled PMD distribution's launcher scripts.
pmd_pos = dir_path + '/pmd-bin-6.14.0/bin'
# yaclu helper scripts (repo listing), student checkouts and classroom utils.
pos_yaclu = dir_path+ '/yaclu'
root = dir_path+ '/student-repos'
pos_classroom_sh = dir_path+ '/classroom-util'
# Command-line interface: positional lab name plus action flags.
parser = argparse.ArgumentParser(description='Use to download and run tests on git repos')
parser.add_argument('lab', help='the lab to utilise i.e. lab01')
parser.add_argument('-clone', action='store_true', help='clone the repositories for the lab')
parser.add_argument('-pull', action='store_true', help='pull the repositories for the lab')
parser.add_argument('-check', action='store_true', help='run pmd on the repositories')
parser.add_argument('-report', action='store_true', help='create a report with the total of the violation per pmd rule, need -check beforehand')
parser.add_argument('-enablers', action='store_true', help='use jar for enablers')
args = parser.parse_args()
#assume path is correct
def get_all_folders(root):
    """Return the names of all immediate sub-directories of *root*.

    *root* is assumed to be an existing directory path.
    """
    return [entry.name for entry in Path(root).iterdir() if entry.is_dir()]
def run_pmd(root, folders, command):
    """Run *command* once per folder name, after cd-ing into *root* (shell=True)."""
    for folder in folders:
        shell_cmd = 'cd ' + root + '/;' + command + ' ' + folder
        subprocess.run(shell_cmd, shell=True)
def run_pmd_check(root,folders):
    # Run the custom PMD ruleset over every student folder, filtering out test
    # files and aggregating all findings into <root>/violation.csv (a per-folder
    # copy is kept as well).  Relies on module globals: args, dir_path, pmd_pos.
    # Truncate/initialise the aggregate CSV before appending per-folder output.
    subprocess.run('cd '+root+'; echo "" > violation.csv', shell=True);
    for folder in folders:
        # Pick the rule jar: default custom rules, or the "enabler" variant.
        if not args.enablers:
            jar = '/JavaRules/pmd-examples/pf2_custom_rules.jar'
            en = 'v'
        else:
            jar = '/RulesEnabler/pmd-examples/pf2_custom_rules_enabler.jar'
            en = 'enablerV'
        # 'en' + 'iolation.csv' yields violation.csv / enablerViolation.csv.
        # grep drops findings in *Test.java; tee keeps the per-folder copy while
        # appending everything to the aggregate violation.csv.
        subprocess.run('cd ' + root + '/ ;CLASSPATH=' + dir_path + jar +' ' + pmd_pos +'/run.sh pmd -no-cache -f csv -d ' + folder + ' -R myrule.xml -t 8 | grep -v "Test.java" | tee '+root+'/'+folder+ '/' + en + 'iolation.csv >> violation.csv', shell=True);
def run_pmd_output(root, folders, command):
    """Run *command* inside each folder below *root* and tally rule names.

    Each output line is expected to carry the rule as its fifth
    whitespace-separated token, with a 5-character prefix stripped off.
    Returns a dict mapping rule name -> occurrence count.
    """
    counts = {}
    for folder in folders:
        raw = subprocess.check_output('cd ' + root + '/' + folder + ';' + command, shell=True)
        for line in raw.decode("utf-8").split('\n'):
            if len(line) < 2:
                continue
            rule_name = line.split(' ')[4][5:]
            counts[rule_name] = counts.get(rule_name, 0) + 1
    return counts
# ---------------------------------------------------------------------------
# Script entry: validate the requested lab, prepare its folder and run the
# actions selected on the command line (-clone / -pull / -check / -report).
# ---------------------------------------------------------------------------
if len(sys.argv) < 2:
    print("need a lab")
    exit()
path = sys.argv[1]
if not re.match(r"lab[0-9]+", path):
    # Message fixed to match the actual pattern (was 'lab[0-1]+').
    print('invalid lab ', path, ' should be like lab[0-9]+')
    exit()
# convert to absolute path; 'labNN' -> 'lab-NN' is the remote naming scheme
lab = path.replace('lab', 'lab-')
path = root + '/' + path
if not Path(path).exists():
    print('creating folder ' + lab)
    subprocess.run('mkdir ' + path, shell=True)
if not Path(path).is_dir():
    print('need a folder name as first argument')
    exit()
subfolders = get_all_folders(path)
if args.clone:
    subfolders = get_all_folders(path)
    # python3 ../../yaclu/get-assignment-repos.py usi-pf2-2019 lab-01 > meta.txt
    command = 'python3 ' + pos_yaclu + '/get-assignment-repos.py ' + org + ' ' + lab + '> ' + path + '/meta.txt'
    print(command)
    subprocess.run(command, shell=True)
    command = 'cd ' + path + ' ; ' + pos_classroom_sh + '/classroom.sh collect ' + path + '/meta.txt ' + lab
    print(command)
    subprocess.run(command, shell=True)
if args.pull:
    command = 'cd ' + path + ' ; ' + pos_classroom_sh + '/classroom.sh collect ' + path + '/meta.txt ' + lab
    print(command)
    subprocess.run(command, shell=True)
if args.check:
    run_pmd_check(path, subfolders)
if args.report:
    # Tally rule violations from the aggregated CSV produced by -check.
    violationsTotal = {}
    violationsStudent = {}
    # 'with' guarantees the CSV handle is closed (the original leaked it).
    with open(path + '/violation.csv', 'r') as F:
        for line in F:
            pieces = line.split(',')
            if len(pieces) < 7:
                continue
            rule_name = pieces[-1]
            if 'Test.java' in line:
                continue
            if rule_name == '"Rule"\n':
                continue
            # total count per rule
            violationsTotal[rule_name] = violationsTotal.get(rule_name, 0) + 1
            # raw string avoids the invalid '\d' escape-sequence warning
            student_name = re.search(r'lab-\d+-((\d|[a-z]|[A-Z])+)', pieces[2]).group(1)
            # per-rule list of distinct offending students
            if rule_name in violationsStudent:
                if student_name not in violationsStudent[rule_name]:
                    violationsStudent[rule_name].append(student_name)
            else:
                violationsStudent[rule_name] = [student_name]
    # out = run_pmd_output(path, subfolders, 'cat pmd_failures')
    with open(path + '/report_rules_total.csv', 'w') as csv:
        csv.write('Rule, Number\n')
        for key in violationsTotal:
            csv.write(key.strip() + ',' + str(violationsTotal[key]) + '\n')
    with open(path + '/report_rules_total_nr_students.csv', 'w') as csv:
        csv.write('Rule, Number\n')
        for key in violationsStudent:
            csv.write(key.strip() + ',' + str(len(violationsStudent[key])) + '\n')
print()
| [
"subprocess.check_output",
"argparse.ArgumentParser",
"pathlib.Path",
"subprocess.run",
"re.match",
"os.path.realpath",
"re.search"
] | [((409, 495), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Use to download and run tests on git repos"""'}), "(description=\n 'Use to download and run tests on git repos')\n", (432, 495), False, 'import argparse\n'), ((155, 181), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (171, 181), False, 'import os\n'), ((1145, 1155), 'pathlib.Path', 'Path', (['root'], {}), '(root)\n', (1149, 1155), False, 'from pathlib import Path\n'), ((1471, 1541), 'subprocess.run', 'subprocess.run', (['(\'cd \' + root + \'; echo "" > violation.csv\')'], {'shell': '(True)'}), '(\'cd \' + root + \'; echo "" > violation.csv\', shell=True)\n', (1485, 1541), False, 'import subprocess\n'), ((2658, 2685), 're.match', 're.match', (['"""lab[0-9]+"""', 'path'], {}), "('lab[0-9]+', path)\n", (2666, 2685), False, 'import re\n'), ((2917, 2960), 'subprocess.run', 'subprocess.run', (["('mkdir ' + path)"], {'shell': '(True)'}), "('mkdir ' + path, shell=True)\n", (2931, 2960), False, 'import subprocess\n'), ((3369, 3404), 'subprocess.run', 'subprocess.run', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (3383, 3404), False, 'import subprocess\n'), ((3539, 3574), 'subprocess.run', 'subprocess.run', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (3553, 3574), False, 'import subprocess\n'), ((3724, 3759), 'subprocess.run', 'subprocess.run', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (3738, 3759), False, 'import subprocess\n'), ((1366, 1438), 'subprocess.run', 'subprocess.run', (["('cd ' + root + '/;' + command + ' ' + folder)"], {'shell': '(True)'}), "('cd ' + root + '/;' + command + ' ' + folder, shell=True)\n", (1380, 1438), False, 'import subprocess\n'), ((1808, 2077), 'subprocess.run', 'subprocess.run', (['(\'cd \' + root + \'/ ;CLASSPATH=\' + dir_path + jar + \' \' + pmd_pos +\n \'/run.sh pmd -no-cache -f csv -d \' + folder +\n \' -R myrule.xml -t 8 | grep -v "Test.java" | tee \' + 
root + \'/\' +\n folder + \'/\' + en + \'iolation.csv >> violation.csv\')'], {'shell': '(True)'}), '(\'cd \' + root + \'/ ;CLASSPATH=\' + dir_path + jar + \' \' +\n pmd_pos + \'/run.sh pmd -no-cache -f csv -d \' + folder +\n \' -R myrule.xml -t 8 | grep -v "Test.java" | tee \' + root + \'/\' +\n folder + \'/\' + en + \'iolation.csv >> violation.csv\', shell=True)\n', (1822, 2077), False, 'import subprocess\n'), ((2154, 2239), 'subprocess.check_output', 'subprocess.check_output', (["('cd ' + root + '/' + folder + ';' + command)"], {'shell': '(True)'}), "('cd ' + root + '/' + folder + ';' + command, shell=True\n )\n", (2177, 2239), False, 'import subprocess\n'), ((2854, 2864), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (2858, 2864), False, 'from pathlib import Path\n'), ((2970, 2980), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (2974, 2980), False, 'from pathlib import Path\n'), ((4452, 4505), 're.search', 're.search', (['"""lab-\\\\d+-((\\\\d|[a-z]|[A-Z])+)"""', 'pieces[2]'], {}), "('lab-\\\\d+-((\\\\d|[a-z]|[A-Z])+)', pieces[2])\n", (4461, 4505), False, 'import re\n')] |
import numpy as np
import pyverilator
import os
from . import to_float, to_fix_point_int
import taichi as ti
from .all_python_functions import calc_next_pos_and_velocity, rectify_positions_and_velocities, \
rectify_positions_in_collision, calc_after_collision_velocity, two_ball_collides, normalize_vector
def rectify_positions_in_collision_test(sim, ball1_pos, ball2_pos, radius):
    """Drive the Verilog collision rectifier and return the corrected positions.

    Pulses reset, loads both ball positions and the radius as fixed-point
    integers, clocks the simulator until ``done`` is asserted, and converts
    the rectified outputs back to floats.

    Returns (rectified_ball1_pos, rectified_ball2_pos) as numpy arrays.
    """
    # Pulse reset: low -> high -> low.
    sim.io.rst = 0
    sim.io.rst = 1
    sim.io.rst = 0
    # Load inputs in the module's fixed-point representation.
    sim.io.x0, sim.io.y0 = to_fix_point_int(ball1_pos)
    sim.io.x1, sim.io.y1 = to_fix_point_int(ball2_pos)
    sim.io.radius = to_fix_point_int(radius)
    # Advance the clock until the module reports completion.
    while not bool(sim.io.done.value):
        sim.clock.tick()
    pos0 = to_float(np.array([sim.io.new_x0.value, sim.io.new_y0.value]))
    pos1 = to_float(np.array([sim.io.new_x1.value, sim.io.new_y1.value]))
    return pos0, pos1
def test_rectifier():
    """Interactive regression test for the Verilog collision rectifier.

    Simulates a two-ball billiard world with Taichi's GUI; whenever the balls
    collide, the hardware-simulated rectifier's output is asserted to match
    the Python reference implementation within an absolute tolerance.
    Runs until the GUI window is closed.
    """
    # The Verilog source is expected relative to this directory.
    os.chdir("./pyverilog")
    sim = pyverilator.PyVerilator.build("rectify_p_in_collision.v")
    ti.init(ti.cpu)
    resolution = (500, 500)
    fps = 60
    g = 9.8
    drag_coefficient = 0.001
    # world space [0.0, 1.0] ^ 2
    cue_ball_velocity_magnitude_wc = 1.0
    ball_pixel_radius = 10
    ball_radius_wc = 1.0 / resolution[0] * ball_pixel_radius
    gui = ti.GUI("billiard_game_dual_ball", resolution)
    gui.fps_limit = fps
    delta_t = 1.0 / fps
    # Random initial cue-ball direction, scaled to the chosen speed.
    cue_ball_velocity_wc, _ = normalize_vector(np.random.rand(2))
    cue_ball_velocity_wc *= cue_ball_velocity_magnitude_wc
    cue_ball_pos_wc = np.ones(2) * 0.5
    ball_pos_wc = np.array([0.25, 0.25])
    ball_velocity_wc = np.zeros(2)
    # Table border segments (start/end point pairs) for drawing.
    boundary_begin = np.array([
        [0.0, 0.0],
        [0.0, 0.0],
        [1.0, 1.0],
        [1.0, 1.0]
    ])
    boundary_end = np.array([
        [1.0, 0.0],
        [0.0, 1.0],
        [1.0, 0.0],
        [0.0, 1.0]
    ])
    # Ball centres must stay one radius away from every wall.
    virtual_bound_x = [ball_radius_wc, 1.0 - ball_radius_wc]
    virtual_bound_y = [ball_radius_wc, 1.0 - ball_radius_wc]
    while gui.running:
        # Draw the current frame.
        gui.lines(begin=boundary_begin, end=boundary_end, radius=2)
        gui.circle(cue_ball_pos_wc, radius=ball_pixel_radius)
        gui.circle(ball_pos_wc, radius=ball_pixel_radius)
        gui.show()
        # Integrate one time step for each ball, then clamp to the table.
        cue_ball_pos_wc, cue_ball_velocity_wc = calc_next_pos_and_velocity(cue_ball_pos_wc, cue_ball_velocity_wc,
                                                                       delta_t, drag_coefficient, g)
        cue_ball_pos_wc, cue_ball_velocity_wc = rectify_positions_and_velocities(virtual_bound_x[0], virtual_bound_x[1],
                                                                             virtual_bound_y[0], virtual_bound_y[1],
                                                                             cue_ball_pos_wc,
                                                                             cue_ball_velocity_wc)
        ball_pos_wc, ball_velocity_wc = calc_next_pos_and_velocity(ball_pos_wc, ball_velocity_wc, delta_t,
                                                                   drag_coefficient, g)
        ball_pos_wc, ball_velocity_wc = rectify_positions_and_velocities(virtual_bound_x[0], virtual_bound_x[1],
                                                                         virtual_bound_y[0], virtual_bound_y[1],
                                                                         ball_pos_wc, ball_velocity_wc)
        if two_ball_collides(cue_ball_pos_wc, ball_pos_wc, ball_radius_wc):
            # Compare the Python reference rectification with the hardware
            # simulation on the same pre-collision positions.
            old_cue_ball_pos_wc, old_ball_pos_wc = cue_ball_pos_wc, ball_pos_wc
            cue_ball_pos_wc_ref, ball_pos_wc_ref = rectify_positions_in_collision(old_cue_ball_pos_wc, old_ball_pos_wc,
                                                                                  ball_radius_wc)
            cue_ball_pos_wc, ball_pos_wc = rectify_positions_in_collision_test(sim, old_cue_ball_pos_wc,
                                                                               old_ball_pos_wc,
                                                                               ball_radius_wc)
            # Tolerance accounts for fixed-point quantisation in the HDL path.
            assert np.allclose(cue_ball_pos_wc_ref, cue_ball_pos_wc, atol=0.0001)
            assert np.allclose(ball_pos_wc_ref, ball_pos_wc, atol=0.0001)
            cue_ball_velocity_wc, ball_velocity_wc = calc_after_collision_velocity(cue_ball_pos_wc,
                                                                                  ball_pos_wc,
                                                                                  cue_ball_velocity_wc,
                                                                                  ball_velocity_wc)
| [
"numpy.allclose",
"pyverilator.PyVerilator.build",
"numpy.random.rand",
"numpy.ones",
"taichi.init",
"os.chdir",
"numpy.array",
"numpy.zeros",
"taichi.GUI"
] | [((978, 1001), 'os.chdir', 'os.chdir', (['"""./pyverilog"""'], {}), "('./pyverilog')\n", (986, 1001), False, 'import os\n'), ((1012, 1069), 'pyverilator.PyVerilator.build', 'pyverilator.PyVerilator.build', (['"""rectify_p_in_collision.v"""'], {}), "('rectify_p_in_collision.v')\n", (1041, 1069), False, 'import pyverilator\n'), ((1075, 1090), 'taichi.init', 'ti.init', (['ti.cpu'], {}), '(ti.cpu)\n', (1082, 1090), True, 'import taichi as ti\n'), ((1348, 1393), 'taichi.GUI', 'ti.GUI', (['"""billiard_game_dual_ball"""', 'resolution'], {}), "('billiard_game_dual_ball', resolution)\n", (1354, 1393), True, 'import taichi as ti\n'), ((1625, 1647), 'numpy.array', 'np.array', (['[0.25, 0.25]'], {}), '([0.25, 0.25])\n', (1633, 1647), True, 'import numpy as np\n'), ((1671, 1682), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (1679, 1682), True, 'import numpy as np\n'), ((1705, 1763), 'numpy.array', 'np.array', (['[[0.0, 0.0], [0.0, 0.0], [1.0, 1.0], [1.0, 1.0]]'], {}), '([[0.0, 0.0], [0.0, 0.0], [1.0, 1.0], [1.0, 1.0]])\n', (1713, 1763), True, 'import numpy as np\n'), ((1822, 1880), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])\n', (1830, 1880), True, 'import numpy as np\n'), ((755, 807), 'numpy.array', 'np.array', (['[sim.io.new_x0.value, sim.io.new_y0.value]'], {}), '([sim.io.new_x0.value, sim.io.new_y0.value])\n', (763, 807), True, 'import numpy as np\n'), ((844, 896), 'numpy.array', 'np.array', (['[sim.io.new_x1.value, sim.io.new_y1.value]'], {}), '([sim.io.new_x1.value, sim.io.new_y1.value])\n', (852, 896), True, 'import numpy as np\n'), ((1490, 1507), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (1504, 1507), True, 'import numpy as np\n'), ((1590, 1600), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1597, 1600), True, 'import numpy as np\n'), ((4149, 4211), 'numpy.allclose', 'np.allclose', (['cue_ball_pos_wc_ref', 'cue_ball_pos_wc'], {'atol': 
'(0.0001)'}), '(cue_ball_pos_wc_ref, cue_ball_pos_wc, atol=0.0001)\n', (4160, 4211), True, 'import numpy as np\n'), ((4231, 4285), 'numpy.allclose', 'np.allclose', (['ball_pos_wc_ref', 'ball_pos_wc'], {'atol': '(0.0001)'}), '(ball_pos_wc_ref, ball_pos_wc, atol=0.0001)\n', (4242, 4285), True, 'import numpy as np\n')] |
# Import modules
from time import sleep
from features.helpers.appium_helpers import app_driver, compare_screenshots
from features.helpers.common_helpers import get_config_and_set_logging, get_file_abs_path
# Read Config file and set logging
# CONFIG holds the parsed YAML settings; LOGGER is a module-level logger.
CONFIG, LOGGER = get_config_and_set_logging("config.yaml", "app_logs.log", "INFO", __name__)
# Directories for reference screenshots and comparison visualisations.
QUERY_IMAGES_PATH = CONFIG['Paths']['QueryImages']
VISUALIZATIONS_PATH = CONFIG['Paths']['Visualizations']
def load_app():
    """
    Start the COC App and ensure that the app loads completely.

    After booting the app, the current screen is repeatedly compared against
    the reference loading screenshot until the similarity score leaves the
    (0, 1) "still loading" band or the retry budget runs out.
    :return: Appium driver
    """
    # Start App
    coc_driver = app_driver()
    sleep(15)  # Wait for the app to boot up
    LOGGER.debug("Compare current screen with coc loading page")
    comparison_result = compare_screenshots(driver=coc_driver,
                                            original_img="coc_loading.png",
                                            compared_img=None,  # Means take the current screenshot
                                            save_visualization=False)
    LOGGER.debug("Score is: " + str(comparison_result.get("score")))
    LOGGER.info("Waiting for App to start up...")
    # tries = 1  # Adjust tries depending on time taken to load
    # while float(comparison_result.get("score")) == 0 and tries != 0:
    #     comparison_result = compare_screenshots(driver=coc_driver,
    #                                             original_img="coc_loading.png",
    #                                             compared_img=None,  # Means take the current screenshot
    #                                             save_visualization=False)
    #     LOGGER.debug("Score is: " + str(comparison_result.get("score")))
    #     sleep(5)
    #     tries = tries - 1
    LOGGER.debug("Ensuring that the app has started and is on loading page...")
    LOGGER.info("Waiting for App to finish loading...")
    tries = 1  # Adjust tries depending on time taken to load
    # Bug fix: 'tries is not 0' compared identity, not value — it only worked
    # because CPython interns small ints (SyntaxWarning on 3.8+).
    while 0 < float(comparison_result.get("score")) < 1 and tries != 0:
        comparison_result = compare_screenshots(driver=coc_driver,
                                                original_img="coc_loading.png",
                                                compared_img=None,  # Means take the current screenshot
                                                save_visualization=False)
        LOGGER.debug("Score is: " + str(comparison_result.get("score")))
        sleep(5)
        tries = tries - 1
    return coc_driver
# Manual entry point: launch the app and wait for it to finish loading.
if __name__ == '__main__':
    load_app()
| [
"features.helpers.appium_helpers.compare_screenshots",
"time.sleep",
"features.helpers.appium_helpers.app_driver",
"features.helpers.common_helpers.get_config_and_set_logging"
] | [((259, 334), 'features.helpers.common_helpers.get_config_and_set_logging', 'get_config_and_set_logging', (['"""config.yaml"""', '"""app_logs.log"""', '"""INFO"""', '__name__'], {}), "('config.yaml', 'app_logs.log', 'INFO', __name__)\n", (285, 334), False, 'from features.helpers.common_helpers import get_config_and_set_logging, get_file_abs_path\n'), ((600, 612), 'features.helpers.appium_helpers.app_driver', 'app_driver', ([], {}), '()\n', (610, 612), False, 'from features.helpers.appium_helpers import app_driver, compare_screenshots\n'), ((617, 626), 'time.sleep', 'sleep', (['(15)'], {}), '(15)\n', (622, 626), False, 'from time import sleep\n'), ((748, 867), 'features.helpers.appium_helpers.compare_screenshots', 'compare_screenshots', ([], {'driver': 'coc_driver', 'original_img': '"""coc_loading.png"""', 'compared_img': 'None', 'save_visualization': '(False)'}), "(driver=coc_driver, original_img='coc_loading.png',\n compared_img=None, save_visualization=False)\n", (767, 867), False, 'from features.helpers.appium_helpers import app_driver, compare_screenshots\n'), ((2052, 2171), 'features.helpers.appium_helpers.compare_screenshots', 'compare_screenshots', ([], {'driver': 'coc_driver', 'original_img': '"""coc_loading.png"""', 'compared_img': 'None', 'save_visualization': '(False)'}), "(driver=coc_driver, original_img='coc_loading.png',\n compared_img=None, save_visualization=False)\n", (2071, 2171), False, 'from features.helpers.appium_helpers import app_driver, compare_screenshots\n'), ((2430, 2438), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (2435, 2438), False, 'from time import sleep\n')] |
# pip install neo4j-driver
# https://neo4j.com/docs/api/python-driver/current/
from neo4j.v1 import GraphDatabase, basic_auth
def pickwine(variety, country, region, winery, price, score, params):
    """Query Neo4j for wines matching the currently selected filters.

    *price* and *score* arrive as "<low> to <high>" strings.  *params* lists
    which filter clauses to activate: any of "variety", "country", "region",
    "winery", "price", "score".  Returns a dict of parallel result lists
    (wineries, countries, regions, wines, prices, scores, reviewers,
    descriptions); a single "None Found" row is returned when nothing matches.
    """
    prices = price.split(" to ")
    lower = int(prices[0])
    upper = int(prices[1])
    scores = score.split(" to ")
    lowScore = int(scores[0])
    # Bug fix: the upper bound previously re-read scores[0], which collapsed
    # every score range to a single value (compare the price handling above).
    highScore = int(scores[1])
    # query by current selection
    # NOTE(review): credentials are hard-coded here; move them to configuration.
    driver = GraphDatabase.driver(
        "bolt://localhost:7687",
        auth=basic_auth("neo4j", "jesus"))
    session = driver.session()
    # 1 - Wine Variety
    # 2 - Country
    # 3 - Region
    # 4 - Winery
    # 5 - Price range
    cypher_query = '''
    MATCH (z:Winery)-[:MAKES]->(w:Wine)<-[d:DESCRIBES]-(r:Reviewer)
    WHERE
    '''
    # Build the WHERE clause from the selected filters.  Only fixed clause
    # templates are concatenated; all user values go through query parameters.
    for i in range(len(params)):
        if i > 0:
            cypher_query = cypher_query + ' AND '
        if params[i] == "variety":
            cypher_query = cypher_query + 'w.variety = $variety'
        elif params[i] == "country":
            cypher_query = cypher_query + 'z.country = $country '
        elif params[i] == "region":
            cypher_query = cypher_query + 'z.region_1 = $region'
        elif params[i] == "winery":
            cypher_query = cypher_query + 'z.wineryName = $winery'
        elif params[i] == "price":
            cypher_query = cypher_query + 'toInteger(w.price) >= $lower AND toInteger(w.price) <= $upper'
        elif params[i] == "score":
            cypher_query = cypher_query + 'toInteger(d.points) >= $lowScore AND toInteger(d.points) <= $high'
    cypher_query = cypher_query + '''
    WITH w.variety as wine, z.wineryName as winery, w.price as price, z.country as country, z.region_1 as region, d.points as score, r.reviewerName as reviewer, d.description as description
    RETURN wine, winery, price, country, region, score, reviewer, description LIMIT 50
    '''
    results = session.run(cypher_query,
                          parameters={"country": country, "region": region, "winery": winery, "variety": variety, "lower": lower, "upper": upper, "lowScore": lowScore, "high": highScore})
    wineries = list()
    countries = list()
    regions = list()
    wines = list()
    prices = list()
    scores = list()
    reviewers = list()
    descriptions = list()
    for record in results:
        wineries.append(record["winery"])
        countries.append(record["country"])
        regions.append(record["region"])
        wines.append(record["wine"])
        prices.append(record["price"])
        scores.append(record["score"])
        reviewers.append(record["reviewer"])
        descriptions.append(record["description"])
    if len(wineries) == 0:
        # No match: return a single sentinel row so the caller's UI has rows.
        wineries.append("None Found")
        countries.append("None Found")
        regions.append("None Found")
        wines.append("None Found")
        prices.append("None Found")
        scores.append("None Found")
        reviewers.append("None Found")
        descriptions.append("None Found")
    records = {"wineries": wineries, "countries": countries, "regions": regions, "wines": wines, "prices": prices, "scores": scores, "reviewers": reviewers, "descriptions": descriptions}
    return records
def wineryDistance(wine, lowerHop, higherHop):
    """Find wines within lowerHop..higherHop :MAKES hops of *wine* and their wineries.

    Returns a dict of parallel result lists; the fields this query does not
    produce (scores, reviewers, descriptions) are filled with "NA", and a
    single "None Found" row is returned when nothing matches.
    """
    # Cypher cannot parameterize variable-length path bounds, so the hop
    # limits are spliced into the query text.  Coercing them to int first
    # prevents query injection through these two values.
    lowerHop = int(lowerHop)
    higherHop = int(higherHop)
    # NOTE(review): credentials are hard-coded here; move them to configuration.
    driver = GraphDatabase.driver(
        "bolt://localhost:7687",
        auth=basic_auth("neo4j", "jesus"))
    session = driver.session()
    cypher_query = '''
    MATCH (w:Wine)-[:MAKES*
    '''+ str(lowerHop) + '..' +str(higherHop) + '''
    ]-(h:Wine)<-[:MAKES]-(z:Winery)
    WHERE w.variety = $variety
    WITH h.variety as wine, h.price as price, z.wineryName as winery, z.country as country, z.region_1 as region
    RETURN DISTINCT wine, price, winery, country, region LIMIT 50
    '''
    results = session.run(cypher_query,
                          parameters={"variety": wine, "lower": lowerHop, "upper": higherHop})
    wineries = list()
    countries = list()
    regions = list()
    wines = list()
    prices = list()
    scores = list()
    reviewers = list()
    descriptions = list()
    for record in results:
        wines.append(record["wine"])
        wineries.append(record["winery"])
        countries.append(record["country"])
        regions.append(record["region"])
        prices.append(record["price"])
        scores.append("NA")
        reviewers.append("NA")
        descriptions.append("NA")
    if len(wines) == 0:
        # No match: return a single sentinel row so the caller's UI has rows.
        wineries.append("None Found")
        countries.append("None Found")
        regions.append("None Found")
        wines.append("None Found")
        prices.append("None Found")
        scores.append("None Found")
        reviewers.append("None Found")
        descriptions.append("None Found")
    records = {"wineries": wineries, "countries": countries, "regions": regions, "wines": wines, "prices": prices, "scores": scores, "reviewers": reviewers, "descriptions": descriptions}
    return records
def reviewerDistance(wine, lowerHop, higherHop):
driver = GraphDatabase.driver(
"bolt://localhost:7687",
auth=basic_auth("neo4j", "jesus"))
session = driver.session()
cypher_query = '''
MATCH (w:Wine)-[:DESCRIBES*
'''+ str(lowerHop) + '..' +str(higherHop) + '''
]-(h:Wine)<-[d:DESCRIBES]-(r:Reviewer)
WHERE w.variety = $variety
WITH h.variety as wine, h.price as price, r.reviewerName as reviewer, d.points as score, d.description as description
RETURN DISTINCT wine, price, reviewer, score, description LIMIT 50
'''
results = session.run(cypher_query,
parameters={"variety": wine, "lower": lowerHop, "upper": higherHop})
wineries = list()
countries = list()
regions = list()
wines = list()
prices = list()
scores = list()
reviewers = list()
descriptions = list()
for record in results:
wines.append(record["wine"])
wineries.append("NA")
countries.append("NA")
regions.append("NA")
prices.append(record["price"])
scores.append(record["score"])
reviewers.append(record["reviewer"])
descriptions.append(record["description"])
if len(wines) == 0:
wines.append("None Found")
wineries.append("None Found")
countries.append("None Found")
regions.append("None Found")
prices.append("None Found")
scores.append("None Found")
reviewers.append("None Found")
descriptions.append("None Found")
records = {"wineries": wineries, "countries": countries, "regions": regions, "wines": wines, "prices": prices, "scores": scores, "reviewers": reviewers, "descriptions": descriptions}
return records
| [
"neo4j.v1.basic_auth"
] | [((470, 498), 'neo4j.v1.basic_auth', 'basic_auth', (['"""neo4j"""', '"""jesus"""'], {}), "('neo4j', 'jesus')\n", (480, 498), False, 'from neo4j.v1 import GraphDatabase, basic_auth\n'), ((3106, 3134), 'neo4j.v1.basic_auth', 'basic_auth', (['"""neo4j"""', '"""jesus"""'], {}), "('neo4j', 'jesus')\n", (3116, 3134), False, 'from neo4j.v1 import GraphDatabase, basic_auth\n'), ((4691, 4719), 'neo4j.v1.basic_auth', 'basic_auth', (['"""neo4j"""', '"""jesus"""'], {}), "('neo4j', 'jesus')\n", (4701, 4719), False, 'from neo4j.v1 import GraphDatabase, basic_auth\n')] |
import unittest
from operator import attrgetter
from typing import Dict
import numpy as np
from PIL import Image
from _pytest._code import ExceptionInfo
from lunavl.sdk.errors.errors import ErrorInfo
from lunavl.sdk.faceengine.engine import VLFaceEngine
from lunavl.sdk.image_utils.geometry import Rect
from lunavl.sdk.image_utils.image import ColorFormat, VLImage
from tests.resources import ONE_FACE
class BaseTestClass(unittest.TestCase):
faceEngine = VLFaceEngine()
@classmethod
def setup_class(cls):
super().setUpClass()
@classmethod
def teardown_class(cls) -> None:
super().tearDownClass()
@staticmethod
def assertLunaVlError(exceptionInfo: ExceptionInfo, expectedError: ErrorInfo):
"""
Assert LunaVl Error
Args:
exceptionInfo: response from service
expectedError: expected error
"""
assert exceptionInfo.value.error.errorCode == expectedError.errorCode, exceptionInfo.value
assert exceptionInfo.value.error.description == expectedError.description, exceptionInfo.value
if expectedError.detail != "":
assert exceptionInfo.value.error.detail == expectedError.detail, exceptionInfo.value
@staticmethod
def assertReceivedAndRawExpectedErrors(receivedError: ErrorInfo, expectedErrorEmptyDetail: ErrorInfo):
"""
Assert expected and received errors as dicts
Args:
receivedError: received error
expectedErrorEmptyDetail: expected error with empty detail
"""
assert expectedErrorEmptyDetail.errorCode == receivedError.errorCode
assert expectedErrorEmptyDetail.description == receivedError.description
assert expectedErrorEmptyDetail.description == receivedError.detail
@staticmethod
def checkRectAttr(defaultRect: Rect):
"""
Validate attributes of Rect
Args:
defaultRect: rect object
"""
for rectType in ("coreRectI", "coreRectF"):
assert all(
isinstance(
getattr(defaultRect.__getattribute__(rectType), f"{coordinate}"),
float if rectType == "coreRectF" else int,
)
for coordinate in ("x", "y", "height", "width")
)
@staticmethod
def generateColorToArrayMap() -> Dict[ColorFormat, np.ndarray]:
"""
Get images as ndarrays in all available color formats.
Returns:
color format to pixel ndarray map
"""
image = Image.open(ONE_FACE)
R, G, B = np.array(image).T
X = np.ndarray(B.shape, dtype=np.uint8)
allImages = {
ColorFormat.B8G8R8: np.array((B, G, R)).T,
ColorFormat.B8G8R8X8: np.array((B, G, R, X)).T,
ColorFormat.IR_X8X8X8: np.array(image, dtype=np.uint8),
ColorFormat.R16: np.array(image.convert("L"), dtype=np.uint16),
ColorFormat.R8: np.array(image.convert("L"), dtype=np.uint8),
ColorFormat.R8G8B8: np.array(image),
ColorFormat.R8G8B8X8: np.array((R, G, B, X)).T,
}
def _checksAllFormats():
_notImplementedFormats = set(ColorFormat) - set(allImages) - {ColorFormat.Unknown}
if _notImplementedFormats:
notImplementedFormatsList = list(map(attrgetter("name"), _notImplementedFormats))
raise RuntimeError(f"Add Image for {notImplementedFormatsList} color formats")
def _checksArrayShapes():
for color, ndarray in allImages.items():
if ndarray.shape[:2] != allImages[ColorFormat.R8G8B8].shape[:2]:
msg = (
f"'{color.name}' image has incorrect shape.\n"
f"Expected:{allImages[ColorFormat.R8G8B8].shape}\n"
f"Received:{ndarray.shape}"
)
raise RuntimeError(msg)
_checksAllFormats()
_checksArrayShapes()
return allImages
@staticmethod
def getColorToImageMap() -> Dict[ColorFormat, VLImage]:
"""
Get images as vl image in all available color formats.
Returns:
color format to vl image map
"""
return {
color: VLImage.fromNumpyArray(ndarray, color)
for color, ndarray in BaseTestClass.generateColorToArrayMap().items()
}
| [
"operator.attrgetter",
"PIL.Image.open",
"lunavl.sdk.faceengine.engine.VLFaceEngine",
"numpy.array",
"numpy.ndarray",
"lunavl.sdk.image_utils.image.VLImage.fromNumpyArray"
] | [((463, 477), 'lunavl.sdk.faceengine.engine.VLFaceEngine', 'VLFaceEngine', ([], {}), '()\n', (475, 477), False, 'from lunavl.sdk.faceengine.engine import VLFaceEngine\n'), ((2576, 2596), 'PIL.Image.open', 'Image.open', (['ONE_FACE'], {}), '(ONE_FACE)\n', (2586, 2596), False, 'from PIL import Image\n'), ((2645, 2680), 'numpy.ndarray', 'np.ndarray', (['B.shape'], {'dtype': 'np.uint8'}), '(B.shape, dtype=np.uint8)\n', (2655, 2680), True, 'import numpy as np\n'), ((2615, 2630), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2623, 2630), True, 'import numpy as np\n'), ((2854, 2885), 'numpy.array', 'np.array', (['image'], {'dtype': 'np.uint8'}), '(image, dtype=np.uint8)\n', (2862, 2885), True, 'import numpy as np\n'), ((3069, 3084), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3077, 3084), True, 'import numpy as np\n'), ((4323, 4361), 'lunavl.sdk.image_utils.image.VLImage.fromNumpyArray', 'VLImage.fromNumpyArray', (['ndarray', 'color'], {}), '(ndarray, color)\n', (4345, 4361), False, 'from lunavl.sdk.image_utils.image import ColorFormat, VLImage\n'), ((2736, 2755), 'numpy.array', 'np.array', (['(B, G, R)'], {}), '((B, G, R))\n', (2744, 2755), True, 'import numpy as np\n'), ((2793, 2815), 'numpy.array', 'np.array', (['(B, G, R, X)'], {}), '((B, G, R, X))\n', (2801, 2815), True, 'import numpy as np\n'), ((3120, 3142), 'numpy.array', 'np.array', (['(R, G, B, X)'], {}), '((R, G, B, X))\n', (3128, 3142), True, 'import numpy as np\n'), ((3377, 3395), 'operator.attrgetter', 'attrgetter', (['"""name"""'], {}), "('name')\n", (3387, 3395), False, 'from operator import attrgetter\n')] |
# Copyright (c) <NAME>, 2015
# See LICENSE for details.
import os
from textwrap import dedent
from twisted.trial.unittest import TestCase
import mock
from click.testing import CliRunner
from ..create import _main
def setup_simple_project(config=None, mkdir=True):
if not config:
config = dedent(
"""\
[tool.towncrier]
package = "foo"
"""
)
with open("pyproject.toml", "w") as f:
f.write(config)
os.mkdir("foo")
with open("foo/__init__.py", "w") as f:
f.write('__version__ = "1.2.3"\n')
if mkdir:
os.mkdir("foo/newsfragments")
class TestCli(TestCase):
maxDiff = None
def _test_success(
self, content=None, config=None, mkdir=True, additional_args=None
):
runner = CliRunner()
with runner.isolated_filesystem():
setup_simple_project(config, mkdir)
args = ["123.feature.rst"]
if content is None:
content = ["Add your info here"]
if additional_args is not None:
args.extend(additional_args)
result = runner.invoke(_main, args)
self.assertEqual(["123.feature.rst"], os.listdir("foo/newsfragments"))
with open("foo/newsfragments/123.feature.rst") as fh:
self.assertEqual(content, fh.readlines())
self.assertEqual(0, result.exit_code)
def test_basics(self):
"""Ensure file created where output directory already exists."""
self._test_success(mkdir=True)
def test_directory_created(self):
"""Ensure both file and output directory created if necessary."""
self._test_success(mkdir=False)
def test_edit_without_comments(self):
"""Create file with dynamic content."""
content = ["This is line 1\n", "This is line 2"]
with mock.patch("click.edit") as mock_edit:
mock_edit.return_value = "".join(content)
self._test_success(content=content, additional_args=["--edit"])
mock_edit.assert_called_once_with(
"# Please write your news content. When finished, save the file.\n"
"# In order to abort, exit without saving.\n"
"# Lines starting with \"#\" are ignored.\n"
"\n"
"Add your info here\n"
)
def test_edit_with_comment(self):
"""Create file editly with ignored line."""
content = ["This is line 1\n", "This is line 2"]
comment = "# I am ignored\n"
with mock.patch("click.edit") as mock_edit:
mock_edit.return_value = "".join(content[:1] + [comment] + content[1:])
self._test_success(content=content, additional_args=["--edit"])
def test_edit_abort(self):
"""Create file editly and abort."""
with mock.patch("click.edit") as mock_edit:
mock_edit.return_value = None
runner = CliRunner()
with runner.isolated_filesystem():
setup_simple_project(config=None, mkdir=True)
result = runner.invoke(_main, ["123.feature.rst", "--edit"])
self.assertEqual([], os.listdir("foo/newsfragments"))
self.assertEqual(1, result.exit_code)
def test_content(self):
"""
When creating a new fragment the content can be passed as a
command line argument.
The text editor is not invoked.
"""
content_line = "This is a content"
self._test_success(content=[content_line], additional_args=["-c", content_line])
def test_message_and_edit(self):
"""
When creating a new message, a initial content can be passed via
the command line and continue modifying the content by invoking the
text editor.
"""
content_line = "This is a content line"
edit_content = ["This is line 1\n", "This is line 2"]
with mock.patch("click.edit") as mock_edit:
mock_edit.return_value = "".join(edit_content)
self._test_success(
content=edit_content,
additional_args=["-c", content_line, "--edit"]
)
mock_edit.assert_called_once_with(
"# Please write your news content. When finished, save the file.\n"
"# In order to abort, exit without saving.\n"
"# Lines starting with \"#\" are ignored.\n"
"\n"
"{content_line}\n".format(content_line=content_line)
)
def test_different_directory(self):
"""Ensure non-standard directories are used."""
runner = CliRunner()
config = dedent(
"""\
[tool.towncrier]
directory = "releasenotes"
"""
)
with runner.isolated_filesystem():
setup_simple_project(config, mkdir=False)
os.mkdir("releasenotes")
result = runner.invoke(_main, ["123.feature.rst"])
self.assertEqual(["123.feature.rst"], os.listdir("releasenotes"))
self.assertEqual(0, result.exit_code)
def test_invalid_section(self):
"""Ensure creating a path without a valid section is rejected."""
runner = CliRunner()
with runner.isolated_filesystem():
setup_simple_project()
self.assertEqual([], os.listdir("foo/newsfragments"))
result = runner.invoke(_main, ["123.foobar.rst"])
self.assertEqual([], os.listdir("foo/newsfragments"))
self.assertEqual(type(result.exception), SystemExit, result.exception)
self.assertIn(
"Expected filename '123.foobar.rst' to be of format", result.output
)
def test_file_exists(self):
"""Ensure we don't overwrite existing files."""
runner = CliRunner()
with runner.isolated_filesystem():
setup_simple_project()
self.assertEqual([], os.listdir("foo/newsfragments"))
runner.invoke(_main, ["123.feature.rst"])
result = runner.invoke(_main, ["123.feature.rst"])
self.assertEqual(type(result.exception), SystemExit)
self.assertIn("123.feature.rst already exists", result.output)
| [
"textwrap.dedent",
"mock.patch",
"os.listdir",
"click.testing.CliRunner",
"os.mkdir"
] | [((487, 502), 'os.mkdir', 'os.mkdir', (['"""foo"""'], {}), "('foo')\n", (495, 502), False, 'import os\n'), ((306, 399), 'textwrap.dedent', 'dedent', (['""" [tool.towncrier]\n package = "foo"\n """'], {}), '(\n """ [tool.towncrier]\n package = "foo"\n """\n )\n', (312, 399), False, 'from textwrap import dedent\n'), ((613, 642), 'os.mkdir', 'os.mkdir', (['"""foo/newsfragments"""'], {}), "('foo/newsfragments')\n", (621, 642), False, 'import os\n'), ((815, 826), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (824, 826), False, 'from click.testing import CliRunner\n'), ((4692, 4703), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (4701, 4703), False, 'from click.testing import CliRunner\n'), ((4721, 4825), 'textwrap.dedent', 'dedent', (['""" [tool.towncrier]\n directory = "releasenotes"\n """'], {}), '(\n """ [tool.towncrier]\n directory = "releasenotes"\n """\n )\n', (4727, 4825), False, 'from textwrap import dedent\n'), ((5293, 5304), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (5302, 5304), False, 'from click.testing import CliRunner\n'), ((5880, 5891), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (5889, 5891), False, 'from click.testing import CliRunner\n'), ((1887, 1911), 'mock.patch', 'mock.patch', (['"""click.edit"""'], {}), "('click.edit')\n", (1897, 1911), False, 'import mock\n'), ((2582, 2606), 'mock.patch', 'mock.patch', (['"""click.edit"""'], {}), "('click.edit')\n", (2592, 2606), False, 'import mock\n'), ((2870, 2894), 'mock.patch', 'mock.patch', (['"""click.edit"""'], {}), "('click.edit')\n", (2880, 2894), False, 'import mock\n'), ((2973, 2984), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (2982, 2984), False, 'from click.testing import CliRunner\n'), ((3975, 3999), 'mock.patch', 'mock.patch', (['"""click.edit"""'], {}), "('click.edit')\n", (3985, 3999), False, 'import mock\n'), ((4950, 4974), 'os.mkdir', 'os.mkdir', (['"""releasenotes"""'], {}), "('releasenotes')\n", (4958, 4974), False, 
'import os\n'), ((1228, 1259), 'os.listdir', 'os.listdir', (['"""foo/newsfragments"""'], {}), "('foo/newsfragments')\n", (1238, 1259), False, 'import os\n'), ((5090, 5116), 'os.listdir', 'os.listdir', (['"""releasenotes"""'], {}), "('releasenotes')\n", (5100, 5116), False, 'import os\n'), ((5418, 5449), 'os.listdir', 'os.listdir', (['"""foo/newsfragments"""'], {}), "('foo/newsfragments')\n", (5428, 5449), False, 'import os\n'), ((5548, 5579), 'os.listdir', 'os.listdir', (['"""foo/newsfragments"""'], {}), "('foo/newsfragments')\n", (5558, 5579), False, 'import os\n'), ((6005, 6036), 'os.listdir', 'os.listdir', (['"""foo/newsfragments"""'], {}), "('foo/newsfragments')\n", (6015, 6036), False, 'import os\n'), ((3209, 3240), 'os.listdir', 'os.listdir', (['"""foo/newsfragments"""'], {}), "('foo/newsfragments')\n", (3219, 3240), False, 'import os\n')] |
from pygame import init, display, time, event, draw, QUIT
from numpy import arange
def grid(janela, comprimento, tamanho_linha, tamanho_quadrado):
def draw_grid(v):
draw.line(janela, (255, 255, 255),
(v * tamanho_quadrado, 0),
(v * tamanho_quadrado, comprimento))
draw.line(janela, (255, 255, 255),
(0, v * tamanho_quadrado),
(comprimento, v * tamanho_quadrado))
# Iniciando o grid
for x_c in arange(comprimento // tamanho_linha):
draw_grid(x_c)
def borda(janela, comprimento, tamanho,
cor=(0, 0, 0), espaçamento=0, p_borda=12):
def borda_vertical(lado='esquerdo'):
for y in arange(comprimento // tamanho):
draw.line(janela, cor,
(y if lado == 'esquerdo' else y+comprimento-p_borda,
espaçamento),
(y if lado == 'esquerdo' else y+comprimento-p_borda,
comprimento))
def borda_horizontal(lado='cima'):
for x in arange(comprimento // tamanho):
draw.line(janela, cor,
(espaçamento,
x if lado == 'cima' else x+comprimento-p_borda),
(comprimento,
x if lado == 'cima' else x+comprimento-p_borda))
# -------------------------- Bordas
borda_vertical(lado='esquerdo')
borda_vertical(lado='direita')
borda_horizontal(lado='cima')
borda_horizontal(lado='baixo')
# ------------------ Programa
init()
tela_cheia = 600, 600
janela = display.set_mode(tela_cheia)
janela.fill((0, 0, 0))
display.set_caption('Testes de grid')
FPS = 60
Fps = time.Clock()
def teste_grid(game):
while 1:
for evento in event.get():
if evento.type == QUIT:
game = False
grid(janela, 400, 42, 50)
borda(janela, 600, 30, (80, 80, 80), 0, 20)
display.flip()
Fps.tick(FPS)
if not game:
break
| [
"pygame.display.set_caption",
"pygame.init",
"pygame.draw.line",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.display.flip",
"pygame.time.Clock",
"numpy.arange"
] | [((1554, 1560), 'pygame.init', 'init', ([], {}), '()\n', (1558, 1560), False, 'from pygame import init, display, time, event, draw, QUIT\n'), ((1594, 1622), 'pygame.display.set_mode', 'display.set_mode', (['tela_cheia'], {}), '(tela_cheia)\n', (1610, 1622), False, 'from pygame import init, display, time, event, draw, QUIT\n'), ((1646, 1683), 'pygame.display.set_caption', 'display.set_caption', (['"""Testes de grid"""'], {}), "('Testes de grid')\n", (1665, 1683), False, 'from pygame import init, display, time, event, draw, QUIT\n'), ((1700, 1712), 'pygame.time.Clock', 'time.Clock', ([], {}), '()\n', (1710, 1712), False, 'from pygame import init, display, time, event, draw, QUIT\n'), ((496, 532), 'numpy.arange', 'arange', (['(comprimento // tamanho_linha)'], {}), '(comprimento // tamanho_linha)\n', (502, 532), False, 'from numpy import arange\n'), ((179, 281), 'pygame.draw.line', 'draw.line', (['janela', '(255, 255, 255)', '(v * tamanho_quadrado, 0)', '(v * tamanho_quadrado, comprimento)'], {}), '(janela, (255, 255, 255), (v * tamanho_quadrado, 0), (v *\n tamanho_quadrado, comprimento))\n', (188, 281), False, 'from pygame import init, display, time, event, draw, QUIT\n'), ((322, 424), 'pygame.draw.line', 'draw.line', (['janela', '(255, 255, 255)', '(0, v * tamanho_quadrado)', '(comprimento, v * tamanho_quadrado)'], {}), '(janela, (255, 255, 255), (0, v * tamanho_quadrado), (comprimento,\n v * tamanho_quadrado))\n', (331, 424), False, 'from pygame import init, display, time, event, draw, QUIT\n'), ((710, 740), 'numpy.arange', 'arange', (['(comprimento // tamanho)'], {}), '(comprimento // tamanho)\n', (716, 740), False, 'from numpy import arange\n'), ((1058, 1088), 'numpy.arange', 'arange', (['(comprimento // tamanho)'], {}), '(comprimento // tamanho)\n', (1064, 1088), False, 'from numpy import arange\n'), ((1772, 1783), 'pygame.event.get', 'event.get', ([], {}), '()\n', (1781, 1783), False, 'from pygame import init, display, time, event, draw, QUIT\n'), ((1946, 
1960), 'pygame.display.flip', 'display.flip', ([], {}), '()\n', (1958, 1960), False, 'from pygame import init, display, time, event, draw, QUIT\n'), ((754, 926), 'pygame.draw.line', 'draw.line', (['janela', 'cor', "(y if lado == 'esquerdo' else y + comprimento - p_borda, espaçamento)", "(y if lado == 'esquerdo' else y + comprimento - p_borda, comprimento)"], {}), "(janela, cor, (y if lado == 'esquerdo' else y + comprimento -\n p_borda, espaçamento), (y if lado == 'esquerdo' else y + comprimento -\n p_borda, comprimento))\n", (763, 926), False, 'from pygame import init, display, time, event, draw, QUIT\n'), ((1102, 1266), 'pygame.draw.line', 'draw.line', (['janela', 'cor', "(espaçamento, x if lado == 'cima' else x + comprimento - p_borda)", "(comprimento, x if lado == 'cima' else x + comprimento - p_borda)"], {}), "(janela, cor, (espaçamento, x if lado == 'cima' else x +\n comprimento - p_borda), (comprimento, x if lado == 'cima' else x +\n comprimento - p_borda))\n", (1111, 1266), False, 'from pygame import init, display, time, event, draw, QUIT\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, 2020 boringhexi
"""imccontainer.py - read/write IMC audio container files
An IMC audio container file is a file type from Gitaroo Man that has the extension .IMC
and contains audio subsongs."""
import struct
from itertools import count, zip_longest
from gitarootools.audio import subsong
from gitarootools.miscutils.datautils import chunks, open_maybe, readstruct
# subsong memory load modes, maps strings to raw values
loadmodes_toraw = {
"stream": 0, # load part at a time, e.g. music
"entire": 2, # load entire subsong and keep it in mem, e.g. sfx
}
# same, but maps raw values to strings
loadmodes_fromraw = {rawval: string for string, rawval in loadmodes_toraw.items()}
class ImcContainerError(Exception):
"""base class for IMC container related errors"""
pass
class ContainerSubsong:
"""transparent wrapper around a subsong.Subsong instance, containing additional info
needed to put it into an ImcContainer
all of the Subsong instance's public attributes are exposed by the ContainerSubsong
instance; they can be transparently accessed and assigned to.
"""
def __init__(self, subsong_, name, loadmode, rawname=None, unk1=None, unk2=None):
"""
subsong_: a subsong.Subsong instance to wrap
name: 16 ascii characters or fewer
loadmode: 'stream' (e.g. music) or 'entire' (load all at once e.g. sfx)
arguments below are optional, should come from the original IMC container
file to reduce the size of the resulting binary diff patch:
rawname: (optional) original 16-byte name including null byte + garbage
unk1, unk2: (optional) original 32-bit unsigned values with unknown purpose
"""
self._subsong = subsong_
self.name = name
self.loadmode = loadmode
self.rawname = rawname
self.unk1, self.unk2 = unk1, unk2
def __dir__(self):
"""return own contents + self._subsong's public contents"""
dir_contents = set()
subsong_contents = filter(
lambda x: not x.startswith("__"), self._subsong.__dir__()
)
dir_contents.update(super().__dir__(), subsong_contents)
return dir_contents
def __getattr__(self, name):
"""if name isn't found in self, also check self._subsong's public contents"""
return getattr(self._subsong, name)
def __setattr__(self, name, value):
"""if name exists in self._subsong's public contents, assign to it instead"""
if name == "_subsong":
object.__setattr__(self, "_subsong", value)
elif not name.startswith("__") and hasattr(self._subsong, name):
setattr(self._subsong, name, value)
else:
object.__setattr__(self, name, value)
@property
def name(self):
"""16 ascii characters or fewer"""
return self._name
@name.setter
def name(self, value):
try:
value.encode(encoding="ascii")
except UnicodeError as e:
if hasattr(e, "start") and hasattr(e, "end"):
nonascii = value[e.start : e.end]
raise ValueError(f"name {value!r} contains non-ascii {nonascii!r}")
else:
raise ValueError(f"name {value!r} contains non-ascii")
if len(value) > 16:
raise ValueError("name must be 16 or fewer ascii characters")
self._name = value
@property
def loadmode(self):
"""determines this subsong's load mode: either stream in parts or load entirely
This property can be assigned the strings "stream" or "entire" or their
corresponding raw values 0 or 2. Accessing this property will always return
the string version; for the raw value, use loadmode_raw.
"""
return self._loadmode
@loadmode.setter
def loadmode(self, value):
if value in loadmodes_toraw:
self._loadmode = value
elif value in loadmodes_fromraw:
self._loadmode = loadmodes_fromraw[value]
else:
raise ValueError(
f"loadmode can only be assigned strings {tuple(loadmodes_toraw.keys())}"
f" or their corresponding ints {tuple(loadmodes_toraw.values())}"
)
@property
def loadmode_raw(self):
"""raw value that corresponds to the current loadmode"""
return loadmodes_toraw[self.loadmode]
@property
def rawname(self):
"""bytes of length 16, or can be None"""
return self._rawname
@rawname.setter
def rawname(self, value):
if value is not None and len(value) != 16:
raise ValueError(
f"rawname must be length 16, was {value!r} with length {len(value)}"
)
self._rawname = value
def get_imcdata(self):
"""interleaves and returns subsong's PS-ADPCM data, including header
(It's Subsong.get_imcdata but uses self.loadmode instead of `entire` arg)
PS-ADPCM data is returned with a certain number of frames per block (fpb) and
blocks per channel (bpc). There are 3 possibilities, in this order of priority:
- use self.original_block_layout's saved fpb/bpc if it has them
- if self.loadmode=="entire", use large fpb + just enough bpc to hold the data
- otherwise (loadmode=="stream"), use 768 fpb + just enough bpc to hold the data
"entire" is for sound effects but not music, "otherwise" is for both (sfx/music)
"""
entire = self.loadmode == "entire"
return self._subsong.get_imcdata(entire=entire)
def clear_patchfinfo(self):
"""clear patch-friendly info: rawname, unk1/unk2, original block layout
clear (i.e. set to None) info that would otherwise be used to restore the
original layout from a Gitaroo Man subsong
"""
self.rawname = None
self.unk1 = self.unk2 = None
self._subsong.original_block_layout = None
class ImcContainer:
def __init__(self, containersubsongs):
"""initialize an ImcContainer
containersubsongs: iterable of imccontainer.ContainerSubsong instances"""
self.csubsongs = list(containersubsongs)
@property
def num_subsongs(self):
return len(self.csubsongs)
def read_imc(file):
""" read from a IMC audio container file and return an ImcContainer
file: A file path. Or it can be an already-opened file, in which case:
- it will read starting from the current file position, with no guarantee of file
position after returning
- the caller is responsible for closing the file afterwards
raises: EOFError if end of file is reached unexpectedly
"""
with open_maybe(file, "rb") as file:
start_offset = file.tell()
# read number of subsongs
num_subsongs = readstruct(file, "<I")
# read raw ssinfo
fmt, items_per_ssinfo_entry = ("<" + "16s4I" * num_subsongs), 5
raw_ssinfos = tuple(chunks(readstruct(file, fmt), items_per_ssinfo_entry))
next_ssoffsets = (x[1] for x in raw_ssinfos[1:])
# read subsongs, convert to ContainerSubsongs
csubsongs = []
for raw_ssinfo, next_ssoffset in zip_longest(
raw_ssinfos, next_ssoffsets, fillvalue=None
):
rawname, ssoffset, unk1, unk2, loadmode_raw = raw_ssinfo
name = rawname.split(b"\0", 1)[0].decode(encoding="ascii")
if next_ssoffset is not None:
ss_knownsize = next_ssoffset - ssoffset
else:
ss_knownsize = None
# read Subsong from within IMC container file
file.seek(start_offset + ssoffset)
subsong_ = subsong.read_subimc(file, knownsize=ss_knownsize)
csubsong = ContainerSubsong(
subsong_, name, loadmode_raw, rawname, unk1, unk2
)
csubsongs.append(csubsong)
return ImcContainer(csubsongs)
def write_imc(imccontainer, file, progressfunc=None):
"""write an ImcContainer to an IMC audio container file
imccontainer: an ImcContainer object
file: A file path. Or it can be an already-opened file, in which case:
- it will write starting from the current file position, with no guarantee of file
position after returning
- the caller is responsible for closing the file afterwards
progressfunc: function to run whenever a subsong of the ImcContainer is about to
be processed. It must accept three arguments: an int subsong index, an int total
number of subsongs, and an imccontainer.ContainerSubsong instance
"""
with open_maybe(file, "wb") as file:
start_offset = file.tell() # in case we're reading from inside an ISO file, etc
# write num_subsongs
num_subsongs = imccontainer.num_subsongs
file.write(struct.pack("<I", num_subsongs))
if not num_subsongs:
return
# true offsets for subsong info entries (contained in the IMC container header)
true_ssinfoentry_offsets = (
start_offset + 4 + i * 0x20 for i in range(num_subsongs)
)
file.seek(start_offset + 4 + num_subsongs * 0x20) # after the last ssinfo entry
for ssidx, true_ssinfoentry_offset, csubsong in zip(
count(), true_ssinfoentry_offsets, imccontainer.csubsongs
):
if progressfunc is not None:
progressfunc(ssidx, imccontainer.num_subsongs, csubsong)
# current pos is where we should write the subsong, but we won't just yet
true_subsong_offset = file.tell()
subsong_offset = true_subsong_offset - start_offset
# prepare subsong info entry
# uses patch-friendly info if present: rawname, unk1, unk2
if csubsong.rawname is not None:
# use csubsong.name pasted on top of csubsong.rawname
rawname = csubsong.name.encode(encoding="ascii")
if len(rawname) < 16:
rawname += b"\0"
rawname += csubsong.rawname[len(rawname) :]
else:
# just zero-pad csubsong.name
rawname = csubsong.name.encode(encoding="ascii")
rawname += (16 - len(rawname)) * b"\0"
unk1 = 0 if csubsong.unk1 is None else csubsong.unk1
unk2 = 0 if csubsong.unk2 is None else csubsong.unk2
ss_infoentry = struct.pack(
"<16s4I", rawname, subsong_offset, unk1, unk2, csubsong.loadmode_raw
)
# write subsong info entry into IMC container header
file.seek(true_ssinfoentry_offset)
file.write(ss_infoentry)
# write subsong data
file.seek(true_subsong_offset)
subsong.write_subimc(csubsong, file)
| [
"gitarootools.audio.subsong.write_subimc",
"gitarootools.audio.subsong.read_subimc",
"itertools.zip_longest",
"struct.pack",
"gitarootools.miscutils.datautils.open_maybe",
"itertools.count",
"gitarootools.miscutils.datautils.readstruct"
] | [((6777, 6799), 'gitarootools.miscutils.datautils.open_maybe', 'open_maybe', (['file', '"""rb"""'], {}), "(file, 'rb')\n", (6787, 6799), False, 'from gitarootools.miscutils.datautils import chunks, open_maybe, readstruct\n'), ((6902, 6924), 'gitarootools.miscutils.datautils.readstruct', 'readstruct', (['file', '"""<I"""'], {}), "(file, '<I')\n", (6912, 6924), False, 'from gitarootools.miscutils.datautils import chunks, open_maybe, readstruct\n'), ((7283, 7339), 'itertools.zip_longest', 'zip_longest', (['raw_ssinfos', 'next_ssoffsets'], {'fillvalue': 'None'}), '(raw_ssinfos, next_ssoffsets, fillvalue=None)\n', (7294, 7339), False, 'from itertools import count, zip_longest\n'), ((8711, 8733), 'gitarootools.miscutils.datautils.open_maybe', 'open_maybe', (['file', '"""wb"""'], {}), "(file, 'wb')\n", (8721, 8733), False, 'from gitarootools.miscutils.datautils import chunks, open_maybe, readstruct\n'), ((7785, 7834), 'gitarootools.audio.subsong.read_subimc', 'subsong.read_subimc', (['file'], {'knownsize': 'ss_knownsize'}), '(file, knownsize=ss_knownsize)\n', (7804, 7834), False, 'from gitarootools.audio import subsong\n'), ((8930, 8961), 'struct.pack', 'struct.pack', (['"""<I"""', 'num_subsongs'], {}), "('<I', num_subsongs)\n", (8941, 8961), False, 'import struct\n'), ((9379, 9386), 'itertools.count', 'count', ([], {}), '()\n', (9384, 9386), False, 'from itertools import count, zip_longest\n'), ((10532, 10618), 'struct.pack', 'struct.pack', (['"""<16s4I"""', 'rawname', 'subsong_offset', 'unk1', 'unk2', 'csubsong.loadmode_raw'], {}), "('<16s4I', rawname, subsong_offset, unk1, unk2, csubsong.\n loadmode_raw)\n", (10543, 10618), False, 'import struct\n'), ((10883, 10919), 'gitarootools.audio.subsong.write_subimc', 'subsong.write_subimc', (['csubsong', 'file'], {}), '(csubsong, file)\n', (10903, 10919), False, 'from gitarootools.audio import subsong\n'), ((7059, 7080), 'gitarootools.miscutils.datautils.readstruct', 'readstruct', (['file', 'fmt'], {}), '(file, fmt)\n', 
(7069, 7080), False, 'from gitarootools.miscutils.datautils import chunks, open_maybe, readstruct\n')] |
# Generated by Django 3.1.13 on 2021-07-16 09:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("core", "0025_activity_warning"),
]
operations = [
migrations.AddField(
model_name="boec",
name="unreadActivityCount",
field=models.IntegerField(default=0),
),
]
| [
"django.db.models.IntegerField"
] | [((341, 371), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (360, 371), False, 'from django.db import migrations, models\n')] |
import requests
import parsel
import re
def parse(html):
    """Scrape the hidden form fields out of a Google Forms page.

    Returns a dict mapping each hidden ``<input>``'s name to its value
    (empty string when the value attribute is absent), plus an ``'entry'``
    key holding the numeric question ID parsed from the first hidden
    input's serialized markup, which looks like ``entry.<id>_...``.
    """
    hidden = parsel.Selector(html).css('input[type=hidden]')
    entry_id = int(re.search(r'entry\.(.*)_', hidden[0].get()).group(1))
    fields = {'entry': entry_id}
    for field in hidden:
        fields[field.attrib['name']] = field.attrib.get('value', '')
    return fields
def find_path(goal_page, page_to_options):
    '''
    Greedy search from `goal_page` back to the first page (page 0).

    `page_to_options` maps a page number to the list of pages each of its
    options leads to.  Returns the sequence of option indices that, chosen
    in order starting from page 0, reaches `goal_page`.  The greedy walk
    works because there should only be one path between the two pages.
    '''
    page = goal_page
    path = []

    def find_option(options):
        # Index of the option leading to `page`, or None if not present.
        for option, target in enumerate(options):
            if target == page:
                return option
        return None

    while page != 0:
        for parent, options in page_to_options.items():
            option = find_option(options)
            # BUG FIX: the original tested `if option:`, which wrongly
            # discarded a valid option index of 0 (falsy), producing a wrong
            # path or spinning forever when the path goes through option 0.
            if option is not None:
                path.append(option)
                page = parent
                break

    return path[::-1]
# Target Google Forms endpoint (the maze-like form from the CTF challenge).
url = \
'https://docs.google.com/forms/d/e/' \
'1FAIpQLSe7sOTLHmGjmUY3iE6E7QLqeYAZDfQXsiJrz8r-ZcA_4cXNFQ/formResponse'
# HTML class used by multiple-choice questions.
# Used to detect whether a page contains more options or if we're at a terminal
# page.
choices_class = 'freebirdFormviewerComponentsQuestionRadioChoicesContainer'
res = requests.get(url)
page_form = parse(res.text)
# What are the options on each page and where do they lead to?
# The explored graph is stored in here: page number -> list of target pages.
page_to_options = {}
pages_forms = {0: page_form} # Form data needed to send a request
pages_todo = {0} # Unexplored pages
# From `FB_PUBLIC_LOAD_DATA_`, we kind of see the structure of the form.
# - There is a page that will ask for a password (which we found)
# - After that page is probably the 'congrats' page showing the flag format:
#   `pbctf{<digits you got along the way>_<password>}`
password = '<PASSWORD>'
# Simple graph search (pick an element from the unexplored set, add its
# children to the unexplored set, repeat)
while len(pages_todo) > 0:
    page = pages_todo.pop()
    data = pages_forms[page]
    entry_name = 'entry.' + str(data['entry'])
    # 'entry' is just for ourselves to remember what the ID is. It shouldn't
    # really be sent along with the form, so we'll just delete it now
    del data['entry']
    data['continue'] = '1'
    # What page does each option lead to?
    options = [0] * 10
    # `page_to_options` is used to keep track of which pages we've already
    # visited, so setting an empty value here will mark the page as visited for
    # now. This deals with pages whose options lead back to themselves.
    page_to_options[page] = {}
    print('Doing page {}: '.format(page))
    # Check all possible options
    for option in range(10):
        data[entry_name] = str(option)
        res = requests.post(url, data=data)
        if choices_class in res.text:
            # This page contains options, so let's parse it
            res_data = parse(res.text)
            # Figure out what the page number is (update `page_to_options`)
            page_history = res_data['pageHistory'].split(',')
            last_page = int(page_history[-1])
            options[option] = last_page
            print(str(last_page).rjust(2), end=' ', flush=True)
            # Save the form data so we can explore it later
            if last_page not in pages_forms:
                pages_forms[last_page] = res_data
            # Add it to the unexplored set (unless already visited)
            if last_page not in page_to_options:
                pages_todo.add(last_page)
        else:
            # This page doesn't have choices, so it must be the page that asks
            # for a password. We already know the password from
            # `FB_PUBLIC_LOAD_DATA_` so we're done here
            path = find_path(page, page_to_options)
            path.append(option)
            print('Pin:', path)
            path = [str(x) for x in path]
            pin = ''.join(path)
            flag = 'pbctf{{{}_{}}}'.format(pin, password)
            print('Flag:', flag)
            exit(0)
    page_to_options[page] = options
    print()
| [
"requests.post",
"re.compile",
"requests.get",
"parsel.Selector"
] | [((1449, 1466), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1461, 1466), False, 'import requests\n'), ((171, 192), 'parsel.Selector', 'parsel.Selector', (['html'], {}), '(html)\n', (186, 192), False, 'import parsel\n'), ((252, 279), 're.compile', 're.compile', (['"""entry\\\\.(.*)_"""'], {}), "('entry\\\\.(.*)_')\n", (262, 279), False, 'import re\n'), ((2932, 2961), 'requests.post', 'requests.post', (['url'], {'data': 'data'}), '(url, data=data)\n', (2945, 2961), False, 'import requests\n')] |
from operator import truediv
import cv2
from time import sleep
import HandTrackingModule as htm
import os
import autopy
import numpy as np
import math
import mediapipe as mp
# -- configuration -----------------------------------------------------------
frameR=20 #frame reduction: margin (px) around the active tracking region
frameR_x=800
frameR_y=110
wCam,hCam=1300 ,400   # capture resolution requested from the webcam
pTime=0               # NOTE(review): assigned but never used in this script
smoothening = 5 #cursor smoothing factor -- need to tune
plocX, plocY=0,0      # previous cursor location
clocX,clocY=0,0       # current (smoothed) cursor location
##########
cap=cv2.VideoCapture(0)
cap.set(3, wCam)   # 3/4 are the width/height capture property ids
cap.set(4, hCam)
detector=htm.handDetector(maxHands=1)
wScr, hScr=autopy.screen.size()  # screen size, for webcam->screen mapping
while True:
    #1. find hand landmarks
    success, img = cap.read()
    img= detector.findHands(img)
    lmList,bbox =detector.findPosition(img)
    #2. get the tip of the index and middle finger
    if len(lmList)!=0:
        x1,y1 =lmList[8][1:]      # index fingertip (landmark 8)
        x2, y2=lmList[12][1:]     # middle fingertip (landmark 12)
        #print(x1,y1,x2,y2)
        #3. check which fingers are up
        fingers=detector.fingersUp()
        cv2.rectangle(img, (frameR_x, frameR_y), (wCam-frameR_x,hCam-frameR_y),(255,0,0),2)
        #4. index only = moving mode; index and middle = clicking mode
        #convert the coordinates to the screen's coordinate space
        if fingers[1]==1 and fingers[2]==0:
            #moving mode
            x3= np.interp(x1,(frameR,wCam-frameR_x),(0,wScr))
            y3= np.interp(y1,(frameR,hCam-frameR_y),(0,hScr))
            #5. smoothen the values to reduce jitter
            clocX=plocX+(x3-plocX)/smoothening
            clocY=plocY+(y3-plocY)/smoothening
            #move the mouse
            #flip all existing values on x axis so motion mirrors the cursor
            autopy.mouse.move(wScr-clocX,clocY)
            cv2.circle(img,(x1,y1),10,(0,255,0),cv2.FILLED)
            plocX,plocY=clocX,clocY
        #clicking mode: both index and middle fingers are up
        if fingers[1]==1 and fingers[2]==1:
            length, img, lineinfo=detector.findDistance(8,12,img)
            #print(length)
            if length<40:   # fingertips pinched together -> click
                cv2.circle(img, (lineinfo[4],lineinfo[5]),7,(0,200,0),cv2.FILLED)
                autopy.mouse.click()
                sleep(0.3)  # debounce so one pinch registers one click
        # NOTE(review): if fingersUp() returns 0/1 flags, fingers[2]==2 and
        # fingers[3]==3 can never be true -- this branch looks unreachable; verify.
        if fingers[1]==1 and fingers[2]==2 and fingers[3]==3:
            length, img, lineinfo=detector.findDistance(8,12,img)
            if length<40:
                print("true")
    #show image
    cv2.imshow("Image",img)
cv2.waitKey(1) | [
"cv2.rectangle",
"autopy.mouse.click",
"autopy.screen.size",
"HandTrackingModule.handDetector",
"time.sleep",
"cv2.imshow",
"cv2.circle",
"cv2.VideoCapture",
"numpy.interp",
"autopy.mouse.move",
"cv2.waitKey"
] | [((360, 379), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (376, 379), False, 'import cv2\n'), ((423, 451), 'HandTrackingModule.handDetector', 'htm.handDetector', ([], {'maxHands': '(1)'}), '(maxHands=1)\n', (439, 451), True, 'import HandTrackingModule as htm\n'), ((463, 483), 'autopy.screen.size', 'autopy.screen.size', ([], {}), '()\n', (481, 483), False, 'import autopy\n'), ((2237, 2261), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'img'], {}), "('Image', img)\n", (2247, 2261), False, 'import cv2\n'), ((2265, 2279), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2276, 2279), False, 'import cv2\n'), ((877, 973), 'cv2.rectangle', 'cv2.rectangle', (['img', '(frameR_x, frameR_y)', '(wCam - frameR_x, hCam - frameR_y)', '(255, 0, 0)', '(2)'], {}), '(img, (frameR_x, frameR_y), (wCam - frameR_x, hCam - frameR_y),\n (255, 0, 0), 2)\n', (890, 973), False, 'import cv2\n'), ((1211, 1262), 'numpy.interp', 'np.interp', (['x1', '(frameR, wCam - frameR_x)', '(0, wScr)'], {}), '(x1, (frameR, wCam - frameR_x), (0, wScr))\n', (1220, 1262), True, 'import numpy as np\n'), ((1273, 1324), 'numpy.interp', 'np.interp', (['y1', '(frameR, hCam - frameR_y)', '(0, hScr)'], {}), '(y1, (frameR, hCam - frameR_y), (0, hScr))\n', (1282, 1324), True, 'import numpy as np\n'), ((1518, 1556), 'autopy.mouse.move', 'autopy.mouse.move', (['(wScr - clocX)', 'clocY'], {}), '(wScr - clocX, clocY)\n', (1535, 1556), False, 'import autopy\n'), ((1566, 1620), 'cv2.circle', 'cv2.circle', (['img', '(x1, y1)', '(10)', '(0, 255, 0)', 'cv2.FILLED'], {}), '(img, (x1, y1), 10, (0, 255, 0), cv2.FILLED)\n', (1576, 1620), False, 'import cv2\n'), ((1899, 1970), 'cv2.circle', 'cv2.circle', (['img', '(lineinfo[4], lineinfo[5])', '(7)', '(0, 200, 0)', 'cv2.FILLED'], {}), '(img, (lineinfo[4], lineinfo[5]), 7, (0, 200, 0), cv2.FILLED)\n', (1909, 1970), False, 'import cv2\n'), ((1981, 2001), 'autopy.mouse.click', 'autopy.mouse.click', ([], {}), '()\n', (1999, 2001), False, 'import autopy\n'), 
((2018, 2028), 'time.sleep', 'sleep', (['(0.3)'], {}), '(0.3)\n', (2023, 2028), False, 'from time import sleep\n')] |
from abc import ABC, abstractmethod
import numpy as np
import matplotlib.pyplot as plt
from heatlib.units import Time
from heatlib.boundary_conditions import Boundary_Condition
from heatlib.domains import Domain_Constant_1D, Domain_Variable_1D
from heatlib.solvers import Solver_1D
#################################################
# Models #
#################################################
class Model_1D(ABC):
    """Abstract base class for 1-D heat models.

    Concrete subclasses must assign ``self.domain``, ``self.bc0`` and
    ``self.bc1`` *before* delegating to this initializer, which validates the
    boundary conditions and stores the plotting options.  The temperature
    solution ``self.T`` is produced later by a :class:`Solver_1D` via
    :meth:`solve`.
    """

    @abstractmethod
    def __init__(self, **kwargs):
        # Subclasses set bc0/bc1 first; validate them here.
        assert isinstance(
            self.bc0, Boundary_Condition
        ), 'Second argument must be Boundary_Condition.'
        assert isinstance(
            self.bc1, Boundary_Condition
        ), 'Third argument must be Boundary_Condition.'
        self.time_unit = kwargs.get('time_unit', 's') # default plotting time units
        self.orientation = kwargs.get('orientation', 'vertical') # default plotting orientation
        self.figsize = kwargs.get('figsize', (9, 6)) # default figure size
        self.flipy = kwargs.get('flipy', True) # plot x as negative for vertical orientation
        self.T = None          # temperature solution; filled in by a solver
        self._time_abs = 0.0   # elapsed time in base units -- presumably seconds
    @property
    def time(self):
        # Elapsed time converted from base units to the display unit.
        return self._time_abs / abs(Time(1, self.time_unit))
    def get_T(self, x):
        """Temperature at position(s) ``x`` by linear interpolation, or None."""
        if self.T is not None:
            return np.interp(abs(x), self.domain.x, self.T)
        else:
            print('Model has not yet solution.')
            return None
    def __repr__(self):
        if self.T is None:
            return 'No solutions. Ready for initial one.'
        elif self._time_abs == 0.0:
            return 'Model with initial solution'
        else:
            return f'Model with evolutionary solution for time {self.time:g}{self.time_unit}'
    def info(self):
        """Print both boundary conditions and the domain summary."""
        print(self.bc0)
        # NOTE(review): if domain.info() prints and returns None, this also
        # prints 'None' -- verify against Domain_*_1D.info().
        print(self.domain.info())
        print(self.bc1)
    def solve(self, solver, **kwargs):
        """Run ``solver`` on this model (the solver updates ``self.T``)."""
        assert isinstance(
            solver, Solver_1D
        ), 'You have to use Solver_1D instance as argument.'
        solver.solve(self, **kwargs)
    def plot(self):
        """Plot the current temperature profile, if a solution exists."""
        if self.T is not None:
            fig, ax = plt.subplots(figsize=self.figsize)
            if self.orientation == 'vertical':
                # With flipy, depth is drawn downwards (negative axis values).
                multi = -1 if self.flipy else 1
                ax.plot(
                    self.T, multi * self.domain.x_units, label=f't={self.time:g}{self.time_unit}'
                )
                ax.set_xlabel('Temperature [°C]')
                ax.set_ylabel(f'Depth [{self.domain.plot_unit}]')
            else:
                ax.plot(
                    self.domain.x_units, self.T, label=f't={self.time:g}{self.time_unit}'
                )
                ax.set_xlabel(f'Distance [{self.domain.plot_unit}]')
                ax.set_ylabel('Temperature [°C]')
            ax.legend(loc='best')
            plt.show()
        else:
            print('Model has not yet any solution.')
class Model_Constant_1D(Model_1D):
    """1-D heat model defined on a domain with constant properties."""

    def __init__(self, domain, bc0, bc1, **kwargs):
        """Store domain and boundary conditions, then finish base setup.

        The base-class initializer (called last) validates both boundary
        conditions and applies the plotting keyword arguments.
        """
        assert isinstance(
            domain, Domain_Constant_1D
        ), 'You have to use Domain_Constant_1D instance as argument.'
        self.domain, self.bc0, self.bc1 = domain, bc0, bc1
        super().__init__(**kwargs)
class Model_Variable_1D(Model_1D):
    """1-D heat model defined on a domain with spatially variable properties."""

    def __init__(self, domain, bc0, bc1, **kwargs):
        """Store domain and boundary conditions, then finish base setup."""
        assert isinstance(
            domain, Domain_Variable_1D
        ), 'You have to use Domain_Variable_1D instance as argument.'
        self.domain, self.bc0, self.bc1 = domain, bc0, bc1
        super().__init__(**kwargs)

    @property
    def Tm(self):
        """Temperature interpolated onto the domain midpoints, or None."""
        if self.T is None:
            print('Model has not yet solution.')
            return None
        return np.interp(self.domain.xm, self.domain.x, self.T)
| [
"heatlib.units.Time",
"matplotlib.pyplot.subplots",
"numpy.interp",
"matplotlib.pyplot.show"
] | [((2137, 2171), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'self.figsize'}), '(figsize=self.figsize)\n', (2149, 2171), True, 'import matplotlib.pyplot as plt\n'), ((2840, 2850), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2848, 2850), True, 'import matplotlib.pyplot as plt\n'), ((3671, 3719), 'numpy.interp', 'np.interp', (['self.domain.xm', 'self.domain.x', 'self.T'], {}), '(self.domain.xm, self.domain.x, self.T)\n', (3680, 3719), True, 'import numpy as np\n'), ((1234, 1257), 'heatlib.units.Time', 'Time', (['(1)', 'self.time_unit'], {}), '(1, self.time_unit)\n', (1238, 1257), False, 'from heatlib.units import Time\n')] |
# BSD 3-Clause License
#
# Copyright (c) 2016-21, University of Liverpool
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Parser module specific to rosetta NPZ distance predictions
"""
import numpy as np
from conkit.io._parser import BinaryDistanceFileParser
from conkit.core.distance import Distance
from conkit.core.distogram import Distogram
from conkit.core.distancefile import DistanceFile
# Distance bins in Angstroms; the final bin is open-ended (> 20 A).
DISTANCE_BINS = ((0, 2), (2, 2.5), (2.5, 3), (3, 4), (4, 4.5), (4.5, 5), (5, 5.5), (5.5, 6), (6, 6.5), (6.5, 7),
                 (7, 7.5), (7.5, 8), (8, 8.5), (8.5, 9), (9, 9.5), (9.5, 10), (10, 10.5), (10.5, 11), (11, 11.5),
                 (11.5, 12), (12, 12.5), (12.5, 13), (13, 13.5), (13.5, 14), (14, 14.5), (14.5, 15), (15, 15.5),
                 (15.5, 16), (16, 16.5), (16.5, 17), (17, 17.5), (17.5, 18), (18, 18.5), (18.5, 19), (19, 19.5),
                 (19.5, 20), (20, np.inf))
class RosettaNpzParser(BinaryDistanceFileParser):
    """Parser class for rosetta NPZ distance prediction file"""

    def read(self, f_handle, f_id="rosettanpz"):
        """Read a distance prediction file

        Parameters
        ----------
        f_handle
            Open file handle [read permissions]
        f_id : str, optional
            Unique contact file identifier

        Returns
        -------
        :obj:`~conkit.core.distancefile.DistanceFile`
        """
        hierarchy = DistanceFile(f_id)
        hierarchy.original_file_format = "ROSETTA_NPZ"
        _map = Distogram("distogram_1")
        hierarchy.add(_map)
        prediction = np.load(f_handle, allow_pickle=True)
        probs = prediction['dist']
        # Bin #0 corresponds with d>20A & bins #1 ~ #36 correspond with 2A<d<20A.
        # Move bin #0 to the end so the probability axis lines up with DISTANCE_BINS.
        probs = probs[:, :, list(range(1, 37)) + [0]]
        L = probs.shape[0]
        # Only the upper triangle (j >= i) is read into the distogram.
        for i in range(L):
            for j in range(i, L):
                _distance = Distance(i + 1, j + 1, tuple(probs[i, j, :].tolist()), DISTANCE_BINS)
                _map.add(_distance)
        return hierarchy

    def write(self, f_handle, hierarchy):
        """Write a distance file instance to a file

        Raises
        ------
        :exc:`NotImplementedError`
            Write function not available
        """
        raise NotImplementedError("Write function not available yet")
| [
"numpy.load",
"conkit.core.distancefile.DistanceFile",
"conkit.core.distogram.Distogram"
] | [((2857, 2875), 'conkit.core.distancefile.DistanceFile', 'DistanceFile', (['f_id'], {}), '(f_id)\n', (2869, 2875), False, 'from conkit.core.distancefile import DistanceFile\n'), ((2946, 2970), 'conkit.core.distogram.Distogram', 'Distogram', (['"""distogram_1"""'], {}), "('distogram_1')\n", (2955, 2970), False, 'from conkit.core.distogram import Distogram\n'), ((3021, 3057), 'numpy.load', 'np.load', (['f_handle'], {'allow_pickle': '(True)'}), '(f_handle, allow_pickle=True)\n', (3028, 3057), True, 'import numpy as np\n')] |
##############################################################################
# Copyright 2020 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import pytest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
import dfpipeline as dfp
# Input fixture: one NaN per column so every imputation strategy has work to do.
df = pd.DataFrame({
    'col1': [1, 2, 3, np.nan],
    'col2': [1, 3, 5, np.nan]
})
# Expected result of mean-imputing col1 (mean of 1, 2, 3 is 2.0).
mean_df = pd.DataFrame({
    'col1': [1.0, 2.0, 3.0, 2.0],
    'col2': [1, 3, 5, np.nan]
})
# Expected result of median-imputing col2 (median of 1, 3, 5 is 3.0).
median_df = pd.DataFrame({
    'col1': [1, 2, 3, np.nan],
    'col2': [1.0, 3.0, 5.0, 3.0]
})
# Expected result of constant-imputing both columns with 0.
const_df = pd.DataFrame({
    'col1': [1.0, 2.0, 3.0, 0.0],
    'col2': [1.0, 3.0, 5.0, 0.0]
})
def test_impute_mean():
    """col1's NaN should be replaced by the column mean (2.0)."""
    imputer = dfp.Imputer(inputs=['col1'], outputs=['col1'], strategy='mean')
    result = imputer.fit_transform(df.copy())
    assert_frame_equal(result, mean_df)
def test_impute_median():
    """col2's NaN should be replaced by the column median (3.0)."""
    imputer = dfp.Imputer(inputs=['col2'], outputs=['col2'], strategy='median')
    result = imputer.fit_transform(df.copy())
    assert_frame_equal(result, median_df)
def test_impute_const():
    """NaNs in both columns should be replaced by the constant 0."""
    imputer = dfp.Imputer(inputs=['col1', 'col2'], outputs=['col1', 'col2'], val=0)
    result = imputer.fit_transform(df.copy())
    assert_frame_equal(result, const_df)
| [
"pandas.DataFrame",
"pandas.testing.assert_frame_equal",
"dfpipeline.Imputer"
] | [((900, 968), 'pandas.DataFrame', 'pd.DataFrame', (["{'col1': [1, 2, 3, np.nan], 'col2': [1, 3, 5, np.nan]}"], {}), "({'col1': [1, 2, 3, np.nan], 'col2': [1, 3, 5, np.nan]})\n", (912, 968), True, 'import pandas as pd\n'), ((990, 1061), 'pandas.DataFrame', 'pd.DataFrame', (["{'col1': [1.0, 2.0, 3.0, 2.0], 'col2': [1, 3, 5, np.nan]}"], {}), "({'col1': [1.0, 2.0, 3.0, 2.0], 'col2': [1, 3, 5, np.nan]})\n", (1002, 1061), True, 'import pandas as pd\n'), ((1085, 1156), 'pandas.DataFrame', 'pd.DataFrame', (["{'col1': [1, 2, 3, np.nan], 'col2': [1.0, 3.0, 5.0, 3.0]}"], {}), "({'col1': [1, 2, 3, np.nan], 'col2': [1.0, 3.0, 5.0, 3.0]})\n", (1097, 1156), True, 'import pandas as pd\n'), ((1179, 1253), 'pandas.DataFrame', 'pd.DataFrame', (["{'col1': [1.0, 2.0, 3.0, 0.0], 'col2': [1.0, 3.0, 5.0, 0.0]}"], {}), "({'col1': [1.0, 2.0, 3.0, 0.0], 'col2': [1.0, 3.0, 5.0, 0.0]})\n", (1191, 1253), True, 'import pandas as pd\n'), ((1298, 1361), 'dfpipeline.Imputer', 'dfp.Imputer', ([], {'inputs': "['col1']", 'outputs': "['col1']", 'strategy': '"""mean"""'}), "(inputs=['col1'], outputs=['col1'], strategy='mean')\n", (1309, 1361), True, 'import dfpipeline as dfp\n'), ((1404, 1436), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['out', 'mean_df'], {}), '(out, mean_df)\n', (1422, 1436), False, 'from pandas.testing import assert_frame_equal\n'), ((1473, 1538), 'dfpipeline.Imputer', 'dfp.Imputer', ([], {'inputs': "['col2']", 'outputs': "['col2']", 'strategy': '"""median"""'}), "(inputs=['col2'], outputs=['col2'], strategy='median')\n", (1484, 1538), True, 'import dfpipeline as dfp\n'), ((1581, 1615), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['out', 'median_df'], {}), '(out, median_df)\n', (1599, 1615), False, 'from pandas.testing import assert_frame_equal\n'), ((1651, 1720), 'dfpipeline.Imputer', 'dfp.Imputer', ([], {'inputs': "['col1', 'col2']", 'outputs': "['col1', 'col2']", 'val': '(0)'}), "(inputs=['col1', 'col2'], outputs=['col1', 'col2'], val=0)\n", 
(1662, 1720), True, 'import dfpipeline as dfp\n'), ((1763, 1796), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['out', 'const_df'], {}), '(out, const_df)\n', (1781, 1796), False, 'from pandas.testing import assert_frame_equal\n')] |
import wx
from gui.textutil import CopyFont, default_font
#from gui.toolbox import prnt
from wx import EXPAND,ALL,TOP,VERTICAL,ALIGN_CENTER_HORIZONTAL,ALIGN_CENTER_VERTICAL,LI_HORIZONTAL
ALIGN_CENTER = ALIGN_CENTER_HORIZONTAL|ALIGN_CENTER_VERTICAL
TOPLESS = ALL & ~TOP
bgcolors = [
wx.Color(238, 238, 238),
wx.Color(255, 255, 255),
]
hovbgcolor = wx.Color(220, 220, 220)
#def printlist(list):
# prnt(list)
class VisualListEditorList(wx.VListBox):
    '''
    Drag-and-drop reorderable list control, optionally with one checkbox per
    row.  ``listcallback`` is invoked with the new ordering (and, when
    checkboxes are enabled, the list of checked states) whenever the user
    reorders items or toggles a checkbox.
    '''
    text_alignment = ALIGN_CENTER
    min_width = 150
    def __init__(self,
                 parent,
                 list2sort,
                 prettynames = None,
                 listcallback = None,
                 ischecked = None # if given, a function of one argument that determines if an element is initially checked
                 ):
        wx.VListBox.__init__(self, parent)
        self.Font = default_font()
        self.item_height = self.Font.Height + 12
        self.oldlist = None  # snapshot taken on left-down, compared on left-up
        self.prettynames = prettynames or {}
        self.listcallback = listcallback
        self.SetList(list2sort)
        self.setup_checkboxes(ischecked)
        self.BackgroundColour = wx.WHITE
        self._hovered = -1   # index of the row under the mouse, -1 for none
        Bind = self.Bind
        Bind(wx.EVT_MOTION, self.OnMotion)
        Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
        Bind(wx.EVT_PAINT,self.OnPaint)
    def CalcMinWidth(self):
        return self.min_width
    def SetList(self, seq):
        """Replace the displayed list (a copy is kept) and resize to fit."""
        self.thelist = seq[:]
        self.SetItemCount(len(self.thelist))
        height = self.OnMeasureItem(0) * self.ItemCount
        self.SetMinSize(wx.Size(self.CalcMinWidth(), height))
        self.RefreshAll()
    def OnPaint(self,event):
        # Draw a 1px grey outline just outside this control, on the parent.
        event.Skip()
        srect= wx.Rect(*self.Rect)
        srect.Inflate(1,1)
        pcdc = wx.ClientDC(self.Parent)
        pcdc.Brush = wx.TRANSPARENT_BRUSH
        pcdc.Pen = wx.Pen(wx.Colour(213,213,213))
        pcdc.DrawRectangleRect(srect)
    def GetHovered(self):
        return self._hovered
    def GetItem(self, n):
        return self.thelist[n]
    def SetHovered(self,i):
        # Repaint both the previously hovered row and the newly hovered one.
        slist = self.thelist
        if i >= len(slist):
            return
        n = self._hovered
        self._hovered = i
        if n != -1:
            self.RefreshLine(n)
        if i != -1:
            self.RefreshLine(i)
    Hovered = property(GetHovered,SetHovered)
    def OnMeasureItem(self,n):
        return self.item_height
    def OnDrawBackground(self,dc,rect,n):
        # Alternating row colours; the hovered row gets its own colour.
        dc.Brush = wx.Brush(hovbgcolor if self.Hovered == n else bgcolors[n % len(bgcolors)])
        dc.Pen = wx.TRANSPARENT_PEN
        dc.DrawRectangleRect(rect)
    def OnDrawItem(self,dc,rect,n):
        elem = self.thelist[n]
        self._draw_checkbox(dc, rect, n)
        # prettynames may be a callable producing a label, or a mapping.
        if hasattr(self.prettynames, '__call__'):
            name = self.prettynames(elem)
        else:
            name = self.prettynames.get(self.thelist[n], _('(Unnamed Panel)'))
        dc.Font = self.Font
        dc.DrawLabel(name, rect, self.text_alignment)
    def OnMotion(self,event):
        rect = self.ClientRect
        wap = wx.FindWindowAtPointer()
        mp = event.Position
        hit = self.HitTest(mp)
        dragging = event.Dragging()
        selection = self.Selection
        thelist = self.thelist
        checked = self.checked
        if hit != -1:
            # Arrow cursor over a checkbox, hand cursor elsewhere on the row.
            cursor = wx.CURSOR_ARROW if self._over_checkbox(mp, hit) else wx.CURSOR_HAND
            self.SetCursor(wx.StockCursor(cursor))
        if not dragging:
            # Not dragging: manage mouse capture so we notice the mouse leaving.
            if not rect.Contains(mp) or not wap == self:
                while self.HasCapture():
                    self.ReleaseMouse()
                self.Hovered = -1
                return
            elif not self.HasCapture():
                self.CaptureMouse()
        if dragging and -1 not in (selection, hit) and hit != selection:
            # Dragging over a different row: move the selected item (and its
            # checked state, if any) to the hovered position.
            self.Selection = hit
            item = thelist[selection]
            if checked is not None:
                item_checked = checked[selection]
            thelist.pop(selection)
            thelist.insert(hit, item)
            if checked is not None:
                checked.pop(selection)
                checked.insert(hit, item_checked)
            self.Refresh()
        self.Hovered = hit
    def OnLeftDown(self,event):
        mp = event.Position
        self.oldlist = list(self.thelist)  # snapshot for change detection in OnLeftUp
        hit = self.HitTest(mp)
        if hit != -1 and self._over_checkbox(mp, hit):
            # Toggle the checkbox and notify the callback immediately.
            self.checked[hit] = not self.checked[hit]
            self.listcallback(self.thelist, self.checked)
            self.Refresh()
        else:
            self.Selection = hit
    def OnLeftUp(self,event):
        # Fire the callback only if the drag actually changed the ordering.
        if self.oldlist and self.oldlist != self.thelist and self.listcallback:
            if self.checked is not None:
                self.listcallback(self.thelist, self.checked)
            else:
                self.listcallback(self.thelist)
        self.Selection = -1
        self.oldlist = None
    #
    # checkbox support
    #
    def setup_checkboxes(self, ischecked):
        if ischecked is not None:
            self.checked = [ischecked(e) for e in self.thelist]
        else:
            self.checked = None
        self.checkbox_border = 5
        self.checkbox_size = 16
        # NOTE(review): (item_height - size) / 2 is a float under Python 3's
        # true division; wx.Rect may require ints -- verify.
        self.checkbox_rect = wx.Rect(self.checkbox_border, (self.item_height - self.checkbox_size) / 2, self.checkbox_size, self.checkbox_size)
    def _draw_checkbox(self, dc, rect, n):
        if self.checked is None:
            return
        flag = wx.CONTROL_CHECKED if self.checked[n] else 0
        # draw a checkbox, then shift the label rect right past it
        cbrect = wx.Rect(*self.checkbox_rect)
        cbrect.Offset((rect.x, rect.y))
        wx.RendererNative.Get().DrawCheckBox(self, dc, cbrect, flag)
        rect.x = rect.x + self.checkbox_size + self.checkbox_border * 2
    def _over_checkbox(self, mp, hit):
        if self.checked is None: return False
        hitmp = mp - (0, hit * self.item_height)
        return self.checkbox_rect.Contains(hitmp)
class VisualListEditorListWithLinks(VisualListEditorList):
    '''
    A "visual list editor" which draws clickable links on the right.

    Subclasses override LinksForRow(n), returning ("Link Text", link_func)
    where link_func is a callable taking one argument, the row's item.
    Subclasses should also call PaintLinks(dc, rect, n) in their EVT_PAINT
    handlers.
    '''
    link_padding = 5  # horizontal padding added around each link's text
    def LinksForRow(self, n):
        '''Overridden by subclasses; returns a list of (text, func) pairs.'''
        return []
    def PaintLinks(self, dc, rect, n):
        '''Should be called by subclasses' EVT_PAINT handler.'''
        dc.Font = self.Font
        dc.TextForeground = wx.BLUE
        for (link_text, func), rect in self.LinkRectsForRow(n):
            # Rects from LinkRectsForRow are row-relative; shift them to row n.
            rect.y += n * self.item_height
            dc.DrawLabel(link_text, rect, wx.ALIGN_CENTER_VERTICAL)
    def __init__(self, *a, **k):
        VisualListEditorList.__init__(self, *a, **k)
        self.Bind(wx.EVT_LEFT_DOWN, self.__leftdown)
    def __leftdown(self, e):
        # Dispatch clicks on a link to its handler; otherwise fall through to
        # the base class's drag/checkbox handling via e.Skip().
        mp = e.Position
        hit = self.HitTest(mp)
        link = self._link_hittest(mp, hit)
        if link:
            link_text, link_func = link
            return link_func(self.thelist[hit])
        e.Skip()
    def _link_hittest(self, mp, hit):
        # Return the (text, func) pair under the mouse, or None.
        if hit == -1: return
        mp = mp - (0, hit * self.item_height)
        for link, rect in self.LinkRectsForRow(hit):
            if rect.Contains(mp):
                return link
    def LinkRectsForRow(self, hit):
        """Return [((text, func), rect), ...] for a row, left to right."""
        links = self.LinksForRow(hit)
        dc = wx.ClientDC(self)
        dc.Font = self.Font
        # calculate link rects from the right.
        p = self.ClientRect.TopRight
        rects = []
        for link_text, func in reversed(links):
            w, h = dc.GetTextExtent(link_text)
            w += self.link_padding
            r = wx.Rect(p.x - w, p.y, w, self.item_height)
            rects.append(((link_text, func), r))
            p.x -= w
        rects.reverse() # restore left to right order.
        return rects
class VisualListEditor(wx.Dialog):
    """Dialog hosting a VisualListEditorList plus a header and a Done button."""
    dialog_style = wx.DEFAULT_DIALOG_STYLE | wx.FRAME_FLOAT_ON_PARENT
    # NOTE(review): the title default _("Arrange Panels") is evaluated once at
    # class-definition time -- fine unless the locale changes at runtime.
    def __init__(self, parent, list2sort, prettynames=None, listcallback=None,
                 title=_("Arrange Panels"),
                 listclass = VisualListEditorList,
                 ischecked = None):
        wx.Dialog.__init__(self, parent, -1, title, style = self.dialog_style)
        Bind = self.Bind
        Bind(wx.EVT_CLOSE, self.Done)
        # construct
        panel = wx.Panel(self)
        text = wx.StaticText(panel, -1, _('Drag and drop to reorder'), style = ALIGN_CENTER)
        text.Font = CopyFont(text.Font, weight=wx.BOLD)
        self.vle = vle = listclass(panel, list2sort, prettynames, listcallback, ischecked=ischecked)
        Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
        self.vle.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
        hline = wx.StaticLine(panel,style = LI_HORIZONTAL)
        done = wx.Button(panel,-1, _('Done'))
        done.Bind(wx.EVT_BUTTON,self.Done)
        # layout
        main_sizer = self.Sizer = wx.BoxSizer(VERTICAL)
        main_sizer.Add(panel, 1, EXPAND)
        s = panel.Sizer = wx.BoxSizer(VERTICAL)
        border_size = 6
        s.AddMany([(text, 0, EXPAND|ALL, border_size),
                   (vle, 1, EXPAND|TOPLESS, border_size),
                   (hline, 0, EXPAND|TOPLESS, border_size),
                   (done, 0, EXPAND|TOPLESS, border_size)])
        self.Fit()
    def SetList(self, seq):
        """Forward to the embedded list control."""
        return self.vle.SetList(seq)
    def Done(self, event):
        # Handler for both the Done button and EVT_CLOSE.
        self.Hide()
        self.Destroy()
    def OnKeyDown(self, e):
        # Escape closes the dialog; all other keys propagate.
        if e.KeyCode == wx.WXK_ESCAPE:
            self.Close()
        else:
            e.Skip()
| [
"wx.StockCursor",
"wx.Colour",
"wx.RendererNative.Get",
"wx.Dialog.__init__",
"gui.textutil.CopyFont",
"wx.BoxSizer",
"wx.FindWindowAtPointer",
"wx.VListBox.__init__",
"wx.StaticLine",
"wx.Color",
"gui.textutil.default_font",
"wx.Panel",
"wx.Rect",
"wx.ClientDC"
] | [((359, 382), 'wx.Color', 'wx.Color', (['(220)', '(220)', '(220)'], {}), '(220, 220, 220)\n', (367, 382), False, 'import wx\n'), ((289, 312), 'wx.Color', 'wx.Color', (['(238)', '(238)', '(238)'], {}), '(238, 238, 238)\n', (297, 312), False, 'import wx\n'), ((318, 341), 'wx.Color', 'wx.Color', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (326, 341), False, 'import wx\n'), ((814, 848), 'wx.VListBox.__init__', 'wx.VListBox.__init__', (['self', 'parent'], {}), '(self, parent)\n', (834, 848), False, 'import wx\n'), ((869, 883), 'gui.textutil.default_font', 'default_font', ([], {}), '()\n', (881, 883), False, 'from gui.textutil import CopyFont, default_font\n'), ((1767, 1786), 'wx.Rect', 'wx.Rect', (['*self.Rect'], {}), '(*self.Rect)\n', (1774, 1786), False, 'import wx\n'), ((1829, 1853), 'wx.ClientDC', 'wx.ClientDC', (['self.Parent'], {}), '(self.Parent)\n', (1840, 1853), False, 'import wx\n'), ((3149, 3173), 'wx.FindWindowAtPointer', 'wx.FindWindowAtPointer', ([], {}), '()\n', (3171, 3173), False, 'import wx\n'), ((5354, 5472), 'wx.Rect', 'wx.Rect', (['self.checkbox_border', '((self.item_height - self.checkbox_size) / 2)', 'self.checkbox_size', 'self.checkbox_size'], {}), '(self.checkbox_border, (self.item_height - self.checkbox_size) / 2,\n self.checkbox_size, self.checkbox_size)\n', (5361, 5472), False, 'import wx\n'), ((5678, 5706), 'wx.Rect', 'wx.Rect', (['*self.checkbox_rect'], {}), '(*self.checkbox_rect)\n', (5685, 5706), False, 'import wx\n'), ((7628, 7645), 'wx.ClientDC', 'wx.ClientDC', (['self'], {}), '(self)\n', (7639, 7645), False, 'import wx\n'), ((8440, 8508), 'wx.Dialog.__init__', 'wx.Dialog.__init__', (['self', 'parent', '(-1)', 'title'], {'style': 'self.dialog_style'}), '(self, parent, -1, title, style=self.dialog_style)\n', (8458, 8508), False, 'import wx\n'), ((8612, 8626), 'wx.Panel', 'wx.Panel', (['self'], {}), '(self)\n', (8620, 8626), False, 'import wx\n'), ((8741, 8776), 'gui.textutil.CopyFont', 'CopyFont', (['text.Font'], 
{'weight': 'wx.BOLD'}), '(text.Font, weight=wx.BOLD)\n', (8749, 8776), False, 'from gui.textutil import CopyFont, default_font\n'), ((8998, 9039), 'wx.StaticLine', 'wx.StaticLine', (['panel'], {'style': 'LI_HORIZONTAL'}), '(panel, style=LI_HORIZONTAL)\n', (9011, 9039), False, 'import wx\n'), ((9183, 9204), 'wx.BoxSizer', 'wx.BoxSizer', (['VERTICAL'], {}), '(VERTICAL)\n', (9194, 9204), False, 'import wx\n'), ((9272, 9293), 'wx.BoxSizer', 'wx.BoxSizer', (['VERTICAL'], {}), '(VERTICAL)\n', (9283, 9293), False, 'import wx\n'), ((1925, 1949), 'wx.Colour', 'wx.Colour', (['(213)', '(213)', '(213)'], {}), '(213, 213, 213)\n', (1934, 1949), False, 'import wx\n'), ((7924, 7966), 'wx.Rect', 'wx.Rect', (['(p.x - w)', 'p.y', 'w', 'self.item_height'], {}), '(p.x - w, p.y, w, self.item_height)\n', (7931, 7966), False, 'import wx\n'), ((3505, 3527), 'wx.StockCursor', 'wx.StockCursor', (['cursor'], {}), '(cursor)\n', (3519, 3527), False, 'import wx\n'), ((5755, 5778), 'wx.RendererNative.Get', 'wx.RendererNative.Get', ([], {}), '()\n', (5776, 5778), False, 'import wx\n')] |
from django.db import router
from django.urls import path, include
from .router import router
from . import views
# Mount all router-generated API routes at the site root.
urlpatterns = [
    path('', include(router.urls))
] | [
"django.urls.include"
] | [((144, 164), 'django.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (151, 164), False, 'from django.urls import path, include\n')] |
# Copyright 2020 The Lucent Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides lowres_tensor()."""
from __future__ import absolute_import, division, print_function
from typing import Union, List, Tuple, Optional, Callable
import einops
import numpy as np
import torch
import torch.nn.functional as F
def lowres_tensor(
    shape: Union[List, Tuple, torch.Size],
    underlying_shape: Union[List, Tuple, torch.Size],
    offset: Optional[Union[bool, int, List]] = None,
    sd: Optional[float] = 0.01,
) -> Tuple[List[torch.Tensor], Callable]:
    """Produces a tensor parameterized by an interpolated lower resolution tensor.

    This is like what is done in a laplacian pyramid, but a bit more general. It
    can be a powerful way to describe images.

    :param shape: desired shape of resulting tensor, should be of format (B, C, H, W) #TODO support more shapes
    :type shape: Union[List, Tuple, torch.Size]
    :param underlying_shape: shape of the tensor being resized into final tensor
    :type underlying_shape: Union[List, Tuple, torch.Size]
    :param offset: Describes how to offset the interpolated vector (like phase in a
        Fourier transform). If None, apply no offset. If int, apply the same
        offset to each dimension; if a list use each entry for each dimension.
        If False, do not offset. If True, offset by half the ratio between shape
        and underlying shape (analogous to 90 degrees), defaults to None.
        The caller's list is never modified.
    :type offset: Optional[Union[bool, int, List]], optional
    :param sd: Standard deviation of initial tensor variable., defaults to 0.01
    :type sd: Optional[float], optional
    :return: One-element list containing the low resolution tensor and the corresponding image function returning the tensor on call.
    :rtype: Tuple[List[torch.Tensor], Callable]
    """
    if isinstance(offset, float):
        raise TypeError('Passing float offset is deprecated!')
    # TODO pass device as argument to avoid mixing devices
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    underlying_t = (torch.randn(*underlying_shape) * sd).to(device).requires_grad_(True)
    if offset is not None:
        # Normalize `offset` into a list with one integer entry per dimension.
        if not isinstance(offset, list):
            offset = len(shape) * [offset]
        else:
            # Work on a copy: the previous implementation mutated the
            # caller's list in place, a surprising side effect.
            offset = list(offset)
        for n in range(len(offset)):
            if offset[n] is True:
                # True means: half the up-sampling ratio for this dimension.
                offset[n] = shape[n] / underlying_shape[n] / 2
            if offset[n] is False:
                offset[n] = 0
            offset[n] = int(offset[n])

    def inner():
        # Trilinear interpolation over (batch, height, width); channels ride
        # along as the 5-D "channel" axis.
        t = torch.nn.functional.interpolate(einops.rearrange(underlying_t, 'b c h w -> 1 c b h w'), (shape[0], shape[2], shape[3]), mode="trilinear")
        t = einops.rearrange(t, 'dummy c b h w -> (dummy b) c h w')
        if offset is not None:
            # Actually apply offset by padding and then cropping off the excess.
            t = F.pad(t, offset, "reflect")
            t = t[:shape[0], :shape[1], :shape[2], :shape[3]]
        return t

    return [underlying_t], inner
"torch.nn.functional.pad",
"torch.cuda.is_available",
"einops.rearrange",
"torch.randn"
] | [((3394, 3449), 'einops.rearrange', 'einops.rearrange', (['t', '"""dummy c b h w -> (dummy b) c h w"""'], {}), "(t, 'dummy c b h w -> (dummy b) c h w')\n", (3410, 3449), False, 'import einops\n'), ((2655, 2680), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2678, 2680), False, 'import torch\n'), ((3276, 3330), 'einops.rearrange', 'einops.rearrange', (['underlying_t', '"""b c h w -> 1 c b h w"""'], {}), "(underlying_t, 'b c h w -> 1 c b h w')\n", (3292, 3330), False, 'import einops\n'), ((3578, 3605), 'torch.nn.functional.pad', 'F.pad', (['t', 'offset', '"""reflect"""'], {}), "(t, offset, 'reflect')\n", (3583, 3605), True, 'import torch.nn.functional as F\n'), ((2713, 2743), 'torch.randn', 'torch.randn', (['*underlying_shape'], {}), '(*underlying_shape)\n', (2724, 2743), False, 'import torch\n')] |
#!/usr/bin/env python
from flask import (
render_template, Response, Blueprint, request, session, current_app
)
import time
from .camera import Camera
from .auth import login_required
from .db import get_db_by_config,get_db
from . import history_records
from . import intruding_records
# Blueprint holding all monitoring-video routes in this module.
bp = Blueprint('video', __name__)
'''
一些需要全局使用的变量
'''
# Module-level state shared across requests ("variables needed globally",
# per the note above).  NOTE(review): these globals are shared by every
# user/session of this worker process -- confirm that is intended.
box_selection=[0,0,0,0]
old_box_selection=[0,0,0,0]
# Last known frame dimensions, published by gen(); used to rescale the
# box selection coming from the 900px-wide front-end preview.
image_width=1
image_height=1
@bp.route('/', methods=('GET', 'POST'))
@login_required
def video_html():
    '''Render the monitoring page and handle its settings forms.'''
    # Seed per-session defaults on first visit.
    session.setdefault('ip', "0")
    session.setdefault('camera_id', '0')
    session.setdefault('task', "face_recognition")
    session.setdefault('interval', 5)
    session.setdefault('threshold', 0.6)
    global box_selection, old_box_selection
    box_selection = old_box_selection
    if request.method == 'POST':
        form_type = request.form['form_type']
        if form_type == 'box_selection':
            # The front-end reports coordinates on a 900px-wide preview;
            # rescale them to actual frame pixels.
            coords = request.form['box_selection'].split('_')
            box_selection = [int(int(c) * image_width / 900) for c in coords]
            # Refresh the records stream when the box was cleared or changed.
            whether_update = (box_selection == [0, 0, 0, 0]
                              or box_selection != old_box_selection)
            old_box_selection = box_selection
            records_feed(whether_update)
        elif form_type == "ip":
            session['ip'] = request.form['ip']
            session['camera_id'] = request.form['camera_id']
        elif form_type == 'task':
            session['task'] = request.form['task']
        elif form_type == 'interval':
            session['interval'] = request.form['interval']
        elif form_type == 'threshold_select':
            session['threshold'] = request.form['threshold_select']
    return render_template('video.html',
                           ip=session.get("ip"), camera_id=session.get("camera_id"),
                           task=session.get('task'), interval=session.get('interval'),
                           threshold=session.get('threshold'))
def gen(camera,config,user_id, camera_id,process,interval):
    '''Camera frame generator: yields an MJPEG multipart byte stream.

    Besides streaming, each processed frame's detections are written to
    the database (recognition hits and object enter/leave events).
    '''
    global image_width
    global image_height
    while True:
        time.sleep(0.01) # throttle: push roughly one frame every 0.01 s
        # get_frame also reports detections; the frame size is published
        # through the module-level globals (used for box rescaling).
        frame, criminal_ids,enter_items_label,leave_items_label,image_width,image_height = camera.get_frame(process=process)
        db = get_db_by_config(config)
        for criminal_id in criminal_ids:
            history_records.produce_record(db, criminal_id=criminal_id, user_id=user_id,
                                           camera_id=camera_id,interval=interval)
        # Item labels are "<item>_<id>" strings; split into name and numeric id.
        for enter_item in enter_items_label:
            intruding_records.produce_record(db,item=enter_item.split('_')[0],item_id=int(enter_item.split('_')[1]),
                                             user_id=user_id,camera_id=camera_id)
        for leave_item in leave_items_label:
            intruding_records.add_leave_time(db,item=leave_item.split('_')[0],item_id=int(leave_item.split('_')[1]))
        # One multipart/x-mixed-replace chunk per JPEG frame.
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@bp.route('/video_feed')
def video_feed():
    '''Stream the live video portion of the monitoring page.'''
    camera = Camera(ip=session.get('ip'))
    user_id = session.get('user_id')
    threshold = float(session.get('threshold'))
    # How each frame should be processed: active task, selection box, threshold.
    process = {session.get('task'): 1, 'box': box_selection, 'threshold': threshold}
    if not camera.has_opened():
        # Opening the configured source failed; fall back to the default camera.
        session['ip'] = "0"
        camera = Camera("0")
    frame_stream = gen(camera, config=current_app.config['DATABASE'], user_id=user_id,
                       camera_id=session['camera_id'], process=process,
                       interval=session.get('interval'))
    return Response(frame_stream, mimetype='multipart/x-mixed-replace; boundary=frame')
@bp.route('/records_feed')
def records_feed(whether_update=False):
    '''Stream alert records for the monitoring page as server-sent events.'''
    user_id = session.get("user_id")
    task = session.get('task')
    if task == 'face_recognition':
        generator = history_records.RecordsGenerator(
            user_id=user_id, db_config=current_app.config['DATABASE'])
        return Response(generator, mimetype='text/event-stream')
    if task == 'object_track':
        generator = intruding_records.RecordsGenerator(
            user_id=user_id, db_config=current_app.config['DATABASE'],
            whether_update=whether_update)
        return Response(generator, mimetype='text/event-stream')
| [
"flask.session.get",
"flask.Blueprint",
"flask.session.setdefault",
"time.sleep"
] | [((296, 324), 'flask.Blueprint', 'Blueprint', (['"""video"""', '__name__'], {}), "('video', __name__)\n", (305, 324), False, 'from flask import render_template, Response, Blueprint, request, session, current_app\n'), ((523, 552), 'flask.session.setdefault', 'session.setdefault', (['"""ip"""', '"""0"""'], {}), "('ip', '0')\n", (541, 552), False, 'from flask import render_template, Response, Blueprint, request, session, current_app\n'), ((557, 593), 'flask.session.setdefault', 'session.setdefault', (['"""camera_id"""', '"""0"""'], {}), "('camera_id', '0')\n", (575, 593), False, 'from flask import render_template, Response, Blueprint, request, session, current_app\n'), ((598, 644), 'flask.session.setdefault', 'session.setdefault', (['"""task"""', '"""face_recognition"""'], {}), "('task', 'face_recognition')\n", (616, 644), False, 'from flask import render_template, Response, Blueprint, request, session, current_app\n'), ((649, 682), 'flask.session.setdefault', 'session.setdefault', (['"""interval"""', '(5)'], {}), "('interval', 5)\n", (667, 682), False, 'from flask import render_template, Response, Blueprint, request, session, current_app\n'), ((687, 723), 'flask.session.setdefault', 'session.setdefault', (['"""threshold"""', '(0.6)'], {}), "('threshold', 0.6)\n", (705, 723), False, 'from flask import render_template, Response, Blueprint, request, session, current_app\n'), ((3179, 3201), 'flask.session.get', 'session.get', (['"""user_id"""'], {}), "('user_id')\n", (3190, 3201), False, 'from flask import render_template, Response, Blueprint, request, session, current_app\n'), ((3810, 3832), 'flask.session.get', 'session.get', (['"""user_id"""'], {}), "('user_id')\n", (3821, 3832), False, 'from flask import render_template, Response, Blueprint, request, session, current_app\n'), ((3842, 3861), 'flask.session.get', 'session.get', (['"""task"""'], {}), "('task')\n", (3853, 3861), False, 'from flask import render_template, Response, Blueprint, request, session, 
current_app\n'), ((2173, 2189), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (2183, 2189), False, 'import time\n'), ((3222, 3246), 'flask.session.get', 'session.get', (['"""threshold"""'], {}), "('threshold')\n", (3233, 3246), False, 'from flask import render_template, Response, Blueprint, request, session, current_app\n'), ((3263, 3282), 'flask.session.get', 'session.get', (['"""task"""'], {}), "('task')\n", (3274, 3282), False, 'from flask import render_template, Response, Blueprint, request, session, current_app\n'), ((1811, 1828), 'flask.session.get', 'session.get', (['"""ip"""'], {}), "('ip')\n", (1822, 1828), False, 'from flask import render_template, Response, Blueprint, request, session, current_app\n'), ((1842, 1866), 'flask.session.get', 'session.get', (['"""camera_id"""'], {}), "('camera_id')\n", (1853, 1866), False, 'from flask import render_template, Response, Blueprint, request, session, current_app\n'), ((1900, 1919), 'flask.session.get', 'session.get', (['"""task"""'], {}), "('task')\n", (1911, 1919), False, 'from flask import render_template, Response, Blueprint, request, session, current_app\n'), ((1930, 1953), 'flask.session.get', 'session.get', (['"""interval"""'], {}), "('interval')\n", (1941, 1953), False, 'from flask import render_template, Response, Blueprint, request, session, current_app\n'), ((1992, 2016), 'flask.session.get', 'session.get', (['"""threshold"""'], {}), "('threshold')\n", (2003, 2016), False, 'from flask import render_template, Response, Blueprint, request, session, current_app\n'), ((3146, 3163), 'flask.session.get', 'session.get', (['"""ip"""'], {}), "('ip')\n", (3157, 3163), False, 'from flask import render_template, Response, Blueprint, request, session, current_app\n'), ((3603, 3626), 'flask.session.get', 'session.get', (['"""interval"""'], {}), "('interval')\n", (3614, 3626), False, 'from flask import render_template, Response, Blueprint, request, session, current_app\n')] |
from typing import TYPE_CHECKING, Dict, List, Union, Optional, Any, Tuple
import json
from logging import getLogger, Logger
import pg8000 # type: ignore
import pyarrow as pa # type: ignore
from boto3 import client # type: ignore
from awswrangler import data_types
from awswrangler.exceptions import (RedshiftLoadError, InvalidDataframeType, InvalidRedshiftDiststyle,
InvalidRedshiftDistkey, InvalidRedshiftSortstyle, InvalidRedshiftSortkey,
InvalidRedshiftPrimaryKeys)
if TYPE_CHECKING:
from awswrangler.session import Session
# Module-level logger.
logger: Logger = getLogger(__name__)
# Distribution styles accepted by Redshift's CREATE TABLE DISTSTYLE clause.
DISTSTYLES = [
    "AUTO",
    "EVEN",
    "ALL",
    "KEY",
]
# Sort styles accepted by Redshift's CREATE TABLE SORTKEY clause.
SORTSTYLES = [
    "COMPOUND",
    "INTERLEAVED",
]
class Redshift:
    """Load/unload helpers for Amazon Redshift.

    Wraps connection management (pg8000), table creation from dataframe
    schemas, COPY of S3 Parquet manifests into tables and UNLOAD of query
    results back to S3 as Parquet.
    """
    def __init__(self, session):
        """
        :param session: awswrangler Session providing the boto3 session/config.
        """
        self._session: Session = session
        # Dedicated S3 client, used to write COPY manifest files.
        self._client_s3: client = session.boto3_session.client(service_name="s3",
                                                               use_ssl=True,
                                                               config=session.botocore_config)

    @staticmethod
    def _validate_connection(database,
                             host,
                             port,
                             user,
                             password,
                             tcp_keepalive=True,
                             application_name="aws-data-wrangler-validation",
                             validation_timeout=5):
        """Open and immediately close a connection to fail fast on a bad host/credentials."""
        conn = pg8000.connect(database=database,
                              host=host,
                              port=int(port),
                              user=user,
                              password=password,
                              ssl=True,
                              application_name=application_name,
                              tcp_keepalive=tcp_keepalive,
                              timeout=validation_timeout)
        conn.close()

    @staticmethod
    def generate_connection(database,
                            host,
                            port,
                            user,
                            password,
                            tcp_keepalive=True,
                            application_name="aws-data-wrangler",
                            connection_timeout=1_200_000,
                            statement_timeout=1_200_000,
                            validation_timeout=5):
        """
        Generates a valid connection object to be passed to the load_table method

        :param database: The name of the database instance to connect with.
        :param host: The hostname of the Redshift server to connect with.
        :param port: The TCP/IP port of the Redshift server instance.
        :param user: The username to connect to the Redshift server with.
        :param password: The user password to connect to the server with.
        :param tcp_keepalive: If True then use TCP keepalive
        :param application_name: Application name
        :param connection_timeout: Connection Timeout
        :param statement_timeout: Redshift statements timeout
        :param validation_timeout: Timeout to try to validate the connection
        :return: pg8000 connection
        """
        # Probe with a short timeout first so a bad host/credential fails
        # quickly instead of waiting out connection_timeout.
        Redshift._validate_connection(database=database,
                                      host=host,
                                      port=port,
                                      user=user,
                                      password=password,
                                      tcp_keepalive=tcp_keepalive,
                                      application_name=application_name,
                                      validation_timeout=validation_timeout)
        conn = pg8000.connect(database=database,
                              host=host,
                              port=int(port),
                              user=user,
                              password=password,
                              ssl=True,
                              application_name=application_name,
                              tcp_keepalive=tcp_keepalive,
                              timeout=connection_timeout)
        cursor = conn.cursor()
        cursor.execute(f"set statement_timeout = {statement_timeout}")
        conn.commit()
        cursor.close()
        return conn

    def get_connection(self, glue_connection):
        """Build a connection from a Glue connection's JDBC properties."""
        conn_details = self._session.glue.get_connection_details(name=glue_connection)
        props = conn_details["ConnectionProperties"]
        # JDBC URL format: jdbc:redshift://<host>:<port>/<database>
        host = props["JDBC_CONNECTION_URL"].split(":")[2].replace("/", "")
        port, database = props["JDBC_CONNECTION_URL"].split(":")[3].split("/")
        user = props["USERNAME"]
        password = props["PASSWORD"]
        conn = self.generate_connection(database=database, host=host, port=int(port), user=user, password=password)
        return conn

    def write_load_manifest(
            self,
            manifest_path: str,
            objects_paths: List[str],
            procs_io_bound: Optional[int] = None) -> Dict[str, List[Dict[str, Union[str, bool, Dict[str, int]]]]]:
        """Write a Redshift COPY manifest file to S3 listing *objects_paths*.

        :param manifest_path: S3 path of the manifest to write (s3://...)
        :param objects_paths: S3 paths of the data files to list
        :param procs_io_bound: Parallelism for the size lookups
        :return: The manifest structure that was written
        """
        objects_sizes: Dict[str, int] = self._session.s3.get_objects_sizes(objects_paths=objects_paths,
                                                                           procs_io_bound=procs_io_bound)
        manifest: Dict[str, List[Dict[str, Union[str, bool, Dict[str, int]]]]] = {"entries": []}
        path: str
        size: int
        for path, size in objects_sizes.items():
            # content_length lets Redshift verify each file during COPY.
            entry: Dict[str, Union[str, bool, Dict[str, int]]] = {
                "url": path,
                "mandatory": True,
                "meta": {
                    "content_length": size
                }
            }
            manifest["entries"].append(entry)
        payload: str = json.dumps(manifest)
        bucket: str
        bucket, path = manifest_path.replace("s3://", "").split("/", 1)
        logger.info(f"payload: {payload}")
        self._client_s3.put_object(Body=payload, Bucket=bucket, Key=path)
        return manifest

    @staticmethod
    def get_number_of_slices(redshift_conn):
        """Return the number of slices in the cluster (parallel COPY units)."""
        cursor = redshift_conn.cursor()
        cursor.execute("SELECT COUNT(*) as count_slices FROM (SELECT DISTINCT node, slice from STV_SLICES)")
        count_slices = cursor.fetchall()[0][0]
        cursor.close()
        return count_slices

    @staticmethod
    def load_table(dataframe,
                   dataframe_type,
                   manifest_path,
                   schema_name,
                   table_name,
                   redshift_conn,
                   num_files,
                   iam_role,
                   diststyle="AUTO",
                   distkey=None,
                   sortstyle="COMPOUND",
                   sortkey=None,
                   primary_keys: Optional[List[str]] = None,
                   mode="append",
                   preserve_index=False,
                   cast_columns=None):
        """
        Load Parquet files into a Redshift table using a manifest file.
        Creates the table if necessary.

        :param dataframe: Pandas or Spark Dataframe
        :param dataframe_type: "pandas" or "spark"
        :param manifest_path: S3 path for manifest file (E.g. S3://...)
        :param schema_name: Redshift schema
        :param table_name: Redshift table name
        :param redshift_conn: A PEP 249 compatible connection (Can be generated with Redshift.generate_connection())
        :param num_files: Number of files to be loaded
        :param iam_role: AWS IAM role with the related permissions
        :param diststyle: Redshift distribution styles. Must be in ["AUTO", "EVEN", "ALL", "KEY"] (https://docs.aws.amazon.com/redshift/latest/dg/t_Distributing_data.html)
        :param distkey: Specifies a column name or positional number for the distribution key
        :param sortstyle: Sorting can be "COMPOUND" or "INTERLEAVED" (https://docs.aws.amazon.com/redshift/latest/dg/t_Sorting_data.html)
        :param sortkey: List of columns to be sorted
        :param primary_keys: Primary keys
        :param mode: append, overwrite or upsert
        :param preserve_index: Should we preserve the Dataframe index? (ONLY for Pandas Dataframe)
        :param cast_columns: Dictionary of columns names and Redshift types to be casted. (E.g. {"col name": "INT", "col2 name": "FLOAT"})
        :return: None
        """
        final_table_name: Optional[str] = None
        temp_table_name: Optional[str] = None
        with redshift_conn.cursor() as cursor:
            if mode == "overwrite":
                # Drop & recreate the destination table, then COPY into it.
                Redshift._create_table(cursor=cursor,
                                       dataframe=dataframe,
                                       dataframe_type=dataframe_type,
                                       schema_name=schema_name,
                                       table_name=table_name,
                                       diststyle=diststyle,
                                       distkey=distkey,
                                       sortstyle=sortstyle,
                                       sortkey=sortkey,
                                       primary_keys=primary_keys,
                                       preserve_index=preserve_index,
                                       cast_columns=cast_columns)
                table_name = f"{schema_name}.{table_name}"
            elif mode == "upsert":
                # COPY into a session-scoped temp table first; merged below.
                guid: str = pa.compat.guid()
                temp_table_name = f"temp_redshift_{guid}"
                final_table_name = table_name
                table_name = temp_table_name
                sql: str = f"CREATE TEMPORARY TABLE {temp_table_name} (LIKE {schema_name}.{final_table_name})"
                logger.debug(sql)
                cursor.execute(sql)
            else:
                table_name = f"{schema_name}.{table_name}"

            sql = ("-- AWS DATA WRANGLER\n"
                   f"COPY {table_name} FROM '{manifest_path}'\n"
                   f"IAM_ROLE '{iam_role}'\n"
                   "MANIFEST\n"
                   "FORMAT AS PARQUET")
            logger.debug(sql)
            cursor.execute(sql)
            cursor.execute("-- AWS DATA WRANGLER\n SELECT pg_last_copy_id() AS query_id")
            query_id = cursor.fetchall()[0][0]
            # Verify every manifest file was actually committed by the COPY.
            sql = ("-- AWS DATA WRANGLER\n"
                   f"SELECT COUNT(DISTINCT filename) as num_files_loaded "
                   f"FROM STL_LOAD_COMMITS "
                   f"WHERE query = {query_id}")
            logger.debug(sql)
            cursor.execute(sql)
            num_files_loaded = cursor.fetchall()[0][0]
            if num_files_loaded != num_files:
                redshift_conn.rollback()
                raise RedshiftLoadError(
                    f"Redshift load rollbacked. {num_files_loaded} files counted. {num_files} expected.")

            if (mode == "upsert") and (final_table_name is not None):
                if not primary_keys:
                    primary_keys = Redshift.get_primary_keys(connection=redshift_conn,
                                                              schema=schema_name,
                                                              table=final_table_name)
                if not primary_keys:
                    raise InvalidRedshiftPrimaryKeys()
                # Delete-then-insert merge keyed on the primary keys.
                equals_clause = f"{final_table_name}.%s = {temp_table_name}.%s"
                join_clause = " AND ".join([equals_clause % (pk, pk) for pk in primary_keys])
                sql = f"DELETE FROM {schema_name}.{final_table_name} USING {temp_table_name} WHERE {join_clause}"
                logger.debug(sql)
                cursor.execute(sql)
                sql = f"INSERT INTO {schema_name}.{final_table_name} SELECT * FROM {temp_table_name}"
                logger.debug(sql)
                cursor.execute(sql)

        redshift_conn.commit()

    @staticmethod
    def _create_table(cursor,
                      dataframe,
                      dataframe_type,
                      schema_name,
                      table_name,
                      diststyle="AUTO",
                      distkey=None,
                      sortstyle="COMPOUND",
                      sortkey=None,
                      primary_keys: Optional[List[str]] = None,
                      preserve_index=False,
                      cast_columns=None):
        """
        Creates Redshift table.

        :param cursor: A PEP 249 compatible cursor
        :param dataframe: Pandas or Spark Dataframe
        :param dataframe_type: "pandas" or "spark"
        :param schema_name: Redshift schema
        :param table_name: Redshift table name
        :param diststyle: Redshift distribution styles. Must be in ["AUTO", "EVEN", "ALL", "KEY"] (https://docs.aws.amazon.com/redshift/latest/dg/t_Distributing_data.html)
        :param distkey: Specifies a column name or positional number for the distribution key
        :param sortstyle: Sorting can be "COMPOUND" or "INTERLEAVED" (https://docs.aws.amazon.com/redshift/latest/dg/t_Sorting_data.html)
        :param sortkey: List of columns to be sorted
        :param primary_keys: Primary keys
        :param preserve_index: Should we preserve the Dataframe index? (ONLY for Pandas Dataframe)
        :param cast_columns: Dictionary of columns names and Redshift types to be casted. (E.g. {"col name": "INT", "col2 name": "FLOAT"})
        :return: None
        """
        sql = f"-- AWS DATA WRANGLER\n" \
              f"DROP TABLE IF EXISTS {schema_name}.{table_name}"
        logger.debug(f"Drop table query:\n{sql}")
        cursor.execute(sql)
        schema = Redshift._get_redshift_schema(
            dataframe=dataframe,
            dataframe_type=dataframe_type,
            preserve_index=preserve_index,
            cast_columns=cast_columns,
        )
        diststyle = diststyle.upper() if diststyle else "AUTO"
        sortstyle = sortstyle.upper() if sortstyle else "COMPOUND"
        Redshift._validate_parameters(schema=schema,
                                      diststyle=diststyle,
                                      distkey=distkey,
                                      sortstyle=sortstyle,
                                      sortkey=sortkey)
        cols_str: str = "".join([f"{col[0]} {col[1]},\n" for col in schema])[:-2]
        primary_keys_str: str = ""
        if primary_keys:
            primary_keys_str = f",\nPRIMARY KEY ({', '.join(primary_keys)})"
        distkey_str: str = ""
        if distkey and diststyle == "KEY":
            distkey_str = f"\nDISTKEY({distkey})"
        sortkey_str: str = ""
        if sortkey:
            sortkey_str = f"\n{sortstyle} SORTKEY({','.join(sortkey)})"
        sql = (f"-- AWS DATA WRANGLER\n"
               f"CREATE TABLE IF NOT EXISTS {schema_name}.{table_name} (\n"
               f"{cols_str}"
               f"{primary_keys_str}"
               f")\nDISTSTYLE {diststyle}"
               f"{distkey_str}"
               f"{sortkey_str}")
        logger.debug(f"Create table query:\n{sql}")
        cursor.execute(sql)

    @staticmethod
    def get_primary_keys(connection, schema, table):
        """
        Get PKs

        :param connection: A PEP 249 compatible connection (Can be generated with Redshift.generate_connection())
        :param schema: Schema name
        :param table: Redshift table name
        :return: PKs list List[str]
        """
        cursor = connection.cursor()
        cursor.execute(f"SELECT indexdef FROM pg_indexes WHERE schemaname = '{schema}' AND tablename = '{table}'")
        result = cursor.fetchall()[0][0]
        # indexdef looks like "... (col1, col2)"; pull the column names out.
        rfields = result.split('(')[1].strip(')').split(',')
        fields = [field.strip().strip('"') for field in rfields]
        cursor.close()
        return fields

    @staticmethod
    def _validate_parameters(schema, diststyle, distkey, sortstyle, sortkey):
        """
        Validates the sanity of Redshift's parameters

        :param schema: List of tuples (column name, column type)
        :param diststyle: Redshift distribution styles. Must be in ["AUTO", "EVEN", "ALL", "KEY"]
        https://docs.aws.amazon.com/redshift/latest/dg/t_Distributing_data.html
        :param distkey: Specifies a column name or positional number for the distribution key
        :param sortstyle: Sorting can be "COMPOUND" or "INTERLEAVED"
        https://docs.aws.amazon.com/redshift/latest/dg/t_Sorting_data.html
        :param sortkey: List of columns to be sorted
        :return: None
        """
        if diststyle not in DISTSTYLES:
            raise InvalidRedshiftDiststyle(f"diststyle must be in {DISTSTYLES}")
        cols = [x[0] for x in schema]
        logger.debug(f"Redshift columns: {cols}")
        if (diststyle == "KEY") and (not distkey):
            raise InvalidRedshiftDistkey("You must pass a distkey if you intend to use KEY diststyle")
        if distkey and distkey not in cols:
            raise InvalidRedshiftDistkey(f"distkey ({distkey}) must be in the columns list: {cols})")
        if sortstyle and sortstyle not in SORTSTYLES:
            raise InvalidRedshiftSortstyle(f"sortstyle must be in {SORTSTYLES}")
        if sortkey:
            # isinstance instead of type(...) != list (the idiomatic check).
            if not isinstance(sortkey, list):
                raise InvalidRedshiftSortkey(f"sortkey must be a List of items in the columns list: {cols}. "
                                             f"Currently value: {sortkey}")
            for key in sortkey:
                if key not in cols:
                    raise InvalidRedshiftSortkey(f"sortkey must be a List of items in the columns list: {cols}. "
                                                 f"Currently value: {key}")

    @staticmethod
    def _get_redshift_schema(dataframe,
                             dataframe_type: str,
                             preserve_index: bool = False,
                             cast_columns=None) -> List[Tuple[str, str]]:
        """Derive a Redshift schema (name, type) list from a dataframe."""
        if cast_columns is None:
            cast_columns = {}
        schema_built: List[Tuple[str, str]] = []
        if dataframe_type.lower() == "pandas":
            pyarrow_schema = data_types.extract_pyarrow_schema_from_pandas(dataframe=dataframe,
                                                                           preserve_index=preserve_index,
                                                                           indexes_position="right")
            for name, dtype in pyarrow_schema:
                if (cast_columns is not None) and (name in cast_columns.keys()):
                    schema_built.append((name, cast_columns[name]))
                else:
                    redshift_type = data_types.pyarrow2redshift(dtype)
                    schema_built.append((name, redshift_type))
        elif dataframe_type.lower() == "spark":
            for name, dtype in dataframe.dtypes:
                if name in cast_columns.keys():
                    redshift_type = data_types.athena2redshift(cast_columns[name])
                else:
                    redshift_type = data_types.spark2redshift(dtype)
                schema_built.append((name, redshift_type))
        else:
            raise InvalidDataframeType(
                f"{dataframe_type} is not a valid DataFrame type. Please use 'pandas' or 'spark'!")
        return schema_built

    def to_parquet(self,
                   sql: str,
                   path: str,
                   iam_role: str,
                   connection: Any,
                   partition_cols: Optional[List] = None) -> List[str]:
        """
        Write a query result as parquet files on S3

        :param sql: SQL Query
        :param path: AWS S3 path to write the data (e.g. s3://...)
        :param iam_role: AWS IAM role with the related permissions
        :param connection: A PEP 249 compatible connection (Can be generated with Redshift.generate_connection())
        :param partition_cols: Specifies the partition keys for the unload operation.
        """
        # UNLOAD wraps the query in single quotes, so embedded single quotes
        # must be doubled ("''").  The previous code used "\'" which Python
        # parses as just "'", i.e. it escaped nothing.
        sql = sql.replace("'", "''").replace(";", "")
        path = path if path[-1] == "/" else path + "/"
        cursor: Any = connection.cursor()
        partition_str: str = ""
        manifest_str: str = ""
        if partition_cols is not None:
            partition_str = f"PARTITION BY ({','.join(partition_cols)})\n"
        else:
            # Without partitions a manifest is produced so the output files
            # can be enumerated reliably below.
            manifest_str = "\nmanifest"
        query: str = f"-- AWS DATA WRANGLER\n" \
                     f"UNLOAD ('{sql}')\n" \
                     f"TO '{path}'\n" \
                     f"IAM_ROLE '{iam_role}'\n" \
                     f"ALLOWOVERWRITE\n" \
                     f"PARALLEL ON\n" \
                     f"ENCRYPTED \n" \
                     f"{partition_str}" \
                     f"FORMAT PARQUET" \
                     f"{manifest_str};"
        logger.debug(f"query:\n{query}")
        cursor.execute(query)
        query = "-- AWS DATA WRANGLER\nSELECT pg_last_query_id() AS query_id"
        logger.debug(f"query:\n{query}")
        cursor.execute(query)
        query_id = cursor.fetchall()[0][0]
        query = f"-- AWS DATA WRANGLER\n" \
                f"SELECT path FROM STL_UNLOAD_LOG WHERE query={query_id};"
        logger.debug(f"query:\n{query}")
        cursor.execute(query)
        paths: List[str] = [row[0].replace(" ", "") for row in cursor.fetchall()]
        logger.debug(f"paths: {paths}")
        connection.commit()
        cursor.close()
        # Wait for S3 eventual consistency before returning the paths.
        if manifest_str != "":
            self._session.s3.wait_object_exists(path=f"{path}manifest", timeout=30.0)
            for p in paths:
                self._session.s3.wait_object_exists(path=p, timeout=30.0)
        return paths
| [
"logging.getLogger",
"awswrangler.data_types.extract_pyarrow_schema_from_pandas",
"awswrangler.data_types.spark2redshift",
"awswrangler.exceptions.InvalidDataframeType",
"json.dumps",
"awswrangler.exceptions.InvalidRedshiftSortkey",
"awswrangler.exceptions.InvalidRedshiftPrimaryKeys",
"awswrangler.exc... | [((627, 646), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (636, 646), False, 'from logging import getLogger, Logger\n'), ((5818, 5838), 'json.dumps', 'json.dumps', (['manifest'], {}), '(manifest)\n', (5828, 5838), False, 'import json\n'), ((16688, 16750), 'awswrangler.exceptions.InvalidRedshiftDiststyle', 'InvalidRedshiftDiststyle', (['f"""diststyle must be in {DISTSTYLES}"""'], {}), "(f'diststyle must be in {DISTSTYLES}')\n", (16712, 16750), False, 'from awswrangler.exceptions import RedshiftLoadError, InvalidDataframeType, InvalidRedshiftDiststyle, InvalidRedshiftDistkey, InvalidRedshiftSortstyle, InvalidRedshiftSortkey, InvalidRedshiftPrimaryKeys\n'), ((16908, 16997), 'awswrangler.exceptions.InvalidRedshiftDistkey', 'InvalidRedshiftDistkey', (['"""You must pass a distkey if you intend to use KEY diststyle"""'], {}), "(\n 'You must pass a distkey if you intend to use KEY diststyle')\n", (16930, 16997), False, 'from awswrangler.exceptions import RedshiftLoadError, InvalidDataframeType, InvalidRedshiftDiststyle, InvalidRedshiftDistkey, InvalidRedshiftSortstyle, InvalidRedshiftSortkey, InvalidRedshiftPrimaryKeys\n'), ((17055, 17143), 'awswrangler.exceptions.InvalidRedshiftDistkey', 'InvalidRedshiftDistkey', (['f"""distkey ({distkey}) must be in the columns list: {cols})"""'], {}), "(\n f'distkey ({distkey}) must be in the columns list: {cols})')\n", (17077, 17143), False, 'from awswrangler.exceptions import RedshiftLoadError, InvalidDataframeType, InvalidRedshiftDiststyle, InvalidRedshiftDistkey, InvalidRedshiftSortstyle, InvalidRedshiftSortkey, InvalidRedshiftPrimaryKeys\n'), ((17211, 17273), 'awswrangler.exceptions.InvalidRedshiftSortstyle', 'InvalidRedshiftSortstyle', (['f"""sortstyle must be in {SORTSTYLES}"""'], {}), "(f'sortstyle must be in {SORTSTYLES}')\n", (17235, 17273), False, 'from awswrangler.exceptions import RedshiftLoadError, InvalidDataframeType, InvalidRedshiftDiststyle, InvalidRedshiftDistkey, 
InvalidRedshiftSortstyle, InvalidRedshiftSortkey, InvalidRedshiftPrimaryKeys\n'), ((18206, 18333), 'awswrangler.data_types.extract_pyarrow_schema_from_pandas', 'data_types.extract_pyarrow_schema_from_pandas', ([], {'dataframe': 'dataframe', 'preserve_index': 'preserve_index', 'indexes_position': '"""right"""'}), "(dataframe=dataframe,\n preserve_index=preserve_index, indexes_position='right')\n", (18251, 18333), False, 'from awswrangler import data_types\n'), ((10757, 10870), 'awswrangler.exceptions.RedshiftLoadError', 'RedshiftLoadError', (['f"""Redshift load rollbacked. {num_files_loaded} files counted. {num_files} expected."""'], {}), "(\n f'Redshift load rollbacked. {num_files_loaded} files counted. {num_files} expected.'\n )\n", (10774, 10870), False, 'from awswrangler.exceptions import RedshiftLoadError, InvalidDataframeType, InvalidRedshiftDiststyle, InvalidRedshiftDistkey, InvalidRedshiftSortstyle, InvalidRedshiftSortkey, InvalidRedshiftPrimaryKeys\n'), ((17354, 17478), 'awswrangler.exceptions.InvalidRedshiftSortkey', 'InvalidRedshiftSortkey', (['f"""sortkey must be a List of items in the columns list: {cols}. Currently value: {sortkey}"""'], {}), "(\n f'sortkey must be a List of items in the columns list: {cols}. Currently value: {sortkey}'\n )\n", (17376, 17478), False, 'from awswrangler.exceptions import RedshiftLoadError, InvalidDataframeType, InvalidRedshiftDiststyle, InvalidRedshiftDistkey, InvalidRedshiftSortstyle, InvalidRedshiftSortkey, InvalidRedshiftPrimaryKeys\n'), ((19242, 19356), 'awswrangler.exceptions.InvalidDataframeType', 'InvalidDataframeType', (['f"""{dataframe_type} is not a valid DataFrame type. Please use \'pandas\' or \'spark\'!"""'], {}), '(\n f"{dataframe_type} is not a valid DataFrame type. 
Please use \'pandas\' or \'spark\'!"\n )\n', (19262, 19356), False, 'from awswrangler.exceptions import RedshiftLoadError, InvalidDataframeType, InvalidRedshiftDiststyle, InvalidRedshiftDistkey, InvalidRedshiftSortstyle, InvalidRedshiftSortkey, InvalidRedshiftPrimaryKeys\n'), ((9468, 9484), 'pyarrow.compat.guid', 'pa.compat.guid', ([], {}), '()\n', (9482, 9484), True, 'import pyarrow as pa\n'), ((11306, 11334), 'awswrangler.exceptions.InvalidRedshiftPrimaryKeys', 'InvalidRedshiftPrimaryKeys', ([], {}), '()\n', (11332, 11334), False, 'from awswrangler.exceptions import RedshiftLoadError, InvalidDataframeType, InvalidRedshiftDiststyle, InvalidRedshiftDistkey, InvalidRedshiftSortstyle, InvalidRedshiftSortkey, InvalidRedshiftPrimaryKeys\n'), ((17612, 17732), 'awswrangler.exceptions.InvalidRedshiftSortkey', 'InvalidRedshiftSortkey', (['f"""sortkey must be a List of items in the columns list: {cols}. Currently value: {key}"""'], {}), "(\n f'sortkey must be a List of items in the columns list: {cols}. Currently value: {key}'\n )\n", (17634, 17732), False, 'from awswrangler.exceptions import RedshiftLoadError, InvalidDataframeType, InvalidRedshiftDiststyle, InvalidRedshiftDistkey, InvalidRedshiftSortstyle, InvalidRedshiftSortkey, InvalidRedshiftPrimaryKeys\n'), ((18734, 18768), 'awswrangler.data_types.pyarrow2redshift', 'data_types.pyarrow2redshift', (['dtype'], {}), '(dtype)\n', (18761, 18768), False, 'from awswrangler import data_types\n'), ((19013, 19059), 'awswrangler.data_types.athena2redshift', 'data_types.athena2redshift', (['cast_columns[name]'], {}), '(cast_columns[name])\n', (19039, 19059), False, 'from awswrangler import data_types\n'), ((19118, 19150), 'awswrangler.data_types.spark2redshift', 'data_types.spark2redshift', (['dtype'], {}), '(dtype)\n', (19143, 19150), False, 'from awswrangler import data_types\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Distributed under the terms of the MIT License.
"""
Utilities.
Author: <NAME>
Date Created: 29 May 2020
"""
from rdkit.Chem import AllChem as rdkit
def update_from_rdkit_conf(stk_mol, rdk_mol, conf_id):
    """
    Update `stk_mol` coordinates from conformer `conf_id` of `rdk_mol`.

    Parameters
    ----------
    stk_mol : :class:`stk.Molecule`
        The molecule whose coordinates are to be updated.

    rdk_mol : :class:`rdkit.Mol`
        The :mod:`rdkit` molecule to take the coordinates from.

    conf_id : :class:`int`
        The conformer ID of `rdk_mol` to update from.

    Returns
    -------
    :class:`.Molecule`
        The molecule with updated coordinates.

    """
    conformer = rdk_mol.GetConformer(id=conf_id)
    return stk_mol.with_position_matrix(conformer.GetPositions())
def build_conformers(mol, N, ETKDG_version=None):
    """
    Convert stk mol into RDKit mol with N conformers.

    ETKDG_version allows the user to pick their choice of ETKDG params.

    `None` provides the settings used in ligand_combiner and unsymm.

    Other options:
        `v3`:
            New version from DOI: 10.1021/acs.jcim.0c00025
            with improved handling of macrocycles.

    Returns
    -------
    :class:`tuple`
        The conformer IDs and the embedded :mod:`rdkit` molecule.

    Raises
    ------
    :class:`ValueError`
        If `ETKDG_version` is not ``None`` or ``'v3'``.

    """
    molecule = mol.to_rdkit_mol()
    molecule.RemoveAllConformers()
    if ETKDG_version is None:
        cids = rdkit.EmbedMultipleConfs(
            mol=molecule,
            numConfs=N,
            randomSeed=1000,
            useExpTorsionAnglePrefs=True,
            useBasicKnowledge=True,
            numThreads=4,
        )
    elif ETKDG_version == 'v3':
        params = rdkit.ETKDGv3()
        params.randomSeed = 1000
        cids = rdkit.EmbedMultipleConfs(
            mol=molecule,
            numConfs=N,
            params=params
        )
    else:
        # Previously an unrecognised version fell through and raised a
        # confusing NameError on `cids`; fail fast with a clear message.
        raise ValueError(f'Unknown ETKDG_version: {ETKDG_version!r}')
    print(f'there are {molecule.GetNumConformers()} conformers')
    return cids, molecule
| [
"rdkit.Chem.AllChem.EmbedMultipleConfs",
"rdkit.Chem.AllChem.ETKDGv3"
] | [((1338, 1477), 'rdkit.Chem.AllChem.EmbedMultipleConfs', 'rdkit.EmbedMultipleConfs', ([], {'mol': 'molecule', 'numConfs': 'N', 'randomSeed': '(1000)', 'useExpTorsionAnglePrefs': '(True)', 'useBasicKnowledge': '(True)', 'numThreads': '(4)'}), '(mol=molecule, numConfs=N, randomSeed=1000,\n useExpTorsionAnglePrefs=True, useBasicKnowledge=True, numThreads=4)\n', (1362, 1477), True, 'from rdkit.Chem import AllChem as rdkit\n'), ((1607, 1622), 'rdkit.Chem.AllChem.ETKDGv3', 'rdkit.ETKDGv3', ([], {}), '()\n', (1620, 1622), True, 'from rdkit.Chem import AllChem as rdkit\n'), ((1671, 1736), 'rdkit.Chem.AllChem.EmbedMultipleConfs', 'rdkit.EmbedMultipleConfs', ([], {'mol': 'molecule', 'numConfs': 'N', 'params': 'params'}), '(mol=molecule, numConfs=N, params=params)\n', (1695, 1736), True, 'from rdkit.Chem import AllChem as rdkit\n')] |
from cv2 import cv2
from collections import Counter
from PIL import Image, ImageDraw, ImageFont
from scipy.fftpack import dct
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import gmpy2
import numpy as np
import time
import os
"""
Transparency
If putting the pixel with RGBA = (Ra, Ga, Ba, Aa) over the pixel with RGBA = (Rb, Gb, Bb, 100%)
we get the output color equal to (Ra*Aa + Rb*(100% - Aa), Ga*Aa + Gb*(100% - Aa), Ba*Aa + Bb*(100% - Aa), 100%)
Tested with Adobe Photoshop :) Works there
"""
GENERATED_PATH = os.path.join('icons', 'generated')
def phash(img, hash_size=8, factor=4):
    """
    Compute a perceptual (DCT) hash of an image, returned as an integer
    of ``hash_size * hash_size`` bits.

    The image is converted to grayscale, resized to
    ``hash_size * factor`` square, DCT-transformed, and the top-left
    ``hash_size x hash_size`` coefficients are thresholded at their
    median to produce the bit pattern.
    """
    img = np.array(img, dtype=np.uint8)
    img = Image.fromarray(img)
    image_size = hash_size * factor
    # BUG FIX: the convert/resize result used to be discarded, so the
    # hash was computed on channel 0 of the full-size image and
    # `factor` had no effect.  Hashes are only compared to each other,
    # so changing the computation stays internally consistent.
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10
    # (use Image.LANCZOS there) -- confirm the pinned Pillow version.
    img = img.convert('L').resize((image_size, image_size), Image.ANTIALIAS)
    pixels = np.asarray(img)  # 'L' mode is already 2-D
    dct_ = dct(dct(pixels, axis=0), axis=1)
    dct_ = dct_[:hash_size, :hash_size]
    med = np.median(dct_)
    diff = dct_ > med
    # Pack the boolean grid into one integer, first flattened bit = MSB.
    return sum((1 << i) * int(el) for i, el in enumerate(diff.flatten()))
def cluster_icons(clusters, hash_size=8):
    """
    Cluster the generated icon images by perceptual hash and return one
    representative image per cluster.

    Each returned image is an RGBA array; 3-channel (JPEG) inputs get a
    fully opaque alpha channel appended.
    """
    filenames = os.listdir(GENERATED_PATH)
    img_count = len(filenames)
    assert img_count > clusters, f'There are not enough images in "{GENERATED_PATH}"'
    bits = hash_size * hash_size
    X = np.zeros((img_count, bits), dtype=np.uint8)
    names = []
    for row, filename in enumerate(filenames):
        icon = cv2.imread(os.path.join(GENERATED_PATH, filename))
        names.append(filename)
        hashed = phash(icon)
        # Unpack the integer hash into a fixed-width bit vector, MSB first.
        X[row, :] = np.array(
            [(hashed >> shift) & 1 for shift in range(bits - 1, -1, -1)],
            np.uint8)
    kmeans = KMeans(n_jobs=-1, n_clusters=clusters)
    distances = kmeans.fit_transform(X)
    # For every cluster, the image closest to its centroid.
    representative_idx = np.argmin(distances, axis=0)
    imgs = []
    for idx in representative_idx:
        read = plt.imread(os.path.join(GENERATED_PATH, names[idx]))
        img = np.zeros((read.shape[0], read.shape[1], 4))
        img[:, :, :3] = read[:, :, :3]
        # add transparency (make PNG out of JPEG)
        if read.shape[-1] == 3:
            img[:, :, 3] = 1.0
        else:
            img[:, :, 3] = read[:, :, 3]
        imgs.append(img)
    return imgs
def icon_mapping(threads):
    """
    Map each thread number occurring in `threads` to a representative
    icon image; the most frequent numbers are assigned first.
    """
    counts = Counter(thread['number'] for thread in threads.flatten())
    icons = cluster_icons(len(counts))
    mapping = {}
    for (number, _), img in zip(counts.most_common(), icons):
        mapping[number] = img
    return mapping
def draw_pattern(image, threads):
    """
    Render the stitch pattern: one 32x32 icon tile per cell, composited
    over the cell's colour, with 2-pixel separators after every 10th
    cell.

    Returns the rendered RGB array and the number -> icon mapping used.
    """
    icons = icon_mapping(threads)
    factor = 32  # pixel size of one stitch cell
    h, w = len(image), len(image[0])
    # Output size = scaled grid plus 2 pixels per 10-cell separator line.
    new_h = h * factor + 2 * ((h * factor - 1) // (10 * factor))
    new_w = w * factor + 2 * ((w * factor - 1) // (10 * factor))
    pattern = np.zeros((new_h, new_w, 3))
    t0 = time.time()
    for y in range(h):
        # Shift the target row past the separators inserted so far.
        new_y = y * factor + (y//10) * 2 + 1
        for x, rgb, thread in zip(range(w), image[y], threads[y]):
            new_x = x * factor + (x // 10) * 2 + 1
            # Scale icon to 8-bit RGBA; assumes icon values are floats
            # in [0, 1] (plt.imread on PNG) -- TODO confirm for JPEGs.
            icon = (np.copy(icons[thread['number']]) * 255).astype(np.uint8)
            # Invert the glyph on dark cells so it stays visible.
            dark = not bool(np.mean(rgb[:3]) // 128)
            if dark:
                icon[:, :, :3] = 255 - icon[:, :, :3]
            # Composite the inner 30x30 of the icon over the cell colour
            # (alpha formula from the module header note).
            for y_offset in range(factor - 2):
                for x_offset in range(factor - 2):
                    alpha = icon[y_offset + 1, x_offset + 1, 3] / 255
                    pattern[new_y + y_offset, new_x + x_offset] = (
                        alpha * icon[y_offset + 1, x_offset + 1, :3]
                        + rgb * (1 - alpha))
    print("\nTime spent: ", round(time.time() - t0, 2))
    return pattern, icons
def draw_mapping(icons, threads):
    """
    Render the legend image: one 36-pixel row per thread, with the
    thread number on the left and its icon on a swatch of the thread's
    colour on the right.  Returns an RGBA numpy array.
    """
    icons_count = len(icons)
    h_line = 36  # height of one legend row in pixels
    h = icons_count * 36
    w = 300
    # Font lives at <project root>/fonts/arial.ttf relative to this file.
    prj_path = os.path.dirname(os.path.dirname(__file__))
    font_path = os.path.join(prj_path, 'fonts', 'arial.ttf')
    font = ImageFont.truetype(font_path, size=24)
    img = Image.new('RGBA', (w, h), (255, 255, 255, 255))
    new_img = ImageDraw.Draw(img)
    for i, (number, icon) in enumerate(icons.items()):
        text = f'{number}'
        # Vertically centre the label inside its 36-pixel row.
        icon_w, icon_h = new_img.textsize(text, font)
        coords = (50, h_line * i + (h_line-icon_h) // 2)
        new_img.text(coords, text, fill=(0, 0, 0), font=font)
    img = np.array(img)
    def find_rgb(number):
        # Linear scan for the first thread carrying this number.
        for thread in threads.flatten():
            if number == thread['number']:
                return thread['rgb']
        raise ValueError(f'No thread with number {number}')
    icon_h, icon_w = 32, 32
    for i, (number, icon_) in enumerate(icons.items()):
        r, g, b = find_rgb(number)
        # Solid colour swatch the icon is composited onto.
        icon = np.array(Image.new('RGBA', (icon_w, icon_h), (r, g, b, 255)))
        alpha = icon_[:, :, 3:4]
        # Invert the glyph on dark swatches; NOTE(review): `1 -` assumes
        # icon_ values are floats in [0, 1] (draw_pattern uses `255 -`
        # on uint8) -- confirm the icon dtype.
        dark = not bool(np.mean([r, g, b]) // 128)
        if dark:
            icon_[:, :, :3] = 1 - icon_[:, :, :3]
        # Alpha-composite: icon_ scaled to 8-bit over the swatch.
        icon = alpha * 255 * icon_ + (1 - alpha) * icon
        icon = icon.astype(np.uint8)
        coords = (w - 50 - icon_w, h_line * i + (h_line - icon_h) // 2)
        img[coords[1]:coords[1]+icon_h, coords[0]:coords[0]+icon_w, :] = icon
    return img
| [
"sklearn.cluster.KMeans",
"numpy.mean",
"PIL.Image.fromarray",
"numpy.median",
"os.listdir",
"numpy.copy",
"PIL.Image.new",
"os.path.join",
"PIL.ImageFont.truetype",
"numpy.asarray",
"numpy.array",
"numpy.zeros",
"PIL.ImageDraw.Draw",
"scipy.fftpack.dct",
"os.path.dirname",
"numpy.argm... | [((540, 574), 'os.path.join', 'os.path.join', (['"""icons"""', '"""generated"""'], {}), "('icons', 'generated')\n", (552, 574), False, 'import os\n'), ((626, 655), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (634, 655), True, 'import numpy as np\n'), ((666, 686), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (681, 686), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((920, 935), 'numpy.median', 'np.median', (['dct_'], {}), '(dct_)\n', (929, 935), True, 'import numpy as np\n'), ((1219, 1279), 'numpy.zeros', 'np.zeros', (['(img_count, hash_size * hash_size)'], {'dtype': 'np.uint8'}), '((img_count, hash_size * hash_size), dtype=np.uint8)\n', (1227, 1279), True, 'import numpy as np\n'), ((1634, 1672), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_jobs': '(-1)', 'n_clusters': 'clusters'}), '(n_jobs=-1, n_clusters=clusters)\n', (1640, 1672), False, 'from sklearn.cluster import KMeans\n'), ((1741, 1766), 'numpy.argmin', 'np.argmin', (['X_dist'], {'axis': '(0)'}), '(X_dist, axis=0)\n', (1750, 1766), True, 'import numpy as np\n'), ((2726, 2753), 'numpy.zeros', 'np.zeros', (['(new_h, new_w, 3)'], {}), '((new_h, new_w, 3))\n', (2734, 2753), True, 'import numpy as np\n'), ((2764, 2775), 'time.time', 'time.time', ([], {}), '()\n', (2773, 2775), False, 'import time\n'), ((3809, 3853), 'os.path.join', 'os.path.join', (['prj_path', '"""fonts"""', '"""arial.ttf"""'], {}), "(prj_path, 'fonts', 'arial.ttf')\n", (3821, 3853), False, 'import os\n'), ((3865, 3903), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['font_path'], {'size': '(24)'}), '(font_path, size=24)\n', (3883, 3903), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3915, 3962), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(w, h)', '(255, 255, 255, 255)'], {}), "('RGBA', (w, h), (255, 255, 255, 255))\n", (3924, 3962), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3977, 3996), 'PIL.ImageDraw.Draw', 
'ImageDraw.Draw', (['img'], {}), '(img)\n', (3991, 3996), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((4266, 4279), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (4274, 4279), True, 'import numpy as np\n'), ((804, 819), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (814, 819), True, 'import numpy as np\n'), ((844, 860), 'scipy.fftpack.dct', 'dct', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (847, 860), False, 'from scipy.fftpack import dct\n'), ((1096, 1122), 'os.listdir', 'os.listdir', (['GENERATED_PATH'], {}), '(GENERATED_PATH)\n', (1106, 1122), False, 'import os\n'), ((1333, 1359), 'os.listdir', 'os.listdir', (['GENERATED_PATH'], {}), '(GENERATED_PATH)\n', (1343, 1359), False, 'import os\n'), ((1904, 1947), 'numpy.zeros', 'np.zeros', (['(read.shape[0], read.shape[1], 4)'], {}), '((read.shape[0], read.shape[1], 4))\n', (1912, 1947), True, 'import numpy as np\n'), ((3766, 3791), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3781, 3791), False, 'import os\n'), ((1387, 1425), 'os.path.join', 'os.path.join', (['GENERATED_PATH', 'filepath'], {}), '(GENERATED_PATH, filepath)\n', (1399, 1425), False, 'import os\n'), ((1848, 1888), 'os.path.join', 'os.path.join', (['GENERATED_PATH', 'names[idx]'], {}), '(GENERATED_PATH, names[idx])\n', (1860, 1888), False, 'import os\n'), ((4632, 4683), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(icon_w, icon_h)', '(r, g, b, 255)'], {}), "('RGBA', (icon_w, icon_h), (r, g, b, 255))\n", (4641, 4683), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3567, 3578), 'time.time', 'time.time', ([], {}), '()\n', (3576, 3578), False, 'import time\n'), ((4743, 4761), 'numpy.mean', 'np.mean', (['[r, g, b]'], {}), '([r, g, b])\n', (4750, 4761), True, 'import numpy as np\n'), ((2995, 3027), 'numpy.copy', 'np.copy', (["icons[thread['number']]"], {}), "(icons[thread['number']])\n", (3002, 3027), True, 'import numpy as np\n'), ((3080, 3096), 'numpy.mean', 'np.mean', 
(['rgb[:3]'], {}), '(rgb[:3])\n', (3087, 3096), True, 'import numpy as np\n')] |
######################
# Sequence analysis
from Bio.Seq import Seq
seq1 = Seq("ATG")
print('Sequencia :', seq1)
# complementary sequence
seq1_comp = seq1.complement()
print('Sequencia complementar :', seq1_comp)
# reverse-complement sequence
seq1_comp_reversa = seq1.reverse_complement()
print('Sequencia complementar reversa :', seq1_comp_reversa)
###########################
# Transcription of a DNA sequence into RNA
dna_seq = Seq("ATG")
rna_seq = dna_seq.transcribe()
print("Transcrição RNA:", rna_seq)
# back to the original DNA from the RNA
orig_dna = rna_seq.back_transcribe()
print("DNA original:", orig_dna)
###########################
# Translation: DNA to RNA to protein
dna_seq = Seq("ATG")
rna_seq = dna_seq.transcribe()
print("Transcrição RNA:", rna_seq)
# translate the RNA into a protein sequence (comment was stale: it said
# "back to DNA" but this step translates, not back-transcribes)
protein_seq = rna_seq.translate()
print("Sequencia de proteina:", protein_seq)
| [
"Bio.Seq.Seq"
] | [((77, 87), 'Bio.Seq.Seq', 'Seq', (['"""ATG"""'], {}), "('ATG')\n", (80, 87), False, 'from Bio.Seq import Seq\n'), ((476, 486), 'Bio.Seq.Seq', 'Seq', (['"""ATG"""'], {}), "('ATG')\n", (479, 486), False, 'from Bio.Seq import Seq\n'), ((739, 749), 'Bio.Seq.Seq', 'Seq', (['"""ATG"""'], {}), "('ATG')\n", (742, 749), False, 'from Bio.Seq import Seq\n')] |
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from django.db import models
class AuthGroup(models.Model):
    """Unmanaged inspectdb mapping of Django's ``auth_group`` table."""
    name = models.CharField(unique=True, max_length=80)
    class Meta:
        managed = False
        db_table = 'auth_group'
class AuthGroupPermissions(models.Model):
    """M2M join table ``auth_group_permissions`` (group <-> permission)."""
    group = models.ForeignKey(AuthGroup, models.DO_NOTHING)
    permission = models.ForeignKey('AuthPermission', models.DO_NOTHING)
    class Meta:
        managed = False
        db_table = 'auth_group_permissions'
        unique_together = (('group', 'permission'),)
class AuthPermission(models.Model):
    """Unmanaged mapping of Django's ``auth_permission`` table."""
    name = models.CharField(max_length=255)
    content_type = models.ForeignKey('DjangoContentType', models.DO_NOTHING)
    codename = models.CharField(max_length=100)
    class Meta:
        managed = False
        db_table = 'auth_permission'
        unique_together = (('content_type', 'codename'),)
class AuthUser(models.Model):
    """Unmanaged mapping of Django's ``auth_user`` table."""
    password = models.CharField(max_length=128)
    last_login = models.DateTimeField(blank=True, null=True)
    # Boolean flags surfaced as IntegerField -- presumably MySQL tinyint
    # columns mapped by inspectdb; verify against the actual schema.
    is_superuser = models.IntegerField()
    username = models.CharField(unique=True, max_length=150)
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=150)
    email = models.CharField(max_length=254)
    is_staff = models.IntegerField()
    is_active = models.IntegerField()
    date_joined = models.DateTimeField()
    class Meta:
        managed = False
        db_table = 'auth_user'
class AuthUserGroups(models.Model):
    """M2M join table ``auth_user_groups`` (user <-> group)."""
    user = models.ForeignKey(AuthUser, models.DO_NOTHING)
    group = models.ForeignKey(AuthGroup, models.DO_NOTHING)
    class Meta:
        managed = False
        db_table = 'auth_user_groups'
        unique_together = (('user', 'group'),)
class AuthUserUserPermissions(models.Model):
    """M2M join table ``auth_user_user_permissions`` (user <-> permission)."""
    user = models.ForeignKey(AuthUser, models.DO_NOTHING)
    permission = models.ForeignKey(AuthPermission, models.DO_NOTHING)
    class Meta:
        managed = False
        db_table = 'auth_user_user_permissions'
        unique_together = (('user', 'permission'),)
class DjangoAdminLog(models.Model):
    """Unmanaged mapping of Django admin's audit log (``django_admin_log``)."""
    action_time = models.DateTimeField()
    object_id = models.TextField(blank=True, null=True)
    object_repr = models.CharField(max_length=200)
    action_flag = models.PositiveSmallIntegerField()
    change_message = models.TextField()
    content_type = models.ForeignKey('DjangoContentType', models.DO_NOTHING, blank=True, null=True)
    user = models.ForeignKey(AuthUser, models.DO_NOTHING)
    class Meta:
        managed = False
        db_table = 'django_admin_log'
class DjangoContentType(models.Model):
    """Unmanaged mapping of ``django_content_type`` (app_label/model registry)."""
    app_label = models.CharField(max_length=100)
    model = models.CharField(max_length=100)
    class Meta:
        managed = False
        db_table = 'django_content_type'
        unique_together = (('app_label', 'model'),)
class DjangoMigrations(models.Model):
    """Unmanaged mapping of the migration bookkeeping table ``django_migrations``."""
    app = models.CharField(max_length=255)
    name = models.CharField(max_length=255)
    applied = models.DateTimeField()
    class Meta:
        managed = False
        db_table = 'django_migrations'
class DjangoSession(models.Model):
    """Unmanaged mapping of the session store table ``django_session``."""
    session_key = models.CharField(primary_key=True, max_length=40)
    session_data = models.TextField()
    expire_date = models.DateTimeField()
    class Meta:
        managed = False
        db_table = 'django_session'
class DotaAdmin(models.Model):
    """Admin role record; PK doubles as FK to the owning ``DotaUser``."""
    admin = models.ForeignKey('DotaUser', models.DO_NOTHING, primary_key=True)
    admin_registration_number = models.CharField(unique=True, max_length=20)
    class Meta:
        managed = False
        db_table = 'dota_admin'
class DotaGamer(models.Model):
    """Gamer role record; PK doubles as FK to the owning ``DotaUser``."""
    gamer = models.ForeignKey('DotaUser', models.DO_NOTHING, primary_key=True)
    gamer_ign = models.CharField(max_length=20)  # in-game name
    class Meta:
        managed = False
        db_table = 'dota_gamer'
class DotaGamerMatch(models.Model):
    """Per-gamer match statistics (``dota_gamer_match``).

    NOTE(review): only ``matchid`` carries ``primary_key=True`` -- inspectdb
    cannot express a composite (match, gamer) key; confirm the real PK.
    """
    matchid = models.ForeignKey('DotaMatch', models.DO_NOTHING, db_column='matchid', primary_key=True)
    match_gpm = models.IntegerField(db_column='match_GPM')  # Field name made lowercase.
    match_kills = models.IntegerField(db_column='match_Kills')  # Field name made lowercase.
    match_xpm = models.IntegerField(db_column='match_XPM')  # Field name made lowercase.
    match_death = models.IntegerField()
    match_assist = models.IntegerField()
    gamerid = models.ForeignKey(DotaGamer, models.DO_NOTHING, db_column='gamerid')
    dota_gamer_matchcol = models.CharField(max_length=45, blank=True, null=True)
    match_status = models.CharField(max_length=45)
    class Meta:
        managed = False
        db_table = 'dota_gamer_match'
class DotaMatch(models.Model):
    """A single match (``dota_match``)."""
    match_id = models.IntegerField(db_column='match_ID', primary_key=True)  # Field name made lowercase.
    match_type = models.CharField(db_column='match_Type', max_length=15)  # Field name made lowercase.
    match_duration = models.CharField(db_column='match_Duration', max_length=50)  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'dota_match'
class DotaMmr(models.Model):
    """MMR rating per gamer; PK doubles as FK to ``DotaGamer``."""
    mmr = models.ForeignKey(DotaGamer, models.DO_NOTHING, db_column='mmr_Id', primary_key=True)  # Field name made lowercase.
    mmr_score = models.BigIntegerField()
    mmr_medal = models.CharField(max_length=30)
    class Meta:
        managed = False
        db_table = 'dota_mmr'
class DotaPremiumuser(models.Model):
    """Premium subscription record for a gamer (``dota_premiumuser``)."""
    premiumuser_registration_number = models.BigIntegerField(db_column='premiumuser_Registration_Number', primary_key=True)  # Field name made lowercase.
    # Expiry stored as a string, not a date column -- inspectdb artifact.
    premiumuser_registrationexpirydate = models.CharField(db_column='premiumuser_RegistrationExpiryDate', max_length=30)  # Field name made lowercase.
    premiumuser_gamer = models.ForeignKey(DotaGamer, models.DO_NOTHING, db_column='premiumuser_Gamer_ID')  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'dota_premiumuser'
class DotaTournament(models.Model):
    """A tournament (``dota_tournament``)."""
    tournament_id = models.IntegerField(db_column='Tournament_ID', primary_key=True)  # Field name made lowercase.
    tournament_name = models.CharField(db_column='Tournament_name', max_length=100)  # Field name made lowercase.
    tournament_starting_timedate = models.DateTimeField(db_column='Tournament_starting_timedate')  # Field name made lowercase.
    tournament_end_timedate = models.DateTimeField(db_column='Tournament_end_timedate')  # Field name made lowercase.
    tournament_prize = models.CharField(db_column='Tournament_prize', max_length=100)  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'dota_tournament'
class DotaTournamentMatch(models.Model):
    """Join table linking matches to tournaments (``dota_tournament_match``)."""
    matchid = models.ForeignKey(DotaMatch, models.DO_NOTHING, db_column='Matchid', primary_key=True)  # Field name made lowercase.
    tournamentid = models.ForeignKey(DotaTournament, models.DO_NOTHING, db_column='Tournamentid')  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'dota_tournament_match'
class DotaUser(models.Model):
    """Base account record (``dota_user``); admins/gamers reference it."""
    user_id = models.BigIntegerField(primary_key=True)
    user_name = models.CharField(max_length=45)
    user_email = models.CharField(max_length=45)
    user_username = models.CharField(unique=True, max_length=30)
    # NOTE(review): password stored as a plain 30-char field -- appears
    # unhashed; confirm and consider hashing at the application layer.
    user_password = models.CharField(max_length=30)
    class Meta:
        managed = False
        db_table = 'dota_user'
| [
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.BigIntegerField",
"django.db.models.DateTimeField",
"django.db.models.PositiveSmallIntegerField",
"django.db.models.CharField"
] | [((541, 585), 'django.db.models.CharField', 'models.CharField', ([], {'unique': '(True)', 'max_length': '(80)'}), '(unique=True, max_length=80)\n', (557, 585), False, 'from django.db import models\n'), ((715, 762), 'django.db.models.ForeignKey', 'models.ForeignKey', (['AuthGroup', 'models.DO_NOTHING'], {}), '(AuthGroup, models.DO_NOTHING)\n', (732, 762), False, 'from django.db import models\n'), ((780, 834), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""AuthPermission"""', 'models.DO_NOTHING'], {}), "('AuthPermission', models.DO_NOTHING)\n", (797, 834), False, 'from django.db import models\n'), ((1022, 1054), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1038, 1054), False, 'from django.db import models\n'), ((1074, 1131), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""DjangoContentType"""', 'models.DO_NOTHING'], {}), "('DjangoContentType', models.DO_NOTHING)\n", (1091, 1131), False, 'from django.db import models\n'), ((1147, 1179), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1163, 1179), False, 'from django.db import models\n'), ((1363, 1395), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (1379, 1395), False, 'from django.db import models\n'), ((1413, 1456), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1433, 1456), False, 'from django.db import models\n'), ((1476, 1497), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1495, 1497), False, 'from django.db import models\n'), ((1513, 1558), 'django.db.models.CharField', 'models.CharField', ([], {'unique': '(True)', 'max_length': '(150)'}), '(unique=True, max_length=150)\n', (1529, 1558), False, 'from django.db import models\n'), ((1576, 1607), 'django.db.models.CharField', 'models.CharField', ([], 
{'max_length': '(30)'}), '(max_length=30)\n', (1592, 1607), False, 'from django.db import models\n'), ((1624, 1656), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (1640, 1656), False, 'from django.db import models\n'), ((1669, 1701), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)'}), '(max_length=254)\n', (1685, 1701), False, 'from django.db import models\n'), ((1717, 1738), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1736, 1738), False, 'from django.db import models\n'), ((1755, 1776), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1774, 1776), False, 'from django.db import models\n'), ((1795, 1817), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1815, 1817), False, 'from django.db import models\n'), ((1939, 1985), 'django.db.models.ForeignKey', 'models.ForeignKey', (['AuthUser', 'models.DO_NOTHING'], {}), '(AuthUser, models.DO_NOTHING)\n', (1956, 1985), False, 'from django.db import models\n'), ((1998, 2045), 'django.db.models.ForeignKey', 'models.ForeignKey', (['AuthGroup', 'models.DO_NOTHING'], {}), '(AuthGroup, models.DO_NOTHING)\n', (2015, 2045), False, 'from django.db import models\n'), ((2230, 2276), 'django.db.models.ForeignKey', 'models.ForeignKey', (['AuthUser', 'models.DO_NOTHING'], {}), '(AuthUser, models.DO_NOTHING)\n', (2247, 2276), False, 'from django.db import models\n'), ((2294, 2346), 'django.db.models.ForeignKey', 'models.ForeignKey', (['AuthPermission', 'models.DO_NOTHING'], {}), '(AuthPermission, models.DO_NOTHING)\n', (2311, 2346), False, 'from django.db import models\n'), ((2544, 2566), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (2564, 2566), False, 'from django.db import models\n'), ((2583, 2622), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2599, 2622), 
False, 'from django.db import models\n'), ((2641, 2673), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (2657, 2673), False, 'from django.db import models\n'), ((2692, 2726), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {}), '()\n', (2724, 2726), False, 'from django.db import models\n'), ((2748, 2766), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (2764, 2766), False, 'from django.db import models\n'), ((2786, 2871), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""DjangoContentType"""', 'models.DO_NOTHING'], {'blank': '(True)', 'null': '(True)'}), "('DjangoContentType', models.DO_NOTHING, blank=True, null=True\n )\n", (2803, 2871), False, 'from django.db import models\n'), ((2878, 2924), 'django.db.models.ForeignKey', 'models.ForeignKey', (['AuthUser', 'models.DO_NOTHING'], {}), '(AuthUser, models.DO_NOTHING)\n', (2895, 2924), False, 'from django.db import models\n'), ((3061, 3093), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (3077, 3093), False, 'from django.db import models\n'), ((3106, 3138), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (3122, 3138), False, 'from django.db import models\n'), ((3323, 3355), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (3339, 3355), False, 'from django.db import models\n'), ((3367, 3399), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (3383, 3399), False, 'from django.db import models\n'), ((3414, 3436), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (3434, 3436), False, 'from django.db import models\n'), ((3572, 3621), 'django.db.models.CharField', 'models.CharField', ([], {'primary_key': '(True)', 'max_length': '(40)'}), '(primary_key=True, 
max_length=40)\n', (3588, 3621), False, 'from django.db import models\n'), ((3641, 3659), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (3657, 3659), False, 'from django.db import models\n'), ((3678, 3700), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (3698, 3700), False, 'from django.db import models\n'), ((3823, 3889), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""DotaUser"""', 'models.DO_NOTHING'], {'primary_key': '(True)'}), "('DotaUser', models.DO_NOTHING, primary_key=True)\n", (3840, 3889), False, 'from django.db import models\n'), ((3922, 3966), 'django.db.models.CharField', 'models.CharField', ([], {'unique': '(True)', 'max_length': '(20)'}), '(unique=True, max_length=20)\n', (3938, 3966), False, 'from django.db import models\n'), ((4085, 4151), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""DotaUser"""', 'models.DO_NOTHING'], {'primary_key': '(True)'}), "('DotaUser', models.DO_NOTHING, primary_key=True)\n", (4102, 4151), False, 'from django.db import models\n'), ((4168, 4199), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (4184, 4199), False, 'from django.db import models\n'), ((4325, 4417), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""DotaMatch"""', 'models.DO_NOTHING'], {'db_column': '"""matchid"""', 'primary_key': '(True)'}), "('DotaMatch', models.DO_NOTHING, db_column='matchid',\n primary_key=True)\n", (4342, 4417), False, 'from django.db import models\n'), ((4430, 4472), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'db_column': '"""match_GPM"""'}), "(db_column='match_GPM')\n", (4449, 4472), False, 'from django.db import models\n'), ((4521, 4565), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'db_column': '"""match_Kills"""'}), "(db_column='match_Kills')\n", (4540, 4565), False, 'from django.db import models\n'), ((4612, 4654), 'django.db.models.IntegerField', 
'models.IntegerField', ([], {'db_column': '"""match_XPM"""'}), "(db_column='match_XPM')\n", (4631, 4654), False, 'from django.db import models\n'), ((4703, 4724), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (4722, 4724), False, 'from django.db import models\n'), ((4744, 4765), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (4763, 4765), False, 'from django.db import models\n'), ((4780, 4848), 'django.db.models.ForeignKey', 'models.ForeignKey', (['DotaGamer', 'models.DO_NOTHING'], {'db_column': '"""gamerid"""'}), "(DotaGamer, models.DO_NOTHING, db_column='gamerid')\n", (4797, 4848), False, 'from django.db import models\n'), ((4875, 4929), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(45)', 'blank': '(True)', 'null': '(True)'}), '(max_length=45, blank=True, null=True)\n', (4891, 4929), False, 'from django.db import models\n'), ((4949, 4980), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(45)'}), '(max_length=45)\n', (4965, 4980), False, 'from django.db import models\n'), ((5108, 5167), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'db_column': '"""match_ID"""', 'primary_key': '(True)'}), "(db_column='match_ID', primary_key=True)\n", (5127, 5167), False, 'from django.db import models\n'), ((5215, 5270), 'django.db.models.CharField', 'models.CharField', ([], {'db_column': '"""match_Type"""', 'max_length': '(15)'}), "(db_column='match_Type', max_length=15)\n", (5231, 5270), False, 'from django.db import models\n'), ((5322, 5381), 'django.db.models.CharField', 'models.CharField', ([], {'db_column': '"""match_Duration"""', 'max_length': '(50)'}), "(db_column='match_Duration', max_length=50)\n", (5338, 5381), False, 'from django.db import models\n'), ((5526, 5615), 'django.db.models.ForeignKey', 'models.ForeignKey', (['DotaGamer', 'models.DO_NOTHING'], {'db_column': '"""mmr_Id"""', 'primary_key': '(True)'}), "(DotaGamer, models.DO_NOTHING, 
db_column='mmr_Id',\n primary_key=True)\n", (5543, 5615), False, 'from django.db import models\n'), ((5658, 5682), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {}), '()\n', (5680, 5682), False, 'from django.db import models\n'), ((5699, 5730), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (5715, 5730), False, 'from django.db import models\n'), ((5879, 5968), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'db_column': '"""premiumuser_Registration_Number"""', 'primary_key': '(True)'}), "(db_column='premiumuser_Registration_Number',\n primary_key=True)\n", (5901, 5968), False, 'from django.db import models\n'), ((6036, 6115), 'django.db.models.CharField', 'models.CharField', ([], {'db_column': '"""premiumuser_RegistrationExpiryDate"""', 'max_length': '(30)'}), "(db_column='premiumuser_RegistrationExpiryDate', max_length=30)\n", (6052, 6115), False, 'from django.db import models\n'), ((6170, 6256), 'django.db.models.ForeignKey', 'models.ForeignKey', (['DotaGamer', 'models.DO_NOTHING'], {'db_column': '"""premiumuser_Gamer_ID"""'}), "(DotaGamer, models.DO_NOTHING, db_column=\n 'premiumuser_Gamer_ID')\n", (6187, 6256), False, 'from django.db import models\n'), ((6419, 6483), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'db_column': '"""Tournament_ID"""', 'primary_key': '(True)'}), "(db_column='Tournament_ID', primary_key=True)\n", (6438, 6483), False, 'from django.db import models\n'), ((6536, 6597), 'django.db.models.CharField', 'models.CharField', ([], {'db_column': '"""Tournament_name"""', 'max_length': '(100)'}), "(db_column='Tournament_name', max_length=100)\n", (6552, 6597), False, 'from django.db import models\n'), ((6663, 6725), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'db_column': '"""Tournament_starting_timedate"""'}), "(db_column='Tournament_starting_timedate')\n", (6683, 6725), False, 'from django.db import models\n'), 
((6786, 6843), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'db_column': '"""Tournament_end_timedate"""'}), "(db_column='Tournament_end_timedate')\n", (6806, 6843), False, 'from django.db import models\n'), ((6897, 6959), 'django.db.models.CharField', 'models.CharField', ([], {'db_column': '"""Tournament_prize"""', 'max_length': '(100)'}), "(db_column='Tournament_prize', max_length=100)\n", (6913, 6959), False, 'from django.db import models\n'), ((7125, 7215), 'django.db.models.ForeignKey', 'models.ForeignKey', (['DotaMatch', 'models.DO_NOTHING'], {'db_column': '"""Matchid"""', 'primary_key': '(True)'}), "(DotaMatch, models.DO_NOTHING, db_column='Matchid',\n primary_key=True)\n", (7142, 7215), False, 'from django.db import models\n'), ((7261, 7339), 'django.db.models.ForeignKey', 'models.ForeignKey', (['DotaTournament', 'models.DO_NOTHING'], {'db_column': '"""Tournamentid"""'}), "(DotaTournament, models.DO_NOTHING, db_column='Tournamentid')\n", (7278, 7339), False, 'from django.db import models\n'), ((7500, 7540), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (7522, 7540), False, 'from django.db import models\n'), ((7557, 7588), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(45)'}), '(max_length=45)\n', (7573, 7588), False, 'from django.db import models\n'), ((7606, 7637), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(45)'}), '(max_length=45)\n', (7622, 7637), False, 'from django.db import models\n'), ((7658, 7702), 'django.db.models.CharField', 'models.CharField', ([], {'unique': '(True)', 'max_length': '(30)'}), '(unique=True, max_length=30)\n', (7674, 7702), False, 'from django.db import models\n'), ((7723, 7754), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (7739, 7754), False, 'from django.db import models\n')] |
############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
# Python v3.7.5
#
# <NAME>
# (c) 2019
#
# Licence APL2.0
#
###########################################################
# standard libraries
from unittest import mock
import pytest
import datetime
# external packages
import indibase
# local import
from mw4.test.test_units.setupQt import setupQt
host_ip = 'astro-mount.fritz.box'
@pytest.fixture(autouse=True, scope='module')
def module_setup_teardown():
global app, spy, mwGlob, test
app, spy, mwGlob, test = setupQt()
def test_name():
name = 'MBox'
app.skymeter.name = name
assert name == app.skymeter.name
def test_newDevice_1():
with mock.patch.object(app.skymeter.client,
'isServerConnected',
return_value=True):
with mock.patch.object(app.skymeter.client,
'getDevice',
return_value=1):
suc = app.skymeter.newDevice('test')
assert suc
assert app.skymeter.device is None
def test_newDevice_2():
app.skymeter.name = 'Test'
with mock.patch.object(app.skymeter.client,
'isServerConnected',
return_value=True):
with mock.patch.object(app.skymeter.client,
'getDevice',
return_value=1):
suc = app.skymeter.newDevice('Test')
assert suc
assert app.skymeter.device == 1
def test_removeDevice_1():
app.skymeter.name = 'Test'
with mock.patch.object(app.skymeter.client,
'isServerConnected',
return_value=True):
suc = app.skymeter.removeDevice('Test')
assert suc
assert app.skymeter.device is None
assert app.skymeter.data == {}
def test_startCommunication_1():
app.skymeter.name = ''
with mock.patch.object(app.skymeter.client,
'connectServer',
return_value=False):
suc = app.skymeter.startCommunication()
assert not suc
def test_setUpdateRate_1():
app.skymeter.name = 'test'
suc = app.skymeter.setUpdateConfig('false')
assert not suc
def test_setUpdateRate_2():
app.skymeter.name = 'test'
app.skymeter.device = None
suc = app.skymeter.setUpdateConfig('test')
assert not suc
def test_setUpdateRate_3():
class Test:
@staticmethod
def getNumber(test):
return {}
app.skymeter.name = 'test'
app.skymeter.device = Test()
suc = app.skymeter.setUpdateConfig('test')
assert not suc
def test_setUpdateRate_4():
class Test:
@staticmethod
def getNumber(test):
return {'PERIOD': 1}
app.skymeter.name = 'test'
app.skymeter.device = Test()
suc = app.skymeter.setUpdateConfig('test')
assert suc
def test_setUpdateRate_5():
class Test:
@staticmethod
def getNumber(test):
return {'PERIOD': 10}
app.skymeter.name = 'test'
app.skymeter.device = Test()
with mock.patch.object(app.skymeter.client,
'sendNewNumber',
return_value=False):
suc = app.skymeter.setUpdateConfig('test')
assert not suc
def test_setUpdateRate_6():
class Test:
@staticmethod
def getNumber(test):
return {'PERIOD': 10}
app.skymeter.name = 'test'
app.skymeter.device = Test()
with mock.patch.object(app.skymeter.client,
'sendNewNumber',
return_value=True):
suc = app.skymeter.setUpdateConfig('test')
assert suc
def test_updateNumber_1():
app.skymeter.device = None
app.skymeter.name = 'test'
suc = app.skymeter.updateNumber('false', 'WEATHER_HUMIDITY')
assert not suc
def test_updateNumber_2():
app.skymeter.device = 1
app.skymeter.name = 'test'
suc = app.skymeter.updateNumber('false', 'WEATHER_HUMIDITY')
assert not suc
def test_updateNumber_3():
app.skymeter.device = indibase.indiBase.Device()
app.skymeter.name = 'test'
values = {'WEATHER_DEWPOINT': 5,
'WEATHER_TEMPERATURE': 10,
'WEATHER_HUMIDITY': 50,
}
with mock.patch.object(app.skymeter.device,
'getNumber',
return_value=values):
suc = app.skymeter.updateNumber('test', 'WEATHER_PARAMETERS')
assert suc
assert app.skymeter.data['WEATHER_DEWPOINT'] == 5
assert app.skymeter.data['WEATHER_TEMPERATURE'] == 10
assert app.skymeter.data['WEATHER_HUMIDITY'] == 50
def test_updateNumber_4():
app.skymeter.device = indibase.indiBase.Device()
app.skymeter.name = 'test'
values = {'WEATHER_DEWPOINT': 5,
}
with mock.patch.object(app.skymeter.device,
'getNumber',
return_value=values):
suc = app.skymeter.updateNumber('test', 'WEATHER_PARAMETERS')
assert suc
def test_updateNumber_5():
app.skymeter.device = indibase.indiBase.Device()
app.skymeter.name = 'test'
values = {'WEATHER_HUMIDITY': 50,
}
with mock.patch.object(app.skymeter.device,
'getNumber',
return_value=values):
suc = app.skymeter.updateNumber('test', 'WEATHER_PARAMETERS')
assert suc
def test_updateNumber_6():
app.skymeter.device = indibase.indiBase.Device()
app.skymeter.name = 'test'
values = {'WEATHER_TEMPERATURE': 10,
}
with mock.patch.object(app.skymeter.device,
'getNumber',
return_value=values):
suc = app.skymeter.updateNumber('test', 'WEATHER_PARAMETERS')
assert suc
def test_updateNumber_7():
app.skymeter.device = indibase.indiBase.Device()
app.skymeter.name = 'test'
values = {'WEATHER_TEMPERATURE': 20,
'WEATHER_HUMIDITY': 50,
}
with mock.patch.object(app.skymeter.device,
'getNumber',
return_value=values):
suc = app.skymeter.updateNumber('test', 'WEATHER_PARAMETERS')
assert suc
def test_updateNumber_8():
app.skymeter.device = indibase.indiBase.Device()
app.skymeter.name = 'test'
t = datetime.datetime.utcnow()
values = {'WEATHER_TEMPERATURE': 10,
'WEATHER_HUMIDITY': 50,
}
app.skymeter.data = {'WEATHER_TEMPERATURE': 10,
'WEATHER_HUMIDITY': 50,
}
with mock.patch.object(app.skymeter.device,
'getNumber',
return_value=values):
suc = app.skymeter.updateNumber('test', 'WEATHER_PARAMETERS')
assert suc
| [
"datetime.datetime.utcnow",
"indibase.indiBase.Device",
"mw4.test.test_units.setupQt.setupQt",
"unittest.mock.patch.object",
"pytest.fixture"
] | [((663, 707), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)', 'scope': '"""module"""'}), "(autouse=True, scope='module')\n", (677, 707), False, 'import pytest\n'), ((800, 809), 'mw4.test.test_units.setupQt.setupQt', 'setupQt', ([], {}), '()\n', (807, 809), False, 'from mw4.test.test_units.setupQt import setupQt\n'), ((4436, 4462), 'indibase.indiBase.Device', 'indibase.indiBase.Device', ([], {}), '()\n', (4460, 4462), False, 'import indibase\n'), ((5086, 5112), 'indibase.indiBase.Device', 'indibase.indiBase.Device', ([], {}), '()\n', (5110, 5112), False, 'import indibase\n'), ((5478, 5504), 'indibase.indiBase.Device', 'indibase.indiBase.Device', ([], {}), '()\n', (5502, 5504), False, 'import indibase\n'), ((5871, 5897), 'indibase.indiBase.Device', 'indibase.indiBase.Device', ([], {}), '()\n', (5895, 5897), False, 'import indibase\n'), ((6267, 6293), 'indibase.indiBase.Device', 'indibase.indiBase.Device', ([], {}), '()\n', (6291, 6293), False, 'import indibase\n'), ((6701, 6727), 'indibase.indiBase.Device', 'indibase.indiBase.Device', ([], {}), '()\n', (6725, 6727), False, 'import indibase\n'), ((6767, 6793), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (6791, 6793), False, 'import datetime\n'), ((948, 1026), 'unittest.mock.patch.object', 'mock.patch.object', (['app.skymeter.client', '"""isServerConnected"""'], {'return_value': '(True)'}), "(app.skymeter.client, 'isServerConnected', return_value=True)\n", (965, 1026), False, 'from unittest import mock\n'), ((1411, 1489), 'unittest.mock.patch.object', 'mock.patch.object', (['app.skymeter.client', '"""isServerConnected"""'], {'return_value': '(True)'}), "(app.skymeter.client, 'isServerConnected', return_value=True)\n", (1428, 1489), False, 'from unittest import mock\n'), ((1874, 1952), 'unittest.mock.patch.object', 'mock.patch.object', (['app.skymeter.client', '"""isServerConnected"""'], {'return_value': '(True)'}), "(app.skymeter.client, 'isServerConnected', 
return_value=True)\n", (1891, 1952), False, 'from unittest import mock\n'), ((2228, 2303), 'unittest.mock.patch.object', 'mock.patch.object', (['app.skymeter.client', '"""connectServer"""'], {'return_value': '(False)'}), "(app.skymeter.client, 'connectServer', return_value=False)\n", (2245, 2303), False, 'from unittest import mock\n'), ((3425, 3500), 'unittest.mock.patch.object', 'mock.patch.object', (['app.skymeter.client', '"""sendNewNumber"""'], {'return_value': '(False)'}), "(app.skymeter.client, 'sendNewNumber', return_value=False)\n", (3442, 3500), False, 'from unittest import mock\n'), ((3834, 3908), 'unittest.mock.patch.object', 'mock.patch.object', (['app.skymeter.client', '"""sendNewNumber"""'], {'return_value': '(True)'}), "(app.skymeter.client, 'sendNewNumber', return_value=True)\n", (3851, 3908), False, 'from unittest import mock\n'), ((4635, 4707), 'unittest.mock.patch.object', 'mock.patch.object', (['app.skymeter.device', '"""getNumber"""'], {'return_value': 'values'}), "(app.skymeter.device, 'getNumber', return_value=values)\n", (4652, 4707), False, 'from unittest import mock\n'), ((5206, 5278), 'unittest.mock.patch.object', 'mock.patch.object', (['app.skymeter.device', '"""getNumber"""'], {'return_value': 'values'}), "(app.skymeter.device, 'getNumber', return_value=values)\n", (5223, 5278), False, 'from unittest import mock\n'), ((5599, 5671), 'unittest.mock.patch.object', 'mock.patch.object', (['app.skymeter.device', '"""getNumber"""'], {'return_value': 'values'}), "(app.skymeter.device, 'getNumber', return_value=values)\n", (5616, 5671), False, 'from unittest import mock\n'), ((5995, 6067), 'unittest.mock.patch.object', 'mock.patch.object', (['app.skymeter.device', '"""getNumber"""'], {'return_value': 'values'}), "(app.skymeter.device, 'getNumber', return_value=values)\n", (6012, 6067), False, 'from unittest import mock\n'), ((6429, 6501), 'unittest.mock.patch.object', 'mock.patch.object', (['app.skymeter.device', '"""getNumber"""'], 
{'return_value': 'values'}), "(app.skymeter.device, 'getNumber', return_value=values)\n", (6446, 6501), False, 'from unittest import mock\n'), ((7008, 7080), 'unittest.mock.patch.object', 'mock.patch.object', (['app.skymeter.device', '"""getNumber"""'], {'return_value': 'values'}), "(app.skymeter.device, 'getNumber', return_value=values)\n", (7025, 7080), False, 'from unittest import mock\n'), ((1095, 1162), 'unittest.mock.patch.object', 'mock.patch.object', (['app.skymeter.client', '"""getDevice"""'], {'return_value': '(1)'}), "(app.skymeter.client, 'getDevice', return_value=1)\n", (1112, 1162), False, 'from unittest import mock\n'), ((1558, 1625), 'unittest.mock.patch.object', 'mock.patch.object', (['app.skymeter.client', '"""getDevice"""'], {'return_value': '(1)'}), "(app.skymeter.client, 'getDevice', return_value=1)\n", (1575, 1625), False, 'from unittest import mock\n')] |
# Tests for Scheme class
import pytest
from simframe.integration import Scheme
def test_scheme_repr_str():
def f():
pass
s = Scheme(f)
assert isinstance(repr(s), str)
assert isinstance(str(s), str)
def test_scheme_attributes():
def f():
pass
with pytest.raises(TypeError):
s = Scheme(f, controller=None)
with pytest.raises(TypeError):
s = Scheme(None)
s = Scheme(f)
with pytest.raises(TypeError):
s.description = None
| [
"pytest.raises",
"simframe.integration.Scheme"
] | [((145, 154), 'simframe.integration.Scheme', 'Scheme', (['f'], {}), '(f)\n', (151, 154), False, 'from simframe.integration import Scheme\n'), ((426, 435), 'simframe.integration.Scheme', 'Scheme', (['f'], {}), '(f)\n', (432, 435), False, 'from simframe.integration import Scheme\n'), ((293, 317), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (306, 317), False, 'import pytest\n'), ((331, 357), 'simframe.integration.Scheme', 'Scheme', (['f'], {'controller': 'None'}), '(f, controller=None)\n', (337, 357), False, 'from simframe.integration import Scheme\n'), ((367, 391), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (380, 391), False, 'import pytest\n'), ((405, 417), 'simframe.integration.Scheme', 'Scheme', (['None'], {}), '(None)\n', (411, 417), False, 'from simframe.integration import Scheme\n'), ((445, 469), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (458, 469), False, 'import pytest\n')] |
# ======================================================================
# Copyright CERFACS (October 2018)
# Contributor: <NAME> (<EMAIL>)
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/or redistribute the software under the terms of the
# CeCILL-B license as circulated by CEA, CNRS and INRIA at the following
# URL "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided
# only with a limited warranty and the software's author, the holder of
# the economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards
# their requirements in conditions enabling the security of their
# systems and/or data to be ensured and, more generally, to use and
# operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
# ======================================================================
"""Implement the :py:class:`~.QuantumCircuitGroup` class used in GLOA.
The :py:class:`~.QuantumCircuitGroup` class represents what the `original paper\
<https://arxiv.org/abs/1004.2242>`_ call a "group". It is a collection of
entities (in this particular case "entities" refers to "instances of
:py:class:`~.QuantumCircuit`") admitting a leader.
"""
import copy
import typing
import numpy
import qtoolkit.data_structures.quantum_circuit.quantum_circuit as qcirc
import qtoolkit.data_structures.quantum_circuit.quantum_operation as qop
import qtoolkit.maths.matrix.distances as qdists
import qtoolkit.maths.matrix.generation.quantum_circuit as qc_gen
import qtoolkit.utils.types as qtypes
class QuantumCircuitGroup:
"""A group of :py:class:`~.QuantumCircuit`.
The instances of :py:class:`~.QuantumCircuit` are grouped into an instance
of :py:class:`~.QuantumCircuitGroup` to factorise the code.
"""
def __init__(
self,
basis: typing.Sequence[qop.QuantumOperation],
objective_unitary: qtypes.UnitaryMatrix,
length: int,
p: int,
r: numpy.ndarray,
correctness_weight: float,
circuit_cost_weight: float,
circuit_cost_func: qcirc.CircuitCostFunction,
parameters_bounds: qtypes.Bounds = None,
) -> None:
"""Initialise the :py:class:`~.QuantumCircuitGroup` instance.
A :py:class:`~.QuantumCircuitGroup` is a group composed of `p` instances
of :py:class:`~.QuantumCircuit`.
:param basis: a sequence of allowed operations. The operations can be
"abstract" (i.e. with None entries, see the documentation for the
:py:class:`~.QuantumOperation` class) or not (i.e. with specified
entries).
:param objective_unitary: unitary matrix we are trying to approximate.
:param length: length of the sequences that will be generated.
:param p: population of the group, i.e. number of gate sequences
contained in this group.
:param r: rates determining the portion of old (r[0]), leader (r[1]) and
random (r[2]) that are used to generate new candidates.
:param correctness_weight: scalar representing the importance attached
to the correctness of the generated circuit.
:param circuit_cost_weight: scalar representing the importance attached
to the cost of the generated circuit.
:param circuit_cost_func: a function that takes as input an instance of
:py:class:`~.QuantumCircuit` and returns a float representing the
cost of the given circuit.
:param parameters_bounds: a list of bounds for each operation in the
`basis`. A None value in this list means that the corresponding
operation is not parametrised. A None value for the whole list
(default value) means that no gate in `basis` is parametrised.
"""
self._qubit_number = objective_unitary.shape[0].bit_length() - 1
self._circuits = [
qc_gen.generate_random_quantum_circuit(
self._qubit_number, basis, length, parameters_bounds
)
for _ in range(p)
]
self._basis = basis
self._r = r
self._length = length
self._param_bounds = parameters_bounds
if self._param_bounds is None:
self._param_bounds = [None] * self._qubit_number
self._correctness_weight = correctness_weight
self._circuit_cost_weight = circuit_cost_weight
self._circuit_cost_func = circuit_cost_func
self._objective_unitary = objective_unitary
self._costs = numpy.zeros((p,), dtype=numpy.float)
self._update_costs()
def _update_costs(self):
"""Update the cached costs.
This method should be called after one or more sequence(s) of the group
changed in order to update the cached costs.
"""
for i in range(len(self._circuits)):
self._costs[i] = qdists.gloa_objective_function(
self._circuits[i],
self._objective_unitary,
self._correctness_weight,
self._circuit_cost_weight,
self._circuit_cost_func,
)
def get_leader(self) -> typing.Tuple[float, qcirc.QuantumCircuit]:
"""Get the best quantum circuit of the group.
:return: the best sequence of the group along with its cost.
"""
idx: int = numpy.argmin(self._costs)
return self._costs[idx], self._circuits[idx]
def mutate_and_recombine(self) -> None:
"""Apply the mutate and recombine step of the GLOA.
See the `GLOA paper <https://arxiv.org/abs/1004.2242>`_ for more
precision on this step.
"""
# Pre-compute group leader data.
_, leader = self.get_leader()
# For each member of the group, mutate and recombine it and see if the
# newly created member is better.
for seq_idx, current in enumerate(self._circuits):
new_circuit = qcirc.QuantumCircuit(self._qubit_number, cache_matrix=True)
random = qc_gen.generate_random_quantum_circuit(
self._qubit_number, self._basis, self._length, self._param_bounds
)
for ops in zip(current.operations, leader.operations, random.operations):
new_circuit.add_operation(self._combine_operations(ops))
new_cost = qdists.gloa_objective_function(
new_circuit,
self._objective_unitary,
self._correctness_weight,
self._circuit_cost_weight,
self._circuit_cost_func,
)
if new_cost < self._costs[seq_idx]:
self._circuits[seq_idx] = new_circuit
self._costs[seq_idx] = new_cost
return
def _combine_operations(
self, operations: typing.Sequence[qop.QuantumOperation]
) -> qop.QuantumOperation:
"""Combine the 3 given operations into one operation.
The combined operation is randomly chosen from the 3 given operations
with the probability distribution `r` given at the instance construction
and then randomly mutated with characteristics of the other operations.
:param operations: A sequence of 3 :py:class:`~.QuantumOperation`.
:return: a random merge of the 3 given operations.
"""
op1, op2, op3 = operations[0], operations[1], operations[2]
new_operation = copy.copy(numpy.random.choice(operations, p=self._r))
control_number = len(new_operation.controls)
new_operation.controls = []
new_operation.target = numpy.random.choice(
[op1.target, op2.target, op3.target], p=self._r
)
while len(new_operation.controls) < control_number:
ctrl = numpy.random.randint(0, self._qubit_number)
if ctrl != new_operation.target and ctrl not in new_operation.controls:
new_operation.controls.append(ctrl)
if new_operation.is_parametrised():
raise NotImplementedError(
"Parametrised operations are not supported for the moment."
)
return new_operation
@property
def circuits(self) -> typing.List[qcirc.QuantumCircuit]:
"""Getter for the stored list of :py:class:`~.QuantumCircuit`."""
return self._circuits
@property
def costs(self):
"""Getter for the pre-computed costs."""
return self._costs
| [
"numpy.random.choice",
"qtoolkit.data_structures.quantum_circuit.quantum_circuit.QuantumCircuit",
"qtoolkit.maths.matrix.distances.gloa_objective_function",
"numpy.zeros",
"numpy.random.randint",
"numpy.argmin",
"qtoolkit.maths.matrix.generation.quantum_circuit.generate_random_quantum_circuit"
] | [((5397, 5433), 'numpy.zeros', 'numpy.zeros', (['(p,)'], {'dtype': 'numpy.float'}), '((p,), dtype=numpy.float)\n', (5408, 5433), False, 'import numpy\n'), ((6224, 6249), 'numpy.argmin', 'numpy.argmin', (['self._costs'], {}), '(self._costs)\n', (6236, 6249), False, 'import numpy\n'), ((8461, 8529), 'numpy.random.choice', 'numpy.random.choice', (['[op1.target, op2.target, op3.target]'], {'p': 'self._r'}), '([op1.target, op2.target, op3.target], p=self._r)\n', (8480, 8529), False, 'import numpy\n'), ((4772, 4868), 'qtoolkit.maths.matrix.generation.quantum_circuit.generate_random_quantum_circuit', 'qc_gen.generate_random_quantum_circuit', (['self._qubit_number', 'basis', 'length', 'parameters_bounds'], {}), '(self._qubit_number, basis, length,\n parameters_bounds)\n', (4810, 4868), True, 'import qtoolkit.maths.matrix.generation.quantum_circuit as qc_gen\n'), ((5749, 5910), 'qtoolkit.maths.matrix.distances.gloa_objective_function', 'qdists.gloa_objective_function', (['self._circuits[i]', 'self._objective_unitary', 'self._correctness_weight', 'self._circuit_cost_weight', 'self._circuit_cost_func'], {}), '(self._circuits[i], self._objective_unitary,\n self._correctness_weight, self._circuit_cost_weight, self.\n _circuit_cost_func)\n', (5779, 5910), True, 'import qtoolkit.maths.matrix.distances as qdists\n'), ((6812, 6871), 'qtoolkit.data_structures.quantum_circuit.quantum_circuit.QuantumCircuit', 'qcirc.QuantumCircuit', (['self._qubit_number'], {'cache_matrix': '(True)'}), '(self._qubit_number, cache_matrix=True)\n', (6832, 6871), True, 'import qtoolkit.data_structures.quantum_circuit.quantum_circuit as qcirc\n'), ((6893, 7002), 'qtoolkit.maths.matrix.generation.quantum_circuit.generate_random_quantum_circuit', 'qc_gen.generate_random_quantum_circuit', (['self._qubit_number', 'self._basis', 'self._length', 'self._param_bounds'], {}), '(self._qubit_number, self._basis,\n self._length, self._param_bounds)\n', (6931, 7002), True, 'import 
qtoolkit.maths.matrix.generation.quantum_circuit as qc_gen\n'), ((7213, 7364), 'qtoolkit.maths.matrix.distances.gloa_objective_function', 'qdists.gloa_objective_function', (['new_circuit', 'self._objective_unitary', 'self._correctness_weight', 'self._circuit_cost_weight', 'self._circuit_cost_func'], {}), '(new_circuit, self._objective_unitary, self.\n _correctness_weight, self._circuit_cost_weight, self._circuit_cost_func)\n', (7243, 7364), True, 'import qtoolkit.maths.matrix.distances as qdists\n'), ((8297, 8339), 'numpy.random.choice', 'numpy.random.choice', (['operations'], {'p': 'self._r'}), '(operations, p=self._r)\n', (8316, 8339), False, 'import numpy\n'), ((8632, 8675), 'numpy.random.randint', 'numpy.random.randint', (['(0)', 'self._qubit_number'], {}), '(0, self._qubit_number)\n', (8652, 8675), False, 'import numpy\n')] |
#!/usr/bin/python
import sys, getopt, re
from collections import defaultdict

USAGE = 'word_usage.py -i <input file> [-d <dictionary>]'


def parse_args(argv):
    """Return (input_file, dictionary_file) parsed from *argv*.

    Exits with status 2 on bad options and status 0 when -h/--help is given.
    """
    input_file = ''
    dictionary_file = ''
    try:
        # BUG FIX: '-h' was tested below but never declared in the option
        # spec, so passing -h used to abort with a GetoptError instead of
        # printing the usage string.
        opts, _ = getopt.getopt(argv, "hi:d:", ["help", "input=", "dictionary="])
    except getopt.GetoptError:
        print(USAGE)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print(USAGE)
            sys.exit()
        elif opt in ("-i", "--input"):
            input_file = arg
        elif opt in ("-d", "--dictionary"):
            dictionary_file = arg
    return input_file, dictionary_file


def count_words(input_file):
    """Return a defaultdict(int) mapping each word of *input_file* to its count.

    Words are lower-cased and stripped of every character that is not a
    letter or an apostrophe.
    """
    word_dict = defaultdict(int)
    with open(input_file) as f:
        for line in f:
            for word in line.lower().split():
                word = re.sub("[^a-zA-Z']", '', word)
                if word:
                    # defaultdict makes the read/increment/write dance
                    # of the original version unnecessary.
                    word_dict[word] += 1
    return word_dict


def add_dictionary_words(word_dict, dictionary_file):
    """Ensure every word of *dictionary_file* (one word per line) is a key of
    *word_dict*, with a count of 0 when it never occurred in the input."""
    with open(dictionary_file) as f:
        for line in f:
            word = re.sub("[^a-zA-Z']", '', line.lower())
            if word:
                # Touching the key is enough: defaultdict inserts 0.
                word_dict[word]
    return word_dict


def run(input_file, dictionary_file=''):
    """Count the words of *input_file* and print "word count" lines,
    most frequent first, ties broken alphabetically."""
    word_dict = count_words(input_file)
    if dictionary_file:
        add_dictionary_words(word_dict, dictionary_file)
    # Sort alphabetically first so that the stable frequency sort keeps
    # equal-count words in alphabetical order.
    sorted_words = sorted(word_dict.items(), key=lambda item: item[0])
    sorted_words.sort(key=lambda item: item[1], reverse=True)
    for word, count in sorted_words:
        print("{} {}".format(word, count))


if __name__ == '__main__':
    input_file, dictionary_file = parse_args(sys.argv[1:])
    run(input_file, dictionary_file)
| [
"re.sub",
"getopt.getopt",
"collections.defaultdict",
"sys.exit"
] | [((531, 547), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (542, 547), False, 'from collections import defaultdict\n'), ((136, 198), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '"""i:d:"""', "['input=', 'dictionary=']"], {}), "(sys.argv[1:], 'i:d:', ['input=', 'dictionary='])\n", (149, 198), False, 'import sys, getopt, re\n'), ((282, 293), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (290, 293), False, 'import sys, getopt, re\n'), ((395, 405), 'sys.exit', 'sys.exit', ([], {}), '()\n', (403, 405), False, 'import sys, getopt, re\n'), ((697, 724), 're.sub', 're.sub', (['"""[^a-zA-Z\']"""', '""""""', 'w'], {}), '("[^a-zA-Z\']", \'\', w)\n', (703, 724), False, 'import sys, getopt, re\n'), ((1013, 1043), 're.sub', 're.sub', (['"""[^a-zA-Z\']"""', '""""""', 'line'], {}), '("[^a-zA-Z\']", \'\', line)\n', (1019, 1043), False, 'import sys, getopt, re\n')] |
# coding=utf-8
# Copyright (c) 2021 <NAME> <<EMAIL>>. All rights reserved.
# This file is based on code by the authors denoted below and has been modified from its original version.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for generating text."""
import copy
import json
import os
import time
from typing import List, Union
import torch
import torch.nn.functional as F
from megatron import print_rank_0
from megatron import mpu
from megatron.utils import get_ltor_masks_and_position_ids, is_mp_rank_0
def get_batch(neox_args, context_tokens):
    """Reshape context tokens into a batch and build its mask / position ids."""
    # Reshape to (batch_size, -1) and move to the GPU.
    batch_tokens = (
        context_tokens.view(neox_args.batch_size, -1).contiguous().cuda()
    )
    # Left-to-right attention mask and position ids, honouring the
    # EOD-related reset options.
    mask, _, positions = get_ltor_masks_and_position_ids(
        batch_tokens,
        neox_args.tokenizer.eod,
        neox_args.reset_position_ids,
        neox_args.reset_attention_mask,
        neox_args.eod_mask_loss,
    )
    return batch_tokens, mask, positions
def top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """Apply top-k and/or nucleus (top-p) filtering to *logits* in place.

    Filtered vocabulary entries are overwritten with *filter_value*
    (defaults to -inf). Mostly taken from the huggingface conversational
    ai code at
    https://medium.com/huggingface/how-to-build-a-state-of-the-art-conversational-ai-with-transfer-learning-2d818ac26313

    logits: torch.Tensor -> logits of megatron model.
    top_k: integer -> integer between 0 and the models vocab size. Filters out any logits with a probability less than that of the top_kth token.
    top_p: float -> Top-p (nucles) sampling chooses from the smallest possible set of tokens whose cumulative probability exceeds the probability top_p.
    returns: (filtered) logits
    """
    if top_k > 0:
        # Keep only the k highest logits: everything strictly below the
        # k-th largest value is filtered out.
        kth_value = torch.topk(logits, top_k)[0][..., -1, None]
        logits[logits < kth_value] = filter_value
    if top_p > 0.0:
        descending_logits, descending_indices = torch.sort(
            logits, descending=True, dim=-1)
        cumulative = torch.cumsum(F.softmax(descending_logits, dim=-1),
                                 dim=-1)
        # Mask tokens once the cumulative probability exceeds top_p ...
        to_remove = cumulative > top_p
        # ... but shift the mask right so the first token that crossed the
        # threshold is itself kept.
        to_remove[..., 1:] = to_remove[..., :-1].clone()
        to_remove[..., 0] = 0
        for row in range(descending_indices.size(0)):
            removed_columns = descending_indices[row][to_remove[row]]
            logits[row][removed_columns] = filter_value
    return logits
def pad_batch(batch, pad_id, neox_args):
    """Pad every context of *batch* in place up to ``neox_args.seq_length``.

    batch: list of token lists; each list shorter than seq_length is
        extended (mutated) with *pad_id*.
    pad_id: int, token used as padding.
    neox_args: neox_args (only ``seq_length`` is read).
    returns: the (mutated) batch and the list of original context lengths.
    """
    original_lengths = []
    for token_list in batch:
        length = len(token_list)
        original_lengths.append(length)
        if length < neox_args.seq_length:
            token_list.extend([pad_id] * (neox_args.seq_length - length))
    return batch, original_lengths
def get_token_stream(neox_args, model, context_tokens):
    """
    yields completions from a model as an iterator.
    model: a Megatron model.
    context_tokens: the prompt to complete (list of token lists).
    yields: (tokens, lengths) pairs, exposing one more generated column of
        the token buffer per step.
    """
    # Pad every prompt to seq_length with the EOD token, keeping the
    # original lengths for later bookkeeping.
    context_tokens, context_lengths = pad_batch(context_tokens, neox_args.tokenizer.eod, neox_args)
    context_tokens_tensor = torch.cuda.LongTensor(context_tokens)
    context_length_tensor = torch.cuda.LongTensor(context_lengths)
    # Broadcast the prompts from the model-parallel source rank so every
    # rank generates from identical data. NOTE: both broadcasts must run in
    # the same order on every rank of the group (collective ops match by
    # call order).
    torch.distributed.broadcast(context_length_tensor,
                                mpu.get_model_parallel_src_rank(),
                                group=mpu.get_model_parallel_group())
    torch.distributed.broadcast(context_tokens_tensor,
                                mpu.get_model_parallel_src_rank(),
                                group=mpu.get_model_parallel_group())
    # Generation starts after the shortest prompt in the batch.
    context_length = context_length_tensor.min().item()
    tokens, attention_mask, position_ids = get_batch(neox_args, context_tokens_tensor)
    batch_token_iterator = sample_sequence_batch(neox_args, model, context_tokens_tensor,
                                                 context_length_tensor,
                                                 attention_mask, position_ids)
    # Re-yield with a growing view over the token buffer: one new token
    # column becomes visible per generation step.
    for tokens, lengths in batch_token_iterator:
        context_length += 1
        yield tokens[:, :context_length], lengths
def switch(val1, val2, boolean):
    """
    Element-wise select: returns val2 where boolean is truthy, val1 elsewhere.
    """
    mask = boolean.type_as(val1)
    return val1 * (1 - mask) + val2 * mask
def forward_model(neox_args, model, model_inputs):
    """
    Runs model.forward(model_inputs).

    Deepspeed pipeline-parallel modules cannot be called like plain models,
    so this wrapper dispatches on the pipeline-parallel size.
    model: a Megatron model.
    model_inputs: tuple containing model args
    returns: result of model.forward(model_inputs)
    """
    torch.distributed.barrier()
    if neox_args.pipe_parallel_size > 1:
        # Pipe modules expect an iterator of (inputs, labels) pairs because
        # deepspeed is only built for training, so feed a fake label tensor
        # alongside the real inputs.
        fake_batch = [[model_inputs, torch.Tensor(1)]]
        return model.inference_batch(iter(fake_batch))
    # Pipe modules can't take kwargs, so a plain model is forwarded through
    # the wrapped module directly.
    return model.module(model_inputs)
def broadcast_terminate_signal(terminate_runs: int):
    """Broadcast *terminate_runs* from the source rank to all workers.

    returns: the broadcast value, so every rank agrees on whether to stop.
    """
    signal_tensor = torch.cuda.LongTensor([terminate_runs])
    torch.distributed.broadcast(
        signal_tensor,
        mpu.get_model_parallel_src_rank(),
        group=mpu.get_model_parallel_group(),
    )
    return signal_tensor[0].item()
def sample_sequence_batch(neox_args, model, context_tokens, context_lengths,
                          attention_mask, position_ids,
                          maxlen=None):
    """
    yields completions from a model as an iterator.
    model: a Megatron model.
    context_tokens: the prompt to complete.
    context_lengths: lengths of context tokens.
    attention_mask: attention mask for megatron model.
    position_ids: position ids for positional encoding.
    maxlen: optional hard cap on the total sequence length; defaults to
        seq_length - 1, clamped to context + out_seq_length.
    yields: tokens (completions from model), and lengths (lengths of completions)
    """
    model.eval()
    with torch.no_grad():
        context_length = context_lengths.min().item()
        eos_id = neox_args.tokenizer.eod
        counter = 0
        org_context_length = context_length
        batch_size = context_tokens.size(0)
        # Per-sample "finished" flags and generated-length bookkeeping.
        is_done = torch.zeros([batch_size]).byte().cuda()
        tokens = context_tokens
        layer_past = torch.Tensor().cuda()
        if maxlen is None:
            maxlen = neox_args.seq_length - 1
            if maxlen > (org_context_length + neox_args.out_seq_length):
                maxlen = org_context_length + neox_args.out_seq_length
        lengths = torch.ones([batch_size]).long().cuda() * maxlen
        while context_length <= maxlen:
            if neox_args.recompute:
                # Recompute the full forward pass at every step (no KV cache).
                # we need to use neox_args instead of kwargs here because deepspeed :|
                model_inputs = (tokens,
                                position_ids,
                                attention_mask,
                                torch.Tensor(),
                                )
                logits, _ = forward_model(neox_args, model, model_inputs)
                logits = logits[:, context_length - 1, :]
            else:
                # Incremental decoding: feed the whole prompt once, then only
                # the last generated token together with the cached past.
                if counter == 0:
                    tokens2use = tokens[:, :context_length]
                    positions2use = position_ids[:, :context_length]
                else:
                    tokens2use = tokens[:, context_length - 1].view(
                        batch_size, -1)
                    positions2use = position_ids[:, context_length - 1].view(
                        batch_size, -1)
                # we have to use neox_args instead of kwargs here because deepspeed :|
                model_inputs = (tokens2use,  # input_ids
                                positions2use,  # position_ids
                                attention_mask,  # attention_mask
                                layer_past,  # layer_past
                                )
                logits, layer_past = forward_model(neox_args, model, model_inputs)
                # TODO: we are replicating computation across all machines here, which is really unecessary,
                # we should probably just do it on one then communicate the results?
                logits = logits[:, -1].view(batch_size, -1).contiguous()
            if neox_args.greedy:
                prev = torch.argmax(logits, dim=-1).view(-1)
            else:
                # Temperature scaling followed by top-k / top-p filtered
                # multinomial sampling.
                logits = logits.float()
                logits /= neox_args.temperature
                logits = top_k_logits(logits, top_k=neox_args.top_k,
                                      top_p=neox_args.top_p)
                log_probs = F.softmax(logits, dim=-1)
                prev = torch.multinomial(log_probs, num_samples=1).view(-1)
            # (A dead block that collected per-token logits into an unused
            # `print_logits` list was removed here.)
            # Only write the sampled token for rows whose prompt has ended;
            # rows still inside their prompt keep the prompt token.
            started = context_lengths <= context_length
            tokens[:, context_length] = switch(
                tokens[:, context_length].view(-1), prev, started)
            context_length += 1
            counter += 1
            # Record the final length for rows that just produced EOS.
            done_token = (prev == eos_id).byte() & started.byte()
            just_finished = (done_token & ~is_done).bool()
            lengths[just_finished.view(-1)] = context_length
            is_done = is_done | done_token
            done = torch.all(is_done)
            yield tokens, lengths
            if done:
                break
def generate_samples_from_prompt(neox_args, model, text: Union[List[str], str]):
    """
    Generates samples from raw text and returns them in a dictionary.
    model: a Megatron model
    text: either a single prompt (str) or a list of prompts (List[str]).
    returns: List[dict] -> a list of dicts containing the following fields:
        - 'context' (the input)
        - 'text' (the completion)
        - 'length' (the length of the completion)
        - 'finished' (whether generation stopped before the max length)
    """
    # type check
    assert any([isinstance(text, str), isinstance(text, list)]), "Text should be in string or list form"
    if isinstance(text, str):
        text = [text]
    if is_mp_rank_0():
        input_count = len(text)
        input_pos = 0
    # generate completions
    iterations = 0
    generated_texts = []
    while True:
        start_time = time.time()
        # Tokenize text, and check whether we should terminate process
        terminate_runs = 0
        if is_mp_rank_0():
            if input_pos == input_count:
                terminate_runs = 1
            else:
                raw_text = text[input_pos]
                input_pos += 1
                context_tokens = neox_args.tokenizer.tokenize(raw_text)
                context_length = len(context_tokens)
                if context_length >= (neox_args.seq_length // 2):
                    print_rank_0("\nContext length", context_length,
                                 "\nPlease give smaller context (half of the "
                                 "sequence length)!", flush=True)
                    continue
        else:
            context_tokens = neox_args.tokenizer.tokenize("EMPTY TEXT")
            context_length = len(context_tokens)
        terminate_runs = broadcast_terminate_signal(terminate_runs)
        if terminate_runs == 1:
            return generated_texts
        # Drain the stream; only the last yielded (tokens, lengths) is used.
        for token_stream in get_token_stream(neox_args, model, copy.deepcopy([context_tokens])):
            pass
        token_batch = token_stream[0].cpu().numpy().tolist()
        length_batch = token_stream[1].cpu().numpy().tolist()
        for tokens, length in zip(token_batch, length_batch):
            tokens = tokens[1:length - 1]
            try:
                # BUG FIX: the completion used to be stored back into `text`,
                # clobbering the list of prompts being iterated over, so every
                # prompt after the first was read from the generated string.
                generated_text = neox_args.tokenizer.detokenize(tokens)
            except KeyError:
                print_rank_0("WARNING: generated token which doesn't exist. Skipping")
                continue
            is_finished = length < neox_args.seq_length - 1
            if is_mp_rank_0():
                data = {'context': raw_text, 'text': generated_text, 'length': length - 1, 'finished': is_finished}
                generated_texts.append(data)
                if iterations % neox_args.log_interval == 0:
                    print_rank_0('Avg s/batch:',
                                 (time.time() - start_time) / min(neox_args.log_interval, iterations + 1))
                    start_time = time.time()
            iterations += 1
    return generated_texts
def generate_samples_input_from_file(neox_args, model):
    """
    Generates samples from an input file and writes them to an output file.

    Reads prompts (one per line) from neox_args.sample_input_file and writes
    completions, one JSON document per line, to neox_args.sample_output_file
    (defaults to "<sample_input_file>.out" when unset).
    model: a Megatron model
    """
    # Read the sample file and open the output file.
    assert neox_args.sample_input_file is not None, \
        'sample input file is not provided.'
    with open(neox_args.sample_input_file, "r") as f:
        prompts = f.readlines()
    f_out = None
    if is_mp_rank_0():
        if neox_args.sample_output_file is None:
            sample_output_file = neox_args.sample_input_file + ".out"
            print_rank_0('could not find `sample-output-file`, setting '
                         'it to {}'.format(sample_output_file))
        else:
            sample_output_file = neox_args.sample_output_file
        f_out = open(sample_output_file, "w+")
    generated_texts = generate_samples_from_prompt(neox_args=neox_args, model=model, text=prompts)
    if is_mp_rank_0():
        for item in generated_texts:
            f_out.write(json.dumps(item) + '\n')
        # BUG FIX: the output file was never closed, so buffered completions
        # could be lost when the interpreter exits.
        f_out.close()
def generate_samples_interactive(neox_args, model, print_frequency=24):
    """
    Generates samples interactively in the terminal.
    model: a Megatron model
    print_frequency: int, how often (in tokens) to print the output.
    """
    context_count = 0
    model.eval()
    with torch.no_grad():
        while True:
            # Keep all model-parallel ranks in lockstep around the prompt.
            torch.distributed.barrier(group=mpu.get_model_parallel_group())
            terminate_runs = 0
            if is_mp_rank_0():
                os.system('clear')
                raw_text = input("\nContext prompt (stop to exit) >>> ")
                while not raw_text:
                    print_rank_0('Prompt should not be empty!')
                    raw_text = input("\nContext prompt (stop to exit) >>> ")
                # NOTE(review): any prompt *containing* "stop" terminates,
                # not just the literal word "stop" -- confirm this is intended.
                if "stop" in raw_text:
                    terminate_runs = 1
                else:
                    context_tokens = neox_args.tokenizer.tokenize(raw_text)
                    context_length = len(context_tokens)
                    if context_length >= (neox_args.seq_length // 2):
                        print_rank_0("\nContext length", context_length,
                                     "\nPlease give smaller context (half of the "
                                     "sequence length)!", flush=True)
                        continue
            else:
                # Non-source ranks feed placeholder tokens; the real prompt
                # is broadcast inside get_token_stream.
                context_tokens = neox_args.tokenizer.tokenize("EMPTY TEXT")
                context_length = len(context_tokens)
            terminate_runs = broadcast_terminate_signal(terminate_runs)
            if terminate_runs == 1:
                return
            token_stream = get_token_stream(neox_args, model, [context_tokens])
            for counter, decode_tokens in enumerate(token_stream):
                decode_tokens, _ = decode_tokens
                decode_tokens = decode_tokens[0].cpu().numpy().tolist()
                # Periodically refresh the terminal with the completion so far.
                if mpu.get_model_parallel_rank() == 0 and \
                        counter % print_frequency == 0:
                    os.system('clear')
                    print_rank_0("\nContext:", raw_text, flush=True)
                    # Slicing by len(raw_text) assumes detokenize round-trips
                    # the prompt exactly -- presumably true for this tokenizer.
                    trim_decode_tokens = neox_args.tokenizer.detokenize(
                        decode_tokens)[len(raw_text):]
                    print_rank_0("\nMegatron-LM:", trim_decode_tokens, flush=True)
            # Final print with the complete generation.
            if is_mp_rank_0():
                os.system('clear')
                print_rank_0("\nContext:", raw_text, flush=True)
                trim_decode_tokens = neox_args.tokenizer.detokenize(
                    decode_tokens)[len(raw_text):]
                print_rank_0("\nMegatron-LM:", trim_decode_tokens, flush=True)
            raw_text = None
            torch.distributed.barrier(group=mpu.get_model_parallel_group())
            context_count += 1
            if is_mp_rank_0():
                input("\nPress any key to continue >>>")
def generate_samples_unconditional(neox_args, model):
    """
    Generates samples unconditionially (no prompt) and yields them in a dictionary.
    model: a Megatron model
    yields: Dict -> a dict containing the following fields:
        - 'text' (the completion)
        - 'length' (the length of the completion)
        - 'finished' (whether generation stopped before the max length)
    """
    num_samples = neox_args.num_samples
    # Every prompt is just the EOD token: generation is unconditional.
    context_tokens = [[neox_args.tokenizer.eod]
                      for _ in range(neox_args.batch_size)]
    ctr = 0
    while True:
        start_time = time.time()
        token_stream = None
        # Drain the stream; only the last yielded (tokens, lengths) matters,
        # since each yield extends the previous one by one token
        # (e.g. 1 / 1,2 / 1,2,3 / ...).
        for token_stream in get_token_stream(neox_args, model, copy.deepcopy(context_tokens)):
            pass
        if token_stream is None:
            break
        if ctr % neox_args.log_interval == 0:
            print_rank_0('Avg s/batch:',
                         (time.time() - start_time) / min(neox_args.log_interval, ctr + 1))
            start_time = time.time()
        token_batch = token_stream[0].cpu().numpy().tolist()
        length_batch = token_stream[1].cpu().numpy().tolist()
        for tokens, length in zip(token_batch, length_batch):
            tokens = tokens[1:length - 1]
            try:
                text = neox_args.tokenizer.detokenize(tokens)
            except KeyError:
                print_rank_0("WARNING: generated token which doesn't exist. Skipping")
                continue
            is_finished = length < neox_args.seq_length - 1
            datum = {'text': text, 'length': length - 1, 'finished': is_finished}
            yield datum
            ctr += 1
            if ctr >= num_samples:
                break
        if ctr >= num_samples:
            break
def generate_and_write_samples_unconditional(neox_args, model):
    """
    Generates samples unconditionially (no prompt) and writes them, one JSON
    document per line, to the file at neox_args.genfile.
    model: a Megatron model
    """
    assert neox_args.genfile is not None
    genfile = neox_args.genfile
    # Create the output directory only when the path actually has one:
    # os.makedirs('') raises FileNotFoundError for a bare filename.
    genfile_dir = os.path.dirname(genfile)
    if genfile_dir:
        os.makedirs(genfile_dir, exist_ok=True)
    with open(genfile, 'w') as f:
        for n, datum in enumerate(generate_samples_unconditional(neox_args=neox_args, model=model), 1):
            f.write(json.dumps(datum) + '\n')
            # BUG FIX: the previous condition `n % log_interval` was truthy
            # for every n that is NOT a multiple of the interval, i.e. it
            # logged on almost every sample and skipped the intended ones.
            if n % neox_args.log_interval == 0:
                print_rank_0(f"Text generated and written: {n}")
| [
"torch.cuda.LongTensor",
"copy.deepcopy",
"megatron.mpu.get_model_parallel_src_rank",
"torch.nn.functional.softmax",
"megatron.utils.is_mp_rank_0",
"torch.distributed.barrier",
"megatron.mpu.get_model_parallel_group",
"json.dumps",
"megatron.utils.get_ltor_masks_and_position_ids",
"torch.argmax",
... | [((1364, 1525), 'megatron.utils.get_ltor_masks_and_position_ids', 'get_ltor_masks_and_position_ids', (['tokens', 'neox_args.tokenizer.eod', 'neox_args.reset_position_ids', 'neox_args.reset_attention_mask', 'neox_args.eod_mask_loss'], {}), '(tokens, neox_args.tokenizer.eod, neox_args.\n reset_position_ids, neox_args.reset_attention_mask, neox_args.eod_mask_loss\n )\n', (1395, 1525), False, 'from megatron.utils import get_ltor_masks_and_position_ids, is_mp_rank_0\n'), ((4413, 4450), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['context_tokens'], {}), '(context_tokens)\n', (4434, 4450), False, 'import torch\n'), ((4479, 4517), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['context_lengths'], {}), '(context_lengths)\n', (4500, 4517), False, 'import torch\n'), ((6155, 6182), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (6180, 6182), False, 'import torch\n'), ((6657, 6696), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['[terminate_runs]'], {}), '([terminate_runs])\n', (6678, 6696), False, 'import torch\n'), ((11677, 11691), 'megatron.utils.is_mp_rank_0', 'is_mp_rank_0', ([], {}), '()\n', (11689, 11691), False, 'from megatron.utils import get_ltor_masks_and_position_ids, is_mp_rank_0\n'), ((14527, 14541), 'megatron.utils.is_mp_rank_0', 'is_mp_rank_0', ([], {}), '()\n', (14539, 14541), False, 'from megatron.utils import get_ltor_masks_and_position_ids, is_mp_rank_0\n'), ((15028, 15042), 'megatron.utils.is_mp_rank_0', 'is_mp_rank_0', ([], {}), '()\n', (15040, 15042), False, 'from megatron.utils import get_ltor_masks_and_position_ids, is_mp_rank_0\n'), ((20083, 20107), 'os.path.dirname', 'os.path.dirname', (['genfile'], {}), '(genfile)\n', (20098, 20107), False, 'import os\n'), ((20112, 20151), 'os.makedirs', 'os.makedirs', (['genfile_dir'], {'exist_ok': '(True)'}), '(genfile_dir, exist_ok=True)\n', (20123, 20151), False, 'import os\n'), ((2727, 2770), 'torch.sort', 'torch.sort', (['logits'], {'descending': '(True)', 
'dim': '(-1)'}), '(logits, descending=True, dim=-1)\n', (2737, 2770), False, 'import torch\n'), ((4606, 4639), 'megatron.mpu.get_model_parallel_src_rank', 'mpu.get_model_parallel_src_rank', ([], {}), '()\n', (4637, 4639), False, 'from megatron import mpu\n'), ((4798, 4831), 'megatron.mpu.get_model_parallel_src_rank', 'mpu.get_model_parallel_src_rank', ([], {}), '()\n', (4829, 4831), False, 'from megatron import mpu\n'), ((6784, 6817), 'megatron.mpu.get_model_parallel_src_rank', 'mpu.get_model_parallel_src_rank', ([], {}), '()\n', (6815, 6817), False, 'from megatron import mpu\n'), ((7518, 7533), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7531, 7533), False, 'import torch\n'), ((11856, 11867), 'time.time', 'time.time', ([], {}), '()\n', (11865, 11867), False, 'import time\n'), ((11978, 11992), 'megatron.utils.is_mp_rank_0', 'is_mp_rank_0', ([], {}), '()\n', (11990, 11992), False, 'from megatron.utils import get_ltor_masks_and_position_ids, is_mp_rank_0\n'), ((15420, 15435), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15433, 15435), False, 'import torch\n'), ((18514, 18525), 'time.time', 'time.time', ([], {}), '()\n', (18523, 18525), False, 'import time\n'), ((2824, 2856), 'torch.nn.functional.softmax', 'F.softmax', (['sorted_logits'], {'dim': '(-1)'}), '(sorted_logits, dim=-1)\n', (2833, 2856), True, 'import torch.nn.functional as F\n'), ((4679, 4709), 'megatron.mpu.get_model_parallel_group', 'mpu.get_model_parallel_group', ([], {}), '()\n', (4707, 4709), False, 'from megatron import mpu\n'), ((4871, 4901), 'megatron.mpu.get_model_parallel_group', 'mpu.get_model_parallel_group', ([], {}), '()\n', (4899, 4901), False, 'from megatron import mpu\n'), ((6857, 6887), 'megatron.mpu.get_model_parallel_group', 'mpu.get_model_parallel_group', ([], {}), '()\n', (6885, 6887), False, 'from megatron import mpu\n'), ((10933, 10951), 'torch.all', 'torch.all', (['is_done'], {}), '(is_done)\n', (10942, 10951), False, 'import torch\n'), ((12933, 12964), 
'copy.deepcopy', 'copy.deepcopy', (['[context_tokens]'], {}), '([context_tokens])\n', (12946, 12964), False, 'import copy\n'), ((13507, 13521), 'megatron.utils.is_mp_rank_0', 'is_mp_rank_0', ([], {}), '()\n', (13519, 13521), False, 'from megatron.utils import get_ltor_masks_and_position_ids, is_mp_rank_0\n'), ((15580, 15594), 'megatron.utils.is_mp_rank_0', 'is_mp_rank_0', ([], {}), '()\n', (15592, 15594), False, 'from megatron.utils import get_ltor_masks_and_position_ids, is_mp_rank_0\n'), ((17445, 17459), 'megatron.utils.is_mp_rank_0', 'is_mp_rank_0', ([], {}), '()\n', (17457, 17459), False, 'from megatron.utils import get_ltor_masks_and_position_ids, is_mp_rank_0\n'), ((17912, 17926), 'megatron.utils.is_mp_rank_0', 'is_mp_rank_0', ([], {}), '()\n', (17924, 17926), False, 'from megatron.utils import get_ltor_masks_and_position_ids, is_mp_rank_0\n'), ((18618, 18647), 'copy.deepcopy', 'copy.deepcopy', (['context_tokens'], {}), '(context_tokens)\n', (18631, 18647), False, 'import copy\n'), ((18962, 18973), 'time.time', 'time.time', ([], {}), '()\n', (18971, 18973), False, 'import time\n'), ((7850, 7864), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (7862, 7864), False, 'import torch\n'), ((10174, 10199), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (10183, 10199), True, 'import torch.nn.functional as F\n'), ((15612, 15630), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (15621, 15630), False, 'import os\n'), ((17477, 17495), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (17486, 17495), False, 'import os\n'), ((17512, 17560), 'megatron.print_rank_0', 'print_rank_0', (['"""\nContext:"""', 'raw_text'], {'flush': '(True)'}), "('\\nContext:', raw_text, flush=True)\n", (17524, 17560), False, 'from megatron import print_rank_0\n'), ((17697, 17759), 'megatron.print_rank_0', 'print_rank_0', (['"""\nMegatron-LM:"""', 'trim_decode_tokens'], {'flush': '(True)'}), "('\\nMegatron-LM:', 
trim_decode_tokens, flush=True)\n", (17709, 17759), False, 'from megatron import print_rank_0\n'), ((20407, 20455), 'megatron.print_rank_0', 'print_rank_0', (['f"""Text generated and written: {n}"""'], {}), "(f'Text generated and written: {n}')\n", (20419, 20455), False, 'from megatron import print_rank_0\n'), ((2549, 2574), 'torch.topk', 'torch.topk', (['logits', 'top_k'], {}), '(logits, top_k)\n', (2559, 2574), False, 'import torch\n'), ((6335, 6350), 'torch.Tensor', 'torch.Tensor', (['(1)'], {}), '(1)\n', (6347, 6350), False, 'import torch\n'), ((8487, 8501), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (8499, 8501), False, 'import torch\n'), ((12375, 12510), 'megatron.print_rank_0', 'print_rank_0', (['"""\nContext length"""', 'context_length', '"""\nPlease give smaller context (half of the sequence length)!"""'], {'flush': '(True)'}), '(\'\\nContext length\', context_length,\n """\nPlease give smaller context (half of the sequence length)!""",\n flush=True)\n', (12387, 12510), False, 'from megatron import print_rank_0\n'), ((13335, 13405), 'megatron.print_rank_0', 'print_rank_0', (['"""WARNING: generated token which doesn\'t exist. Skipping"""'], {}), '("WARNING: generated token which doesn\'t exist. 
Skipping")\n', (13347, 13405), False, 'from megatron import print_rank_0\n'), ((13924, 13935), 'time.time', 'time.time', ([], {}), '()\n', (13933, 13935), False, 'import time\n'), ((15105, 15121), 'json.dumps', 'json.dumps', (['item'], {}), '(item)\n', (15115, 15121), False, 'import json\n'), ((15501, 15531), 'megatron.mpu.get_model_parallel_group', 'mpu.get_model_parallel_group', ([], {}), '()\n', (15529, 15531), False, 'from megatron import mpu\n'), ((15760, 15803), 'megatron.print_rank_0', 'print_rank_0', (['"""Prompt should not be empty!"""'], {}), "('Prompt should not be empty!')\n", (15772, 15803), False, 'from megatron import print_rank_0\n'), ((17130, 17148), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (17139, 17148), False, 'import os\n'), ((17169, 17217), 'megatron.print_rank_0', 'print_rank_0', (['"""\nContext:"""', 'raw_text'], {'flush': '(True)'}), "('\\nContext:', raw_text, flush=True)\n", (17181, 17217), False, 'from megatron import print_rank_0\n'), ((17366, 17428), 'megatron.print_rank_0', 'print_rank_0', (['"""\nMegatron-LM:"""', 'trim_decode_tokens'], {'flush': '(True)'}), "('\\nMegatron-LM:', trim_decode_tokens, flush=True)\n", (17378, 17428), False, 'from megatron import print_rank_0\n'), ((17833, 17863), 'megatron.mpu.get_model_parallel_group', 'mpu.get_model_parallel_group', ([], {}), '()\n', (17861, 17863), False, 'from megatron import mpu\n'), ((19361, 19431), 'megatron.print_rank_0', 'print_rank_0', (['"""WARNING: generated token which doesn\'t exist. Skipping"""'], {}), '("WARNING: generated token which doesn\'t exist. 
Skipping")\n', (19373, 19431), False, 'from megatron import print_rank_0\n'), ((20311, 20328), 'json.dumps', 'json.dumps', (['datum'], {}), '(datum)\n', (20321, 20328), False, 'import json\n'), ((7757, 7782), 'torch.zeros', 'torch.zeros', (['[batch_size]'], {}), '([batch_size])\n', (7768, 7782), False, 'import torch\n'), ((9872, 9900), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (9884, 9900), False, 'import torch\n'), ((10223, 10266), 'torch.multinomial', 'torch.multinomial', (['log_probs'], {'num_samples': '(1)'}), '(log_probs, num_samples=1)\n', (10240, 10266), False, 'import torch\n'), ((16210, 16345), 'megatron.print_rank_0', 'print_rank_0', (['"""\nContext length"""', 'context_length', '"""\nPlease give smaller context (half of the sequence length)!"""'], {'flush': '(True)'}), '(\'\\nContext length\', context_length,\n """\nPlease give smaller context (half of the sequence length)!""",\n flush=True)\n', (16222, 16345), False, 'from megatron import print_rank_0\n'), ((17013, 17042), 'megatron.mpu.get_model_parallel_rank', 'mpu.get_model_parallel_rank', ([], {}), '()\n', (17040, 17042), False, 'from megatron import mpu\n'), ((18871, 18882), 'time.time', 'time.time', ([], {}), '()\n', (18880, 18882), False, 'import time\n'), ((8109, 8133), 'torch.ones', 'torch.ones', (['[batch_size]'], {}), '([batch_size])\n', (8119, 8133), False, 'import torch\n'), ((13818, 13829), 'time.time', 'time.time', ([], {}), '()\n', (13827, 13829), False, 'import time\n')] |
from setuptools import setup, find_packages

# Packaging metadata for the pystacking distribution, collected in one
# mapping and splatted into setup() below.
PACKAGE_INFO = dict(
    name="pystacking",
    version="0.1.0",
    description="Python Machine Learning Stacking Maker",
    author="<NAME>",
    author_email="<EMAIL>",
    license="MIT",
    packages=find_packages(),
    python_requires=">=3.5",
    tests_require=["pytest"],
)

setup(**PACKAGE_INFO)
| [
"setuptools.find_packages"
] | [((243, 258), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (256, 258), False, 'from setuptools import setup, find_packages\n')] |
import glob
import csv
import os


def _write_row(filename, items):
    """Write *items* as one fully-quoted CSV row to *filename*."""
    with open(filename, 'w', newline='') as fh:
        csv.writer(fh, quoting=csv.QUOTE_ALL).writerow(items)


# Sort the glob results: glob order is filesystem-dependent, so without
# sorting the train/val/test membership differs between runs/machines.
txt = sorted(glob.glob("/mnt/edisk/backup/dataset/semantic_raw/*.txt"))
print(len(txt))
# NOTE(review): slice bounds skip indices 236 and 241 entirely
# (0:236, 237:241, 242:246) — confirm this fencepost is intentional.
txt_train = txt[0:236]
txt_val = txt[237:241]
txt_test = txt[242:246]
os.chdir("/mnt/edisk/backup/filelists")
_write_row('FileList_train.txt', txt_train)
_write_row('FileList_val.txt', txt_val)
_write_row('FileList_test.txt', txt_test)
# Labels exist only for train/val; the test split has no .labels files.
labels = sorted(glob.glob("/mnt/edisk/backup/dataset/semantic_raw/*.labels"))
label_train = labels[0:236]
label_val = labels[237:241]
_write_row('LabelList_train.txt', label_train)
_write_row('LabelList_val.txt', label_val)
| [
"os.chdir",
"csv.writer",
"glob.glob"
] | [((42, 99), 'glob.glob', 'glob.glob', (['"""/mnt/edisk/backup/dataset/semantic_raw/*.txt"""'], {}), "('/mnt/edisk/backup/dataset/semantic_raw/*.txt')\n", (51, 99), False, 'import glob\n'), ((193, 232), 'os.chdir', 'os.chdir', (['"""/mnt/edisk/backup/filelists"""'], {}), "('/mnt/edisk/backup/filelists')\n", (201, 232), False, 'import os\n'), ((669, 729), 'glob.glob', 'glob.glob', (['"""/mnt/edisk/backup/dataset/semantic_raw/*.labels"""'], {}), "('/mnt/edisk/backup/dataset/semantic_raw/*.labels')\n", (678, 729), False, 'import glob\n'), ((308, 349), 'csv.writer', 'csv.writer', (['myfile'], {'quoting': 'csv.QUOTE_ALL'}), '(myfile, quoting=csv.QUOTE_ALL)\n', (318, 349), False, 'import csv\n'), ((449, 490), 'csv.writer', 'csv.writer', (['myfile'], {'quoting': 'csv.QUOTE_ALL'}), '(myfile, quoting=csv.QUOTE_ALL)\n', (459, 490), False, 'import csv\n'), ((589, 630), 'csv.writer', 'csv.writer', (['myfile'], {'quoting': 'csv.QUOTE_ALL'}), '(myfile, quoting=csv.QUOTE_ALL)\n', (599, 630), False, 'import csv\n'), ((866, 907), 'csv.writer', 'csv.writer', (['myfile'], {'quoting': 'csv.QUOTE_ALL'}), '(myfile, quoting=csv.QUOTE_ALL)\n', (876, 907), False, 'import csv\n'), ((1010, 1051), 'csv.writer', 'csv.writer', (['myfile'], {'quoting': 'csv.QUOTE_ALL'}), '(myfile, quoting=csv.QUOTE_ALL)\n', (1020, 1051), False, 'import csv\n')] |
import requests
from pkcs7_detached import verify_detached_signature, aws_certificates
import json
from pprint import pprint

# EC2 instance metadata endpoint serving the identity document and its
# detached PKCS#7 signature.
_IDENTITY_BASE = "http://169.254.169.254/latest/dynamic/instance-identity"


def main():
    """Fetch the EC2 instance identity document and verify its PKCS#7 signature."""
    print("Verifying ec2 instance identity document")
    document = requests.get(_IDENTITY_BASE + "/document").text
    signature = requests.get(_IDENTITY_BASE + "/pkcs7").text
    if not verify_detached_signature(
        document, signature, aws_certificates.PUBLIC_REGIONS
    ):
        print("Identity is not valid")
        return
    print("Verified")
    pprint(json.loads(document))


if __name__ == "__main__":
    main()
| [
"pprint.pprint",
"json.loads",
"pkcs7_detached.verify_detached_signature",
"requests.get"
] | [((202, 287), 'requests.get', 'requests.get', (['"""http://169.254.169.254/latest/dynamic/instance-identity/document"""'], {}), "('http://169.254.169.254/latest/dynamic/instance-identity/document'\n )\n", (214, 287), False, 'import requests\n'), ((323, 400), 'requests.get', 'requests.get', (['"""http://169.254.169.254/latest/dynamic/instance-identity/pkcs7"""'], {}), "('http://169.254.169.254/latest/dynamic/instance-identity/pkcs7')\n", (335, 400), False, 'import requests\n'), ((428, 517), 'pkcs7_detached.verify_detached_signature', 'verify_detached_signature', (['identity_document', 'pkcs7', 'aws_certificates.PUBLIC_REGIONS'], {}), '(identity_document, pkcs7, aws_certificates.\n PUBLIC_REGIONS)\n', (453, 517), False, 'from pkcs7_detached import verify_detached_signature, aws_certificates\n'), ((573, 602), 'json.loads', 'json.loads', (['identity_document'], {}), '(identity_document)\n', (583, 602), False, 'import json\n'), ((611, 627), 'pprint.pprint', 'pprint', (['identity'], {}), '(identity)\n', (617, 627), False, 'from pprint import pprint\n')] |
from flask import Flask
from config import config
from flask_sqlalchemy import SQLAlchemy
# app.config['SECRET_KEY'] = '666666'
# ... add more variables here as needed
# app.config.from_object('config') # load the configuration file
# app.config.from_object(config[config_name])
# config[config_name].init_app(app)
# Module-level SQLAlchemy handle (application-factory pattern): created
# unbound here and attached to a concrete app inside create_app().
db = SQLAlchemy()
def create_app(config_name):
    """Application factory: build and configure a Flask app.

    Args:
        config_name: key into the project's ``config`` mapping selecting
            which configuration object to load.

    Returns:
        A configured :class:`flask.Flask` instance with the ``main``
        blueprint registered and ``db`` bound to it.
    """
    app = Flask(__name__) # , static_url_path='/app/static')
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    # Views must not be imported before ``db`` is created above; importing
    # them earlier triggers a circular-import error on ``db``.
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    # from .admin import admin as admin_blueprint
    # app.register_blueprint(admin_blueprint, url_prefix='/admin')
    db.init_app(app)
    return app
# from app.front import routes
"flask_sqlalchemy.SQLAlchemy",
"flask.Flask"
] | [((302, 314), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (312, 314), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((355, 370), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (360, 370), False, 'from flask import Flask\n')] |
from timeit import repeat
from subprocess import check_output
def timer(arg, niter, name, module):
    """Benchmark one call of ``module.name(arg)``; return nanoseconds per call.

    Each trial executes the statement ``niter`` times; the fastest trial is
    used, then normalised to a single call and converted to nanoseconds.
    """
    call_stmt = "{}({})".format(name, arg)
    import_stmt = "from {} import {}".format(module, name)
    best_trial = min(repeat(stmt=call_stmt, setup=import_stmt, number=niter))
    return best_trial / float(niter) * 1e9
# --- fib(0): trivial workload, dominated by call overhead -------------------
N = 10**6
pytime_0 = timer(0, N, name='fib', module='fib')
cytime_0 = timer(0, N, name='fib', module='cyfib')
cexttime_0 = timer(0, N, name='fib', module='cfib')
# The standalone C binary prints its own per-call time; parse it as a float.
ctime_0 = float(check_output(('./cfib.x 0 %d' % N).split()))
# Speedups are relative to the pure-Python implementation (baseline 1.0).
py0speedup = 1.0
cy0speedup = pytime_0 / cytime_0
cext0speedup = pytime_0 / cexttime_0
c0speedup = pytime_0 / ctime_0
# --- fib(90): heavier workload, so fewer iterations per trial ---------------
N = 10**5
pytime_90 = timer(90, N, name='fib', module='fib')
cytime_90 = timer(90, N, name='fib', module='cyfib')
cexttime_90 = timer(90, N, name='fib', module='cfib')
ctime_90 = float(check_output(('./cfib.x 90 %d' % N).split()))
py90speedup = 1.0
cy90speedup = pytime_90 / cytime_90
cext90speedup = pytime_90 / cexttime_90
c90speedup = pytime_90 / ctime_90
# Emit a CSV summary; times in ns, speedups with one decimal place.
data_format = "{:s},{:.0f},{:.1f},{:.0f},{:.1f}\n"
with open("fib.csv", 'w') as fh:
    fh.write("Version,fib(0) runtime [ns],speedup,fib(90) runtime [ns],speedup\n")
    fh.write(data_format.format("Python", pytime_0, py0speedup, pytime_90, py90speedup))
    fh.write(data_format.format("C extension", cexttime_0, cext0speedup, cexttime_90, cext90speedup))
    fh.write(data_format.format("Cython", cytime_0, cy0speedup, cytime_90, cy90speedup))
    fh.write(data_format.format("Pure C", ctime_0, c0speedup, ctime_90, c90speedup))
| [
"timeit.repeat"
] | [((198, 242), 'timeit.repeat', 'repeat', ([], {'stmt': 'stmt', 'setup': 'setup', 'number': 'niter'}), '(stmt=stmt, setup=setup, number=niter)\n', (204, 242), False, 'from timeit import repeat\n')] |
import torch
import torch.nn as nn
from torch.nn import init
from encoders import *
from aggregators import *
class SupervisedGraphSage(nn.Module):
    """Supervised node classifier on top of a GraphSage encoder.

    Projects node embeddings produced by ``enc`` onto ``num_classes``
    class scores via a learned linear map and trains with weighted
    cross-entropy.
    """

    def __init__(self, num_classes, enc, w):
        """
        w - array of len(num_classes) giving the per-class weight used by
        the cross-entropy loss.
        """
        super(SupervisedGraphSage, self).__init__()
        self.enc = enc
        self.w = w
        self.xent = nn.CrossEntropyLoss(weight=self.w)
        # Linear projection: (num_classes x embed_dim), Xavier-initialised.
        self.weight = nn.Parameter(torch.FloatTensor(num_classes, enc.embed_dim))
        init.xavier_uniform_(self.weight)

    def forward(self, nodes):
        """Return class scores of shape (len(nodes), num_classes)."""
        return self.weight.mm(self.enc(nodes)).t()

    def loss(self, nodes, labels):
        """Weighted cross-entropy between predicted scores and ``labels``."""
        return self.xent(self.forward(nodes), labels.squeeze())
def createGNN(args, features, adj_list, num_features, class_weights):
    """Build a SupervisedGraphSage model for ``args.model_type == 'GraphSage'``.

    Returns None for any other model type (matching the original implicit
    fall-through).
    """
    if args.model_type != 'GraphSage':
        return None
    aggregator = MeanAggregator(features, cuda=False)
    encoder = Encoder(features, num_features, args.hidden_dim, adj_list, aggregator, gcn=False, cuda=False)
    encoder.num_samples = 25  # sample 25 neighbors per node when aggregating
    return SupervisedGraphSage(len(class_weights), encoder, torch.FloatTensor(class_weights))
| [
"torch.nn.init.xavier_uniform_",
"torch.FloatTensor",
"torch.nn.CrossEntropyLoss"
] | [((440, 474), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'weight': 'self.w'}), '(weight=self.w)\n', (459, 474), True, 'import torch.nn as nn\n'), ((565, 598), 'torch.nn.init.xavier_uniform_', 'init.xavier_uniform_', (['self.weight'], {}), '(self.weight)\n', (585, 598), False, 'from torch.nn import init\n'), ((510, 555), 'torch.FloatTensor', 'torch.FloatTensor', (['num_classes', 'enc.embed_dim'], {}), '(num_classes, enc.embed_dim)\n', (527, 555), False, 'import torch\n'), ((1252, 1284), 'torch.FloatTensor', 'torch.FloatTensor', (['class_weights'], {}), '(class_weights)\n', (1269, 1284), False, 'import torch\n')] |
"""Serialize transcript expression data and miRNA site features to TFRecords.

Reads a TPM expression table, ORF sequences and a miRNA sequence table, then
writes one tf.train.SequenceExample per transcript.  Uses the TF 1.x
``tf.python_io`` API.
"""
from optparse import OptionParser
import os
import sys
import time
import numpy as np
import pandas as pd
import tensorflow as tf
import utils
import get_site_features
import tf_utils
# NOTE(review): ``os``, ``sys`` and ``time`` appear unused in this chunk —
# possibly needed elsewhere in the file; confirm before removing.
np.set_printoptions(threshold=np.inf, linewidth=200)
pd.options.mode.chained_assignment = None
if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option("--tpm_file", dest="TPM_FILE", help="tpm data")
    parser.add_option("--orf_file", dest="ORF_FILE", help="ORF sequences in tsv format")
    parser.add_option("--mirseqs", dest="MIR_SEQS", help="tsv with miRNAs and their sequences")
    parser.add_option("--mirlen", dest="MIRLEN", type=int)
    parser.add_option("-w", "--outfile", dest="OUTFILE", help="location for tfrecords")
    parser.add_option("--overlap_dist", dest="OVERLAP_DIST", help="minimum distance between neighboring sites", type=int)
    parser.add_option("--only_canon", dest="ONLY_CANON", help="only use canonical sites", default=False, action='store_true')
    (options, args) = parser.parse_args()
    ### READ miRNA DATA and filter for ones to keep ###
    MIRNAS = pd.read_csv(options.MIR_SEQS, sep='\t')
    MIRNAS = MIRNAS[MIRNAS['use_tpms']]
    ALL_GUIDES = sorted(list(MIRNAS['mir'].values))
    # Build a lookup of guide and passenger ('*') strand info per miRNA:
    # 'site8' is the 8mer target site (rev-comp of positions 2-8 plus 'A').
    MIR_DICT = {}
    for row in MIRNAS.iterrows():
        guide_seq = row[1]['guide_seq']
        pass_seq = row[1]['pass_seq']
        MIR_DICT[row[1]['mir']] = {
            'mirseq': guide_seq,
            'site8': utils.rev_comp(guide_seq[1:8]) + 'A',
            'one_hot': utils.one_hot_encode(guide_seq[:options.MIRLEN])
        }
        MIR_DICT[row[1]['mir'] + '*'] = {
            'mirseq': pass_seq,
            'site8': utils.rev_comp(pass_seq[1:8]) + 'A',
            'one_hot': utils.one_hot_encode(pass_seq[:options.MIRLEN])
        }
    ### READ EXPRESSION DATA ###
    TPM = pd.read_csv(options.TPM_FILE, sep='\t', index_col=0).sort_index()
    # Every miRNA we model must have a TPM column.
    for mir in ALL_GUIDES:
        if mir not in TPM.columns:
            raise ValueError('{} given in mirseqs file but not in TPM file.'.format(mir))
    # Assign each transcript to one of 10 batches by its 'ix' column.
    num_batches = 10
    TPM['batch'] = [ix % num_batches for ix in TPM['ix']]
    print("Using mirs: {}".format(ALL_GUIDES))
    # read in orf sequences
    ORF_SEQS = pd.read_csv(options.ORF_FILE, sep='\t', header=None, index_col=0)
    feature_names = ['mir', 'tpm', 'orf_guide_1hot', 'utr3_guide_1hot',
        'orf_pass_1hot', 'utr3_pass_1hot']
    with tf.python_io.TFRecordWriter(options.OUTFILE) as tfwriter:
        for ix, row in enumerate(TPM.iterrows()):
            # print progress
            if ix % 100 == 0:
                print("Processed {}/{} transcripts".format(ix, len(TPM)))
            transcript = row[0]
            utr3 = row[1]['sequence']
            orf = ORF_SEQS.loc[transcript][2]
            # Full transcript = ORF followed by 3'UTR; orf_length separates
            # site positions into ORF vs UTR features below.
            transcript_sequence = orf + utr3
            orf_length = len(orf)
            context_dict = tf.train.Features(feature={
                'transcript': tf_utils._bytes_feature(transcript.encode('utf-8')),
                'batch': tf_utils._int64_feature([row[1]['batch']])
            })
            total_transcript_sites = 0
            # One parallel list per entry in feature_names.
            features = [[], [], [], [], [], []]
            for mir in ALL_GUIDES:
                site8 = MIR_DICT[mir]['site8']
                mirseq = MIR_DICT[mir]['mirseq']
                site8_star = MIR_DICT[mir + '*']['site8']
                mirseq_star = MIR_DICT[mir + '*']['mirseq']
                features[0].append(tf_utils._bytes_feature(mir.encode('utf-8'))) # mir
                features[1].append(tf_utils._float_feature([row[1][mir]])) # tpm
                # get sites for guide strand
                seqs, locs = get_site_features.get_sites_from_utr(transcript_sequence, site8, overlap_dist=options.OVERLAP_DIST, only_canon=options.ONLY_CANON)
                # Sites located before orf_length fall in the ORF; the rest in the 3'UTR.
                num_orf_sites = len([l for l in locs if l < orf_length])
                orf_sites = utils.mir_site_pair_to_ints(mirseq[:options.MIRLEN], ''.join(seqs[:num_orf_sites]))
                utr3_sites = utils.mir_site_pair_to_ints(mirseq[:options.MIRLEN], ''.join(seqs[num_orf_sites:]))
                features[2].append(tf_utils._int64_feature(orf_sites))
                features[3].append(tf_utils._int64_feature(utr3_sites))
                total_transcript_sites += len(locs)
                # get sites for passenger (star) strand
                seqs, locs = get_site_features.get_sites_from_utr(transcript_sequence, site8_star, overlap_dist=options.OVERLAP_DIST, only_canon=options.ONLY_CANON)
                num_orf_sites = len([l for l in locs if l < orf_length])
                orf_sites = utils.mir_site_pair_to_ints(mirseq_star[:options.MIRLEN], ''.join(seqs[:num_orf_sites]))
                utr3_sites = utils.mir_site_pair_to_ints(mirseq_star[:options.MIRLEN], ''.join(seqs[num_orf_sites:]))
                features[4].append(tf_utils._int64_feature(orf_sites))
                features[5].append(tf_utils._int64_feature(utr3_sites))
                total_transcript_sites += len(locs)
                # features[0].append(tf_utils._bytes_feature(mir.encode('utf-8'))) # mir
                # features[1].append(tf_utils._float_feature([row[1][mir]])) # tpm
                # features[2].append(tf_utils._int64_feature(utils.one_hot_encode(mirseq[:options.MIRLEN]))) # mirseq
                # assert len(utils.one_hot_encode(mirseq[:options.MIRLEN])) == 40
                # # get sites for guide strand
                # seqs, locs = get_site_features.get_sites_from_utr(transcript_sequence, site8, overlap_dist=options.OVERLAP_DIST, only_canon=options.ONLY_CANON)
                # num_orf_sites = len([l for l in locs if l < orf_length])
                # orf_sites = ''.join(seqs[:num_orf_sites])
                # utr3_sites = ''.join(seqs[num_orf_sites:])
                # features[3].append(tf_utils._int64_feature(utils.one_hot_encode(orf_sites)))
                # features[4].append(tf_utils._int64_feature(utils.one_hot_encode(orf_sites)))
                # total_transcript_sites += len(locs)
                # features[5].append(tf_utils._int64_feature(utils.one_hot_encode(mirseq_star[:options.MIRLEN]))) # mirseq*
                # assert len(utils.one_hot_encode(mirseq_star[:options.MIRLEN])) == 40
                # # get sites for guide strand
                # seqs, locs = get_site_features.get_sites_from_utr(transcript_sequence, site8_star, overlap_dist=options.OVERLAP_DIST, only_canon=options.ONLY_CANON)
                # num_orf_sites = len([l for l in locs if l < orf_length])
                # orf_sites = ''.join(seqs[:num_orf_sites])
                # utr3_sites = ''.join(seqs[num_orf_sites:])
                # features[6].append(tf_utils._int64_feature(utils.one_hot_encode(orf_sites)))
                # features[7].append(tf_utils._int64_feature(utils.one_hot_encode(orf_sites)))
                # total_transcript_sites += len(locs)
            print(total_transcript_sites)
            # Only transcripts with at least one site get a record.
            if total_transcript_sites > 0:
                feature_dict = tf.train.FeatureLists(feature_list={
                    feature_names[ix]: tf.train.FeatureList(feature=features[ix]) for ix in range(len(feature_names))
                })
                # Create the SequenceExample
                example = tf.train.SequenceExample(context=context_dict,
                                                   feature_lists=feature_dict)
                tfwriter.write(example.SerializeToString())
            else:
                print('Skipping {} because no sites found'.format(transcript))
| [
"tf_utils._float_feature",
"pandas.read_csv",
"tf_utils._int64_feature",
"optparse.OptionParser",
"get_site_features.get_sites_from_utr",
"utils.rev_comp",
"tensorflow.train.FeatureList",
"utils.one_hot_encode",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.train.SequenceExample",
"numpy.se... | [((187, 239), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf', 'linewidth': '(200)'}), '(threshold=np.inf, linewidth=200)\n', (206, 239), True, 'import numpy as np\n'), ((325, 339), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (337, 339), False, 'from optparse import OptionParser\n'), ((1103, 1142), 'pandas.read_csv', 'pd.read_csv', (['options.MIR_SEQS'], {'sep': '"""\t"""'}), "(options.MIR_SEQS, sep='\\t')\n", (1114, 1142), True, 'import pandas as pd\n'), ((2224, 2289), 'pandas.read_csv', 'pd.read_csv', (['options.ORF_FILE'], {'sep': '"""\t"""', 'header': 'None', 'index_col': '(0)'}), "(options.ORF_FILE, sep='\\t', header=None, index_col=0)\n", (2235, 2289), True, 'import pandas as pd\n'), ((2430, 2474), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['options.OUTFILE'], {}), '(options.OUTFILE)\n', (2457, 2474), True, 'import tensorflow as tf\n'), ((1517, 1565), 'utils.one_hot_encode', 'utils.one_hot_encode', (['guide_seq[:options.MIRLEN]'], {}), '(guide_seq[:options.MIRLEN])\n', (1537, 1565), False, 'import utils\n'), ((1732, 1779), 'utils.one_hot_encode', 'utils.one_hot_encode', (['pass_seq[:options.MIRLEN]'], {}), '(pass_seq[:options.MIRLEN])\n', (1752, 1779), False, 'import utils\n'), ((1834, 1886), 'pandas.read_csv', 'pd.read_csv', (['options.TPM_FILE'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(options.TPM_FILE, sep='\\t', index_col=0)\n", (1845, 1886), True, 'import pandas as pd\n'), ((1456, 1486), 'utils.rev_comp', 'utils.rev_comp', (['guide_seq[1:8]'], {}), '(guide_seq[1:8])\n', (1470, 1486), False, 'import utils\n'), ((1672, 1701), 'utils.rev_comp', 'utils.rev_comp', (['pass_seq[1:8]'], {}), '(pass_seq[1:8])\n', (1686, 1701), False, 'import utils\n'), ((3676, 3810), 'get_site_features.get_sites_from_utr', 'get_site_features.get_sites_from_utr', (['transcript_sequence', 'site8'], {'overlap_dist': 'options.OVERLAP_DIST', 'only_canon': 'options.ONLY_CANON'}), 
'(transcript_sequence, site8,\n overlap_dist=options.OVERLAP_DIST, only_canon=options.ONLY_CANON)\n', (3712, 3810), False, 'import get_site_features\n'), ((4375, 4514), 'get_site_features.get_sites_from_utr', 'get_site_features.get_sites_from_utr', (['transcript_sequence', 'site8_star'], {'overlap_dist': 'options.OVERLAP_DIST', 'only_canon': 'options.ONLY_CANON'}), '(transcript_sequence, site8_star,\n overlap_dist=options.OVERLAP_DIST, only_canon=options.ONLY_CANON)\n', (4411, 4514), False, 'import get_site_features\n'), ((7273, 7347), 'tensorflow.train.SequenceExample', 'tf.train.SequenceExample', ([], {'context': 'context_dict', 'feature_lists': 'feature_dict'}), '(context=context_dict, feature_lists=feature_dict)\n', (7297, 7347), True, 'import tensorflow as tf\n'), ((3554, 3592), 'tf_utils._float_feature', 'tf_utils._float_feature', (['[row[1][mir]]'], {}), '([row[1][mir]])\n', (3577, 3592), False, 'import tf_utils\n'), ((4140, 4174), 'tf_utils._int64_feature', 'tf_utils._int64_feature', (['orf_sites'], {}), '(orf_sites)\n', (4163, 4174), False, 'import tf_utils\n'), ((4211, 4246), 'tf_utils._int64_feature', 'tf_utils._int64_feature', (['utr3_sites'], {}), '(utr3_sites)\n', (4234, 4246), False, 'import tf_utils\n'), ((4854, 4888), 'tf_utils._int64_feature', 'tf_utils._int64_feature', (['orf_sites'], {}), '(orf_sites)\n', (4877, 4888), False, 'import tf_utils\n'), ((4925, 4960), 'tf_utils._int64_feature', 'tf_utils._int64_feature', (['utr3_sites'], {}), '(utr3_sites)\n', (4948, 4960), False, 'import tf_utils\n'), ((3032, 3074), 'tf_utils._int64_feature', 'tf_utils._int64_feature', (["[row[1]['batch']]"], {}), "([row[1]['batch']])\n", (3055, 3074), False, 'import tf_utils\n'), ((7103, 7145), 'tensorflow.train.FeatureList', 'tf.train.FeatureList', ([], {'feature': 'features[ix]'}), '(feature=features[ix])\n', (7123, 7145), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# RiKi settings dialog.
# https://pysimplegui.readthedocs.io/en/latest/cookbook/
import PySimpleGUI as sg
#import PySimpleGUIWeb as sg


def _radio_frame(title, group_id, names, keyed=()):
    """Build a titled frame holding one row of radio buttons.

    The first entry is pre-selected; names listed in *keyed* also get an
    element key equal to their name (for later Element() lookup).
    """
    row = []
    for i, name in enumerate(names):
        kwargs = {'key': name} if name in keyed else {}
        row.append(sg.Radio(name, group_id, default=(i == 0), **kwargs))
    return sg.Frame(layout=[row], title=title)


def _checkbox_frame(title, names, pad_rows=0):
    """Build a titled frame with one checked checkbox per row.

    *pad_rows* empty sg.Text rows are appended so the three columns render
    at equal height (each padding row is a distinct widget instance).
    """
    rows = [[sg.Checkbox(name, default=True)] for name in names]
    rows += [[sg.Text('')] for _ in range(pad_rows)]
    return sg.Frame(layout=rows, title=title)


layout = [
    # API selection: free, google, watson, azure, nict, special
    [_radio_frame(u'API選択', 'API',
                  ['free', 'google', 'watson', 'azure', 'nict', 'special'],
                  keyed=('google',))],
    # Mode selection: hud, live, translator, speech, number, camera, assistant
    [_radio_frame(u'モード選択', 'MODE',
                  ['hud', 'live', 'translator', 'speech', 'number', 'camera', 'assistant'])],
    [
        # speech startup conditions (3 padding rows -> 13 rows total)
        _checkbox_frame(u'speech 起動条件',
                        ['main_speech', 'controls', 'adintool', 'voice2wav', 'coreSTT',
                         'coreTTS', 'playvoice', 'julius', 'sttreader', 'trareader'],
                        pad_rows=3),
        # vision startup conditions (13 rows, no padding needed)
        _checkbox_frame(u'vision 起動条件',
                        ['main_vision', 'controlv', 'overlay', 'camera1', 'camera2',
                         'txt2img', 'cvreader', 'cvdetect1', 'cvdetect2', 'cv2dnn_yolo',
                         'cv2dnn_ssd', 'vin2jpg', 'coreCV']),
        # desktop startup conditions (7 padding rows -> 13 rows total)
        _checkbox_frame(u'desktop 起動条件',
                        ['main_desktop', 'controld', 'capture', 'cvreader', 'recorder',
                         'uploader'],
                        pad_rows=7),
    ],
    [sg.Button(u'OK'), sg.Button(u'キャンセル')]
]

window = sg.Window(u'RiKi 設定入力', layout)
#window.Element('google').Update(1)
event, values = window.Read()
window.Close()
print(event, values[0], values[1], values[2]) # the input data looks like a simple list when auto numbered
print(event, values)
| [
"PySimpleGUI.Checkbox",
"PySimpleGUI.Text",
"PySimpleGUI.Button",
"PySimpleGUI.Radio",
"PySimpleGUI.Window"
] | [((3663, 3694), 'PySimpleGUI.Window', 'sg.Window', (['u"""RiKi 設定入力"""', 'layout'], {}), "(u'RiKi 設定入力', layout)\n", (3672, 3694), True, 'import PySimpleGUI as sg\n'), ((3612, 3628), 'PySimpleGUI.Button', 'sg.Button', (['u"""OK"""'], {}), "(u'OK')\n", (3621, 3628), True, 'import PySimpleGUI as sg\n'), ((3634, 3653), 'PySimpleGUI.Button', 'sg.Button', (['u"""キャンセル"""'], {}), "(u'キャンセル')\n", (3643, 3653), True, 'import PySimpleGUI as sg\n'), ((334, 371), 'PySimpleGUI.Radio', 'sg.Radio', (['"""free"""', '"""API"""'], {'default': '(True)'}), "('free', 'API', default=True)\n", (342, 371), True, 'import PySimpleGUI as sg\n'), ((373, 412), 'PySimpleGUI.Radio', 'sg.Radio', (['"""google"""', '"""API"""'], {'key': '"""google"""'}), "('google', 'API', key='google')\n", (381, 412), True, 'import PySimpleGUI as sg\n'), ((414, 439), 'PySimpleGUI.Radio', 'sg.Radio', (['"""watson"""', '"""API"""'], {}), "('watson', 'API')\n", (422, 439), True, 'import PySimpleGUI as sg\n'), ((466, 490), 'PySimpleGUI.Radio', 'sg.Radio', (['"""azure"""', '"""API"""'], {}), "('azure', 'API')\n", (474, 490), True, 'import PySimpleGUI as sg\n'), ((492, 515), 'PySimpleGUI.Radio', 'sg.Radio', (['"""nict"""', '"""API"""'], {}), "('nict', 'API')\n", (500, 515), True, 'import PySimpleGUI as sg\n'), ((517, 543), 'PySimpleGUI.Radio', 'sg.Radio', (['"""special"""', '"""API"""'], {}), "('special', 'API')\n", (525, 543), True, 'import PySimpleGUI as sg\n'), ((709, 746), 'PySimpleGUI.Radio', 'sg.Radio', (['"""hud"""', '"""MODE"""'], {'default': '(True)'}), "('hud', 'MODE', default=True)\n", (717, 746), True, 'import PySimpleGUI as sg\n'), ((748, 772), 'PySimpleGUI.Radio', 'sg.Radio', (['"""live"""', '"""MODE"""'], {}), "('live', 'MODE')\n", (756, 772), True, 'import PySimpleGUI as sg\n'), ((774, 804), 'PySimpleGUI.Radio', 'sg.Radio', (['"""translator"""', '"""MODE"""'], {}), "('translator', 'MODE')\n", (782, 804), True, 'import PySimpleGUI as sg\n'), ((831, 857), 'PySimpleGUI.Radio', 'sg.Radio', 
(['"""speech"""', '"""MODE"""'], {}), "('speech', 'MODE')\n", (839, 857), True, 'import PySimpleGUI as sg\n'), ((859, 885), 'PySimpleGUI.Radio', 'sg.Radio', (['"""number"""', '"""MODE"""'], {}), "('number', 'MODE')\n", (867, 885), True, 'import PySimpleGUI as sg\n'), ((887, 913), 'PySimpleGUI.Radio', 'sg.Radio', (['"""camera"""', '"""MODE"""'], {}), "('camera', 'MODE')\n", (895, 913), True, 'import PySimpleGUI as sg\n'), ((915, 944), 'PySimpleGUI.Radio', 'sg.Radio', (['"""assistant"""', '"""MODE"""'], {}), "('assistant', 'MODE')\n", (923, 944), True, 'import PySimpleGUI as sg\n'), ((1070, 1110), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""main_speech"""'], {'default': '(True)'}), "('main_speech', default=True)\n", (1081, 1110), True, 'import PySimpleGUI as sg\n'), ((1139, 1176), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""controls"""'], {'default': '(True)'}), "('controls', default=True)\n", (1150, 1176), True, 'import PySimpleGUI as sg\n'), ((1205, 1242), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""adintool"""'], {'default': '(True)'}), "('adintool', default=True)\n", (1216, 1242), True, 'import PySimpleGUI as sg\n'), ((1271, 1309), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""voice2wav"""'], {'default': '(True)'}), "('voice2wav', default=True)\n", (1282, 1309), True, 'import PySimpleGUI as sg\n'), ((1338, 1374), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""coreSTT"""'], {'default': '(True)'}), "('coreSTT', default=True)\n", (1349, 1374), True, 'import PySimpleGUI as sg\n'), ((1403, 1439), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""coreTTS"""'], {'default': '(True)'}), "('coreTTS', default=True)\n", (1414, 1439), True, 'import PySimpleGUI as sg\n'), ((1468, 1506), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""playvoice"""'], {'default': '(True)'}), "('playvoice', default=True)\n", (1479, 1506), True, 'import PySimpleGUI as sg\n'), ((1535, 1570), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""julius"""'], {'default': '(True)'}), "('julius', default=True)\n", 
(1546, 1570), True, 'import PySimpleGUI as sg\n'), ((1599, 1637), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""sttreader"""'], {'default': '(True)'}), "('sttreader', default=True)\n", (1610, 1637), True, 'import PySimpleGUI as sg\n'), ((1666, 1704), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""trareader"""'], {'default': '(True)'}), "('trareader', default=True)\n", (1677, 1704), True, 'import PySimpleGUI as sg\n'), ((1733, 1744), 'PySimpleGUI.Text', 'sg.Text', (['""""""'], {}), "('')\n", (1740, 1744), True, 'import PySimpleGUI as sg\n'), ((1773, 1784), 'PySimpleGUI.Text', 'sg.Text', (['""""""'], {}), "('')\n", (1780, 1784), True, 'import PySimpleGUI as sg\n'), ((1813, 1824), 'PySimpleGUI.Text', 'sg.Text', (['""""""'], {}), "('')\n", (1820, 1824), True, 'import PySimpleGUI as sg\n'), ((1944, 1984), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""main_vision"""'], {'default': '(True)'}), "('main_vision', default=True)\n", (1955, 1984), True, 'import PySimpleGUI as sg\n'), ((2013, 2050), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""controlv"""'], {'default': '(True)'}), "('controlv', default=True)\n", (2024, 2050), True, 'import PySimpleGUI as sg\n'), ((2079, 2115), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""overlay"""'], {'default': '(True)'}), "('overlay', default=True)\n", (2090, 2115), True, 'import PySimpleGUI as sg\n'), ((2144, 2180), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""camera1"""'], {'default': '(True)'}), "('camera1', default=True)\n", (2155, 2180), True, 'import PySimpleGUI as sg\n'), ((2209, 2245), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""camera2"""'], {'default': '(True)'}), "('camera2', default=True)\n", (2220, 2245), True, 'import PySimpleGUI as sg\n'), ((2274, 2310), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""txt2img"""'], {'default': '(True)'}), "('txt2img', default=True)\n", (2285, 2310), True, 'import PySimpleGUI as sg\n'), ((2339, 2376), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""cvreader"""'], {'default': '(True)'}), "('cvreader', 
default=True)\n", (2350, 2376), True, 'import PySimpleGUI as sg\n'), ((2405, 2443), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""cvdetect1"""'], {'default': '(True)'}), "('cvdetect1', default=True)\n", (2416, 2443), True, 'import PySimpleGUI as sg\n'), ((2472, 2510), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""cvdetect2"""'], {'default': '(True)'}), "('cvdetect2', default=True)\n", (2483, 2510), True, 'import PySimpleGUI as sg\n'), ((2539, 2579), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""cv2dnn_yolo"""'], {'default': '(True)'}), "('cv2dnn_yolo', default=True)\n", (2550, 2579), True, 'import PySimpleGUI as sg\n'), ((2608, 2647), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""cv2dnn_ssd"""'], {'default': '(True)'}), "('cv2dnn_ssd', default=True)\n", (2619, 2647), True, 'import PySimpleGUI as sg\n'), ((2676, 2712), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""vin2jpg"""'], {'default': '(True)'}), "('vin2jpg', default=True)\n", (2687, 2712), True, 'import PySimpleGUI as sg\n'), ((2741, 2776), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""coreCV"""'], {'default': '(True)'}), "('coreCV', default=True)\n", (2752, 2776), True, 'import PySimpleGUI as sg\n'), ((2897, 2938), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""main_desktop"""'], {'default': '(True)'}), "('main_desktop', default=True)\n", (2908, 2938), True, 'import PySimpleGUI as sg\n'), ((2967, 3004), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""controld"""'], {'default': '(True)'}), "('controld', default=True)\n", (2978, 3004), True, 'import PySimpleGUI as sg\n'), ((3033, 3069), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""capture"""'], {'default': '(True)'}), "('capture', default=True)\n", (3044, 3069), True, 'import PySimpleGUI as sg\n'), ((3098, 3135), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""cvreader"""'], {'default': '(True)'}), "('cvreader', default=True)\n", (3109, 3135), True, 'import PySimpleGUI as sg\n'), ((3164, 3201), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""recorder"""'], {'default': 
'(True)'}), "('recorder', default=True)\n", (3175, 3201), True, 'import PySimpleGUI as sg\n'), ((3230, 3267), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""uploader"""'], {'default': '(True)'}), "('uploader', default=True)\n", (3241, 3267), True, 'import PySimpleGUI as sg\n'), ((3296, 3307), 'PySimpleGUI.Text', 'sg.Text', (['""""""'], {}), "('')\n", (3303, 3307), True, 'import PySimpleGUI as sg\n'), ((3336, 3347), 'PySimpleGUI.Text', 'sg.Text', (['""""""'], {}), "('')\n", (3343, 3347), True, 'import PySimpleGUI as sg\n'), ((3376, 3387), 'PySimpleGUI.Text', 'sg.Text', (['""""""'], {}), "('')\n", (3383, 3387), True, 'import PySimpleGUI as sg\n'), ((3416, 3427), 'PySimpleGUI.Text', 'sg.Text', (['""""""'], {}), "('')\n", (3423, 3427), True, 'import PySimpleGUI as sg\n'), ((3456, 3467), 'PySimpleGUI.Text', 'sg.Text', (['""""""'], {}), "('')\n", (3463, 3467), True, 'import PySimpleGUI as sg\n'), ((3496, 3507), 'PySimpleGUI.Text', 'sg.Text', (['""""""'], {}), "('')\n", (3503, 3507), True, 'import PySimpleGUI as sg\n'), ((3536, 3547), 'PySimpleGUI.Text', 'sg.Text', (['""""""'], {}), "('')\n", (3543, 3547), True, 'import PySimpleGUI as sg\n')] |
from django.conf.urls import url, include
from django.contrib.auth.models import User
# Root URLconf: each regex prefix delegates to a sub-application's URLconf.
# NOTE(review): ``User`` is imported but not referenced in this module —
# confirm it is needed (e.g. import side effects) before removing.
urlpatterns = [
    url(r'^test/', include('testapp.urls'), name='Test User/Group API'),
    url(r'^resume/', include('apis.jsonresume_org.urls'), name='Json Resume'),
    url(r'^schema/', include('apis.schema_org.urls'), name='Schemas'),
    url(r'^geoname/', include('apis.geonames_org.urls'), name='Geonames'),
]
"django.conf.urls.include"
] | [((122, 145), 'django.conf.urls.include', 'include', (['"""testapp.urls"""'], {}), "('testapp.urls')\n", (129, 145), False, 'from django.conf.urls import url, include\n'), ((197, 232), 'django.conf.urls.include', 'include', (['"""apis.jsonresume_org.urls"""'], {}), "('apis.jsonresume_org.urls')\n", (204, 232), False, 'from django.conf.urls import url, include\n'), ((276, 307), 'django.conf.urls.include', 'include', (['"""apis.schema_org.urls"""'], {}), "('apis.schema_org.urls')\n", (283, 307), False, 'from django.conf.urls import url, include\n'), ((348, 381), 'django.conf.urls.include', 'include', (['"""apis.geonames_org.urls"""'], {}), "('apis.geonames_org.urls')\n", (355, 381), False, 'from django.conf.urls import url, include\n')] |
# Cryptomath Module
import random
def gcd(a, b):
    """Return the greatest common divisor of positive integers a and b.

    Uses the iterative Euclidean algorithm: repeatedly replace the pair
    with (smaller, larger mod smaller) until the remainder hits zero.
    """
    larger, smaller = (a, b) if a > b else (b, a)
    while smaller != 0:
        larger, smaller = smaller, larger % smaller
    return larger
def extendedGCD(a,b): #used to find mod inverse
    """Return Bezout coefficients (u, v) with a*u + b*v == gcd(a, b)."""
    x, y = a, b
    # (u1, v1) expresses x, and (u2, v2) expresses y, as integer
    # combinations of the original a and b.
    u1, v1 = 1, 0
    u2, v2 = 0, 1
    while y != 0:
        q, r = divmod(x, y)          # same q as (x - x % y) // y
        u1, u2 = u2, u1 - q * u2
        v1, v2 = v2, v1 - q * v2
        x, y = y, r
    return (u1, v1)
def findModInverse(a, m):
    """Return the multiplicative inverse of a modulo m, or None when a and
    m share a common factor (no inverse exists)."""
    if gcd(a, m) != 1:
        return None
    inverse, _ = extendedGCD(a, m)
    return inverse % m
def RabinMiller(n):
    """Probabilistic Rabin-Miller primality test.

    Returns True when n survives 50 random rounds (probably prime) and
    False when a round proves n composite.
    """
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    # Write n - 1 as 2**s * d with d odd.
    s, d = 0, n - 1
    while d % 2 == 0:
        d = d // 2
        s += 1
    # Try fifty random bases to prove n composite.
    for _ in range(50):
        a = random.randint(2, n - 1)
        if gcd(a, n) != 1:
            # A nontrivial common factor proves compositeness outright.
            return False
        b = pow(a, d, n)
        if b == 1 or b == n - 1:
            continue
        is_witness = True
        r = 1
        while r < s and is_witness:
            b = pow(b, 2, n)
            if b == n - 1:
                is_witness = False
            r += 1
        if is_witness:
            return False
    return True
def isPrime(n):
    """Decide whether positive integer n is composite or probably prime.

    Pipeline: reject n < 2, then accept/reject against a table of primes
    below 1000, then Fermat tests to bases 2,3,5,7,11, and finally the
    full Rabin-Miller test.
    """
    if n < 2:
        return False
    smallPrimes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53,
                   59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113,
                   127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181,
                   191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251,
                   257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
                   331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397,
                   401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
                   467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557,
                   563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619,
                   631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
                   709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787,
                   797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863,
                   877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953,
                   967, 971, 977, 983, 991, 997]
    # Members of the table are prime by construction.
    if n in smallPrimes:
        return True
    # Any small-prime divisor proves n composite.
    for p in smallPrimes:
        if n % p == 0:
            return False
    # Cheap Fermat compositeness filter before the expensive test.
    for base in (2, 3, 5, 7, 11):
        if pow(base, n - 1, n) != 1:
            return False
    return RabinMiller(n)
def findPrime(bits=1024, tries=10000):
    # Search for a probable prime with the given number of bits.
    # Candidates are drawn from [2**(bits-1), 2**bits] and forced odd.
    # NOTE(review): randint's upper bound y == 2**bits is inclusive, so a
    # (bits+1)-bit value can be returned; consider randint(x, y - 1).
    x = 2**(bits - 1)
    y = 2*x
    for i in range(tries):
        n = random.randint(x, y)
        if n % 2 == 0:
            n += 1
        if isPrime(n):
            return n
    return None | [
"random.randint"
] | [((1210, 1234), 'random.randint', 'random.randint', (['(2)', '(n - 1)'], {}), '(2, n - 1)\n', (1224, 1234), False, 'import random\n'), ((3389, 3409), 'random.randint', 'random.randint', (['x', 'y'], {}), '(x, y)\n', (3403, 3409), False, 'import random\n')] |
from more_itertools import ilen
from my.discord import messages, activity
def test_discord() -> None:
    """Smoke test: the discord export yields plenty of messages and events."""
    assert ilen(messages()) > 100
    # walk the activity stream, requiring more than 100 dict events
    count = 0
    for count, event in enumerate(activity(), start=1):
        assert isinstance(event, dict)
        if count > 100:
            break
    else:
        # the stream ran dry before producing enough events
        assert False
    assert True
| [
"my.discord.messages",
"my.discord.activity"
] | [((211, 221), 'my.discord.activity', 'activity', ([], {}), '()\n', (219, 221), False, 'from my.discord import messages, activity\n'), ((121, 131), 'my.discord.messages', 'messages', ([], {}), '()\n', (129, 131), False, 'from my.discord import messages, activity\n')] |
import sys
from collections import deque
def sol():
    """Read the N x M grid from stdin and print the answer for BOJ 17086."""
    # sys.stdin = open("./17086/input.txt")
    read = sys.stdin.readline
    N, M = map(int, read().split())
    sharks = deque()
    grid = []
    for row_idx in range(N):
        row = list(map(int, read().split()))
        for col_idx in range(M):
            if row[col_idx] == 1:
                # seed the multi-source BFS with every shark cell
                sharks.append((row_idx, col_idx, 0))
        grid.append(row)
    print(bfs(N, M, grid, sharks))
def bfs(N: int, M: int, space: list, baby_sharks: list) -> int:
    """Multi-source BFS from every shark cell.

    `baby_sharks` is a deque of (row, col, 0) seeds and is consumed.
    Returns the maximum, over all cells, of the 8-directional distance to
    the nearest shark.  (`space` is accepted for interface compatibility
    but not read here.)
    """
    offsets = ((-1, -1), (-1, 0), (-1, 1), (0, -1),
               (0, 1), (1, -1), (1, 0), (1, 1))
    dist = [[N + M + 1] * M for _ in range(N)]
    visited = [[False] * M for _ in range(N)]
    while baby_sharks:
        cur_r, cur_c, cur_dist = baby_sharks.popleft()
        if dist[cur_r][cur_c] > cur_dist:
            dist[cur_r][cur_c] = cur_dist
            visited[cur_r][cur_c] = True
        for dr, dc in offsets:
            nr, nc = cur_r + dr, cur_c + dc
            if 0 <= nr < N and 0 <= nc < M and not visited[nr][nc]:
                visited[nr][nc] = True
                baby_sharks.append((nr, nc, cur_dist + 1))
    return max(max(row) for row in dist)
if __name__ == "__main__":
sol()
| [
"collections.deque"
] | [((184, 191), 'collections.deque', 'deque', ([], {}), '()\n', (189, 191), False, 'from collections import deque\n')] |
from pprint import pprint
from dataclasses import dataclass, field
from typing import List
from tabulate import tabulate
from typer import Typer
from sly import Lexer
app = Typer()
class Scanner(Lexer):
    """sly-based lexer for the toy language.

    Emits Token objects whose type names come from ``tokens``; each name's
    regex attribute of the same name defines its pattern.
    """
    # Token names; sly's metaclass collects these bare identifiers.
    tokens = {
        IF_KW, ELSE_KW, FOR_KW, CONST_STR, CONST_NUMBER, PLUS_OP, MINUS_OP, MULTIPLY_OP,
        DIVIDE_OP, LP, LCB, RP, RCB, EQUAL_OP, ASSIGNMENT_OP, SEMICOLON, IDENTIFIER,
    }
    # Characters silently skipped between tokens.
    ignore = ' \t\n'
    # One regex per token. NOTE(review): keyword patterns (if/else/for) must
    # take precedence over IDENTIFIER; sly applies its own ordering rules to
    # string patterns -- confirm 'if'/'for' are not lexed as identifiers.
    IF_KW = r'if'
    ELSE_KW = r'else'
    FOR_KW = r'for'
    CONST_STR = r'".*?"|\'.*?\''
    CONST_NUMBER = r'\d+'
    PLUS_OP = r'\+'
    MINUS_OP = r'\-'
    MULTIPLY_OP = r'\*'
    DIVIDE_OP = r'\/'
    LP = r'\('
    LCB = r'\{'
    RP = r'\)'
    RCB = r'\}'
    EQUAL_OP = r'=='
    ASSIGNMENT_OP = r'='
    SEMICOLON = r';'
    IDENTIFIER = r'[a-zA-Z_]\w*'
    # Token types that carry a user value and therefore receive numbered
    # entries in the symbol table.
    variable_tokens = [
        'IDENTIFIER',
        'CONST_STR',
        'CONST_NUMBER',
    ]
    def get_tokens_symbol_table(self, data):
        """Tokenize the input and build the accompanying symbol table.

        Variable-like tokens (identifiers and constants) have their type
        suffixed with a running counter (e.g. ``IDENTIFIER_1``); a repeated
        identifier reuses the type assigned at its first occurrence.

        Args:
            data (str): data that needs to tokenize

        Returns:
            tuple(List[Token], SymbolTable): tokens and symbol table
        """
        counter = 1
        tokens = []
        symbol_table = SymbolTable()
        for token in self.tokenize(data):
            if token.type in self.variable_tokens:
                # Reuse the numbered type of a previously seen identifier,
                # otherwise mint a fresh numbered type.
                if token_type := symbol_table.get_type_if_duplicate(token):
                    token.type = token_type
                else:
                    token.type += f'_{counter}'
                    counter += 1
                symbol_table.add(token)
            tokens.append(token)
        return tokens, symbol_table
@dataclass
class Symbol:
    """Represents one symbol (token type + lexeme) in the symbol table.
    """
    # Numbered token type, e.g. 'IDENTIFIER_1' or 'CONST_NUMBER_2'.
    type: str
    # The matched source text.
    value: str
@dataclass
class SymbolTable:
    """Ordered collection of Symbols discovered while scanning.
    """
    symbols: List[Symbol] = field(default_factory=list)

    def add(self, token):
        """Record a new token in the symbol table.

        Args:
            token (Token): the Token object generated by sly.
        """
        self.symbols.append(Symbol(type=token.type, value=token.value))

    def get_type_if_duplicate(self, token):
        """Return the type previously assigned to this identifier, if any.

        Args:
            token (Token): object of sly Token.

        Returns:
            str: the stored IDENTIFIER_* type of an earlier token with the
            same lexeme, or '' when the token has not been seen before.
        """
        for entry in self.symbols:
            if entry.value == token.value and entry.type.startswith('IDENTIFIER'):
                return entry.type
        return ''

    def __str__(self):
        """Render the symbol table as a psql-style ASCII grid for the CLI.

        Returns:
            str: symbol table as string.
        """
        rows = [(index, entry.type, entry.value) for index, entry in enumerate(self.symbols)]
        return tabulate(rows, headers=['#', 'token type', 'value'], tablefmt='psql')
@app.command()
def compile(file_address):
    """Scan a source file and print its token stream and symbol table.

    Args:
        file_address (str): address of file that given in the command line.
    """
    scanner = Scanner()
    # Use a context manager so the file handle is closed promptly; the
    # original `open(...).read()` left closing to the garbage collector.
    with open(file_address, 'r') as source:
        data = source.read()
    tokens, symbol_table = scanner.get_tokens_symbol_table(data)
    pprint(tokens)
    print(symbol_table)
# Run the Typer CLI when executed directly.
if __name__ == '__main__':
    app()
| [
"typer.Typer",
"pprint.pprint",
"dataclasses.field"
] | [((185, 192), 'typer.Typer', 'Typer', ([], {}), '()\n', (190, 192), False, 'from typer import Typer\n'), ((2052, 2079), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2057, 2079), False, 'from dataclasses import dataclass, field\n'), ((3465, 3479), 'pprint.pprint', 'pprint', (['tokens'], {}), '(tokens)\n', (3471, 3479), False, 'from pprint import pprint\n')] |
"""config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from materials.views import MaterialViewSets
from projects.views import ProjectViewSets, DirectoryViewSets, CodeFileViewSets
from rest_framework_nested import routers
# Top-level DRF routes: /materials/ and /projects/.
router = routers.SimpleRouter()
router.register(r'materials', MaterialViewSets)
router.register(r'projects', ProjectViewSets)
# Routes nested under a single project (lookup kwarg prefix 'project'):
# folders and code files that belong to it.
inside_proj_router = routers.NestedSimpleRouter(router, r'projects', lookup='project')
inside_proj_router.register(r'folders', DirectoryViewSets)
inside_proj_router.register(r'files', CodeFileViewSets)
urlpatterns = [
    path(r'', include(router.urls)),
    path(r'', include(inside_proj_router.urls)),
    path('admin/', admin.site.urls),
    path('api-auth/', include('rest_framework.urls')),
]
# NOTE(review): router.urls are already included via path(r'', ...) above,
# so this += appears to register the same routes twice -- confirm intent.
urlpatterns += router.urls
| [
"django.urls.include",
"rest_framework_nested.routers.NestedSimpleRouter",
"django.urls.path",
"rest_framework_nested.routers.SimpleRouter"
] | [((882, 904), 'rest_framework_nested.routers.SimpleRouter', 'routers.SimpleRouter', ([], {}), '()\n', (902, 904), False, 'from rest_framework_nested import routers\n'), ((1021, 1085), 'rest_framework_nested.routers.NestedSimpleRouter', 'routers.NestedSimpleRouter', (['router', '"""projects"""'], {'lookup': '"""project"""'}), "(router, 'projects', lookup='project')\n", (1047, 1085), False, 'from rest_framework_nested import routers\n'), ((1310, 1341), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (1314, 1341), False, 'from django.urls import path, include\n'), ((1234, 1254), 'django.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (1241, 1254), False, 'from django.urls import path, include\n'), ((1271, 1303), 'django.urls.include', 'include', (['inside_proj_router.urls'], {}), '(inside_proj_router.urls)\n', (1278, 1303), False, 'from django.urls import path, include\n'), ((1365, 1395), 'django.urls.include', 'include', (['"""rest_framework.urls"""'], {}), "('rest_framework.urls')\n", (1372, 1395), False, 'from django.urls import path, include\n')] |
from __future__ import print_function
from __future__ import division
import os
import sys
import time
import datetime
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.optim import lr_scheduler
from args import argument_parser, image_dataset_kwargs, optimizer_kwargs
from torchreid.data_manager import ImageDataManager
from torchreid import models
from torchreid.losses import CrossEntropyLoss, DeepSupervision
from torchreid.utils.iotools import save_checkpoint, check_isfile
from torchreid.utils.avgmeter import AverageMeter
from torchreid.utils.loggers import Logger, RankLogger
from torchreid.utils.torchtools import count_num_param, open_all_layers, open_specified_layers
from torchreid.utils.reidtools import visualize_ranked_results
from torchreid.eval_metrics import evaluate
from torchreid.optimizers import init_optimizer
from torchreid.regularizers import get_regularizer
from torchreid.losses.wrapped_cross_entropy_loss import WrappedCrossEntropyLoss
from torchreid.models.tricks.dropout import DropoutOptimizer
import logging
# Logging level comes from the LOGLEVEL env var; by default only CRITICAL
# messages are shown.
logging.basicConfig(level=os.environ.get('LOGLEVEL', 'CRITICAL'))
# global variables: CLI args are parsed once at import time and shared below.
parser = argument_parser()
args = parser.parse_args()
dropout_optimizer = DropoutOptimizer(args)
# Store torch hub / pretrained-model downloads under <repo>/.torch.
os.environ['TORCH_HOME'] = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '.torch'))
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for
    the specified values of k.

    Args:
        output (torch.Tensor): prediction matrix with shape (batch_size, num_classes).
        target (torch.LongTensor): ground truth labels with shape (batch_size).
        topk (tuple, optional): accuracy at top-k will be computed. For example,
            topk=(1, 5) means accuracy at top-1 and top-5 will be computed.

    Returns:
        list: accuracy at top-k, each a 1-element tensor in percent.

    Examples::
        >>> from torchreid import metrics
        >>> metrics.accuracy(output, target)
    """
    maxk = max(topk)
    batch_size = target.size(0)
    if isinstance(output, (tuple, list)):
        # Some models return (logits, features); score accuracy on the logits.
        output = output[0]
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # Use reshape, not view: `correct` is non-contiguous after the
        # transpose above, so .view(-1) raises for k > 1 on modern PyTorch.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        acc = correct_k.mul_(100.0 / batch_size)
        res.append(acc)
    return res
def get_criterions(num_classes: int, use_gpu: bool, args) -> ('criterion', 'fix_criterion', 'switch_criterion', 'htri_param_controller'):
    """Build the training criteria selected by ``args.criterion``.

    Loss modules are imported lazily inside each branch so that only the
    selected loss's dependencies are loaded.

    Args:
        num_classes (int): number of identity classes.
        use_gpu (bool): move criteria to GPU when True.
        args: parsed CLI arguments; reads ``criterion``, ``label_smooth``,
            ``penalty_position``, ``fix_custom_loss`` and ``switch_loss``.

    Returns:
        tuple: (criterion, fix_criterion, switch_criterion,
        htri_param_controller).

    Raises:
        RuntimeError: if ``args.criterion`` names no known loss.
    """
    from torchreid.losses.wrapped_triplet_loss import WrappedTripletLoss
    from torchreid.regularizers.param_controller import HtriParamController

    htri_param_controller = HtriParamController()

    # Fix/switch criteria mirror the main choice: triplet-based when the
    # main criterion is a htri variant, plain cross-entropy otherwise.
    if 'htri' in args.criterion:
        fix_criterion = WrappedTripletLoss(num_classes, use_gpu, args, htri_param_controller)
        switch_criterion = WrappedTripletLoss(num_classes, use_gpu, args, htri_param_controller)
    else:
        fix_criterion = WrappedCrossEntropyLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth)
        switch_criterion = WrappedCrossEntropyLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth)

    if args.criterion == 'xent':
        criterion = WrappedCrossEntropyLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth)
    elif args.criterion == 'spectral':
        from torchreid.losses.spectral_loss import SpectralLoss
        criterion = SpectralLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth, penalty_position=args.penalty_position)
    elif args.criterion == 'batch_spectral':
        from torchreid.losses.batch_spectral_loss import BatchSpectralLoss
        criterion = BatchSpectralLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth)
    elif args.criterion == 'lowrank':
        from torchreid.losses.lowrank_loss import LowRankLoss
        criterion = LowRankLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth)
    elif args.criterion == 'singular':
        from torchreid.losses.singular_loss import SingularLoss
        criterion = SingularLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth, penalty_position=args.penalty_position)
    elif args.criterion == 'htri':
        criterion = WrappedTripletLoss(num_classes=num_classes, use_gpu=use_gpu, args=args, param_controller=htri_param_controller)
    elif args.criterion == 'singular_htri':
        from torchreid.losses.singular_triplet_loss import SingularTripletLoss
        criterion = SingularTripletLoss(num_classes, use_gpu, args, htri_param_controller)
    elif args.criterion == 'incidence':
        from torchreid.losses.incidence_loss import IncidenceLoss
        criterion = IncidenceLoss()
    elif args.criterion == 'incidence_xent':
        from torchreid.losses.incidence_xent_loss import IncidenceXentLoss
        criterion = IncidenceXentLoss(num_classes, use_gpu, args.label_smooth)
    else:
        # BUGFIX: the original formatted the unbound local `criterion` here,
        # which raised NameError instead of the intended RuntimeError.
        raise RuntimeError('Unknown criterion {!r}'.format(args.criterion))

    if args.fix_custom_loss:
        fix_criterion = criterion

    if args.switch_loss < 0:
        criterion, switch_criterion = switch_criterion, criterion

    return criterion, fix_criterion, switch_criterion, htri_param_controller
def main():
    """End-to-end setup: seed RNGs, build data/model/optimizer, optionally
    load weights or resume a checkpoint, then dump training-set accuracy
    via extract_train_info()."""
    global args, dropout_optimizer
    torch.manual_seed(args.seed)
    if not args.use_avai_gpus:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False
    # Mirror stdout/stderr into a log file under the save directory.
    log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'
    sys.stderr = sys.stdout = Logger(osp.join(args.save_dir, log_name))
    print("==========\nArgs:{}\n==========".format(args))
    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU, however, GPU is highly recommended")
    print("Initializing image data manager")
    dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
    trainloader, testloader_dict = dm.return_dataloaders()
    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch, num_classes=dm.num_train_pids, loss={'xent'}, use_gpu=use_gpu, dropout_optimizer=dropout_optimizer)
    print(model)
    print("Model size: {:.3f} M".format(count_num_param(model)))
    # criterion = WrappedCrossEntropyLoss(num_classes=dm.num_train_pids, use_gpu=use_gpu, label_smooth=args.label_smooth)
    criterion, fix_criterion, switch_criterion, htri_param_controller = get_criterions(dm.num_train_pids, use_gpu, args)
    regularizer, reg_param_controller = get_regularizer(args.regularizer)
    optimizer = init_optimizer(model.parameters(), **optimizer_kwargs(args))
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.stepsize, gamma=args.gamma)
    if args.load_weights and check_isfile(args.load_weights):
        # load pretrained weights but ignore layers that don't match in size
        try:
            checkpoint = torch.load(args.load_weights)
        except Exception as e:
            # Fall back to CPU mapping when the checkpoint was saved on GPU
            # and CUDA deserialization fails here.
            print(e)
            checkpoint = torch.load(args.load_weights, map_location={'cuda:0': 'cpu'})
        # dropout_optimizer.set_p(checkpoint.get('dropout_p', 0))
        # print(list(checkpoint.keys()), checkpoint['dropout_p'])
        pretrain_dict = checkpoint['state_dict']
        model_dict = model.state_dict()
        # Keep only tensors whose name and shape match the current model.
        pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()}
        model_dict.update(pretrain_dict)
        model.load_state_dict(model_dict)
        print("Loaded pretrained weights from '{}'".format(args.load_weights))
    if args.resume and check_isfile(args.resume):
        # Resuming overwrites matching entries of the current state dict.
        checkpoint = torch.load(args.resume)
        state = model.state_dict()
        state.update(checkpoint['state_dict'])
        model.load_state_dict(state)
        # args.start_epoch = checkpoint['epoch'] + 1
        print("Loaded checkpoint from '{}'".format(args.resume))
        print("- start_epoch: {}\n- rank1: {}".format(args.start_epoch, checkpoint['rank1']))
    if use_gpu:
        model = nn.DataParallel(model, device_ids=list(range(len(args.gpu_devices.split(','))))).cuda()
    extract_train_info(model, trainloader)
def extract_train_info(model, trainloader):
    # Measure the top-1 accuracy of each of the three auxiliary xent heads
    # over the training set and write the averages to '<weights>.acc'.
    model.eval()
    # NOTE(review): the meaning of the 'fake' env flag is defined elsewhere
    # (presumably read by the model) -- confirm before relying on it.
    os.environ['fake'] = '1'
    accs = [AverageMeter() for _ in range(3)]
    with torch.no_grad():
        for imgs, pids, _, paths in trainloader:
            # model(...)[1] is the tuple of per-head xent logits.
            xent_features = model(imgs.cuda())[1]
            for i, xent_feature in enumerate(xent_features):
                accs[i].update(
                    accuracy(xent_feature, pids.cuda())[0].item(),
                    pids.size(0),
                )
    with open(args.load_weights + '.acc', 'w') as f:
        print(*(acc.avg for acc in accs), file=f)
# Script entry point.
if __name__ == '__main__':
    main()
| [
"torch.optim.lr_scheduler.MultiStepLR",
"torchreid.losses.singular_triplet_loss.SingularTripletLoss",
"torchreid.losses.incidence_xent_loss.IncidenceXentLoss",
"torch.cuda.is_available",
"args.image_dataset_kwargs",
"args.argument_parser",
"torchreid.losses.wrapped_triplet_loss.WrappedTripletLoss",
"t... | [((1246, 1263), 'args.argument_parser', 'argument_parser', ([], {}), '()\n', (1261, 1263), False, 'from args import argument_parser, image_dataset_kwargs, optimizer_kwargs\n'), ((1311, 1333), 'torchreid.models.tricks.dropout.DropoutOptimizer', 'DropoutOptimizer', (['args'], {}), '(args)\n', (1327, 1333), False, 'from torchreid.models.tricks.dropout import DropoutOptimizer\n'), ((2794, 2815), 'torchreid.regularizers.param_controller.HtriParamController', 'HtriParamController', ([], {}), '()\n', (2813, 2815), False, 'from torchreid.regularizers.param_controller import HtriParamController\n'), ((5483, 5511), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (5500, 5511), False, 'import torch\n'), ((5619, 5644), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5642, 5644), False, 'import torch\n'), ((6367, 6505), 'torchreid.models.init_model', 'models.init_model', ([], {'name': 'args.arch', 'num_classes': 'dm.num_train_pids', 'loss': "{'xent'}", 'use_gpu': 'use_gpu', 'dropout_optimizer': 'dropout_optimizer'}), "(name=args.arch, num_classes=dm.num_train_pids, loss={\n 'xent'}, use_gpu=use_gpu, dropout_optimizer=dropout_optimizer)\n", (6384, 6505), False, 'from torchreid import models\n'), ((6867, 6900), 'torchreid.regularizers.get_regularizer', 'get_regularizer', (['args.regularizer'], {}), '(args.regularizer)\n', (6882, 6900), False, 'from torchreid.regularizers import get_regularizer\n'), ((6994, 7073), 'torch.optim.lr_scheduler.MultiStepLR', 'lr_scheduler.MultiStepLR', (['optimizer'], {'milestones': 'args.stepsize', 'gamma': 'args.gamma'}), '(optimizer, milestones=args.stepsize, gamma=args.gamma)\n', (7018, 7073), False, 'from torch.optim import lr_scheduler\n'), ((1177, 1215), 'os.environ.get', 'os.environ.get', (['"""LOGLEVEL"""', '"""CRITICAL"""'], {}), "('LOGLEVEL', 'CRITICAL')\n", (1191, 1215), False, 'import os\n'), ((1391, 1416), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), 
'(__file__)\n', (1406, 1416), False, 'import os\n'), ((2874, 2943), 'torchreid.losses.wrapped_triplet_loss.WrappedTripletLoss', 'WrappedTripletLoss', (['num_classes', 'use_gpu', 'args', 'htri_param_controller'], {}), '(num_classes, use_gpu, args, htri_param_controller)\n', (2892, 2943), False, 'from torchreid.losses.wrapped_triplet_loss import WrappedTripletLoss\n'), ((2971, 3040), 'torchreid.losses.wrapped_triplet_loss.WrappedTripletLoss', 'WrappedTripletLoss', (['num_classes', 'use_gpu', 'args', 'htri_param_controller'], {}), '(num_classes, use_gpu, args, htri_param_controller)\n', (2989, 3040), False, 'from torchreid.losses.wrapped_triplet_loss import WrappedTripletLoss\n'), ((3075, 3176), 'torchreid.losses.wrapped_cross_entropy_loss.WrappedCrossEntropyLoss', 'WrappedCrossEntropyLoss', ([], {'num_classes': 'num_classes', 'use_gpu': 'use_gpu', 'label_smooth': 'args.label_smooth'}), '(num_classes=num_classes, use_gpu=use_gpu,\n label_smooth=args.label_smooth)\n', (3098, 3176), False, 'from torchreid.losses.wrapped_cross_entropy_loss import WrappedCrossEntropyLoss\n'), ((3200, 3301), 'torchreid.losses.wrapped_cross_entropy_loss.WrappedCrossEntropyLoss', 'WrappedCrossEntropyLoss', ([], {'num_classes': 'num_classes', 'use_gpu': 'use_gpu', 'label_smooth': 'args.label_smooth'}), '(num_classes=num_classes, use_gpu=use_gpu,\n label_smooth=args.label_smooth)\n', (3223, 3301), False, 'from torchreid.losses.wrapped_cross_entropy_loss import WrappedCrossEntropyLoss\n'), ((3352, 3453), 'torchreid.losses.wrapped_cross_entropy_loss.WrappedCrossEntropyLoss', 'WrappedCrossEntropyLoss', ([], {'num_classes': 'num_classes', 'use_gpu': 'use_gpu', 'label_smooth': 'args.label_smooth'}), '(num_classes=num_classes, use_gpu=use_gpu,\n label_smooth=args.label_smooth)\n', (3375, 3453), False, 'from torchreid.losses.wrapped_cross_entropy_loss import WrappedCrossEntropyLoss\n'), ((5795, 5828), 'os.path.join', 'osp.join', (['args.save_dir', 'log_name'], {}), '(args.save_dir, log_name)\n', 
(5803, 5828), True, 'import os.path as osp\n'), ((6009, 6046), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (6035, 6046), False, 'import torch\n'), ((7104, 7135), 'torchreid.utils.iotools.check_isfile', 'check_isfile', (['args.load_weights'], {}), '(args.load_weights)\n', (7116, 7135), False, 'from torchreid.utils.iotools import save_checkpoint, check_isfile\n'), ((7952, 7977), 'torchreid.utils.iotools.check_isfile', 'check_isfile', (['args.resume'], {}), '(args.resume)\n', (7964, 7977), False, 'from torchreid.utils.iotools import save_checkpoint, check_isfile\n'), ((8000, 8023), 'torch.load', 'torch.load', (['args.resume'], {}), '(args.resume)\n', (8010, 8023), False, 'import torch\n'), ((8626, 8640), 'torchreid.utils.avgmeter.AverageMeter', 'AverageMeter', ([], {}), '()\n', (8638, 8640), False, 'from torchreid.utils.avgmeter import AverageMeter\n'), ((8670, 8685), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8683, 8685), False, 'import torch\n'), ((3573, 3704), 'torchreid.losses.spectral_loss.SpectralLoss', 'SpectralLoss', ([], {'num_classes': 'num_classes', 'use_gpu': 'use_gpu', 'label_smooth': 'args.label_smooth', 'penalty_position': 'args.penalty_position'}), '(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.\n label_smooth, penalty_position=args.penalty_position)\n', (3585, 3704), False, 'from torchreid.losses.spectral_loss import SpectralLoss\n'), ((6213, 6239), 'args.image_dataset_kwargs', 'image_dataset_kwargs', (['args'], {}), '(args)\n', (6233, 6239), False, 'from args import argument_parser, image_dataset_kwargs, optimizer_kwargs\n'), ((6558, 6580), 'torchreid.utils.torchtools.count_num_param', 'count_num_param', (['model'], {}), '(model)\n', (6573, 6580), False, 'from torchreid.utils.torchtools import count_num_param, open_all_layers, open_specified_layers\n'), ((6954, 6976), 'args.optimizer_kwargs', 'optimizer_kwargs', (['args'], {}), '(args)\n', (6970, 6976), False, 'from args 
import argument_parser, image_dataset_kwargs, optimizer_kwargs\n'), ((7253, 7282), 'torch.load', 'torch.load', (['args.load_weights'], {}), '(args.load_weights)\n', (7263, 7282), False, 'import torch\n'), ((3840, 3936), 'torchreid.losses.batch_spectral_loss.BatchSpectralLoss', 'BatchSpectralLoss', ([], {'num_classes': 'num_classes', 'use_gpu': 'use_gpu', 'label_smooth': 'args.label_smooth'}), '(num_classes=num_classes, use_gpu=use_gpu, label_smooth=\n args.label_smooth)\n', (3857, 3936), False, 'from torchreid.losses.batch_spectral_loss import BatchSpectralLoss\n'), ((7360, 7421), 'torch.load', 'torch.load', (['args.load_weights'], {'map_location': "{'cuda:0': 'cpu'}"}), "(args.load_weights, map_location={'cuda:0': 'cpu'})\n", (7370, 7421), False, 'import torch\n'), ((4052, 4142), 'torchreid.losses.lowrank_loss.LowRankLoss', 'LowRankLoss', ([], {'num_classes': 'num_classes', 'use_gpu': 'use_gpu', 'label_smooth': 'args.label_smooth'}), '(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.\n label_smooth)\n', (4063, 4142), False, 'from torchreid.losses.lowrank_loss import LowRankLoss\n'), ((4261, 4392), 'torchreid.losses.singular_loss.SingularLoss', 'SingularLoss', ([], {'num_classes': 'num_classes', 'use_gpu': 'use_gpu', 'label_smooth': 'args.label_smooth', 'penalty_position': 'args.penalty_position'}), '(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.\n label_smooth, penalty_position=args.penalty_position)\n', (4273, 4392), False, 'from torchreid.losses.singular_loss import SingularLoss\n'), ((4443, 4558), 'torchreid.losses.wrapped_triplet_loss.WrappedTripletLoss', 'WrappedTripletLoss', ([], {'num_classes': 'num_classes', 'use_gpu': 'use_gpu', 'args': 'args', 'param_controller': 'htri_param_controller'}), '(num_classes=num_classes, use_gpu=use_gpu, args=args,\n param_controller=htri_param_controller)\n', (4461, 4558), False, 'from torchreid.losses.wrapped_triplet_loss import WrappedTripletLoss\n'), ((4698, 4768), 
'torchreid.losses.singular_triplet_loss.SingularTripletLoss', 'SingularTripletLoss', (['num_classes', 'use_gpu', 'args', 'htri_param_controller'], {}), '(num_classes, use_gpu, args, htri_param_controller)\n', (4717, 4768), False, 'from torchreid.losses.singular_triplet_loss import SingularTripletLoss\n'), ((4895, 4910), 'torchreid.losses.incidence_loss.IncidenceLoss', 'IncidenceLoss', ([], {}), '()\n', (4908, 4910), False, 'from torchreid.losses.incidence_loss import IncidenceLoss\n'), ((5051, 5109), 'torchreid.losses.incidence_xent_loss.IncidenceXentLoss', 'IncidenceXentLoss', (['num_classes', 'use_gpu', 'args.label_smooth'], {}), '(num_classes, use_gpu, args.label_smooth)\n', (5068, 5109), False, 'from torchreid.losses.incidence_xent_loss import IncidenceXentLoss\n')] |