code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
"""[summary]
:return: [description]
:rtype: [type]
"""
import os
import constants
import pytest
import json
import logging
import CKANData
import tests.helpers.CKANDataHelpers as CKANDataHelpers
LOGGER = logging.getLogger(__name__)
@pytest.fixture(scope="session")
def CKANData_User_Data_Raw():
"""returns a user dataset
"""
ckanDataHelper = CKANDataHelpers.CKAN_Test_Data()
ckanTestUserData = ckanDataHelper.getTestUserData()
yield ckanTestUserData
@pytest.fixture(scope="session")
def CKANData_Test_User_Data_Raw(CKANData_User_Data_Raw):
UserData = CKANData_User_Data_Raw[constants.TEST_USER_DATA_POSITION]
UserData['password'] = '<PASSWORD>'
del UserData['id']
del UserData['number_of_edits']
del UserData['email_hash']
del UserData['created']
del UserData['apikey']
LOGGER.debug("user: %s", UserData)
yield UserData
@pytest.fixture(scope="session")
def CKANData_User_Data_Set(CKANData_User_Data_Raw):
ckanUserDataSet = CKANData.CKANUsersDataSet(CKANData_User_Data_Raw)
yield ckanUserDataSet
@pytest.fixture(scope="session")
def CKANData_User_Data_Record(CKANData_User_Data_Set):
ckanUserRecord = CKANData_User_Data_Set.next()
LOGGER.debug(f"ckanUserRecord:{ckanUserRecord}")
#ckanUserDataSet = CKANData.CKANUsersDataSet(CKANData_User_Data_Raw, constants.TRANSFORM_TYPE_USERS)
#yield ckanUserDataSet
yield ckanUserRecord
@pytest.fixture(scope="session")
def CKAN_Cached_Prod_User_Data(TestProdUserCacheJsonfile, CKANWrapperProd):
"""Checks to see if a cache file exists in the junk directory. If it does
load the data from there otherwise will make an api call, cache the data for
next time and then return the org data
This method returns the prod data
"""
if not os.path.exists(TestProdUserCacheJsonfile):
userDataProd = CKANWrapperProd.getUsers(includeData=True)
with open(TestProdUserCacheJsonfile, 'w') as outfile:
json.dump(userDataProd, outfile)
else:
with open(TestProdUserCacheJsonfile) as json_file:
userDataProd = json.load(json_file)
yield userDataProd
@pytest.fixture(scope="session")
def CKAN_Cached_Test_User_Data(TestTestUserCacheJsonfile, CKANWrapperTest):
"""Checks to see if a cache file exists in the junk directory. If it does
load the data from there otherwise will make an api call, cache the data for
next time and then return the org data
This method returns the prod data
"""
if not os.path.exists(TestTestUserCacheJsonfile):
userDataTest = CKANWrapperTest.getUsers(includeData=True)
with open(TestTestUserCacheJsonfile, 'w') as outfile:
json.dump(userDataTest, outfile)
else:
with open(TestTestUserCacheJsonfile) as json_file:
userDataTest = json.load(json_file)
yield userDataTest
@pytest.fixture(scope="session")
def CKAN_Cached_Test_User_Data_Set(CKAN_Cached_Test_User_Data):
ds = CKANData.CKANUsersDataSet(CKAN_Cached_Test_User_Data)
yield ds
@pytest.fixture(scope="session")
def CKAN_Cached_Prod_User_Data_Set(CKAN_Cached_Prod_User_Data):
ds = CKANData.CKANUsersDataSet(CKAN_Cached_Prod_User_Data)
yield ds
@pytest.fixture(scope="session")
def CKAN_Cached_Prod_Org_Data(TestProdOrgCacheJsonFile, CKANWrapperProd):
"""Checks to see if a cache file exists in the junk directory. If it does
load the data from there otherwise will make an api call, cache the data for
next time and then return the org data
This method returns the prod data
"""
#CKANWrapperProd
if not os.path.exists(TestProdOrgCacheJsonFile):
orgDataProd = CKANWrapperProd.getOrganizations(includeData=True)
with open(TestProdOrgCacheJsonFile, 'w') as outfile:
json.dump(orgDataProd, outfile)
else:
with open(TestProdOrgCacheJsonFile) as json_file:
orgDataProd = json.load(json_file)
yield orgDataProd
@pytest.fixture(scope="session")
def CKAN_Cached_Test_Org_Data(TestTestOrgCacheJsonFile, CKANWrapperTest):
"""Checks to see if a cache file exists in the junk directory. If it does
load the data from there otherwise will make an api call, cache the data for
next time and then return the org data
This method returns the prod data
"""
if not os.path.exists(TestTestOrgCacheJsonFile):
orgDataTest = CKANWrapperTest.getOrganizations(includeData=True)
with open(TestTestOrgCacheJsonFile, 'w') as outfile:
json.dump(orgDataTest, outfile)
else:
with open(TestTestOrgCacheJsonFile) as json_file:
orgDataTest = json.load(json_file)
yield orgDataTest
@pytest.fixture(scope="session")
def CKAN_Cached_Test_Org_Data_Set(CKAN_Cached_Test_Org_Data):
ds = CKANData.CKANOrganizationDataSet(CKAN_Cached_Test_Org_Data)
yield ds
@pytest.fixture(scope="session")
def CKAN_Cached_Test_Org_Record(CKAN_Cached_Test_Org_Data_Set):
rec = CKAN_Cached_Test_Org_Data_Set.next()
yield rec
| [
"json.dump",
"json.load",
"CKANData.CKANOrganizationDataSet",
"pytest.fixture",
"os.path.exists",
"CKANData.CKANUsersDataSet",
"tests.helpers.CKANDataHelpers.CKAN_Test_Data",
"logging.getLogger"
] | [((207, 234), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (224, 234), False, 'import logging\n'), ((238, 269), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (252, 269), False, 'import pytest\n'), ((477, 508), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (491, 508), False, 'import pytest\n'), ((884, 915), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (898, 915), False, 'import pytest\n'), ((1068, 1099), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1082, 1099), False, 'import pytest\n'), ((1418, 1449), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1432, 1449), False, 'import pytest\n'), ((2146, 2177), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (2160, 2177), False, 'import pytest\n'), ((2874, 2905), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (2888, 2905), False, 'import pytest\n'), ((3048, 3079), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (3062, 3079), False, 'import pytest\n'), ((3222, 3253), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (3236, 3253), False, 'import pytest\n'), ((3970, 4001), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (3984, 4001), False, 'import pytest\n'), ((4697, 4728), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (4711, 4728), False, 'import pytest\n'), ((4875, 4906), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (4889, 4906), False, 'import pytest\n'), ((359, 391), 'tests.helpers.CKANDataHelpers.CKAN_Test_Data', 
'CKANDataHelpers.CKAN_Test_Data', ([], {}), '()\n', (389, 391), True, 'import tests.helpers.CKANDataHelpers as CKANDataHelpers\n'), ((990, 1039), 'CKANData.CKANUsersDataSet', 'CKANData.CKANUsersDataSet', (['CKANData_User_Data_Raw'], {}), '(CKANData_User_Data_Raw)\n', (1015, 1039), False, 'import CKANData\n'), ((2979, 3032), 'CKANData.CKANUsersDataSet', 'CKANData.CKANUsersDataSet', (['CKAN_Cached_Test_User_Data'], {}), '(CKAN_Cached_Test_User_Data)\n', (3004, 3032), False, 'import CKANData\n'), ((3153, 3206), 'CKANData.CKANUsersDataSet', 'CKANData.CKANUsersDataSet', (['CKAN_Cached_Prod_User_Data'], {}), '(CKAN_Cached_Prod_User_Data)\n', (3178, 3206), False, 'import CKANData\n'), ((4800, 4859), 'CKANData.CKANOrganizationDataSet', 'CKANData.CKANOrganizationDataSet', (['CKAN_Cached_Test_Org_Data'], {}), '(CKAN_Cached_Test_Org_Data)\n', (4832, 4859), False, 'import CKANData\n'), ((1788, 1829), 'os.path.exists', 'os.path.exists', (['TestProdUserCacheJsonfile'], {}), '(TestProdUserCacheJsonfile)\n', (1802, 1829), False, 'import os\n'), ((2516, 2557), 'os.path.exists', 'os.path.exists', (['TestTestUserCacheJsonfile'], {}), '(TestTestUserCacheJsonfile)\n', (2530, 2557), False, 'import os\n'), ((3611, 3651), 'os.path.exists', 'os.path.exists', (['TestProdOrgCacheJsonFile'], {}), '(TestProdOrgCacheJsonFile)\n', (3625, 3651), False, 'import os\n'), ((4338, 4378), 'os.path.exists', 'os.path.exists', (['TestTestOrgCacheJsonFile'], {}), '(TestTestOrgCacheJsonFile)\n', (4352, 4378), False, 'import os\n'), ((1971, 2003), 'json.dump', 'json.dump', (['userDataProd', 'outfile'], {}), '(userDataProd, outfile)\n', (1980, 2003), False, 'import json\n'), ((2100, 2120), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (2109, 2120), False, 'import json\n'), ((2699, 2731), 'json.dump', 'json.dump', (['userDataTest', 'outfile'], {}), '(userDataTest, outfile)\n', (2708, 2731), False, 'import json\n'), ((2828, 2848), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', 
(2837, 2848), False, 'import json\n'), ((3799, 3830), 'json.dump', 'json.dump', (['orgDataProd', 'outfile'], {}), '(orgDataProd, outfile)\n', (3808, 3830), False, 'import json\n'), ((3925, 3945), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (3934, 3945), False, 'import json\n'), ((4526, 4557), 'json.dump', 'json.dump', (['orgDataTest', 'outfile'], {}), '(orgDataTest, outfile)\n', (4535, 4557), False, 'import json\n'), ((4652, 4672), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (4661, 4672), False, 'import json\n')] |
from app import *
from threading import Thread
def get_morning_news() :
    """Background worker loop: at SEND_HOUR each day, fetch and publish
    the morning news digests configured in ``f_send``.

    Relies on names star-imported from ``app`` (root_logger, child_logger,
    parent_logger, f_send, SEND_HOUR, dbout, get_maekyung_msg,
    get_hankyung_issue_today, datetime, time) — presumably the application
    module wires these up; TODO confirm.  Never returns.
    """
    root_logger.critical("< NEWS > get_morning_new Thread Started ... ")
    while True :
        # Re-read the clock on every pass so the hour check stays current.
        d_month = datetime.now().month
        d_day = datetime.now().day
        d_hour = datetime.now().hour
        # Send the news at the scheduled hour; each flag marks whether a
        # given source still needs to be sent today.
        for key, flag in f_send.items() :
            child_logger.debug("< NEWS > running... ")
            if d_hour == SEND_HOUR and flag == True :
                # Maeil Business News Korea
                if key == 'maekyung' :
                    status, maekyung = get_maekyung_msg(d_month, d_day)
                    if status == 200 :
                        dbout('\r\n' + maekyung)
                        parent_logger.info("< NEWS > Success get_maekyung_msg()... ")
                    else :
                        dbout(f'\r\nStatus : {status}\nMessage : {maekyung}\n')
                        root_logger.warning(f'Status : {status}\nMessage : {maekyung}')
                    # Mark as sent (or failed) so we don't retry this hour.
                    f_send[key] = False
                # The Korea Economic Daily
                elif key == 'hankyung' :
                    status, hankyung = get_hankyung_issue_today(d_month, d_day)
                    if status == 200 :
                        dbout('\r\n' + hankyung)
                        parent_logger.info("< NEWS > Success get_hankyung_issue_today()... ")
                    else :
                        dbout(f'\r\nStatus : {status}\nMessage : {hankyung}\n')
                        root_logger.warning(f'Status : {status}\nMessage : {hankyung}')
                    f_send[key] = False
                else :
                    dbout('Err. Wrong Key.')
                    root_logger.warning('< NEWS > Err. Wrong Key.')
                time.sleep(1)
            elif d_hour != SEND_HOUR :
                # Outside the send window: re-arm the flag for the next day.
                f_send[key] = True
        time.sleep(60)
def scraping_news():
    """Run the morning-news loop on a worker thread and block until it exits."""
    worker = Thread(target=get_morning_news)
    worker.start()
    worker.join()
if __name__ == '__main__' :
    # Startup banner, then hand control to the scraper loop (blocks forever).
    root_logger.critical("============================================")
    root_logger.critical("")
    root_logger.critical("   < S C R A P E R >  S T A R T ")
    root_logger.critical("                     written by ywlee")
    root_logger.critical("============================================")
    scraping_news()
"threading.Thread"
] | [((1868, 1899), 'threading.Thread', 'Thread', ([], {'target': 'get_morning_news'}), '(target=get_morning_news)\n', (1874, 1899), False, 'from threading import Thread\n')] |
from rcj_soccer.base import app, db
from rcj_soccer.models import Competition
from flask import render_template, jsonify, request
from datetime import datetime
from dateutil.parser import parse
from rcj_soccer.util import config, obj_to_dict
import logging
logger = logging.getLogger(__name__)
@app.route("/")
def list_competitions():
    """Render the landing page listing all active competitions,
    newest start date first (name as tie-breaker)."""
    competitions = Competition.query.filter_by(is_active=True)\
        .order_by(Competition.start_date.desc(), Competition.name).all()
    # `year` feeds the page footer copyright — presumably; TODO confirm template.
    return render_template("competitions.html", competitions=competitions,
                           year=datetime.utcnow().year)
@app.route("/api/competitions")
def api_list_competitions():
    """Return every competition (active or not), ordered by start date,
    as a JSON array of plain dicts."""
    competitions = Competition.query.order_by(Competition.start_date).all()
    # The old per-row ``logger.warn(dir(competition))`` was debug leftover:
    # ``warn`` is a deprecated alias of ``warning`` and dumping dir() on every
    # request only pollutes the logs.
    data = [obj_to_dict(competition) for competition in competitions]
    return jsonify(data)
@app.route("/api/competitions/<comp>/<token>",
           methods=["GET", "POST", "DELETE", "PUT"])
def api_competition(comp, token):
    """CRUD endpoint for a single competition.

    GET is unauthenticated; POST/PUT/DELETE require ``token`` to match the
    configured API token.  ``comp`` is the competition id.
    """
    if request.method == "GET":
        # .one() raises if the id is unknown — presumably surfaces as a
        # 500 rather than a 404; TODO confirm intended behaviour.
        competition = Competition.query.filter_by(id=comp).one()
        return jsonify(obj_to_dict(competition))
    if token != config.get("api", "token"):
        return jsonify({"error": "invalid token"})
    if request.method == "POST":
        # Create: all fields are required in the request body.
        body = request.get_json()
        competition = Competition()
        competition.id = comp
        competition.name = body["name"]
        competition.fb_link = body["fb_link"]
        competition.twitter_link = body["twitter_link"]
        competition.event_sponsor_link = body["event_sponsor"]["link"]
        competition.event_sponsor_img = body["event_sponsor"]["img"]
        competition.is_active = True
        competition.start_date = parse(body["start_date"])
        db.session.add(competition)
        db.session.commit()
        return jsonify({"status": "created"})
    elif request.method == "DELETE":
        competition = Competition.query.filter_by(id=comp).one()
        db.session.delete(competition)
        db.session.commit()
        return jsonify({"status": "deleted"})
    elif request.method == "PUT":
        # Partial update: only keys present in the body are changed.
        competition = Competition.query.filter_by(id=comp).one()
        body = request.get_json()
        if "name" in body:
            competition.name = body["name"]
        if "fb_link" in body:
            competition.fb_link = body["fb_link"]
        if "twitter_link" in body:
            competition.twitter_link = body["twitter_link"]
        if "active" in body:
            competition.is_active = body["active"]
        if "start_date" in body:
            competition.start_date = parse(body["start_date"])
        if "event_sponsor" in body:
            if "link" in body["event_sponsor"]:
                competition.event_sponsor_link = body["event_sponsor"]["link"]
            if "img" in body["event_sponsor"]:
                competition.event_sponsor_img = body["event_sponsor"]["img"]
        db.session.commit()
        return jsonify(obj_to_dict(competition))
def get_competition(id):
    """Return the active Competition with the given id, or None if absent."""
    return Competition.query.filter_by(id=id, is_active=True).first()
| [
"rcj_soccer.base.db.session.delete",
"dateutil.parser.parse",
"rcj_soccer.base.db.session.add",
"rcj_soccer.base.app.route",
"rcj_soccer.base.db.session.commit",
"rcj_soccer.models.Competition.query.order_by",
"rcj_soccer.models.Competition.start_date.desc",
"flask.jsonify",
"rcj_soccer.models.Compe... | [((267, 294), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (284, 294), False, 'import logging\n'), ((298, 312), 'rcj_soccer.base.app.route', 'app.route', (['"""/"""'], {}), "('/')\n", (307, 312), False, 'from rcj_soccer.base import app, db\n'), ((609, 639), 'rcj_soccer.base.app.route', 'app.route', (['"""/api/competitions"""'], {}), "('/api/competitions')\n", (618, 639), False, 'from rcj_soccer.base import app, db\n'), ((929, 1020), 'rcj_soccer.base.app.route', 'app.route', (['"""/api/competitions/<comp>/<token>"""'], {'methods': "['GET', 'POST', 'DELETE', 'PUT']"}), "('/api/competitions/<comp>/<token>', methods=['GET', 'POST',\n 'DELETE', 'PUT'])\n", (938, 1020), False, 'from rcj_soccer.base import app, db\n'), ((912, 925), 'flask.jsonify', 'jsonify', (['data'], {}), '(data)\n', (919, 925), False, 'from flask import render_template, jsonify, request\n'), ((1225, 1251), 'rcj_soccer.util.config.get', 'config.get', (['"""api"""', '"""token"""'], {}), "('api', 'token')\n", (1235, 1251), False, 'from rcj_soccer.util import config, obj_to_dict\n'), ((1268, 1303), 'flask.jsonify', 'jsonify', (["{'error': 'invalid token'}"], {}), "({'error': 'invalid token'})\n", (1275, 1303), False, 'from flask import render_template, jsonify, request\n'), ((1353, 1371), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1369, 1371), False, 'from flask import render_template, jsonify, request\n'), ((1394, 1407), 'rcj_soccer.models.Competition', 'Competition', ([], {}), '()\n', (1405, 1407), False, 'from rcj_soccer.models import Competition\n'), ((1790, 1815), 'dateutil.parser.parse', 'parse', (["body['start_date']"], {}), "(body['start_date'])\n", (1795, 1815), False, 'from dateutil.parser import parse\n'), ((1825, 1852), 'rcj_soccer.base.db.session.add', 'db.session.add', (['competition'], {}), '(competition)\n', (1839, 1852), False, 'from rcj_soccer.base import app, db\n'), ((1861, 1880), 
'rcj_soccer.base.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1878, 1880), False, 'from rcj_soccer.base import app, db\n'), ((1896, 1926), 'flask.jsonify', 'jsonify', (["{'status': 'created'}"], {}), "({'status': 'created'})\n", (1903, 1926), False, 'from flask import render_template, jsonify, request\n'), ((688, 738), 'rcj_soccer.models.Competition.query.order_by', 'Competition.query.order_by', (['Competition.start_date'], {}), '(Competition.start_date)\n', (714, 738), False, 'from rcj_soccer.models import Competition\n'), ((874, 898), 'rcj_soccer.util.obj_to_dict', 'obj_to_dict', (['competition'], {}), '(competition)\n', (885, 898), False, 'from rcj_soccer.util import config, obj_to_dict\n'), ((1182, 1206), 'rcj_soccer.util.obj_to_dict', 'obj_to_dict', (['competition'], {}), '(competition)\n', (1193, 1206), False, 'from rcj_soccer.util import config, obj_to_dict\n'), ((2037, 2067), 'rcj_soccer.base.db.session.delete', 'db.session.delete', (['competition'], {}), '(competition)\n', (2054, 2067), False, 'from rcj_soccer.base import app, db\n'), ((2076, 2095), 'rcj_soccer.base.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2093, 2095), False, 'from rcj_soccer.base import app, db\n'), ((2111, 2141), 'flask.jsonify', 'jsonify', (["{'status': 'deleted'}"], {}), "({'status': 'deleted'})\n", (2118, 2141), False, 'from flask import render_template, jsonify, request\n'), ((3109, 3159), 'rcj_soccer.models.Competition.query.filter_by', 'Competition.query.filter_by', ([], {'id': 'id', 'is_active': '(True)'}), '(id=id, is_active=True)\n', (3136, 3159), False, 'from rcj_soccer.models import Competition\n'), ((420, 449), 'rcj_soccer.models.Competition.start_date.desc', 'Competition.start_date.desc', ([], {}), '()\n', (447, 449), False, 'from rcj_soccer.models import Competition\n'), ((582, 599), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (597, 599), False, 'from datetime import datetime\n'), ((1116, 1152), 
'rcj_soccer.models.Competition.query.filter_by', 'Competition.query.filter_by', ([], {'id': 'comp'}), '(id=comp)\n', (1143, 1152), False, 'from rcj_soccer.models import Competition\n'), ((2256, 2274), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (2272, 2274), False, 'from flask import render_template, jsonify, request\n'), ((2995, 3014), 'rcj_soccer.base.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3012, 3014), False, 'from rcj_soccer.base import app, db\n'), ((357, 400), 'rcj_soccer.models.Competition.query.filter_by', 'Competition.query.filter_by', ([], {'is_active': '(True)'}), '(is_active=True)\n', (384, 400), False, 'from rcj_soccer.models import Competition\n'), ((1986, 2022), 'rcj_soccer.models.Competition.query.filter_by', 'Competition.query.filter_by', ([], {'id': 'comp'}), '(id=comp)\n', (2013, 2022), False, 'from rcj_soccer.models import Competition\n'), ((2672, 2697), 'dateutil.parser.parse', 'parse', (["body['start_date']"], {}), "(body['start_date'])\n", (2677, 2697), False, 'from dateutil.parser import parse\n'), ((3038, 3062), 'rcj_soccer.util.obj_to_dict', 'obj_to_dict', (['competition'], {}), '(competition)\n', (3049, 3062), False, 'from rcj_soccer.util import config, obj_to_dict\n'), ((2198, 2234), 'rcj_soccer.models.Competition.query.filter_by', 'Competition.query.filter_by', ([], {'id': 'comp'}), '(id=comp)\n', (2225, 2234), False, 'from rcj_soccer.models import Competition\n')] |
#!/usr/bin/python
"""
A very crude emulator of dejagnu, just enough to integrate the libbfi
unittests into the pyobjc ones.
"""
import codecs
import os
import re
import signal
import sys
from fnmatch import fnmatch
import unittest
from distutils.util import get_platform
gDgCommands=re.compile(r'''
(?:{\s*(dg-do)\s*run\s*({[^}]*})?\s*})
|
(?:{\s*(dg-output)\s*"([^"]*)"\s*})
''',
re.VERBOSE|re.MULTILINE)
def signame(code):
    """Return the SIG* name for a numeric signal, or *code* itself if unknown.

    Pseudo-names such as SIG_IGN/SIG_DFL (underscore after 'SIG') are skipped.
    """
    for name in dir(signal):
        looks_like_signal = name.startswith('SIG') and name[3] != '_'
        if looks_like_signal and getattr(signal, name) == code:
            return name
    return code
def exitCode2Description(code):
    """
    Convert the exit code as returned by os.popen().close() to a string
    """
    if os.WIFEXITED(code):
        return 'exited with status %s' % (os.WEXITSTATUS(code),)
    if os.WIFSIGNALED(code):
        signum = os.WTERMSIG(code)
        return 'crashed with signal %s [%s]' % (signame(signum), signum)
    return 'exit code %s' % (code,)
def platform_matches(matchstr):
    """Glob-match *matchstr* against a crude guess of the host triple."""
    # This is a hack: byte order stands in for detecting the architecture.
    host = 'i386-apple-darwin' if sys.byteorder == 'little' else 'powerpc-apple-darwin'
    return fnmatch(host, matchstr)
def parseDG(fdata):
    """Parse dejagnu ``dg-`` directives out of a testcase's source text.

    Returns a list of (command, data) tuples:
      ('run', options)  for ``{ dg-do run {...} }``
      ('expect', text)  for ``{ dg-output "..." }`` with backslash escapes expanded
    """
    result = []
    for item in gDgCommands.findall(fdata):
        if item[0] == 'dg-do':
            result.append(('run', item[1]))
        elif item[2] == 'dg-output':
            # str.decode('string_escape') exists only on Python 2; on
            # Python 3 it raises AttributeError.  codecs.decode with
            # 'unicode_escape' expands \n, \t, etc. on both versions.
            result.append(('expect', codecs.decode(item[3], 'unicode_escape')))
    return result
class DgTestCase (unittest.TestCase):
    """A single dejagnu-style testcase: compile a .c file at several
    optimization levels, run the binary, and compare its output against
    the expected ``dg-output`` text embedded in the source."""

    def __init__(self, filename):
        # ``filename`` is the path to the .c testcase to compile and run.
        unittest.TestCase.__init__(self)
        self.filename = filename

    #archOption = "-arch ppc"
    #archOption = "-arch ppc64"
    #archOption = "-arch i386"
    archOption = "-arch x86_64"
    #archOption = ""
    # Common cc flags: binary goes to /tmp/test.bin and links against libffi.
    compileOptionsBase = "-g -DMACOSX -Iinclude -o /tmp/test.bin -lffi"
    compileOptionsList = ( # HACK ALERT: Yes, there are better ways to do this, but this is easy and extremely flexible
        "%s %s %s" % (compileOptionsBase, archOption, "-O0"),
        "%s %s %s" % (compileOptionsBase, archOption, "-O1"),
        "%s %s %s" % (compileOptionsBase, archOption, "-O2"),
        "%s %s %s" % (compileOptionsBase, archOption, "-O3"),
        "%s %s %s" % (compileOptionsBase, archOption, "-Os"),
        "%s %s %s" % (compileOptionsBase, archOption, "-Oz"), # Note: Apple-Only, see gcc man page for details
    )

    def runTest(self):
        """Parse the dg- directives, honour platform restrictions, then
        compile+run the testcase once per entry in compileOptionsList."""
        script = parseDG(open(self.filename).read())
        output = []
        for command, data in script:
            if command == 'run':
                action = 'run'
                action_data = data
            if command == 'expect':
                output.append(data)
        # Concatenated expected output; backslashes are stripped —
        # presumably to undo escaping left over from the dg syntax.
        output = ''.join(output)
        output = output.replace('\\', '')
        d = action_data.split()
        if d and d[1] == 'target':
            # Only run if one of the listed target globs matches this host.
            for item in d[2:]:
                if platform_matches(item):
                    break
            else:
                # Test shouldn't be run on this platform
                return
        # NOTE: We're ignoring the xfail data for now, none of the
        # testcases are supposed to fail on darwin.
        for compileOptions in self.compileOptionsList:
            self.compileTestCase(compileOptions)
            data = self.runTestCase()
            if output != '':
                self.assertEquals(data.rstrip(), output.rstrip())
            os.unlink('/tmp/test.bin')

    def shortDescription(self):
        """Return 'dejagnu.<dir>.<basename-without-.c>' for test listings."""
        fn = os.path.basename(self.filename)[:-2]
        dn = os.path.basename(os.path.dirname(self.filename))
        return "dejagnu.%s.%s"%(dn, fn)

    def compileTestCase(self, compileOptions):
        """Compile the testcase with cc; fail the test on a non-zero exit."""
        # libdir = os.path.join('build', 'temp.%s-%d.%d'%(get_platform(), sys.version_info[0], sys.version_info[1]), 'libffi-src')
        # libffiobjects = self.object_files(libdir)
        commandline='cc %s %s 2>&1' % (compileOptions, self.filename)
        fp = os.popen(commandline)
        data = fp.read()
        xit = fp.close()
        if xit != None:
            self.fail("Compile failed[%s]:\n%s"%(xit, data))

    def runTestCase(self):
        """Run the freshly-built binary and return its captured stdout."""
        # Bind all dyld symbols up front so missing symbols fail immediately.
        os.environ['DYLD_BIND_AT_LAUNCH'] = '1'
        fp = os.popen('/tmp/test.bin', 'r')
        del os.environ['DYLD_BIND_AT_LAUNCH']
        data = fp.read()
        xit = fp.close()
        if xit != None:
            self.fail("Running failed (%s)"%(exitCode2Description(xit),))
        return data

    def object_files(self, basedir):
        """Return all .o files below *basedir* (helper for the commented-out
        manual-link path in compileTestCase)."""
        result = []
        for dirpath, dirnames, filenames in os.walk(basedir):
            for fn in filenames:
                if fn.endswith('.o'):
                    result.append(os.path.join(dirpath, fn))
        return result
def testSuiteForDirectory(dirname):
    """Build a TestSuite from every .c testcase in *dirname*.

    When the module-level ``alltests`` list is non-empty it acts as a
    whitelist of allowed test descriptions.
    """
    selected = []
    for entry in os.listdir(dirname):
        if not entry.endswith('.c'):
            continue
        case = DgTestCase(os.path.join(dirname, entry))
        if alltests and case.shortDescription() not in alltests:
            continue
        selected.append(case)
    return unittest.TestSuite(selected)
# Optional whitelist of test descriptions; filled from argv when run directly.
alltests = []
if __name__ == "__main__":
    alltests = sys.argv[1:]
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(testSuiteForDirectory('tests/testsuite/libffi.call'))
| [
"unittest.TestCase.__init__",
"unittest.TextTestRunner",
"unittest.TestSuite",
"os.path.basename",
"os.unlink",
"os.path.dirname",
"os.popen",
"os.walk",
"os.WTERMSIG",
"os.WIFEXITED",
"os.WEXITSTATUS",
"fnmatch.fnmatch",
"os.WIFSIGNALED",
"os.path.join",
"os.listdir",
"re.compile"
] | [((270, 442), 're.compile', 're.compile', (['"""\n (?:{\\\\s*(dg-do)\\\\s*run\\\\s*({[^}]*})?\\\\s*})\n |\n (?:{\\\\s*(dg-output)\\\\s*"([^"]*)"\\\\s*})\n """', '(re.VERBOSE | re.MULTILINE)'], {}), '(\n """\n (?:{\\\\s*(dg-do)\\\\s*run\\\\s*({[^}]*})?\\\\s*})\n |\n (?:{\\\\s*(dg-output)\\\\s*"([^"]*)"\\\\s*})\n """\n , re.VERBOSE | re.MULTILINE)\n', (280, 442), False, 'import re\n'), ((750, 768), 'os.WIFEXITED', 'os.WIFEXITED', (['code'], {}), '(code)\n', (762, 768), False, 'import os\n'), ((1202, 1229), 'fnmatch.fnmatch', 'fnmatch', (['platform', 'matchstr'], {}), '(platform, matchstr)\n', (1209, 1229), False, 'from fnmatch import fnmatch\n'), ((4816, 4835), 'os.listdir', 'os.listdir', (['dirname'], {}), '(dirname)\n', (4826, 4835), False, 'import os\n'), ((5055, 5080), 'unittest.TestSuite', 'unittest.TestSuite', (['tests'], {}), '(tests)\n', (5073, 5080), False, 'import unittest\n'), ((5164, 5200), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (5187, 5200), False, 'import unittest\n'), ((843, 863), 'os.WIFSIGNALED', 'os.WIFSIGNALED', (['code'], {}), '(code)\n', (857, 863), False, 'import os\n'), ((1595, 1627), 'unittest.TestCase.__init__', 'unittest.TestCase.__init__', (['self'], {}), '(self)\n', (1621, 1627), False, 'import unittest\n'), ((3982, 4003), 'os.popen', 'os.popen', (['commandline'], {}), '(commandline)\n', (3990, 4003), False, 'import os\n'), ((4229, 4259), 'os.popen', 'os.popen', (['"""/tmp/test.bin"""', '"""r"""'], {}), "('/tmp/test.bin', 'r')\n", (4237, 4259), False, 'import os\n'), ((4577, 4593), 'os.walk', 'os.walk', (['basedir'], {}), '(basedir)\n', (4584, 4593), False, 'import os\n'), ((879, 896), 'os.WTERMSIG', 'os.WTERMSIG', (['code'], {}), '(code)\n', (890, 896), False, 'import os\n'), ((3528, 3559), 'os.path.basename', 'os.path.basename', (['self.filename'], {}), '(self.filename)\n', (3544, 3559), False, 'import os\n'), ((3595, 3625), 'os.path.dirname', 'os.path.dirname', 
(['self.filename'], {}), '(self.filename)\n', (3610, 3625), False, 'import os\n'), ((4905, 4930), 'os.path.join', 'os.path.join', (['dirname', 'fn'], {}), '(dirname, fn)\n', (4917, 4930), False, 'import os\n'), ((810, 830), 'os.WEXITSTATUS', 'os.WEXITSTATUS', (['code'], {}), '(code)\n', (824, 830), False, 'import os\n'), ((3454, 3480), 'os.unlink', 'os.unlink', (['"""/tmp/test.bin"""'], {}), "('/tmp/test.bin')\n", (3463, 3480), False, 'import os\n'), ((4700, 4725), 'os.path.join', 'os.path.join', (['dirpath', 'fn'], {}), '(dirpath, fn)\n', (4712, 4725), False, 'import os\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Re-add the ``expires`` field removed by the previous migration,
    back-filling existing rows with a fixed UTC timestamp."""

    dependencies = [
        ('poll', '0002_remove_poll_expires'),
    ]

    operations = [
        migrations.AddField(
            model_name='poll',
            name='expires',
            # One-off default used only to populate existing rows.
            field=models.DateTimeField(default=datetime.datetime(2015, 10, 25, 17, 52, 51, 968925, tzinfo=utc)),
            preserve_default=False,
        ),
    ]
| [
"datetime.datetime"
] | [((430, 493), 'datetime.datetime', 'datetime.datetime', (['(2015)', '(10)', '(25)', '(17)', '(52)', '(51)', '(968925)'], {'tzinfo': 'utc'}), '(2015, 10, 25, 17, 52, 51, 968925, tzinfo=utc)\n', (447, 493), False, 'import datetime\n')] |
import os
import time
import re
# import lxml
from lxml import etree
from tool import read_txt_file_to_list
# Append a sample line to the test HTML file, then re-read the whole file.
with open('ttttt.html','a',encoding = 'utf-8') as f:
    f.write('盖雅放假啊')
    f.write('\n')
with open('ttttt.html','r',encoding = 'utf-8') as f:
    fileHtml = f.read()
# Load the list of sensitive keywords (one per line, presumably — see
# tool.read_txt_file_to_list; TODO confirm).
sensitiveKeywordListUri = './config/sensitiveKeywords.txt'
sensitiveKeywordList = read_txt_file_to_list(sensitiveKeywordListUri)
print(sensitiveKeywordList)
isHaveSensitiveKeyword = True
fileWeight = 0
havedSensitiveKeywordList = []
# Collect every occurrence of every keyword in the file contents.
# NOTE(review): the keyword is interpolated into the pattern unescaped, so
# regex metacharacters in a keyword change the match semantics — consider
# re.escape() if keywords are meant to be literal text.
for sensitiveKeyword in sensitiveKeywordList:
    regPattern = re.compile(r'' + sensitiveKeyword + '')
    result = regPattern.findall(fileHtml)
    havedSensitiveKeywordList.extend(result)
if len(havedSensitiveKeywordList) == 0:
    isHaveSensitiveKeyword = False
    print("不包含一些敏感信息词汇")
else:
    # Weight the file by the total number of sensitive-word hits.
    fileWeight = fileWeight + len(havedSensitiveKeywordList)
    print("包含的敏感词汇如下:")
    print(havedSensitiveKeywordList)
| [
"tool.read_txt_file_to_list",
"re.compile"
] | [((361, 407), 'tool.read_txt_file_to_list', 'read_txt_file_to_list', (['sensitiveKeywordListUri'], {}), '(sensitiveKeywordListUri)\n', (382, 407), False, 'from tool import read_txt_file_to_list\n'), ((575, 613), 're.compile', 're.compile', (["('' + sensitiveKeyword + '')"], {}), "('' + sensitiveKeyword + '')\n", (585, 613), False, 'import re\n')] |
# coding: utf-8
import re
import logging
from decimal import Decimal
from allauth import account
from django import forms
from django.forms import widgets
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.forms.utils import ErrorList, ErrorDict
from autocomplete_light import shortcuts as autocomplete_light
from bitcategory.fields import HierarchicalField
from captcha import fields as recaptcha_fields
from market.utils.forms import (
ModelChoiceCreationField,
Model2CharField,
)
from market.utils.widgets import (
CurrencyInput,
ClearableImageInput,
)
from . import models
logger = logging.getLogger(__name__)

# register autocomplete stuff
# Autocomplete channels for Manufacturer and Product: both search on
# ``name``, start suggesting after 3 typed characters, and cap the
# widget at 6 selected values.
autocomplete_light.register(
    models.Manufacturer,
    search_fields=['name', ],
    autocomplete_js_attributes={
        'minimum_characters': 3,
    },
    widget_js_attributes={
        'max_values': 6,
    }
)
autocomplete_light.register(
    models.Product,
    search_fields=['name', ],
    autocomplete_js_attributes={
        'minimum_characters': 3,
    },
    widget_js_attributes={
        'max_values': 6,
    }
)
class UserForm(forms.ModelForm):
    """Form to update basic user's information (currently only the name)."""
    class Meta:
        """Bind the form to the User model, exposing the ``name`` field."""
        fields = ("name",)
        model = models.User
class SecuredEmailForm(forms.Form):
    """Form to get contact email from the user.

    Password is optional: when given, the (email, password) pair is
    authenticated; when omitted, the email must not already be a
    confirmed account address.
    """
    messages = {
        "email_exists": _("Email already registered."),
        "invalid_credentials": _("Incorrect email and password combination"),
    }
    # captcha = recaptcha_fields.ReCaptchaField()
    email = forms.EmailField(required=True)
    password = forms.CharField(label=_("Password"), required=False,
                               widget=widgets.PasswordInput)

    def __init__(self, *args, **kwargs):
        """Save `request` from kwargs for optional password validation."""
        self.request = kwargs.pop('request', None)
        super(SecuredEmailForm, self).__init__(*args, **kwargs)

    def clean(self):
        """Validate whether email has not been taken yet.

        On success, cleaned_data["user"] holds the authenticated user,
        or None when no password was supplied.
        """
        cleaned_data = super(SecuredEmailForm, self).clean()
        user = None
        if cleaned_data.get('email'):
            if cleaned_data.get('password'):
                # Password supplied: the pair must authenticate.
                user = (account.adapter.get_adapter(self.request)
                        .authenticate(
                            self.request,
                            email=cleaned_data['email'],
                            password=cleaned_data['password']))
                if user is None:
                    raise ValidationError(self.messages["invalid_credentials"])
            else:  # no password
                # Email alone: reject addresses already confirmed by allauth.
                confirmed_email = account.models.EmailAddress.objects.filter(
                    email=cleaned_data['email'])
                if confirmed_email.exists():
                    raise ValidationError(self.messages["email_exists"])
        cleaned_data["user"] = user
        return cleaned_data
class BaseSignupForm(forms.Form):
    """Serves as a base class for (allauth.)account.forms.SignupForm.

    Its only purpose is to provide a `full name` field that is split into
    first_name / last_name for backward compatibility.
    """
    messages = {
        "name": _("Name is required")
    }
    name = forms.CharField(label=_("Full name"),
                           max_length=70, required=True,
                           widget=widgets.TextInput(
                               attrs={'placeholder': _('Full name'),
                                      'autofocus': 'autofocus'}))

    def signup(self, request, user):
        """Invoked at signup time to complete the signup of the user."""
        pass

    def clean(self):
        """Split name into first_name and last_name for backward compatibility."""
        cleaned_data = super().clean()
        if 'name' not in cleaned_data:
            raise ValidationError(self.messages['name'])
        # ``split(" ", 1)`` crashed with ValueError when the user entered a
        # single-word name; ``partition`` keeps the old two-word behaviour
        # and falls back to an empty last name instead.
        first, _sep, last = cleaned_data['name'].strip().partition(" ")
        cleaned_data['first_name'] = first
        cleaned_data['last_name'] = last
        return cleaned_data
class AddressForm(forms.ModelForm):
    """Mandatory address form."""
    class Meta:
        """Meta."""
        model = models.Address
        # Ownership/state/position fields are managed programmatically,
        # never edited by the user here.
        exclude = ("user_shipping", "user_billing", "state", "position", "position_x", "position_y")
        widgets = {
            "extra": forms.Textarea(attrs={"cols": 23, "rows": 5})
        }
class PositionForm(forms.ModelForm):
    """It is an Address form with fields necessary for location and is optional."""
    # When unchecked the whole address is optional: is_valid()/clean() below
    # short-circuit validation.
    address_visible = forms.BooleanField(label=_("I have a physical vendor."),
                                         required=False)
    class Meta:
        """Meta."""
        model = models.Address
        # Only location-related fields remain; the identification fields are
        # handled by VendorAddressForm.
        exclude = ("user_shipping", "user_billing", "state",
                   "name", "business_id", "tax_id", "zip_code")
        widgets = {
            'position': forms.HiddenInput,
            'position_x': forms.HiddenInput,
            'position_y': forms.HiddenInput,
        }
    class Media:
        """Media."""
        # Mapy.cz JS API -- presumably used by the hidden position widgets
        # for map picking; confirm against the templates.
        js = ('https://api4.mapy.cz/loader.js', )
    def is_valid(self):
        """Decide whether to clean form based on visibility of the address.

        When the address is not visible the form counts as valid regardless
        of any field errors.
        """
        if not hasattr(self, "cleaned_data"):
            self.full_clean()
        if not self.cleaned_data.get("address_visible", False):
            return True
        return super(PositionForm, self).is_valid()
    def clean(self):
        # Discard accumulated field errors when the address is hidden; the
        # data will not be used anyway.
        data = self.cleaned_data
        if not data["address_visible"]:
            self._errors = ErrorDict()
        return data
class VendorAddressForm(forms.ModelForm):
    """Vendor address has more mandatory field than generic address."""
    # Name and zip code are required for vendors; business and tax
    # identifiers stay optional.
    name = forms.CharField(max_length=255, required=True, label=_("Name"))
    business_id = forms.CharField(max_length=10, required=False, label=_("Business number"))
    tax_id = forms.CharField(max_length=12, required=False, label=_("Tax ID"))
    zip_code = forms.CharField(max_length=10, required=True, label=_("Zip code"))
    class Meta:
        """Meta."""
        model = models.Address
        exclude = ("user_shipping", "user_billing", "state", "position", "position_x", "position_y")
class VendorForm(forms.ModelForm):
    """Form for creating and updating Vendor."""
    category = HierarchicalField(queryset=models.Category.objects.all(), label=_("Category"))
    bank_account = Model2CharField(
        models.BankAccount, max_length=30, label=_("Bank account"), required=False)
    _messages = {
        "bank_account_number": _("Bank account number should be PREFIX - NUMBER / BANK"),
    }

    class Meta:
        """Define model and fields to be handled."""
        model = models.Vendor
        fields = ("name", "category", "motto", "description", "ships", "logo", "openings", "bank_account")
        widgets = {
            'description': forms.Textarea(attrs={"class": "input-xxlarge"}),
        }

    def __init__(self, *args, **kwargs):
        """Initialize M2M with all possibilities; force the "vendor" prefix."""
        kwargs.update(prefix="vendor")
        super(VendorForm, self).__init__(*args, **kwargs)

    def clean_bank_account(self):
        """Parse bank account number and construct an instance of BankAccount model.

        Returns None when the field was left empty; raises ValidationError
        when the value does not match ``[PREFIX-]NUMBER/BANK`` (4-digit bank).
        """
        if not self.cleaned_data.get('bank_account'):
            return None
        number = self.cleaned_data['bank_account']
        match_o = re.match(r'(?:(\d+)\s*\-\s*)?(\d+)\s*/\s*(\d{4})', number)
        if match_o is None:
            raise ValidationError(self._messages["bank_account_number"])
        try:
            int(match_o.group(2))
        # BUG FIX: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; only conversion errors matter here.
        except (TypeError, ValueError):
            raise ValidationError(self._messages["bank_account_number"])
        try:
            # Reuse the related BankAccount when editing an existing vendor.
            bank_account = self.instance.bank_account
            bank_account.prefix = match_o.group(1)
            bank_account.number = match_o.group(2)
            bank_account.bank = match_o.group(3)
            bank_account.save()
        # BUG FIX: bare `except:` narrowed; any failure to reuse the related
        # account (no related object yet, unsaved instance) creates a new one.
        except Exception:
            bank_account = models.BankAccount.objects.create(
                prefix=match_o.group(1), number=match_o.group(2), bank=match_o.group(3))
        return bank_account
class ProductForm(forms.ModelForm):
    """Add product to a vendor."""
    # Autocompletes against existing products to avoid duplicates.
    name = forms.CharField(
        widget=autocomplete_light.TextWidget(
            'ProductAutocomplete',
            attrs={"placeholder": _("select your product if we already know it")}),
        required=True, label=_("Name"))
    category = HierarchicalField(queryset=models.Category.objects.all(), label=_("Category"))
    # Free-text manufacturer with autocomplete; the field name suggests an
    # unknown value creates a new Manufacturer -- verify in the field class.
    manufacturer = ModelChoiceCreationField(
        label=_("Manufacturer"),
        queryset=models.Manufacturer.objects.all(),
        to_field_name="name", required=False,
        widget=autocomplete_light.TextWidget(
            'ManufacturerAutocomplete',
            attrs={"placeholder": _("select the manufacturer if we already know them")}))
    class Meta:
        """Meta."""
        model = models.Product
        fields = ("name", "category", "description", "manufacturer",
                  "photo", "expedition_days", "tax")
        widgets = {
            'description': forms.Textarea,
            'extra': forms.Textarea,
            "photo": ClearableImageInput,
        }
class OfferForm(forms.ModelForm):
    """ModelForm for creating and editing an Offer."""

    class Meta:
        """Meta."""
        model = models.Offer
        fields = ("product", "unit_price", "note", "shipping_price")
        widgets = {
            'product': forms.HiddenInput,
            'unit_price': CurrencyInput,
            'shipping_price': CurrencyInput,
            'note': widgets.TextInput,
        }

    class Media:
        """Media."""
        js = list()

    def clean_shipping_price(self):
        """Coerce the shipping price to Decimal, defaulting blanks to 0.00."""
        raw = self.cleaned_data.get('shipping_price', '')
        return Decimal(raw) if raw else Decimal('0.00')

    def _post_clean(self):
        """Translate the model-level ValueError into a field error on product."""
        try:
            return super(OfferForm, self)._post_clean()
        except ValueError:
            self._errors['product'] = ErrorList([_("Field is required"), ])
class AddressesForm(forms.Form):
    """Uberform which manages shipping and billing addresses.

    It provides the option for the addresses to be the same.
    You can pass `billing` (resp. `shipping`) models instances to edit them.
    You can pass your own `billing_form_class` and `shipping_form_class` which
    have to be `ModelForm` subclasses.
    Validity of empty form is controlled by a checkbox field `necessary`. If the
    addresses are not necessary then forms data "shipping" and "billing" will
    be always empty dictionaries.
    This form contains one own field - `addresses_the_same` and two subforms -
    `billing`, `shipping`
    """
    error_messages = {
        'empty': _('Shipping address has to be filled when marked different'),
        'required': _('Billing address is required')
    }
    necessary = forms.BooleanField(
        label=_("Mark whether address is necessary"), required=False, initial=False)
    addresses_the_same = forms.BooleanField(
        label=_("Shipping is the same as billing"), required=False, initial=True)

    def __init__(self, data=None, files=None, billing=None, shipping=None,
                 billing_form_class=None, shipping_form_class=None,
                 auto_id='id_%s', prefix=None, initial=None, error_class=ErrorList,
                 label_suffix=None):
        """Initialize with two addresses for billing and shipping.

        ``initial`` may carry "billing"/"shipping" sub-dicts used to seed the
        respective subforms.
        """
        # BUG FIX: `initial` defaulted to a mutable `{}` shared across calls
        # and was then mutated with pop(); use a None sentinel and read with
        # get() so a caller-supplied dict is never modified either.
        if initial is None:
            initial = {}
        super(AddressesForm, self).__init__(data, files,
                                            initial={"addresses_the_same": (shipping == billing)},
                                            label_suffix=label_suffix)
        assert billing_form_class is not None or shipping_form_class is not None
        # TODO: construct a ModelForm from Address model instance
        bform = (billing_form_class or shipping_form_class)
        sform = (shipping_form_class or billing_form_class)
        self.billing = bform(data, files, instance=billing, prefix="billing",
                             initial=initial.get("billing"), label_suffix=label_suffix)
        # The shipping instance is passed only when it differs from billing,
        # otherwise the shipping subform starts empty.
        self.shipping = sform(data, files, prefix="shipping",
                              instance=shipping if shipping != billing else None,
                              initial=initial.get("shipping"), label_suffix=label_suffix)
        self.billing_empty = False  # helper in save method (bcs Form does not have is_empty method)

    def clean(self):
        """The form is valid even when both addresses are empty."""
        data = self.cleaned_data
        is_necessary = data.get('necessary', False)
        # Billing is required (if `is_necessary`)
        if is_necessary and not self.billing.is_valid():
            raise ValidationError(self.error_messages['required'])
        # User marks addresses as different - check they both are valid
        if is_necessary and not data.get('addresses_the_same', True):
            if not all((self.shipping.is_valid(), self.billing.is_valid())):
                raise ValidationError(self.error_messages['empty'])
        # Mark as valid in case no addresses are necessary
        if not is_necessary:
            self.billing._errors = ErrorDict()
            self.shipping._errors = ErrorDict()
        data['billing'] = getattr(self.billing, "cleaned_data", {})
        data['shipping'] = getattr(self.shipping, "cleaned_data", {})
        return data

    def save(self, commit=True):
        """Return tuple with address models.

        In the case when empty form was allowed (`required=False` in the constructor)
        tuple `(None, None)` might be returned.
        """
        billing = None
        shipping = None
        if not self.cleaned_data['necessary']:
            return (billing, shipping)
        billing = self.billing.save(commit=commit)
        if self.cleaned_data['addresses_the_same']:
            shipping = billing
        else:
            # Detach the billing record from any shipping role before saving
            # a separate shipping address.
            if billing.user_shipping is not None:
                billing.user_shipping = None
                billing.save()
            shipping = self.shipping.save(commit=commit)
        return (billing, shipping)

    def save_to_request(self, request):
        """Persist the addresses and bind them to the request user, or store
        their ids in the session for anonymous users."""
        # NOTE(review): `is_authenticated` is *called* -- this targets old
        # Django where it was a method; on modern Django it is a property.
        if request.user.is_authenticated():
            billing, shipping = self.save(commit=False)
            if shipping:
                shipping.user_shipping = request.user
                if not shipping.name:
                    shipping.name = request.user.get_full_name()
                if shipping != billing:
                    # reset billing address because it could have changed
                    shipping.user_billing = None
                shipping.save()
            if billing:
                billing.user_billing = request.user
                if not billing.name:
                    billing.name = request.user.get_full_name()
                billing.save()
        else:
            billing, shipping = self.save(commit=True)
            if shipping:
                request.session['shipping_address_id'] = shipping.pk
            if billing:
                request.session['billing_address_id'] = billing.pk
        return billing, shipping
| [
"allauth.account.models.EmailAddress.objects.filter",
"django.core.exceptions.ValidationError",
"django.forms.EmailField",
"decimal.Decimal",
"django.forms.utils.ErrorDict",
"allauth.account.adapter.get_adapter",
"re.match",
"django.forms.Textarea",
"autocomplete_light.shortcuts.register",
"django... | [((675, 702), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (692, 702), False, 'import logging\n'), ((734, 908), 'autocomplete_light.shortcuts.register', 'autocomplete_light.register', (['models.Manufacturer'], {'search_fields': "['name']", 'autocomplete_js_attributes': "{'minimum_characters': 3}", 'widget_js_attributes': "{'max_values': 6}"}), "(models.Manufacturer, search_fields=['name'],\n autocomplete_js_attributes={'minimum_characters': 3},\n widget_js_attributes={'max_values': 6})\n", (761, 908), True, 'from autocomplete_light import shortcuts as autocomplete_light\n'), ((952, 1121), 'autocomplete_light.shortcuts.register', 'autocomplete_light.register', (['models.Product'], {'search_fields': "['name']", 'autocomplete_js_attributes': "{'minimum_characters': 3}", 'widget_js_attributes': "{'max_values': 6}"}), "(models.Product, search_fields=['name'],\n autocomplete_js_attributes={'minimum_characters': 3},\n widget_js_attributes={'max_values': 6})\n", (979, 1121), True, 'from autocomplete_light import shortcuts as autocomplete_light\n'), ((1651, 1682), 'django.forms.EmailField', 'forms.EmailField', ([], {'required': '(True)'}), '(required=True)\n', (1667, 1682), False, 'from django import forms\n'), ((1472, 1502), 'django.utils.translation.ugettext_lazy', '_', (['"""Email already registered."""'], {}), "('Email already registered.')\n", (1473, 1502), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1535, 1580), 'django.utils.translation.ugettext_lazy', '_', (['"""Incorrect email and password combination"""'], {}), "('Incorrect email and password combination')\n", (1536, 1580), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3277, 3298), 'django.utils.translation.ugettext_lazy', '_', (['"""Name is required"""'], {}), "('Name is required')\n", (3278, 3298), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6577, 6634), 
'django.utils.translation.ugettext_lazy', '_', (['"""Bank account number should be PREFIX - NUMBER / BANK"""'], {}), "('Bank account number should be PREFIX - NUMBER / BANK')\n", (6578, 6634), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7419, 7484), 're.match', 're.match', (['"""(?:(\\\\d+)\\\\s*\\\\-\\\\s*)?(\\\\d+)\\\\s*/\\\\s*(\\\\d{4})"""', 'number'], {}), "('(?:(\\\\d+)\\\\s*\\\\-\\\\s*)?(\\\\d+)\\\\s*/\\\\s*(\\\\d{4})', number)\n", (7427, 7484), False, 'import re\n'), ((9810, 9833), 'decimal.Decimal', 'Decimal', (['shipping_price'], {}), '(shipping_price)\n', (9817, 9833), False, 'from decimal import Decimal\n'), ((10737, 10797), 'django.utils.translation.ugettext_lazy', '_', (['"""Shipping address has to be filled when marked different"""'], {}), "('Shipping address has to be filled when marked different')\n", (10738, 10797), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((10819, 10851), 'django.utils.translation.ugettext_lazy', '_', (['"""Billing address is required"""'], {}), "('Billing address is required')\n", (10820, 10851), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1720, 1733), 'django.utils.translation.ugettext_lazy', '_', (['"""Password"""'], {}), "('Password')\n", (1721, 1733), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3339, 3353), 'django.utils.translation.ugettext_lazy', '_', (['"""Full name"""'], {}), "('Full name')\n", (3340, 3353), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3925, 3963), 'django.core.exceptions.ValidationError', 'ValidationError', (["self.messages['name']"], {}), "(self.messages['name'])\n", (3940, 3963), False, 'from django.core.exceptions import ValidationError\n'), ((4395, 4440), 'django.forms.Textarea', 'forms.Textarea', ([], {'attrs': "{'cols': 23, 'rows': 5}"}), "(attrs={'cols': 23, 'rows': 5})\n", (4409, 4440), False, 'from django import forms\n'), ((4622, 4652), 
'django.utils.translation.ugettext_lazy', '_', (['"""I have a physical vendor."""'], {}), "('I have a physical vendor.')\n", (4623, 4652), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5598, 5609), 'django.forms.utils.ErrorDict', 'ErrorDict', ([], {}), '()\n', (5607, 5609), False, 'from django.forms.utils import ErrorList, ErrorDict\n'), ((5811, 5820), 'django.utils.translation.ugettext_lazy', '_', (['"""Name"""'], {}), "('Name')\n", (5812, 5820), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5893, 5913), 'django.utils.translation.ugettext_lazy', '_', (['"""Business number"""'], {}), "('Business number')\n", (5894, 5913), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5981, 5992), 'django.utils.translation.ugettext_lazy', '_', (['"""Tax ID"""'], {}), "('Tax ID')\n", (5982, 5992), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6061, 6074), 'django.utils.translation.ugettext_lazy', '_', (['"""Zip code"""'], {}), "('Zip code')\n", (6062, 6074), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6391, 6404), 'django.utils.translation.ugettext_lazy', '_', (['"""Category"""'], {}), "('Category')\n", (6392, 6404), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6492, 6509), 'django.utils.translation.ugettext_lazy', '_', (['"""Bank account"""'], {}), "('Bank account')\n", (6493, 6509), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6896, 6944), 'django.forms.Textarea', 'forms.Textarea', ([], {'attrs': "{'class': 'input-xxlarge'}"}), "(attrs={'class': 'input-xxlarge'})\n", (6910, 6944), False, 'from django import forms\n'), ((7524, 7578), 'django.core.exceptions.ValidationError', 'ValidationError', (["self._messages['bank_account_number']"], {}), "(self._messages['bank_account_number'])\n", (7539, 7578), False, 'from django.core.exceptions import ValidationError\n'), ((8458, 8467), 
'django.utils.translation.ugettext_lazy', '_', (['"""Name"""'], {}), "('Name')\n", (8459, 8467), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8548, 8561), 'django.utils.translation.ugettext_lazy', '_', (['"""Category"""'], {}), "('Category')\n", (8549, 8561), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8622, 8639), 'django.utils.translation.ugettext_lazy', '_', (['"""Manufacturer"""'], {}), "('Manufacturer')\n", (8623, 8639), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9779, 9794), 'decimal.Decimal', 'Decimal', (['"""0.00"""'], {}), "('0.00')\n", (9786, 9794), False, 'from decimal import Decimal\n'), ((10909, 10947), 'django.utils.translation.ugettext_lazy', '_', (['"""Mark whether address is necessary"""'], {}), "('Mark whether address is necessary')\n", (10910, 10947), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((11040, 11076), 'django.utils.translation.ugettext_lazy', '_', (['"""Shipping is the same as billing"""'], {}), "('Shipping is the same as billing')\n", (11041, 11076), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((12754, 12802), 'django.core.exceptions.ValidationError', 'ValidationError', (["self.error_messages['required']"], {}), "(self.error_messages['required'])\n", (12769, 12802), False, 'from django.core.exceptions import ValidationError\n'), ((13215, 13226), 'django.forms.utils.ErrorDict', 'ErrorDict', ([], {}), '()\n', (13224, 13226), False, 'from django.forms.utils import ErrorList, ErrorDict\n'), ((13263, 13274), 'django.forms.utils.ErrorDict', 'ErrorDict', ([], {}), '()\n', (13272, 13274), False, 'from django.forms.utils import ErrorList, ErrorDict\n'), ((2800, 2871), 'allauth.account.models.EmailAddress.objects.filter', 'account.models.EmailAddress.objects.filter', ([], {'email': "cleaned_data['email']"}), "(email=cleaned_data['email'])\n", (2842, 2871), False, 'from allauth import account\n'), ((7660, 7714), 
'django.core.exceptions.ValidationError', 'ValidationError', (["self._messages['bank_account_number']"], {}), "(self._messages['bank_account_number'])\n", (7675, 7714), False, 'from django.core.exceptions import ValidationError\n'), ((13045, 13090), 'django.core.exceptions.ValidationError', 'ValidationError', (["self.error_messages['empty']"], {}), "(self.error_messages['empty'])\n", (13060, 13090), False, 'from django.core.exceptions import ValidationError\n'), ((2678, 2731), 'django.core.exceptions.ValidationError', 'ValidationError', (["self.messages['invalid_credentials']"], {}), "(self.messages['invalid_credentials'])\n", (2693, 2731), False, 'from django.core.exceptions import ValidationError\n'), ((2964, 3010), 'django.core.exceptions.ValidationError', 'ValidationError', (["self.messages['email_exists']"], {}), "(self.messages['email_exists'])\n", (2979, 3010), False, 'from django.core.exceptions import ValidationError\n'), ((2315, 2356), 'allauth.account.adapter.get_adapter', 'account.adapter.get_adapter', (['self.request'], {}), '(self.request)\n', (2342, 2356), False, 'from allauth import account\n'), ((3518, 3532), 'django.utils.translation.ugettext_lazy', '_', (['"""Full name"""'], {}), "('Full name')\n", (3519, 3532), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8379, 8425), 'django.utils.translation.ugettext_lazy', '_', (['"""select your product if we already know it"""'], {}), "('select your product if we already know it')\n", (8380, 8425), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8859, 8911), 'django.utils.translation.ugettext_lazy', '_', (['"""select the manufacturer if we already know them"""'], {}), "('select the manufacturer if we already know them')\n", (8860, 8911), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((10007, 10029), 'django.utils.translation.ugettext_lazy', '_', (['"""Field is required"""'], {}), "('Field is required')\n", (10008, 10029), True, 'from 
django.utils.translation import ugettext_lazy as _\n')] |
import os
import argparse
import json
from utils import print_host
from xml.dom import minidom
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=str, help="input dir")
parser.add_argument("--output", type=str, help="output file")
args = parser.parse_args()
print(args)
hosts = []
file_list = os.listdir(args.input)
for filename in file_list:
if filename.split(".")[-1] != "xml":
continue
domTree = minidom.parse(args.input + "/" + filename)
rootNode = domTree.documentElement
hosts_node = rootNode.getElementsByTagName("host")
for host_node in hosts_node:
# Host
host = {"addr": "", "addr_type": "", "hostnames": [], "ports": [], "os_list": []}
# Address
addr_node = host_node.getElementsByTagName("address")[0]
addr = addr_node.getAttribute("addr")
addr_type = addr_node.getAttribute("addrtype")
host["addr"] = addr
host["addr_type"] = addr_type
# Hostnames
hostnames_node = host_node.getElementsByTagName("hostnames")[0].getElementsByTagName("hostname")
hostnames = []
for hostname_node in hostnames_node:
hostnames.append({"name": hostname_node.getAttribute("name"), "type": hostname_node.getAttribute("type")})
host["hostnames"] = hostnames
# Ports
ports_node_root = host_node.getElementsByTagName("ports")
if len(ports_node_root) > 0:
ports_node = ports_node_root[0].getElementsByTagName("port")
ports = []
for port_node in ports_node:
port = {}
port["protocol"] = port_node.getAttribute("protocol")
port["portid"] = port_node.getAttribute("portid")
port["state"] = port_node.getElementsByTagName("state")[0].getAttribute("state")
port["service"] = port_node.getElementsByTagName("service")[0].getAttribute("name")
ports.append(port)
host["ports"] = ports
# OS
os_root = host_node.getElementsByTagName("os")
if len(os_root) > 0:
os_list_node = os_root[0].getElementsByTagName("osmatch")
os_list = []
for os_node in os_list_node:
os = {}
os["name"] = os_node.getAttribute("name")
os["type"] = os_node.getElementsByTagName("osclass")[0].getAttribute("type")
os["vendor"] = os_node.getElementsByTagName("osclass")[0].getAttribute("vendor")
os["family"] = os_node.getElementsByTagName("osclass")[0].getAttribute("osfamily")
cpes_node = os_node.getElementsByTagName("osclass")[0].getElementsByTagName("cpe")
cpe = []
for cpe_node in cpes_node:
cpe.append(cpe_node.childNodes[0].data)
os["cpe"] = cpe
os_list.append(os)
host["os_list"] = os_list
fingers_node = os_root[0].getElementsByTagName("osfingerprint")
fingers = []
for finger_node in fingers_node:
finger = finger_node.getAttribute("fingerprint")
fingers.append(finger)
host["fingers"] = fingers
hosts.append(host)
with open(args.output, "w") as f:
json.dump(hosts, f) | [
"os.listdir",
"json.dump",
"xml.dom.minidom.parse",
"argparse.ArgumentParser"
] | [((136, 161), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (159, 161), False, 'import argparse\n'), ((370, 392), 'os.listdir', 'os.listdir', (['args.input'], {}), '(args.input)\n', (380, 392), False, 'import os\n'), ((508, 550), 'xml.dom.minidom.parse', 'minidom.parse', (["(args.input + '/' + filename)"], {}), "(args.input + '/' + filename)\n", (521, 550), False, 'from xml.dom import minidom\n'), ((3611, 3630), 'json.dump', 'json.dump', (['hosts', 'f'], {}), '(hosts, f)\n', (3620, 3630), False, 'import json\n')] |
from pathlib import Path
import numpy as np
from tifffile import imread
from tracker.export import ExportResults
from tracker.extract_data import get_img_files
from tracker.extract_data import get_indices_pandas
from tracker.tracking import TrackingConfig, MultiCellTracker
def run_tracker(img_path, segm_path, res_path, delta_t=3, default_roi_size=2):
    """Run the multi-cell tracker over a segmented image sequence.

    :param img_path: folder with the raw images
    :param segm_path: folder with the segmentation masks (file stem 'mask')
    :param res_path: folder where the tracking results are exported
    :param delta_t: temporal window passed to TrackingConfig
    :param default_roi_size: multiplier applied to the median mask extent
        when deriving the ROI size
    """
    img_path = Path(img_path)
    segm_path = Path(segm_path)
    res_path = Path(res_path)
    img_files = get_img_files(img_path)
    segm_files = get_img_files(segm_path, 'mask')
    # set roi size
    # assume img shape z,x,y
    # Use the last time step's mask as a template for shape statistics.
    dummy = np.squeeze(imread(segm_files[max(segm_files.keys())]))
    img_shape = dummy.shape
    masks = get_indices_pandas(imread(segm_files[max(segm_files.keys())]))
    # Per-mask bounding-box extent along each axis (max index - min index + 1).
    m_shape = np.stack(masks.apply(lambda x: np.max(np.array(x), axis=-1) - np.min(np.array(x), axis=-1) +1))
    if len(img_shape) == 2:
        if len(masks) > 10:
            # Enough objects: scale the ROI from the median mask size.
            m_size = np.median(np.stack(m_shape)).astype(int)
            roi_size = tuple([m_size*default_roi_size, m_size*default_roi_size])
        else:
            # Too few objects for a robust median -- fall back to 1/10 of
            # the image extent.
            roi_size = tuple((np.array(dummy.shape) // 10).astype(int))
    else:
        # 3D data: per-axis median extent scaled by the default factor.
        roi_size = tuple((np.median(np.stack(m_shape), axis=0) * default_roi_size).astype(int))
    config = TrackingConfig(img_files, segm_files, roi_size, delta_t=delta_t, cut_off_distance=None)
    tracker = MultiCellTracker(config)
    tracks = tracker()
    exporter = ExportResults()
    exporter(tracks, res_path, tracker.img_shape, time_steps=sorted(img_files.keys()))
if __name__ == '__main__':
    from argparse import ArgumentParser

    # Command-line entry point for the KIT-Sch-GE tracker.
    arg_parser = ArgumentParser(description='Tracking KIT-Sch-GE.')
    arg_parser.add_argument('--image_path', type=str,
                            help='path to the folder containing the raw images.')
    arg_parser.add_argument('--segmentation_path', type=str,
                            help='path to the folder containing the segmentation images.')
    arg_parser.add_argument('--results_path', type=str,
                            help='path where to store the tracking results. '
                                 'If the results path is the same as the segmentation'
                                 '_path the segmentation images will be overwritten.')
    arg_parser.add_argument('--delta_t', type=int, default=3)
    arg_parser.add_argument('--default_roi_size', type=int, default=2)
    cli_args = arg_parser.parse_args()
    run_tracker(cli_args.image_path, cli_args.segmentation_path,
                cli_args.results_path, cli_args.delta_t, cli_args.default_roi_size)
| [
"numpy.stack",
"argparse.ArgumentParser",
"tracker.tracking.MultiCellTracker",
"tracker.extract_data.get_img_files",
"tracker.tracking.TrackingConfig",
"pathlib.Path",
"numpy.array",
"tracker.export.ExportResults"
] | [((372, 386), 'pathlib.Path', 'Path', (['img_path'], {}), '(img_path)\n', (376, 386), False, 'from pathlib import Path\n'), ((403, 418), 'pathlib.Path', 'Path', (['segm_path'], {}), '(segm_path)\n', (407, 418), False, 'from pathlib import Path\n'), ((434, 448), 'pathlib.Path', 'Path', (['res_path'], {}), '(res_path)\n', (438, 448), False, 'from pathlib import Path\n'), ((465, 488), 'tracker.extract_data.get_img_files', 'get_img_files', (['img_path'], {}), '(img_path)\n', (478, 488), False, 'from tracker.extract_data import get_img_files\n'), ((506, 538), 'tracker.extract_data.get_img_files', 'get_img_files', (['segm_path', '"""mask"""'], {}), "(segm_path, 'mask')\n", (519, 538), False, 'from tracker.extract_data import get_img_files\n'), ((1275, 1366), 'tracker.tracking.TrackingConfig', 'TrackingConfig', (['img_files', 'segm_files', 'roi_size'], {'delta_t': 'delta_t', 'cut_off_distance': 'None'}), '(img_files, segm_files, roi_size, delta_t=delta_t,\n cut_off_distance=None)\n', (1289, 1366), False, 'from tracker.tracking import TrackingConfig, MultiCellTracker\n'), ((1377, 1401), 'tracker.tracking.MultiCellTracker', 'MultiCellTracker', (['config'], {}), '(config)\n', (1393, 1401), False, 'from tracker.tracking import TrackingConfig, MultiCellTracker\n'), ((1441, 1456), 'tracker.export.ExportResults', 'ExportResults', ([], {}), '()\n', (1454, 1456), False, 'from tracker.export import ExportResults\n'), ((1627, 1677), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Tracking KIT-Sch-GE."""'}), "(description='Tracking KIT-Sch-GE.')\n", (1641, 1677), False, 'from argparse import ArgumentParser\n'), ((956, 973), 'numpy.stack', 'np.stack', (['m_shape'], {}), '(m_shape)\n', (964, 973), True, 'import numpy as np\n'), ((810, 821), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (818, 821), True, 'import numpy as np\n'), ((841, 852), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (849, 852), True, 'import numpy as np\n'), ((1113, 1134), 
'numpy.array', 'np.array', (['dummy.shape'], {}), '(dummy.shape)\n', (1121, 1134), True, 'import numpy as np\n'), ((1201, 1218), 'numpy.stack', 'np.stack', (['m_shape'], {}), '(m_shape)\n', (1209, 1218), True, 'import numpy as np\n')] |
from functools import wraps
from flask import abort
from flask_login import current_user
from .models import Permission
def permission_required(permission):
    """Decorator factory: abort with HTTP 403 unless the current user holds
    the given permission."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            if not current_user.can(permission):
                abort(403)
            return func(*args, **kwargs)
        return wrapper
    return decorator
def admin_required(f):
    """Shortcut decorator: require the ADMIN permission for view *f*."""
    return permission_required(Permission.ADMIN)(f)
"flask_login.current_user.can",
"flask.abort",
"functools.wraps"
] | [((190, 198), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (195, 198), False, 'from functools import wraps\n'), ((266, 294), 'flask_login.current_user.can', 'current_user.can', (['permission'], {}), '(permission)\n', (282, 294), False, 'from flask_login import current_user\n'), ((312, 322), 'flask.abort', 'abort', (['(403)'], {}), '(403)\n', (317, 322), False, 'from flask import abort\n')] |
"""
Application name: users.py
Author/Programmer: <NAME>
Date application created: April 1st, 2022
This model helps to define the structure of stored data.
The fields used are:
*id
*is_active
*email
*username
*name
*last_name
*is_verified
"""
from django.db import models
from django.utils.translation import gettext_lazy as _
from utils.models import CustomAbstractUser
class User(CustomAbstractUser):
    """Custom user model authenticated by email address."""

    is_active = models.BooleanField(
        _("active"),
        default=True,
        # BUG FIX: help_text was accidentally a 2-tuple (trailing commas
        # inside the parentheses) instead of one implicitly-concatenated
        # string, so templates rendered the tuple repr.
        help_text=(
            "Indica si el registro debe ser tratado como activo. "
            "Desmarque esta opción en lugar de borrar el registro"
        ),
    )
    email = models.EmailField(
        "email address",
        unique=True,
        error_messages={"unique": "A user with that email already exists."},
    )

    # Log in with the email address; these extra fields are prompted by
    # createsuperuser.
    USERNAME_FIELD = "email"
    REQUIRED_FIELDS = ["username", "name", "last_name"]

    is_verified = models.BooleanField(
        "verified",
        # NOTE(review): default=True marks users as verified before any email
        # confirmation happens -- confirm this is intended.
        default=True,
        help_text="Set to true when the user have verified its email address.",
    )

    class Meta:
        verbose_name = "User"
        verbose_name_plural = "Users"

    def __str__(self):
        """Return username."""
        return self.username
"django.utils.translation.gettext_lazy",
"django.db.models.BooleanField",
"django.db.models.EmailField"
] | [((735, 855), 'django.db.models.EmailField', 'models.EmailField', (['"""email address"""'], {'unique': '(True)', 'error_messages': "{'unique': 'A user with that email already exists.'}"}), "('email address', unique=True, error_messages={'unique':\n 'A user with that email already exists.'})\n", (752, 855), False, 'from django.db import models\n'), ((988, 1110), 'django.db.models.BooleanField', 'models.BooleanField', (['"""verified"""'], {'default': '(True)', 'help_text': '"""Set to true when the user have verified its email address."""'}), "('verified', default=True, help_text=\n 'Set to true when the user have verified its email address.')\n", (1007, 1110), False, 'from django.db import models\n'), ((516, 527), 'django.utils.translation.gettext_lazy', '_', (['"""active"""'], {}), "('active')\n", (517, 527), True, 'from django.utils.translation import gettext_lazy as _\n')] |
# -*- coding: utf-8 -*-
"""nncli_gui module"""
import hashlib
import subprocess
import threading
import urwid
from . import view_titles, view_note, view_help, view_log, user_input
from .utils import exec_cmd_on_note, get_pager
# pylint: disable=too-many-instance-attributes, unused-argument
class NncliGui:
"""NncliGui class. Responsible for the console GUI view logic."""
def __init__(self, config, logger, ndb, key=None):
self.ndb = ndb
self.logger = logger
self.config = config
self.last_view = []
self.status_bar = self.config.get_config('status_bar')
self.config.state.current_sort_mode = \
self.config.get_config('sort_mode')
self.log_lock = threading.Lock()
self.log_alarms = 0
self.logs = []
self.thread_sync = threading.Thread(
target=self.ndb.sync_worker,
args=[self.config.state.do_server_sync]
)
self.thread_sync.setDaemon(True)
self.view_titles = \
view_titles.ViewTitles(
self.config,
{
'ndb' : self.ndb,
'search_string' : None,
'log' : self.log
}
)
self.view_note = \
view_note.ViewNote(
self.config,
{
'ndb' : self.ndb,
'id' : key, # initial key to view or None
'log' : self.log
}
)
self.view_log = view_log.ViewLog(self.config, self.logger)
self.view_help = view_help.ViewHelp(self.config)
palette = \
[
(
'default',
self.config.get_color('default_fg'),
self.config.get_color('default_bg')
),
(
'status_bar',
self.config.get_color('status_bar_fg'),
self.config.get_color('status_bar_bg')
),
(
'log',
self.config.get_color('log_fg'),
self.config.get_color('log_bg')
),
(
'user_input_bar',
self.config.get_color('user_input_bar_fg'),
self.config.get_color('user_input_bar_bg')
),
(
'note_focus',
self.config.get_color('note_focus_fg'),
self.config.get_color('note_focus_bg')
),
(
'note_title_day',
self.config.get_color('note_title_day_fg'),
self.config.get_color('note_title_day_bg')
),
(
'note_title_week',
self.config.get_color('note_title_week_fg'),
self.config.get_color('note_title_week_bg')
),
(
'note_title_month',
self.config.get_color('note_title_month_fg'),
self.config.get_color('note_title_month_bg')
),
(
'note_title_year',
self.config.get_color('note_title_year_fg'),
self.config.get_color('note_title_year_bg')
),
(
'note_title_ancient',
self.config.get_color('note_title_ancient_fg'),
self.config.get_color('note_title_ancient_bg')
),
(
'note_date',
self.config.get_color('note_date_fg'),
self.config.get_color('note_date_bg')
),
(
'note_flags',
self.config.get_color('note_flags_fg'),
self.config.get_color('note_flags_bg')
),
(
'note_category',
self.config.get_color('note_category_fg'),
self.config.get_color('note_category_bg')
),
(
'note_content',
self.config.get_color('note_content_fg'),
self.config.get_color('note_content_bg')
),
(
'note_content_focus',
self.config.get_color('note_content_focus_fg'),
self.config.get_color('note_content_focus_bg')
),
(
'note_content_old',
self.config.get_color('note_content_old_fg'),
self.config.get_color('note_content_old_bg')
),
(
'note_content_old_focus',
self.config.get_color(
'note_content_old_focus_fg'
),
self.config.get_color(
'note_content_old_focus_bg'
)
),
(
'help_focus',
self.config.get_color('help_focus_fg'),
self.config.get_color('help_focus_bg')
),
(
'help_header',
self.config.get_color('help_header_fg'),
self.config.get_color('help_header_bg')
),
(
'help_config',
self.config.get_color('help_config_fg'),
self.config.get_color('help_config_bg')
),
(
'help_value',
self.config.get_color('help_value_fg'),
self.config.get_color('help_value_bg')
),
(
'help_descr',
self.config.get_color('help_descr_fg'),
self.config.get_color('help_descr_bg')
)
]
self.master_frame = urwid.Frame(
body=urwid.Filler(urwid.Text('')),
header=None,
footer=urwid.Pile([urwid.Pile([]), urwid.Pile([])]),
focus_part='body')
self.nncli_loop = urwid.MainLoop(self.master_frame,
palette,
handle_mouse=False)
self.nncli_loop.set_alarm_in(0, self._gui_init_view, \
bool(key))
def run(self):
"""Run the GUI"""
self.nncli_loop.run()
def _gui_header_clear(self):
"""Clear the console GUI header row"""
self.master_frame.contents['header'] = (None, None)
self.nncli_loop.draw_screen()
def _gui_header_set(self, widget):
"""Set the content of the console GUI header row"""
self.master_frame.contents['header'] = (widget, None)
self.nncli_loop.draw_screen()
def _gui_footer_log_clear(self):
"""Clear the log at the bottom of the GUI"""
gui = self._gui_footer_input_get()
self.master_frame.contents['footer'] = \
(urwid.Pile([urwid.Pile([]), urwid.Pile([gui])]), None)
self.nncli_loop.draw_screen()
def _gui_footer_log_set(self, pile):
"""Set the log at the bottom of the GUI"""
gui = self._gui_footer_input_get()
self.master_frame.contents['footer'] = \
(urwid.Pile([urwid.Pile(pile), urwid.Pile([gui])]), None)
self.nncli_loop.draw_screen()
def _gui_footer_log_get(self):
"""Get the log at the bottom of the GUI"""
return self.master_frame.contents['footer'][0].contents[0][0]
def _gui_footer_input_clear(self):
"""Clear the input at the bottom of the GUI"""
pile = self._gui_footer_log_get()
self.master_frame.contents['footer'] = \
(urwid.Pile([urwid.Pile([pile]), urwid.Pile([])]), None)
self.nncli_loop.draw_screen()
def _gui_footer_input_set(self, gui):
"""Set the input at the bottom of the GUI"""
pile = self._gui_footer_log_get()
self.master_frame.contents['footer'] = \
(urwid.Pile([urwid.Pile([pile]), urwid.Pile([gui])]), None)
self.nncli_loop.draw_screen()
def _gui_footer_input_get(self):
"""Get the input at the bottom of the GUI"""
return self.master_frame.contents['footer'][0].contents[1][0]
def _gui_footer_focus_input(self):
"""Set the GUI focus to the input at the bottom of the GUI"""
self.master_frame.focus_position = 'footer'
self.master_frame.contents['footer'][0].focus_position = 1
def _gui_body_set(self, widget):
"""Set the GUI body"""
self.master_frame.contents['body'] = (widget, None)
self._gui_update_status_bar()
self.nncli_loop.draw_screen()
def gui_body_get(self):
"""Get the GUI body"""
return self.master_frame.contents['body'][0]
def _gui_body_focus(self):
"""Set the GUI focus to the body"""
self.master_frame.focus_position = 'body'
def gui_update_view(self):
"""Update the GUI"""
if not self.config.state.do_gui:
return
try:
cur_key = self.view_titles.note_list \
[self.view_titles.focus_position].note['localkey']
except IndexError:
cur_key = None
self.view_titles.update_note_list(
self.view_titles.search_string,
sort_mode=self.config.state.current_sort_mode
)
self.view_titles.focus_note(cur_key)
if self.gui_body_get().__class__ == view_note.ViewNote:
self.view_note.update_note_view()
self._gui_update_status_bar()
def _gui_update_status_bar(self):
"""Update the GUI status bar"""
if self.status_bar != 'yes':
self._gui_header_clear()
else:
self._gui_header_set(self.gui_body_get().get_status_bar())
def _gui_switch_frame_body(self, new_view, save_current_view=True):
"""
Switch the body frame of the GUI. Used to switch to a new
view
"""
if new_view is None:
if not self.last_view:
self._gui_stop()
else:
self._gui_body_set(self.last_view.pop())
else:
if self.gui_body_get().__class__ != new_view.__class__:
if save_current_view:
self.last_view.append(self.gui_body_get())
self._gui_body_set(new_view)
def _delete_note_callback(self, key, delete):
"""Update the GUI after deleting a note"""
if not delete:
return
self.ndb.set_note_deleted(key, True)
if self.gui_body_get().__class__ == view_titles.ViewTitles:
self.view_titles.update_note_title()
self._gui_update_status_bar()
self.ndb.sync_worker_go()
def _gui_yes_no_input(self, args, yes_no):
"""Create a yes/no input dialog at the GUI footer"""
self._gui_footer_input_clear()
self._gui_body_focus()
self.master_frame.keypress = self._gui_frame_keypress
args[0](args[1],
yes_no in ['YES', 'Yes', 'yes', 'Y', 'y']
)
def _gui_search_input(self, args, search_string):
"""Create a search input dialog at the GUI footer"""
self._gui_footer_input_clear()
self._gui_body_focus()
self.master_frame.keypress = self._gui_frame_keypress
if search_string:
if self.gui_body_get() == self.view_note:
self.config.state.search_direction = args[1]
self.view_note.search_note_view_next(
search_string=search_string,
search_mode=args[0]
)
else:
self.view_titles.update_note_list(
search_string,
args[0],
sort_mode=self.config.state.current_sort_mode
)
self._gui_body_set(self.view_titles)
def _gui_category_input(self, args, category):
"""Create a category input at the GUI footer"""
self._gui_footer_input_clear()
self._gui_body_focus()
self.master_frame.keypress = self._gui_frame_keypress
if category is not None:
if self.gui_body_get().__class__ == view_titles.ViewTitles:
note = self.view_titles.note_list \
[self.view_titles.focus_position].note
else: # self.gui_body_get().__class__ == view_note.ViewNote:
note = self.view_note.note
self.ndb.set_note_category(note['localkey'], category)
if self.gui_body_get().__class__ == view_titles.ViewTitles:
self.view_titles.update_note_title()
else: # self.gui_body_get().__class__ == view_note.ViewNote:
self.view_note.update_note_view()
self._gui_update_status_bar()
self.ndb.sync_worker_go()
def _gui_pipe_input(self, args, cmd):
"""Create a pipe input dialog at the GUI footoer"""
self._gui_footer_input_clear()
self._gui_body_focus()
self.master_frame.keypress = self._gui_frame_keypress
if cmd is not None:
if self.gui_body_get().__class__ == view_titles.ViewTitles:
note = self.view_titles.note_list \
[self.view_titles.focus_position].note
else: # self.gui_body_get().__class__ == view_note.ViewNote:
note = self.view_note.old_note \
if self.view_note.old_note \
else self.view_note.note
try:
self._gui_clear()
pipe = subprocess.Popen(cmd, stdin=subprocess.PIPE, shell=True)
pipe.communicate(note['content'].encode('utf-8'))
pipe.stdin.close()
pipe.wait()
except OSError as ex:
self.log('Pipe error: %s' % ex)
finally:
self._gui_reset()
# pylint: disable=too-many-return-statements, too-many-branches
# pylint: disable=too-many-statements
def _gui_frame_keypress(self, size, key):
"""Keypress handler for the GUI"""
# convert space character into name
if key == ' ':
key = 'space'
contents = self.gui_body_get()
if key == self.config.get_keybind('quit'):
self._gui_switch_frame_body(None)
elif key == self.config.get_keybind('help'):
self._gui_switch_frame_body(self.view_help)
elif key == self.config.get_keybind('sync'):
self.ndb.last_sync = 0
self.ndb.sync_worker_go()
elif key == self.config.get_keybind('view_log'):
self.view_log.update_log()
self._gui_switch_frame_body(self.view_log)
elif key == self.config.get_keybind('down'):
if not contents.body.positions():
return None
last = len(contents.body.positions())
if contents.focus_position == (last - 1):
return None
contents.focus_position += 1
contents.render(size)
elif key == self.config.get_keybind('up'):
if not contents.body.positions():
return None
if contents.focus_position == 0:
return None
contents.focus_position -= 1
contents.render(size)
elif key == self.config.get_keybind('page_down'):
if not contents.body.positions():
return None
last = len(contents.body.positions())
next_focus = contents.focus_position + size[1]
if next_focus >= last:
next_focus = last - 1
contents.change_focus(size, next_focus,
offset_inset=0,
coming_from='above')
elif key == self.config.get_keybind('page_up'):
if not contents.body.positions():
return None
if 'bottom' in contents.ends_visible(size):
last = len(contents.body.positions())
next_focus = last - size[1] - size[1]
else:
next_focus = contents.focus_position - size[1]
if next_focus < 0:
next_focus = 0
contents.change_focus(size, next_focus,
offset_inset=0,
coming_from='below')
elif key == self.config.get_keybind('half_page_down'):
if not contents.body.positions():
return None
last = len(contents.body.positions())
next_focus = contents.focus_position + (size[1] // 2)
if next_focus >= last:
next_focus = last - 1
contents.change_focus(size, next_focus,
offset_inset=0,
coming_from='above')
elif key == self.config.get_keybind('half_page_up'):
if not contents.body.positions():
return None
if 'bottom' in contents.ends_visible(size):
last = len(contents.body.positions())
next_focus = last - size[1] - (size[1] // 2)
else:
next_focus = contents.focus_position - (size[1] // 2)
if next_focus < 0:
next_focus = 0
contents.change_focus(size, next_focus,
offset_inset=0,
coming_from='below')
elif key == self.config.get_keybind('bottom'):
if not contents.body.positions():
return None
contents.change_focus(size, (len(contents.body.positions()) - 1),
offset_inset=0,
coming_from='above')
elif key == self.config.get_keybind('top'):
if not contents.body.positions():
return None
contents.change_focus(size, 0,
offset_inset=0,
coming_from='below')
elif key == self.config.get_keybind('view_next_note'):
if self.gui_body_get().__class__ != view_note.ViewNote:
return key
if not self.view_titles.body.positions():
return None
last = len(self.view_titles.body.positions())
if self.view_titles.focus_position == (last - 1):
return None
self.view_titles.focus_position += 1
contents.update_note_view(
self.view_titles. \
note_list[self.view_titles. \
focus_position].note['localkey']
)
self._gui_switch_frame_body(self.view_note)
elif key == self.config.get_keybind('view_prev_note'):
if self.gui_body_get().__class__ != view_note.ViewNote:
return key
if not self.view_titles.body.positions():
return None
if self.view_titles.focus_position == 0:
return None
self.view_titles.focus_position -= 1
contents.update_note_view(
self.view_titles. \
note_list[self.view_titles. \
focus_position].note['localkey']
)
self._gui_switch_frame_body(self.view_note)
elif key == self.config.get_keybind('status'):
if self.status_bar == 'yes':
self.status_bar = 'no'
else:
self.status_bar = self.config.get_config('status_bar')
elif key == self.config.get_keybind('create_note'):
if self.gui_body_get().__class__ != view_titles.ViewTitles:
return key
self._gui_clear()
content = exec_cmd_on_note(None, self.config, self, self.logger)
self._gui_reset()
if content:
self.log('New note created')
self.ndb.create_note(content)
self.gui_update_view()
self.ndb.sync_worker_go()
elif key == self.config.get_keybind('edit_note') or \
key == self.config.get_keybind('view_note_ext') or \
key == self.config.get_keybind('view_note_json'):
if self.gui_body_get().__class__ != view_titles.ViewTitles and \
self.gui_body_get().__class__ != view_note.ViewNote:
return key
if self.gui_body_get().__class__ == view_titles.ViewTitles:
if not contents.body.positions():
return None
note = contents.note_list[contents.focus_position].note
else: # self.gui_body_get().__class__ == view_note.ViewNote:
if key == self.config.get_keybind('edit_note'):
note = contents.note
else:
note = contents.old_note if contents.old_note \
else contents.note
self._gui_clear()
if key == self.config.get_keybind('edit_note'):
content = exec_cmd_on_note(note, self.config, self,
self.logger)
elif key == self.config.get_keybind('view_note_ext'):
content = exec_cmd_on_note(
note,
self.config,
self,
self.logger,
cmd=get_pager(self.config, self.logger))
else: # key == self.config.get_keybind('view_note_json')
content = exec_cmd_on_note(
note,
self.config,
self,
self.logger,
cmd=get_pager(self.config, self.logger),
raw=True
)
self._gui_reset()
if not content:
return None
md5_old = hashlib.md5(note['content'].encode('utf-8')).digest()
md5_new = hashlib.md5(content.encode('utf-8')).digest()
if md5_old != md5_new:
self.log('Note updated')
self.ndb.set_note_content(note['localkey'], content)
if self.gui_body_get().__class__ == view_titles.ViewTitles:
contents.update_note_title()
else: # self.gui_body_get().__class__ == view_note.ViewNote:
contents.update_note_view()
self.ndb.sync_worker_go()
else:
self.log('Note unchanged')
elif key == self.config.get_keybind('view_note'):
if self.gui_body_get().__class__ != view_titles.ViewTitles:
return key
if not contents.body.positions():
return None
self.view_note.update_note_view(
contents.note_list[contents.focus_position]. \
note['localkey'])
self._gui_switch_frame_body(self.view_note)
elif key == self.config.get_keybind('pipe_note'):
if self.gui_body_get().__class__ != view_titles.ViewTitles and \
self.gui_body_get().__class__ != view_note.ViewNote:
return key
if self.gui_body_get().__class__ == view_titles.ViewTitles:
if not contents.body.positions():
return None
note = contents.note_list[contents.focus_position].note
else: # self.gui_body_get().__class__ == view_note.ViewNote:
note = contents.old_note if contents.old_note else contents.note
self._gui_footer_input_set(
urwid.AttrMap(
user_input.UserInput(
self.config,
key,
'',
self._gui_pipe_input,
None
),
'user_input_bar'
)
)
self._gui_footer_focus_input()
self.master_frame.keypress = \
self._gui_footer_input_get().keypress
elif key == self.config.get_keybind('note_delete'):
if self.gui_body_get().__class__ != view_titles.ViewTitles and \
self.gui_body_get().__class__ != view_note.ViewNote:
return key
if self.gui_body_get().__class__ == view_titles.ViewTitles:
if not contents.body.positions():
return None
note = contents.note_list[contents.focus_position].note
else: # self.gui_body_get().__class__ == view_note.ViewNote:
note = contents.note
self._gui_footer_input_set(
urwid.AttrMap(
user_input.UserInput(
self.config,
'Delete (y/n): ',
'',
self._gui_yes_no_input,
[
self._delete_note_callback,
note['localkey']
]
),
'user_input_bar'
)
)
self._gui_footer_focus_input()
self.master_frame.keypress = \
self._gui_footer_input_get().keypress
elif key == self.config.get_keybind('note_favorite'):
if self.gui_body_get().__class__ != view_titles.ViewTitles and \
self.gui_body_get().__class__ != view_note.ViewNote:
return key
if self.gui_body_get().__class__ == view_titles.ViewTitles:
if not contents.body.positions():
return None
note = contents.note_list[contents.focus_position].note
else: # self.gui_body_get().__class__ == view_note.ViewNote:
note = contents.note
favorite = not note['favorite']
self.ndb.set_note_favorite(note['localkey'], favorite)
if self.gui_body_get().__class__ == view_titles.ViewTitles:
contents.update_note_title()
self.ndb.sync_worker_go()
elif key == self.config.get_keybind('note_category'):
if self.gui_body_get().__class__ != view_titles.ViewTitles and \
self.gui_body_get().__class__ != view_note.ViewNote:
return key
if self.gui_body_get().__class__ == view_titles.ViewTitles:
if not contents.body.positions():
return None
note = contents.note_list[contents.focus_position].note
else: # self.gui_body_get().__class__ == view_note.ViewNote:
note = contents.note
self._gui_footer_input_set(
urwid.AttrMap(
user_input.UserInput(
self.config,
'Category: ',
note['category'],
self._gui_category_input,
None
),
'user_input_bar'
)
)
self._gui_footer_focus_input()
self.master_frame.keypress = \
self._gui_footer_input_get().keypress
elif key == self.config.get_keybind('search_gstyle') or \
key == self.config.get_keybind('search_regex') or \
key == self.config.get_keybind('search_prev_gstyle') or \
key == self.config.get_keybind('search_prev_regex'):
if self.gui_body_get().__class__ != view_titles.ViewTitles and \
self.gui_body_get().__class__ != view_note.ViewNote:
return key
if self.gui_body_get().__class__ == view_note.ViewNote:
if key == self.config.get_keybind('search_prev_gstyle') or \
key == self.config.get_keybind('search_prev_regex'):
self.view_note.search_direction = 'backward'
else:
self.view_note.search_direction = 'forward'
options = [
'gstyle' if key == self.config.get_keybind('search_gstyle')
or key == self.config.get_keybind('search_prev_gstyle')
else 'regex',
'backward' if key ==
self.config.get_keybind('search_prev_gstyle')
or key == self.config.get_keybind('search_prev_regex')
else 'forward'
]
caption = '{}{}'.format('(regex) '
if options[0] == 'regex'
else '',
'/' if options[1] == 'forward'
else '?')
self._gui_footer_input_set(
urwid.AttrMap(
user_input.UserInput(
self.config,
caption,
'',
self._gui_search_input,
options
),
'user_input_bar'
)
)
self._gui_footer_focus_input()
self.master_frame.keypress = \
self._gui_footer_input_get().keypress
elif key == self.config.get_keybind('search_next'):
if self.gui_body_get().__class__ != view_note.ViewNote:
return key
self.view_note.search_note_view_next()
elif key == self.config.get_keybind('search_prev'):
if self.gui_body_get().__class__ != view_note.ViewNote:
return key
self.view_note.search_note_view_prev()
elif key == self.config.get_keybind('clear_search'):
if self.gui_body_get().__class__ != view_titles.ViewTitles:
return key
self.view_titles.update_note_list(
None,
sort_mode=self.config.state.current_sort_mode
)
self._gui_body_set(self.view_titles)
elif key == self.config.get_keybind('sort_date'):
if self.gui_body_get().__class__ != view_titles.ViewTitles:
return key
self.config.state.current_sort_mode = 'date'
self.view_titles.sort_note_list('date')
elif key == self.config.get_keybind('sort_alpha'):
if self.gui_body_get().__class__ != view_titles.ViewTitles:
return key
self.config.state.current_sort_mode = 'alpha'
self.view_titles.sort_note_list('alpha')
elif key == self.config.get_keybind('sort_categories'):
if self.gui_body_get().__class__ != view_titles.ViewTitles:
return key
self.config.state.current_sort_mode = 'categories'
self.view_titles.sort_note_list('categories')
elif key == self.config.get_keybind('copy_note_text'):
if self.gui_body_get().__class__ != view_note.ViewNote:
return key
self.view_note.copy_note_text()
else:
return contents.keypress(size, key)
self._gui_update_status_bar()
return None
def _gui_init_view(self, loop, show_note):
"""Initialize the GUI"""
self.master_frame.keypress = self._gui_frame_keypress
self._gui_body_set(self.view_titles)
if show_note:
# note that title view set first to prime the view stack
self._gui_switch_frame_body(self.view_note)
self.thread_sync.start()
def _gui_clear(self):
"""Clear the GUI"""
self.nncli_loop.widget = urwid.Filler(urwid.Text(''))
self.nncli_loop.draw_screen()
def _gui_reset(self):
"""Reset the GUI"""
self.nncli_loop.widget = self.master_frame
self.nncli_loop.draw_screen()
def _gui_stop(self):
"""Stop the GUI"""
# don't exit if there are any notes not yet saved to the disk
# NOTE: this was originally causing hangs on exit with urllib2
# should not be a problem now since using the requests library
# ref https://github.com/insanum/sncli/issues/18#issuecomment-105517773
if self.ndb.verify_all_saved():
# clear the screen and exit the urwid run loop
self._gui_clear()
raise urwid.ExitMainLoop()
self.log('WARNING: Not all notes saved'
'to disk (wait for sync worker)')
def log(self, msg):
"""Log as message, displaying to the user as appropriate"""
self.logger.log(msg)
self.log_lock.acquire()
self.log_alarms += 1
self.logs.append(msg)
if len(self.logs) > int(self.config.get_config('max_logs')):
self.log_alarms -= 1
self.logs.pop(0)
log_pile = []
for log in self.logs:
log_pile.append(urwid.AttrMap(urwid.Text(log), 'log'))
if self.config.state.verbose:
self._gui_footer_log_set(log_pile)
self.nncli_loop.set_alarm_in(
int(self.config.get_config('log_timeout')),
self._log_timeout, None)
self.log_lock.release()
def _log_timeout(self, loop, arg):
"""
Run periodically to check for new log entries to append to
the GUI footer
"""
self.log_lock.acquire()
self.log_alarms -= 1
if self.log_alarms == 0:
self._gui_footer_log_clear()
self.logs = []
else:
if self.logs:
self.logs.pop(0)
log_pile = []
for log in self.logs:
log_pile.append(urwid.AttrMap(urwid.Text(log), 'log'))
if self.config.state.verbose:
self._gui_footer_log_set(log_pile)
self.log_lock.release()
| [
"threading.Thread",
"urwid.Text",
"subprocess.Popen",
"urwid.Pile",
"urwid.ExitMainLoop",
"urwid.MainLoop",
"threading.Lock"
] | [((732, 748), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (746, 748), False, 'import threading\n'), ((828, 919), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.ndb.sync_worker', 'args': '[self.config.state.do_server_sync]'}), '(target=self.ndb.sync_worker, args=[self.config.state.\n do_server_sync])\n', (844, 919), False, 'import threading\n'), ((7737, 7799), 'urwid.MainLoop', 'urwid.MainLoop', (['self.master_frame', 'palette'], {'handle_mouse': '(False)'}), '(self.master_frame, palette, handle_mouse=False)\n', (7751, 7799), False, 'import urwid\n'), ((34259, 34273), 'urwid.Text', 'urwid.Text', (['""""""'], {}), "('')\n", (34269, 34273), False, 'import urwid\n'), ((34950, 34970), 'urwid.ExitMainLoop', 'urwid.ExitMainLoop', ([], {}), '()\n', (34968, 34970), False, 'import urwid\n'), ((15399, 15455), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdin': 'subprocess.PIPE', 'shell': '(True)'}), '(cmd, stdin=subprocess.PIPE, shell=True)\n', (15415, 15455), False, 'import subprocess\n'), ((7560, 7574), 'urwid.Text', 'urwid.Text', (['""""""'], {}), "('')\n", (7570, 7574), False, 'import urwid\n'), ((8640, 8654), 'urwid.Pile', 'urwid.Pile', (['[]'], {}), '([])\n', (8650, 8654), False, 'import urwid\n'), ((8656, 8673), 'urwid.Pile', 'urwid.Pile', (['[gui]'], {}), '([gui])\n', (8666, 8673), False, 'import urwid\n'), ((8935, 8951), 'urwid.Pile', 'urwid.Pile', (['pile'], {}), '(pile)\n', (8945, 8951), False, 'import urwid\n'), ((8953, 8970), 'urwid.Pile', 'urwid.Pile', (['[gui]'], {}), '([gui])\n', (8963, 8970), False, 'import urwid\n'), ((9390, 9408), 'urwid.Pile', 'urwid.Pile', (['[pile]'], {}), '([pile])\n', (9400, 9408), False, 'import urwid\n'), ((9410, 9424), 'urwid.Pile', 'urwid.Pile', (['[]'], {}), '([])\n', (9420, 9424), False, 'import urwid\n'), ((9688, 9706), 'urwid.Pile', 'urwid.Pile', (['[pile]'], {}), '([pile])\n', (9698, 9706), False, 'import urwid\n'), ((9708, 9725), 'urwid.Pile', 'urwid.Pile', (['[gui]'], {}), '([gui])\n', 
(9718, 9725), False, 'import urwid\n'), ((35512, 35527), 'urwid.Text', 'urwid.Text', (['log'], {}), '(log)\n', (35522, 35527), False, 'import urwid\n'), ((7641, 7655), 'urwid.Pile', 'urwid.Pile', (['[]'], {}), '([])\n', (7651, 7655), False, 'import urwid\n'), ((7657, 7671), 'urwid.Pile', 'urwid.Pile', (['[]'], {}), '([])\n', (7667, 7671), False, 'import urwid\n'), ((36295, 36310), 'urwid.Text', 'urwid.Text', (['log'], {}), '(log)\n', (36305, 36310), False, 'import urwid\n')] |
from aiounittest import AsyncTestCase
from urllib.parse import urlparse
from robot.api import Context
from robot.context.core import ContextImpl
class ResolveUrlContextImplTest(AsyncTestCase):
    """Unit tests for ``ContextImpl.resolve_url`` URL resolution."""
    # Shared context rooted at a fixed base URL; built once for the class.
    context: Context = None
    @classmethod
    def setUpClass(cls):
        cls.context = ContextImpl(url=urlparse('http://example.com/path1/page1?q=query#element-1'))
    async def test_resolve_absolute_url(self):
        """An already-absolute URL is returned unchanged."""
        absolute_url = 'https://http.cat/102'
        result = self.context.resolve_url(absolute_url)
        self.assertEqual(absolute_url, result)
    async def test_resolve_scheme(self):
        """A scheme-relative URL inherits the base URL's scheme."""
        url = '//http.cat/102'
        result = self.context.resolve_url(url)
        self.assertEqual('http:' + url, result)
    async def test_absolute_path(self):
        """An absolute path replaces the base URL's whole path."""
        url = '/path2/page2'
        result = self.context.resolve_url(url)
        self.assertEqual(
            'http://example.com/path2/page2',
            result
        )
    async def test_relative_path(self):
        """A relative path resolves against the base URL's directory."""
        url = 'page2'
        result = self.context.resolve_url(url)
        self.assertEqual(
            'http://example.com/path1/page2',
            result
        )
| [
"urllib.parse.urlparse"
] | [((304, 364), 'urllib.parse.urlparse', 'urlparse', (['"""http://example.com/path1/page1?q=query#element-1"""'], {}), "('http://example.com/path1/page1?q=query#element-1')\n", (312, 364), False, 'from urllib.parse import urlparse\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@file : spm_dataset.py
@author: zijun
@contact : <EMAIL>
@date : 2021/1/21 15:00
@version: 1.0
@desc : Dataset for sentence pair matching tasks
"""
from functools import partial
import torch
from torch.utils.data import DataLoader
from datasets.chinese_bert_dataset import ChineseBertDataset
from datasets.collate_functions import collate_to_max_length
class SPMDataset(ChineseBertDataset):
    """Sentence-pair-matching dataset for ChineseBERT.

    Each line of the underlying file is expected to hold four
    tab-separated fields: label, first sentence, second sentence, and a
    trailing (ignored) field.
    """
    def get_lines(self):
        """Read and return every raw line of ``self.data_path``."""
        # NOTE(review): opened without an explicit encoding — relies on the
        # platform default; confirm the data files are UTF-8.
        with open(self.data_path, 'r') as f:
            lines = f.readlines()
        return lines
    def __len__(self):
        """Number of examples (one per line)."""
        return len(self.lines)
    def __getitem__(self, idx):
        """Build the ``(input_ids, pinyin_ids, label)`` tensors for one pair."""
        line = self.lines[idx]
        third, first, second, _ = line.split('\t')
        # Spaces inside the sentences are stripped before tokenization.
        first = first.replace(" ", "")
        second = second.replace(" ", "")
        first_output = self.tokenizer.encode(first, add_special_tokens=False)
        first_pinyin_tokens = self.convert_sentence_to_pinyin_ids(first, first_output)
        second_output = self.tokenizer.encode(second, add_special_tokens=False)
        second_pinyin_tokens = self.convert_sentence_to_pinyin_ids(second, second_output)
        label = third
        # convert sentence to id — 101/102 are presumably the BERT
        # [CLS]/[SEP] vocabulary ids; confirm against the tokenizer vocab.
        bert_tokens = first_output.ids + [102] + second_output.ids
        # Each pinyin id is an 8-wide vector; [0] * 8 pads the separator slot.
        pinyin_tokens = first_pinyin_tokens + [[0] * 8] + second_pinyin_tokens
        # Truncate, reserving two positions for [CLS]/[SEP].
        if len(bert_tokens) > self.max_length - 2:
            bert_tokens = bert_tokens[:self.max_length - 2]
            pinyin_tokens = pinyin_tokens[:self.max_length - 2]
        # id nums should be same
        assert len(bert_tokens) <= self.max_length
        assert len(bert_tokens) == len(pinyin_tokens)
        # convert list to tensor
        input_ids = torch.LongTensor([101] + bert_tokens + [102])
        pinyin_ids = torch.LongTensor([[0] * 8] + pinyin_tokens + [[0] * 8]).view(-1)
        label = torch.LongTensor([int(label)])
        return input_ids, pinyin_ids, label
def unit_test():
    """Smoke-test SPMDataset + DataLoader batching and tensor shapes."""
    # Hard-coded paths to a development machine; adjust before running
    # anywhere else.
    data_path = "/data/nfsdata2/sunzijun/glyce/tasks/BQ/dev.tsv"
    chinese_bert_path = "/data/nfsdata2/sunzijun/glyce/best/ChineseBERT-base"
    dataset = SPMDataset(data_path=data_path, chinese_bert_path=chinese_bert_path)
    dataloader = DataLoader(
        dataset=dataset,
        batch_size=10,
        num_workers=0,
        shuffle=False,
        # Pad each field of the batch up to the longest sequence.
        collate_fn=partial(collate_to_max_length, fill_values=[0, 0, 0])
    )
    for input_ids, pinyin_ids, label in dataloader:
        bs, length = input_ids.shape
        print(input_ids.shape)
        # pinyin ids are stored flat; reshape back to (batch, seq, -1).
        print(pinyin_ids.reshape(bs, length, -1).shape)
        print(label.view(-1).shape)
        print()
if __name__ == '__main__':
unit_test()
| [
"functools.partial",
"torch.LongTensor"
] | [((1729, 1774), 'torch.LongTensor', 'torch.LongTensor', (['([101] + bert_tokens + [102])'], {}), '([101] + bert_tokens + [102])\n', (1745, 1774), False, 'import torch\n'), ((2340, 2393), 'functools.partial', 'partial', (['collate_to_max_length'], {'fill_values': '[0, 0, 0]'}), '(collate_to_max_length, fill_values=[0, 0, 0])\n', (2347, 2393), False, 'from functools import partial\n'), ((1796, 1851), 'torch.LongTensor', 'torch.LongTensor', (['([[0] * 8] + pinyin_tokens + [[0] * 8])'], {}), '([[0] * 8] + pinyin_tokens + [[0] * 8])\n', (1812, 1851), False, 'import torch\n')] |
from nose.tools import assert_raises, assert_true, assert_equal
from vispy.util.keys import Key, ENTER
def test_key():
    """Test basic key functionality"""
    def bad():
        # Comparing a Key against a non-key/non-string is expected to raise.
        return (ENTER == dict())
    assert_raises(ValueError, bad)
    assert_true(not (ENTER == None))  # noqa
    # A Key compares equal to its name string.
    assert_equal('Return', ENTER)
    print(ENTER.name)
    print(ENTER)  # __repr__
    assert_equal(Key('1'), 49)  # ASCII code
| [
"nose.tools.assert_true",
"nose.tools.assert_equal",
"nose.tools.assert_raises",
"vispy.util.keys.Key"
] | [((213, 243), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'bad'], {}), '(ValueError, bad)\n', (226, 243), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((248, 278), 'nose.tools.assert_true', 'assert_true', (['(not ENTER == None)'], {}), '(not ENTER == None)\n', (259, 278), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((293, 322), 'nose.tools.assert_equal', 'assert_equal', (['"""Return"""', 'ENTER'], {}), "('Return', ENTER)\n", (305, 322), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((391, 399), 'vispy.util.keys.Key', 'Key', (['"""1"""'], {}), "('1')\n", (394, 399), False, 'from vispy.util.keys import Key, ENTER\n')] |
import argparse
import os, sys, time
import json
import jsonlines
import nltk
from nltk import word_tokenize, sent_tokenize
import pandas as pd
from pathlib import Path
import pickle
from utils import timed_func
categories = [
    "AAAI", "ACL", "COLING", "CVPR", "EMNLP", "ICML", "ICRA", "IJCAI", "NAACL", "NIPS", "ICASSP"
]

def get_venue_labels(args):
    """
    Build a mapping from full venue name to ``[v_shortname, label]``.

    For every conference shortname in ``categories``, read the matching
    CSV of venue names (the ``*_v_arxiv.csv`` variant when
    ``args.include_arxiv`` is set) and record each venue's shortname and
    binary label (0 - Workshop, 1 - Conference).

    :param args: parsed CLI arguments providing ``include_arxiv`` and
        ``venue_name_labels_path``
    :return: dict mapping venue name (e.g. "AAAI Spring Symposium 2013")
        to ``[v_shortname, label]``
    """
    venue_name_labels_map = {}
    for v_shortname in categories:
        suffix = "_v_arxiv" if args.include_arxiv else ""
        fname = Path(args.venue_name_labels_path, f"{v_shortname}{suffix}.csv")
        df = pd.read_csv(fname)
        # Column-wise iteration is much faster than df.iterrows();
        # .tolist() yields plain Python scalars, as iterrows did.
        for venue, label in zip(df["venue"].tolist(), df["label"].tolist()):
            venue_name_labels_map[venue] = [v_shortname, label]
    return venue_name_labels_map
def prepare_text(metadata, pdfparse):
    """
    Extract the abstract and sentence-tokenized body text of one paper.

    Return:
        abstract: str (or None)
        bodytext: list of str (or None)
    Both are None when the abstract is missing or shorter than five words,
    or when no non-empty body sentences could be extracted.
    """
    MIN_WORDS = 5
    abstract = metadata['abstract']
    # Reject papers without a usable abstract before touching the body.
    if abstract is None or len(abstract.split()) < MIN_WORDS:
        return None, None
    bodytext = []
    for section in pdfparse['body_text']:
        if section['text'] is not None:
            bodytext += sent_tokenize(section['text'])
    # Drop any empty sentences the tokenizer may have produced.
    bodytext = [sent for sent in bodytext if len(sent) > 0]
    if not bodytext:
        return None, None
    return abstract, bodytext
@timed_func
def filter_articles_main(venue_name_labels_map, args):
    """
    Collect labeled articles from the S2ORC CompSci chunks and pickle them.

    venue_name_labels_map: venue name -> [v_shortname, label]
        (see get_venue_labels).
    args: needs .input_dir (containing metadata_{i}.jsonl and
        pdf_parses_{i}.pkl for chunks 0..99) and .export (output pickle path).

    Writes to args.export a dict keyed by venue shortname; each value is a
    list of articles:
        {'abstract': str, 'bodytext': list of str, 'venue': str,
         'label': int (0 or 1), 'year': int, 'title': str,
         'annual_citations': float}

    Fixes relative to the previous revision:
      * removed the unused `export_dir` variable
      * papers published in curr_year no longer raise ZeroDivisionError when
        computing citations per year (elapsed time is clamped to >= 1 year)
    """
    results = {cat: [] for cat in categories}
    curr_year = 2021
    for chunk_id in range(100):
        start_time = time.time()
        with open(Path(args.input_dir, f"metadata_{chunk_id}.jsonl"), "r") as f_md:
            mds = [json.loads(line) for line in f_md]
        with open(Path(args.input_dir, f"pdf_parses_{chunk_id}.pkl"), "rb") as f_pp:
            cat_pdf = pickle.load(f_pp)
        skipped = 0
        collected = 0
        for metadata in mds:
            pdfparse = cat_pdf[metadata['paper_id']]
            abstract, bodytext = prepare_text(metadata, pdfparse)
            if abstract is None or metadata['year'] is None:
                skipped += 1
                continue
            # Following the 20201206_venue_info/venue_info.py convention
            # (i.e., prioritize journal, then venue) for extracting venue info.
            journal = metadata.get('journal', None)
            venue = metadata.get('venue', None)
            if journal is not None:
                v = journal
            elif venue is not None:
                v = venue
            else:
                v = "None"
            if v in venue_name_labels_map:
                shortname, label = venue_name_labels_map[v]
                # Clamp to >= 1 so current-year papers do not divide by zero.
                years_elapsed = max(curr_year - metadata['year'], 1)
                citation_per_year = len(metadata['inbound_citations']) / years_elapsed
                results[shortname].append({
                    "abstract": abstract,
                    "bodytext": bodytext,
                    "venue": v,
                    "label": label,
                    "year": metadata.get('year', None),
                    "title": metadata.get("title", None),
                    "annual_citations": citation_per_year
                })
                collected += 1
        print ("Chunk {} done in {:.2f} seconds. Skipped {} entries. Collected {} entries.".format(chunk_id, time.time() - start_time, skipped, collected))
    with open(args.export, "wb") as f:
        pickle.dump(results, f)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--venue_name_labels_path", type=str, default="../../data/venue_name_labels")
parser.add_argument("--include_arxiv", action="store_true", default=False)
parser.add_argument("--input_dir", type=str, default="../../data/S2ORC/20200705v1/by_category/Computer Science/")
parser.add_argument("--export", type=str, default="../../data/text_classify_articles_with_arxiv.pkl")
args = parser.parse_args()
print(args)
name_map = get_venue_labels(args)
filter_articles_main(name_map, args)
| [
"pickle.dump",
"argparse.ArgumentParser",
"json.loads",
"pandas.read_csv",
"nltk.sent_tokenize",
"time.time",
"pathlib.Path",
"pickle.load"
] | [((4068, 4085), 'pathlib.Path', 'Path', (['args.export'], {}), '(args.export)\n', (4072, 4085), False, 'from pathlib import Path\n'), ((4199, 4224), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4222, 4224), False, 'import argparse\n'), ((869, 887), 'pandas.read_csv', 'pd.read_csv', (['fname'], {}), '(fname)\n', (880, 887), True, 'import pandas as pd\n'), ((2160, 2171), 'time.time', 'time.time', ([], {}), '()\n', (2169, 2171), False, 'import os, sys, time\n'), ((4133, 4156), 'pickle.dump', 'pickle.dump', (['results', 'f'], {}), '(results, f)\n', (4144, 4156), False, 'import pickle\n'), ((697, 760), 'pathlib.Path', 'Path', (['args.venue_name_labels_path', 'f"""{v_shortname}_v_arxiv.csv"""'], {}), "(args.venue_name_labels_path, f'{v_shortname}_v_arxiv.csv')\n", (701, 760), False, 'from pathlib import Path\n'), ((795, 850), 'pathlib.Path', 'Path', (['args.venue_name_labels_path', 'f"""{v_shortname}.csv"""'], {}), "(args.venue_name_labels_path, f'{v_shortname}.csv')\n", (799, 850), False, 'from pathlib import Path\n'), ((2430, 2447), 'pickle.load', 'pickle.load', (['f_pp'], {}), '(f_pp)\n', (2441, 2447), False, 'import pickle\n'), ((1461, 1491), 'nltk.sent_tokenize', 'sent_tokenize', (["section['text']"], {}), "(section['text'])\n", (1474, 1491), False, 'from nltk import word_tokenize, sent_tokenize\n'), ((2190, 2240), 'pathlib.Path', 'Path', (['args.input_dir', 'f"""metadata_{chunk_id}.jsonl"""'], {}), "(args.input_dir, f'metadata_{chunk_id}.jsonl')\n", (2194, 2240), False, 'from pathlib import Path\n'), ((2275, 2291), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2285, 2291), False, 'import json\n'), ((2341, 2391), 'pathlib.Path', 'Path', (['args.input_dir', 'f"""pdf_parses_{chunk_id}.pkl"""'], {}), "(args.input_dir, f'pdf_parses_{chunk_id}.pkl')\n", (2345, 2391), False, 'from pathlib import Path\n'), ((4003, 4014), 'time.time', 'time.time', ([], {}), '()\n', (4012, 4014), False, 'import os, sys, time\n')] |
import numpy as np
class BBoxFilter(object):
    """Callable predicate accepting (x, y, w, h) boxes whose area lies in
    [min_area, max_area] and whose short/long side ratio is >= min_ratio."""

    def __init__(self, min_area, max_area, min_ratio):
        self.min_area = min_area
        self.max_area = max_area
        self.min_ratio = min_ratio

    def __call__(self, bbox):
        assert len(bbox) == 4
        width, height = bbox[2], bbox[3]
        # Area gate first, then aspect-ratio gate.
        if not (self.min_area <= width * height <= self.max_area):
            return False
        return min(width, height) / max(width, height) >= self.min_ratio
def truncate_bbox(bbox, h, w):
    """Clamp an (x, y, w, h) box so it lies inside an h x w image."""
    left = np.clip(bbox[0], 0, w - 1)
    right = np.clip(bbox[0] + bbox[2], 0, w - 1)
    top = np.clip(bbox[1], 0, h - 1)
    bottom = np.clip(bbox[1] + bbox[3], 0, h - 1)
    # Width/height are recomputed from the clipped corners.
    return left, top, right - left, bottom - top
def round_bbox(bbox):
    """Floor the box coordinates and return them as a tuple of int32 values."""
    return tuple(np.floor(bbox).astype(np.int32))
def compute_bbox(bimg):
    """Tight (x, y, w, h) bounding box of the nonzero pixels of a binary image."""
    row_hits = np.where(np.any(bimg, axis = 1))[0]
    col_hits = np.where(np.any(bimg, axis = 0))[0]
    rmin, rmax = row_hits[0], row_hits[-1]
    cmin, cmax = col_hits[0], col_hits[-1]
    return cmin, rmin, cmax - cmin, rmax - rmin
def compute_iou(bbox1, bbox2):
    """IoU of two (x, y, w, h) boxes.

    Returns None if either box is None, and 0. when the boxes do not overlap.
    """
    if bbox1 is None or bbox2 is None:
        return None
    # Corners of the intersection rectangle.
    x0 = max(bbox1[0], bbox2[0])
    y0 = max(bbox1[1], bbox2[1])
    x1 = min(bbox1[0] + bbox1[2], bbox2[0] + bbox2[2])
    y1 = min(bbox1[1] + bbox1[3], bbox2[1] + bbox2[3])
    if x0 >= x1 or y0 >= y1:
        return 0.
    inter = float(x1 - x0) * (y1 - y0)
    union = bbox1[2] * bbox1[3] + bbox2[2] * bbox2[3] - inter
    return inter / union
def find_max_iou(bbox, bboxes):
    """Index and value of the largest IoU between `bbox` and each of `bboxes`.

    bbox: (x, y, w, h); bboxes: array-like of shape (n, 4).
    Returns (-1, 0.) when `bboxes` is empty or nothing overlaps.
    """
    bbox = np.asarray(bbox)
    bboxes = np.asarray(bboxes)
    if bboxes.shape[0] == 0:
        return -1, 0.
    # Intersection extents of bbox against every candidate.
    top_left = np.maximum([bbox[:2]], bboxes[:, :2])
    bottom_right = np.minimum([bbox[:2] + bbox[2:]], bboxes[:, :2] + bboxes[:, 2:])
    extent = bottom_right - top_left
    # Keep only candidates with a strictly positive overlap in both axes.
    overlapping = np.where(np.all(extent > 0, axis = 1))[0]
    inter = np.prod(extent[overlapping, :], axis = 1, dtype = np.float32)
    denom = bbox[2] * bbox[3] + np.prod(bboxes[overlapping, 2:], axis = 1) - inter
    ious = inter / denom
    if ious.shape[0] == 0:
        return -1, 0.
    best = np.argmax(ious)
    return overlapping[best], ious[best]
def ciou(bboxes1, bboxes2):
    """
    Compute pairwise IoUs between two sets of (x, y, w, h) bounding boxes.
    Input: np.array((n, 4), np.float32), np.array((m, 4), np.float32)
    Output: np.array((n, m), np.float32)
    """
    # Horizontal overlap of every pair.
    x0 = np.maximum.outer(bboxes1[:, 0], bboxes2[:, 0])
    x1 = np.minimum.outer(bboxes1[:, 0] + bboxes1[:, 2],
                          bboxes2[:, 0] + bboxes2[:, 2])
    widths = np.clip(x1 - x0, 0, None)
    # Vertical overlap of every pair.
    y0 = np.maximum.outer(bboxes1[:, 1], bboxes2[:, 1])
    y1 = np.minimum.outer(bboxes1[:, 1] + bboxes1[:, 3],
                          bboxes2[:, 1] + bboxes2[:, 3])
    heights = np.clip(y1 - y0, 0, None)
    inter = widths * heights
    areas1 = np.prod(bboxes1[:, 2:], axis = 1)
    areas2 = np.prod(bboxes2[:, 2:], axis = 1)
    return inter / (np.add.outer(areas1, areas2) - inter)
# @jit('float32[:, :](float32[:, :], float32[:, :])')
# def ciou_v2(bboxes1, bboxes2):
# """
# Compute IoUs between two sets of bounding boxes
# Input: np.array((n, 4), np.float32), np.array((m, 4), np.float32)
# Output: np.array((n, m), np.float32)
# """
# n = bboxes1.shape[0]
# m = bboxes2.shape[0]
# iou = np.zeros((n, m), dtype = np.float32)
# for i in range(n):
# for j in range(m):
# minp = np.maximum(bboxes1[i, :2], bboxes2[j, :2])
# maxp = np.minimum(bboxes1[i, :2] + bboxes1[i, 2:],
# bboxes2[j, :2] + bboxes2[j, 2:])
# delta = maxp - minp
# if delta[0] > 0 and delta[1] > 0:
# intersect = np.prod(delta)
# iou[i, j] = intersect / (np.prod(bboxes1[i, 2:]) + \
# np.prod(bboxes2[j, 2:]) - intersect)
# return iou
def _intersect(bboxes1, bboxes2):
"""
bboxes: t x n x 4
"""
assert bboxes1.shape[0] == bboxes2.shape[0]
t = bboxes1.shape[0]
inters = np.zeros((bboxes1.shape[1], bboxes2.shape[1]), dtype = np.float32)
_min = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype = np.float32)
_max = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype = np.float32)
w = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype = np.float32)
h = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype = np.float32)
for i in range(t):
np.maximum.outer(bboxes1[i, :, 0], bboxes2[i, :, 0], out = _min)
np.minimum.outer(bboxes1[i, :, 0] + bboxes1[i, :, 2],
bboxes2[i, :, 0] + bboxes2[i, :, 2], out = _max)
np.subtract(_max, _min, out = w)
w.clip(min = 0, out = w)
np.maximum.outer(bboxes1[i, :, 1], bboxes2[i, :, 1], out = _min)
np.minimum.outer(bboxes1[i, :, 1] + bboxes1[i, :, 3],
bboxes2[i, :, 1] + bboxes2[i, :, 3], out = _max)
np.subtract(_max, _min, out = h)
h.clip(min = 0, out = h)
np.multiply(w, h, out = w)
inters += w
return inters
def _union(bboxes1, bboxes2):
if id(bboxes1) == id(bboxes2):
w = bboxes1[:, :, 2]
h = bboxes1[:, :, 3]
area = np.sum(w * h, axis = 0)
unions = np.add.outer(area, area)
else:
w = bboxes1[:, :, 2]
h = bboxes1[:, :, 3]
area1 = np.sum(w * h, axis = 0)
w = bboxes2[:, :, 2]
h = bboxes2[:, :, 3]
area2 = np.sum(w * h, axis = 0)
unions = np.add.outer(area1, area2)
return unions
def viou(bboxes1, bboxes2):
    """
    Spatio-temporal IoU for every pair of box tracks.

    bboxes1: t x n x 4, bboxes2: t x m x 4 (same t). Returns an (n, m) array:
    intersection area summed over time, divided by the time-summed union
    (sum of both areas minus the summed intersection).
    """
    # bboxes: t x n x 4
    iou = _intersect(bboxes1, bboxes2)
    union = _union(bboxes1, bboxes2)
    # In-place buffer reuse: union becomes (area sums - intersection),
    # then iou becomes intersection / union.
    np.subtract(union, iou, out = union)
    np.divide(iou, union, out = iou)
return iou | [
"numpy.maximum",
"numpy.sum",
"numpy.argmax",
"numpy.empty",
"numpy.floor",
"numpy.clip",
"numpy.add.outer",
"numpy.prod",
"numpy.multiply",
"numpy.maximum.outer",
"numpy.divide",
"numpy.minimum",
"numpy.asarray",
"numpy.all",
"numpy.subtract",
"numpy.zeros",
"numpy.any",
"numpy.wh... | [((497, 523), 'numpy.clip', 'np.clip', (['bbox[0]', '(0)', '(w - 1)'], {}), '(bbox[0], 0, w - 1)\n', (504, 523), True, 'import numpy as np\n'), ((533, 569), 'numpy.clip', 'np.clip', (['(bbox[0] + bbox[2])', '(0)', '(w - 1)'], {}), '(bbox[0] + bbox[2], 0, w - 1)\n', (540, 569), True, 'import numpy as np\n'), ((579, 605), 'numpy.clip', 'np.clip', (['bbox[1]', '(0)', '(h - 1)'], {}), '(bbox[1], 0, h - 1)\n', (586, 605), True, 'import numpy as np\n'), ((615, 651), 'numpy.clip', 'np.clip', (['(bbox[1] + bbox[3])', '(0)', '(h - 1)'], {}), '(bbox[1] + bbox[3], 0, h - 1)\n', (622, 651), True, 'import numpy as np\n'), ((887, 907), 'numpy.any', 'np.any', (['bimg'], {'axis': '(1)'}), '(bimg, axis=1)\n', (893, 907), True, 'import numpy as np\n'), ((919, 939), 'numpy.any', 'np.any', (['bimg'], {'axis': '(0)'}), '(bimg, axis=0)\n', (925, 939), True, 'import numpy as np\n'), ((1567, 1583), 'numpy.asarray', 'np.asarray', (['bbox'], {}), '(bbox)\n', (1577, 1583), True, 'import numpy as np\n'), ((1595, 1613), 'numpy.asarray', 'np.asarray', (['bboxes'], {}), '(bboxes)\n', (1605, 1613), True, 'import numpy as np\n'), ((1668, 1705), 'numpy.maximum', 'np.maximum', (['[bbox[:2]]', 'bboxes[:, :2]'], {}), '([bbox[:2]], bboxes[:, :2])\n', (1678, 1705), True, 'import numpy as np\n'), ((1715, 1779), 'numpy.minimum', 'np.minimum', (['[bbox[:2] + bbox[2:]]', '(bboxes[:, :2] + bboxes[:, 2:])'], {}), '([bbox[:2] + bbox[2:]], bboxes[:, :2] + bboxes[:, 2:])\n', (1725, 1779), True, 'import numpy as np\n'), ((1876, 1935), 'numpy.prod', 'np.prod', (['delta[intersect_inds, :]'], {'axis': '(1)', 'dtype': 'np.float32'}), '(delta[intersect_inds, :], axis=1, dtype=np.float32)\n', (1883, 1935), True, 'import numpy as np\n'), ((2388, 2434), 'numpy.maximum.outer', 'np.maximum.outer', (['bboxes1[:, 0]', 'bboxes2[:, 0]'], {}), '(bboxes1[:, 0], bboxes2[:, 0])\n', (2404, 2434), True, 'import numpy as np\n'), ((2444, 2522), 'numpy.minimum.outer', 'np.minimum.outer', (['(bboxes1[:, 0] + bboxes1[:, 
2])', '(bboxes2[:, 0] + bboxes2[:, 2])'], {}), '(bboxes1[:, 0] + bboxes1[:, 2], bboxes2[:, 0] + bboxes2[:, 2])\n', (2460, 2522), True, 'import numpy as np\n'), ((2621, 2667), 'numpy.maximum.outer', 'np.maximum.outer', (['bboxes1[:, 1]', 'bboxes2[:, 1]'], {}), '(bboxes1[:, 1], bboxes2[:, 1])\n', (2637, 2667), True, 'import numpy as np\n'), ((2677, 2755), 'numpy.minimum.outer', 'np.minimum.outer', (['(bboxes1[:, 1] + bboxes1[:, 3])', '(bboxes2[:, 1] + bboxes2[:, 3])'], {}), '(bboxes1[:, 1] + bboxes1[:, 3], bboxes2[:, 1] + bboxes2[:, 3])\n', (2693, 2755), True, 'import numpy as np\n'), ((2857, 2883), 'numpy.multiply', 'np.multiply', (['w', 'h'], {'out': 'iou'}), '(w, h, out=iou)\n', (2868, 2883), True, 'import numpy as np\n'), ((2905, 2936), 'numpy.prod', 'np.prod', (['bboxes1[:, 2:]'], {'axis': '(1)'}), '(bboxes1[:, 2:], axis=1)\n', (2912, 2936), True, 'import numpy as np\n'), ((2946, 2977), 'numpy.prod', 'np.prod', (['bboxes2[:, 2:]'], {'axis': '(1)'}), '(bboxes2[:, 2:], axis=1)\n', (2953, 2977), True, 'import numpy as np\n'), ((4010, 4074), 'numpy.zeros', 'np.zeros', (['(bboxes1.shape[1], bboxes2.shape[1])'], {'dtype': 'np.float32'}), '((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32)\n', (4018, 4074), True, 'import numpy as np\n'), ((4086, 4150), 'numpy.empty', 'np.empty', (['(bboxes1.shape[1], bboxes2.shape[1])'], {'dtype': 'np.float32'}), '((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32)\n', (4094, 4150), True, 'import numpy as np\n'), ((4162, 4226), 'numpy.empty', 'np.empty', (['(bboxes1.shape[1], bboxes2.shape[1])'], {'dtype': 'np.float32'}), '((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32)\n', (4170, 4226), True, 'import numpy as np\n'), ((4235, 4299), 'numpy.empty', 'np.empty', (['(bboxes1.shape[1], bboxes2.shape[1])'], {'dtype': 'np.float32'}), '((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32)\n', (4243, 4299), True, 'import numpy as np\n'), ((4308, 4372), 'numpy.empty', 'np.empty', (['(bboxes1.shape[1], bboxes2.shape[1])'], 
{'dtype': 'np.float32'}), '((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32)\n', (4316, 4372), True, 'import numpy as np\n'), ((5509, 5543), 'numpy.subtract', 'np.subtract', (['union', 'iou'], {'out': 'union'}), '(union, iou, out=union)\n', (5520, 5543), True, 'import numpy as np\n'), ((5548, 5578), 'numpy.divide', 'np.divide', (['iou', 'union'], {'out': 'iou'}), '(iou, union, out=iou)\n', (5557, 5578), True, 'import numpy as np\n'), ((2114, 2129), 'numpy.argmax', 'np.argmax', (['ious'], {}), '(ious)\n', (2123, 2129), True, 'import numpy as np\n'), ((4400, 4462), 'numpy.maximum.outer', 'np.maximum.outer', (['bboxes1[i, :, 0]', 'bboxes2[i, :, 0]'], {'out': '_min'}), '(bboxes1[i, :, 0], bboxes2[i, :, 0], out=_min)\n', (4416, 4462), True, 'import numpy as np\n'), ((4469, 4573), 'numpy.minimum.outer', 'np.minimum.outer', (['(bboxes1[i, :, 0] + bboxes1[i, :, 2])', '(bboxes2[i, :, 0] + bboxes2[i, :, 2])'], {'out': '_max'}), '(bboxes1[i, :, 0] + bboxes1[i, :, 2], bboxes2[i, :, 0] +\n bboxes2[i, :, 2], out=_max)\n', (4485, 4573), True, 'import numpy as np\n'), ((4585, 4615), 'numpy.subtract', 'np.subtract', (['_max', '_min'], {'out': 'w'}), '(_max, _min, out=w)\n', (4596, 4615), True, 'import numpy as np\n'), ((4651, 4713), 'numpy.maximum.outer', 'np.maximum.outer', (['bboxes1[i, :, 1]', 'bboxes2[i, :, 1]'], {'out': '_min'}), '(bboxes1[i, :, 1], bboxes2[i, :, 1], out=_min)\n', (4667, 4713), True, 'import numpy as np\n'), ((4720, 4824), 'numpy.minimum.outer', 'np.minimum.outer', (['(bboxes1[i, :, 1] + bboxes1[i, :, 3])', '(bboxes2[i, :, 1] + bboxes2[i, :, 3])'], {'out': '_max'}), '(bboxes1[i, :, 1] + bboxes1[i, :, 3], bboxes2[i, :, 1] +\n bboxes2[i, :, 3], out=_max)\n', (4736, 4824), True, 'import numpy as np\n'), ((4836, 4866), 'numpy.subtract', 'np.subtract', (['_max', '_min'], {'out': 'h'}), '(_max, _min, out=h)\n', (4847, 4866), True, 'import numpy as np\n'), ((4902, 4926), 'numpy.multiply', 'np.multiply', (['w', 'h'], {'out': 'w'}), '(w, h, out=w)\n', (4913, 
4926), True, 'import numpy as np\n'), ((5086, 5107), 'numpy.sum', 'np.sum', (['(w * h)'], {'axis': '(0)'}), '(w * h, axis=0)\n', (5092, 5107), True, 'import numpy as np\n'), ((5123, 5147), 'numpy.add.outer', 'np.add.outer', (['area', 'area'], {}), '(area, area)\n', (5135, 5147), True, 'import numpy as np\n'), ((5218, 5239), 'numpy.sum', 'np.sum', (['(w * h)'], {'axis': '(0)'}), '(w * h, axis=0)\n', (5224, 5239), True, 'import numpy as np\n'), ((5304, 5325), 'numpy.sum', 'np.sum', (['(w * h)'], {'axis': '(0)'}), '(w * h, axis=0)\n', (5310, 5325), True, 'import numpy as np\n'), ((5341, 5367), 'numpy.add.outer', 'np.add.outer', (['area1', 'area2'], {}), '(area1, area2)\n', (5353, 5367), True, 'import numpy as np\n'), ((799, 813), 'numpy.floor', 'np.floor', (['bbox'], {}), '(bbox)\n', (807, 813), True, 'import numpy as np\n'), ((957, 971), 'numpy.where', 'np.where', (['rows'], {}), '(rows)\n', (965, 971), True, 'import numpy as np\n'), ((999, 1013), 'numpy.where', 'np.where', (['cols'], {}), '(cols)\n', (1007, 1013), True, 'import numpy as np\n'), ((1830, 1855), 'numpy.all', 'np.all', (['(delta > 0)'], {'axis': '(1)'}), '(delta > 0, axis=1)\n', (1836, 1855), True, 'import numpy as np\n'), ((2997, 3017), 'numpy.add.outer', 'np.add.outer', (['a1', 'a2'], {}), '(a1, a2)\n', (3009, 3017), True, 'import numpy as np\n'), ((1990, 2033), 'numpy.prod', 'np.prod', (['bboxes[intersect_inds, 2:]'], {'axis': '(1)'}), '(bboxes[intersect_inds, 2:], axis=1)\n', (1997, 2033), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import sys
from nibabel import load as nib_load
import nibabel as nib
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels.api as sm
from scipy import signal
import os
from numpy import genfromtxt
from sklearn.decomposition import PCA
def load_gifti_func(path_to_file):
    """
    Load functional data from a gifti file using nibabel and return it
    as an array of shape <num_verts x num_timepoints>.
    """
    img = nib_load(path_to_file)
    # Each darray holds one timepoint; stack them, then flip to verts-by-time.
    per_timepoint = [darray.data for darray in img.darrays]
    return np.vstack(per_timepoint).T
def load_cifti_func(path_to_file):
    # Load a cifti file with nibabel and return its data transposed so that
    # time runs along axis 1.
    return np.asarray(nib_load(path_to_file).dataobj).T
def calc_fishers_icc(tp1, tp2):
    """
    Intraclass correlation coefficient, Fisher's formulation (as on the
    Wikipedia ICC page). tp1 and tp2 hold the paired measurements and should
    have shape (n, 1) or (n,) where n is the number of samples.
    """
    n = tp1.shape[0]
    grand_mean = np.mean(np.vstack((tp1, tp2)))
    # Pooled variance of both measurement sets about the grand mean.
    pooled_var = np.mean(np.vstack((np.square(tp1 - grand_mean),
                                      np.square(tp2 - grand_mean))))
    cross = np.sum((tp1 - grand_mean) * (tp2 - grand_mean))
    return cross / (n * pooled_var)
def pre_post_carpet_plot(noisy_time_series, cleaned_time_series):
    """
    Draw side-by-side grey-scale carpet plots of BOLD data before and after
    denoising so the two can be compared visually.

    Both inputs have shape <num_parcels, num_timepoints>; parcellated series
    are preferable to whole-hemisphere data (far fewer rows to render).
    Every region's time signal is demeaned and divided by its standard
    deviation before plotting.

    Returns the matplotlib axes object of the first (noisy) subplot.
    """
    #Copy the data so the caller's arrays are never modified
    noisy_data = np.copy(noisy_time_series)
    clean_data = np.copy(cleaned_time_series)
    #Calculate means and standard deviations for all parcels
    noisy_means = np.mean(noisy_data, axis = 1)
    noisy_stds = np.std(noisy_data, axis = 1)
    clean_means = np.mean(clean_data, axis = 1)
    clean_stds = np.std(clean_data, axis = 1)
    #Empty matrices for demeaned and normalized data
    dn_noisy_data = np.zeros(noisy_data.shape)
    dn_clean_data = np.zeros(clean_data.shape)
    #Use the means and stds to demean and normalize all parcels' time signals
    for i in range(0, clean_data.shape[0]):
        dn_noisy_data[i,:] = (noisy_data[i,:] - noisy_means[i])/noisy_stds[i]
        dn_clean_data[i,:] = (clean_data[i,:] - clean_means[i])/clean_stds[i]
    #Create the left subplot -- this axes object is what gets returned
    plot_obj = plt.subplot(1,2,1)
    #Plot the noisy data
    img_plot = plt.imshow(dn_noisy_data, aspect = 'auto', cmap = 'binary')
    plt.title('Noisy BOLD Data')
    plt.xlabel('Timepoint #')
    plt.ylabel('Region # (Arbritrary)')
    plt.colorbar()
    #Plot the clean data on the right subplot
    plt.subplot(1,2,2)
    img_plot2 = plt.imshow(dn_clean_data, aspect = 'auto', cmap = 'binary')
    plt.title('Clean BOLD Data')
    plt.xlabel('Timepoint #')
    plt.colorbar()
    fig = plt.gcf()
    fig.set_size_inches(15, 5)
    return plot_obj
def parcellate_func_combine_hemis(lh_func, rh_func, lh_parcel_path, rh_parcel_path):
    """
    Average vertexwise functional data (<num_verts, num_timepoints>) within
    each parcel of a freesurfer annotation for both hemispheres, then stack
    the results as <num_parcels, num_timepoints> with the lh parcels first.
    Also returns the parcel names decoded to strings (lh names first).
    NOTE: THIS ASSUMES THE FIRST PARCEL WILL BE MEDIAL WALL, AND DISREGARDS
    ANY VERTICES WITHIN THAT PARCEL. IF THIS IS NOT THE CASE FOR YOUR
    PARCELLATION, DO NOT USE THIS FUNCTION.
    """
    #read_annot returns a tuple of [labels, ctab, names]
    lh_annot = nib.freesurfer.io.read_annot(lh_parcel_path)
    rh_annot = nib.freesurfer.io.read_annot(rh_parcel_path)

    def _parcel_means(func_data, annot):
        #Mean time series per parcel, skipping label 0 (medial wall)
        n_parcels = len(annot[2]) - 1
        means = np.zeros((n_parcels, func_data.shape[1]))
        for parcel_id in range(1, n_parcels + 1):
            verts = np.where(annot[0] == parcel_id)[0]
            means[parcel_id - 1, :] = np.mean(func_data[verts, :], axis = 0)
        return means

    parcellated_data = np.vstack((_parcel_means(lh_func, lh_annot),
                                   _parcel_means(rh_func, rh_annot)))
    #Concatenate names (medial wall dropped) and convert from bytes to str
    parcel_labels = [name.decode("utf-8")
                     for name in lh_annot[2][1:] + rh_annot[2][1:]]
    return parcellated_data, parcel_labels
def net_mat_summary_stats(matrix_data, include_diagonals, parcel_labels):
    """
    Collapse a <num_parcels x num_parcels> statistic matrix to a 7x7 matrix of
    means over every network pairing (e.g. within-DMN, DMN-vs-Control).

    Set include_diagonals to True to keep the parcel-with-itself entries (the
    matrix diagonal) in the within-network means; exclude them for
    conventional functional connectivity matrices where the diagonal is 1/Inf.

    Only works with the Schaeffer/Yeo 7-network naming convention:
    parcel_labels is a list of parcel name strings, and each parcel is
    assigned to the network whose name appears inside its label.

    Improvements over the previous revision:
      * the per-pair Python double loop is replaced by a vectorized
        submatrix mean (np.ix_), identical results but O(1) Python work per
        network pair
      * an empty network now yields NaN for its cells (with a runtime
        warning) instead of crashing with ZeroDivisionError
    """
    #The names of the different networks
    network_names = ['Vis', 'SomMot', 'DorsAttn', 'SalVentAttn', 'Limbic', 'Cont', 'Default']
    #Array to store network IDs (0-6, corresponding to order of network names)
    network_ids = np.zeros((len(parcel_labels), 1))
    #Find which network each parcel belongs to (substring match on the label)
    for i in range(0, len(parcel_labels)):
        for j in range(0, len(network_names)):
            if network_names[j] in parcel_labels[i]:
                network_ids[i] = j
    matrix_data = np.asarray(matrix_data)
    #Calculate the average stat for each network combination
    network_stats = np.zeros((7, 7))
    for i in range(7):
        inds_i = np.where(network_ids == i)[0]
        for j in range(7):
            inds_j = np.where(network_ids == j)[0]
            #All entries for this network pairing in one submatrix
            sub = matrix_data[np.ix_(inds_i, inds_j)]
            total = sub.sum()
            count = sub.size
            if i == j and not include_diagonals:
                #Drop the parcel-with-itself entries (submatrix diagonal)
                total -= np.trace(sub)
                count -= len(inds_i)
            network_stats[i, j] = total / count
    return network_stats
def net_summary_stats(parcel_data, parcel_labels):
    """
    Resample a parcel-level statistic to the network level (mean per network).
    This is the 1D counterpart of net_mat_summary_stats.

    Only works with the Schaeffer/Yeo 7-network naming convention:
    parcel_labels is a list of parcel name strings, and each parcel is
    assigned to the network whose name appears inside its label.
    Returns a length-7 array ordered as
    [Vis, SomMot, DorsAttn, SalVentAttn, Limbic, Cont, Default].
    """
    network_names = ['Vis', 'SomMot', 'DorsAttn', 'SalVentAttn', 'Limbic', 'Cont', 'Default']
    #Assign every parcel a network id (0-6) by substring match on its label
    network_ids = np.zeros((len(parcel_labels), 1))
    for parcel_ind, label in enumerate(parcel_labels):
        for net_ind, net_name in enumerate(network_names):
            if net_name in label:
                network_ids[parcel_ind] = net_ind
    #Mean of the statistic over the member parcels of each network
    network_stats = np.zeros(7)
    for net_ind in range(7):
        members = np.where(network_ids == net_ind)[0]
        total = sum(parcel_data[ind] for ind in members)
        network_stats[net_ind] = total / len(members)
    return network_stats
def plot_network_timeseries(parcel_data, parcel_labels):
    """
    Plot every parcel's time course, one subplot per Yeo-7 network, each
    network drawn in its conventional color.

    parcel_data: indexable collection of 1D time series, one per parcel.
    parcel_labels: parcel name strings (Schaeffer/Yeo naming); each parcel is
    assigned to the network whose name appears inside its label.
    Returns the matplotlib figure (sized 15 x 20 inches).
    """
    #The names of the different networks
    network_names = ['Vis', 'SomMot', 'DorsAttn', 'SalVentAttn', 'Limbic', 'Cont', 'Default']
    #RGBA colors, one per network, in the same order as network_names
    network_colors = [[121/255,3/255,136/255,1],[67/255,129/255,182/255,1],[0/255,150/255,0/255,1], \
                     [198/255,41/255,254/255,1],[219/255,249/255,160/255,1], \
                     [232/255,149/255,0/255,1], [207/255,60/255,74/255,1]]
    #Array to store network IDs (0-6, corresponding to order of network names)
    network_ids = np.zeros((len(parcel_labels),1))
    #Find which network each parcel belongs to (substring match on the label)
    for i in range(0,len(parcel_labels)):
        for j in range(0,len(network_names)):
            if network_names[j] in parcel_labels[i]:
                network_ids[i] = j
    fig, ax = plt.subplots(7,1)
    for i in range(0,7):
        in_network = np.where(network_ids == i)[0]
        plt.sca(ax[i])
        for j in range(0, in_network.shape[0]):
            plt.plot(parcel_data[in_network[j]], color=network_colors[i])
        plt.ylabel('Signal Intensity')
        plt.title('Time-Course For All ' + network_names[i] + ' Parcels')
        if i != 6:
            #Only the bottom subplot keeps its x tick labels
            plt.xticks([])
    plt.xlabel('Volume # (excluding high-motion volumes)')
    fig.set_size_inches(15, 20)
    return fig
def calc_norm_std(parcel_data, confound_path):
    """
    Normalized standard deviation of cleaned fMRI time signals, a measure of
    BOLD variability/amplitude. Particularly useful on scrubbed data, where
    the FFT needed for ALFF can no longer be properly calculated.

    parcel_data: array of shape <num_regions, num_timepoints>.
    confound_path: path to the run's confound file; its global_signal column
    gives the pre-processing median BOLD intensity used for normalization.
    Returns std(BOLD time series) / median_global_signal_intensity per region.
    """
    confounds = pd.read_csv(confound_path, sep='\t')
    median_intensity = np.median(confounds.global_signal.values)
    # Vectorized over the region axis instead of a per-region loop.
    return np.std(parcel_data, axis=1) / median_intensity
def network_bar_chart(network_vals, ylabel):
    """
    Bar chart of one value per Yeo-7 network, each bar in its network's
    conventional color.

    network_vals: sequence of 7 values ordered
    [Vis, SomMot, DorsAttn, SalVentAttn, Limbic, Cont, Default].
    ylabel: label for the y axis.
    Returns the object produced by plt.bar.
    """
    #The names of the different networks
    network_names = ['Vis', 'SomMot', 'DorsAttn', 'SalVentAttn', 'Limbic', 'Cont', 'Default']
    #RGBA colors, one per network, in the same order as network_names
    network_colors = [[121/255,3/255,136/255,1],[67/255,129/255,182/255,1],[0/255,150/255,0/255,1], \
                     [198/255,41/255,254/255,1],[219/255,249/255,160/255,1], \
                     [232/255,149/255,0/255,1], [207/255,60/255,74/255,1]]
    x = [1, 2, 3, 4, 5, 6, 7]
    fig = plt.bar(x, network_vals, color = network_colors, tick_label = network_names)
    plt.ylabel(ylabel)
    plt.xticks(rotation=45)
    return fig
def fs_anat_to_array(path_to_fs_subject, folder_for_output_files):
"""
#This function serves the function of collecting the aseg.stats file,
#lh.aparc.stats file, and rh.aparc.stats files from a freesurfer subject
#found at the path path_to_fs_subject, and grabs the volumes for all
#subcortical structures, along with volumes, thicknesses, and surface
#areas for all cortical structures, and saves them as .npy files under
#folder_for_output_files. Also saves a text file with the names of the
#regions (one for subcortical, and one for lh/rh)
"""
aseg_path = os.path.join(path_to_fs_subject, 'stats', 'aseg.stats')
lh_path = os.path.join(path_to_fs_subject, 'stats', 'lh.aparc.stats')
rh_path = os.path.join(path_to_fs_subject, 'stats', 'rh.aparc.stats')
f = open(aseg_path, "r")
lines = f.readlines()
f.close()
header = '# ColHeaders Index SegId NVoxels Volume_mm3 StructName normMean normStdDev normMin normMax normRange'
subcort_names = ['Left-Lateral-Ventricle', 'Left-Inf-Lat-Vent', 'Left-Cerebellum-White-Matter',
'Left-Cerebellum-Cortex', 'Left-Thalamus-Proper', 'Left-Caudate', 'Left-Putamen',
'Left-Pallidum', '3rd-Ventricle', '4th-Ventricle', 'Brain-Stem', 'Left-Hippocampus',
'Left-Amygdala', 'CSF' ,'Left-Accumbens-area', 'Left-VentralDC', 'Left-vessel',
'Left-choroid-plexus', 'Right-Lateral-Ventricle', 'Right-Inf-Lat-Vent',
'Right-Cerebellum-White-Matter','Right-Cerebellum-Cortex', 'Right-Thalamus-Proper',
'Right-Caudate', 'Right-Putamen', 'Right-Pallidum', 'Right-Hippocampus',
'Right-Amygdala', 'Right-Accumbens-area', 'Right-VentralDC', 'Right-vessel',
'Right-choroid-plexus', '5th-Ventricle', 'WM-hypointensities', 'Left-WM-hypointensities',
'Right-WM-hypointensities', 'non-WM-hypointensities', 'Left-non-WM-hypointensities',
'Right-non-WM-hypointensities', 'Optic-Chiasm', 'CC_Posterior', 'CC_Mid_Posterior',
'CC_Central', 'CC_Mid_Anterior', 'CC_Anterior']
aseg_vol = []
header_found = 0
for i in range(0,len(lines)):
if header_found == 1:
split_line = lines[i].split()
if split_line[4] != subcort_names[i-header_found_ind]:
raise NameError('Error: anatomy names do not line up with expectation. Expected ' +
subcort_names[i-header_found_ind] + ' but found ' + split_line[4])
aseg_vol.append(float(split_line[3]))
if header in lines[i]:
header_found = 1
header_found_ind = i + 1 #actually add one for formatting....
#This indicates that (1) the column headings should
#be correct, and that (2) this is where to start
#looking for anatomical stats
lh_f = open(lh_path, "r")
lh_lines = lh_f.readlines()
lh_f.close()
header = '# ColHeaders StructName NumVert SurfArea GrayVol ThickAvg ThickStd MeanCurv GausCurv FoldInd CurvInd'
cort_names = ['bankssts', 'caudalanteriorcingulate', 'caudalmiddlefrontal', 'cuneus', 'entorhinal',
'fusiform', 'inferiorparietal', 'inferiortemporal', 'isthmuscingulate', 'lateraloccipital',
'lateralorbitofrontal', 'lingual', 'medialorbitofrontal', 'middletemporal', 'parahippocampal',
'paracentral', 'parsopercularis', 'parsorbitalis', 'parstriangularis', 'pericalcarine',
'postcentral', 'posteriorcingulate', 'precentral', 'precuneus', 'rostralanteriorcingulate',
'rostralmiddlefrontal', 'superiorfrontal', 'superiorparietal', 'superiortemporal', 'supramarginal',
'frontalpole', 'temporalpole', 'transversetemporal', 'insula']
lh_surface_area = []
lh_volume = []
lh_thickness = []
header_found = 0
for i in range(0,len(lh_lines)):
if header_found == 1:
split_line = lh_lines[i].split()
if split_line[0] != cort_names[i-header_found_ind]:
raise NameError('Error: anatomy names do not line up with expectation. Expected ' +
cort_names[i-header_found_ind] + ' but found ' + split_line[4])
#then insert text to actually grab/save the data.....
lh_surface_area.append(float(split_line[2]))
lh_volume.append(float(split_line[3]))
lh_thickness.append(float(split_line[4]))
if header in lh_lines[i]:
header_found = 1
header_found_ind = i + 1 #actually add one for formatting....
#This indicates that (1) the column headings should
#be correct, and that (2) this is where to start
#looking for anatomical stats
rh_f = open(rh_path, "r")
rh_lines = rh_f.readlines()
rh_f.close()
rh_surface_area = []
rh_volume = []
rh_thickness = []
header_found = 0
for i in range(0,len(rh_lines)):
if header_found == 1:
split_line = rh_lines[i].split()
if split_line[0] != cort_names[i-header_found_ind]:
raise NameError('Error: anatomy names do not line up with expectation. Expected ' +
cort_names[i-header_found_ind] + ' but found ' + split_line[4])
#then insert text to actually grab/save the data.....
rh_surface_area.append(float(split_line[2]))
rh_volume.append(float(split_line[3]))
rh_thickness.append(float(split_line[4]))
if header in rh_lines[i]:
header_found = 1
header_found_ind = i + 1 #actually add one for formatting....
#This indicates that (1) the column headings should
#be correct, and that (2) this is where to start
#looking for anatomical stats
if os.path.exists(folder_for_output_files) == False:
os.mkdir(folder_for_output_files)
#Save the metrics as numpy files
np.save(os.path.join(folder_for_output_files, 'aseg_vols.npy'), np.asarray(aseg_vol))
np.save(os.path.join(folder_for_output_files, 'lh_aseg_surface_areas.npy'), np.asarray(lh_surface_area))
np.save(os.path.join(folder_for_output_files, 'lh_aseg_volumes.npy'), np.asarray(lh_volume))
np.save(os.path.join(folder_for_output_files, 'lh_aseg_thicknesses.npy'), np.asarray(lh_thickness))
np.save(os.path.join(folder_for_output_files, 'rh_aseg_surface_areas.npy'), np.asarray(rh_surface_area))
np.save(os.path.join(folder_for_output_files, 'rh_aseg_volumes.npy'), np.asarray(rh_volume))
np.save(os.path.join(folder_for_output_files, 'rh_aseg_thicknesses.npy'), np.asarray(rh_thickness))
#Calculate some bilateral metrics
left_vent = 0
right_vent = 18
total_lateral_vent = aseg_vol[left_vent] + aseg_vol[right_vent]
left_hipp = 11
right_hipp = 26
total_hipp_vol = aseg_vol[left_hipp] + aseg_vol[right_hipp]
left_thal = 4
right_thal = 22
total_thal_vol = aseg_vol[left_thal] + aseg_vol[right_thal]
left_amyg = 12
right_amyg = 27
total_amyg_vol = aseg_vol[left_amyg] + aseg_vol[right_amyg]
#Also calculate global thickness
numerator = np.sum(np.multiply(lh_surface_area,lh_thickness)) + np.sum(np.multiply(rh_surface_area,rh_thickness))
denominator = np.sum(lh_surface_area) + np.sum(rh_surface_area)
whole_brain_ave_thick = numerator/denominator
discovery_metric_array = [total_hipp_vol, total_amyg_vol, total_thal_vol,
total_lateral_vent, whole_brain_ave_thick]
np.save(os.path.join(folder_for_output_files, 'discovery_anat_metrics.npy'), np.asarray(discovery_metric_array))
discovery_anat_ids = ['bilateral_hipp_volume', 'bilateral_amyg_vol', 'bilateral_thal_vol',
'bilateral_lateral_vent_vol', 'whole_brain_ave_thick']
#Then save a file with the region names
with open(os.path.join(folder_for_output_files, 'subcortical_region_names.txt'), 'w') as f:
for item in subcort_names:
f.write("%s\n" % item)
with open(os.path.join(folder_for_output_files, 'cortical_region_names.txt'), 'w') as f:
for item in cort_names:
f.write("%s\n" % item)
with open(os.path.join(folder_for_output_files, 'discovery_region_names.txt'), 'w') as f:
for item in discovery_anat_ids:
f.write("%s\n" % item)
return
def calculate_XT_X_Neg1_XT(X):
    """Return ((X^T X)^-1) X^T, the least-squares projection term.

    Multiplying the returned matrix by an observation vector Y yields the
    ordinary-least-squares beta weights.  X is the design matrix of shape
    (n x d): n observations by d predictors.  The Moore-Penrose
    pseudo-inverse is used, so rank-deficient designs do not raise.
    """
    design_T = X.transpose()
    gram_inv = np.linalg.pinv(np.matmul(design_T, X))
    return np.matmul(gram_inv, design_T)
def partial_clean_fast(Y, XT_X_Neg1_XT, bad_regressors):
    """Regress nuisance signal out of time series Y and return the residual.

    Y has shape (n,) or (n, 1) where n is the number of timepoints.
    XT_X_Neg1_XT is ((X^T)X)^-1 (X^T) for the full design matrix X, whose
    leading columns are the "bad" (nuisance) regressors — noise ICs,
    constant, linear trend, etc. — followed by the "good" regressors.
    The full model's beta weights are obtained by multiplying
    XT_X_Neg1_XT with Y; only the betas belonging to the nuisance columns
    are used to reconstruct the noise signal, which is then subtracted
    from Y.  This is why the nuisance regressors must come first in X.
    """
    betas = np.matmul(XT_X_Neg1_XT, Y)
    n_bad = bad_regressors.shape[1]
    noise_fit = np.matmul(bad_regressors, betas[:n_bad])
    return Y - noise_fit
from scipy.signal import butter, filtfilt
def construct_filter(btype, cutoff, TR, order):
    """Build Butterworth filter coefficients for fMRI time-series filtering.

    Parameters
    ----------
    btype : str
        One of 'lowpass', 'highpass', or 'bandpass'.
    cutoff : list of float
        Cutoff frequency/frequencies in Hz; length 1 for lowpass/highpass,
        length 2 for bandpass.
    TR : float
        Repetition time in seconds (sampling period); the Nyquist
        frequency is 0.5/TR.
    order : int
        Filter order.  The effective order is doubled because the
        coefficients are meant for filtfilt (forward-backward filtering
        that removes phase distortion).  Order 6 is recommended.

    Returns
    -------
    b, a : ndarray
        Numerator/denominator coefficients; apply them with apply_filter.

    Raises
    ------
    NameError
        If btype is unknown or cutoff has the wrong length.  (NameError is
        kept for backward compatibility with existing callers, though
        ValueError would be more conventional.)
    """
    nyq = 0.5 * (1 / TR)
    if btype == 'lowpass':
        if len(cutoff) != 1:
            raise NameError('Error: lowpass type filter should have one cutoff value')
        low = cutoff[0] / nyq
        b, a = butter(order, low, btype='lowpass')
    elif btype == 'highpass':
        if len(cutoff) != 1:
            raise NameError('Error: highpass type filter should have one cutoff value')
        high = cutoff[0] / nyq
        b, a = butter(order, high, btype='highpass')
    elif btype == 'bandpass':
        if len(cutoff) != 2:
            raise NameError('Error: bandpass type filter should have two cutoff values')
        low = min(cutoff) / nyq
        high = max(cutoff) / nyq
        b, a = butter(order, [low, high], btype='bandpass')
    else:
        # Fixed message: the accepted values are the full names
        # ('lowpass'/'highpass'/'bandpass'), not 'low'/'high'/'band',
        # and "should by" was a typo.
        raise NameError("Error: filter type should be 'lowpass', 'highpass', or 'bandpass'")
    return b, a
########################################################################################
########################################################################################
########################################################################################
def apply_filter(b, a, signal):
    """Zero-phase filter *signal* with Butterworth coefficients (b, a).

    Thin wrapper around scipy.signal.filtfilt, which runs the filter
    forward and then backward so no phase distortion is introduced.
    Pair with construct_filter to obtain the coefficients.
    """
    return filtfilt(b, a, signal)
########################################################################################
########################################################################################
########################################################################################
def output_stats_figures_pa_ap_compare(cleaned_ap, cleaned_pa):
    """Visualize agreement between AP- and PA-encoded connectivity estimates.

    Produces five matplotlib figures: the correlation (connectivity) matrix
    of each input, the absolute difference of the two matrices, a histogram
    of those differences (title reports the mean), and a scatter plot of
    the upper-triangle edges of one matrix against the other (title reports
    their correlation).

    Parameters
    ----------
    cleaned_ap, cleaned_pa : ndarray
        Denoised time series of shape <num_regions, num_timepoints> for the
        AP and PA acquisitions, respectively.  Figures are drawn as a side
        effect; nothing is returned.
    """
    # Removed a stray no-op statement (`cleaned_ap.shape`) that evaluated
    # and discarded the shape between the two imshow figures.
    cleaned_ap_netmat = np.corrcoef(cleaned_ap)
    cleaned_pa_netmat = np.corrcoef(cleaned_pa)
    plt.figure()
    plt.imshow(cleaned_ap_netmat)
    plt.colorbar()
    plt.title('AP Conn Matrix')
    plt.figure()
    plt.imshow(cleaned_pa_netmat)
    plt.colorbar()
    plt.title('PA Conn Matrix')
    plt.figure()
    corr_dif = cleaned_ap_netmat - cleaned_pa_netmat
    # Fixed color scale (0-0.1) so difference maps are comparable across runs.
    plt.imshow(np.abs(corr_dif), vmin=0, vmax=0.1)
    plt.title('abs(AP - PA)')
    plt.colorbar()
    plt.figure()
    plt.hist(np.abs(np.reshape(corr_dif, corr_dif.shape[0]**2)), bins = 20)
    plt.title('abs(AP - PA) mean = ' + str(np.mean(np.abs(corr_dif))))
    # Compare only unique edges: strict upper triangle (diagonal excluded).
    ap_arr = cleaned_ap_netmat[np.triu_indices(cleaned_ap_netmat.shape[0], k = 1)]
    pa_arr = cleaned_pa_netmat[np.triu_indices(cleaned_pa_netmat.shape[0], k = 1)]
    plt.figure()
    plt.scatter(ap_arr, pa_arr)
    plt.title('AP-PA corr: ' + str(np.corrcoef(ap_arr, pa_arr)[0,1]))
def find_mean_fd(path_to_func):
    """Return the mean framewise displacement (FD) for a functional run.

    path_to_func must point to the fMRIPrep fsaverage functional file; the
    matching '*desc-confounds_regressors.tsv' is assumed to live alongside
    it, with its path derived by dropping the fixed 31-character filename
    suffix of path_to_func.  The first FD entry is undefined (there is no
    preceding volume), so the mean is taken over entries 1..end.

    The previous comment block here was copy-pasted from the regressor-
    matrix builder and described behavior this function does not have;
    it also left an unused `partial_confounds` local, now removed.
    """
    confound_path = path_to_func[:-31] + 'desc-confounds_regressors.tsv'
    confound_df = pd.read_csv(confound_path, sep='\t')
    fd_arr = np.copy(confound_df.loc[:, 'framewise_displacement'].values)
    return np.mean(fd_arr[1:])
def convert_to_upper_arr(np_square_matrix):
    """Flatten the strict upper triangle of a square matrix into a 1-D array.

    The diagonal is excluded (k=1), so for an n x n input the result has
    n*(n-1)/2 elements, ordered row by row.
    """
    n = np_square_matrix.shape[0]
    row_inds, col_inds = np.triu_indices(n, k=1)
    return np_square_matrix[row_inds, col_inds]
def demedian_parcellate_func_combine_hemis(lh_func, rh_func, lh_parcel_path, rh_parcel_path):
    """
    #Function that takes functional data in the form <num_verts, num_timepoints> for
    #both the left and right hemisphere, and averages the functional time series across
    #all vertices defined in a given parcel, for every parcel, with the parcels identified
    #by a annotation file specified at ?h_parcel_path. The function then returns a combined
    #matrix of size <num_parcels, num_timepoints> and <num_labels> for the time series and
    #parcel label names, respectively. The lh parcels will preceed the rh parcels in order.
    #Prior to taking the average of all vertices, all vertices time signals are divided by their
    #median signal intensity. The mean of all these medians within a given parcel is then
    #exported with this function as the third argument
    #NOTE: THIS ASSUMES THE FIRST PARCEL WILL BE MEDIAL WALL, AND DISREGARDS ANY VERTICES WITHIN
    #THAT PARCEL. IF THIS IS NOT THE CASE FOR YOUR PARCELLATION, DO NOT USE THIS FUNCTION.
    """
    #Output will be tuple of format [labels, ctab, names]
    lh_parcels = nib.freesurfer.io.read_annot(lh_parcel_path)
    rh_parcels = nib.freesurfer.io.read_annot(rh_parcel_path)
    #Make array to store parcellated data with shape <num_parcels, num_timepoints>
    # The "- 1" drops the medial-wall parcel (index 0) from the output.
    lh_parcellated_data = np.zeros((len(lh_parcels[2]) - 1, lh_func.shape[1]))
    rh_parcellated_data = np.zeros((len(rh_parcels[2]) - 1, rh_func.shape[1]))
    lh_parcel_medians = np.zeros(len(lh_parcels[2]) - 1)
    rh_parcel_medians = np.zeros(len(rh_parcels[2]) - 1)
    # Per-vertex median over time; vertices whose median is (near-)zero are
    # presumably empty/outside the brain and are marked NaN so nanmean below
    # excludes them from the parcel averages.
    lh_vertex_medians = np.nanmedian(lh_func, axis=1)
    rh_vertex_medians = np.nanmedian(rh_func, axis=1)
    lh_vertex_medians[np.where(lh_vertex_medians < 0.001)] = np.nan
    rh_vertex_medians[np.where(rh_vertex_medians < 0.001)] = np.nan
    # Median-normalize each vertex's time series ([:,None] broadcasts the
    # per-vertex median across the time axis).
    lh_adjusted_func = lh_func/lh_vertex_medians[:,None]
    rh_adjusted_func = rh_func/rh_vertex_medians[:,None]
    #Start with left hemisphere
    # range starts at 1: parcel 0 (medial wall) is skipped; output index is i-1.
    for i in range(1,len(lh_parcels[2])):
        #Find the voxels for the current parcel
        vois = np.where(lh_parcels[0] == i)
        #Take the mean of all voxels of interest
        lh_parcellated_data[i-1, :] = np.nanmean(lh_adjusted_func[vois[0],:], axis = 0)
        lh_parcel_medians[i-1] = np.nanmean(lh_vertex_medians[vois[0]])
    #Move to right hemisphere
    for i in range(1,len(rh_parcels[2])):
        vois = np.where(rh_parcels[0] == i)
        rh_parcellated_data[i-1, :] = np.nanmean(rh_adjusted_func[vois[0],:], axis = 0)
        rh_parcel_medians[i-1] = np.nanmean(rh_vertex_medians[vois[0]])
    #Then concatenate parcel labels and parcel timeseries between the left and right hemisphere
    #and drop the medial wall from label list
    parcellated_data = np.vstack((lh_parcellated_data, rh_parcellated_data))
    parcel_labels = lh_parcels[2][1:] + rh_parcels[2][1:]
    parcel_medians = np.hstack((lh_parcel_medians, rh_parcel_medians))
    #Try to convert the parcel labels from bytes to normal string
    # read_annot returns label names as bytes; decode for downstream use.
    for i in range(0, len(parcel_labels)):
        parcel_labels[i] = parcel_labels[i].decode("utf-8")
    return parcellated_data, parcel_labels, parcel_medians
| [
"matplotlib.pyplot.title",
"os.mkdir",
"numpy.sum",
"numpy.abs",
"numpy.nanmedian",
"pandas.read_csv",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.figure",
"numpy.mean",
"os.path.join",
"numpy.nanmean",
"numpy.multiply",
"numpy.copy",
"numpy.std",
"numpy.power",
"matplotlib.pyplot.imsh... | [((525, 547), 'nibabel.load', 'nib_load', (['path_to_file'], {}), '(path_to_file)\n', (533, 547), True, 'from nibabel import load as nib_load\n'), ((764, 786), 'nibabel.load', 'nib_load', (['path_to_file'], {}), '(path_to_file)\n', (772, 786), True, 'from nibabel import load as nib_load\n'), ((1165, 1188), 'numpy.power', 'np.power', (['(tp1 - xhat)', '(2)'], {}), '(tp1 - xhat, 2)\n', (1173, 1188), True, 'import numpy as np\n'), ((1204, 1227), 'numpy.power', 'np.power', (['(tp2 - xhat)', '(2)'], {}), '(tp2 - xhat, 2)\n', (1212, 1227), True, 'import numpy as np\n'), ((1989, 2015), 'numpy.copy', 'np.copy', (['noisy_time_series'], {}), '(noisy_time_series)\n', (1996, 2015), True, 'import numpy as np\n'), ((2033, 2061), 'numpy.copy', 'np.copy', (['cleaned_time_series'], {}), '(cleaned_time_series)\n', (2040, 2061), True, 'import numpy as np\n'), ((2142, 2169), 'numpy.mean', 'np.mean', (['noisy_data'], {'axis': '(1)'}), '(noisy_data, axis=1)\n', (2149, 2169), True, 'import numpy as np\n'), ((2189, 2215), 'numpy.std', 'np.std', (['noisy_data'], {'axis': '(1)'}), '(noisy_data, axis=1)\n', (2195, 2215), True, 'import numpy as np\n'), ((2236, 2263), 'numpy.mean', 'np.mean', (['clean_data'], {'axis': '(1)'}), '(clean_data, axis=1)\n', (2243, 2263), True, 'import numpy as np\n'), ((2283, 2309), 'numpy.std', 'np.std', (['clean_data'], {'axis': '(1)'}), '(clean_data, axis=1)\n', (2289, 2309), True, 'import numpy as np\n'), ((2390, 2416), 'numpy.zeros', 'np.zeros', (['noisy_data.shape'], {}), '(noisy_data.shape)\n', (2398, 2416), True, 'import numpy as np\n'), ((2437, 2463), 'numpy.zeros', 'np.zeros', (['clean_data.shape'], {}), '(clean_data.shape)\n', (2445, 2463), True, 'import numpy as np\n'), ((2780, 2800), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (2791, 2800), True, 'import matplotlib.pyplot as plt\n'), ((2858, 2913), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dn_noisy_data'], {'aspect': 
'"""auto"""', 'cmap': '"""binary"""'}), "(dn_noisy_data, aspect='auto', cmap='binary')\n", (2868, 2913), True, 'import matplotlib.pyplot as plt\n'), ((2922, 2950), 'matplotlib.pyplot.title', 'plt.title', (['"""Noisy BOLD Data"""'], {}), "('Noisy BOLD Data')\n", (2931, 2950), True, 'import matplotlib.pyplot as plt\n'), ((2955, 2980), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Timepoint #"""'], {}), "('Timepoint #')\n", (2965, 2980), True, 'import matplotlib.pyplot as plt\n'), ((2985, 3020), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Region # (Arbritrary)"""'], {}), "('Region # (Arbritrary)')\n", (2995, 3020), True, 'import matplotlib.pyplot as plt\n'), ((3025, 3039), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3037, 3039), True, 'import matplotlib.pyplot as plt\n'), ((3099, 3119), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (3110, 3119), True, 'import matplotlib.pyplot as plt\n'), ((3134, 3189), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dn_clean_data'], {'aspect': '"""auto"""', 'cmap': '"""binary"""'}), "(dn_clean_data, aspect='auto', cmap='binary')\n", (3144, 3189), True, 'import matplotlib.pyplot as plt\n'), ((3198, 3226), 'matplotlib.pyplot.title', 'plt.title', (['"""Clean BOLD Data"""'], {}), "('Clean BOLD Data')\n", (3207, 3226), True, 'import matplotlib.pyplot as plt\n'), ((3231, 3256), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Timepoint #"""'], {}), "('Timepoint #')\n", (3241, 3256), True, 'import matplotlib.pyplot as plt\n'), ((3261, 3275), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3273, 3275), True, 'import matplotlib.pyplot as plt\n'), ((3286, 3295), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3293, 3295), True, 'import matplotlib.pyplot as plt\n'), ((4385, 4429), 'nibabel.freesurfer.io.read_annot', 'nib.freesurfer.io.read_annot', (['lh_parcel_path'], {}), '(lh_parcel_path)\n', (4413, 4429), True, 'import nibabel as nib\n'), ((4447, 4491), 
'nibabel.freesurfer.io.read_annot', 'nib.freesurfer.io.read_annot', (['rh_parcel_path'], {}), '(rh_parcel_path)\n', (4475, 4491), True, 'import nibabel as nib\n'), ((5417, 5470), 'numpy.vstack', 'np.vstack', (['(lh_parcellated_data, rh_parcellated_data)'], {}), '((lh_parcellated_data, rh_parcellated_data))\n', (5426, 5470), True, 'import numpy as np\n'), ((7325, 7341), 'numpy.zeros', 'np.zeros', (['(7, 7)'], {}), '((7, 7))\n', (7333, 7341), True, 'import numpy as np\n'), ((9335, 9346), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (9343, 9346), True, 'import numpy as np\n'), ((10523, 10541), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(7)', '(1)'], {}), '(7, 1)\n', (10535, 10541), True, 'import matplotlib.pyplot as plt\n'), ((10983, 11037), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Volume # (excluding high-motion volumes)"""'], {}), "('Volume # (excluding high-motion volumes)')\n", (10993, 11037), True, 'import matplotlib.pyplot as plt\n'), ((12045, 12081), 'pandas.read_csv', 'pd.read_csv', (['confound_path'], {'sep': '"""\t"""'}), "(confound_path, sep='\\t')\n", (12056, 12081), True, 'import pandas as pd\n'), ((12160, 12184), 'numpy.median', 'np.median', (['global_signal'], {}), '(global_signal)\n', (12169, 12184), True, 'import numpy as np\n'), ((12207, 12237), 'numpy.zeros', 'np.zeros', (['parcel_data.shape[0]'], {}), '(parcel_data.shape[0])\n', (12215, 12237), True, 'import numpy as np\n'), ((12881, 12953), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'network_vals'], {'color': 'network_colors', 'tick_label': 'network_names'}), '(x, network_vals, color=network_colors, tick_label=network_names)\n', (12888, 12953), True, 'import matplotlib.pyplot as plt\n'), ((12962, 12980), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (12972, 12980), True, 'import matplotlib.pyplot as plt\n'), ((12985, 13008), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (12995, 13008), True, 'import 
matplotlib.pyplot as plt\n'), ((13637, 13692), 'os.path.join', 'os.path.join', (['path_to_fs_subject', '"""stats"""', '"""aseg.stats"""'], {}), "(path_to_fs_subject, 'stats', 'aseg.stats')\n", (13649, 13692), False, 'import os\n'), ((13707, 13766), 'os.path.join', 'os.path.join', (['path_to_fs_subject', '"""stats"""', '"""lh.aparc.stats"""'], {}), "(path_to_fs_subject, 'stats', 'lh.aparc.stats')\n", (13719, 13766), False, 'import os\n'), ((13781, 13840), 'os.path.join', 'os.path.join', (['path_to_fs_subject', '"""stats"""', '"""rh.aparc.stats"""'], {}), "(path_to_fs_subject, 'stats', 'rh.aparc.stats')\n", (13793, 13840), False, 'import os\n'), ((22062, 22086), 'numpy.matmul', 'np.matmul', (['XT_X_Neg1', 'XT'], {}), '(XT_X_Neg1, XT)\n', (22071, 22086), True, 'import numpy as np\n'), ((22930, 22956), 'numpy.matmul', 'np.matmul', (['XT_X_Neg1_XT', 'Y'], {}), '(XT_X_Neg1_XT, Y)\n', (22939, 22956), True, 'import numpy as np\n'), ((22971, 23025), 'numpy.matmul', 'np.matmul', (['bad_regressors', 'B[:bad_regressors.shape[1]]'], {}), '(bad_regressors, B[:bad_regressors.shape[1]])\n', (22980, 23025), True, 'import numpy as np\n'), ((25142, 25164), 'scipy.signal.filtfilt', 'filtfilt', (['b', 'a', 'signal'], {}), '(b, a, signal)\n', (25150, 25164), False, 'from scipy.signal import butter, filtfilt\n'), ((25554, 25577), 'numpy.corrcoef', 'np.corrcoef', (['cleaned_ap'], {}), '(cleaned_ap)\n', (25565, 25577), True, 'import numpy as np\n'), ((25602, 25625), 'numpy.corrcoef', 'np.corrcoef', (['cleaned_pa'], {}), '(cleaned_pa)\n', (25613, 25625), True, 'import numpy as np\n'), ((25631, 25643), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25641, 25643), True, 'import matplotlib.pyplot as plt\n'), ((25648, 25677), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cleaned_ap_netmat'], {}), '(cleaned_ap_netmat)\n', (25658, 25677), True, 'import matplotlib.pyplot as plt\n'), ((25682, 25696), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (25694, 25696), True, 
'import matplotlib.pyplot as plt\n'), ((25701, 25728), 'matplotlib.pyplot.title', 'plt.title', (['"""AP Conn Matrix"""'], {}), "('AP Conn Matrix')\n", (25710, 25728), True, 'import matplotlib.pyplot as plt\n'), ((25733, 25745), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25743, 25745), True, 'import matplotlib.pyplot as plt\n'), ((25772, 25801), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cleaned_pa_netmat'], {}), '(cleaned_pa_netmat)\n', (25782, 25801), True, 'import matplotlib.pyplot as plt\n'), ((25806, 25820), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (25818, 25820), True, 'import matplotlib.pyplot as plt\n'), ((25825, 25852), 'matplotlib.pyplot.title', 'plt.title', (['"""PA Conn Matrix"""'], {}), "('PA Conn Matrix')\n", (25834, 25852), True, 'import matplotlib.pyplot as plt\n'), ((25857, 25869), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25867, 25869), True, 'import matplotlib.pyplot as plt\n'), ((25979, 26004), 'matplotlib.pyplot.title', 'plt.title', (['"""abs(AP - PA)"""'], {}), "('abs(AP - PA)')\n", (25988, 26004), True, 'import matplotlib.pyplot as plt\n'), ((26009, 26023), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (26021, 26023), True, 'import matplotlib.pyplot as plt\n'), ((26028, 26040), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (26038, 26040), True, 'import matplotlib.pyplot as plt\n'), ((26360, 26372), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (26370, 26372), True, 'import matplotlib.pyplot as plt\n'), ((26377, 26404), 'matplotlib.pyplot.scatter', 'plt.scatter', (['ap_arr', 'pa_arr'], {}), '(ap_arr, pa_arr)\n', (26388, 26404), True, 'import matplotlib.pyplot as plt\n'), ((27223, 27259), 'pandas.read_csv', 'pd.read_csv', (['confound_path'], {'sep': '"""\t"""'}), "(confound_path, sep='\\t')\n", (27234, 27259), True, 'import pandas as pd\n'), ((27359, 27379), 'numpy.copy', 'np.copy', (['temp.values'], {}), '(temp.values)\n', (27366, 27379), 
True, 'import numpy as np\n'), ((27396, 27415), 'numpy.mean', 'np.mean', (['fd_arr[1:]'], {}), '(fd_arr[1:])\n', (27403, 27415), True, 'import numpy as np\n'), ((27611, 27658), 'numpy.triu_indices', 'np.triu_indices', (['np_square_matrix.shape[0]'], {'k': '(1)'}), '(np_square_matrix.shape[0], k=1)\n', (27626, 27658), True, 'import numpy as np\n'), ((28866, 28910), 'nibabel.freesurfer.io.read_annot', 'nib.freesurfer.io.read_annot', (['lh_parcel_path'], {}), '(lh_parcel_path)\n', (28894, 28910), True, 'import nibabel as nib\n'), ((28928, 28972), 'nibabel.freesurfer.io.read_annot', 'nib.freesurfer.io.read_annot', (['rh_parcel_path'], {}), '(rh_parcel_path)\n', (28956, 28972), True, 'import nibabel as nib\n'), ((29355, 29384), 'numpy.nanmedian', 'np.nanmedian', (['lh_func'], {'axis': '(1)'}), '(lh_func, axis=1)\n', (29367, 29384), True, 'import numpy as np\n'), ((29409, 29438), 'numpy.nanmedian', 'np.nanmedian', (['rh_func'], {'axis': '(1)'}), '(rh_func, axis=1)\n', (29421, 29438), True, 'import numpy as np\n'), ((30516, 30569), 'numpy.vstack', 'np.vstack', (['(lh_parcellated_data, rh_parcellated_data)'], {}), '((lh_parcellated_data, rh_parcellated_data))\n', (30525, 30569), True, 'import numpy as np\n'), ((30649, 30698), 'numpy.hstack', 'np.hstack', (['(lh_parcel_medians, rh_parcel_medians)'], {}), '((lh_parcel_medians, rh_parcel_medians))\n', (30658, 30698), True, 'import numpy as np\n'), ((1128, 1149), 'numpy.vstack', 'np.vstack', (['(tp1, tp2)'], {}), '((tp1, tp2))\n', (1137, 1149), True, 'import numpy as np\n'), ((1246, 1275), 'numpy.vstack', 'np.vstack', (['(sq_dif1, sq_dif2)'], {}), '((sq_dif1, sq_dif2))\n', (1255, 1275), True, 'import numpy as np\n'), ((4902, 4930), 'numpy.where', 'np.where', (['(lh_parcels[0] == i)'], {}), '(lh_parcels[0] == i)\n', (4910, 4930), True, 'import numpy as np\n'), ((5019, 5055), 'numpy.mean', 'np.mean', (['lh_func[vois[0], :]'], {'axis': '(0)'}), '(lh_func[vois[0], :], axis=0)\n', (5026, 5055), True, 'import numpy as np\n'), 
((5146, 5174), 'numpy.where', 'np.where', (['(rh_parcels[0] == i)'], {}), '(rh_parcels[0] == i)\n', (5154, 5174), True, 'import numpy as np\n'), ((5213, 5249), 'numpy.mean', 'np.mean', (['rh_func[vois[0], :]'], {'axis': '(0)'}), '(rh_func[vois[0], :], axis=0)\n', (5220, 5249), True, 'import numpy as np\n'), ((10626, 10640), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[i]'], {}), '(ax[i])\n', (10633, 10640), True, 'import matplotlib.pyplot as plt\n'), ((10809, 10839), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Signal Intensity"""'], {}), "('Signal Intensity')\n", (10819, 10839), True, 'import matplotlib.pyplot as plt\n'), ((10848, 10913), 'matplotlib.pyplot.title', 'plt.title', (["('Time-Course For All ' + network_names[i] + ' Parcels')"], {}), "('Time-Course For All ' + network_names[i] + ' Parcels')\n", (10857, 10913), True, 'import matplotlib.pyplot as plt\n'), ((19027, 19066), 'os.path.exists', 'os.path.exists', (['folder_for_output_files'], {}), '(folder_for_output_files)\n', (19041, 19066), False, 'import os\n'), ((19085, 19118), 'os.mkdir', 'os.mkdir', (['folder_for_output_files'], {}), '(folder_for_output_files)\n', (19093, 19118), False, 'import os\n'), ((19173, 19227), 'os.path.join', 'os.path.join', (['folder_for_output_files', '"""aseg_vols.npy"""'], {}), "(folder_for_output_files, 'aseg_vols.npy')\n", (19185, 19227), False, 'import os\n'), ((19229, 19249), 'numpy.asarray', 'np.asarray', (['aseg_vol'], {}), '(aseg_vol)\n', (19239, 19249), True, 'import numpy as np\n'), ((19263, 19329), 'os.path.join', 'os.path.join', (['folder_for_output_files', '"""lh_aseg_surface_areas.npy"""'], {}), "(folder_for_output_files, 'lh_aseg_surface_areas.npy')\n", (19275, 19329), False, 'import os\n'), ((19331, 19358), 'numpy.asarray', 'np.asarray', (['lh_surface_area'], {}), '(lh_surface_area)\n', (19341, 19358), True, 'import numpy as np\n'), ((19372, 19432), 'os.path.join', 'os.path.join', (['folder_for_output_files', '"""lh_aseg_volumes.npy"""'], {}), 
"(folder_for_output_files, 'lh_aseg_volumes.npy')\n", (19384, 19432), False, 'import os\n'), ((19434, 19455), 'numpy.asarray', 'np.asarray', (['lh_volume'], {}), '(lh_volume)\n', (19444, 19455), True, 'import numpy as np\n'), ((19469, 19533), 'os.path.join', 'os.path.join', (['folder_for_output_files', '"""lh_aseg_thicknesses.npy"""'], {}), "(folder_for_output_files, 'lh_aseg_thicknesses.npy')\n", (19481, 19533), False, 'import os\n'), ((19535, 19559), 'numpy.asarray', 'np.asarray', (['lh_thickness'], {}), '(lh_thickness)\n', (19545, 19559), True, 'import numpy as np\n'), ((19573, 19639), 'os.path.join', 'os.path.join', (['folder_for_output_files', '"""rh_aseg_surface_areas.npy"""'], {}), "(folder_for_output_files, 'rh_aseg_surface_areas.npy')\n", (19585, 19639), False, 'import os\n'), ((19641, 19668), 'numpy.asarray', 'np.asarray', (['rh_surface_area'], {}), '(rh_surface_area)\n', (19651, 19668), True, 'import numpy as np\n'), ((19682, 19742), 'os.path.join', 'os.path.join', (['folder_for_output_files', '"""rh_aseg_volumes.npy"""'], {}), "(folder_for_output_files, 'rh_aseg_volumes.npy')\n", (19694, 19742), False, 'import os\n'), ((19744, 19765), 'numpy.asarray', 'np.asarray', (['rh_volume'], {}), '(rh_volume)\n', (19754, 19765), True, 'import numpy as np\n'), ((19779, 19843), 'os.path.join', 'os.path.join', (['folder_for_output_files', '"""rh_aseg_thicknesses.npy"""'], {}), "(folder_for_output_files, 'rh_aseg_thicknesses.npy')\n", (19791, 19843), False, 'import os\n'), ((19845, 19869), 'numpy.asarray', 'np.asarray', (['rh_thickness'], {}), '(rh_thickness)\n', (19855, 19869), True, 'import numpy as np\n'), ((20509, 20532), 'numpy.sum', 'np.sum', (['lh_surface_area'], {}), '(lh_surface_area)\n', (20515, 20532), True, 'import numpy as np\n'), ((20535, 20558), 'numpy.sum', 'np.sum', (['rh_surface_area'], {}), '(rh_surface_area)\n', (20541, 20558), True, 'import numpy as np\n'), ((20781, 20848), 'os.path.join', 'os.path.join', (['folder_for_output_files', 
'"""discovery_anat_metrics.npy"""'], {}), "(folder_for_output_files, 'discovery_anat_metrics.npy')\n", (20793, 20848), False, 'import os\n'), ((20850, 20884), 'numpy.asarray', 'np.asarray', (['discovery_metric_array'], {}), '(discovery_metric_array)\n', (20860, 20884), True, 'import numpy as np\n'), ((22034, 22050), 'numpy.matmul', 'np.matmul', (['XT', 'X'], {}), '(XT, X)\n', (22043, 22050), True, 'import numpy as np\n'), ((23957, 23992), 'scipy.signal.butter', 'butter', (['order', 'low'], {'btype': '"""lowpass"""'}), "(order, low, btype='lowpass')\n", (23963, 23992), False, 'from scipy.signal import butter, filtfilt\n'), ((25939, 25955), 'numpy.abs', 'np.abs', (['corr_dif'], {}), '(corr_dif)\n', (25945, 25955), True, 'import numpy as np\n'), ((26221, 26269), 'numpy.triu_indices', 'np.triu_indices', (['cleaned_ap_netmat.shape[0]'], {'k': '(1)'}), '(cleaned_ap_netmat.shape[0], k=1)\n', (26236, 26269), True, 'import numpy as np\n'), ((26304, 26352), 'numpy.triu_indices', 'np.triu_indices', (['cleaned_pa_netmat.shape[0]'], {'k': '(1)'}), '(cleaned_pa_netmat.shape[0], k=1)\n', (26319, 26352), True, 'import numpy as np\n'), ((29462, 29497), 'numpy.where', 'np.where', (['(lh_vertex_medians < 0.001)'], {}), '(lh_vertex_medians < 0.001)\n', (29470, 29497), True, 'import numpy as np\n'), ((29530, 29565), 'numpy.where', 'np.where', (['(rh_vertex_medians < 0.001)'], {}), '(rh_vertex_medians < 0.001)\n', (29538, 29565), True, 'import numpy as np\n'), ((29833, 29861), 'numpy.where', 'np.where', (['(lh_parcels[0] == i)'], {}), '(lh_parcels[0] == i)\n', (29841, 29861), True, 'import numpy as np\n'), ((29950, 29998), 'numpy.nanmean', 'np.nanmean', (['lh_adjusted_func[vois[0], :]'], {'axis': '(0)'}), '(lh_adjusted_func[vois[0], :], axis=0)\n', (29960, 29998), True, 'import numpy as np\n'), ((30033, 30071), 'numpy.nanmean', 'np.nanmean', (['lh_vertex_medians[vois[0]]'], {}), '(lh_vertex_medians[vois[0]])\n', (30043, 30071), True, 'import numpy as np\n'), ((30161, 30189), 
'numpy.where', 'np.where', (['(rh_parcels[0] == i)'], {}), '(rh_parcels[0] == i)\n', (30169, 30189), True, 'import numpy as np\n'), ((30228, 30276), 'numpy.nanmean', 'np.nanmean', (['rh_adjusted_func[vois[0], :]'], {'axis': '(0)'}), '(rh_adjusted_func[vois[0], :], axis=0)\n', (30238, 30276), True, 'import numpy as np\n'), ((30311, 30349), 'numpy.nanmean', 'np.nanmean', (['rh_vertex_medians[vois[0]]'], {}), '(rh_vertex_medians[vois[0]])\n', (30321, 30349), True, 'import numpy as np\n'), ((618, 639), 'numpy.vstack', 'np.vstack', (['gifti_list'], {}), '(gifti_list)\n', (627, 639), True, 'import numpy as np\n'), ((798, 827), 'numpy.asarray', 'np.asarray', (['cifti_img.dataobj'], {}), '(cifti_img.dataobj)\n', (808, 827), True, 'import numpy as np\n'), ((1312, 1347), 'numpy.multiply', 'np.multiply', (['(tp1 - xhat)', '(tp2 - xhat)'], {}), '(tp1 - xhat, tp2 - xhat)\n', (1323, 1347), True, 'import numpy as np\n'), ((9445, 9471), 'numpy.where', 'np.where', (['(network_ids == i)'], {}), '(network_ids == i)\n', (9453, 9471), True, 'import numpy as np\n'), ((10588, 10614), 'numpy.where', 'np.where', (['(network_ids == i)'], {}), '(network_ids == i)\n', (10596, 10614), True, 'import numpy as np\n'), ((10723, 10784), 'matplotlib.pyplot.plot', 'plt.plot', (['parcel_data[in_network[j]]'], {'color': 'network_colors[i]'}), '(parcel_data[in_network[j]], color=network_colors[i])\n', (10731, 10784), True, 'import matplotlib.pyplot as plt\n'), ((10954, 10968), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (10964, 10968), True, 'import matplotlib.pyplot as plt\n'), ((12318, 12343), 'numpy.std', 'np.std', (['parcel_data[i, :]'], {}), '(parcel_data[i, :])\n', (12324, 12343), True, 'import numpy as np\n'), ((20396, 20438), 'numpy.multiply', 'np.multiply', (['lh_surface_area', 'lh_thickness'], {}), '(lh_surface_area, lh_thickness)\n', (20407, 20438), True, 'import numpy as np\n'), ((20448, 20490), 'numpy.multiply', 'np.multiply', (['rh_surface_area', 'rh_thickness'], {}), 
'(rh_surface_area, rh_thickness)\n', (20459, 20490), True, 'import numpy as np\n'), ((21125, 21194), 'os.path.join', 'os.path.join', (['folder_for_output_files', '"""subcortical_region_names.txt"""'], {}), "(folder_for_output_files, 'subcortical_region_names.txt')\n", (21137, 21194), False, 'import os\n'), ((21296, 21362), 'os.path.join', 'os.path.join', (['folder_for_output_files', '"""cortical_region_names.txt"""'], {}), "(folder_for_output_files, 'cortical_region_names.txt')\n", (21308, 21362), False, 'import os\n'), ((21469, 21536), 'os.path.join', 'os.path.join', (['folder_for_output_files', '"""discovery_region_names.txt"""'], {}), "(folder_for_output_files, 'discovery_region_names.txt')\n", (21481, 21536), False, 'import os\n'), ((24194, 24231), 'scipy.signal.butter', 'butter', (['order', 'high'], {'btype': '"""highpass"""'}), "(order, high, btype='highpass')\n", (24200, 24231), False, 'from scipy.signal import butter, filtfilt\n'), ((26062, 26106), 'numpy.reshape', 'np.reshape', (['corr_dif', '(corr_dif.shape[0] ** 2)'], {}), '(corr_dif, corr_dif.shape[0] ** 2)\n', (26072, 26106), True, 'import numpy as np\n'), ((7478, 7504), 'numpy.where', 'np.where', (['(network_ids == i)'], {}), '(network_ids == i)\n', (7486, 7504), True, 'import numpy as np\n'), ((7533, 7559), 'numpy.where', 'np.where', (['(network_ids == j)'], {}), '(network_ids == j)\n', (7541, 7559), True, 'import numpy as np\n'), ((24465, 24509), 'scipy.signal.butter', 'butter', (['order', '[low, high]'], {'btype': '"""bandpass"""'}), "(order, [low, high], btype='bandpass')\n", (24471, 24509), False, 'from scipy.signal import butter, filtfilt\n'), ((26169, 26185), 'numpy.abs', 'np.abs', (['corr_dif'], {}), '(corr_dif)\n', (26175, 26185), True, 'import numpy as np\n'), ((26440, 26467), 'numpy.corrcoef', 'np.corrcoef', (['ap_arr', 'pa_arr'], {}), '(ap_arr, pa_arr)\n', (26451, 26467), True, 'import numpy as np\n')] |
import unittest
import aiostripe
from aiostripe.test.helper import StripeResourceTest, DUMMY_COUPON
class CouponTest(StripeResourceTest):
    """Unit tests for aiostripe Coupon CRUD calls.

    StripeResourceTest (from aiostripe.test.helper) replaces the HTTP
    requestor with a mock, so each test only verifies that the expected
    HTTP verb, URL and payload were issued -- no network traffic occurs.
    """

    async def test_create_coupon(self):
        # Creating a coupon must POST the full coupon payload to /v1/coupons.
        await aiostripe.Coupon.create(**DUMMY_COUPON)

        self.requestor_mock.request.assert_called_with('post', '/v1/coupons',
                                                       DUMMY_COUPON, None)

    async def test_update_coupon(self):
        # Saving a locally modified coupon must POST only the changed
        # fields (here: metadata) to the coupon's own endpoint.
        coup = aiostripe.Coupon.construct_from({
            'id': 'cu_update',
            'metadata': {},
        }, 'api_key')
        coup.metadata['key'] = 'value'

        await coup.save()

        self.requestor_mock.request.assert_called_with('post', '/v1/coupons/cu_update',
                                                       {
                                                           'metadata': {
                                                               'key': 'value',
                                                           }
                                                       }, None)

    async def test_delete_coupon(self):
        # Deleting sends an empty-body DELETE to the coupon's endpoint.
        c = aiostripe.Coupon(id='cu_delete')
        await c.delete()

        self.requestor_mock.request.assert_called_with('delete', '/v1/coupons/cu_delete',
                                                       {}, None)

    async def test_detach_coupon(self):
        # Removing a customer's discount DELETEs the discount sub-resource.
        customer = aiostripe.Customer(id='cus_delete_discount')
        await customer.delete_discount()

        self.requestor_mock.request.assert_called_with('delete', '/v1/customers/cus_delete_discount/discount')
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"aiostripe.Coupon.construct_from",
"aiostripe.Coupon.create",
"aiostripe.Customer",
"aiostripe.Coupon"
] | [((1609, 1624), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1622, 1624), False, 'import unittest\n'), ((445, 524), 'aiostripe.Coupon.construct_from', 'aiostripe.Coupon.construct_from', (["{'id': 'cu_update', 'metadata': {}}", '"""api_key"""'], {}), "({'id': 'cu_update', 'metadata': {}}, 'api_key')\n", (476, 524), False, 'import aiostripe\n'), ((1102, 1134), 'aiostripe.Coupon', 'aiostripe.Coupon', ([], {'id': '"""cu_delete"""'}), "(id='cu_delete')\n", (1118, 1134), False, 'import aiostripe\n'), ((1377, 1421), 'aiostripe.Customer', 'aiostripe.Customer', ([], {'id': '"""cus_delete_discount"""'}), "(id='cus_delete_discount')\n", (1395, 1421), False, 'import aiostripe\n'), ((195, 234), 'aiostripe.Coupon.create', 'aiostripe.Coupon.create', ([], {}), '(**DUMMY_COUPON)\n', (218, 234), False, 'import aiostripe\n')] |
# Read an angle (in degrees) and print its sine, cosine and tangent.
import math

angulo = int(input('Digite o ângulo: '))
radianos = math.radians(angulo)  # trig functions expect radians; convert once
seno = math.sin(radianos)
cosseno = math.cos(radianos)
tangente = math.tan(radianos)
print('Sabendo que o ângulo vale {}º, o seno vale {:.2f}, o cosseno {:.2f} e a tangente {:.2f}.'.format(angulo, seno, cosseno, tangente))
| [
"math.radians"
] | [((178, 193), 'math.radians', 'math.radians', (['n'], {}), '(n)\n', (190, 193), False, 'import math\n'), ((210, 225), 'math.radians', 'math.radians', (['n'], {}), '(n)\n', (222, 225), False, 'import math\n'), ((242, 257), 'math.radians', 'math.radians', (['n'], {}), '(n)\n', (254, 257), False, 'import math\n')] |
import nltk
from nltk.corpus import words
from pprint import pprint
end = '_end_'


def is_valid(trie, word):
    """Return True if *word* is stored as a complete word in *trie*.

    A word is complete when walking its letters lands on a node that
    carries the end-of-word marker.
    """
    node = trie
    for ch in word:
        node = node.get(ch)
        if node is None:
            return False
    return end in node
def is_prefix(trie, word):
    """Return True if *word* can be extended to a longer word in *trie*.

    Walks the prefix trie one letter at a time; a node is a live prefix
    when it still has at least one child *letter*.  Child letters map to
    dict nodes, while the end-of-word marker maps to a plain string, so
    ``isinstance(child, dict)`` distinguishes the two.

    Bug fix: the previous test ``len(pos) > 1`` wrongly returned False
    for nodes holding a single child letter and no end marker (e.g. the
    node after 'c' on the way to 'cat'), which made the DFS caller prune
    valid search branches.
    """
    pos = trie
    for letter in word:
        if letter not in pos:
            return False
        pos = pos[letter]
    # A word prefix iff at least one child node (dict) remains.
    return any(isinstance(child, dict) for child in pos.values())
# Offsets of the 8 cells surrounding a board position.
neighbors = [(-1,-1),(-1,0),(0,-1),(1,0),(0,1),(1,1),(1,-1),(-1,1)]


def get_neighbors(row, col):
    """Return the in-bounds (row, col) cells adjacent to the given cell.

    Bounds come from the module-level ROW_LENGTH / COL_LENGTH globals,
    which are assigned after the board file is read.
    """
    return [(row + dr, col + dc)
            for dr, dc in neighbors
            if 0 <= row + dr < ROW_LENGTH and 0 <= col + dc < COL_LENGTH]
# Set of all dictionary words found on the board (filled in by dfs()).
valid_words = set()


def dfs(board, row, col, trie, visited_path, curr):
    """Depth-first search for dictionary words starting at board[row][col].

    Adds every word of length >= 3 found along the current path to the
    module-level ``valid_words`` set.

    :param board: 2-D grid of upper-case letter strings (a 'QU' cell
        contributes two letters).
    :param row: row index of the cell to visit.
    :param col: column index of the cell to visit.
    :param trie: prefix trie built from the word list.
    :param visited_path: list of (row, col) cells already on this path;
        appended to in place here, copied for each recursive branch.
    :param curr: lower-case word accumulated so far along the path.
    """
    letter = board[row][col]
    visited_path.append((row, col))
    curr += letter.lower()
    # Boggle words must be at least 3 letters long.
    if len(curr) >= 3 and is_valid(trie, curr):
        valid_words.add(curr)
    # Prune this branch once no dictionary word starts with curr.
    if not is_prefix(trie, curr):
        return
    curr_neighbors = get_neighbors(row, col)
    for n in curr_neighbors:
        if n not in visited_path:
            # Copy the path so sibling branches do not see this visit.
            dfs(board, n[0], n[1], trie, visited_path.copy(), curr)
print("Reading board...")
# Read board from board.txt: one row of letters per line.
board = []
with open("board.txt") as f:
    lines = f.readlines()
    for line in lines:
        row = []
        last = ""
        for letter in line:
            letter = letter.upper()
            # The physical 'Qu' tile is stored as the single cell 'QU'.
            if letter == 'Q':
                row.append('QU')
            elif letter == 'U' and last == 'Q':
                continue
            elif letter == '\n':
                continue
            else:
                row.append(letter)
            last = letter
        board.append(row)
ROW_LENGTH = len(board)  # number of board rows
COL_LENGTH = len(board[0])  # number of board columns (assumes rectangular board)
pprint(board)
print("Reading dictionary...")
# Get English words according to NLTK
word_list = words.words()
# Generate prefix trie: each node maps letter -> child node, with the
# special key end ('_end_') marking a complete word.
trie = dict()
for word in word_list:
    pos = trie
    last = ""
    for letter in word:
        # I hate the Qu tile: collapse 'qu' into one trie edge so it
        # matches the board's combined 'QU' cell.
        if letter == 'u' and last == 'q':
            continue
        if letter == 'q':
            letter = 'qu'
        pos = pos.setdefault(letter, {})
        last = letter
    pos[end] = end
print("Searching board...")
# Perform depth first search on the Boggle board starting at every cell.
for row in range(ROW_LENGTH):
    for col in range(COL_LENGTH):
        dfs(board, row, col, trie, [], "")
print("Valid words:")
print(valid_words)
| [
"pprint.pprint",
"nltk.corpus.words.words"
] | [((1867, 1880), 'pprint.pprint', 'pprint', (['board'], {}), '(board)\n', (1873, 1880), False, 'from pprint import pprint\n'), ((1963, 1976), 'nltk.corpus.words.words', 'words.words', ([], {}), '()\n', (1974, 1976), False, 'from nltk.corpus import words\n')] |
import open3d as o3d
import os
import glob
import numpy as np
import json
class Open3DReconstructionDataset:
    """Accessor for an Open3D reconstruction-system dataset directory.

    Expects the layout used by the methods below under *root_dir*:
    ``color/NNNNNN.jpg``, ``depth/NNNNNN.png``, ``scene/trajectory.log``
    and ``camera_intrinsic.json``.
    """

    def __init__(self, root_dir):
        """:param root_dir: dataset root directory (see class docstring)."""
        self.root_dir = root_dir
        # Frame count is inferred from the number of color images.
        # (glob.glob already returns a list; no extra list() needed.)
        self.len_frame = len(glob.glob(os.path.join(root_dir, "color/*.jpg")))

    def get_rgb_paths(self):
        """Return the ordered list of color-image paths (000000.jpg, ...)."""
        return [os.path.join(self.root_dir, "color", '{:06}.jpg'.format(i))
                for i in range(self.len_frame)]

    def get_depth_paths(self):
        """Return the ordered list of depth-image paths (000000.png, ...)."""
        return [os.path.join(self.root_dir, "depth", '{:06}.png'.format(i))
                for i in range(self.len_frame)]

    def get_trajectory(self):
        """Parse scene/trajectory.log into one 4x4 numpy pose matrix per frame.

        The log stores 5 lines per frame: a header line followed by the
        four rows of the pose matrix.  The file handle is now closed
        promptly (the previous code leaked it).
        """
        with open(os.path.join(self.root_dir, "scene/trajectory.log"), 'r') as f:
            lines = f.readlines()
        mats = []
        for i in range(0, self.len_frame * 5, 5):
            rows = [[float(t) for t in lines[i + j].split(" ")]
                    for j in range(1, 5)]
            mats.append(np.array(rows))
        return mats

    def _load_intrinsics(self):
        """Read and return camera_intrinsic.json (closing the file)."""
        with open(os.path.join(self.root_dir, "camera_intrinsic.json")) as f:
            return json.load(f)

    def get_intrinsic(self, type="raw"):
        """Return the camera intrinsics in one of three representations.

        :param type: "raw" -> parsed JSON dict; "open3d" ->
            o3d.camera.PinholeCameraIntrinsic; "matrix" -> 3x3 numpy array.
        :raises ValueError: on an unknown *type* (previously this fell
            through and silently returned None).
        """
        intrinsics = self._load_intrinsics()
        if type == "raw":
            return intrinsics
        elif type == "open3d":
            return o3d.camera.PinholeCameraIntrinsic(
                intrinsics["width"],
                intrinsics["height"],
                intrinsics["intrinsic_matrix"][0],
                intrinsics["intrinsic_matrix"][4],
                intrinsics["intrinsic_matrix"][6],
                intrinsics["intrinsic_matrix"][7],
            )
        elif type == "matrix":
            # Flat intrinsic_matrix layout used throughout this class:
            # index 0 = fx, 4 = fy, 6 = cx, 7 = cy.
            intrinsic_matrix = np.zeros((3, 3), dtype=np.float64)
            intrinsic_matrix[0, 0] = intrinsics["intrinsic_matrix"][0]  # fx
            intrinsic_matrix[1, 1] = intrinsics["intrinsic_matrix"][4]  # fy
            intrinsic_matrix[0, 2] = intrinsics["intrinsic_matrix"][6]  # cx
            intrinsic_matrix[1, 2] = intrinsics["intrinsic_matrix"][7]  # cy
            intrinsic_matrix[2, 2] = 1
            return intrinsic_matrix
        raise ValueError("unknown intrinsic type: {!r}".format(type))
"numpy.zeros",
"numpy.array",
"os.path.join",
"open3d.camera.PinholeCameraIntrinsic"
] | [((1236, 1250), 'numpy.array', 'np.array', (['rows'], {}), '(rows)\n', (1244, 1250), True, 'import numpy as np\n'), ((1576, 1800), 'open3d.camera.PinholeCameraIntrinsic', 'o3d.camera.PinholeCameraIntrinsic', (["intrinsics['width']", "intrinsics['height']", "intrinsics['intrinsic_matrix'][0]", "intrinsics['intrinsic_matrix'][4]", "intrinsics['intrinsic_matrix'][6]", "intrinsics['intrinsic_matrix'][7]"], {}), "(intrinsics['width'], intrinsics['height'],\n intrinsics['intrinsic_matrix'][0], intrinsics['intrinsic_matrix'][4],\n intrinsics['intrinsic_matrix'][6], intrinsics['intrinsic_matrix'][7])\n", (1609, 1800), True, 'import open3d as o3d\n'), ((222, 259), 'os.path.join', 'os.path.join', (['root_dir', '"""color/*.jpg"""'], {}), "(root_dir, 'color/*.jpg')\n", (234, 259), False, 'import os\n'), ((796, 847), 'os.path.join', 'os.path.join', (['self.root_dir', '"""scene/trajectory.log"""'], {}), "(self.root_dir, 'scene/trajectory.log')\n", (808, 847), False, 'import os\n'), ((1376, 1428), 'os.path.join', 'os.path.join', (['self.root_dir', '"""camera_intrinsic.json"""'], {}), "(self.root_dir, 'camera_intrinsic.json')\n", (1388, 1428), False, 'import os\n'), ((2061, 2095), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {'dtype': 'np.float64'}), '((3, 3), dtype=np.float64)\n', (2069, 2095), True, 'import numpy as np\n'), ((1502, 1554), 'os.path.join', 'os.path.join', (['self.root_dir', '"""camera_intrinsic.json"""'], {}), "(self.root_dir, 'camera_intrinsic.json')\n", (1514, 1554), False, 'import os\n'), ((1975, 2027), 'os.path.join', 'os.path.join', (['self.root_dir', '"""camera_intrinsic.json"""'], {}), "(self.root_dir, 'camera_intrinsic.json')\n", (1987, 2027), False, 'import os\n')] |
import numpy as np

x = [0, 1, 2, 3, 4]
y = [5, 6, 7, 8, 9]

# Element-wise addition the manual way...
z = [i + j for i, j in zip(x, y)]
print(z)

# ...and with the built-in numpy ufunc.
z = np.add(x, y)
print(z)


def my_add(a, b):
    return a + b


# Wrap the plain Python function as a 2-input / 1-output ufunc.
my_add = np.frompyfunc(my_add, 2, 1)
z = my_add(x, y)
print(z)

# Compare the types of the three callables.
print(type(np.add))
print(type(np.concatenate))
print(type(my_add))
| [
"numpy.add",
"numpy.frompyfunc"
] | [((123, 135), 'numpy.add', 'np.add', (['x', 'y'], {}), '(x, y)\n', (129, 135), True, 'import numpy as np\n'), ((190, 217), 'numpy.frompyfunc', 'np.frompyfunc', (['my_add', '(2)', '(1)'], {}), '(my_add, 2, 1)\n', (203, 217), True, 'import numpy as np\n')] |
"""
Copyright 2021 <NAME> (<EMAIL>)
Licensed under the Apache License v2.0
http://www.apache.org/licenses/LICENSE-2.0
"""
from multiprocessing import Process, Lock
from tool import log as _log
import time, argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument('-head', '--headless', dest='headless',
default=False, action='store_true', help='режим безголового браузера')
args = parser.parse_args()
log = _log("MAIN")
def proc1():
    """Worker: run the first process (browser automation) to completion.

    Imported lazily so the module only loads inside this child process;
    the headless flag comes from the module-level CLI args.
    """
    from first_process import init
    main = init( args.headless )
    main()
def proc2():
    """Worker: run the Flask web server (imported lazily, see proc1)."""
    from flasksrv import app_run
    app_run()
def main():
    """Launch the browser worker, then the web server 5 s later, and
    wait for both daemon processes to exit."""
    browser_proc = Process(target=proc1, daemon=True)
    server_proc = Process(target=proc2, daemon=True)
    browser_proc.start()
    time.sleep(5)  # give the first process a head start before serving
    server_proc.start()
    browser_proc.join()
    server_proc.join()
if __name__ == '__main__':
main() | [
"argparse.ArgumentParser",
"first_process.init",
"time.sleep",
"flasksrv.app_run",
"tool.log",
"multiprocessing.Process"
] | [((225, 295), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.RawTextHelpFormatter'}), '(formatter_class=argparse.RawTextHelpFormatter)\n', (248, 295), False, 'import time, argparse\n'), ((471, 483), 'tool.log', '_log', (['"""MAIN"""'], {}), "('MAIN')\n", (475, 483), True, 'from tool import log as _log\n'), ((544, 563), 'first_process.init', 'init', (['args.headless'], {}), '(args.headless)\n', (548, 563), False, 'from first_process import init\n'), ((628, 637), 'flasksrv.app_run', 'app_run', ([], {}), '()\n', (635, 637), False, 'from flasksrv import app_run\n'), ((660, 694), 'multiprocessing.Process', 'Process', ([], {'target': 'proc1', 'daemon': '(True)'}), '(target=proc1, daemon=True)\n', (667, 694), False, 'from multiprocessing import Process, Lock\n'), ((705, 739), 'multiprocessing.Process', 'Process', ([], {'target': 'proc2', 'daemon': '(True)'}), '(target=proc2, daemon=True)\n', (712, 739), False, 'from multiprocessing import Process, Lock\n'), ((762, 775), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (772, 775), False, 'import time, argparse\n')] |
"""Asynchronized (distributed) cnn training."""
import os # noqa isort:skip
os.environ['OMP_NUM_THREADS'] = '1' # noqa isort:skip
import argparse
import logging
import pprint
import time
from dataclasses import asdict, dataclass, field
from functools import partial
from pathlib import Path
import numpy as np
from dqn.actor_manager import ActorManagerClient, run_actor_manager_server
from dqn.actor_runner import ActorRunner
from dqn.async_train import AsyncTrainerConfig, async_train
from dqn.cnn.config import CNNConfigBase
from dqn.cnn.datum import Batch
from dqn.cnn.evaluator import run_evaluator_server
from dqn.cnn.learner import Learner
from dqn.cnn.replay_buffer import ReplayBufferServer
from dqn.cnn.run_actor import run_actor
from dqn.evaluator import EvaluatorClient, EvaluatorServerRunner
from dqn.param_distributor import (ParamDistributorClient,
run_param_distributor_server)
from dqn.policy import PolicyParam
from dqn.subprocess_manager import SubprocessManager
from dqn.utils import init_log_dir, init_random_seed
@dataclass
class Config(CNNConfigBase):
    """Configuration of CNN asynchronized training.

    ``trainer`` uses a ``default_factory`` so every Config instance gets
    its own AsyncTrainerConfig; the previous class-level default created
    one shared instance that mutations on any Config would leak into all
    others.
    """
    trainer: AsyncTrainerConfig = field(default_factory=AsyncTrainerConfig)
def init_actor_runner(config: Config) -> ActorRunner:
    """Build an ActorRunner whose workers start from a uniform policy.

    Args:
        config: Configuration of training.
    """
    env_count = config.actor.vector_env_size
    # One epsilon/gamma entry per vectorized environment.
    initial_param = PolicyParam(epsilon=np.ones(env_count),
                                gamma=np.ones(env_count) * config.gamma)
    return ActorRunner(
        n_processes=config.n_actor_process,
        run_actor_func=partial(run_actor, init_policy_param=initial_param, config=config))
def main_run_actor(config: Config, logger: logging.Logger = logging.getLogger(__name__)) -> None:
    """Run actor forever.

    Args:
        config: Training configuration.
        logger: Logger object.
    """
    runner = init_actor_runner(config)
    logger.info("Actor runner initialized.")
    try:
        runner.start()
        logger.info("Actor runner start.")
        # Poll worker liveness once a second; die loudly if a worker dies.
        while True:
            assert runner.workers_alive, f"Actor runner's worker died."
            time.sleep(1)
    finally:
        logger.info(f"Finalize actor runner")
        runner.finalize()
def main(log_dir: Path, enable_actor: bool, config: Config,
         logger: logging.Logger = logging.getLogger(__name__)) -> None:
    """Initialize and kick all the components of asynchronized training.

    Args:
        log_dir: Directory to put log data.
        enable_actor: When False, no actor processes are forked here
            (actors are expected to run in a separate process).
        config: Training configuration.
        logger: Logger object.
    """
    # show configuration
    logger.info(pprint.pformat(asdict(config)))
    # init config
    if not enable_actor:
        logger.warning('enable_actor is false. You should run actor in other process')
        config.n_actor_process = 0  # disable actor
    # NOTE: All child processes should be forked before init gRPC channel (https://github.com/grpc/grpc/issues/13873)
    subprocess_manager = SubprocessManager()
    # init actor manager
    subprocess_manager.append_worker(
        partial(run_actor_manager_server,
                url=config.actor_manager_url,
                gamma=config.gamma,
                config=config.trainer.actor_manager))
    # init param distributor
    subprocess_manager.append_worker(partial(run_param_distributor_server, url=config.param_distributor_url))
    # init evaluator
    evaluator_runner = EvaluatorServerRunner(run_evaluator_server_func=partial(run_evaluator_server, config=config))
    # may init actor (no-op when n_actor_process was zeroed above)
    actor_runner = init_actor_runner(config)
    # init replay buffer
    replay_buffer_server = ReplayBufferServer(config=config)
    # init learner
    learner = Learner(config=config)
    try:
        def check_subprocess_func():
            """Helper function to check child processes."""
            assert subprocess_manager.workers_alive, 'Subprocess manager worker has been dead'
            assert evaluator_runner.workers_alive, 'Evaluator runner worker has been dead'
            assert actor_runner.workers_alive, 'Actor runner worker has been dead'

        check_subprocess_func()
        # init gRPC clients (only after all children have been forked)
        evaluator_runner.start()
        actor_runner.start()
        evaluator_client = EvaluatorClient(url=config.evaluator_url)
        param_distributor_client = ParamDistributorClient(url=config.param_distributor_url)
        actor_manager_client = ActorManagerClient(url=config.actor_manager_url)
        # run train (blocks until training finishes or raises)
        async_train(log_dir=log_dir,
                    check_subprocess_func=check_subprocess_func,
                    actor_manager_client=actor_manager_client,
                    evaluator_client=evaluator_client,
                    param_distributor_client=param_distributor_client,
                    replay_buffer_server=replay_buffer_server,
                    learner=learner,
                    batch_from_sample=Batch.from_buffer_sample,
                    config=config.trainer)
    finally:
        # Tear everything down even on failure, children last-started first.
        replay_buffer_server.finalize()
        subprocess_manager.finalize()
        evaluator_runner.finalize()
        actor_runner.finalize()
if __name__ == '__main__':
    # Command-line entry point: parse args, set up logging/seed, dispatch.
    parser = argparse.ArgumentParser(description="Asynchronized CNN-DQN training.")
    parser.add_argument('log_dir', type=Path, help="Directory to put log and snapshots")
    parser.add_argument('--log_level',
                        type=str,
                        choices=('debug', 'info', 'error', 'critical'),
                        default='info',
                        help="Logging level")
    parser.add_argument('--disable_actor', action='store_true', help="Disable actor module or not.")
    parser.add_argument('--run_only_actor', action='store_true', help="Running only actor module or not.")
    parser.add_argument('--config', type=Path, help="Path of DQN configuration YAML file.")
    parser.add_argument('--seed', type=int, default=1, help="Random seed value.")
    args = parser.parse_args()

    # init configuration (fall back to defaults when no YAML is given)
    config = Config.load_from_yaml(args.config) if args.config else Config()

    # init log_dir (actor-only mode logs to stderr only, no files)
    log_handlers = [logging.StreamHandler()]
    if not args.run_only_actor:
        args.log_dir.mkdir(exist_ok=False, parents=False)
        init_log_dir(args.log_dir, config)
        log_handlers.append(logging.FileHandler(args.log_dir / 'main.log'))

    # init logger
    logging.basicConfig(level=getattr(logging, args.log_level.upper()),
                        format='[%(asctime)s %(name)s %(levelname)s] %(message)s',
                        datefmt='%Y/%m/%d %I:%M:%S',
                        handlers=log_handlers)

    # init random seed
    init_random_seed(args.seed)

    # start training or exploration
    if args.run_only_actor:
        assert not args.disable_actor, 'run_actor should be specified without disable_actor.'
        main_run_actor(config)
    else:
        main(args.log_dir, not args.disable_actor, config)
| [
"functools.partial",
"dqn.async_train.AsyncTrainerConfig",
"dqn.cnn.replay_buffer.ReplayBufferServer",
"argparse.ArgumentParser",
"dqn.evaluator.EvaluatorClient",
"dqn.actor_manager.ActorManagerClient",
"dqn.async_train.async_train",
"dqn.cnn.learner.Learner",
"dqn.param_distributor.ParamDistributor... | [((1198, 1218), 'dqn.async_train.AsyncTrainerConfig', 'AsyncTrainerConfig', ([], {}), '()\n', (1216, 1218), False, 'from dqn.async_train import AsyncTrainerConfig, async_train\n'), ((1804, 1831), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1821, 1831), False, 'import logging\n'), ((2441, 2468), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2458, 2468), False, 'import logging\n'), ((3086, 3105), 'dqn.subprocess_manager.SubprocessManager', 'SubprocessManager', ([], {}), '()\n', (3103, 3105), False, 'from dqn.subprocess_manager import SubprocessManager\n'), ((3747, 3780), 'dqn.cnn.replay_buffer.ReplayBufferServer', 'ReplayBufferServer', ([], {'config': 'config'}), '(config=config)\n', (3765, 3780), False, 'from dqn.cnn.replay_buffer import ReplayBufferServer\n'), ((3815, 3837), 'dqn.cnn.learner.Learner', 'Learner', ([], {'config': 'config'}), '(config=config)\n', (3822, 3837), False, 'from dqn.cnn.learner import Learner\n'), ((5301, 5371), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Asynchronized CNN-DQN training."""'}), "(description='Asynchronized CNN-DQN training.')\n", (5324, 5371), False, 'import argparse\n'), ((6784, 6811), 'dqn.utils.init_random_seed', 'init_random_seed', (['args.seed'], {}), '(args.seed)\n', (6800, 6811), False, 'from dqn.utils import init_log_dir, init_random_seed\n'), ((3178, 3303), 'functools.partial', 'partial', (['run_actor_manager_server'], {'url': 'config.actor_manager_url', 'gamma': 'config.gamma', 'config': 'config.trainer.actor_manager'}), '(run_actor_manager_server, url=config.actor_manager_url, gamma=\n config.gamma, config=config.trainer.actor_manager)\n', (3185, 3303), False, 'from functools import partial\n'), ((3415, 3486), 'functools.partial', 'partial', (['run_param_distributor_server'], {'url': 'config.param_distributor_url'}), '(run_param_distributor_server, 
url=config.param_distributor_url)\n', (3422, 3486), False, 'from functools import partial\n'), ((4367, 4408), 'dqn.evaluator.EvaluatorClient', 'EvaluatorClient', ([], {'url': 'config.evaluator_url'}), '(url=config.evaluator_url)\n', (4382, 4408), False, 'from dqn.evaluator import EvaluatorClient, EvaluatorServerRunner\n'), ((4444, 4500), 'dqn.param_distributor.ParamDistributorClient', 'ParamDistributorClient', ([], {'url': 'config.param_distributor_url'}), '(url=config.param_distributor_url)\n', (4466, 4500), False, 'from dqn.param_distributor import ParamDistributorClient, run_param_distributor_server\n'), ((4532, 4580), 'dqn.actor_manager.ActorManagerClient', 'ActorManagerClient', ([], {'url': 'config.actor_manager_url'}), '(url=config.actor_manager_url)\n', (4550, 4580), False, 'from dqn.actor_manager import ActorManagerClient, run_actor_manager_server\n'), ((4610, 4956), 'dqn.async_train.async_train', 'async_train', ([], {'log_dir': 'log_dir', 'check_subprocess_func': 'check_subprocess_func', 'actor_manager_client': 'actor_manager_client', 'evaluator_client': 'evaluator_client', 'param_distributor_client': 'param_distributor_client', 'replay_buffer_server': 'replay_buffer_server', 'learner': 'learner', 'batch_from_sample': 'Batch.from_buffer_sample', 'config': 'config.trainer'}), '(log_dir=log_dir, check_subprocess_func=check_subprocess_func,\n actor_manager_client=actor_manager_client, evaluator_client=\n evaluator_client, param_distributor_client=param_distributor_client,\n replay_buffer_server=replay_buffer_server, learner=learner,\n batch_from_sample=Batch.from_buffer_sample, config=config.trainer)\n', (4621, 4956), False, 'from dqn.async_train import AsyncTrainerConfig, async_train\n'), ((6248, 6271), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (6269, 6271), False, 'import logging\n'), ((6371, 6405), 'dqn.utils.init_log_dir', 'init_log_dir', (['args.log_dir', 'config'], {}), '(args.log_dir, config)\n', (6383, 6405), False, 'from 
dqn.utils import init_log_dir, init_random_seed\n'), ((1408, 1445), 'numpy.ones', 'np.ones', (['config.actor.vector_env_size'], {}), '(config.actor.vector_env_size)\n', (1415, 1445), True, 'import numpy as np\n'), ((1651, 1716), 'functools.partial', 'partial', (['run_actor'], {'init_policy_param': 'policy_param', 'config': 'config'}), '(run_actor, init_policy_param=policy_param, config=config)\n', (1658, 1716), False, 'from functools import partial\n'), ((2240, 2253), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2250, 2253), False, 'import time\n'), ((2742, 2756), 'dataclasses.asdict', 'asdict', (['config'], {}), '(config)\n', (2748, 2756), False, 'from dataclasses import asdict, dataclass\n'), ((3581, 3625), 'functools.partial', 'partial', (['run_evaluator_server'], {'config': 'config'}), '(run_evaluator_server, config=config)\n', (3588, 3625), False, 'from functools import partial\n'), ((6434, 6480), 'logging.FileHandler', 'logging.FileHandler', (["(args.log_dir / 'main.log')"], {}), "(args.log_dir / 'main.log')\n", (6453, 6480), False, 'import logging\n'), ((1484, 1521), 'numpy.ones', 'np.ones', (['config.actor.vector_env_size'], {}), '(config.actor.vector_env_size)\n', (1491, 1521), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Set the path explicitly #
sys.path.insert(0, os.path.abspath(__file__+"/../.."))
import unittest
class BasicTestSuit(unittest.TestCase):
"""Basic test cases."""
def test_absolute_truth_and_meaning(self):
assert True
def test_check_environment(self):
hrcbus.check_environment()
pass
if __name__ == '__main__':
unittest.main()
| [
"unittest.main"
] | [((383, 398), 'unittest.main', 'unittest.main', ([], {}), '()\n', (396, 398), False, 'import unittest\n')] |
# class does ...
import sys
import numpy as np
import math
import random
import time
from enum import Enum
from chapters.wall.hyperspace_helper.Segment import Segment
from chapters.wall.hyperspace_helper.AssetLibrary import AssetLibrary
from chapters.wall.hyperspace_helper.RingAssembly import RingAssembly
from chapters.wall.hyperspace_helper.Curve import Curve
from chapters.wall.hyperspace_helper.Maze import Maze
class SCENE_STATE(Enum):
    """Lifecycle phases of the hyperspace scene."""
    INTRO = 0  # pod exiting space ship
    OUTRO = 1  # pod entering white success
    DEATH = 2  # pod entering black death
    PLAY = 3   # normal operation (birth and hot controls)
# (ensure camera is lined up with pod after intro sequence)
#strongly advised: last segment before death and outro be straight to allow for extrapolation
#track the status of the pod through the maze
class SceneManager:
MAX_POD_DISPLACEMENT=3.5 #2.8 #maximum distance the pod can be from the center of the playfield
# ~4x of Segment.BRANCH_DISPLACEMENT_DISTANCE
POD_TRANSLATION_PER_SECOND=10.0 #rate of pod movement per second
POD_ROTATION_DEGREES_PER_SECOND=70.0 #rate of rotation animatic when pod is translating
POD_MAX_ROTATION=[6.0,12.0] #x-translation, y-translation, degrees
INTRO_SECONDS=1 #number of seconds to wait on start for cut scene to play
OUTRO_SECONDS=1
DEATH_SECONDS=1
CAMERA_LAG_DISTANCE=12 #pi3d distance unit between camera and pod
def __init__(self):
self.np=np #for some reason, Python forgets that np was imported...???? so it needs to be stored here for later use... idk/idc
def clean(self,pi3d,display_3d,camera_3d):
#variables
self.pi3d=pi3d
#why is np appaear as a UnboundedLocalError? I imported it up above...
self.pod_offset=self.np.array([0.0,0.0]) #x,y offset
self.pod_offset_rate=self.np.array([0.0,0.0]) #Z,X rotation angles for translation animatic (rotate right to translate right)
self.scene={'state':SCENE_STATE.INTRO,'start_seconds':0,'end_seconds':self.INTRO_SECONDS,'ratio':0.0}
self.life=0
self.level_start_time_seconds=0
self.segment_list=[]
self.pod_segment=None
self.camera_segment=None
self.last_key=-1 #delete from final program - used for smoothing pi3d keyboard inputs
#playfield
self.display = display_3d #self.pi3d.Display.create(background=(0.0, 0.0, 0.0, 0.0))
self.camera = camera_3d #self.pi3d.Camera()
self.light = self.pi3d.Light(lightpos=(10,-10,-7),lightcol=(0.75,0.75,0.45), lightamb=(0.1,0.1,0.42),is_point=False)
#self.keys = self.pi3d.Keyboard() #TODO: remove later...
#objects
self.asset_library=AssetLibrary(self.pi3d)
self.pod=self.asset_library.pod_frame.shallow_clone() #note: all children remain intact
self.maze=Maze()
#debug testing
self.maze.clean()
print(self.maze.getSegmentsBetweenNodes(100,91))
print(self.maze.getSegmentsBetweenNodes(91,100))
print(self.maze.getSegmentsBetweenNodes(91,91))
#print(maze.linear_definition)
#print(maze.branch_definition)
#print(maze.segment_definition)
#print(maze.debris_definition)
segments=self.maze.getSegmentIdAfter(2,3)
print("SceneManager.clean: Next segment: ",segments)
segments=segments[0]
temp2=self.maze.getPopulatedSegment(segments["segment_id"],segments["is_forward"],segments["is_branch"],self.asset_library,self.np.array([0,0,0]),self.np.eye(3),0)
print("SceneManager.clean: populated: ",temp2)
temp3=self.maze.getFirstPopulatedSegment(self.asset_library,0)
print("SceneManager.clean: first segment: ",temp3)
def __getRingCount(self):
count=0
for segment in self.segment_list:
count+=len(segment.ring_assembly_list)
return count
#update list of parameterized arcs
def __updateSegmentQueue(self,level_elapsed_time_seconds):
#if any segments in list are u<0 for camera (already completely used), then dispose of segment
#if any segment has no succesor and the [end time - current time] < queue_time_depth
# then get and append successor
#initialization
if(len(self.segment_list)==0):
segment_joint=self.getSegmentAfter(None)
first_segment=segment_joint['next_segment'][0]
self.segment_list.append(first_segment)
self.pod_segment=first_segment
self.camera_segment=first_segment
#append segments to end when the end is near
segment_index=0
while(segment_index<len(self.segment_list)): #keep adding segments to end when needed
segment=self.segment_list[segment_index]
end_time=segment.durationSeconds()+segment.start_time_seconds
cut_off_time=level_elapsed_time_seconds+RingAssembly.PRE_RENDER_SECONDS
#if(level_elapsed_time_seconds<7):
# print('query: '+str(end_time)+"<"+str(cut_off_time))
# print('size: '+str(len(self.segment_list)))
if(end_time<cut_off_time and segment.hasTraceabilityTo(self.pod_segment)):
if(segment.is_branch):
if(segment.successor[1] is None):
segment_joint=self.getSegmentAfter(segment)
for itr in range(2):
seg_id=itr+1
self.segment_list.append(segment_joint['next_segment'][seg_id])
segment.successor[seg_id]=segment_joint['next_segment'][seg_id]
segment_joint['next_segment'][seg_id].predecessor=segment
else:
if(segment.successor[0] is None):
segment_joint=self.getSegmentAfter(segment)
self.segment_list.append(segment_joint['next_segment'][0])
segment.successor[0]=segment_joint['next_segment'][0]
segment_joint['next_segment'][0].predecessor=segment
segment_index+=1
#remove old segments
camera_time=self.__getCameraTime(level_elapsed_time_seconds)
for segment_index in reversed(range(len(self.segment_list))): #traverse backward to allow for deletion
segment=self.segment_list[segment_index]
ratio=segment.getRatio(camera_time)
if(ratio>1):
if(not segment==self.camera_segment):
segment=self.segment_list.pop(segment_index) #delete stale segments
segment.dispose()
#update graphical rotation of rings, derbis, etc
def __updateSegments(self,level_elapsed_time_seconds):
for segment in self.segment_list:
segment.update(level_elapsed_time_seconds,self.light)
#assumes input for 'k' as 4-element bool np.array
# in the following order: [NORTH,WEST,SOUTH,EAST], where True is an active user input command
def __updatePodPosition(self,k,delta_time):
#position
pod_target=np.array([0,0])
is_x=False
is_y=False
IS_AIRPLANE_CONTROLS=True #True is up joystick means down motion
#if(k==ord('a')):
#pod_target[0]=-1
#is_x=True
#if(k==ord('d')):
#pod_target[0]=1
#is_x=True
#if(k==ord('s')):
#pod_target[1]=1
#is_y=True
#if(k==ord('w')):
#pod_target[1]=-1
#is_y=True
if(k[1]):
pod_target[0]=-1
is_x=True
if(k[3]):
pod_target[0]=1
is_x=True
if(k[2]):
if(IS_AIRPLANE_CONTROLS):
pod_target[1]=-1
else:
pod_target[1]=1
is_y=True
if(k[0]):
if(IS_AIRPLANE_CONTROLS):
pod_target[1]=1
else:
pod_target[1]=-1
is_y=True
delta_pod=pod_target*self.POD_TRANSLATION_PER_SECOND*delta_time*(0.707 if (is_x and is_y) else 1.0)
pod_pos=self.pod_offset+delta_pod
scale=np.linalg.norm(pod_pos)
if(scale>self.MAX_POD_DISPLACEMENT):
pod_pos=pod_pos*self.MAX_POD_DISPLACEMENT/scale
self.pod_offset=pod_pos
#rotation animatic
x_rate=self.pod_offset_rate[0] #x-translation, Z-rotation
delta_x=self.POD_ROTATION_DEGREES_PER_SECOND*delta_time
#if(k==ord('d')):#right
#delta_x=-delta_x
#elif(k==ord('a')):#left
#pass
if(k[3]):#right
delta_x=-delta_x
elif(k[1]):#left
pass
else:#neither, return to center
if(x_rate<0): delta_x=min(-x_rate,delta_x)
elif(x_rate>0): delta_x=max(-x_rate,-delta_x)
else: delta_x=0
self.pod_offset_rate[0]+=delta_x
y_rate=self.pod_offset_rate[1] #y-translation, Y-rotation
delta_y=self.POD_ROTATION_DEGREES_PER_SECOND*delta_time
#if(k==ord('s')):#up
#delta_y=-delta_y
#elif(k==ord('w')):#down
#pass
if(k[0]):#up
if(IS_AIRPLANE_CONTROLS):
pass
else:
delta_y=-delta_y
elif(k[2]):#down
if(IS_AIRPLANE_CONTROLS):
delta_y=-delta_y
else:
pass
else:#neither, return to center
if(y_rate<0): delta_y=min(-y_rate,delta_y)
elif(y_rate>0): delta_y=max(-y_rate,-delta_y)
else: delta_y=0
self.pod_offset_rate[1]+=delta_y
for itr in range(2): #bound rotation
self.pod_offset_rate[itr]=max(self.pod_offset_rate[itr],-self.POD_MAX_ROTATION[itr])
self.pod_offset_rate[itr]=min(self.pod_offset_rate[itr],self.POD_MAX_ROTATION[itr])
	def __updateProps(self,level_elapsed_time_seconds):
		"""Apply the current pod/camera/light poses to their scene objects.

		level_elapsed_time_seconds -- seconds since the start of the current life.
		"""
		prop_orientation=self.getPropOrientation(level_elapsed_time_seconds)
		#light
		light_pos=prop_orientation['light']['position']
		self.light.position((light_pos[0],light_pos[1],light_pos[2]))
		#pod: the first child carries the user-steered tilt (pod_offset_rate),
		#the parent carries the track pose
		pod_pos=prop_orientation['pod']['position']
		pod_rot=prop_orientation['pod']['rotation_euler']
		self.pod.children[0].rotateToX(self.pod_offset_rate[1])
		self.pod.children[0].rotateToZ(self.pod_offset_rate[0])
		self.pod.position(pod_pos[0],pod_pos[1],pod_pos[2])
		self.pod.rotateToX(pod_rot[0])
		self.pod.rotateToY(pod_rot[1])
		self.pod.rotateToZ(pod_rot[2])
		self.pod.set_light(self.light)
		#TO DO make recursive set_light method for pod
		#(for now set_light is applied to each nesting level of the pod by hand)
		self.pod.children[0].set_light(self.light)
		self.pod.children[0].children[0].set_light(self.light)
		self.pod.children[0].children[0].children[0].set_light(self.light)
		#camera: reset, then re-apply absolute position and rotation
		camera_pos=prop_orientation['camera']['position']
		camera_rot=prop_orientation['camera']['rotation_euler']
		self.camera.reset()
		self.camera.position(camera_pos)
		# print("SceneManager.__updateProps: camera_pos:",camera_pos)
		self.camera.rotate(camera_rot[0],camera_rot[1],camera_rot[2])
def __drawSegments(self):
for segment in self.segment_list:
segment.draw()
	def __updatePodSegment(self,level_elapsed_time_seconds):
		"""Advance the pod to the segment containing the current time and cache its orientation."""
		#getRatio()>1 means the pod has passed the end of its current segment
		while(self.pod_segment.getRatio(level_elapsed_time_seconds)>1):
			self.pod_segment=self.pod_segment.getSuccessor()
			if(self.pod_segment.is_branch): #when entering a branch, decide which path to take
				#steer left at the fork when the pod is offset to the left of center
				is_left=self.pod_offset[0]<0
				self.pod_segment.decideBranch(level_elapsed_time_seconds,is_left)
				#print('is_left: ',self.pod_segment.isLeft())
		self.pod_orientation=self.pod_segment.getOrientationAtTime(level_elapsed_time_seconds)
def __updateCameraSegment(self,level_elapsed_time_seconds):
camera_time=self.__getCameraTime(level_elapsed_time_seconds)
while(self.camera_segment.getRatio(camera_time)>1):
self.camera_segment=self.camera_segment.getSuccessor()
self.camera_orientation=self.camera_segment.getOrientationAtTime(camera_time)
def __getCameraTime(self,level_elapsed_time_seconds):
camera_lag_time=self.CAMERA_LAG_DISTANCE/(Segment.DISTANCE_BETWEEN_RINGS*Segment.RINGS_PER_SECOND)
camera_time=level_elapsed_time_seconds-camera_lag_time
return camera_time
def getSegmentAfter(self,prev_segment):
if(True): #create per config file
return self.getSegmentAfter_config(prev_segment)
else: #create randomly
return self.getSegmentAfter_random(prev_segment)
#note: is is assumed super method will populate the retuend segment's predecessor
	def getSegmentAfter_config(self,prev_segment):
		"""Build the segment(s) that follow prev_segment from the maze configuration.

		prev_segment -- the segment just finished, or None to request the very
			first segment of the level.
		Returns {'prev_segment':prev_segment,'next_segment':out_segment} where
		out_segment is [Segment,None,None] for a straight continuation or
		[None,Segment,Segment] for a branch (the two alternatives).
		"""
		print("SceneManager.getSegmentAfter_config: prev_segment: ",prev_segment)
		if(prev_segment is None):
			next_segment=self.maze.getFirstPopulatedSegment(self.asset_library,0)#if no segment provided, return the first one
			#precon: time is measured in seconds from the start of the current life
			out_segment=[next_segment,None,None]
		else:
			end_point=prev_segment.getEndPoints()
			prev_id=prev_segment.segment_id
			prev2_id=-100 if prev_segment.predecessor is None else prev_segment.predecessor.segment_id #precon: the id of the segment before the first segment needs to be -100
			next_segment_ids=self.maze.getSegmentIdAfter(prev2_id,prev_id)
			print("SceneManager.getSegmentAfter_config: next_segment_ids: ",next_segment_ids)
			was_branch=len(next_segment_ids)>1
			out_segment=[None] if was_branch else [] #goal is to make either [None,Segment,Segment] for a branch, or [Segment,None,None] for straight
			for itr in range(2 if was_branch else 1):#precon: only two paths come out of any one branch node
				next_segment_def=next_segment_ids[itr]
				next_segment=self.maze.getPopulatedSegment(next_segment_def["segment_id"],
					next_segment_def["is_forward"],next_segment_def["is_branch"],
					self.asset_library,end_point[itr]["position"],
					end_point[itr]["rotation_matrix"],end_point[itr]["timestamp_seconds"])
				out_segment.append(next_segment)
			if(not was_branch):
				out_segment.append(None)
				out_segment.append(None)
		return {'prev_segment':prev_segment,'next_segment':out_segment}
	#TODO: currently a placeholder for Maze...
	#given a segment, return the parameters needed to build the next segment(s)
#input:
#Segment
#output:
#{'previous_segment':Segment,'next_segment':[Segment,Segment,Segment]}
# where previous_segment is the input
# and one of the following is True: 'next_segment'[0] is None OR 'next_segment'[1:2] is None
	def getSegmentAfter_random(self,segment):
		"""Randomly generate the segment(s) that follow *segment*.

		segment -- the segment just finished, or None to request the first
			segment of the level.
		Returns {'prev_segment':...,'next_segment':[...]} in the same
		[Segment,None,None] / [None,Segment,Segment] shape as
		getSegmentAfter_config().
		"""
		if(segment is None):
			#return first segment
			#TODO load from file
			previous_segment=None
			ring_count=7
			segment=Segment(self.asset_library,False,np.array([0,0,0]),np.identity(3),0,
				120,60,ring_count)
			for ring_id in range(ring_count):
				u=ring_id/ring_count
				segment.addRingAssembly(self.asset_library,u,
					ring_rotation_rate=RingAssembly.RING_ROTATION_DEGREES_PER_SECOND,
					debris_rotation_rate=RingAssembly.DEBRIS_ROTATION_DEGREES_PER_SECOND)
			next_segment=[segment,None,None]
		else:
			#this_segment_id=segment.segment_id
			previous_segment=segment
			#randomize geometry for the two potential successor segments
			ring_count=[2+random.randint(0,3),2+random.randint(0,3)]
			curvature=[random.randint(0,30),random.randint(0,30)]
			orientation=[random.randint(0,360),random.randint(0,360)]
			was_branch=segment.is_branch #input segment was a branch
			was_branch2=segment.predecessor is None or segment.predecessor.is_branch
			#print('was_branch: ',was_branch)
			is_branch=[random.randint(0,100)<20,random.randint(0,100)<20] #next segment is a branch (20% chance)
			#never allow two branches in a row
			if(was_branch or was_branch2):
				is_branch=[False,False]
			#is_branch=[False,False]
			end_point=segment.getEndPoints()
			if(was_branch):
				next_segment=[None]
				for itr in range(2):
					this_segment=Segment(self.asset_library,is_branch[itr],end_point[itr]['position'],
						end_point[itr]['rotation_matrix'],end_point[itr]['timestamp_seconds'],
						curvature[itr],orientation[itr],ring_count[itr])
					next_segment.append(this_segment)
					if(not is_branch[itr]):
						for ring_id in range(ring_count[itr]):
							u=ring_id/ring_count[itr]
							this_segment.addRingAssembly(self.asset_library,u,
								ring_rotation_rate=RingAssembly.RING_ROTATION_DEGREES_PER_SECOND,
								debris_rotation_rate=RingAssembly.DEBRIS_ROTATION_DEGREES_PER_SECOND)
			else:
				next_segment=[]
				this_segment=Segment(self.asset_library,is_branch[0],end_point[0]['position'],
					end_point[0]['rotation_matrix'],end_point[0]['timestamp_seconds'],
					curvature[0],orientation[0],ring_count[0])
				next_segment.append(this_segment)
				next_segment.append(None)
				next_segment.append(None)
				if(not is_branch[0]):
					for ring_id in range(ring_count[0]):
						u=ring_id/ring_count[0]
						this_segment.addRingAssembly(self.asset_library,u,
							ring_rotation_rate=RingAssembly.RING_ROTATION_DEGREES_PER_SECOND,
							debris_rotation_rate=RingAssembly.DEBRIS_ROTATION_DEGREES_PER_SECOND)
		#return next segment
		return {'prev_segment':previous_segment,'next_segment':next_segment}
#return the start node, end node, progress and current segment_id
#return a pointer to the segment where the pod is currently located
#return: {"node_from":X,"node_to":Y,"ratio":Z} #ratio between nodes
	def getPodStatus(self):
		"""Placeholder: intended to return {"node_from":X,"node_to":Y,"ratio":Z} for the pod's current segment (not implemented)."""
		pass
#return the segment where the camera is currently located
	def getCameraStatus(self):
		"""Placeholder: intended to return the segment where the camera is currently located (not implemented)."""
		pass
#dict with keys:
# pod
# camera
# light
# sub-keys:
# position
# rotation_matrix
# rotation_euler
#note: rotations have not been implemented for light
	def getPropOrientation(self,level_elapsed_time_seconds):
		"""Compute world poses for the pod, camera and light at the given time.

		Returns {'pod':...,'camera':...,'light':...}; pod and camera entries
		carry 'position'/'rotation_matrix'/'rotation_euler', the light entry
		only 'position'.
		"""
		#pod
		pod_orientation=self.pod_segment.getOrientationAtTime(level_elapsed_time_seconds)
		pod_position=pod_orientation["position"]
		x_axis=pod_orientation["rotation_matrix"][0,:]
		y_axis=pod_orientation["rotation_matrix"][1,:]
		#offset the pod from the track centerline by the user-controlled offset
		pod_position+=x_axis*self.pod_offset[0]
		pod_position+=y_axis*self.pod_offset[1]
		pod_orientation["position"]=pod_position
		#camera: sampled at a lagged time so it trails the pod
		camera_orientation=self.camera_segment.getOrientationAtTime(self.__getCameraTime(level_elapsed_time_seconds))
		x_axis=camera_orientation["rotation_matrix"][0,:]
		y_axis=camera_orientation["rotation_matrix"][1,:]
		position_camera=camera_orientation["position"]
		camera_movement_scale=0.5 #camera follows the pod's lateral offset at half strength
		position_camera+=x_axis*self.pod_offset[0]*camera_movement_scale
		position_camera+=y_axis*self.pod_offset[1]*camera_movement_scale
		camera_orientation["position"]=position_camera
		#orient the camera so its z-axis points at the pod
		camera_orientation_to_target=Curve.euler_angles_from_vectors(pod_position-position_camera,'z',y_axis,'y')
		camera_orientation["rotation_euler"]=camera_orientation_to_target["rotation_euler"]
		camera_orientation["rotation_matrix"]=camera_orientation_to_target["rotation_matrix"]
		#light: fixed offset in the camera frame; the z flip works around a pi3d quirk
		light_vect=np.array([10,-10,7])
		light_vect = np.dot(camera_orientation["rotation_matrix"], light_vect) * [1.0, 1.0, -1.0] #https://github.com/tipam/pi3d/issues/220
		light_orientation={'position':light_vect}
		#laser...
		return {'pod':pod_orientation,'camera':camera_orientation,'light':light_orientation}
#assumes inputs for navigation_joystick,camera_joystick,laser_joystick as 4-element bool np.arrays
# in the following order: [NORTH,WEST,SOUTH,EAST], where True is an active user input command
	def update(self,this_frame_number,this_frame_elapsed_seconds,previous_frame_elapsed_seconds,packets,
		navigation_joystick,camera_joystick,laser_joystick,is_fire_laser):
		"""Per-frame simulation step: advance the scene state machine and props.

		this_frame_elapsed_seconds/previous_frame_elapsed_seconds -- wall-clock
			timestamps of this and the previous frame, in seconds.
		navigation_joystick/camera_joystick/laser_joystick -- 4-element bool
			arrays in [NORTH,WEST,SOUTH,EAST] order, True when active.
		packets, is_fire_laser -- peripheral inputs (not used in this method yet).
		"""
		scene_state=self.scene['state']
		level_elapsed_time_seconds=this_frame_elapsed_seconds-self.level_start_time_seconds
		scene_start=self.scene['start_seconds'] #seconds
		scene_end=self.scene['end_seconds']
		delta_time=this_frame_elapsed_seconds-previous_frame_elapsed_seconds #time between frames
		#advance from previous state to current state
		#(end_seconds<0 means the scene has no scheduled end, e.g. PLAY)
		if(scene_end>=0 and level_elapsed_time_seconds>=scene_end):
			if(scene_state==SCENE_STATE.INTRO or scene_state==SCENE_STATE.DEATH):
				self.__setSceneState(SCENE_STATE.PLAY,this_frame_elapsed_seconds)
		#make decisions based on current state
		if(scene_end<=scene_start):
			ratio=0.0
		else:
			ratio=(level_elapsed_time_seconds-scene_start)/(scene_end-scene_start)
		self.scene['ratio']=ratio
		if(scene_state==SCENE_STATE.INTRO):
			pass #update pod, space ship, hyperspace effects
		elif(scene_state==SCENE_STATE.OUTRO): #when transitioning TO outro, fade out music
			if(ratio>=1):
				self.is_done=True #stop music in exitChapter()
			pass #update sphere of white
		elif(scene_state==SCENE_STATE.DEATH):
			pass #update sphere of black
		else: #CUT_SCENE.PLAY
			#if(this_frame_number%30==0):
			#	print('ring count: '+str(self.__getRingCount()))
			self.__updateSegmentQueue(level_elapsed_time_seconds)
			self.__updatePodSegment(level_elapsed_time_seconds)
			self.__updateCameraSegment(level_elapsed_time_seconds)
			#user input
			#buttons=[]
			#k=0
			#while k>=0:
				#k = sm.keys.read()
				#buttons.append(k)
			#k=max(buttons)
			#temp=k
			#is_smooth_motion_enabled=True
			#if(is_smooth_motion_enabled):
				#k=max(k,self.last_key)
			#self.last_key=temp
			k=-1 #temp disconnect from player controls
			self.__updatePodPosition(navigation_joystick,delta_time)
			self.__updateProps(level_elapsed_time_seconds)
			self.__updateSegments(level_elapsed_time_seconds)
			#if k==27:
			#	self.is_done=True
			#TODO collissions
			#update pod, camera, light, rings, branches, laser, asteroids...
def draw(self):
scene_state=self.scene['state']
ratio=self.scene['ratio']
if(scene_state==SCENE_STATE.INTRO):
self.pod.draw()
elif(scene_state==SCENE_STATE.OUTRO):
pass
elif(scene_state==SCENE_STATE.DEATH):
pass
else:
self.__drawSegments()#standard play scene
self.pod.draw()
#supported state transitions:
#intro to play
#play to death
#play to outro
#death to play
def __setSceneState(self,to_scene_state,this_frame_elapsed_seconds):
from_scene_state=self.scene['state']
level_elapsed_seconds=this_frame_elapsed_seconds-self.level_start_time_seconds
play_scene={'state':SCENE_STATE.PLAY,'start_seconds':level_elapsed_seconds,'end_seconds':-1,'ratio':0.0}
out_scene=None
if(to_scene_state==SCENE_STATE.PLAY):
if(from_scene_state==SCENE_STATE.INTRO): #intro -> play
out_scene=play_scene
#fade in/start music
elif(from_scene_state==SCENE_STATE.DEATH): #death -> play
out_scene=play_scene
self.segment_list=[] #clear segment list
self.life+=1
self.level_start_time_seconds=this_frame_elapsed_seconds
self.pod_segment=None
self.camera_segment=None
elif(to_scene_state==SCENE_STATE.DEATH): #play -> death
if(from_scene_state==SCENE_STATE.PLAY):
out_scene={'state':SCENE_STATE.DEATH,'start_seconds':level_elapsed_seconds,'end_seconds':level_elapsed_seconds+self.DEATH_SECONDS,'ratio':0.0}
elif(to_scene_state==SCENE_STATE.OUTRO):
if(from_scene_state==SCENE_STATE.PLAY): #play -> outro
out_scene={'state':SCENE_STATE.OUTRO,'start_seconds':level_elapsed_seconds,'end_seconds':level_elapsed_seconds+self.OUTRO_SECONDS,'ratio':0.0}
#fade out music
if(not out_scene is None):
self.scene=out_scene
return
raise NotImplementedError('SceneManager.__setSceneState(): Unable to transition from scene state: '+str(from_scene_state)+', to scene state: '+str(to_scene_state))
| [
"chapters.wall.hyperspace_helper.Segment.Segment",
"random.randint",
"chapters.wall.hyperspace_helper.AssetLibrary.AssetLibrary",
"numpy.identity",
"numpy.linalg.norm",
"numpy.array",
"numpy.dot",
"chapters.wall.hyperspace_helper.Maze.Maze",
"chapters.wall.hyperspace_helper.Curve.Curve.euler_angles_... | [((2598, 2621), 'chapters.wall.hyperspace_helper.AssetLibrary.AssetLibrary', 'AssetLibrary', (['self.pi3d'], {}), '(self.pi3d)\n', (2610, 2621), False, 'from chapters.wall.hyperspace_helper.AssetLibrary import AssetLibrary\n'), ((2724, 2730), 'chapters.wall.hyperspace_helper.Maze.Maze', 'Maze', ([], {}), '()\n', (2728, 2730), False, 'from chapters.wall.hyperspace_helper.Maze import Maze\n'), ((6312, 6328), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (6320, 6328), True, 'import numpy as np\n'), ((7082, 7105), 'numpy.linalg.norm', 'np.linalg.norm', (['pod_pos'], {}), '(pod_pos)\n', (7096, 7105), True, 'import numpy as np\n'), ((17035, 17120), 'chapters.wall.hyperspace_helper.Curve.Curve.euler_angles_from_vectors', 'Curve.euler_angles_from_vectors', (['(pod_position - position_camera)', '"""z"""', 'y_axis', '"""y"""'], {}), "(pod_position - position_camera, 'z', y_axis,\n 'y')\n", (17066, 17120), False, 'from chapters.wall.hyperspace_helper.Curve import Curve\n'), ((17311, 17333), 'numpy.array', 'np.array', (['[10, -10, 7]'], {}), '([10, -10, 7])\n', (17319, 17333), True, 'import numpy as np\n'), ((17347, 17404), 'numpy.dot', 'np.dot', (["camera_orientation['rotation_matrix']", 'light_vect'], {}), "(camera_orientation['rotation_matrix'], light_vect)\n", (17353, 17404), True, 'import numpy as np\n'), ((13226, 13245), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (13234, 13245), True, 'import numpy as np\n'), ((13244, 13258), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (13255, 13258), True, 'import numpy as np\n'), ((13728, 13749), 'random.randint', 'random.randint', (['(0)', '(30)'], {}), '(0, 30)\n', (13742, 13749), False, 'import random\n'), ((13749, 13770), 'random.randint', 'random.randint', (['(0)', '(30)'], {}), '(0, 30)\n', (13763, 13770), False, 'import random\n'), ((13787, 13809), 'random.randint', 'random.randint', (['(0)', '(360)'], {}), '(0, 
360)\n', (13801, 13809), False, 'import random\n'), ((13809, 13831), 'random.randint', 'random.randint', (['(0)', '(360)'], {}), '(0, 360)\n', (13823, 13831), False, 'import random\n'), ((14913, 15101), 'chapters.wall.hyperspace_helper.Segment.Segment', 'Segment', (['self.asset_library', 'is_branch[0]', "end_point[0]['position']", "end_point[0]['rotation_matrix']", "end_point[0]['timestamp_seconds']", 'curvature[0]', 'orientation[0]', 'ring_count[0]'], {}), "(self.asset_library, is_branch[0], end_point[0]['position'],\n end_point[0]['rotation_matrix'], end_point[0]['timestamp_seconds'],\n curvature[0], orientation[0], ring_count[0])\n", (14920, 15101), False, 'from chapters.wall.hyperspace_helper.Segment import Segment\n'), ((13671, 13691), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (13685, 13691), False, 'import random\n'), ((13693, 13713), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (13707, 13713), False, 'import random\n'), ((14020, 14042), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (14034, 14042), False, 'import random\n'), ((14045, 14067), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (14059, 14067), False, 'import random\n'), ((14309, 14511), 'chapters.wall.hyperspace_helper.Segment.Segment', 'Segment', (['self.asset_library', 'is_branch[itr]', "end_point[itr]['position']", "end_point[itr]['rotation_matrix']", "end_point[itr]['timestamp_seconds']", 'curvature[itr]', 'orientation[itr]', 'ring_count[itr]'], {}), "(self.asset_library, is_branch[itr], end_point[itr]['position'],\n end_point[itr]['rotation_matrix'], end_point[itr]['timestamp_seconds'],\n curvature[itr], orientation[itr], ring_count[itr])\n", (14316, 14511), False, 'from chapters.wall.hyperspace_helper.Segment import Segment\n')] |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#!/usr/bin/python2.7
"""Functions for downloading and extracting pretrained MNIST caffe models."""
from __future__ import print_function
import argparse
import tarfile
import os
from six.moves import urllib
# Zero-padded directory name per exported model version (e.g. "00000001").
VERSION_FORMAT_SPECIFIER = "%08d"
# Pretrained model archive hosted on GitHub.
SOURCE_URL = 'https://github.com/rayglover-ibm/serving-caffe/raw/pretrained-models/mnist_pretrained_caffe.tar'
OUT_FILE = 'mnist_pretrained_caffe.tar'
# Files expected inside the archive; their absence indicates a bad download.
MODEL_FILES = ['classlabels.txt', 'deploy.prototxt', 'weights.caffemodel']
def maybe_download(url, filename, work_directory):
    """Download `url` into `work_directory/filename` unless it already exists.

    Args:
        url: source URL to fetch.
        filename: basename for the downloaded file.
        work_directory: directory to store the file in (created if missing).

    Returns:
        Path to the (possibly pre-existing) downloaded file.
    """
    if not os.path.exists(work_directory):
        os.mkdir(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not os.path.exists(filepath):
        # BUG FIX: the original ignored the `url` parameter and always
        # fetched the module-level SOURCE_URL; honor the caller's URL.
        filepath, _ = urllib.request.urlretrieve(url, filepath)
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    return filepath
if __name__ == '__main__':
    # Command-line interface: destination directory plus an optional version.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("export_path", help="location to download and extract the model")
    arg_parser.add_argument("--version", type=int, default=1, help="model version")
    cli_args = arg_parser.parse_args()
    # Each version lives in its own zero-padded subdirectory; refuse to overwrite.
    export_dir = os.path.join(cli_args.export_path, VERSION_FORMAT_SPECIFIER % cli_args.version)
    if os.path.exists(export_dir):
        raise RuntimeError("Overwriting exports can cause corruption and are "
                           "not allowed. Duplicate export dir: %s" % export_dir)
    os.makedirs(export_dir)
    print('Downloading...', SOURCE_URL)
    archive_path = maybe_download(SOURCE_URL, OUT_FILE, export_dir)
    print('Extracting "%s" to "%s"' % (archive_path, export_dir))
    with tarfile.open(archive_path) as tar:
        tar.extractall(path=export_dir)
    # Sanity-check that the archive contained every expected model file.
    for model_file in MODEL_FILES:
        if not os.path.isfile(os.path.join(export_dir, model_file)):
            raise FileNotFoundError("Expected model file '%s'" % model_file)
| [
"os.mkdir",
"os.makedirs",
"argparse.ArgumentParser",
"os.stat",
"os.path.exists",
"six.moves.urllib.request.urlretrieve",
"tarfile.open",
"os.path.join"
] | [((1347, 1385), 'os.path.join', 'os.path.join', (['work_directory', 'filename'], {}), '(work_directory, filename)\n', (1359, 1385), False, 'import os\n'), ((1665, 1690), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1688, 1690), False, 'import argparse\n'), ((1907, 1978), 'os.path.join', 'os.path.join', (['args.export_path', '(VERSION_FORMAT_SPECIFIER % args.version)'], {}), '(args.export_path, VERSION_FORMAT_SPECIFIER % args.version)\n', (1919, 1978), False, 'import os\n'), ((1985, 2011), 'os.path.exists', 'os.path.exists', (['export_dir'], {}), '(export_dir)\n', (1999, 2011), False, 'import os\n'), ((2174, 2197), 'os.makedirs', 'os.makedirs', (['export_dir'], {}), '(export_dir)\n', (2185, 2197), False, 'import os\n'), ((1269, 1299), 'os.path.exists', 'os.path.exists', (['work_directory'], {}), '(work_directory)\n', (1283, 1299), False, 'import os\n'), ((1306, 1330), 'os.mkdir', 'os.mkdir', (['work_directory'], {}), '(work_directory)\n', (1314, 1330), False, 'import os\n'), ((1396, 1420), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (1410, 1420), False, 'import os\n'), ((1441, 1489), 'six.moves.urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['SOURCE_URL', 'filepath'], {}), '(SOURCE_URL, filepath)\n', (1467, 1489), False, 'from six.moves import urllib\n'), ((1506, 1523), 'os.stat', 'os.stat', (['filepath'], {}), '(filepath)\n', (1513, 1523), False, 'import os\n'), ((2373, 2395), 'tarfile.open', 'tarfile.open', (['filename'], {}), '(filename)\n', (2385, 2395), False, 'import tarfile\n'), ((2495, 2522), 'os.path.join', 'os.path.join', (['export_dir', 'p'], {}), '(export_dir, p)\n', (2507, 2522), False, 'import os\n')] |
import rlcard
# Instantiate the whist environment with per-step action recording enabled.
rlcard.make('whist', config={'record_action': True})
"rlcard.make"
] | [((14, 66), 'rlcard.make', 'rlcard.make', (['"""whist"""'], {'config': "{'record_action': True}"}), "('whist', config={'record_action': True})\n", (25, 66), False, 'import rlcard\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-07 13:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make SiteConfiguration email settings optional (blank/null) and set help texts."""
    dependencies = [("site_config", "0014_auto_20171025_1053")]
    operations = [
        migrations.AlterField(
            model_name="siteconfiguration",
            name="email_from",
            field=models.EmailField(
                blank=True, help_text="Email will be sent from this address.", max_length=254, null=True
            ),
        ),
        migrations.AlterField(
            model_name="siteconfiguration",
            name="email_host",
            field=models.CharField(blank=True, help_text="Email host.", max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name="siteconfiguration",
            name="email_password",
            field=models.CharField(blank=True, help_text="Email password.", max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name="siteconfiguration",
            name="email_port",
            field=models.PositiveIntegerField(blank=True, help_text="Email host port.", null=True),
        ),
        migrations.AlterField(
            model_name="siteconfiguration",
            name="email_username",
            field=models.CharField(blank=True, help_text="Email user name.", max_length=255, null=True),
        ),
    ]
| [
"django.db.models.CharField",
"django.db.models.PositiveIntegerField",
"django.db.models.EmailField"
] | [((405, 517), 'django.db.models.EmailField', 'models.EmailField', ([], {'blank': '(True)', 'help_text': '"""Email will be sent from this address."""', 'max_length': '(254)', 'null': '(True)'}), "(blank=True, help_text=\n 'Email will be sent from this address.', max_length=254, null=True)\n", (422, 517), False, 'from django.db import migrations, models\n'), ((679, 764), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Email host."""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text='Email host.', max_length=255, null=True\n )\n", (695, 764), False, 'from django.db import migrations, models\n'), ((900, 988), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Email password."""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text='Email password.', max_length=255,\n null=True)\n", (916, 988), False, 'from django.db import migrations, models\n'), ((1121, 1206), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'blank': '(True)', 'help_text': '"""Email host port."""', 'null': '(True)'}), "(blank=True, help_text='Email host port.', null=True\n )\n", (1148, 1206), False, 'from django.db import migrations, models\n'), ((1342, 1431), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Email user name."""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text='Email user name.', max_length=255,\n null=True)\n", (1358, 1431), False, 'from django.db import migrations, models\n')] |
from __future__ import print_function
import boto3
from decimal import Decimal
import json
import urllib
import os
print('Loading function')
# Module-level clients are created once and reused across warm Lambda invocations.
rekognition = boto3.client('rekognition')
s3 = boto3.client('s3')
# --------------- Helper Functions to call Rekognition APIs ------------------
def add_image_to_Collection(bucket, key, prefix):
    """Index blacklist face images into the 'BlackListImages' Rekognition collection.

    If the collection already exists, only the newly uploaded image (`key`)
    is indexed.  Otherwise the collection is created and every non-empty
    object under `prefix` in `bucket` is indexed into it.

    :param bucket: S3 bucket name that fired the event.
    :param key: S3 object key of the newly uploaded image.
    :param prefix: key prefix (folder) containing the blacklist images.
    :return: None
    """
    doesBlackListImagesExist = False
    # Get all the collections.
    # NOTE(review): only the first 100 collections are examined; NextToken
    # pagination is not handled -- confirm this is acceptable.
    response = rekognition.list_collections(MaxResults=100)
    for collectionId in response['CollectionIds']:
        if(collectionId == 'BlackListImages'):
            doesBlackListImagesExist = True
    # Create a blacklist collection and seed it with the bucket's existing images.
    if not doesBlackListImagesExist:
        rekognition.create_collection(CollectionId='BlackListImages')
        imageList = s3.list_objects_v2(
            Bucket=bucket, Prefix=prefix)
        # BUG FIX: S3 omits the 'Contents' key entirely (it is never None)
        # when the listing is empty, so imageList['Contents'] raised KeyError.
        for image in imageList.get('Contents', []):
            if(image['Size'] == 0):
                continue  # skip zero-byte folder placeholder objects
            print('Adding ' + bucket + '/' + image['Key'])
            rekognition.index_faces(CollectionId='BlackListImages', Image={"S3Object": {
                "Bucket": bucket, "Name": image['Key']}})
    else:
        # Collection already exists: just add the image which fired the Lambda function.
        print('Adding ' + bucket + '/' + key)
        rekognition.index_faces(CollectionId='BlackListImages', Image={"S3Object": {
            "Bucket": bucket, "Name": key}})
    return None
# --------------- Main handler ------------------
def lambda_handler(event, context):
    """Lambda entry point: index the uploaded S3 image into the blacklist collection.

    :param event: S3 put-event payload (first record is used).
    :param context: Lambda context object (unused).
    :return: True on success; re-raises any processing exception.
    """
    #print("Received event: " + json.dumps(event, indent=2))
    # Get the object from the event
    bucket = event['Records'][0]['s3']['bucket']['name']
    # NOTE(review): urllib.unquote_plus / .encode('utf8') is Python-2 style;
    # on Python 3 this would be urllib.parse.unquote_plus on a str.
    key = urllib.unquote_plus(
        event['Records'][0]['s3']['object']['key'].encode('utf8'))
    # Folder portion of the key, used as the blacklist prefix.
    prefix = key[:key.rfind('/')]
    try:
        # try:
        #     # delete collections. REMOVE IN PROD.
        #     rekognition.delete_collection(CollectionId ='BlackListImages')
        # except Exception as e:
        #     print ('Error deleting collections.')
        # Create image collections.
        add_image_to_Collection(bucket, key, prefix)
        # Print response to console.
        # print(response)
        return True
    except Exception as e:
        print(e)
        print("Error processing object {} from bucket {}. ".format(key, bucket) +
              "Make sure your object and bucket exist and your bucket is in the same region as this function.")
        raise e
| [
"boto3.client"
] | [((158, 185), 'boto3.client', 'boto3.client', (['"""rekognition"""'], {}), "('rekognition')\n", (170, 185), False, 'import boto3\n'), ((191, 209), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (203, 209), False, 'import boto3\n')] |
"""This module contains the class definition for Users."""
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import relationship
from server.models.Base import BaseModel
from server.models.Devotional import DevotionalModel
from server.models.Comment import CommentModel
class UserModel(BaseModel):
    """Database model for an application user (table ``users``)."""
    __tablename__ = 'users'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Identity/contact fields.
    email = Column(String(50))
    first_name = Column(String(50))
    last_name = Column(String(50))
    # One-to-many collections; each related model gains an 'author' backref.
    devotionals = relationship(DevotionalModel, backref='author')
    comments = relationship(CommentModel, backref='author')
| [
"sqlalchemy.String",
"sqlalchemy.orm.relationship",
"sqlalchemy.Column"
] | [((410, 443), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (416, 443), False, 'from sqlalchemy import Column, Integer, String\n'), ((565, 612), 'sqlalchemy.orm.relationship', 'relationship', (['DevotionalModel'], {'backref': '"""author"""'}), "(DevotionalModel, backref='author')\n", (577, 612), False, 'from sqlalchemy.orm import relationship\n'), ((628, 672), 'sqlalchemy.orm.relationship', 'relationship', (['CommentModel'], {'backref': '"""author"""'}), "(CommentModel, backref='author')\n", (640, 672), False, 'from sqlalchemy.orm import relationship\n'), ((463, 473), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (469, 473), False, 'from sqlalchemy import Column, Integer, String\n'), ((499, 509), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (505, 509), False, 'from sqlalchemy import Column, Integer, String\n'), ((534, 544), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (540, 544), False, 'from sqlalchemy import Column, Integer, String\n')] |
#!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from djangobmf.workflows import Workflow, State, Transition
import datetime
class QuotationWorkflow(Workflow):
    """djangobmf workflow for quotations: draft -> send -> accepted -> invoiced,
    with cancel/reopen/revise side transitions."""
    class States:
        draft = State(_(u"Draft"), True, delete=False)
        send = State(_(u"Send"), update=False, delete=False)
        accepted = State(_(u"Accepted"), update=False, delete=False)
        invoiced = State(_(u"Done"), update=False, delete=False)
        cancelled = State(_(u"Cancelled"), update=False, delete=True)

    class Transitions:
        send = Transition(_("Send to customer"), ["draft"], "send")
        accept = Transition(_("Quotation accepted by customer"), 'send', 'accepted')
        reopen = Transition(_("Reopen this quotation"), 'cancelled', 'draft')
        invoice = Transition(_("Generate invoice"), 'accepted', 'invoiced')
        revise = Transition(_("Revise this quotation"), ('send', 'accepted'), 'draft')
        cancel = Transition(_("Cancel"), ('draft', 'send', 'accepted'), 'cancelled', validate=False)

    # def revise(self, instance, user):
    #     print instance
    #     print user
    #     return True

    def invoice(self):
        """Transition hook for 'invoice': create an invoice from the quotation.

        Copies the quotation header fields and each quotation product line onto
        a new invoice, then links it via ``self.instance.invoice``.  Does
        nothing if the quotation already has an invoice.
        """
        if not self.instance.invoice:
            # Resolve the invoice model through the FK on the quotation model.
            invoice_mdl = self.instance._meta.model.invoice.field.related_field.model
            products = invoice_mdl.products.through
            invoice = invoice_mdl(
                customer=self.instance.customer,
                project=self.instance.project,
                employee=self.instance.employee,
                shipping_address=self.instance.shipping_address,
                invoice_address=self.instance.invoice_address,
                notes=self.instance.notes,
                net=self.instance.net,
                term_of_payment=self.instance.term_of_payment,
                # NOTE(review): naive local date; confirm timezone handling.
                date=datetime.datetime.now().date(),
                created_by=self.user,
                modified_by=self.user,
            )
            invoice.save()
            # save the items from the quotation to the invoice
            for item in self.instance.quotation_products.select_related('product'):
                invoice_item = products(
                    invoice=invoice,
                    product=item.product,
                    amount=item.amount,
                    price=item.price,
                    name=item.name,
                    description=item.description,
                )
                invoice_item.save()
            self.instance.invoice = invoice
            # self.instance.save()
| [
"django.utils.translation.ugettext_lazy",
"datetime.datetime.now"
] | [((300, 311), 'django.utils.translation.ugettext_lazy', '_', (['u"""Draft"""'], {}), "(u'Draft')\n", (301, 311), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((354, 364), 'django.utils.translation.ugettext_lazy', '_', (['u"""Send"""'], {}), "(u'Send')\n", (355, 364), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((419, 433), 'django.utils.translation.ugettext_lazy', '_', (['u"""Accepted"""'], {}), "(u'Accepted')\n", (420, 433), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((488, 498), 'django.utils.translation.ugettext_lazy', '_', (['u"""Done"""'], {}), "(u'Done')\n", (489, 498), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((554, 569), 'django.utils.translation.ugettext_lazy', '_', (['u"""Cancelled"""'], {}), "(u'Cancelled')\n", (555, 569), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((648, 669), 'django.utils.translation.ugettext_lazy', '_', (['"""Send to customer"""'], {}), "('Send to customer')\n", (649, 669), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((718, 753), 'django.utils.translation.ugettext_lazy', '_', (['"""Quotation accepted by customer"""'], {}), "('Quotation accepted by customer')\n", (719, 753), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((803, 829), 'django.utils.translation.ugettext_lazy', '_', (['"""Reopen this quotation"""'], {}), "('Reopen this quotation')\n", (804, 829), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((882, 903), 'django.utils.translation.ugettext_lazy', '_', (['"""Generate invoice"""'], {}), "('Generate invoice')\n", (883, 903), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((957, 983), 'django.utils.translation.ugettext_lazy', '_', (['"""Revise this quotation"""'], {}), "('Revise this quotation')\n", (958, 983), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1044, 1055), 
'django.utils.translation.ugettext_lazy', '_', (['"""Cancel"""'], {}), "('Cancel')\n", (1045, 1055), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1878, 1901), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1899, 1901), False, 'import datetime\n')] |
import pandas as pd
import holoviews as hv
import matplotlib.pyplot as plt
# Activate the bokeh backend for holoviews (width=90 presumably sets the
# output width in percent -- TODO confirm; hv is otherwise unused below).
hv.extension('bokeh', width=90)
def plot_heatmap(feature_count_table, feature_count_start_column,
                 feature_count_end_column, y_label_, output_file):
    """Render a slice of a tab-separated feature-count table as a heatmap.

    The table must contain a 'Gene' column, which becomes the row index.
    Columns between the start and end positions are plotted via heatmaps().
    """
    counts = pd.read_table(feature_count_table, sep="\t")
    counts.set_index('Gene', inplace=True)
    selected = _extract_value_matrix(
        counts, feature_count_start_column, feature_count_end_column)
    heatmaps(selected, y_label_, output_file)
def _extract_value_matrix(feature_count_table_df,
feature_count_start_column, feature_count_end_column):
return feature_count_table_df.iloc[:, int(feature_count_start_column) - 1:feature_count_end_column - 1]
def heatmaps(value_matrix, y_label_, output_file):
    """Draw value_matrix as a green heatmap, save it and show it.

    X ticks are labelled 1..20 ("Fractions"); Y ticks carry the row index.
    """
    figure, axis = plt.subplots(figsize=(100, 100))
    image = axis.imshow(value_matrix, cmap="Greens")
    figure.colorbar(image, ax=axis)
    n_cols = len(value_matrix.columns)
    n_rows = len(value_matrix.index)
    axis.set_xticks(range(n_cols))
    axis.set_yticks(range(n_rows))
    axis.set_xticklabels(range(1, 21))
    axis.set_yticklabels(value_matrix.index)
    axis.set_xlabel("Fractions", fontsize=18)
    axis.set_ylabel(y_label_, fontsize=10)
    plt.savefig(output_file)
    plt.show()
| [
"matplotlib.pyplot.show",
"holoviews.extension",
"pandas.read_table",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((76, 107), 'holoviews.extension', 'hv.extension', (['"""bokeh"""'], {'width': '(90)'}), "('bokeh', width=90)\n", (88, 107), True, 'import holoviews as hv\n'), ((255, 299), 'pandas.read_table', 'pd.read_table', (['feature_count_table'], {'sep': '"""\t"""'}), "(feature_count_table, sep='\\t')\n", (268, 299), True, 'import pandas as pd\n'), ((847, 879), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(100, 100)'}), '(figsize=(100, 100))\n', (859, 879), True, 'import matplotlib.pyplot as plt\n'), ((1269, 1293), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_file'], {}), '(output_file)\n', (1280, 1293), True, 'import matplotlib.pyplot as plt\n'), ((1298, 1308), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1306, 1308), True, 'import matplotlib.pyplot as plt\n')] |
import re
from collections import defaultdict
from copy import deepcopy
from typing import Dict, List
from ConfigSpace import CategoricalHyperparameter, Constant
from ConfigSpace import ConfigurationSpace, Configuration
from ConfigSpace import ForbiddenInClause, ForbiddenEqualsClause, ForbiddenAndConjunction
from ConfigSpace import InCondition, EqualsCondition
from ultraopt.hdl import hp_def
from ultraopt.hdl.utils import is_hdl_bottom
from ultraopt.utils.logging_ import get_logger
class HDL2CS():
    """Translate an HDL (Hyperparameter Description Language) dict into a
    ConfigSpace ``ConfigurationSpace``.

    * Dict-valued keys suffixed with ``(choice)`` become nested sub-spaces
      selected by a categorical ``__choice__`` hyperparameter.
    * Bottom-level entries become individual hyperparameters.
    * ``__condition`` / ``__activate`` / ``__forbidden`` entries are turned
      into ConfigSpace conditions and forbidden clauses.
    """
    def __init__(self):
        self.logger = get_logger(__name__)
    def __call__(self, hdl: Dict):
        """Convert ``hdl`` and return the resulting ConfigurationSpace."""
        cs = self.recursion(hdl)
        return cs
    def __condition(self, item: Dict, store: Dict):
        """Build an InCondition/EqualsCondition from an item of the form
        ``{"_child": name, "_parent": name, "_values": value-or-list}``.

        ``store`` maps hyperparameter names to the hyperparameter objects
        already added to the space.
        """
        child = item["_child"]
        child = store[child]
        parent = item["_parent"]
        parent = store[parent]
        value = (item["_values"])
        # A one-element list is treated the same as a scalar value.
        if (isinstance(value, list) and len(value) == 1):
            value = value[0]
        if isinstance(value, list):
            cond = InCondition(child, parent, list(map(hp_def._encode, value)))
        else:
            cond = EqualsCondition(child, parent, hp_def._encode(value))
        return cond
    def __forbidden(self, value: List, store: Dict, cs: ConfigurationSpace):
        """Add one ForbiddenAndConjunction per dict in ``value``.

        Each dict maps hyperparameter names to the forbidden value(s); the
        clauses of one dict are AND-combined.
        """
        assert isinstance(value, list)
        for item in value:
            assert isinstance(item, dict)
            clauses = []
            for name, forbidden_values in item.items():
                # Unwrap single-element lists to a scalar.
                if isinstance(forbidden_values, list) and len(forbidden_values) == 1:
                    forbidden_values = forbidden_values[0]
                if isinstance(forbidden_values, list):
                    clauses.append(ForbiddenInClause(store[name], list(map(hp_def._encode, forbidden_values))))
                else:
                    clauses.append(ForbiddenEqualsClause(store[name], hp_def._encode(forbidden_values)))
            cs.add_forbidden_clause(ForbiddenAndConjunction(*clauses))
    def reverse_dict(self, dict_: Dict):
        """Invert a mapping whose values are scalars or lists; each inverted
        key maps to the de-duplicated list of original keys."""
        reversed_dict = defaultdict(list)
        for key, value in dict_.items():
            if isinstance(value, list):
                for v in value:
                    reversed_dict[v].append(key)
            else:
                reversed_dict[value].append(key)
        reversed_dict = dict(reversed_dict)
        for key, value in reversed_dict.items():
            reversed_dict[key] = list(set(value))
        return reversed_dict
    def pop_covered_item(self, dict_: Dict, length: int):
        """Return a copy of ``dict_`` without entries whose value list has at
        least ``length`` elements (presumably: a child activated by every
        parent value needs no condition -- TODO confirm intent)."""
        dict_ = deepcopy(dict_)
        should_pop = []
        for key, value in dict_.items():
            assert isinstance(value, list)
            if len(value) > length:
                # More entries than parent values should be impossible.
                self.logger.warning("len(value) > length")
                should_pop.append(key)
            elif len(value) == length:
                should_pop.append(key)
        for key in should_pop:
            dict_.pop(key)
        return dict_
    def __activate(self, value: Dict, store: Dict, cs: ConfigurationSpace):
        """Translate an ``__activate`` mapping into parent/child conditions.

        For each parent ``k``, ``v`` maps a parent value to the child (or
        children) it activates; the mapping is inverted so each child gets a
        single condition listing all of its activating parent values.
        """
        assert isinstance(value, dict)
        for k, v in value.items():
            assert isinstance(v, dict)
            reversed_dict = self.reverse_dict(v)
            reversed_dict = self.pop_covered_item(reversed_dict, len(v))
            for sk, sv in reversed_dict.items():
                cond = self.__condition(
                    {
                        "_child": sk,
                        "_values": sv,
                        "_parent": k
                    },
                    store,
                )
                cs.add_condition(cond)
    def eliminate_suffix(self, key: str):
        """Strip a trailing ``(choice)`` marker from ``key`` if present."""
        s = "(choice)"
        if key.endswith(s):
            key = key[:-len(s)]
        return key
    def add_configuration_space(
            self, cs: ConfigurationSpace, cs_name: str, hdl_value: dict, is_choice: bool,
            option_hp: Configuration, children_is_choice=False):
        """Attach ``hdl_value`` as a nested sub-space named ``cs_name``.

        When the parent space is a choice, the sub-space is only active while
        ``__choice__`` equals ``cs_name``.
        """
        if is_choice:
            cs.add_configuration_space(cs_name, self.recursion(hdl_value, children_is_choice),
                                       parent_hyperparameter={"parent": option_hp, "value": cs_name})
        else:
            cs.add_configuration_space(cs_name, self.recursion(hdl_value, children_is_choice))
    def recursion(self, hdl, is_choice=False):
        """Recursively convert one HDL level into a ConfigurationSpace.

        ``is_choice`` marks this level as the body of a ``(choice)`` key, in
        which case a ``__choice__`` categorical is added first.
        """
        ############ Declare ConfigurationSpace variables ###################
        cs = ConfigurationSpace()
        ####### Fill placeholder to empty ConfigurationSpace ################
        key_list = list(hdl.keys())
        if len(key_list) == 0:
            cs.add_hyperparameter(Constant("placeholder", "placeholder"))
            return cs
        ###################### Declare common variables #####################
        option_hp = None
        pattern = re.compile(r"(.*)\((.*)\)")
        store = {}
        conditions_dict = {}
        ########### If parent is choice configuration_space #################
        if is_choice:
            choices = []
            for k, v in hdl.items():
                if not is_hdl_bottom(k, v) and isinstance(v, dict):
                    k = self.eliminate_suffix(k)
                    choices.append(self.eliminate_suffix(k))
            option_hp = CategoricalHyperparameter('__choice__', choices)
            cs.add_hyperparameter(option_hp)
        #### Travel key,value in hdl items, if value is dict(hdl), do recursion ######
        # fixme: 'option_hp' maybe reference without define ?
        for hdl_key, hdl_value in hdl.items():
            mat = pattern.match(hdl_key)
            # add_configuration_space (choice)
            if mat and isinstance(hdl_value, dict):
                groups = mat.groups()
                assert len(groups) == 2, ValueError(f"Invalid hdl_key {hdl_key}")
                cs_name, method = groups
                assert method == "choice", ValueError(f"Invalid suffix {method}")
                self.add_configuration_space(cs, cs_name, hdl_value, is_choice, option_hp, True)
            elif is_hdl_bottom(hdl_key, hdl_value):
                # Keys starting with "__" are directives, not hyperparameters.
                if hdl_key.startswith("__"):
                    conditions_dict[hdl_key] = hdl_value
                else:
                    hp = self.__parse_dict_to_config(hdl_key, hdl_value)
                    cs.add_hyperparameter(hp)
                    store[hdl_key] = hp
            # add_configuration_space
            elif isinstance(hdl_value, dict):
                cs_name = hdl_key
                self.add_configuration_space(cs, cs_name, hdl_value, is_choice, option_hp)
            else:
                raise NotImplementedError
        ########### Processing conditional hyperparameters #################
        for key, value in conditions_dict.items():
            condition_indicator = key
            if condition_indicator == "__condition":
                assert isinstance(value, list)
                for item in value:
                    cond = self.__condition(item, store)
                    cs.add_condition(cond)
            elif condition_indicator == "__activate":
                self.__activate(value, store, cs)
            elif condition_indicator == "__forbidden":
                self.__forbidden(value, store, cs)
            else:
                self.logger.warning(f"Invalid condition_indicator: {condition_indicator}")
        # fixme: remove 'rely_model'
        return cs
    # add_hyperparameter
    def __parse_dict_to_config(self, key, value):
        """Create one hyperparameter from a bottom-level HDL entry.

        ``value`` is either a dict with ``_type``/``_value``/``_default`` keys
        (dispatched to the matching ``hp_def`` constructor) or a plain scalar,
        which becomes a Constant.
        """
        if isinstance(value, dict):
            _type = value.get("_type")
            _value = value.get("_value")
            _default = value.get("_default")
            assert _value is not None
            # NOTE(review): ``_type`` is interpolated into eval() -- HDL input
            # must be trusted.
            if _type in ("choice", "ordinal"):
                return eval(f"hp_def.{_type}(key, _value, _default)")
            else:
                return eval(f'''hp_def.{_type}("{key}",*_value,default=_default)''')
        else:
            return Constant(key, hp_def._encode(value))
def hdl2cs(hdl: dict) -> ConfigurationSpace:
    """Convenience wrapper: convert an HDL dict to a ConfigurationSpace."""
    converter = HDL2CS()
    return converter(hdl)
| [
"ConfigSpace.ConfigurationSpace",
"copy.deepcopy",
"ultraopt.hdl.utils.is_hdl_bottom",
"ConfigSpace.CategoricalHyperparameter",
"ConfigSpace.Constant",
"ultraopt.utils.logging_.get_logger",
"collections.defaultdict",
"ultraopt.hdl.hp_def._encode",
"ConfigSpace.ForbiddenAndConjunction",
"re.compile... | [((553, 573), 'ultraopt.utils.logging_.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (563, 573), False, 'from ultraopt.utils.logging_ import get_logger\n'), ((2025, 2042), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2036, 2042), False, 'from collections import defaultdict\n'), ((2519, 2534), 'copy.deepcopy', 'deepcopy', (['dict_'], {}), '(dict_)\n', (2527, 2534), False, 'from copy import deepcopy\n'), ((4380, 4400), 'ConfigSpace.ConfigurationSpace', 'ConfigurationSpace', ([], {}), '()\n', (4398, 4400), False, 'from ConfigSpace import ConfigurationSpace, Configuration\n'), ((4763, 4791), 're.compile', 're.compile', (['"""(.*)\\\\((.*)\\\\)"""'], {}), "('(.*)\\\\((.*)\\\\)')\n", (4773, 4791), False, 'import re\n'), ((5203, 5251), 'ConfigSpace.CategoricalHyperparameter', 'CategoricalHyperparameter', (['"""__choice__"""', 'choices'], {}), "('__choice__', choices)\n", (5228, 5251), False, 'from ConfigSpace import CategoricalHyperparameter, Constant\n'), ((1139, 1160), 'ultraopt.hdl.hp_def._encode', 'hp_def._encode', (['value'], {}), '(value)\n', (1153, 1160), False, 'from ultraopt.hdl import hp_def\n'), ((1924, 1957), 'ConfigSpace.ForbiddenAndConjunction', 'ForbiddenAndConjunction', (['*clauses'], {}), '(*clauses)\n', (1947, 1957), False, 'from ConfigSpace import ForbiddenInClause, ForbiddenEqualsClause, ForbiddenAndConjunction\n'), ((4580, 4618), 'ConfigSpace.Constant', 'Constant', (['"""placeholder"""', '"""placeholder"""'], {}), "('placeholder', 'placeholder')\n", (4588, 4618), False, 'from ConfigSpace import CategoricalHyperparameter, Constant\n'), ((5990, 6023), 'ultraopt.hdl.utils.is_hdl_bottom', 'is_hdl_bottom', (['hdl_key', 'hdl_value'], {}), '(hdl_key, hdl_value)\n', (6003, 6023), False, 'from ultraopt.hdl.utils import is_hdl_bottom\n'), ((7902, 7923), 'ultraopt.hdl.hp_def._encode', 'hp_def._encode', (['value'], {}), '(value)\n', (7916, 7923), False, 'from ultraopt.hdl import hp_def\n'), ((5024, 
5043), 'ultraopt.hdl.utils.is_hdl_bottom', 'is_hdl_bottom', (['k', 'v'], {}), '(k, v)\n', (5037, 5043), False, 'from ultraopt.hdl.utils import is_hdl_bottom\n'), ((1853, 1885), 'ultraopt.hdl.hp_def._encode', 'hp_def._encode', (['forbidden_values'], {}), '(forbidden_values)\n', (1867, 1885), False, 'from ultraopt.hdl import hp_def\n')] |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
from digits import utils
from digits.utils import subclass
from flask.ext.wtf import Form
from wtforms import validators
@subclass
class DatasetForm(Form):
    """
    A form used to create a Sunnybrook dataset
    """
    def validate_folder_path(form, field):
        """Raise ValidationError unless the field is empty or names an
        existing directory on the server."""
        if not field.data:
            pass
        else:
            # make sure the filesystem path exists
            if not os.path.exists(field.data) or not os.path.isdir(field.data):
                raise validators.ValidationError(
                    'Folder does not exist or is not reachable')
            else:
                return True
    # Required path to the VOC-formatted dataset folder.
    voc_folder = utils.forms.StringField(
        u'VOC folder',
        validators=[
            validators.DataRequired(),
            # validate_folder_path,
        ],
        tooltip="Specify the path to the voc folder"
    )
    # Optional dataset-server connection settings (no validation applied).
    dataset_server_ip = utils.forms.StringField(
        'Dataset server ip',
        validators=[
        ],
        tooltip="Dataset server ip in format 'xxx.xxx.xxx.xxx'."
    )
    dataset_server_port = utils.forms.StringField(
        'Dataset server port',
        validators=[
        ],
        tooltip="Dataset server port in format 'xxxx'."
    )
@subclass
class InferenceForm(Form):
    def validate_file_path(form, field):
        """Raise ValidationError unless the field is empty or names an
        existing path (since isdir implies exists, the conjunction reduces
        to an existence check)."""
        if not field.data:
            pass
        else:
            # make sure the filesystem path exists
            if not os.path.exists(field.data) and not os.path.isdir(field.data):
                raise validators.ValidationError(
                    'File does not exist or is not reachable')
            else:
                return True
    """
    A form used to perform inference on a text classification dataset
    """
    # Server-side path of a single image to run inference on.
    test_image_file = utils.forms.StringField(
        u'Image file',
        validators=[
            validate_file_path,
        ],
        tooltip="Provide the (server) path to an image."
    )
    # Dropdown of validation-set records; choices are presumably filled in
    # elsewhere at runtime -- only the sentinel entry is declared here.
    validation_record = utils.forms.SelectField(
        'Record from validation set',
        choices=[
            ('none', '- select record -'),
        ],
        default='none',
        tooltip="Test a record from the validation set."
    )
| [
"digits.utils.forms.SelectField",
"os.path.isdir",
"digits.utils.forms.StringField",
"os.path.exists",
"wtforms.validators.DataRequired",
"wtforms.validators.ValidationError"
] | [((996, 1118), 'digits.utils.forms.StringField', 'utils.forms.StringField', (['"""Dataset server ip"""'], {'validators': '[]', 'tooltip': '"""Dataset server ip in format \'xxx.xxx.xxx.xxx\'."""'}), '(\'Dataset server ip\', validators=[], tooltip=\n "Dataset server ip in format \'xxx.xxx.xxx.xxx\'.")\n', (1019, 1118), False, 'from digits import utils\n'), ((1180, 1295), 'digits.utils.forms.StringField', 'utils.forms.StringField', (['"""Dataset server port"""'], {'validators': '[]', 'tooltip': '"""Dataset server port in format \'xxxx\'."""'}), '(\'Dataset server port\', validators=[], tooltip=\n "Dataset server port in format \'xxxx\'.")\n', (1203, 1295), False, 'from digits import utils\n'), ((1868, 1993), 'digits.utils.forms.StringField', 'utils.forms.StringField', (['u"""Image file"""'], {'validators': '[validate_file_path]', 'tooltip': '"""Provide the (server) path to an image."""'}), "(u'Image file', validators=[validate_file_path],\n tooltip='Provide the (server) path to an image.')\n", (1891, 1993), False, 'from digits import utils\n'), ((2068, 2237), 'digits.utils.forms.SelectField', 'utils.forms.SelectField', (['"""Record from validation set"""'], {'choices': "[('none', '- select record -')]", 'default': '"""none"""', 'tooltip': '"""Test a record from the validation set."""'}), "('Record from validation set', choices=[('none',\n '- select record -')], default='none', tooltip=\n 'Test a record from the validation set.')\n", (2091, 2237), False, 'from digits import utils\n'), ((596, 667), 'wtforms.validators.ValidationError', 'validators.ValidationError', (['"""Folder does not exist or is not reachable"""'], {}), "('Folder does not exist or is not reachable')\n", (622, 667), False, 'from wtforms import validators\n'), ((834, 859), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (857, 859), False, 'from wtforms import validators\n'), ((1623, 1692), 'wtforms.validators.ValidationError', 'validators.ValidationError', (['"""File 
does not exist or is not reachable"""'], {}), "('File does not exist or is not reachable')\n", (1649, 1692), False, 'from wtforms import validators\n'), ((513, 539), 'os.path.exists', 'os.path.exists', (['field.data'], {}), '(field.data)\n', (527, 539), False, 'import os\n'), ((547, 572), 'os.path.isdir', 'os.path.isdir', (['field.data'], {}), '(field.data)\n', (560, 572), False, 'import os\n'), ((1539, 1565), 'os.path.exists', 'os.path.exists', (['field.data'], {}), '(field.data)\n', (1553, 1565), False, 'import os\n'), ((1574, 1599), 'os.path.isdir', 'os.path.isdir', (['field.data'], {}), '(field.data)\n', (1587, 1599), False, 'import os\n')] |
import itertools
import numpy as np
import string
__all__ = ['BigramGenerator', 'SkipgramGenerator',
'id2bigram', 'vocabulary_size', 'all_bigrams']
letters = sorted(set((string.ascii_letters + string.digits + " ").lower()))
class WhitelistTable(object):
    """Translation table for str.translate that keeps whitelisted characters
    unchanged and maps every other code point to None (which translate
    deletes)."""
    # there will be stories
    def __init__(self, letters):
        codepoints = [ord(ch) for ch in letters]
        self._d = dict(zip(codepoints, codepoints))
    def __getitem__(self, k):
        # Missing keys yield None.
        return self._d.get(k)
# Shared translation table that strips any character outside the whitelist.
trans_table = WhitelistTable(letters)
# Every 2-letter combination over the alphabet, mapped to a dense integer id.
all_bigrams = {x[0] + x[1]: i for i, x in
               enumerate(itertools.product(letters, letters))}
# Inverse mapping: id -> bigram string.
inversed_bigrams = {i: x for x, i in all_bigrams.items()}
vocabulary_size = len(all_bigrams)
def id2bigram(i):
    """Return the bigram string for integer id ``i`` (KeyError if unknown)."""
    return inversed_bigrams[i]
def text_to_bigram_sequence(text):
    """Strip non-whitelisted characters from *text*, pad to even length with
    a space, and encode consecutive 2-character chunks as bigram ids.

    Returns a numpy int16 array of ids.
    """
    cleaned = text.translate(trans_table)
    if len(cleaned) % 2 != 0:
        cleaned += " "
    ids = [all_bigrams[cleaned[pos:pos + 2]]
           for pos in range(0, len(cleaned), 2)]
    return np.array(ids, dtype=np.int16)
class BatchGenerator(object):
    """Produce successive batches of symbol ids from an indexable sequence.

    The sequence is split into ``batch_size`` equal segments and one cursor
    walks each segment (wrapping modulo the text length), so each batch row
    advances through its own contiguous slice of the data.
    """
    def __init__(self, text, batch_size, num_unrollings):
        self._text = text
        self._text_size = len(text)
        self._batch_size = batch_size
        self._num_unrollings = num_unrollings
        segment = self._text_size // batch_size
        # One cursor per batch row, evenly spaced through the text.
        self._cursor = [offset * segment for offset in range(batch_size)]
        self._last_batch = self._next_batch()
    def _next_batch(self):
        """Generate a single batch from the current cursor position in the data."""
        batch = np.zeros(shape=(self._batch_size), dtype=np.int16)
        for row in range(self._batch_size):
            batch[row] = self._text[self._cursor[row]]
            self._cursor[row] = (self._cursor[row] + 1) % self._text_size
        return batch
    def next(self):
        """Return the previous call's final batch followed by
        ``num_unrollings`` freshly generated batches."""
        batches = [self._last_batch]
        batches.extend(self._next_batch() for _ in range(self._num_unrollings))
        self._last_batch = batches[-1]
        return batches
def to_skipgrams(batches):
    """Split an odd-length list of batches into (centers, contexts).

    The middle batch is the center word, repeated once per remaining batch;
    the contexts are all batches except the middle one, in order.
    """
    assert len(batches) % 2 != 0
    center = len(batches) // 2
    contexts = batches[:center] + batches[center + 1:]
    centers = [batches[center] for _ in contexts]
    return (centers, contexts)
class BigramGenerator(object):
    """Generates batches of bigrams for given text"""
    def __init__(self, text, batch_size, num_unrollings=0):
        encoded = text_to_bigram_sequence(text)
        self._bigrams = encoded
        self._generator = BatchGenerator(encoded, batch_size, num_unrollings)
    def next(self):
        """Return the next list of bigram-id batches."""
        return self._generator.next()
class SkipgramGenerator(object):
    """Generates batches/labels of skipgrams for given text"""
    def __init__(self, text, batch_size, num_skips):
        encoded = text_to_bigram_sequence(text)
        self._bigrams = encoded
        # 2*num_skips unrollings give num_skips context words on each side.
        self._generator = BatchGenerator(encoded, batch_size, num_skips * 2)
    def next(self):
        """Return (center-batches, context-batches) for the next window."""
        return to_skipgrams(self._generator.next())
| [
"numpy.zeros",
"numpy.array",
"itertools.product"
] | [((944, 978), 'numpy.array', 'np.array', (['sequence'], {'dtype': 'np.int16'}), '(sequence, dtype=np.int16)\n', (952, 978), True, 'import numpy as np\n'), ((1512, 1560), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self._batch_size', 'dtype': 'np.int16'}), '(shape=self._batch_size, dtype=np.int16)\n', (1520, 1560), True, 'import numpy as np\n'), ((551, 586), 'itertools.product', 'itertools.product', (['letters', 'letters'], {}), '(letters, letters)\n', (568, 586), False, 'import itertools\n')] |
import numpy as np
import eex
import os
from . import amber_metadata as amd
def get_energies(prmtop=None, crd=None, input_file=None, amb_path=None):
    """Evaluate energies of AMBER files. Based on InterMol

    Args:
        prmtop: path to the AMBER parameter/topology (.prmtop) file.
        crd: path to the AMBER coordinate file.
        input_file: path to the sander input (mdin) file.
        amb_path: path to the AMBER binaries directory, or to sander itself.

    Returns:
        Dict of canonicalized energy terms parsed from the sander output.

    Raises:
        OSError: if a required input path is missing or sander is not found.
    """
    if not prmtop:
        raise OSError('Cannot find %s Amber parameter file' % prmtop)
    if not crd:
        # BUG FIX: this branch previously formatted the undefined name
        # ``crdtop``, raising NameError instead of the intended OSError.
        raise OSError('Cannot find %s Amber coordinate file' % crd)
    directory, _ = os.path.split(os.path.abspath(prmtop))
    mdout = os.path.join(directory, 'amber.out')
    stdout_path = os.path.join(directory, 'amber_stdout.txt')
    stderr_path = os.path.join(directory, 'amber_stderr.txt')
    # Did they give a path, or the name of the file?
    is_last_bin = os.path.basename(os.path.normpath(amb_path))
    if is_last_bin == 'sander':
        amber_bin = amb_path
    else:
        amber_bin = os.path.join(amb_path, 'sander')
    if not eex.utility.which(amber_bin):
        raise OSError('Unable to find AMBER executable (sander).')
    # Run sander.
    cmd = [amber_bin, '-i', input_file, '-c', crd, '-p', prmtop, '-o', mdout, '-O']
    _ = eex.utility.run_subprocess(cmd, stdout_path, stderr_path)
    ret = _group_energy_terms(mdout)
    # TODO: Unit conversion
    return eex.utility.canonicalize_energy_names(ret, amd.to_canonical)
def _group_energy_terms(mdout):
    """Parse AMBER output file and group the energy terms in a dict. """
    # Reads the mdout file produced by sander, locates the energy section,
    # and returns {term_name: value} plus the summed total under 'ENERGY'.
    with open(mdout) as f:
        all_lines = f.readlines()
    # Find where the energy information starts.
    for i, line in enumerate(all_lines):
        if line[0:8] == ' NSTEP':
            startline = i
            break
    else:
        # NOTE(review): AmberError is neither defined nor imported in this
        # module as shown -- this path would raise NameError; confirm where
        # AmberError lives.
        raise AmberError('Unable to detect where energy info starts in AMBER '
                         'output file: {}'.format(mdout))
    # Strange ranges for amber file data.
    # Each mdout energy line holds up to three fixed-width "NAME = value"
    # fields at these column spans.
    ranges = [[1, 24], [26, 49], [51, 77]]
    e_out = dict()
    potential = 0
    # Energy lines begin 3 lines after the NSTEP header and end at the first
    # line without an '=' sign.
    for line in all_lines[startline+3:]:
        if '=' in line:
            for i in range(3):
                r = ranges[i]
                term = line[r[0]:r[1]]
                if '=' in term:
                    energy_type, energy_value = term.split('=')
                    energy_value = float(energy_value)
                    potential += energy_value
                    energy_type = energy_type.rstrip()
                    e_out[energy_type] = energy_value
        else:
            break
    # Total potential is the sum of all parsed terms.
    e_out['ENERGY'] = potential
    # eex.utility.canonicalize_energy_names(e_out)
    return e_out
| [
"eex.utility.canonicalize_energy_names",
"os.path.abspath",
"eex.utility.run_subprocess",
"eex.utility.which",
"os.path.normpath",
"os.path.join"
] | [((701, 737), 'os.path.join', 'os.path.join', (['directory', '"""amber.out"""'], {}), "(directory, 'amber.out')\n", (713, 737), False, 'import os\n'), ((756, 799), 'os.path.join', 'os.path.join', (['directory', '"""amber_stdout.txt"""'], {}), "(directory, 'amber_stdout.txt')\n", (768, 799), False, 'import os\n'), ((818, 861), 'os.path.join', 'os.path.join', (['directory', '"""amber_stderr.txt"""'], {}), "(directory, 'amber_stderr.txt')\n", (830, 861), False, 'import os\n'), ((1323, 1380), 'eex.utility.run_subprocess', 'eex.utility.run_subprocess', (['cmd', 'stdout_path', 'stderr_path'], {}), '(cmd, stdout_path, stderr_path)\n', (1349, 1380), False, 'import eex\n'), ((1460, 1520), 'eex.utility.canonicalize_energy_names', 'eex.utility.canonicalize_energy_names', (['ret', 'amd.to_canonical'], {}), '(ret, amd.to_canonical)\n', (1497, 1520), False, 'import eex\n'), ((663, 686), 'os.path.abspath', 'os.path.abspath', (['prmtop'], {}), '(prmtop)\n', (678, 686), False, 'import os\n'), ((951, 977), 'os.path.normpath', 'os.path.normpath', (['amb_path'], {}), '(amb_path)\n', (967, 977), False, 'import os\n'), ((1070, 1102), 'os.path.join', 'os.path.join', (['amb_path', '"""sander"""'], {}), "(amb_path, 'sander')\n", (1082, 1102), False, 'import os\n'), ((1115, 1143), 'eex.utility.which', 'eex.utility.which', (['amber_bin'], {}), '(amber_bin)\n', (1132, 1143), False, 'import eex\n')] |
"""Retirement Calculator"""
from datetime import date
def prompt(difference_check):
    """Calculate the year you can retire

    difference_check - a function that takes a number and returns boolean,
    the number provided is the difference between current age and retirement age
    """
    age = int(input('What is your age? '))
    retirement_age = int(input('At what age would you like to retire? '))
    current_year = date.today().year
    years_left = retirement_age - age
    if not difference_check(years_left):
        print('You can retire now!')
    else:
        target_year = current_year + years_left
        print("You have {difference} years left until you can retire."
              .format(difference=years_left))
        print("It's {current_year}, so you can retire in {future_year}."
              .format(current_year=current_year, future_year=target_year))
def ex6():
    """Do not handle negative numbers"""
    # Always reports the remaining years, even when the difference is <= 0.
    prompt(lambda _: True)
def ex6a():
    """Handle negative numbers"""
    # Prints "you can retire now" when the difference is zero or negative.
    prompt(lambda x: x > 0)
| [
"datetime.date.today"
] | [((428, 440), 'datetime.date.today', 'date.today', ([], {}), '()\n', (438, 440), False, 'from datetime import date\n')] |
name = "api"
import os
import sys
import requests
import logging
import revops.exceptions
# Default logging configuration applied at import time.
__LOGGING_DEFAULTS__ = {'level': logging.INFO}
# Fallback API endpoint used when none is supplied or set in the environment.
__DEFAULT_ENDPOINT__ = 'https://api.revops.io'
logging.basicConfig(**__LOGGING_DEFAULTS__)
class RevOpsAPI(object):
    # Class-level default; replaced per-instance with auth headers in __init__.
    headers = {}
    # NOTE(review): this bare string sits after ``headers`` and is therefore
    # not the class docstring, but it documents the class:
    """
    This is the RevOps API Client
    Attributes:
        api_key (str): API Key used to access RevOps API Access.
        endpoint (str): API Endpoint to access your RevOps instance.
            If not defined, defaults to 'https://api.revops.io'.
    """
    def __init__(self, api_key = None, endpoint = __DEFAULT_ENDPOINT__):
        # Environment variables take precedence over constructor arguments.
        self.api_key = os.environ.get('REVOPS_API_KEY', api_key)
        if self.api_key == None or self.api_key == '':
            raise Exception("REVOPS_API_KEY environment variable is not set.")
        self.api_endpoint = os.environ.get('REVOPS_API_ENDPOINT', endpoint)
        self.headers = {
            'X-RevOps-API-Key': self.api_key,
            'Content-Type': 'application/json',
        }
    def __getattr__(self, name):
        """Lazily import ``revops.resources.<name>`` and return its API module
        bound to this client, so e.g. ``api.accounts`` resolves dynamically."""
        resource = __import__(
            "revops.resources.{}".format(name),
            fromlist=["revops.resources"]
        )
        return resource.__api_module__(self)
    def request(self, data, api_resource = None, http_method = "GET"):
        """Issue an HTTP request against ``<endpoint>/<api_resource>``.

        Raises revops.exceptions.AuthenticationException on HTTP 401;
        otherwise returns the raw requests Response.
        """
        url = "{}/{}".format(self.api_endpoint, api_resource)
        response = requests.request(
            http_method,
            url,
            data=data,
            headers=self.headers,
        )
        if response.status_code == 401:
            raise revops.exceptions.AuthenticationException(
                "Unauthorized key, please check credentials provided.",
                api_resource,
                response,
            )
        return response
| [
"os.environ.get",
"requests.request",
"logging.basicConfig"
] | [((189, 232), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '(**__LOGGING_DEFAULTS__)\n', (208, 232), False, 'import logging\n'), ((634, 675), 'os.environ.get', 'os.environ.get', (['"""REVOPS_API_KEY"""', 'api_key'], {}), "('REVOPS_API_KEY', api_key)\n", (648, 675), False, 'import os\n'), ((838, 885), 'os.environ.get', 'os.environ.get', (['"""REVOPS_API_ENDPOINT"""', 'endpoint'], {}), "('REVOPS_API_ENDPOINT', endpoint)\n", (852, 885), False, 'import os\n'), ((1379, 1446), 'requests.request', 'requests.request', (['http_method', 'url'], {'data': 'data', 'headers': 'self.headers'}), '(http_method, url, data=data, headers=self.headers)\n', (1395, 1446), False, 'import requests\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
"""
This file implements a psychrometric chart for air at 1 atm
"""
from CoolProp.HumidAirProp import HAPropsSI
from .Plots import InlineLabel
import matplotlib, numpy, textwrap
import_template = (
"""
# This file was auto-generated by the PsychChart.py script in wrappers/Python/CoolProp/Plots
if __name__=='__main__':
import numpy, matplotlib
from CoolProp.HumidAirProp import HAPropsSI
from CoolProp.Plots.Plots import InlineLabel
p = 101325
Tdb = numpy.linspace(-10,60,100)+273.15
# Make the figure and the axes
fig=matplotlib.pyplot.figure(figsize=(10,8))
ax=fig.add_axes((0.1,0.1,0.85,0.85))
"""
)
closure_template = (
"""
matplotlib.pyplot.show()
"""
)
# Dry-bulb temperature grid [K] and atmospheric pressure [Pa] shared by all
# chart elements below.
Tdb = numpy.linspace(-10, 60, 100) + 273.15
p = 101325
def indented_segment(s):
    """Dedent ``s`` and re-indent every line by a fixed prefix, for emitting
    method bodies into the generated PsychScript.py."""
    return '\n'.join([' ' + line for line in textwrap.dedent(s).split('\n')])
class PlotFormatting(object):
    """Axis limits and labels for the psychrometric chart; ``__str__`` emits
    the equivalent script fragment."""
    def plot(self, ax):
        """Apply limits and axis labels to ``ax``."""
        ax.set_xlim(Tdb[0] - 273.15, Tdb[-1] - 273.15)
        ax.set_ylim(0, 0.03)
        ax.set_xlabel(r"$T_{db}$ [$^{\circ}$C]")
        ax.set_ylabel(r"$W$ ($m_{w}/m_{da}$) [-]")
    def __str__(self):
        # Script-generation twin of plot(); keep the two in sync.
        return indented_segment("""
        ax.set_xlim(Tdb[0]-273.15,Tdb[-1]-273.15)
        ax.set_ylim(0,0.03)
        ax.set_xlabel(r"$T_{db}$ [$^{\circ}$C]")
        ax.set_ylabel(r"$W$ ($m_{w}/m_{da}$) [-]")
        """)
class SaturationLine(object):
    """Saturation (100% RH) humidity-ratio curve; ``__str__`` emits the
    equivalent script fragment."""
    def plot(self, ax):
        """Plot W at saturation versus dry-bulb temperature [C]."""
        w = [HAPropsSI('W', 'T', T, 'P', p, 'R', 1.0) for T in Tdb]
        ax.plot(Tdb - 273.15, w, lw=2)
    def __str__(self):
        # Script-generation twin of plot(); keep the two in sync.
        return indented_segment("""
        # Saturation line
        w = [HAPropsSI('W','T',T,'P',p,'R',1.0) for T in Tdb]
        ax.plot(Tdb-273.15,w,lw=2)
        """
                               )
class HumidityLabels(object):
    """Inline phi=XX% labels placed on the constant-RH curves at the height
    of moist-air enthalpy ``h``; ``__str__`` emits the script fragment."""
    def __init__(self, RH_values, h):
        self.RH_values = RH_values
        self.h = h
    def plot(self, ax):
        """Draw one rotated, boxed label per relative-humidity value."""
        xv = Tdb  # [K]
        for RH in self.RH_values:
            yv = [HAPropsSI('W', 'T', T, 'P', p, 'R', RH) for T in Tdb]
            y = HAPropsSI('W', 'P', p, 'H', self.h, 'R', RH)
            T_K, w, rot = InlineLabel(xv, yv, y=y, axis=ax)
            string = r'$\phi$=' + '{s:0.0f}'.format(s=RH * 100) + '%'
            # Make a temporary label to get its bounding box
            bbox_opts = dict(boxstyle='square,pad=0.0', fc='white', ec='None', alpha=0.5)
            ax.text(T_K - 273.15, w, string, rotation=rot, ha='center', va='center', bbox=bbox_opts)
    def __str__(self):
        # Script-generation twin of plot(); keep the two in sync.
        return indented_segment("""
        xv = Tdb #[K]
        for RH in {RHValues:s}:
            yv = [HAPropsSI('W','T',T,'P',p,'R',RH) for T in Tdb]
            y = HAPropsSI('W','P',p,'H',{h:f},'R',RH)
            T_K,w,rot = InlineLabel(xv, yv, y=y, axis = ax)
            string = r'$\phi$='+{s:s}+'%'
            bbox_opts = dict(boxstyle='square,pad=0.0',fc='white',ec='None',alpha = 0.5)
            ax.text(T_K-273.15,w,string,rotation = rot,ha ='center',va='center',bbox=bbox_opts)
        """.format(h=self.h, RHValues=str(self.RH_values), s="'{s:0.0f}'.format(s=RH*100)")
                               )
class HumidityLines(object):
    """Constant-relative-humidity curves; ``__str__`` emits the equivalent
    script fragment."""
    def __init__(self, RH_values):
        self.RH_values = RH_values
    def plot(self, ax):
        """Plot one thin red W(T) curve per relative-humidity value."""
        for RH in self.RH_values:
            w = [HAPropsSI('W', 'T', T, 'P', p, 'R', RH) for T in Tdb]
            ax.plot(Tdb - 273.15, w, 'r', lw=1)
    def __str__(self):
        # Script-generation twin of plot(); keep the two in sync.
        return indented_segment("""
        # Humidity lines
        RHValues = {RHValues:s}
        for RH in RHValues:
            w = [HAPropsSI('W','T',T,'P',p,'R',RH) for T in Tdb]
            ax.plot(Tdb-273.15,w,'r',lw=1)
        """.format(RHValues=str(self.RH_values))
                               )
class EnthalpyLines(object):
    """Constant-enthalpy lines drawn from saturation (R=1) to dry air (R=0);
    ``__str__`` emits the equivalent script fragment."""
    def __init__(self, H_values):
        self.H_values = H_values
    def plot(self, ax):
        """Plot one straight segment per enthalpy value."""
        for H in self.H_values:
            # Line goes from saturation to zero humidity ratio for this enthalpy
            T1 = HAPropsSI('T', 'H', H, 'P', p, 'R', 1.0) - 273.15
            T0 = HAPropsSI('T', 'H', H, 'P', p, 'R', 0.0) - 273.15
            w1 = HAPropsSI('W', 'H', H, 'P', p, 'R', 1.0)
            w0 = HAPropsSI('W', 'H', H, 'P', p, 'R', 0.0)
            ax.plot(numpy.r_[T1, T0], numpy.r_[w1, w0], 'r', lw=1)
    def __str__(self):
        # Script-generation twin of plot(); keep the two in sync.
        return indented_segment("""
        # Humidity lines
        for H in {HValues:s}:
            #Line goes from saturation to zero humidity ratio for this enthalpy
            T1 = HAPropsSI('T','H',H,'P',p,'R',1.0)-273.15
            T0 = HAPropsSI('T','H',H,'P',p,'R',0.0)-273.15
            w1 = HAPropsSI('W','H',H,'P',p,'R',1.0)
            w0 = HAPropsSI('W','H',H,'P',p,'R',0.0)
            ax.plot(numpy.r_[T1,T0],numpy.r_[w1,w0],'r',lw=1)
        """.format(HValues=str(self.H_values))
                               )
if __name__ == '__main__':
    # NOTE(review): this driver is Python 2 only -- it uses ``execfile`` and
    # writes bytes to a text-mode file handle.
    # When and_plot is True the chart is also rendered interactively while
    # the script is being generated.
    and_plot = False
    if and_plot:
        fig = matplotlib.pyplot.figure(figsize=(10, 8))
        ax = fig.add_axes((0.1, 0.1, 0.85, 0.85))
        ax.set_xlim(Tdb[0] - 273.15, Tdb[-1] - 273.15)
        ax.set_ylim(0, 0.03)
        ax.set_xlabel(r"Dry bulb temperature [$^{\circ}$C]")
        ax.set_ylabel(r"Humidity ratio ($m_{water}/m_{dry\ air}$) [-]")
    SL = SaturationLine()
    if and_plot: SL.plot(ax)
    RHL = HumidityLines([0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
    if and_plot: RHL.plot(ax)
    RHLabels = HumidityLabels([0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], h=65000)
    if and_plot: RHLabels.plot(ax)
    HL = EnthalpyLines(range(-20000, 100000, 10000))
    if and_plot: HL.plot(ax)
    PF = PlotFormatting()
    if and_plot: PF.plot(ax)
    if and_plot: matplotlib.pyplot.show()
    # Emit each element's script fragment into PsychScript.py, then run it.
    with open('PsychScript.py', 'w') as fp:
        for chunk in [import_template, SL, RHL, HL, PF, RHLabels, closure_template]:
            fp.write(str(chunk).encode('ascii'))
    execfile('PsychScript.py')
| [
"textwrap.dedent",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"CoolProp.HumidAirProp.HAPropsSI",
"numpy.linspace"
] | [((799, 827), 'numpy.linspace', 'numpy.linspace', (['(-10)', '(60)', '(100)'], {}), '(-10, 60, 100)\n', (813, 827), False, 'import matplotlib, numpy, textwrap\n'), ((5180, 5221), 'matplotlib.pyplot.figure', 'matplotlib.pyplot.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (5204, 5221), False, 'import matplotlib, numpy, textwrap\n'), ((5950, 5974), 'matplotlib.pyplot.show', 'matplotlib.pyplot.show', ([], {}), '()\n', (5972, 5974), False, 'import matplotlib, numpy, textwrap\n'), ((1538, 1578), 'CoolProp.HumidAirProp.HAPropsSI', 'HAPropsSI', (['"""W"""', '"""T"""', 'T', '"""P"""', 'p', '"""R"""', '(1.0)'], {}), "('W', 'T', T, 'P', p, 'R', 1.0)\n", (1547, 1578), False, 'from CoolProp.HumidAirProp import HAPropsSI\n'), ((2167, 2211), 'CoolProp.HumidAirProp.HAPropsSI', 'HAPropsSI', (['"""W"""', '"""P"""', 'p', '"""H"""', 'self.h', '"""R"""', 'RH'], {}), "('W', 'P', p, 'H', self.h, 'R', RH)\n", (2176, 2211), False, 'from CoolProp.HumidAirProp import HAPropsSI\n'), ((4326, 4366), 'CoolProp.HumidAirProp.HAPropsSI', 'HAPropsSI', (['"""W"""', '"""H"""', 'H', '"""P"""', 'p', '"""R"""', '(1.0)'], {}), "('W', 'H', H, 'P', p, 'R', 1.0)\n", (4335, 4366), False, 'from CoolProp.HumidAirProp import HAPropsSI\n'), ((4384, 4424), 'CoolProp.HumidAirProp.HAPropsSI', 'HAPropsSI', (['"""W"""', '"""H"""', 'H', '"""P"""', 'p', '"""R"""', '(0.0)'], {}), "('W', 'H', H, 'P', p, 'R', 0.0)\n", (4393, 4424), False, 'from CoolProp.HumidAirProp import HAPropsSI\n'), ((2097, 2136), 'CoolProp.HumidAirProp.HAPropsSI', 'HAPropsSI', (['"""W"""', '"""T"""', 'T', '"""P"""', 'p', '"""R"""', 'RH'], {}), "('W', 'T', T, 'P', p, 'R', RH)\n", (2106, 2136), False, 'from CoolProp.HumidAirProp import HAPropsSI\n'), ((3475, 3514), 'CoolProp.HumidAirProp.HAPropsSI', 'HAPropsSI', (['"""W"""', '"""T"""', 'T', '"""P"""', 'p', '"""R"""', 'RH'], {}), "('W', 'T', T, 'P', p, 'R', RH)\n", (3484, 3514), False, 'from CoolProp.HumidAirProp import HAPropsSI\n'), ((4192, 4232), 
'CoolProp.HumidAirProp.HAPropsSI', 'HAPropsSI', (['"""T"""', '"""H"""', 'H', '"""P"""', 'p', '"""R"""', '(1.0)'], {}), "('T', 'H', H, 'P', p, 'R', 1.0)\n", (4201, 4232), False, 'from CoolProp.HumidAirProp import HAPropsSI\n'), ((4259, 4299), 'CoolProp.HumidAirProp.HAPropsSI', 'HAPropsSI', (['"""T"""', '"""H"""', 'H', '"""P"""', 'p', '"""R"""', '(0.0)'], {}), "('T', 'H', H, 'P', p, 'R', 0.0)\n", (4268, 4299), False, 'from CoolProp.HumidAirProp import HAPropsSI\n'), ((923, 941), 'textwrap.dedent', 'textwrap.dedent', (['s'], {}), '(s)\n', (938, 941), False, 'import matplotlib, numpy, textwrap\n')] |
#!/usr/bin/python
import socket
import sys
from Crypto.Cipher import AES
from Crypto import Random
from config import Config
def createMsg(cfg, user, ip=None):
    """Build the authentication message: hex(salt + AES-CFB(user;password[;ip])).

    :param cfg: config object providing ``key`` (AES key) and ``password``
    :param user: user name to authenticate
    :param ip: optional source IP, appended as ';<ip>'; omitted when None
    :return: hex-encoded string of the 16-byte salt/IV followed by ciphertext
    """
    import binascii

    ipstring = ''
    if ip is not None:
        ipstring = ';' + ip
    # 16 random bytes serve as the salt and the CFB initialization vector.
    salt = Random.new().read(16)
    cipher = AES.new(cfg.key, AES.MODE_CFB, salt)
    plaintext = (user + ';' + cfg.password + ipstring).encode('utf-8')
    # str.encode('hex') only exists on Python 2; binascii.hexlify works on
    # both Python 2 and 3.
    return binascii.hexlify(salt + cipher.encrypt(plaintext)).decode('ascii')
def authenticate(cfg, user, srcip=None):
    """Connect to the auth server from cfg and send the encrypted message.

    :param cfg: config providing server ``ip`` and ``port``
    :param user: user name to authenticate
    :param srcip: optional source IP forwarded to createMsg
    """
    print('Creating socket...')
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        print('Connecting...')
        sock.connect((cfg.ip, cfg.port))
        print('Sending...')
        # sendall() retries until the whole message is written; a bare
        # send() may transmit only a prefix of the buffer.
        sock.sendall(createMsg(cfg, user, srcip))
        print('Sent.')
    finally:
        # Always release the socket, even if connect()/sendall() raises.
        sock.close()
def _arg_after(flag):
    """Return the sys.argv value following *flag*, or None when absent."""
    try:
        return sys.argv[sys.argv.index(flag) + 1]
    except ValueError:
        return None

def main():
    """Parse -c/-s/-u command-line flags and run the authentication."""
    try:
        cfgpath = _arg_after('-c')
        cfg = Config(cfgpath) if cfgpath is not None else Config()
    except IOError as e:
        print("Got IOError parsing config.")
        print(e)
        print("Using default ...")
        cfg = Config()
    # Pass None (not '') when -s is absent so createMsg omits the ';<ip>'
    # suffix instead of appending a dangling ';' to the message.
    srcip = _arg_after('-s')
    userid = _arg_after('-u')
    if userid is None:
        userid = cfg.defaultuser
    authenticate(cfg, userid, srcip)

if __name__ == '__main__':
    main()
| [
"config.Config",
"socket.socket",
"Crypto.Cipher.AES.new",
"sys.argv.index",
"Crypto.Random.new"
] | [((280, 316), 'Crypto.Cipher.AES.new', 'AES.new', (['cfg.key', 'AES.MODE_CFB', 'salt'], {}), '(cfg.key, AES.MODE_CFB, salt)\n', (287, 316), False, 'from Crypto.Cipher import AES\n'), ((518, 567), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (531, 567), False, 'import socket\n'), ((1033, 1053), 'sys.argv.index', 'sys.argv.index', (['"""-s"""'], {}), "('-s')\n", (1047, 1053), False, 'import sys\n'), ((1163, 1183), 'sys.argv.index', 'sys.argv.index', (['"""-u"""'], {}), "('-u')\n", (1177, 1183), False, 'import sys\n'), ((245, 257), 'Crypto.Random.new', 'Random.new', ([], {}), '()\n', (255, 257), False, 'from Crypto import Random\n'), ((851, 859), 'config.Config', 'Config', ([], {}), '()\n', (857, 859), False, 'from config import Config\n'), ((996, 1004), 'config.Config', 'Config', ([], {}), '()\n', (1002, 1004), False, 'from config import Config\n'), ((787, 807), 'sys.argv.index', 'sys.argv.index', (['"""-c"""'], {}), "('-c')\n", (801, 807), False, 'import sys\n')] |
from functools import partial
from math import inf, nan
from typing import Union
import hypothesis.extra.numpy as npst
import torch
from hypothesis import given, strategies as st
from hypothesis.strategies import SearchStrategy
from pytest import fixture, mark
from torch import allclose, as_tensor, isclose, tensor
from phytorch.roots import companion_matrix, roots, sroots, vieta
from tests.common.dtypes import make_dtype_tests, with_default_double
def compare_sets(a, b, **kwargs):
    """Return a boolean tensor: every element of ``a`` (last dim) is close to
    some element of ``b``; tolerance kwargs are forwarded to ``isclose``."""
    lhs = a.unsqueeze(-1)
    rhs = as_tensor(b, dtype=a.dtype).unsqueeze(-2)
    pairwise = isclose(lhs, rhs, **kwargs)
    return pairwise.any(-1).all(-1)
def coeffs_strategy(
        n: Union[int, SearchStrategy[int]] = st.integers(min_value=2, max_value=4),
        dtype=complex,
        elements=st.complex_numbers(min_magnitude=1e-6, max_magnitude=1e6, allow_nan=False, allow_infinity=False)):
    """Hypothesis strategy yielding tuples of mutually broadcastable coefficient tensors.

    ``n`` may be an int (upper bound on the coefficient count) or a strategy
    drawing that count; ``elements`` keeps each complex entry's magnitude
    bounded away from 0 and infinity.
    """
    # An int n is normalised into a strategy drawing the count from [2, n].
    if isinstance(n, int):
        return coeffs_strategy(st.integers(min_value=2, max_value=n))
    # For each drawn count: draw mutually broadcastable shapes (<=3 dims,
    # side <=16), then one complex numpy array per shape, mapped to a tensor.
    return n.flatmap(lambda n: npst.mutually_broadcastable_shapes(num_shapes=n, max_dims=3, max_side=16).flatmap(
        lambda shapes: st.tuples(*(npst.arrays(dtype, shape, elements=elements).map(lambda arr: tensor(arr)) for shape in shapes.input_shapes))
    ))
def test_companion_matrix():
    """companion_matrix of coefficients (-1, -2, -3) matches the known 3x3 form."""
    expected = tensor([
        [1, 2, 3],
        [1, 0, 0],
        [0, 1, 0]
    ])
    result = companion_matrix(tensor(-1), tensor(-2), tensor(-3))
    assert (result == expected).all()
@given(coeffs_strategy())
def test_companion_matrix_batched(coeffs):
    # The batched companion matrix broadcasts all coefficient shapes and
    # appends a (k, k) matrix dimension, k being the number of coefficients.
    assert companion_matrix(*coeffs).shape == torch.broadcast_shapes(*(c.shape for c in coeffs)) + 2*(len(coeffs),)
@mark.xfail(reason='flaky', strict=False)
@with_default_double
@given(coeffs_strategy())
def test_vieta(coeffs):
    # Round trip: roots -> Vieta's formulas should reproduce the original
    # coefficients; vieta's leading entry is skipped, hence the [1:] slice.
    for c, _c in zip(coeffs, vieta(roots(*coeffs))[1:]):
        assert allclose(_c, c, rtol=1e-3, atol=1e-3)
@with_default_double
@given(coeffs_strategy())
def test_analytic_vs_numeric(coeffs):
    # The closed-form roots and the numeric (force_numeric=True) roots must
    # agree as sets, within a loose tolerance.
    assert compare_sets(
        sroots(*coeffs, dim=-1),
        sroots(*coeffs, dim=-1, force_numeric=True),
        rtol=1e-3, atol=1e-3
    ).all()
class RootsTest:
    """Shared root-finding checks; dtype-specific variants are generated below
    via make_dtype_tests."""
    # Each entry pairs polynomial coefficients with the known closed-form root
    # set (order irrelevant: compare_sets does a set-wise comparison).
    @mark.parametrize('coeffs, vals', (
        ((0, 0), (0, 0)),
        ((1, 0), (-1, 0)),
        ((0, 1), (-1j, 1j)),
        ((0, 0, 0), (0, 0, 0)),
        ((1, 0, 0), (-1, 0, 0)),
        ((0, 1, 0), (0, 1j, -1j)),
        ((0, 0, 1), (-1, (-1)**(1 / 3), -(-1)**(2 / 3))),
        ((1, 1, 0), (0, (-1)**(2 / 3), -(-1)**(1 / 3))),
        ((0, 0, 0, 0), (0, 0, 0, 0)),
        ((1, 0, 0, 0), (0, -1, 0, -1)),
        ((0, 1, 0, 0), (-1j, 0, 1j, 0)),
        ((0, 0, 1, 0), (-1, 0, (-1)**(1 / 3), -(-1)**(2 / 3))),
        ((0, 0, 0, 1), (-(-1)**(1 / 4), (-1)**(3 / 4), (-1)**(1 / 4), -(-1)**(3 / 4))),
        ((1, 1, 0, 0), (-(-1)**(1 / 3), (-1)**(2 / 3), 0, 0)),
        ((0, 1, 0, 1), (-(-1)**(1 / 3), (-1)**(2 / 3), -(-1)**(2 / 3), (-1)**(1 / 3))),
        ((1, 1, 1, 0), (-1, 0, 1j, -1j))
    ))
    def test_special(self, coeffs, vals):
        # Degrees 2-4 with known analytic root sets.
        assert compare_sets(sroots(*coeffs), vals).all()

    @staticmethod
    def test_finite():
        # Any NaN or infinite coefficient should return NaN
        for n in (2, 3, 4):
            assert sroots(*(n-1)*(1,)+(nan,)).isnan().all()
            assert sroots(*(n-1)*(1,)+(inf,)).isnan().all()
class ForceNumericRootsTest(RootsTest):
    """Re-runs RootsTest with the module-global ``roots`` patched to always
    take the numeric (eigenvalue) path."""
    @staticmethod
    @fixture(autouse=True, scope='class')
    def _set_force_numeric():
        # see e.g. https://github.com/pytest-dev/pytest/issues/363
        # for why this workaround is needed
        from _pytest.monkeypatch import MonkeyPatch
        mpatch = MonkeyPatch()
        # Rebind 'roots' in this module's globals for the duration of the
        # class; undone after the yield when the class finishes.
        mpatch.setitem(globals(), 'roots', partial(roots, force_numeric=True))
        yield
        mpatch.undo()

    # @mark.xfail(reason='NaN in eig (https://github.com/pytorch/pytorch/issues/61251)', strict=True)
    @mark.skip(reason='segfaults, so cannot recover...')
    def test_finite(self): ...
# Register dtype-specific variants of the test classes at module level so
# that pytest collects them.
globals().update(make_dtype_tests((RootsTest,), 'Roots'))
globals().update(make_dtype_tests((ForceNumericRootsTest,), 'ForceNumericRoots'))
| [
"functools.partial",
"phytorch.roots.companion_matrix",
"hypothesis.extra.numpy.arrays",
"pytest.fixture",
"tests.common.dtypes.make_dtype_tests",
"_pytest.monkeypatch.MonkeyPatch",
"phytorch.roots.sroots",
"hypothesis.strategies.complex_numbers",
"torch.broadcast_shapes",
"phytorch.roots.roots",
... | [((1571, 1611), 'pytest.mark.xfail', 'mark.xfail', ([], {'reason': '"""flaky"""', 'strict': '(False)'}), "(reason='flaky', strict=False)\n", (1581, 1611), False, 'from pytest import fixture, mark\n'), ((663, 700), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(2)', 'max_value': '(4)'}), '(min_value=2, max_value=4)\n', (674, 700), True, 'from hypothesis import given, strategies as st\n'), ((742, 850), 'hypothesis.strategies.complex_numbers', 'st.complex_numbers', ([], {'min_magnitude': '(1e-06)', 'max_magnitude': '(1000000.0)', 'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(min_magnitude=1e-06, max_magnitude=1000000.0, allow_nan=\n False, allow_infinity=False)\n', (760, 850), True, 'from hypothesis import given, strategies as st\n'), ((2056, 2811), 'pytest.mark.parametrize', 'mark.parametrize', (['"""coeffs, vals"""', '(((0, 0), (0, 0)), ((1, 0), (-1, 0)), ((0, 1), (-1.0j, 1.0j)), ((0, 0, 0),\n (0, 0, 0)), ((1, 0, 0), (-1, 0, 0)), ((0, 1, 0), (0, 1.0j, -1.0j)), ((0,\n 0, 1), (-1, (-1) ** (1 / 3), -(-1) ** (2 / 3))), ((1, 1, 0), (0, (-1) **\n (2 / 3), -(-1) ** (1 / 3))), ((0, 0, 0, 0), (0, 0, 0, 0)), ((1, 0, 0, 0\n ), (0, -1, 0, -1)), ((0, 1, 0, 0), (-1.0j, 0, 1.0j, 0)), ((0, 0, 1, 0),\n (-1, 0, (-1) ** (1 / 3), -(-1) ** (2 / 3))), ((0, 0, 0, 1), (-(-1) ** (\n 1 / 4), (-1) ** (3 / 4), (-1) ** (1 / 4), -(-1) ** (3 / 4))), ((1, 1, 0,\n 0), (-(-1) ** (1 / 3), (-1) ** (2 / 3), 0, 0)), ((0, 1, 0, 1), (-(-1) **\n (1 / 3), (-1) ** (2 / 3), -(-1) ** (2 / 3), (-1) ** (1 / 3))), ((1, 1, \n 1, 0), (-1, 0, 1.0j, -1.0j)))'], {}), "('coeffs, vals', (((0, 0), (0, 0)), ((1, 0), (-1, 0)), ((0,\n 1), (-1.0j, 1.0j)), ((0, 0, 0), (0, 0, 0)), ((1, 0, 0), (-1, 0, 0)), ((\n 0, 1, 0), (0, 1.0j, -1.0j)), ((0, 0, 1), (-1, (-1) ** (1 / 3), -(-1) **\n (2 / 3))), ((1, 1, 0), (0, (-1) ** (2 / 3), -(-1) ** (1 / 3))), ((0, 0,\n 0, 0), (0, 0, 0, 0)), ((1, 0, 0, 0), (0, -1, 0, -1)), ((0, 1, 0, 0), (-\n 1.0j, 0, 1.0j, 0)), ((0, 0, 1, 0), (-1, 0, (-1) ** (1 / 3), 
-(-1) ** (2 /\n 3))), ((0, 0, 0, 1), (-(-1) ** (1 / 4), (-1) ** (3 / 4), (-1) ** (1 / 4\n ), -(-1) ** (3 / 4))), ((1, 1, 0, 0), (-(-1) ** (1 / 3), (-1) ** (2 / 3\n ), 0, 0)), ((0, 1, 0, 1), (-(-1) ** (1 / 3), (-1) ** (2 / 3), -(-1) **\n (2 / 3), (-1) ** (1 / 3))), ((1, 1, 1, 0), (-1, 0, 1.0j, -1.0j))))\n", (2072, 2811), False, 'from pytest import fixture, mark\n'), ((3274, 3310), 'pytest.fixture', 'fixture', ([], {'autouse': '(True)', 'scope': '"""class"""'}), "(autouse=True, scope='class')\n", (3281, 3310), False, 'from pytest import fixture, mark\n'), ((3758, 3809), 'pytest.mark.skip', 'mark.skip', ([], {'reason': '"""segfaults, so cannot recover..."""'}), "(reason='segfaults, so cannot recover...')\n", (3767, 3809), False, 'from pytest import fixture, mark\n'), ((3860, 3899), 'tests.common.dtypes.make_dtype_tests', 'make_dtype_tests', (['(RootsTest,)', '"""Roots"""'], {}), "((RootsTest,), 'Roots')\n", (3876, 3899), False, 'from tests.common.dtypes import make_dtype_tests, with_default_double\n'), ((3918, 3981), 'tests.common.dtypes.make_dtype_tests', 'make_dtype_tests', (['(ForceNumericRootsTest,)', '"""ForceNumericRoots"""'], {}), "((ForceNumericRootsTest,), 'ForceNumericRoots')\n", (3934, 3981), False, 'from tests.common.dtypes import make_dtype_tests, with_default_double\n'), ((1755, 1794), 'torch.allclose', 'allclose', (['_c', 'c'], {'rtol': '(0.001)', 'atol': '(0.001)'}), '(_c, c, rtol=0.001, atol=0.001)\n', (1763, 1794), False, 'from torch import allclose, as_tensor, isclose, tensor\n'), ((3521, 3534), '_pytest.monkeypatch.MonkeyPatch', 'MonkeyPatch', ([], {}), '()\n', (3532, 3534), False, 'from _pytest.monkeypatch import MonkeyPatch\n'), ((899, 936), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(2)', 'max_value': 'n'}), '(min_value=2, max_value=n)\n', (910, 936), True, 'from hypothesis import given, strategies as st\n'), ((1463, 1488), 'phytorch.roots.companion_matrix', 'companion_matrix', (['*coeffs'], {}), '(*coeffs)\n', (1479, 
1488), False, 'from phytorch.roots import companion_matrix, roots, sroots, vieta\n'), ((1498, 1548), 'torch.broadcast_shapes', 'torch.broadcast_shapes', (['*(c.shape for c in coeffs)'], {}), '(*(c.shape for c in coeffs))\n', (1520, 1548), False, 'import torch\n'), ((3578, 3612), 'functools.partial', 'partial', (['roots'], {'force_numeric': '(True)'}), '(roots, force_numeric=True)\n', (3585, 3612), False, 'from functools import partial\n'), ((1302, 1343), 'torch.tensor', 'tensor', (['[[1, 2, 3], [1, 0, 0], [0, 1, 0]]'], {}), '([[1, 2, 3], [1, 0, 0], [0, 1, 0]])\n', (1308, 1343), False, 'from torch import allclose, as_tensor, isclose, tensor\n'), ((1718, 1732), 'phytorch.roots.roots', 'roots', (['*coeffs'], {}), '(*coeffs)\n', (1723, 1732), False, 'from phytorch.roots import companion_matrix, roots, sroots, vieta\n'), ((1913, 1936), 'phytorch.roots.sroots', 'sroots', (['*coeffs'], {'dim': '(-1)'}), '(*coeffs, dim=-1)\n', (1919, 1936), False, 'from phytorch.roots import companion_matrix, roots, sroots, vieta\n'), ((1946, 1989), 'phytorch.roots.sroots', 'sroots', (['*coeffs'], {'dim': '(-1)', 'force_numeric': '(True)'}), '(*coeffs, dim=-1, force_numeric=True)\n', (1952, 1989), False, 'from phytorch.roots import companion_matrix, roots, sroots, vieta\n'), ((969, 1042), 'hypothesis.extra.numpy.mutually_broadcastable_shapes', 'npst.mutually_broadcastable_shapes', ([], {'num_shapes': 'n', 'max_dims': '(3)', 'max_side': '(16)'}), '(num_shapes=n, max_dims=3, max_side=16)\n', (1003, 1042), True, 'import hypothesis.extra.numpy as npst\n'), ((1263, 1273), 'torch.tensor', 'tensor', (['(-1)'], {}), '(-1)\n', (1269, 1273), False, 'from torch import allclose, as_tensor, isclose, tensor\n'), ((1275, 1285), 'torch.tensor', 'tensor', (['(-2)'], {}), '(-2)\n', (1281, 1285), False, 'from torch import allclose, as_tensor, isclose, tensor\n'), ((1287, 1297), 'torch.tensor', 'tensor', (['(-3)'], {}), '(-3)\n', (1293, 1297), False, 'from torch import allclose, as_tensor, isclose, 
tensor\n'), ((2930, 2945), 'phytorch.roots.sroots', 'sroots', (['*coeffs'], {}), '(*coeffs)\n', (2936, 2945), False, 'from phytorch.roots import companion_matrix, roots, sroots, vieta\n'), ((3108, 3142), 'phytorch.roots.sroots', 'sroots', (['*((n - 1) * (1,) + (nan,))'], {}), '(*((n - 1) * (1,) + (nan,)))\n', (3114, 3142), False, 'from phytorch.roots import companion_matrix, roots, sroots, vieta\n'), ((3168, 3202), 'phytorch.roots.sroots', 'sroots', (['*((n - 1) * (1,) + (inf,))'], {}), '(*((n - 1) * (1,) + (inf,)))\n', (3174, 3202), False, 'from phytorch.roots import companion_matrix, roots, sroots, vieta\n'), ((526, 553), 'torch.as_tensor', 'as_tensor', (['b'], {'dtype': 'a.dtype'}), '(b, dtype=a.dtype)\n', (535, 553), False, 'from torch import allclose, as_tensor, isclose, tensor\n'), ((1087, 1131), 'hypothesis.extra.numpy.arrays', 'npst.arrays', (['dtype', 'shape'], {'elements': 'elements'}), '(dtype, shape, elements=elements)\n', (1098, 1131), True, 'import hypothesis.extra.numpy as npst\n'), ((1148, 1159), 'torch.tensor', 'tensor', (['arr'], {}), '(arr)\n', (1154, 1159), False, 'from torch import allclose, as_tensor, isclose, tensor\n')] |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, EqualTo
class RegisterForm(FlaskForm):
    """Registration form: username, password and a matching confirmation."""
    username = StringField('用户名', validators=[DataRequired()])
    password = PasswordField('密码', validators=[DataRequired()])
    # The confirmation field must equal the first password entry. EqualTo is
    # imported at the top of this file but was previously unused, so
    # mismatched passwords were silently accepted.
    password_2 = PasswordField('<PASSWORD>', validators=[DataRequired(), EqualTo('password')])
class LoginForm(FlaskForm):
    """Login form: username and password, both required."""
    username = StringField('用户名', validators=[DataRequired()])
    password = PasswordField('密码', validators=[DataRequired()])
"wtforms.validators.DataRequired"
] | [((233, 247), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (245, 247), False, 'from wtforms.validators import DataRequired, EqualTo\n'), ((295, 309), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (307, 309), False, 'from wtforms.validators import DataRequired, EqualTo\n'), ((365, 379), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (377, 379), False, 'from wtforms.validators import DataRequired, EqualTo\n'), ((480, 494), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (492, 494), False, 'from wtforms.validators import DataRequired, EqualTo\n'), ((542, 556), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (554, 556), False, 'from wtforms.validators import DataRequired, EqualTo\n')] |
import pandas as pd
import unittest
import common_python.constants as cn
from common_python.util.item_aggregator import ItemAggregator
IGNORE_TEST = False  # set True to skip individual tests while debugging
SIZE = 10  # number of fixture items
MULT = 5  # second tuple element is MULT times the first
ITEMS = [(n, MULT*n) for n in range(SIZE)]  # fixture pairs (n, 5*n)
class TestItemAggregator(unittest.TestCase):
    """Tests for ItemAggregator aggregating series extracted from item tuples."""
    def setUp(self):
        # Aggregator keyed on the first element of each (n, MULT*n) pair.
        self.aggregator = ItemAggregator(lambda v: v[0])
    def testConstructor(self):
        if IGNORE_TEST:
            return
        # No dataframe exists before any data has been appended.
        self.assertIsNone(self.aggregator._df)
    def testAppend(self):
        if IGNORE_TEST:
            return
        # Each append() call adds one series covering all SIZE items.
        self.aggregator.append(ITEMS)
        self.assertEqual(len(self.aggregator.sers[0]), SIZE)
        self.aggregator.append(ITEMS)
        self.assertEqual(len(self.aggregator.sers), 2)
    def testDf(self):
        if IGNORE_TEST:
            return
        # Since items are (n, MULT*n), aggregating the second element should
        # equal MULT times the aggregate of the first element.
        aggregator1 = ItemAggregator(lambda v: v[1])
        for agg in [self.aggregator, aggregator1]:
            agg.append(ITEMS)
            agg.append(ITEMS)
        df = MULT*self.aggregator.df
        self.assertTrue(aggregator1.df[cn.MEAN].equals(df[cn.MEAN]))
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"common_python.util.item_aggregator.ItemAggregator"
] | [((1008, 1023), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1021, 1023), False, 'import unittest\n'), ((307, 337), 'common_python.util.item_aggregator.ItemAggregator', 'ItemAggregator', (['(lambda v: v[0])'], {}), '(lambda v: v[0])\n', (321, 337), False, 'from common_python.util.item_aggregator import ItemAggregator\n'), ((750, 780), 'common_python.util.item_aggregator.ItemAggregator', 'ItemAggregator', (['(lambda v: v[1])'], {}), '(lambda v: v[1])\n', (764, 780), False, 'from common_python.util.item_aggregator import ItemAggregator\n')] |
"""
Unit tests for Deployment class
"""
from unittest.mock import MagicMock, Mock
import pytest
from colour import Colour
from deployment import Deployment
BLACK = Colour("black")
@pytest.fixture(name="simple_deployment")
def simple_deployment_fixture(simple_box):
"""
This fixture will return a simple Deployment
with width 10, height 20, x_pos 5, y_pos 15, colour black
and name "simple"
"""
deployment = Deployment("simple", simple_box)
return deployment
@pytest.mark.parametrize("name", ["Steve", "Bob"])
def test_create_deployment(name, simple_box):
"""
Test that we can create a deployment with the right name
"""
deployment = Deployment(name, simple_box)
assert deployment.name == name
def test_draw(simple_deployment):
"""
Test that a Deployment basically, we just want the deployment to
call "draw" on its box
"""
driver = Mock()
simple_deployment.box.draw = MagicMock()
simple_deployment.draw(driver)
simple_deployment.box.draw.assert_called_once()
| [
"unittest.mock.MagicMock",
"unittest.mock.Mock",
"pytest.fixture",
"colour.Colour",
"deployment.Deployment",
"pytest.mark.parametrize"
] | [((168, 183), 'colour.Colour', 'Colour', (['"""black"""'], {}), "('black')\n", (174, 183), False, 'from colour import Colour\n'), ((187, 227), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""simple_deployment"""'}), "(name='simple_deployment')\n", (201, 227), False, 'import pytest\n'), ((495, 544), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name"""', "['Steve', 'Bob']"], {}), "('name', ['Steve', 'Bob'])\n", (518, 544), False, 'import pytest\n'), ((437, 469), 'deployment.Deployment', 'Deployment', (['"""simple"""', 'simple_box'], {}), "('simple', simple_box)\n", (447, 469), False, 'from deployment import Deployment\n'), ((685, 713), 'deployment.Deployment', 'Deployment', (['name', 'simple_box'], {}), '(name, simple_box)\n', (695, 713), False, 'from deployment import Deployment\n'), ((910, 916), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (914, 916), False, 'from unittest.mock import MagicMock, Mock\n'), ((950, 961), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (959, 961), False, 'from unittest.mock import MagicMock, Mock\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pyprometheus.values
~~~~~~~~~~~~~~~~~~~
Prometheus instrumentation library for Python applications
:copyright: (c) 2017 by <NAME>.
:license: , see LICENSE for more details.
:github: http://github.com/Lispython/pyprometheus
"""
import time
from pyprometheus.utils import escape_str
from pyprometheus.const import TYPES
from pyprometheus.managers import TimerManager, InprogressTrackerManager, GaugeTimerManager
class MetricValue(object):
    """Base metric value: one (metric, label-set) time series.

    Values are read from and written to the metric's storage backend under a
    key derived from (TYPE, metric name, POSTFIX, sorted labels).
    """
    TYPE = TYPES.BASE
    POSTFIX = ""
    def __init__(self, metric, label_values={}, value=None):
        self._metric = metric
        # Raise early if the supplied labels do not match the metric's schema.
        self.validate_labels(metric.label_names, label_values)
        self._labels, self._label_values = self.prepare_labels(label_values)
        self._value = value
    @staticmethod
    def prepare_labels(label_values):
        """Normalise labels into (sorted tuple of pairs, dict) regardless of
        whether a list/tuple of pairs or a dict was supplied."""
        if isinstance(label_values, (list, tuple)):
            labels = tuple(sorted(label_values, key=lambda x: x[0]))
        elif isinstance(label_values, dict):
            labels = tuple(sorted(label_values.items(), key=lambda x: x[0]))
        return labels, dict(label_values)
    @property
    def metric(self):
        return self._metric
    def set_value(self, value):
        # Pin an in-memory value; get() then short-circuits past the storage.
        self._value = value
    def __repr__(self):
        return u"<{0}[{1}]: {2} -> {3}>".format(
            self.__class__.__name__, self._metric.name,
            str(self._labels).replace("'", "\""), self.__repr_value__())
    def validate_labels(self, label_names, labels):
        """Raise RuntimeError when the label count differs from the schema."""
        if len(labels) != len(label_names):
            raise RuntimeError(u"Invalid label values size: {0} != {1}".format(
                len(label_names), len(labels)))
    def __repr_value__(self):
        # Subclasses override this to customise __repr__'s value part.
        return self.get()
    # def __str__(self):
    #     return u"{0}{1}".format(self.__class__.__name__, self._labels)
    @property
    def key(self):
        # Storage key identifying this time series.
        return (self.TYPE, self._metric.name, self.POSTFIX, self._labels)
    def inc(self, amount=1):
        return self._metric._storage.inc_value(self.key, amount)
    def get(self):
        # Do not lookup storage if value 0
        if self._value is not None:
            return self._value
        return self._metric._storage.get_value(self.key)
    @property
    def value(self):
        return self.get()
    @property
    def export_str(self):
        # Prometheus text-format line: name{labels} value timestamp_ms.
        return "{name}{postfix}{{{labels}}} {value} {timestamp}".format(
            name=escape_str(self._metric.name), postfix=self.POSTFIX,
            labels=self.export_labels, timestamp=int(time.time() * 1000), value=float(self.value))
    @property
    def export_labels(self):
        return ", ".join(["{0}=\"{1}\"".format(self.format_export_label(name), self.format_export_value(value))
                          for name, value in self._labels])
    def format_export_label(self, label):
        # Internal "bucket" label is exported under Prometheus' "le" name.
        if label == "bucket":
            return "le"
        return escape_str(label)
    def format_export_value(self, value):
        # Infinities use the Prometheus spellings +Inf/-Inf.
        if value == float("inf"):
            return "+Inf"
        elif value == float("-inf"):
            return "-Inf"
        # elif math.isnan(value):
        #     return "NaN"
        return escape_str(str(value))
class GaugeValue(MetricValue):
    """Gauge value: can be incremented, decremented, set, timed and tracked."""
    TYPE = TYPES.GAUGE
    def dec(self, amount=1):
        # Decrement is an increment by the negated amount.
        self.inc(-amount)
    def set(self, value):
        # Gauges (unlike counters) may be written directly.
        self._metric._storage.write_value(self.key, value)
        return value
    @property
    def value(self):
        # Same as the base implementation; kept as an explicit override.
        return self.get()
    def track_in_progress(self):
        # Manager tracking in-progress work for this gauge -- presumably
        # inc on enter / dec on exit; confirm against InprogressTrackerManager.
        return InprogressTrackerManager(self)
    def set_to_current_time(self):
        # Store the current UNIX timestamp (seconds).
        return self.set(time.time())
    def time(self):
        return GaugeTimerManager(self)
class CounterValue(MetricValue):
    """Counter-style value; note inc() itself does not enforce monotonicity."""
    TYPE = TYPES.COUNTER
    @property
    def value(self):
        # Same as the base implementation; kept as an explicit override.
        return self.get()
class SummarySumValue(CounterValue):
    """Sum-of-observations series of a summary (<basename>_sum)."""
    TYPE = TYPES.SUMMARY_SUM
    POSTFIX = "_sum"
class SummaryCountValue(CounterValue):
    """Observation-count series of a summary (<basename>_count)."""
    TYPE = TYPES.SUMMARY_COUNTER
    POSTFIX = "_count"
class SummaryQuantilyValue(GaugeValue):
    """A single quantile time series of a summary (``<name>{quantile="φ"}``)."""
    TYPE = TYPES.SUMMARY_QUANTILE
    POSTFIX = "_quantile"

    def __init__(self, metric, label_values={}, quantile=0, value=None):
        # Copy before mutating so the caller's mapping is left untouched.
        label_values = dict(label_values).copy()
        label_values["quantile"] = quantile
        self._quantile = quantile
        super(SummaryQuantilyValue, self).__init__(metric, label_values, value)

    def validate_labels(self, label_names, labels):
        """Expect one extra label ('quantile') on top of the metric's own labels."""
        if len(labels) != len(label_names) + 1:
            # Report expected vs. actual counts; the previous message printed
            # len(label_names) vs. len(labels) + 1, which misstated both sides.
            raise RuntimeError(u"Invalid label values size: {0} != {1}".format(
                len(label_names) + 1, len(labels)))

    def __repr_value__(self):
        return u"{0} -> {1}".format(self._quantile, self._value)

    # NOTE: the redundant `key` property override (identical to
    # MetricValue.key) was removed; the inherited implementation is used.
class SummaryValue(MetricValue):
    u"""
    summary with a base metric name of <basename> exposes multiple time series during a scrape:
    streaming φ-quantiles (0 ≤ φ ≤ 1) of observed events, exposed as <basename>{quantile="<φ>"}
    the total sum of all observed values, exposed as <basename>_sum
    the count of events that have been observed, exposed as <basename>_count
    """
    TYPE = TYPES.SUMMARY
    SUBTYPES = {
        "_sum": SummarySumValue,
        "_count": SummaryCountValue,
        "_quantile": SummaryQuantilyValue
    }
    def __init__(self, metric, label_values={}, value={}):
        super(SummaryValue, self).__init__(metric, label_values=label_values)
        # NOTE(review): value.pop(...) mutates a caller-supplied dict; the {}
        # default itself is never mutated (pop with a default on a missing key
        # does not write), but passing a shared dict in has a side effect.
        self._sum = value.pop("sum", None) or SummarySumValue(self._metric, label_values=self._label_values)
        self._count = value.pop("count", None) or SummaryCountValue(self._metric, label_values=self._label_values)
        # One quantile child per configured quantile; none when the metric has
        # no list/tuple of quantiles.
        if isinstance(self._metric.quantiles, (list, tuple)):
            self._quantiles = value.pop("quantiles", []) or [SummaryQuantilyValue(self._metric, label_values=self._label_values, quantile=quantile)
                                                             for quantile in self._metric.quantiles]
        else:
            self._quantiles = []
    def __repr_value__(self):
        return u"sum={sum} / count={count} = {value} [{quantiles}]".format(
            **{
                "sum": self._sum.value,
                "count": self._count.value,
                "value": (self._sum.value / self._count.value) if self._count.value != 0 else "-",
                "quantiles": ", ".join([x.__repr_value__() for x in self._quantiles]) if self._quantiles else "empty"
            }
        )
    def observe(self, amount):
        """Record one observation into the _sum and _count child series."""
        self._sum.inc(amount)
        self._count.inc()
        # TODO: calculate quantiles
        # for quantile, value in self._quantiles:
        #     pass
    @property
    def value(self):
        return {
            "sum": self._sum,
            "count": self._count,
            "quantiles": self._quantiles}
    @property
    def export_str(self):
        # One text-format line per child series, newline-joined.
        return "\n".join([self._sum.export_str, self._count.export_str] + [quantile.export_str for quantile in self._quantiles])
    def time(self):
        return TimerManager(self)
class HistogramCountValue(SummaryCountValue):
    """Observation-count series of a histogram (<basename>_count)."""
    TYPE = TYPES.HISTOGRAM_COUNTER
    POSTFIX = "_count"
class HistogramSumValue(SummarySumValue):
    """Sum-of-observations series of a histogram (<basename>_sum)."""
    TYPE = TYPES.HISTOGRAM_SUM
    POSTFIX = "_sum"
class HistogramBucketValue(SummaryCountValue):
    """Histogram bucket series: <basename>_bucket{le="<upper inclusive bound>"}.

    The internal 'bucket' label is renamed to 'le' on export (see
    MetricValue.format_export_label).
    """
    POSTFIX = "_bucket"
    TYPE = TYPES.HISTOGRAM_BUCKET
    def __init__(self, metric, label_values={}, bucket=None, value=None):
        # Copy before mutating so the caller's mapping is left untouched.
        label_values = dict(label_values).copy()
        label_values["bucket"] = bucket
        self._bucket_threshold = bucket
        super(HistogramBucketValue, self).__init__(metric, label_values, value)
    def __repr_value__(self):
        return u"{0} -> {1}".format(self._bucket_threshold, self._value)
    @property
    def bucket_threshold(self):
        # Upper bound of this bucket; exported under the "le" label name.
        return self._bucket_threshold
    def validate_labels(self, label_names, labels):
        # One extra label ("bucket") is injected on top of the metric's own labels.
        if len(labels) != len(label_names) + 1:
            raise RuntimeError(u"Invalid label values size: {0} != {1}".format(
                len(label_names), len(labels) + 1))
class HistogramValue(MetricValue):
    """Histogram metric value: cumulative 'le' buckets plus _sum and _count."""
    TYPE = TYPES.HISTOGRAM
    SUBTYPES = {
        "_sum": HistogramSumValue,
        "_count": HistogramCountValue,
        "_bucket": HistogramBucketValue
    }

    def __init__(self, metric, label_values={}, value={}):
        # Set _buckets before the base __init__ (mirrors the original
        # ordering; base-class code may repr this instance during setup).
        self._buckets = []
        super(HistogramValue, self).__init__(metric, label_values=label_values)
        # NOTE: value.pop(...) mutates a caller-supplied dict; the {} default
        # itself is never mutated by pop-with-default on missing keys.
        self._sum = value.pop("sum", None) or HistogramSumValue(self._metric, label_values=self._label_values)
        self._count = value.pop("count", None) or HistogramCountValue(self._metric, label_values=self._label_values)
        self._buckets = (value.pop("buckets", []) or [HistogramBucketValue(self._metric, label_values=self._label_values, bucket=bucket)
                                                      for bucket in sorted(self._metric.buckets)])

    def __repr_value__(self):
        return u"sum={sum} / count={count} = {value} [{buckets}]".format(
            **{
                "sum": self._sum.__repr_value__(),
                "count": self._count.__repr_value__(),
                "value": (self._sum.value / self._count.value) if self._count.value != 0 else "-",
                "buckets": ", ".join([x.__repr_value__() for x in self._buckets]) if self._buckets else "empty"
            }
        )

    def observe(self, amount):
        """Record one observation: update _sum, _count and each bucket."""
        self._sum.inc(amount)
        self._count.inc()
        for bucket in self._buckets:
            # Bucket bounds are exported as Prometheus "le" (upper INCLUSIVE)
            # labels, so the comparison must be <=; the previous strict '<'
            # dropped observations landing exactly on a bucket boundary.
            bucket.inc(int(amount <= bucket.bucket_threshold))

    @property
    def value(self):
        return {
            "sum": self._sum,
            "count": self._count,
            "buckets": self._buckets
        }

    @property
    def export_str(self):
        # One text-format line per child series, newline-joined.
        return "\n".join([self._sum.export_str, self._count.export_str] + [bucket.export_str for bucket in self._buckets])

    def time(self):
        return TimerManager(self)
| [
"pyprometheus.managers.GaugeTimerManager",
"pyprometheus.managers.TimerManager",
"time.time",
"pyprometheus.managers.InprogressTrackerManager",
"pyprometheus.utils.escape_str"
] | [((2941, 2958), 'pyprometheus.utils.escape_str', 'escape_str', (['label'], {}), '(label)\n', (2951, 2958), False, 'from pyprometheus.utils import escape_str\n'), ((3555, 3585), 'pyprometheus.managers.InprogressTrackerManager', 'InprogressTrackerManager', (['self'], {}), '(self)\n', (3579, 3585), False, 'from pyprometheus.managers import TimerManager, InprogressTrackerManager, GaugeTimerManager\n'), ((3695, 3718), 'pyprometheus.managers.GaugeTimerManager', 'GaugeTimerManager', (['self'], {}), '(self)\n', (3712, 3718), False, 'from pyprometheus.managers import TimerManager, InprogressTrackerManager, GaugeTimerManager\n'), ((7214, 7232), 'pyprometheus.managers.TimerManager', 'TimerManager', (['self'], {}), '(self)\n', (7226, 7232), False, 'from pyprometheus.managers import TimerManager, InprogressTrackerManager, GaugeTimerManager\n'), ((10212, 10230), 'pyprometheus.managers.TimerManager', 'TimerManager', (['self'], {}), '(self)\n', (10224, 10230), False, 'from pyprometheus.managers import TimerManager, InprogressTrackerManager, GaugeTimerManager\n'), ((3646, 3657), 'time.time', 'time.time', ([], {}), '()\n', (3655, 3657), False, 'import time\n'), ((2461, 2490), 'pyprometheus.utils.escape_str', 'escape_str', (['self._metric.name'], {}), '(self._metric.name)\n', (2471, 2490), False, 'from pyprometheus.utils import escape_str\n'), ((2567, 2578), 'time.time', 'time.time', ([], {}), '()\n', (2576, 2578), False, 'import time\n')] |
import scattertext as st
from scattertext import RankDifference
# Load the 2012 convention speeches and parse them with scattertext's
# lightweight whitespace tokenizer.
convention_df = st.SampleCorpora.ConventionData2012.get_data()
convention_df['parse'] = convention_df['text'].apply(st.whitespace_nlp_with_sentences)
# Build a stoplisted unigram corpus split by party.
unigram_corpus = (st.CorpusFromParsedDocuments(convention_df,
                                              category_col='party',
                                              parsed_col='parse')
                  .build().get_stoplisted_unigram_corpus())
# Seed a word-list topic model: each seed term is expanded to the 20 terms
# ranked most similar by RankDifference.
topic_model = (st.SentencesForTopicModeling(unigram_corpus)
               .get_topics_from_terms(['obama', 'romney', 'democrats', 'republicans',
                                       'health', 'military', 'taxes', 'education',
                                       'olympics', 'auto', 'iraq', 'iran', 'israel'],
                                      scorer=RankDifference(), num_terms_per_topic=20))
topic_feature_builder = st.FeatsFromTopicModel(topic_model)
# Rebuild the corpus using topic memberships (rather than raw terms) as features.
topic_corpus = st.CorpusFromParsedDocuments(
    convention_df,
    category_col='party',
    parsed_col='parse',
    feats_from_spacy_doc=topic_feature_builder
).build()
# Render the interactive Democratic-vs-Republican explorer over the topics.
html = st.produce_scattertext_explorer(
    topic_corpus,
    category='democrat',
    category_name='Democratic',
    not_category_name='Republican',
    width_in_pixels=1000,
    metadata=convention_df['speaker'],
    use_non_text_features=True,
    use_full_doc=True,
    pmi_threshold_coefficient=0,
    topic_model_term_lists=topic_feature_builder.get_top_model_term_lists()
)
# Write the visualisation to disk for manual inspection in a browser.
open('./demo_word_list_topic_model.html', 'wb').write(html.encode('utf-8'))
print('Open ./demo_word_list_topic_model.html in Chrome or Firefox.')
| [
"scattertext.SentencesForTopicModeling",
"scattertext.CorpusFromParsedDocuments",
"scattertext.RankDifference",
"scattertext.FeatsFromTopicModel",
"scattertext.SampleCorpora.ConventionData2012.get_data"
] | [((81, 127), 'scattertext.SampleCorpora.ConventionData2012.get_data', 'st.SampleCorpora.ConventionData2012.get_data', ([], {}), '()\n', (125, 127), True, 'import scattertext as st\n'), ((903, 938), 'scattertext.FeatsFromTopicModel', 'st.FeatsFromTopicModel', (['topic_model'], {}), '(topic_model)\n', (925, 938), True, 'import scattertext as st\n'), ((490, 534), 'scattertext.SentencesForTopicModeling', 'st.SentencesForTopicModeling', (['unigram_corpus'], {}), '(unigram_corpus)\n', (518, 534), True, 'import scattertext as st\n'), ((835, 851), 'scattertext.RankDifference', 'RankDifference', ([], {}), '()\n', (849, 851), False, 'from scattertext import RankDifference\n'), ((955, 1088), 'scattertext.CorpusFromParsedDocuments', 'st.CorpusFromParsedDocuments', (['convention_df'], {'category_col': '"""party"""', 'parsed_col': '"""parse"""', 'feats_from_spacy_doc': 'topic_feature_builder'}), "(convention_df, category_col='party',\n parsed_col='parse', feats_from_spacy_doc=topic_feature_builder)\n", (983, 1088), True, 'import scattertext as st\n'), ((234, 323), 'scattertext.CorpusFromParsedDocuments', 'st.CorpusFromParsedDocuments', (['convention_df'], {'category_col': '"""party"""', 'parsed_col': '"""parse"""'}), "(convention_df, category_col='party',\n parsed_col='parse')\n", (262, 323), True, 'import scattertext as st\n')] |
from box import Box
class Board(Box):
    # NOTE(review): Board subclasses Box but never calls super().__init__ and
    # composes Box cells itself -- confirm the inheritance is intentional.
    def __init__(self):
        """Initialize the board empty."""
        # 10x10 grid of Box cells, each starting as '.' (empty water).
        self.board = []
        self.hitted_not_sunk = [] #Only for CPU
        for row in range(10):
            line = []
            for column in range(10):
                element = Box('.', (row, column))
                line.append(element)
            self.board.append(line)
    def print_board(self, person):
        """Print out board."""
        print("    A B C D E F G H I J")
        index = 1
        for line in self.board:
            # Pad single-digit row numbers so columns stay aligned with row 10.
            if index != 10:
                end = '  '
            else:
                end = ' '
            print(index, end = end)
            index += 1
            for element in line:
                # Show a cell only when it is non-empty and revealed, or when
                # printing the player's own board ('own' shows everything).
                if (element.symbol != '.' and element.is_visible == True) or person == 'own':
                    print(element.symbol, end = " ")
                else:
                    print(end = ". ")
            print('')
        print('')
    def find_coordinate(self, coordinate):
        """Find the box object corresponding with that coordinate."""
        # Linear scan of the grid; implicitly returns None when no cell matches.
        for line in self.board:
            for element in line:
                if element.place == coordinate:
                    return element
| [
"box.Box"
] | [((292, 315), 'box.Box', 'Box', (['"""."""', '(row, column)'], {}), "('.', (row, column))\n", (295, 315), False, 'from box import Box\n')] |
# Copyright (c) 2020 Vail-Zero. All Rights Resarved.
# 必要なライブラリーインポート
from tkinter import messagebox
from pack import decker
import tkinter
import threading
import os
import sys
from pack import passgen
# from pack import config
# from pack import GUIl
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
import webbrowser
# from pack import regkey
global txt
from pack import PassBox
# NOTE: 'global' at module scope is a no-op; txt is the password Entry created below.
# Background color shown behind the image.
backclr=""
args=sys.argv
# CLI mode: "script.py <file>" encrypts the file with the stored password,
# or decrypts it when the extension is ".dec", then shows a result dialog.
if len(args)==2:
    pb = PassBox.PswdBox()
    pass1=str(pb.pswd)
    if os.path.isfile(args[1])==True:
        r, e = os.path.splitext(args[1])
        file=args[1]
        if e==".dec":
            ns=0
            # decker.openzip decrypts; return code is mapped to dialogs below.
            n=decker.openzip(file,ns,pass1)
        else:
            # decker.comzip encrypts.
            n=decker.comzip(file,pass1)
        if e==".dec":
            # Hidden root window so messagebox can appear topmost.
            root = tkinter.Tk()
            root.withdraw()
            root.attributes("-topmost", True)
            if n==0:
                messagebox.showerror('エラー', '指定されたファイルまたはディレクトリが見つかりません。')
            if n==1:
                messagebox.showinfo('確認', '復号化が終了しました!')
            if n==-2:
                messagebox.showerror('エラー', 'パスワードが間違っています')
            if n==-3:
                messagebox.showerror('エラー', 'ファイルはアクセスが制限されています')
            root.destroy()
            root.mainloop()
        else:
            root = tkinter.Tk()
            root.withdraw()
            root.attributes("-topmost", True)
            if n==0:
                messagebox.showerror('エラー', '指定されたファイルまたはディレクトリが見つかりません。')
            if n==1:
                messagebox.showinfo('確認', '暗号化が終了しました!')
            if n==-3:
                messagebox.showerror('エラー', 'ファイルはアクセスが制限されています')
            if n==-2:
                messagebox.showerror('エラー', '対応していないファイルの可能性があります')
            root.destroy()
            root.mainloop()
    else:
        root = tkinter.Tk()
        root.withdraw()
        root.attributes("-topmost", True)
        messagebox.showerror('エラー', '指定されたファイルまたはディレクトリが見つかりません。')
        root.destroy()
        root.mainloop()
var = {'Theme': "None", 'online':True,'cash':"None"}
# Resolve the path of a bundled resource file.
def resourcePath(filename):
    """Return filename relative to the PyInstaller bundle dir if one exists."""
    bundle_dir = getattr(sys, "_MEIPASS", None)
    if bundle_dir is not None:
        # Running from a frozen bundle: resources were unpacked to _MEIPASS.
        return os.path.join(bundle_dir, filename)
    return os.path.join(filename)
# Button handler: encryption.
def btn_click(pass1):
    """Ask the user to pick a file and encrypt it with the given password.

    The return code of decker.comzip is mapped to a message box:
    0 = file not found, 1 = success, -3 = access restricted,
    -2 = possibly unsupported file.
    """
    # Ensure the submodule is loaded; "from tkinter import *" does not import it.
    import tkinter.filedialog
    iDir = ""
    fTyp = [("", "*")]
    file = tkinter.filedialog.askopenfilename(filetypes=fTyp, initialdir=iDir)
    if file == "":
        # Dialog was cancelled.
        return
    n = decker.comzip(file, pass1)
    if n == 0:
        messagebox.showerror('エラー', '指定されたファイルまたはディレクトリが見つかりません。')
    if n == 1:
        messagebox.showinfo('確認', '暗号化が終了しました!')
    if n == -3:
        messagebox.showerror('エラー', 'ファイルはアクセスが制限されています')
    if n == -2:
        messagebox.showerror('エラー', '対応していないファイルの可能性があります')
    return
# Button handler: decryption.
def btn2_click(pass1):
    """Ask the user to pick a .dec file and decrypt it with the given password.

    The return code of decker.openzip is mapped to a message box:
    0 = file not found, 1 = success, -2 = wrong password,
    -3 = access restricted, 4 = possibly corrupted archive.
    """
    # Ensure the submodule is loaded; "from tkinter import *" does not import it.
    import tkinter.filedialog
    iDir = ""
    fTyp = [("DEC Files", "*.dec")]
    file = tkinter.filedialog.askopenfilename(filetypes=fTyp, initialdir=iDir)
    if file == "":
        # Dialog was cancelled.
        return
    ns = 0
    n = decker.openzip(file, ns, pass1)
    if n == 0:
        messagebox.showerror('エラー', '指定されたファイルまたはディレクトリが見つかりません。')
    if n == 1:
        messagebox.showinfo('確認', '復号化が終了しました!')
    if n == -2:
        messagebox.showerror('エラー', 'パスワードが間違っています')
    if n == -3:
        messagebox.showerror('エラー', 'ファイルはアクセスが制限されています')
    if n == 4:
        messagebox.showerror('エラー', 'ファイルが破損している可能性があります。\n元データでもう一度暗号化しなおしてください。\nこのファイルを他人から受け取った場合は、正式なファイルのコピーをもう一度取得してください')
    return
# Threaded wrappers so the GUI stays responsive while (de)crypting.
def btn():
    """Validate the password field and run encryption on a worker thread."""
    password = txt.get()
    if password == "":
        # Topmost throw-away window so the error dialog appears in front.
        popup = tkinter.Tk()
        popup.withdraw()
        popup.attributes("-topmost", True)
        messagebox.showerror('エラー', 'パスワードを入力してください')
        popup.destroy()
        popup.mainloop()
        return
    worker = threading.Thread(target=btn_click, args=[password])
    worker.start()
    txt.delete(0, tkinter.END)
    return
def btn2():
    """Validate the password field and run decryption on a worker thread."""
    password = txt.get()
    if password == "":
        # Topmost throw-away window so the error dialog appears in front.
        popup = tkinter.Tk()
        popup.withdraw()
        popup.attributes("-topmost", True)
        messagebox.showerror('エラー', 'パスワードを入力してください')
        popup.destroy()
        popup.mainloop()
        return
    worker = threading.Thread(target=btn2_click, args=[password])
    worker.start()
    txt.delete(0, tkinter.END)
    return
# Password-generator button.
def btn04():
    """Generate a random password and optionally copy it into the entry box."""
    import pyperclip
    txt.delete(0, tkinter.END)
    import random
    length = random.randint(5, 20)
    generated = passgen.gen(length)
    answer = messagebox.askyesno('確認', generated+'\n 生成したパスワードをパスワードボックスに入れますか?\n 「OK」をクリックした場合、パスワードはクリップボードにコピーされます。')
    if answer == False:
        return
    # Accepted: copy to clipboard and fill the entry box.
    pyperclip.copy(generated)
    txt.insert(tkinter.END, generated)
    return
# Paste handler bound to the password entry.
def put(event):
    """Append the clipboard text to the password entry."""
    import pyperclip
    txt.insert(tkinter.END, pyperclip.paste())
    return
# GUIl.delcheck()
# Build the main window.
window = tkinter.Tk()
window.geometry("451x300")
window.title("VailDEC ファイル暗号化ソフト")
#window.configure(bg=backclr)
window.resizable(False, False)
# Background image and icon settings.
iconfile = resourcePath('resources/IMG_8776.ICO')
# window.iconbitmap(iconfile)
window.attributes("-topmost", False)
# Password entry; characters are masked with '*'.
txt = tkinter.Entry(font=("",15),show='*')
txt.place(x=130, y=200)
#label2 = ttk.Label(window, text='パスワード')
#label2.place(x=65, y=200)
# Add and place the buttons.
btn4 = tkinter.Button(window, text="パスワード生成",command = btn04)
btn4.place(x=300, y=18)
label3 = ttk.Label(window, text='パスワードを下に入力してからボタンを押してください')
label3.place(x=101, y=180)
# NOTE: these assignments shadow the handler functions btn/btn2 defined above;
# the widgets already captured the functions through command= before the rebind.
btn = tkinter.Button(window, text="暗号化",command = btn,font=("", 25))
btn.place(x=90, y=100)
btn2 = tkinter.Button(window, text="復号化",command = btn2,font=("", 25))
btn2.place(x=250, y=100)
window.mainloop()
| [
"tkinter.ttk.Label",
"os.path.isfile",
"pack.decker.comzip",
"pack.passgen.gen",
"os.path.join",
"random.randint",
"pyperclip.paste",
"tkinter.Button",
"tkinter.Entry",
"tkinter.filedialog.askopenfilename",
"tkinter.Tk",
"pack.PassBox.PswdBox",
"threading.Thread",
"tkinter.messagebox.showi... | [((5218, 5230), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (5228, 5230), False, 'import tkinter\n'), ((5493, 5531), 'tkinter.Entry', 'tkinter.Entry', ([], {'font': "('', 15)", 'show': '"""*"""'}), "(font=('', 15), show='*')\n", (5506, 5531), False, 'import tkinter\n'), ((5643, 5696), 'tkinter.Button', 'tkinter.Button', (['window'], {'text': '"""パスワード生成"""', 'command': 'btn04'}), "(window, text='パスワード生成', command=btn04)\n", (5657, 5696), False, 'import tkinter\n'), ((5732, 5783), 'tkinter.ttk.Label', 'ttk.Label', (['window'], {'text': '"""パスワードを下に入力してからボタンを押してください"""'}), "(window, text='パスワードを下に入力してからボタンを押してください')\n", (5741, 5783), False, 'from tkinter import ttk\n'), ((5818, 5880), 'tkinter.Button', 'tkinter.Button', (['window'], {'text': '"""暗号化"""', 'command': 'btn', 'font': "('', 25)"}), "(window, text='暗号化', command=btn, font=('', 25))\n", (5832, 5880), False, 'import tkinter\n'), ((5912, 5975), 'tkinter.Button', 'tkinter.Button', (['window'], {'text': '"""復号化"""', 'command': 'btn2', 'font': "('', 25)"}), "(window, text='復号化', command=btn2, font=('', 25))\n", (5926, 5975), False, 'import tkinter\n'), ((495, 512), 'pack.PassBox.PswdBox', 'PassBox.PswdBox', ([], {}), '()\n', (510, 512), False, 'from pack import PassBox\n'), ((2257, 2279), 'os.path.join', 'os.path.join', (['filename'], {}), '(filename)\n', (2269, 2279), False, 'import os\n'), ((2534, 2601), 'tkinter.filedialog.askopenfilename', 'tkinter.filedialog.askopenfilename', ([], {'filetypes': 'fTyp', 'initialdir': 'iDir'}), '(filetypes=fTyp, initialdir=iDir)\n', (2568, 2601), False, 'import tkinter\n'), ((2709, 2735), 'pack.decker.comzip', 'decker.comzip', (['file', 'pass1'], {}), '(file, pass1)\n', (2722, 2735), False, 'from pack import decker\n'), ((3274, 3341), 'tkinter.filedialog.askopenfilename', 'tkinter.filedialog.askopenfilename', ([], {'filetypes': 'fTyp', 'initialdir': 'iDir'}), '(filetypes=fTyp, initialdir=iDir)\n', (3308, 3341), False, 'import tkinter\n'), 
((3426, 3457), 'pack.decker.openzip', 'decker.openzip', (['file', 'ns', 'pass1'], {}), '(file, ns, pass1)\n', (3440, 3457), False, 'from pack import decker\n'), ((4184, 4232), 'threading.Thread', 'threading.Thread', ([], {'target': 'btn_click', 'args': '[pass1]'}), '(target=btn_click, args=[pass1])\n', (4200, 4232), False, 'import threading\n'), ((4571, 4620), 'threading.Thread', 'threading.Thread', ([], {'target': 'btn2_click', 'args': '[pass1]'}), '(target=btn2_click, args=[pass1])\n', (4587, 4620), False, 'import threading\n'), ((4786, 4807), 'random.randint', 'random.randint', (['(5)', '(20)'], {}), '(5, 20)\n', (4800, 4807), False, 'import random\n'), ((4817, 4832), 'pack.passgen.gen', 'passgen.gen', (['cc'], {}), '(cc)\n', (4828, 4832), False, 'from pack import passgen\n'), ((4839, 4950), 'tkinter.messagebox.askyesno', 'messagebox.askyesno', (['"""確認"""', '(bpass +\n """\n 生成したパスワードをパスワードボックスに入れますか?\n 「OK」をクリックした場合、パスワードはクリップボードにコピーされます。""")'], {}), '(\'確認\', bpass +\n """\n 生成したパスワードをパスワードボックスに入れますか?\n 「OK」をクリックした場合、パスワードはクリップボードにコピーされます。""")\n', (4858, 4950), False, 'from tkinter import messagebox\n'), ((4995, 5016), 'pyperclip.copy', 'pyperclip.copy', (['bpass'], {}), '(bpass)\n', (5009, 5016), False, 'import pyperclip\n'), ((543, 566), 'os.path.isfile', 'os.path.isfile', (['args[1]'], {}), '(args[1])\n', (557, 566), False, 'import os\n'), ((589, 614), 'os.path.splitext', 'os.path.splitext', (['args[1]'], {}), '(args[1])\n', (605, 614), False, 'import os\n'), ((1866, 1878), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (1876, 1878), False, 'import tkinter\n'), ((1955, 2013), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""エラー"""', '"""指定されたファイルまたはディレクトリが見つかりません。"""'], {}), "('エラー', '指定されたファイルまたはディレクトリが見つかりません。')\n", (1975, 2013), False, 'from tkinter import messagebox\n'), ((2211, 2247), 'os.path.join', 'os.path.join', (['sys._MEIPASS', 'filename'], {}), '(sys._MEIPASS, filename)\n', (2223, 2247), False, 'import os\n'), ((2756, 2814), 
'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""エラー"""', '"""指定されたファイルまたはディレクトリが見つかりません。"""'], {}), "('エラー', '指定されたファイルまたはディレクトリが見つかりません。')\n", (2776, 2814), False, 'from tkinter import messagebox\n'), ((2836, 2876), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""確認"""', '"""暗号化が終了しました!"""'], {}), "('確認', '暗号化が終了しました!')\n", (2855, 2876), False, 'from tkinter import messagebox\n'), ((2899, 2948), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""エラー"""', '"""ファイルはアクセスが制限されています"""'], {}), "('エラー', 'ファイルはアクセスが制限されています')\n", (2919, 2948), False, 'from tkinter import messagebox\n'), ((2971, 3022), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""エラー"""', '"""対応していないファイルの可能性があります"""'], {}), "('エラー', '対応していないファイルの可能性があります')\n", (2991, 3022), False, 'from tkinter import messagebox\n'), ((3477, 3535), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""エラー"""', '"""指定されたファイルまたはディレクトリが見つかりません。"""'], {}), "('エラー', '指定されたファイルまたはディレクトリが見つかりません。')\n", (3497, 3535), False, 'from tkinter import messagebox\n'), ((3557, 3597), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""確認"""', '"""復号化が終了しました!"""'], {}), "('確認', '復号化が終了しました!')\n", (3576, 3597), False, 'from tkinter import messagebox\n'), ((3621, 3665), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""エラー"""', '"""パスワードが間違っています"""'], {}), "('エラー', 'パスワードが間違っています')\n", (3641, 3665), False, 'from tkinter import messagebox\n'), ((3688, 3737), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""エラー"""', '"""ファイルはアクセスが制限されています"""'], {}), "('エラー', 'ファイルはアクセスが制限されています')\n", (3708, 3737), False, 'from tkinter import messagebox\n'), ((3759, 3891), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""エラー"""', '"""ファイルが破損している可能性があります。\n元データでもう一度暗号化しなおしてください。\nこのファイルを他人から受け取った場合は、正式なファイルのコピーをもう一度取得してください"""'], {}), '(\'エラー\',\n """ファイルが破損している可能性があります。\n元データでもう一度暗号化しなおしてください。\nこのファイルを他人から受け取った場合は、正式なファイルのコピーをもう一度取得してください"""\n )\n', (3779, 
3891), False, 'from tkinter import messagebox\n'), ((3975, 3987), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (3985, 3987), False, 'import tkinter\n'), ((4062, 4107), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""エラー"""', '"""パスワードを入力してください"""'], {}), "('エラー', 'パスワードを入力してください')\n", (4082, 4107), False, 'from tkinter import messagebox\n'), ((4362, 4374), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (4372, 4374), False, 'import tkinter\n'), ((4449, 4494), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""エラー"""', '"""パスワードを入力してください"""'], {}), "('エラー', 'パスワードを入力してください')\n", (4469, 4494), False, 'from tkinter import messagebox\n'), ((5144, 5161), 'pyperclip.paste', 'pyperclip.paste', ([], {}), '()\n', (5159, 5161), False, 'import pyperclip\n'), ((689, 720), 'pack.decker.openzip', 'decker.openzip', (['file', 'ns', 'pass1'], {}), '(file, ns, pass1)\n', (703, 720), False, 'from pack import decker\n'), ((747, 773), 'pack.decker.comzip', 'decker.comzip', (['file', 'pass1'], {}), '(file, pass1)\n', (760, 773), False, 'from pack import decker\n'), ((814, 826), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (824, 826), False, 'import tkinter\n'), ((1335, 1347), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (1345, 1347), False, 'import tkinter\n'), ((938, 996), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""エラー"""', '"""指定されたファイルまたはディレクトリが見つかりません。"""'], {}), "('エラー', '指定されたファイルまたはディレクトリが見つかりません。')\n", (958, 996), False, 'from tkinter import messagebox\n'), ((1034, 1074), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""確認"""', '"""復号化が終了しました!"""'], {}), "('確認', '復号化が終了しました!')\n", (1053, 1074), False, 'from tkinter import messagebox\n'), ((1114, 1158), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""エラー"""', '"""パスワードが間違っています"""'], {}), "('エラー', 'パスワードが間違っています')\n", (1134, 1158), False, 'from tkinter import messagebox\n'), ((1197, 1246), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""エラー"""', 
'"""ファイルはアクセスが制限されています"""'], {}), "('エラー', 'ファイルはアクセスが制限されています')\n", (1217, 1246), False, 'from tkinter import messagebox\n'), ((1459, 1517), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""エラー"""', '"""指定されたファイルまたはディレクトリが見つかりません。"""'], {}), "('エラー', '指定されたファイルまたはディレクトリが見つかりません。')\n", (1479, 1517), False, 'from tkinter import messagebox\n'), ((1555, 1595), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""確認"""', '"""暗号化が終了しました!"""'], {}), "('確認', '暗号化が終了しました!')\n", (1574, 1595), False, 'from tkinter import messagebox\n'), ((1634, 1683), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""エラー"""', '"""ファイルはアクセスが制限されています"""'], {}), "('エラー', 'ファイルはアクセスが制限されています')\n", (1654, 1683), False, 'from tkinter import messagebox\n'), ((1722, 1773), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""エラー"""', '"""対応していないファイルの可能性があります"""'], {}), "('エラー', '対応していないファイルの可能性があります')\n", (1742, 1773), False, 'from tkinter import messagebox\n')] |
# This is an answer to: https://codegolf.stackexchange.com/questions/189277/bridge-the-gaps
import sys
import os
from PIL import Image
import numpy as np
import scipy.ndimage
def obtain_groups(image, threshold, structuring_el):
    """
    Obtain isles of unconnected pixels via a threshold on the G channel.

    image: HxWxC array; channel index 1 is compared against threshold
           (the original docstring said "R channel", but the code reads
           channel 1, i.e. green in RGB/RGBA order).
    threshold: pixels whose channel-1 value is below this are foreground.
    structuring_el: connectivity structure passed to scipy.ndimage.label.
    Returns (labeled_array, num_features).
    """
    # np.int was removed in NumPy 1.24; the builtin int is the equivalent dtype.
    image_logical = (image[:, :, 1] < threshold).astype(int)
    # scipy.ndimage.measurements is a deprecated alias namespace; call label() directly.
    return scipy.ndimage.label(image_logical, structure=structuring_el)
def swap_colors(image, original_color, new_color):
    """Recolor every pixel of original_color to new_color, in place.

    Only the first three (RGB) channels are compared and written; any
    alpha channel is left untouched. The mutated array is also returned.
    """
    r_old, g_old, b_old = original_color
    hits = (
        (image[:, :, 0] == r_old)
        & (image[:, :, 1] == g_old)
        & (image[:, :, 2] == b_old)
    )
    image[:, :, :3][hits] = list(new_color)
    return image
def main(image_path=None):
    """
    For each processed image, we begin by changing the color
    of all the white pixels in an image to red. By doing this,
    it is guaranteed that all the elements (any isle of black
    pixels) are connected.
    Then, we iterate over all the pixels in the image starting
    from the top left corner and moving right and down. For every
    red pixel we find we change its color to white. If after this
    change of color there is still only one element (an element
    being now any isle of black and red pixels), we leave the pixel
    white and move on to the next pixel. However, if after the
    color change from red to white the number of elements is bigger
    than one, we leave the pixel red and move on to the next pixel.
    The connections obtained by only using this method show a regular
    pattern and in some cases, there are unnecessary red pixels.
    This extra red pixels can be easily removed by iterating again over
    the image and performing the same operations as explained above but
    from the bottom right corner to the top left corner. This second
    pass is much faster since the amount of red pixels that have to be
    checked.

    image_path: optional single file name inside "images/" to process;
    when None, every file in "images/" is processed.
    """
    images = os.listdir("images")
    f = open("results.txt", "w")
    if image_path is not None:
        images = [image_path]
    for image_name in images:
        im = Image.open("images/"+image_name).convert("RGBA")
        image = np.array(im)
        # Make the background red so everything starts as one connected element.
        image = swap_colors(image, (255, 255, 255), (255, 0, 0))
        # create structuring element to determine unconnected groups of pixels in image
        s = scipy.ndimage.morphology.generate_binary_structure(2, 2)
        # First pass: top-left to bottom-right.
        for i in np.ndindex(image.shape[:2]):
            # skip black pixels (RGBA sums to 255 only for [0,0,0,255])
            if sum(image[i[0], i[1]]) == 255:
                continue
            image[i[0], i[1]] = [255, 255, 255, 255]
            # label the different groups, considering diagonal connections as valid
            groups, num_groups = obtain_groups(image, 255, s)
            if num_groups != 1:
                # Turning it white disconnected the image; keep it red.
                image[i[0], i[1]] = [255, 0, 0, 255]
            # Show percentage
            print((i[1] + i[0]*im.size[0])/(im.size[0]*im.size[1]))
        # Number of red pixels
        red_p = 0
        # Second pass: bottom-right to top-left, pruning superfluous red pixels.
        for i in np.ndindex(image.shape[:2]):
            j = (im.size[1] - i[0] - 1, im.size[0] - i[1] - 1)
            # skip black and white pixels
            if sum(image[j[0], j[1]]) == 255 or sum(image[j[0], j[1]]) == 255*4:
                continue
            image[j[0], j[1]] = [255, 255, 255, 255]
            # label the different groups, considering diagonal connections as valid
            groups, num_groups = obtain_groups(image, 255, s)
            if num_groups != 1:
                image[j[0], j[1]] = [255, 0, 0, 255]
            # Show percentage
            print((j[1] + j[0]*im.size[0])/(im.size[0]*im.size[1]))
            red_p += (sum(image[j[0], j[1]]) == 255*2)
        print(red_p)
        f.write("r_"+image_name+": "+str(red_p)+"\n")
        im = Image.fromarray(image)
        # im.show()
        im.save("r_"+image_name)
    f.close()
if __name__ == "__main__":
    # Optional CLI argument selects a single image; otherwise process them all.
    cli_args = sys.argv[1:]
    if len(cli_args) == 1:
        main(cli_args[0])
    else:
        main()
| [
"numpy.ndindex",
"PIL.Image.open",
"numpy.array",
"PIL.Image.fromarray",
"os.listdir"
] | [((2124, 2144), 'os.listdir', 'os.listdir', (['"""images"""'], {}), "('images')\n", (2134, 2144), False, 'import os\n'), ((2349, 2361), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (2357, 2361), True, 'import numpy as np\n'), ((2604, 2631), 'numpy.ndindex', 'np.ndindex', (['image.shape[:2]'], {}), '(image.shape[:2])\n', (2614, 2631), True, 'import numpy as np\n'), ((3185, 3212), 'numpy.ndindex', 'np.ndindex', (['image.shape[:2]'], {}), '(image.shape[:2])\n', (3195, 3212), True, 'import numpy as np\n'), ((3952, 3974), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (3967, 3974), False, 'from PIL import Image\n'), ((2284, 2318), 'PIL.Image.open', 'Image.open', (["('images/' + image_name)"], {}), "('images/' + image_name)\n", (2294, 2318), False, 'from PIL import Image\n')] |
#!/usr/bin/env python
import rospy
import sys
import time
import numpy as np
from realtimepseudoAstar import plan
from globaltorobotcoords import transform
from nubot_common.msg import ActionCmd, VelCmd, OminiVisionInfo, BallInfo, ObstaclesInfo, RobotInfo, BallIsHolding
#Initialize desired x depending on obstacle number (sys.argv[1] is 1..4 and picks a lane)
ROBOT_NAME = 'rival' + str(sys.argv[1])
possible_x = [-600, -200, 200, 600]
# Two patrol way-points on the chosen x lane, at y=-400 and y=+400.
target_1 = np.array([possible_x[int(sys.argv[1]) - 1], -400])
target_2 = np.array([possible_x[int(sys.argv[1]) - 1], 400])
current_target = target_1
# For plotting
# import math
# import matplotlib.pyplot as plt
# Initialize publisher and rate
pub = rospy.Publisher('/' + str(ROBOT_NAME)+'/nubotcontrol/actioncmd', ActionCmd, queue_size=1)
rospy.init_node(str(ROBOT_NAME) + '_brain', anonymous=False)
hertz = 10
rate = rospy.Rate(hertz)
def callback(data):
    """Patrol between target_1 and target_2, publishing one motion command per message."""
    global current_target
    # Locate this robot's entry in the vision message.
    info = data.robotinfo[int(sys.argv[1]) - 1]
    position = np.array([info.pos.x, info.pos.y])
    heading = info.heading.theta
    # Flip the patrol target once the robot is within 50 units of it.
    if np.linalg.norm(position - current_target) < 50:
        if np.all(current_target == target_1):
            current_target = target_2
        elif np.all(current_target == target_2):
            current_target = target_1
    # Express the goal in the robot's own frame for use by hwcontroller.
    local_goal = transform(
        current_target[0], current_target[1], position[0], position[1], heading
    )
    # Build and publish the ActionCmd.
    cmd = ActionCmd()
    cmd.target.x = local_goal[0]
    cmd.target.y = local_goal[1]
    cmd.maxvel = 150
    cmd.handle_enable = 0
    cmd.target_ori = 0
    pub.publish(cmd)
    rate.sleep()
def listener():
    """Subscribe to the robot's omnivision feed and block until shutdown."""
    topic = "/" + str(ROBOT_NAME) + "/omnivision/OmniVisionInfo"
    rospy.Subscriber(topic, OminiVisionInfo, callback, queue_size=1)
    rospy.spin()
if __name__ == '__main__':
    try:
        listener()
    except rospy.ROSInterruptException:
        # Raised by rospy on node shutdown; exit quietly.
        pass
| [
"globaltorobotcoords.transform",
"nubot_common.msg.ActionCmd",
"rospy.Rate",
"numpy.array",
"numpy.linalg.norm",
"rospy.spin",
"numpy.all"
] | [((822, 839), 'rospy.Rate', 'rospy.Rate', (['hertz'], {}), '(hertz)\n', (832, 839), False, 'import rospy\n'), ((955, 983), 'numpy.array', 'np.array', (['[r.pos.x, r.pos.y]'], {}), '([r.pos.x, r.pos.y])\n', (963, 983), True, 'import numpy as np\n'), ((1494, 1560), 'globaltorobotcoords.transform', 'transform', (['target[0]', 'target[1]', 'robot_pos[0]', 'robot_pos[1]', 'theta'], {}), '(target[0], target[1], robot_pos[0], robot_pos[1], theta)\n', (1503, 1560), False, 'from globaltorobotcoords import transform\n'), ((1629, 1640), 'nubot_common.msg.ActionCmd', 'ActionCmd', ([], {}), '()\n', (1638, 1640), False, 'from nubot_common.msg import ActionCmd, VelCmd, OminiVisionInfo, BallInfo, ObstaclesInfo, RobotInfo, BallIsHolding\n'), ((1963, 1975), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1973, 1975), False, 'import rospy\n'), ((1150, 1184), 'numpy.all', 'np.all', (['(current_target == target_1)'], {}), '(current_target == target_1)\n', (1156, 1184), True, 'import numpy as np\n'), ((1098, 1140), 'numpy.linalg.norm', 'np.linalg.norm', (['(robot_pos - current_target)'], {}), '(robot_pos - current_target)\n', (1112, 1140), True, 'import numpy as np\n'), ((1281, 1315), 'numpy.all', 'np.all', (['(current_target == target_2)'], {}), '(current_target == target_2)\n', (1287, 1315), True, 'import numpy as np\n'), ((1229, 1271), 'numpy.linalg.norm', 'np.linalg.norm', (['(robot_pos - current_target)'], {}), '(robot_pos - current_target)\n', (1243, 1271), True, 'import numpy as np\n')] |
#
# -*- coding: utf-8 -*-
# Copyright 2021 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
"""
The cisco.ios_route_maps config file.
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to its desired end-state is
created.
"""
from ansible.module_utils.six import iteritems
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.resource_module import (
ResourceModule,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.facts.facts import (
Facts,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
dict_merge,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.rm_templates.route_maps import (
Route_mapsTemplate,
)
class Route_maps(ResourceModule):
    """
    The cisco.ios_route_maps config class
    """
    # Entry-level attributes handled directly by compare(); the nested
    # "match"/"set" options are diffed separately in list_type_compare().
    parsers = ["continue_entry", "description"]
    def __init__(self, module):
        super(Route_maps, self).__init__(
            empty_fact_val={},
            facts_module=Facts(module),
            module=module,
            resource="route_maps",
            tmplt=Route_mapsTemplate(),
        )
    def execute_module(self):
        """ Execute the module
        :rtype: A dictionary
        :returns: The result from module execution
        """
        if self.state not in ["parsed", "gathered"]:
            self.generate_commands()
            self.run_commands()
        return self.result
    def generate_commands(self):
        """ Generate configuration commands to send based on
            want, have and desired state.
        """
        if self.want:
            wantd = {(entry["route_map"]): entry for entry in self.want}
        else:
            wantd = {}
        if self.have:
            haved = {(entry["route_map"]): entry for entry in self.have}
        else:
            haved = {}
        # Convert each of config list to dict
        for each in wantd, haved:
            self.list_to_dict(each)
        # if state is merged, merge want onto have and then compare
        if self.state == "merged":
            wantd = dict_merge(haved, wantd)
        # if state is deleted, empty out wantd and set haved to wantd
        if self.state == "deleted":
            haved = {
                k: v for k, v in iteritems(haved) if k in wantd or not wantd
            }
            wantd = {}
        # remove superfluous config for overridden and deleted
        if self.state in ["overridden", "deleted"]:
            for k, have in iteritems(haved):
                if k not in wantd:
                    route_map_cmd = "no route-map {route_map}".format(**have)
                    self.commands.append(route_map_cmd)
        for k, want in iteritems(wantd):
            self._compare(want=want, have=haved.pop(k, {}))
    def _compare(self, want, have):
        """Leverages the base class `compare()` method and
        populates the list of commands to be run by comparing
        the `want` and `have` data with the `parsers` defined
        for the Route_maps network resource.
        """
        if want != have and self.state != "deleted":
            self.entries_compare(want, have)
    def entries_compare(self, want, have):
        """Emit commands for every entry of one route-map.

        Diffs entry-level parsers, then delegates the nested "match"/"set"
        options to list_type_compare(); whenever an entry produced any
        command, the parent "route-map <name> <action> <sequence>" line is
        inserted in front of that entry's commands. For replaced/overridden
        states, entries present only in `have` are negated at the end.
        """
        if want.get("entries"):
            cmd_len = len(self.commands)
            if have.get("entries"):
                for k, v in iteritems(want["entries"]):
                    have_entry = have["entries"].pop(k, {})
                    if have_entry and want["entries"][k] != have_entry:
                        # description gets merged with existing description, so explicit delete is required
                        # replaced and overridden state
                        if (
                            (
                                self.state == "replaced"
                                or self.state == "overridden"
                            )
                            and have_entry.get("description")
                            and have_entry.get("description")
                            != want["entries"][k].get("description")
                        ):
                            self.compare(
                                parsers=["description"],
                                want=dict(),
                                have=have_entry,
                            )
                    self.compare(
                        parsers=self.parsers,
                        want=want["entries"][k],
                        have=have_entry,
                    )
                    have_match = have_entry.get("match")
                    want_match = v.get("match")
                    if have_match and want_match:
                        self.list_type_compare(
                            "match", want=want_match, have=have_match
                        )
                    elif not have_match and want_match:
                        self.list_type_compare(
                            "match", want=want_match, have=dict()
                        )
                    have_set = have_entry.get("set")
                    want_set = v.get("set")
                    if have_set and want_set:
                        self.list_type_compare(
                            "set", want=want_set, have=have_set
                        )
                    elif not have_set and want_set:
                        self.list_type_compare(
                            "set", want=want_set, have=dict()
                        )
                    # Prepend the parent route-map line if this entry
                    # generated any command.
                    if cmd_len != len(self.commands):
                        route_map_cmd = "route-map {route_map}".format(**want)
                        if want["entries"][k].get("action"):
                            route_map_cmd += " {action}".format(
                                **want["entries"][k]
                            )
                        if want["entries"][k].get("sequence"):
                            route_map_cmd += " {sequence}".format(
                                **want["entries"][k]
                            )
                        self.commands.insert(cmd_len, route_map_cmd)
                        cmd_len = len(self.commands)
            else:
                # Nothing exists yet on the device: render every wanted entry.
                for k, v in iteritems(want["entries"]):
                    self.compare(
                        parsers=self.parsers,
                        want=want["entries"][k],
                        have=dict(),
                    )
                    want_match = v.get("match")
                    if want_match:
                        self.list_type_compare(
                            "match", want=want_match, have=dict()
                        )
                    want_set = v.get("set")
                    if want_set:
                        self.list_type_compare(
                            "set", want=want_set, have=dict()
                        )
                    if cmd_len != len(self.commands):
                        route_map_cmd = "route-map {route_map}".format(**want)
                        if want["entries"][k].get("action"):
                            route_map_cmd += " {action}".format(
                                **want["entries"][k]
                            )
                        if want["entries"][k].get("sequence"):
                            route_map_cmd += " {sequence}".format(
                                **want["entries"][k]
                            )
                        self.commands.insert(cmd_len, route_map_cmd)
                        cmd_len = len(self.commands)
        # Entries that remain in `have` are not wanted: negate them.
        if (
            self.state == "replaced" or self.state == "overridden"
        ) and have.get("entries"):
            cmd_len = len(self.commands)
            for k, v in iteritems(have["entries"]):
                route_map_cmd = "no route-map {route_map}".format(**have)
                if have["entries"][k].get("action"):
                    route_map_cmd += " {action}".format(**have["entries"][k])
                if have["entries"][k].get("sequence"):
                    route_map_cmd += " {sequence}".format(**have["entries"][k])
                self.commands.insert(cmd_len, route_map_cmd)
    def list_type_compare(self, compare_type, want, have):
        """Diff the nested "match" or "set" options of a single entry.

        compare_type is "match" or "set". The "ip"/"ipv6" sub-dicts are
        walked one attribute at a time so each can be added or negated
        individually; for replaced/overridden states, leftover `have`
        attributes are negated.
        """
        parsers = [
            "{0}".format(compare_type),
            "{0}.ip".format(compare_type),
            "{0}.ipv6".format(compare_type),
        ]
        for k, v in iteritems(want):
            have_v = have.pop(k, {})
            if v != have_v and k not in ["ip", "ipv6", "action", "sequence"]:
                if have_v:
                    self.compare(
                        parsers=parsers,
                        want={compare_type: {k: v}},
                        have={compare_type: {k: have_v}},
                    )
                else:
                    self.compare(
                        parsers=parsers,
                        want={compare_type: {k: v}},
                        have=dict(),
                    )
            if k in ["ip", "ipv6"]:
                for key, val in iteritems(v):
                    have_val = have_v.pop(key, {})
                    if val != have_val:
                        if have_val:
                            if (
                                self.state == "overridden"
                                or self.state == "replaced"
                            ):
                                self.compare(
                                    parsers=parsers,
                                    want=dict(),
                                    have={compare_type: {k: {key: have_val}}},
                                )
                            self.compare(
                                parsers=parsers,
                                want={compare_type: {k: {key: val}}},
                                have={compare_type: {k: {key: have_val}}},
                            )
                        else:
                            self.compare(
                                parsers=parsers,
                                want={compare_type: {k: {key: val}}},
                                have=dict(),
                            )
                if (
                    self.state == "overridden" or self.state == "replaced"
                ) and have_v:
                    # Remaining ip/ipv6 attributes exist only on the device.
                    for key, val in iteritems(have_v):
                        self.compare(
                            parsers=parsers,
                            want=dict(),
                            have={compare_type: {k: {key: val}}},
                        )
        if have and (self.state == "replaced" or self.state == "overridden"):
            # Top-level options present only in `have` must be negated.
            for k, v in iteritems(have):
                if k in ["ip", "ipv6"]:
                    for key, val in iteritems(v):
                        if key and val:
                            self.compare(
                                parsers=parsers,
                                want=dict(),
                                have={compare_type: {k: {key: val}}},
                            )
                else:
                    self.compare(
                        parsers=parsers,
                        want=dict(),
                        have={compare_type: {k: v}},
                    )
    def list_to_dict(self, param):
        """Normalize facts in place so want/have can be diffed by key.

        Inner option lists are converted to sorted dicts keyed with a
        synthetic "<prefix>_<value>" name, and each route-map's entry list
        becomes a dict keyed by "<action>_<sequence>".
        """
        if param:
            # Key each list element as "<key>_<element>", sorted by element value.
            def convert_to_dict(inner_match, key):
                temp = dict()
                for each in inner_match:
                    temp.update({key + "_" + str(each): each})
                return dict(sorted(temp.items(), key=lambda x: x[1]))
            for key, val in iteritems(param):
                temp_entries = dict()
                if val.get("entries"):
                    for every in val["entries"]:
                        match = every.get("match")
                        if match:
                            if match.get("as_path") and match.get(
                                "as_path"
                            ).get("acls"):
                                match["as_path"]["acls"] = convert_to_dict(
                                    match["as_path"]["acls"], "acl"
                                )
                            if match.get("community") and match.get(
                                "community"
                            ).get("name"):
                                match["community"]["name"] = convert_to_dict(
                                    match["community"]["name"], "name"
                                )
                            if match.get("extcommunity"):
                                match["extcommunity"] = convert_to_dict(
                                    match["extcommunity"], "num"
                                )
                            if match.get("interfaces"):
                                match["interfaces"] = convert_to_dict(
                                    match["interfaces"], "interface"
                                )
                            if match.get("ip"):
                                for each_ip_param in [
                                    "address",
                                    "flowspec",
                                    "next_hop",
                                    "redistribution_source",
                                    "route_source",
                                ]:
                                    if match["ip"].get(each_ip_param):
                                        if match["ip"][each_ip_param].get(
                                            "acls"
                                        ):
                                            match["ip"][each_ip_param][
                                                "acls"
                                            ] = convert_to_dict(
                                                match["ip"][each_ip_param][
                                                    "acls"
                                                ],
                                                "acl",
                                            )
                                        elif match["ip"][each_ip_param].get(
                                            "prefix_lists"
                                        ):
                                            match["ip"][each_ip_param][
                                                "prefix_lists"
                                            ] = convert_to_dict(
                                                match["ip"][each_ip_param][
                                                    "prefix_lists"
                                                ],
                                                "prefix_list",
                                            )
                            if match.get("local_preference") and match.get(
                                "local_preference"
                            ).get("value"):
                                match["local_preference"][
                                    "value"
                                ] = convert_to_dict(
                                    match["local_preference"]["value"], "value"
                                )
                            if match.get("mdt_group") and match.get(
                                "mdt_group"
                            ).get("acls"):
                                match["mdt_group"]["acls"] = convert_to_dict(
                                    match["mdt_group"]["acls"], "acl"
                                )
                            if match.get("policy_lists"):
                                match["policy_lists"] = convert_to_dict(
                                    match["policy_lists"], "policy"
                                )
                            if match.get("security_group"):
                                for each_sg_param in ["source", "destination"]:
                                    if match.get("security_group").get(
                                        each_sg_param
                                    ):
                                        match["security_group"][
                                            each_sg_param
                                        ] = convert_to_dict(
                                            match["security_group"][
                                                each_sg_param
                                            ],
                                            each_sg_param,
                                        )
                        # NOTE: local name shadows the builtin set() for the
                        # rest of this loop iteration.
                        set = every.get("set")
                        if set:
                            if set.get("interfaces"):
                                set["interfaces"] = convert_to_dict(
                                    set["interfaces"], "interface"
                                )
                        action = every.get("action")
                        sequence = every.get("sequence")
                        temp_entries.update(
                            {action + "_" + str(sequence): every}
                        )
                    val["entries"] = temp_entries
| [
"ansible_collections.cisco.ios.plugins.module_utils.network.ios.rm_templates.route_maps.Route_mapsTemplate",
"ansible.module_utils.six.iteritems",
"ansible_collections.cisco.ios.plugins.module_utils.network.ios.facts.facts.Facts",
"ansible_collections.ansible.netcommon.plugins.module_utils.network.common.util... | [((2995, 3011), 'ansible.module_utils.six.iteritems', 'iteritems', (['wantd'], {}), '(wantd)\n', (3004, 3011), False, 'from ansible.module_utils.six import iteritems\n'), ((8768, 8783), 'ansible.module_utils.six.iteritems', 'iteritems', (['want'], {}), '(want)\n', (8777, 8783), False, 'from ansible.module_utils.six import iteritems\n'), ((2373, 2397), 'ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils.dict_merge', 'dict_merge', (['haved', 'wantd'], {}), '(haved, wantd)\n', (2383, 2397), False, 'from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import dict_merge\n'), ((2784, 2800), 'ansible.module_utils.six.iteritems', 'iteritems', (['haved'], {}), '(haved)\n', (2793, 2800), False, 'from ansible.module_utils.six import iteritems\n'), ((8101, 8127), 'ansible.module_utils.six.iteritems', 'iteritems', (["have['entries']"], {}), "(have['entries'])\n", (8110, 8127), False, 'from ansible.module_utils.six import iteritems\n'), ((11029, 11044), 'ansible.module_utils.six.iteritems', 'iteritems', (['have'], {}), '(have)\n', (11038, 11044), False, 'from ansible.module_utils.six import iteritems\n'), ((11960, 11976), 'ansible.module_utils.six.iteritems', 'iteritems', (['param'], {}), '(param)\n', (11969, 11976), False, 'from ansible.module_utils.six import iteritems\n'), ((1287, 1300), 'ansible_collections.cisco.ios.plugins.module_utils.network.ios.facts.facts.Facts', 'Facts', (['module'], {}), '(module)\n', (1292, 1300), False, 'from ansible_collections.cisco.ios.plugins.module_utils.network.ios.facts.facts import Facts\n'), ((1382, 1402), 'ansible_collections.cisco.ios.plugins.module_utils.network.ios.rm_templates.route_maps.Route_mapsTemplate', 'Route_mapsTemplate', ([], {}), '()\n', (1400, 1402), False, 'from ansible_collections.cisco.ios.plugins.module_utils.network.ios.rm_templates.route_maps import Route_mapsTemplate\n'), 
((3638, 3664), 'ansible.module_utils.six.iteritems', 'iteritems', (["want['entries']"], {}), "(want['entries'])\n", (3647, 3664), False, 'from ansible.module_utils.six import iteritems\n'), ((6592, 6618), 'ansible.module_utils.six.iteritems', 'iteritems', (["want['entries']"], {}), "(want['entries'])\n", (6601, 6618), False, 'from ansible.module_utils.six import iteritems\n'), ((9412, 9424), 'ansible.module_utils.six.iteritems', 'iteritems', (['v'], {}), '(v)\n', (9421, 9424), False, 'from ansible.module_utils.six import iteritems\n'), ((2560, 2576), 'ansible.module_utils.six.iteritems', 'iteritems', (['haved'], {}), '(haved)\n', (2569, 2576), False, 'from ansible.module_utils.six import iteritems\n'), ((10692, 10709), 'ansible.module_utils.six.iteritems', 'iteritems', (['have_v'], {}), '(have_v)\n', (10701, 10709), False, 'from ansible.module_utils.six import iteritems\n'), ((11122, 11134), 'ansible.module_utils.six.iteritems', 'iteritems', (['v'], {}), '(v)\n', (11131, 11134), False, 'from ansible.module_utils.six import iteritems\n')] |
"""
cpsg.py
~~~~~~
Concfg Preset Screenshot Generator
Only works in pure powershell/pwsh session, does not work in terminal like cmder.
Prerequisites:
Python3.4+, Pillow, jinja2, pywin32
"""
import os
import sys
import glob
import time
import shutil
import argparse
import win32gui
import subprocess
import win32process
from PIL import ImageGrab
from jinja2 import Template
LEGACY_PWSH = False  # rebound at runtime when --legacy selects Windows PowerShell over pwsh
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))  # directory containing this script
PRESETS_DIR = os.path.join(SCRIPT_DIR, os.pardir, 'presets')  # where the *.json color presets live
PRESET_EXAMPLES_DIR = os.path.join(SCRIPT_DIR, os.pardir, 'preset_examples')  # screenshot output directory
SKIP_LIST = ['basic', 'basic-reset']  # non-color presets that get no screenshot
def get_hwnds_for_pid(pid):
    """Return handles of all visible, enabled top-level windows owned by *pid*."""
    matches = []

    def _collect(hwnd, acc):
        # Only windows the user can actually see and interact with.
        if win32gui.IsWindowVisible(hwnd) and win32gui.IsWindowEnabled(hwnd):
            _, owner_pid = win32process.GetWindowThreadProcessId(hwnd)
            if owner_pid == pid:
                acc.append(hwnd)
        return True

    win32gui.EnumWindows(_collect, matches)
    return matches
def get_presets():
    """Return a preset pair list ``[(name, path), (name, path), ...]``.

    One pair per ``*.json`` file found in ``PRESETS_DIR``; *name* is the
    file's base name without the extension.
    """
    pattern = os.path.join(PRESETS_DIR, '*.json')
    return [
        (os.path.splitext(os.path.basename(path))[0], path)
        for path in glob.glob(pattern)
    ]
def gens_for_preset(preset):
    """Open a fresh console for *preset* and save a screenshot of its colors.

    *preset* is a ``(name, path)`` pair as produced by ``get_presets()``.
    The screenshot is written to ``PRESET_EXAMPLES_DIR/<name>.png``.
    The sleeps below give the spawned consoles time to appear/exit; the
    timing is deliberate — do not shorten without re-testing.
    """
    exe = 'powershell' if LEGACY_PWSH else 'pwsh'
    print("Taking screenshot of preset '{0}'...".format(preset[0]))
    # set color preset (console exits on its own; handle is overwritten below)
    pwsh = subprocess.Popen(
        '{0} -noprofile -file {1}/setcolors.ps1 -preset {2}'.format(exe, SCRIPT_DIR, preset[1]),
        creationflags=subprocess.CREATE_NEW_CONSOLE
    )
    # waiting for exit
    time.sleep(4.0)
    # print out color table then take screenshot (-noexit keeps the window up)
    pwsh = subprocess.Popen(
        '{0} -noprofile -noexit -file {1}/outcolors.ps1'.format(exe, SCRIPT_DIR),
        creationflags=subprocess.CREATE_NEW_CONSOLE
    )
    # waiting for process
    time.sleep(2.0)
    for hwnd in get_hwnds_for_pid(pwsh.pid):
        # Bring the console to the foreground so the grab captures it.
        win32gui.SetForegroundWindow(hwnd)
        bbox = win32gui.GetWindowRect(hwnd)
        # remove window box shadow
        crop_bbox = (bbox[0]+7, bbox[1], bbox[2]-7, bbox[3]-7)
        img = ImageGrab.grab(crop_bbox)
        if not os.path.exists(PRESET_EXAMPLES_DIR):
            os.makedirs(PRESET_EXAMPLES_DIR)
        img.save(os.path.join(PRESET_EXAMPLES_DIR, '{0}.png'.format(preset[0])))
    pwsh.kill()
def img_dict(direntry):
    """Map a directory entry for a PNG screenshot to a ``{name, path}`` dict."""
    stem = direntry.name.replace('.png', '')
    return {'name': stem, 'path': direntry.name}
def is_img(direntry):
    """Return True if *direntry* is a regular file with a ``.png`` extension.

    Bug fix: the original tested ``direntry.is_file`` without calling it.
    A bound method object is always truthy, so the check never filtered out
    directories; ``is_file()`` must be invoked.
    """
    return direntry.is_file() and direntry.name.endswith('.png')
if __name__ == '__main__':
    # Usage: python -m cpsg [args]
    parser = argparse.ArgumentParser(
        description='Concfg Preset Screenshot Generator')
    parser.add_argument("-a", "--all",
                        help="generate screenshot for all presets",
                        action="store_true")
    parser.add_argument("-l", "--legacy",
                        help="pass this option if you use Windows PowerShell",
                        action="store_true")
    parser.add_argument("-p", "--preset",
                        help="generate screenshot for single preset")
    parser.add_argument("-u", "--update",
                        help="also update the screenshot README",
                        action="store_true")
    args = parser.parse_args()
    if args.all or args.preset:
        # ColorTool is required by setcolors.ps1 to apply the preset.
        if not shutil.which('colortool.exe'):
            print("Make sure you have 'ColorTool' installed.")
            sys.exit(0)
        input("NOTICE: Do not have other operations while the script runs, "
              "or it will be interrupted when taking screenshots. "
              "Hit Enter to continue: ")
        presets = get_presets()
        if args.legacy:
            # This is module scope, so the assignment rebinds the global
            # that gens_for_preset() reads.
            LEGACY_PWSH = True
        if args.all:
            for item in presets:
                # skip non-color presets
                if not item[0] in SKIP_LIST:
                    gens_for_preset(item)
        elif args.preset:
            # skip non-color presets
            if not args.preset in SKIP_LIST:
                match = [item for item in presets if item[0] == args.preset]
                if len(match):
                    gens_for_preset(match[0])
                else:
                    print("No preset named '{0}'.".format(args.preset))
        sys.exit(0)
    if args.update:
        print('Updating screenshots README.md...')
        # Get template
        with open(os.path.join(SCRIPT_DIR, 'readme.jinja2')) as templateData:
            template = Template(templateData.read())
        # Get images (every *.png in the examples dir, sorted by name)
        images = [img_dict(direntry) for direntry in os.scandir(PRESET_EXAMPLES_DIR) if is_img(direntry)]
        images.sort(key=lambda x: x['name'])
        # Generate README
        with open(os.path.join(PRESET_EXAMPLES_DIR, 'README.md'), 'w') as readme:
            readme.write(template.render(images=images))
    else:
        parser.print_help()
        sys.exit(0)
| [
"win32gui.GetWindowRect",
"argparse.ArgumentParser",
"os.makedirs",
"PIL.ImageGrab.grab",
"win32gui.IsWindowVisible",
"win32process.GetWindowThreadProcessId",
"os.path.realpath",
"os.path.basename",
"os.path.exists",
"shutil.which",
"time.sleep",
"win32gui.EnumWindows",
"win32gui.SetForegrou... | [((518, 564), 'os.path.join', 'os.path.join', (['SCRIPT_DIR', 'os.pardir', '"""presets"""'], {}), "(SCRIPT_DIR, os.pardir, 'presets')\n", (530, 564), False, 'import os\n'), ((588, 642), 'os.path.join', 'os.path.join', (['SCRIPT_DIR', 'os.pardir', '"""preset_examples"""'], {}), "(SCRIPT_DIR, os.pardir, 'preset_examples')\n", (600, 642), False, 'import os\n'), ((475, 501), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (491, 501), False, 'import os\n'), ((1021, 1058), 'win32gui.EnumWindows', 'win32gui.EnumWindows', (['callback', 'hwnds'], {}), '(callback, hwnds)\n', (1041, 1058), False, 'import win32gui\n'), ((1762, 1777), 'time.sleep', 'time.sleep', (['(4.0)'], {}), '(4.0)\n', (1772, 1777), False, 'import time\n'), ((2035, 2050), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (2045, 2050), False, 'import time\n'), ((2155, 2183), 'win32gui.GetWindowRect', 'win32gui.GetWindowRect', (['hwnd'], {}), '(hwnd)\n', (2177, 2183), False, 'import win32gui\n'), ((2287, 2312), 'PIL.ImageGrab.grab', 'ImageGrab.grab', (['crop_bbox'], {}), '(crop_bbox)\n', (2301, 2312), False, 'from PIL import ImageGrab\n'), ((2841, 2914), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Concfg Preset Screenshot Generator"""'}), "(description='Concfg Preset Screenshot Generator')\n", (2864, 2914), False, 'import argparse\n'), ((1125, 1160), 'os.path.join', 'os.path.join', (['PRESETS_DIR', '"""*.json"""'], {}), "(PRESETS_DIR, '*.json')\n", (1137, 1160), False, 'import os\n'), ((2106, 2140), 'win32gui.SetForegroundWindow', 'win32gui.SetForegroundWindow', (['hwnd'], {}), '(hwnd)\n', (2134, 2140), False, 'import win32gui\n'), ((2325, 2360), 'os.path.exists', 'os.path.exists', (['PRESET_EXAMPLES_DIR'], {}), '(PRESET_EXAMPLES_DIR)\n', (2339, 2360), False, 'import os\n'), ((2371, 2403), 'os.makedirs', 'os.makedirs', (['PRESET_EXAMPLES_DIR'], {}), '(PRESET_EXAMPLES_DIR)\n', (2382, 2403), False, 'import os\n'), 
((5267, 5278), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5275, 5278), False, 'import sys\n'), ((760, 790), 'win32gui.IsWindowVisible', 'win32gui.IsWindowVisible', (['hwnd'], {}), '(hwnd)\n', (784, 790), False, 'import win32gui\n'), ((795, 825), 'win32gui.IsWindowEnabled', 'win32gui.IsWindowEnabled', (['hwnd'], {}), '(hwnd)\n', (819, 825), False, 'import win32gui\n'), ((856, 899), 'win32process.GetWindowThreadProcessId', 'win32process.GetWindowThreadProcessId', (['hwnd'], {}), '(hwnd)\n', (893, 899), False, 'import win32process\n'), ((3604, 3633), 'shutil.which', 'shutil.which', (['"""colortool.exe"""'], {}), "('colortool.exe')\n", (3616, 3633), False, 'import shutil\n'), ((3712, 3723), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3720, 3723), False, 'import sys\n'), ((4729, 4770), 'os.path.join', 'os.path.join', (['SCRIPT_DIR', '"""readme.jinja2"""'], {}), "(SCRIPT_DIR, 'readme.jinja2')\n", (4741, 4770), False, 'import os\n'), ((4933, 4964), 'os.scandir', 'os.scandir', (['PRESET_EXAMPLES_DIR'], {}), '(PRESET_EXAMPLES_DIR)\n', (4943, 4964), False, 'import os\n'), ((5092, 5138), 'os.path.join', 'os.path.join', (['PRESET_EXAMPLES_DIR', '"""README.md"""'], {}), "(PRESET_EXAMPLES_DIR, 'README.md')\n", (5104, 5138), False, 'import os\n'), ((1248, 1270), 'os.path.basename', 'os.path.basename', (['item'], {}), '(item)\n', (1264, 1270), False, 'import os\n'), ((4583, 4594), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4591, 4594), False, 'import sys\n')] |
"""
Brython MDCComponent: MDCList
=============================
"""
from browser import html
from .core import MDCTemplate
########################################################################
class __listItem__(MDCTemplate):
    """Template for a single ``mdc-list-item`` entry.

    Bug fix: ``get`` compared strings with ``is`` (object identity), which
    only works by accident of interning and raises a SyntaxWarning on
    modern CPython; it now uses ``==`` (value equality).
    """
    MDC_optionals = {
        'meta': '<span class="mdc-list-item__meta">{meta}</span>',
        'icon_meta': '<a href="#" class="mdc-list-item__meta material-icons" onclick="event.preventDefault();" style="text-decoration: none; color: {meta_color};">{icon_meta}</a>',
        # 'fa_icon_meta': '<a href="#" class="mdc-list-item__meta material-icons" onclick="event.preventDefault();" style="text-decoration: none; color: {meta_color};">{icon_meta}</a>',
        'fa_icon_meta': '<i class="mdc-list-item__meta {fa_style_meta} {fa_icon_meta}" onclick="event.preventDefault();" style="text-decoration: none; color: {meta_color};"></i>',
        'icon': '<i class="material-icons mdc-list-item__graphic" aria-hidden="true">{icon}</i>',
        'fa_icon': '<i class="mdc-list-item__graphic {fa_style} {fa_icon}"></i>',
        'avatar': '<span class="mdc-list-item__graphic" style="background-color: {avatar_background_color}; color: {avatar_color};" role="presentation"><i class="material-icons" aria-hidden="true">{avatar}</i></span>',
        'placeholder': '<span class="mdc-list-item__graphic" style="background-color: {placeholder_background_color};"></span>',
    }
    # ----------------------------------------------------------------------
    def __new__(self, text, secondary_text=None, icon=False, icon_meta=False, meta=False, avatar=False, placeholder_background_coloiconr='rgba(0,0,0,.38)', avatar_color='white', meta_color='rgba(0,0,0,.38)', avatar_background_color='rgba(0,0,0,.38)', **kwargs):
        """Render the list item and return the resulting element.

        ``icon``/``icon_meta`` values starting with ``fa`` (Font Awesome,
        e.g. ``fas-check``) are split into a style class and an icon class;
        the original Material-icon variable is then deleted so it does not
        reach the template context (``locals()`` is the context).
        NOTE(review): ``placeholder_background_coloiconr`` looks like a
        typo'd parameter name, but it is part of the public signature and
        is kept for backward compatibility.
        """
        if icon and icon.startswith('fa'):
            fa_style = icon[:icon.find('-')]
            fa_icon = 'fa' + icon[icon.find('-'):]
            del icon
        if icon_meta and icon_meta.startswith('fa'):
            fa_style_meta = icon_meta[:icon_meta.find('-')]
            fa_icon_meta = 'fa' + icon_meta[icon_meta.find('-'):]
            del icon_meta
        self.element = self.render(locals(), kwargs)
        return self.element
    # ----------------------------------------------------------------------
    @classmethod
    def __html__(cls, **context):
        """Return the item markup (two-line variant when ``secondary_text`` is set)."""
        if context['secondary_text']:
            code = """
            <li class="mdc-list-item">
                {icon}
                {fa_icon}
                {avatar}
                {placeholder}
                <span class="mdc-list-item__text">
                    <span class="mdc-list-item__primary-text">{text}</span>
                    <span class="mdc-list-item__secondary-text">{secondary_text}</span>
                </span>
                {meta}
                {icon_meta}
                {fa_icon_meta}
            </li>
            """
        else:
            code = """
            <li class="mdc-list-item">
                {icon}
                {avatar}
                {placeholder}
                <span class="mdc-list-item__text">{text}</span>
                {meta}
                {icon_meta}
            </li>
            """
        return cls.render_html(code, context)
    # ----------------------------------------------------------------------
    @classmethod
    def get(self, name):
        """Return a named sub-element of the rendered item."""
        if name == 'icon':
            return self.element.select('.mdc-list-item__graphic')[0]
        elif name == 'icon_meta':
            return self.element.select('.mdc-list-item__meta')[0]
        elif name == 'primary_text':
            return self.element.select('.mdc-list-item__primary-text')[0]
########################################################################
class __listChekItem__(MDCTemplate):
    """Template for a list item with a trailing checkbox.

    NOTE(review): the class name is spelled ``Chek`` (sic); it is referenced
    elsewhere with the same spelling, so it is kept as-is.
    """
    MDC_optionals = {
        # Emitted into the <input> tag only when checked=True.
        'checked': 'checked=true',
    }
    # ----------------------------------------------------------------------
    def __new__(self, text, checked=False, **kwargs):
        """Render the checkbox item and return the resulting element."""
        self.element = self.render(locals(), kwargs)
        return self.element
    # ----------------------------------------------------------------------
    @classmethod
    def __html__(cls, **context):
        """Return the checkbox-item markup (MDC checkbox inside the meta slot)."""
        code = """
        <li class="mdc-list-item checkbox-list-ripple-surface mdc-ripple-upgraded" style="--mdc-ripple-fg-size:360px; --mdc-ripple-fg-scale:1.6997692716423716; --mdc-ripple-fg-translate-start:258px, -163.06666564941406px; --mdc-ripple-fg-translate-end:120px, -156px;">
            <label for="trailing-checkbox-blueberries">{text}</label>
            <span class="mdc-list-item__meta">
                <div class="mdc-checkbox mdc-checkbox--upgraded mdc-ripple-upgraded mdc-ripple-upgraded--unbounded" style="--mdc-ripple-fg-size:24px; --mdc-ripple-fg-scale:1.6666666666666667; --mdc-ripple-left:8px; --mdc-ripple-top:8px;">
                    <input class="mdc-checkbox__native-control" {checked} id="trailing-checkbox-blueberries" type="checkbox">
                    <div class="mdc-checkbox__background">
                        <svg class="mdc-checkbox__checkmark" viewBox="0 0 24 24">
                            <path class="mdc-checkbox__checkmark-path" fill="none" stroke="white" d="M1.73,12.91 8.1,19.28 22.79,4.59"></path>
                        </svg>
                        <div class="mdc-checkbox__mixedmark"></div>
                    </div>
                </div>
            </span>
        </li>
        """
        return cls.render_html(code, context)
########################################################################
class MDCListGroup(MDCTemplate):
    """Template for an ``mdc-list-group`` container of labelled lists."""
    # ----------------------------------------------------------------------
    # def __new__(self, **kwargs):
    #     """"""
    #self.element = self.render(locals(), kwargs)
    # return self.element
    # ----------------------------------------------------------------------
    @classmethod
    def __html__(cls, **context):
        """Return the empty group markup; lists are appended via add_list()."""
        code = """
            <div class="mdc-list-group">
            </div>
        """
        return cls.render_html(code, context)
    # ----------------------------------------------------------------------
    @classmethod
    def add_list(cls, element, label, list_):
        """Append a subheader with *label* followed by *list_* to the group."""
        cls.element <= html.H3(label, Class='mdc-list-group__subheader')
        cls.element <= list_
########################################################################
class MDCList(MDCTemplate):
    """Template for an ``mdc-list`` element.

    The ``two_line``/``dense``/``avatar``/``non_interactive`` modifier
    classes are switched on through ``MDC_optionals``.
    """
    NAME = 'list', 'MDCList'
    MDC_optionals = {
        'two_line': 'mdc-list--two-line',
        'dense': 'mdc-list--two-line mdc-list--dense',
        'avatar': 'mdc-list--avatar-list',
        'non_interactive': 'mdc-list--non-interactive',
    }
    # ----------------------------------------------------------------------
    def __new__(self, two_line=False, dense=False, avatar=False, **kwargs):
        """Render the list element and return it.

        NOTE: ``locals()`` is passed straight to ``render`` as the template
        context, so no extra local names may be introduced in this method.
        """
        self.element = self.render(locals(), kwargs)
        return self.element
    # ----------------------------------------------------------------------
    @classmethod
    def __html__(cls, **context):
        """Return the raw ``<ul>`` markup with the optional modifier slots."""
        markup = """
            <ul class="mdc-list {two_line} {dense} {avatar} {non_interactive}">
            </ul>
        """
        return cls.render_html(markup, context)
    # ----------------------------------------------------------------------
    @classmethod
    def get(self, name):
        """Named sub-element accessor; no sub-elements are exposed yet."""
        # Kept for interface parity with the other MDC components.
    # ----------------------------------------------------------------------
    @classmethod
    def add_item(cls, element, *args, **kwargs):
        """Build a ``__listItem__`` from the arguments, append it, return it."""
        new_item = __listItem__(*args, **kwargs)
        cls.element <= new_item
        return new_item
    # ----------------------------------------------------------------------
    @classmethod
    def add_check_item(cls, element, *args, **kwargs):
        """Build a ``__listChekItem__``, append it to the list and return it."""
        new_item = __listChekItem__(*args, **kwargs)
        cls.element <= new_item
        return new_item
    # ----------------------------------------------------------------------
    @classmethod
    def add_divider(cls, element, hr=False, inset=False):
        """Append a divider: ``<hr>`` when *hr* else ``<li role="separator">``."""
        inset_class = 'mdc-list-divider--inset' if inset else ''
        if hr:
            template = '<hr class="mdc-list-divider {inset}">'
        else:
            template = '<li role="separator" class="mdc-list-divider {inset}"></li>'
        divider = cls.render_str(template.format(inset=inset_class))
        cls.element <= divider
| [
"browser.html.H3"
] | [((6421, 6470), 'browser.html.H3', 'html.H3', (['label'], {'Class': '"""mdc-list-group__subheader"""'}), "(label, Class='mdc-list-group__subheader')\n", (6428, 6470), False, 'from browser import html\n')] |
"""Exercise 2 skeleton: Create a square that bounces when it reaches any boundary of the canvas
Remember to fill out all the TODO's, you can quickly scan for them by pressing CTRL/CMD + F
"""
import sys
import os
import pygame
"""
SETUP section - preparing everything before the main loop runs
"""
pygame.init()
screen_width, screen_height = 1000, 800
screen = pygame.display.set_mode((screen_width, screen_height))
clock = pygame.time.Clock()  # used to cap the frame rate each iteration
FRAME_RATE = 40  # target frames per second
BLACK = (0, 0, 0)  # background colour (RGB)
# Setup our variables.
RED = (255, 0, 0) # A constant with the color red as a tuple
rect = pygame.Rect(200, 100, 75, 75) # A rectangle object we can manipulate later
speed_x = 5 # The speed we are traveling in the x direction on each frame
speed_y = 5 # The speed we are traveling in the y direction on each frame
def check_collisions(r=None, sx=None, sy=None, bounds=None):
    """Return the (possibly reversed) speeds after bouncing *r* off *bounds*.

    Fixes the skeleton stub: the main loop unpacks two values from this
    function, but the stub returned ``None`` and crashed.  All parameters
    default to the module-level globals so the original zero-argument call
    site keeps working.

    Args:
        r: rectangle with ``left``/``right``/``top``/``bottom`` attributes
           (default: module-level ``rect``).
        sx, sy: current x/y speeds (default: ``speed_x``/``speed_y``).
        bounds: ``(width, height)`` of the playing field
                (default: ``(screen_width, screen_height)``).

    Returns:
        ``(sx, sy)`` with the sign flipped on any axis whose boundary
        the rectangle touches.
    """
    if r is None:
        r = rect
    if sx is None:
        sx = speed_x
    if sy is None:
        sy = speed_y
    if bounds is None:
        bounds = (screen_width, screen_height)
    width, height = bounds
    # Bounce horizontally when the square touches the left or right edge.
    if r.left <= 0 or r.right >= width:
        sx = -sx
    # Bounce vertically when the square touches the top or bottom edge.
    if r.top <= 0 or r.bottom >= height:
        sy = -sy
    return sx, sy
while True:
    """
    EVENTS section - how the code reacts when users do things
    """
    # Drain the event queue; closing the window exits the program cleanly.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
    """
    UPDATE section - manipulate everything on the screen
    """
    # TODO: Update rectangle x and y coordinates if you hit the sides/top/bottom
    speed_x, speed_y = check_collisions() # TODO: add arguments
    """
    DRAW section - make everything show up on screen
    """
    screen.fill(BLACK) # Fill the screen with one colour
    pygame.draw.rect(screen, RED, rect) # Draw the rectangle
    #### DRAW THINGS BEFORE THIS ####
    pygame.display.flip() # Pygame uses a double-buffer, without this we see half-completed frames
clock.tick(FRAME_RATE) # Pause the clock to maintain 40 frames per second | [
"pygame.quit",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.draw.rect",
"pygame.Rect",
"pygame.init",
"pygame.display.flip",
"pygame.time.Clock",
"sys.exit"
] | [((301, 314), 'pygame.init', 'pygame.init', ([], {}), '()\n', (312, 314), False, 'import pygame\n'), ((365, 419), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(screen_width, screen_height)'], {}), '((screen_width, screen_height))\n', (388, 419), False, 'import pygame\n'), ((429, 448), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (446, 448), False, 'import pygame\n'), ((577, 606), 'pygame.Rect', 'pygame.Rect', (['(200)', '(100)', '(75)', '(75)'], {}), '(200, 100, 75, 75)\n', (588, 606), False, 'import pygame\n'), ((1151, 1169), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1167, 1169), False, 'import pygame\n'), ((1621, 1656), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'RED', 'rect'], {}), '(screen, RED, rect)\n', (1637, 1656), False, 'import pygame\n'), ((1721, 1742), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (1740, 1742), False, 'import pygame\n'), ((1221, 1234), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (1232, 1234), False, 'import pygame\n'), ((1247, 1257), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1255, 1257), False, 'import sys\n')] |
import fs
from conftest import rules_shortcut, make_files, read_files
from organize import core
def test_rename_issue52():
    # test for issue https://github.com/tfeldmann/organize/issues/51
    # NOTE(review): function name says issue 52 but the URL says 51 —
    # confirm which issue this regression test covers.
    # A PDF whose name starts with "19" and contains "_WF_" must be renamed
    # (with a lower-cased extension) and copied; the other files untouched.
    files = {
        "files": {
            "19asd_WF_test2.PDF": "",
            "other.pdf": "",
            "18asd_WFX_test2.pdf": "",
        }
    }
    with fs.open_fs("temp://") as mem:
        make_files(mem, files)
        config = rules_shortcut(
            mem,
            filters="""
            - extension
            - name:
                startswith: "19"
                contains:
                  - "_WF_"
            """,
            actions=[
                {"rename": "{path.stem}_unread.{extension.lower()}"},
                {"copy": {"dest": "files/copy/", "filesystem": mem}},
            ],
        )
        core.run(config, simulate=False)
        # Debug aid: prints the resulting filesystem tree to stdout.
        mem.tree()
        result = read_files(mem)
        assert result == {
            "files": {
                "copy": {
                    "19asd_WF_test2_unread.pdf": "",
                },
                "19asd_WF_test2_unread.pdf": "",
                "other.pdf": "",
                "18asd_WFX_test2.pdf": "",
            }
        }
| [
"fs.open_fs",
"conftest.make_files",
"conftest.read_files",
"organize.core.run",
"conftest.rules_shortcut"
] | [((358, 379), 'fs.open_fs', 'fs.open_fs', (['"""temp://"""'], {}), "('temp://')\n", (368, 379), False, 'import fs\n'), ((396, 418), 'conftest.make_files', 'make_files', (['mem', 'files'], {}), '(mem, files)\n', (406, 418), False, 'from conftest import rules_shortcut, make_files, read_files\n'), ((436, 749), 'conftest.rules_shortcut', 'rules_shortcut', (['mem'], {'filters': '"""\n - extension\n - name:\n startswith: "19"\n contains:\n - "_WF_"\n """', 'actions': "[{'rename': '{path.stem}_unread.{extension.lower()}'}, {'copy': {'dest':\n 'files/copy/', 'filesystem': mem}}]"}), '(mem, filters=\n """\n - extension\n - name:\n startswith: "19"\n contains:\n - "_WF_"\n """\n , actions=[{\'rename\': \'{path.stem}_unread.{extension.lower()}\'}, {\n \'copy\': {\'dest\': \'files/copy/\', \'filesystem\': mem}}])\n', (450, 749), False, 'from conftest import rules_shortcut, make_files, read_files\n'), ((837, 869), 'organize.core.run', 'core.run', (['config'], {'simulate': '(False)'}), '(config, simulate=False)\n', (845, 869), False, 'from organize import core\n'), ((906, 921), 'conftest.read_files', 'read_files', (['mem'], {}), '(mem)\n', (916, 921), False, 'from conftest import rules_shortcut, make_files, read_files\n')] |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
import pandas as pd
app = dash.Dash(__name__)
# Static page layout: heading, explanatory text, three numeric inputs
# (retail price, monthly expenses, interest rate) and the output graph.
app.layout = html.Div([
    html.H2("True Cost of Car (with Interest and Monthly Expenses)", style={'text-align':'center'}),
    html.Hr(),
    html.P("This calculator allows you to estimate the true cost of your car (over 10 years), taking into account loan downpayment, loan term, interest rates, and estimated monthly expenses (on fuel, parking etc.).", style={'text-align':'center'}),
    # Retail price and monthly expenses, centred side by side.
    html.Div([
        dcc.Input(
            id='carprice',
            min=50000,
            value='',
            placeholder="Retail Price",
            type="number",
            style={'text-align':'center'}
        ),
        dcc.Input(
            id='monthexp',
            min=500,
            value='',
            placeholder="Monthly Expenses",
            type="number",
            style={'text-align':'center'}
        )], style=dict(display='flex', justifyContent='center')),
    # Annual interest rate in percent.
    html.Div([
        dcc.Input(
            id='intrate',
            min=0.01,
            value='',
            placeholder="Interest Rates (%)",
            type="number",
            style={'text-align':'center'}
        )], style=dict(display='flex', justifyContent='center')),
    html.Hr(),
    dcc.Graph(id='graph-car-price')
])
@app.callback(
    Output('graph-car-price', 'figure'),
    [Input('carprice', 'value'),
    Input('monthexp','value'),
    Input('intrate','value'),
    ])
def update_figure(carprice, monthexp, intrate):
    """Rebuild the cost-vs-downpayment line chart from the three inputs.

    NOTE(review): Dash fires callbacks with None values before the user has
    typed anything, which makes the arithmetic below raise — confirm whether
    a guard (e.g. dash.exceptions.PreventUpdate) is wanted here.
    """
    # Candidate downpayments: from 30% of the retail price up to the full
    # price, in steps of 200.
    downpayment_list = [i for i in range(int(carprice*0.3),int(carprice),200)]
    # create dataframe
    car_loan_df = pd.DataFrame({"Downpayment" : downpayment_list
    })
    # add total cost of car to dataframe: simple interest on the financed
    # amount (principal + rate*years*principal) plus the downpayment itself
    # plus the expenses figure.
    # NOTE(review): monthexp is added once, not multiplied by the number of
    # months in the loan term — confirm that this is intended.
    for z in range(1,8):
        car_loan_df["{} Year".format(z)] = [(((intrate/100)*z*(carprice - downpayment_list[i])+(carprice - downpayment_list[i])))+downpayment_list[i]+monthexp for i in range(0,len(downpayment_list))]
    # melt for easier plotting
    car_melt = pd.melt(car_loan_df, id_vars="Downpayment")
    fig = px.line(car_melt,x="Downpayment",y="value",color="variable",labels={
        "Downpayment": "Initial Downpayment",
        "value": "Total Cost of Car",
        "variable": "Loan Term"
    }, color_discrete_sequence=px.colors.qualitative.Bold)
    # White background with light grey gridlines.
    fig.update_layout({"plot_bgcolor":"white"})
    fig.update_xaxes(showgrid=True, gridwidth=1, gridcolor='lightgrey')
    fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='lightgrey')
    fig.update_layout(transition_duration=500)
    return fig
if __name__ == '__main__':
app.run_server(debug=False) | [
"pandas.DataFrame",
"dash.Dash",
"dash_html_components.H2",
"plotly.express.line",
"dash_core_components.Input",
"dash.dependencies.Input",
"dash_html_components.P",
"dash_core_components.Graph",
"pandas.melt",
"dash.dependencies.Output",
"dash_html_components.Hr"
] | [((182, 201), 'dash.Dash', 'dash.Dash', (['__name__'], {}), '(__name__)\n', (191, 201), False, 'import dash\n'), ((1660, 1707), 'pandas.DataFrame', 'pd.DataFrame', (["{'Downpayment': downpayment_list}"], {}), "({'Downpayment': downpayment_list})\n", (1672, 1707), True, 'import pandas as pd\n'), ((2056, 2099), 'pandas.melt', 'pd.melt', (['car_loan_df'], {'id_vars': '"""Downpayment"""'}), "(car_loan_df, id_vars='Downpayment')\n", (2063, 2099), True, 'import pandas as pd\n'), ((2111, 2342), 'plotly.express.line', 'px.line', (['car_melt'], {'x': '"""Downpayment"""', 'y': '"""value"""', 'color': '"""variable"""', 'labels': "{'Downpayment': 'Initial Downpayment', 'value': 'Total Cost of Car',\n 'variable': 'Loan Term'}", 'color_discrete_sequence': 'px.colors.qualitative.Bold'}), "(car_melt, x='Downpayment', y='value', color='variable', labels={\n 'Downpayment': 'Initial Downpayment', 'value': 'Total Cost of Car',\n 'variable': 'Loan Term'}, color_discrete_sequence=px.colors.qualitative\n .Bold)\n", (2118, 2342), True, 'import plotly.express as px\n'), ((1354, 1389), 'dash.dependencies.Output', 'Output', (['"""graph-car-price"""', '"""figure"""'], {}), "('graph-car-price', 'figure')\n", (1360, 1389), False, 'from dash.dependencies import Input, Output\n'), ((231, 332), 'dash_html_components.H2', 'html.H2', (['"""True Cost of Car (with Interest and Monthly Expenses)"""'], {'style': "{'text-align': 'center'}"}), "('True Cost of Car (with Interest and Monthly Expenses)', style={\n 'text-align': 'center'})\n", (238, 332), True, 'import dash_html_components as html\n'), ((332, 341), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (339, 341), True, 'import dash_html_components as html\n'), ((347, 601), 'dash_html_components.P', 'html.P', (['"""This calculator allows you to estimate the true cost of your car (over 10 years), taking into account loan downpayment, loan term, interest rates, and estimated monthly expenses (on fuel, parking etc.)."""'], {'style': 
"{'text-align': 'center'}"}), "(\n 'This calculator allows you to estimate the true cost of your car (over 10 years), taking into account loan downpayment, loan term, interest rates, and estimated monthly expenses (on fuel, parking etc.).'\n , style={'text-align': 'center'})\n", (353, 601), True, 'import dash_html_components as html\n'), ((1283, 1292), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (1290, 1292), True, 'import dash_html_components as html\n'), ((1298, 1329), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""graph-car-price"""'}), "(id='graph-car-price')\n", (1307, 1329), True, 'import dash_core_components as dcc\n'), ((1396, 1422), 'dash.dependencies.Input', 'Input', (['"""carprice"""', '"""value"""'], {}), "('carprice', 'value')\n", (1401, 1422), False, 'from dash.dependencies import Input, Output\n'), ((1428, 1454), 'dash.dependencies.Input', 'Input', (['"""monthexp"""', '"""value"""'], {}), "('monthexp', 'value')\n", (1433, 1454), False, 'from dash.dependencies import Input, Output\n'), ((1459, 1484), 'dash.dependencies.Input', 'Input', (['"""intrate"""', '"""value"""'], {}), "('intrate', 'value')\n", (1464, 1484), False, 'from dash.dependencies import Input, Output\n'), ((611, 735), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""carprice"""', 'min': '(50000)', 'value': '""""""', 'placeholder': '"""Retail Price"""', 'type': '"""number"""', 'style': "{'text-align': 'center'}"}), "(id='carprice', min=50000, value='', placeholder='Retail Price',\n type='number', style={'text-align': 'center'})\n", (620, 735), True, 'import dash_core_components as dcc\n'), ((790, 916), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""monthexp"""', 'min': '(500)', 'value': '""""""', 'placeholder': '"""Monthly Expenses"""', 'type': '"""number"""', 'style': "{'text-align': 'center'}"}), "(id='monthexp', min=500, value='', placeholder='Monthly Expenses',\n type='number', style={'text-align': 'center'})\n", (799, 916), True, 'import 
dash_core_components as dcc\n'), ((1045, 1174), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""intrate"""', 'min': '(0.01)', 'value': '""""""', 'placeholder': '"""Interest Rates (%)"""', 'type': '"""number"""', 'style': "{'text-align': 'center'}"}), "(id='intrate', min=0.01, value='', placeholder=\n 'Interest Rates (%)', type='number', style={'text-align': 'center'})\n", (1054, 1174), True, 'import dash_core_components as dcc\n')] |
from django.urls import reverse
from django.shortcuts import resolve_url
from django.contrib.auth import REDIRECT_FIELD_NAME as redirect_field_name
from .models import is_mfa_enabled, is_u2f_enabled
from .views import verify_rmb_cookie
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError: # Django < 1.10
# Works perfectly for everyone using MIDDLEWARE_CLASSES
MiddlewareMixin = object
class MfaMiddleware(MiddlewareMixin):
    """Redirect authenticated users with MFA/U2F enabled to second-factor verification."""
    def process_request(self, request):
        # A second factor is required when the user is logged in and either
        # (a) MFA is enabled and no valid remember-me cookie is present, or
        # (b) U2F is enabled (checked regardless of the cookie).
        if request.user.is_authenticated and ((not verify_rmb_cookie(request) and is_mfa_enabled(request.user)) or (is_u2f_enabled(request.user))):
            # NOTE(review): the session keys are spelled 'verfied_*' (sic);
            # presumably the verification views set them with the same
            # spelling — confirm before renaming anywhere.
            if not request.session.get('verfied_otp') and not request.session.get('verfied_u2f'):
                current_path = request.path
                paths = [reverse("mfa:verify_second_factor"), reverse(
                    "mfa:verify_second_factor_u2f"), reverse("mfa:verify_second_factor_totp")]
                # Don't redirect while already on a verification page
                # (would create a redirect loop).
                if current_path not in paths:
                    path = request.get_full_path()
                    resolved_login_url = resolve_url(
                        reverse("mfa:verify_second_factor"))
                    # Local import kept as in the original.
                    from django.contrib.auth.views import redirect_to_login
                    return redirect_to_login(path, resolved_login_url, redirect_field_name)
        return None
| [
"django.urls.reverse",
"django.contrib.auth.views.redirect_to_login"
] | [((821, 856), 'django.urls.reverse', 'reverse', (['"""mfa:verify_second_factor"""'], {}), "('mfa:verify_second_factor')\n", (828, 856), False, 'from django.urls import reverse\n'), ((858, 897), 'django.urls.reverse', 'reverse', (['"""mfa:verify_second_factor_u2f"""'], {}), "('mfa:verify_second_factor_u2f')\n", (865, 897), False, 'from django.urls import reverse\n'), ((920, 960), 'django.urls.reverse', 'reverse', (['"""mfa:verify_second_factor_totp"""'], {}), "('mfa:verify_second_factor_totp')\n", (927, 960), False, 'from django.urls import reverse\n'), ((1277, 1341), 'django.contrib.auth.views.redirect_to_login', 'redirect_to_login', (['path', 'resolved_login_url', 'redirect_field_name'], {}), '(path, resolved_login_url, redirect_field_name)\n', (1294, 1341), False, 'from django.contrib.auth.views import redirect_to_login\n'), ((1137, 1172), 'django.urls.reverse', 'reverse', (['"""mfa:verify_second_factor"""'], {}), "('mfa:verify_second_factor')\n", (1144, 1172), False, 'from django.urls import reverse\n')] |
# Generated by Django 3.2.8 on 2021-10-24 02:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.2.8) — do not edit by hand.

    Renames the TipoFecha verbose names, adds the CuotaEvento→Evento
    foreign key, and relabels FechasEvento.fecha.
    """
    dependencies = [
        ('amcm', '0002_auto_20211024_0134'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='tipofecha',
            options={'verbose_name': 'Tipo de Fecha del Evento', 'verbose_name_plural': 'Tipos de Fechas del Evento'},
        ),
        migrations.AddField(
            model_name='cuotaevento',
            name='evento',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='amcm.evento'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='fechasevento',
            name='fecha',
            field=models.DateField(verbose_name='Fecha de Vencimiento'),
        ),
    ]
| [
"django.db.models.ForeignKey",
"django.db.migrations.AlterModelOptions",
"django.db.models.DateField"
] | [((265, 426), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""tipofecha"""', 'options': "{'verbose_name': 'Tipo de Fecha del Evento', 'verbose_name_plural':\n 'Tipos de Fechas del Evento'}"}), "(name='tipofecha', options={'verbose_name':\n 'Tipo de Fecha del Evento', 'verbose_name_plural':\n 'Tipos de Fechas del Evento'})\n", (293, 426), False, 'from django.db import migrations, models\n'), ((567, 662), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'default': '(1)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""amcm.evento"""'}), "(default=1, on_delete=django.db.models.deletion.CASCADE,\n to='amcm.evento')\n", (584, 662), False, 'from django.db import migrations, models\n'), ((821, 874), 'django.db.models.DateField', 'models.DateField', ([], {'verbose_name': '"""Fecha de Vencimiento"""'}), "(verbose_name='Fecha de Vencimiento')\n", (837, 874), False, 'from django.db import migrations, models\n')] |
import os
import sys
from flask import Flask, render_template
from flask.ext.mongoengine import MongoEngine
app = Flask(__name__)
app.config.from_object('config')
# MongoDB connection settings are read from the environment
# (U_DB / U_USER / U_PASS); host and port are fixed to a local instance.
app.config["MONGODB_SETTINGS"] = {
    "DB": os.environ.get("U_DB"),
    "USERNAME": os.environ.get("U_USER"),
    "PASSWORD": os.environ.get("U_PASS"),
    "HOST": "127.0.0.1",
    "PORT": 27017 }
db = MongoEngine(app)
@app.errorhandler(404)
def not_found(error):
    """Serve the custom 404 template for unknown URLs."""
    body = render_template('404.html')
    return body, 404
| [
"os.environ.get",
"flask.Flask",
"flask.render_template",
"flask.ext.mongoengine.MongoEngine"
] | [((115, 130), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (120, 130), False, 'from flask import Flask, render_template\n'), ((369, 385), 'flask.ext.mongoengine.MongoEngine', 'MongoEngine', (['app'], {}), '(app)\n', (380, 385), False, 'from flask.ext.mongoengine import MongoEngine\n'), ((211, 233), 'os.environ.get', 'os.environ.get', (['"""U_DB"""'], {}), "('U_DB')\n", (225, 233), False, 'import os\n'), ((251, 275), 'os.environ.get', 'os.environ.get', (['"""U_USER"""'], {}), "('U_USER')\n", (265, 275), False, 'import os\n'), ((293, 317), 'os.environ.get', 'os.environ.get', (['"""U_PASS"""'], {}), "('U_PASS')\n", (307, 317), False, 'import os\n'), ((443, 470), 'flask.render_template', 'render_template', (['"""404.html"""'], {}), "('404.html')\n", (458, 470), False, 'from flask import Flask, render_template\n')] |
from django.contrib import admin
from archives.models import *
from archives.forms import DocumentForm
class AddressInline(admin.StackedInline):
model = Address
extra = 1
class ContainerStatementInline(admin.TabularInline):
model = ContainerStatements
extra = 1
class DocumentStatementInline(admin.TabularInline):
model = DocumentStatements
extra = 1
class CollectionStatementInline(admin.TabularInline):
model = CollectionStatements
extra = 1
class AddressAdmin(admin.ModelAdmin):
search_fields = ['address1']
class RepositoryAdmin(admin.ModelAdmin):
list_display = ('name', 'identifier', 'description')
inlines = [AddressInline]
save_on_top = True
search_fields = ['name']
class ContainerAdmin(admin.ModelAdmin):
list_display = ('__str__', 'get_collection_full_name',
'box', 'folder', 'series', 'description', 'content_type')
list_filter = ('content_type', 'collection')
inlines = [ContainerStatementInline]
search_fields = ['collection__name', 'collection__identifier',
'box', 'folder', 'series', 'description']
fieldsets = [
(None, {'fields': ['collection', 'content_type']}),
('Container Identifiers', {'fields': ['series', 'box', 'folder']}),
]
save_on_top = True
class DocumentAdmin(admin.ModelAdmin):
list_display = ('__str__', 'id_supplied', 'get_container_full_label',
'get_container_content_type', 'coverage_start', 'coverage_end', 'description')
list_filter = ('container',)
inlines = [DocumentStatementInline]
form = DocumentForm
save_on_top = True
class CollectionAdmin(admin.ModelAdmin):
list_display = ('name', 'name_supplied', 'identifier',
'repository', 'description')
inlines = [CollectionStatementInline]
search_fields = ['name', 'identifier', 'description']
save_on_top = True
admin.site.register(Collection, CollectionAdmin)
admin.site.register(Repository, RepositoryAdmin)
admin.site.register(Container, ContainerAdmin)
admin.site.register(Document, DocumentAdmin)
| [
"django.contrib.admin.site.register"
] | [((1932, 1980), 'django.contrib.admin.site.register', 'admin.site.register', (['Collection', 'CollectionAdmin'], {}), '(Collection, CollectionAdmin)\n', (1951, 1980), False, 'from django.contrib import admin\n'), ((1981, 2029), 'django.contrib.admin.site.register', 'admin.site.register', (['Repository', 'RepositoryAdmin'], {}), '(Repository, RepositoryAdmin)\n', (2000, 2029), False, 'from django.contrib import admin\n'), ((2030, 2076), 'django.contrib.admin.site.register', 'admin.site.register', (['Container', 'ContainerAdmin'], {}), '(Container, ContainerAdmin)\n', (2049, 2076), False, 'from django.contrib import admin\n'), ((2077, 2121), 'django.contrib.admin.site.register', 'admin.site.register', (['Document', 'DocumentAdmin'], {}), '(Document, DocumentAdmin)\n', (2096, 2121), False, 'from django.contrib import admin\n')] |
import numpy as np
from keras.callbacks import Callback
from keras import backend as K
import tensorflow as tf
class SummaryCallback(Callback):
def __init__(self, trainer, validation=False):
super(SummaryCallback, self)
self.trainer = trainer
self.summarysteps = trainer.config['summarysteps']
self.validation = validation
self.image = tf.Variable(0., validate_shape=False)
self.mask = tf.Variable(0., validate_shape=False)
self.predicted = tf.Variable(0., validate_shape=False)
model = self.trainer.model.model
self.fetches = [tf.assign(self.image, model.inputs[0], validate_shape=False),
tf.assign(self.mask, model.targets[0], validate_shape=False),
tf.assign(self.predicted, model.outputs[0], validate_shape=False)]
model._function_kwargs = {'fetches': self.fetches}
def on_train_begin(self, logs={}):
self.losses = []
model = self.trainer.model.model
self.fetches = [tf.assign(self.image, model.inputs[0], validate_shape=False),
tf.assign(self.mask, model.targets[0], validate_shape=False),
tf.assign(self.predicted, model.outputs[0], validate_shape=False)]
model._function_kwargs = {'fetches': self.fetches}
def on_train_end(self, logs={}):
model = self.trainer.model.model
model._function_kwargs = {'fetches': []}
def on_batch_end(self, batch, logs={}):
loss = logs.get('loss')
self.losses.append(loss)
if self.validation is False:
self.trainer.global_step += 1
self.trainer.loss += loss
if batch % self.summarysteps == 0:
if self.trainer.summarywriter:
self.trainer.summarywriter.add_scalar(
self.trainer.name+'loss', loss, global_step=self.trainer.global_step)
image = K.eval(self.image)
if not type(image) is np.float32:
image = image[0]
image = np.rollaxis(image, axis=2, start=0)
mask = K.eval(self.mask)[0]
mask = np.rollaxis(mask, axis=2, start=0)[1]
predicted = K.eval(self.predicted)[0]
predicted = np.rollaxis(predicted, axis=2, start=0)[1]
self.trainer.summarywriter.add_image(
self.trainer.name+'image',image/255.0, global_step=self.trainer.global_step)
self.trainer.summarywriter.add_image(
self.trainer.name+'mask', mask.astype(np.float32), global_step=self.trainer.global_step)
self.trainer.summarywriter.add_image(
self.trainer.name+'predicted', predicted/(predicted.max()+0.0001), global_step=self.trainer.global_step)
else:
if self.trainer.summarywriter:
self.trainer.summarywriter.add_scalar(
self.trainer.name+'val_loss', loss, global_step=self.trainer.global_step)
image = K.eval(self.image)
if not type(image) is np.float32:
image = image[0]
image = np.rollaxis(image, axis=2, start=0)
mask = K.eval(self.mask)[0]
mask = np.rollaxis(mask, axis=2, start=0)[1]
predicted = K.eval(self.predicted)[0]
predicted = np.rollaxis(predicted, axis=2, start=0)[1]
self.trainer.summarywriter.add_image(
self.trainer.name+'val_image',image/255.0, global_step=self.trainer.global_step)
self.trainer.summarywriter.add_image(
self.trainer.name+'val_mask', mask, global_step=self.trainer.global_step)
self.trainer.summarywriter.add_image(
self.trainer.name+'val_predicted', predicted/(predicted.max()+0.0001), global_step=self.trainer.global_step)
| [
"tensorflow.assign",
"tensorflow.Variable",
"keras.backend.eval",
"numpy.rollaxis"
] | [((382, 420), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'validate_shape': '(False)'}), '(0.0, validate_shape=False)\n', (393, 420), True, 'import tensorflow as tf\n'), ((440, 478), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'validate_shape': '(False)'}), '(0.0, validate_shape=False)\n', (451, 478), True, 'import tensorflow as tf\n'), ((503, 541), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'validate_shape': '(False)'}), '(0.0, validate_shape=False)\n', (514, 541), True, 'import tensorflow as tf\n'), ((606, 666), 'tensorflow.assign', 'tf.assign', (['self.image', 'model.inputs[0]'], {'validate_shape': '(False)'}), '(self.image, model.inputs[0], validate_shape=False)\n', (615, 666), True, 'import tensorflow as tf\n'), ((679, 739), 'tensorflow.assign', 'tf.assign', (['self.mask', 'model.targets[0]'], {'validate_shape': '(False)'}), '(self.mask, model.targets[0], validate_shape=False)\n', (688, 739), True, 'import tensorflow as tf\n'), ((752, 817), 'tensorflow.assign', 'tf.assign', (['self.predicted', 'model.outputs[0]'], {'validate_shape': '(False)'}), '(self.predicted, model.outputs[0], validate_shape=False)\n', (761, 817), True, 'import tensorflow as tf\n'), ((1008, 1068), 'tensorflow.assign', 'tf.assign', (['self.image', 'model.inputs[0]'], {'validate_shape': '(False)'}), '(self.image, model.inputs[0], validate_shape=False)\n', (1017, 1068), True, 'import tensorflow as tf\n'), ((1081, 1141), 'tensorflow.assign', 'tf.assign', (['self.mask', 'model.targets[0]'], {'validate_shape': '(False)'}), '(self.mask, model.targets[0], validate_shape=False)\n', (1090, 1141), True, 'import tensorflow as tf\n'), ((1154, 1219), 'tensorflow.assign', 'tf.assign', (['self.predicted', 'model.outputs[0]'], {'validate_shape': '(False)'}), '(self.predicted, model.outputs[0], validate_shape=False)\n', (1163, 1219), True, 'import tensorflow as tf\n'), ((3137, 3155), 'keras.backend.eval', 'K.eval', (['self.image'], {}), '(self.image)\n', (3143, 3155), True, 'from 
keras import backend as K\n'), ((1910, 1928), 'keras.backend.eval', 'K.eval', (['self.image'], {}), '(self.image)\n', (1916, 1928), True, 'from keras import backend as K\n'), ((3271, 3306), 'numpy.rollaxis', 'np.rollaxis', (['image'], {'axis': '(2)', 'start': '(0)'}), '(image, axis=2, start=0)\n', (3282, 3306), True, 'import numpy as np\n'), ((2056, 2091), 'numpy.rollaxis', 'np.rollaxis', (['image'], {'axis': '(2)', 'start': '(0)'}), '(image, axis=2, start=0)\n', (2067, 2091), True, 'import numpy as np\n'), ((3334, 3351), 'keras.backend.eval', 'K.eval', (['self.mask'], {}), '(self.mask)\n', (3340, 3351), True, 'from keras import backend as K\n'), ((3382, 3416), 'numpy.rollaxis', 'np.rollaxis', (['mask'], {'axis': '(2)', 'start': '(0)'}), '(mask, axis=2, start=0)\n', (3393, 3416), True, 'import numpy as np\n'), ((3452, 3474), 'keras.backend.eval', 'K.eval', (['self.predicted'], {}), '(self.predicted)\n', (3458, 3474), True, 'from keras import backend as K\n'), ((3510, 3549), 'numpy.rollaxis', 'np.rollaxis', (['predicted'], {'axis': '(2)', 'start': '(0)'}), '(predicted, axis=2, start=0)\n', (3521, 3549), True, 'import numpy as np\n'), ((2123, 2140), 'keras.backend.eval', 'K.eval', (['self.mask'], {}), '(self.mask)\n', (2129, 2140), True, 'from keras import backend as K\n'), ((2175, 2209), 'numpy.rollaxis', 'np.rollaxis', (['mask'], {'axis': '(2)', 'start': '(0)'}), '(mask, axis=2, start=0)\n', (2186, 2209), True, 'import numpy as np\n'), ((2249, 2271), 'keras.backend.eval', 'K.eval', (['self.predicted'], {}), '(self.predicted)\n', (2255, 2271), True, 'from keras import backend as K\n'), ((2311, 2350), 'numpy.rollaxis', 'np.rollaxis', (['predicted'], {'axis': '(2)', 'start': '(0)'}), '(predicted, axis=2, start=0)\n', (2322, 2350), True, 'import numpy as np\n')] |
import logging
logger = logging.getLogger(__name__)
async def do(param: str) -> None:
pass # pragma: no cover
async def log_something() -> None:
logger.debug("debug log")
logger.info("info log")
logger.warning("warning log")
logger.error("debug log")
logger.critical("critical log")
logger.debug("debug log 2")
logger.error("debug error 2")
| [
"logging.getLogger"
] | [((25, 52), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (42, 52), False, 'import logging\n')] |
import pytest
from ithkuil.morphology.words import remove_stress
txts_to_test = [
('a', 'a'),
('o', 'o'),
('áu', 'au'),
('ái', 'ai'),
('aú', 'aù'),
('aé', 'ae'),
('á', 'a')
]
@pytest.mark.parametrize('txt, expected', txts_to_test)
def test_word(txt, expected):
assert remove_stress(txt) == expected | [
"pytest.mark.parametrize",
"ithkuil.morphology.words.remove_stress"
] | [((206, 260), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""txt, expected"""', 'txts_to_test'], {}), "('txt, expected', txts_to_test)\n", (229, 260), False, 'import pytest\n'), ((302, 320), 'ithkuil.morphology.words.remove_stress', 'remove_stress', (['txt'], {}), '(txt)\n', (315, 320), False, 'from ithkuil.morphology.words import remove_stress\n')] |
import argparse
import logging
import os
from typing import Tuple
import warnings
import torch
import torchvision
from .. import headmeta
from . import basenetworks, heads, nets
# generate hash values with: shasum -a 256 filename.pkl
PRETRAINED_UNAVAILABLE = object()
# Dataset cocokp is implied. All other datasets need to be explicit.
# Use http instead of https to avoid SSL certificate issues on Windows.
CHECKPOINT_URLS = {
'mobilenetv2': ('http://github.com/vita-epfl/openpifpaf-torchhub/releases/download/'
'v0.12a5/mobilenetv2-201112-193315-cocokp-1728a9f5.pkl'),
'resnet18': PRETRAINED_UNAVAILABLE,
'resnet50': ('http://github.com/vita-epfl/openpifpaf-torchhub/releases/download/'
'v0.12a7/resnet50-201123-175351-cocokp-o10s-127f7fdf.pkl'),
'resnet50-crowdpose': (
'http://github.com/vita-epfl/openpifpaf-torchhub/releases/download/'
'v0.12a7/resnet50-201005-100758-crowdpose-d978a89f.pkl'
),
'resnet101': PRETRAINED_UNAVAILABLE,
'resnet152': PRETRAINED_UNAVAILABLE,
'shufflenetv2x1': PRETRAINED_UNAVAILABLE,
'shufflenetv2x2': PRETRAINED_UNAVAILABLE,
'shufflenetv2k16': ('http://github.com/vita-epfl/openpifpaf-torchhub/releases/download/'
'v0.12b4/shufflenetv2k16-210214-123448-cocokp-o10s-e2ae3708.pkl'),
'shufflenetv2k16-withdense': (
'http://github.com/vita-epfl/openpifpaf-torchhub/releases/download/'
'v0.12b4/shufflenetv2k16-210221-131426-cocokp-o10s-627d901e.pkl'
),
'shufflenetv2k30': ('http://github.com/vita-epfl/openpifpaf-torchhub/releases/download/'
'v0.12b4/shufflenetv2k30-210217-075056-cocokp-o10s-6f9daa84.pkl'),
'shufflenetv2k44': PRETRAINED_UNAVAILABLE,
}
BASE_TYPES = set([
basenetworks.MobileNetV2,
basenetworks.Resnet,
basenetworks.ShuffleNetV2,
basenetworks.ShuffleNetV2K,
basenetworks.SqueezeNet,
])
BASE_FACTORIES = {
'mobilenetv2': lambda: basenetworks.MobileNetV2('mobilenetv2', torchvision.models.mobilenet_v2),
'resnet18': lambda: basenetworks.Resnet('resnet18', torchvision.models.resnet18, 512),
'resnet50': lambda: basenetworks.Resnet('resnet50', torchvision.models.resnet50),
'resnet101': lambda: basenetworks.Resnet('resnet101', torchvision.models.resnet101),
'resnet152': lambda: basenetworks.Resnet('resnet152', torchvision.models.resnet152),
'resnext50': lambda: basenetworks.Resnet('resnext50', torchvision.models.resnext50_32x4d),
'resnext101': lambda: basenetworks.Resnet('resnext101', torchvision.models.resnext101_32x8d),
'shufflenetv2x1': lambda: basenetworks.ShuffleNetV2(
'shufflenetv2x1', torchvision.models.shufflenet_v2_x1_0, 1024),
'shufflenetv2x2': lambda: basenetworks.ShuffleNetV2(
# defined in torchvision as [4, 8, 4], [24, 244, 488, 976, 2048]
'shufflenetv2x2', torchvision.models.shufflenet_v2_x2_0),
'shufflenetv2k16': lambda: basenetworks.ShuffleNetV2K(
'shufflenetv2k16', [4, 8, 4], [24, 348, 696, 1392, 1392]),
'shufflenetv2k20': lambda: basenetworks.ShuffleNetV2K(
'shufflenetv2k20', [5, 10, 5], [32, 512, 1024, 2048, 2048]),
'shufflenetv2kx5': lambda: basenetworks.ShuffleNetV2K(
'shufflenetv2kx5', [6, 13, 6], [42, 640, 1280, 2560, 2560]),
'shufflenetv2k30': lambda: basenetworks.ShuffleNetV2K(
'shufflenetv2k30', [8, 16, 6], [32, 512, 1024, 2048, 2048]),
'shufflenetv2k44': lambda: basenetworks.ShuffleNetV2K(
'shufflenetv2k44', [12, 24, 8], [32, 512, 1024, 2048, 2048]),
'squeezenet': lambda: basenetworks.SqueezeNet('squeezenet', torchvision.models.squeezenet1_1),
}
#: headmeta class to head class
HEADS = {
headmeta.Cif: heads.CompositeField3,
headmeta.Caf: heads.CompositeField3,
headmeta.CifDet: heads.CompositeField3,
}
LOG = logging.getLogger(__name__)
def local_checkpoint_path(checkpoint):
if os.path.exists(checkpoint):
return checkpoint
if checkpoint in CHECKPOINT_URLS:
url = CHECKPOINT_URLS[checkpoint]
base_dir = None
if hasattr(torch, 'hub') and hasattr(torch.hub, 'get_dir'):
# new in pytorch 1.6.0
base_dir = torch.hub.get_dir()
elif os.getenv('TORCH_HOME'):
base_dir = os.getenv('TORCH_HOME')
elif os.getenv('XDG_CACHE_HOME'):
base_dir = os.path.join(os.getenv('XDG_CACHE_HOME'), 'torch')
else:
base_dir = os.path.expanduser(os.path.join('~', '.cache', 'torch'))
file_name = os.path.join(base_dir, 'checkpoints', os.path.basename(url))
if os.path.exists(file_name):
return file_name
return None
class Factory:
base_name = None
checkpoint = None
cross_talk = 0.0
download_progress = True
head_consolidation = 'filter_and_extend'
def __init__(self, **kwargs):
if self.base_name is not None:
assert self.checkpoint is None
if self.checkpoint is not None:
assert self.base_name is None
# use kwargs to set instance attributes to overwrite class attributes
for key, value in kwargs.items():
assert hasattr(self, key), key
setattr(self, key, value)
@classmethod
def cli(cls, parser: argparse.ArgumentParser):
for bn in BASE_TYPES:
bn.cli(parser)
for hn in set(HEADS.values()):
hn.cli(parser)
group = parser.add_argument_group('network configuration')
available_checkpoints = ['"{}"'.format(n) for n, url in CHECKPOINT_URLS.items()
if url is not PRETRAINED_UNAVAILABLE]
group.add_argument(
'--checkpoint', default=cls.checkpoint,
help=(
'Path to a local checkpoint. '
'Or provide one of the following to download a pretrained model: {}'
''.format(', '.join(available_checkpoints))
)
)
group.add_argument('--basenet', default=cls.base_name,
help='base network, e.g. resnet50')
group.add_argument('--cross-talk', default=cls.cross_talk, type=float,
help='[experimental]')
assert cls.download_progress
group.add_argument('--no-download-progress', dest='download_progress',
default=True, action='store_false',
help='suppress model download progress bar')
group.add_argument('--head-consolidation',
choices=('keep', 'create', 'filter_and_extend'),
default=cls.head_consolidation,
help=('consolidation strategy for a checkpoint\'s head '
'networks and the heads specified by the datamodule'))
@classmethod
def configure(cls, args: argparse.Namespace):
for bn in BASE_TYPES:
bn.configure(args)
for hn in set(HEADS.values()):
hn.configure(args)
cls.base_name = args.basenet
cls.checkpoint = args.checkpoint
cls.cross_talk = args.cross_talk
cls.download_progress = args.download_progress
cls.head_consolidation = args.head_consolidation
def factory(self, *, head_metas=None) -> Tuple[nets.Shell, int]:
if self.base_name:
assert head_metas
assert self.checkpoint is None
net_cpu: nets.Shell = self.from_scratch(head_metas)
net_cpu = self.init_net(net_cpu)
epoch = 0
return net_cpu, epoch
net_cpu, epoch = self.from_checkpoint()
if head_metas is not None:
self.consolidate_heads(net_cpu, head_metas)
net_cpu = self.init_net(net_cpu)
return net_cpu, epoch
def consolidate_heads(self, net_cpu, head_metas):
if self.head_consolidation == 'keep':
LOG.info('keeping heads from loaded checkpoint')
# Match head metas by name and overwrite with meta from checkpoint.
# This makes sure that the head metas have their head_index and
# base_stride attributes set.
input_head_meta_indices = {(meta.dataset, meta.name): i
for i, meta in enumerate(head_metas)}
for hn in net_cpu.head_nets:
input_index = input_head_meta_indices.get((hn.meta.dataset, hn.meta.name), None)
if input_index is None:
continue
head_metas[input_index] = hn.meta
elif self.head_consolidation == 'create':
LOG.info('creating new heads')
headnets = [HEADS[h.__class__](h, net_cpu.base_net.out_features)
for h in head_metas]
net_cpu.set_head_nets(headnets)
elif self.head_consolidation == 'filter_and_extend':
LOG.info('filtering for dataset heads and extending existing heads')
existing_headnets = {(hn.meta.dataset, hn.meta.name): hn
for hn in net_cpu.head_nets}
headnets = []
for meta_i, meta in enumerate(head_metas):
if (meta.dataset, meta.name) in existing_headnets:
hn = existing_headnets[(meta.dataset, meta.name)]
headnets.append(hn)
# Match head metas by name and overwrite with meta from checkpoint.
# This makes sure that the head metas have their head_index and
# base_stride attributes set.
head_metas[meta_i] = hn.meta
else:
headnets.append(
HEADS[meta.__class__](meta, net_cpu.base_net.out_features))
net_cpu.set_head_nets(headnets)
else:
raise Exception('head strategy {} unknown'.format(self.head_consolidation))
def from_checkpoint(self) -> Tuple[nets.Shell, int]:
checkpoint = self.checkpoint
if not checkpoint:
checkpoint = 'shufflenetv2k16'
if CHECKPOINT_URLS.get(checkpoint, None) is PRETRAINED_UNAVAILABLE:
raise Exception(
'The pretrained model for {} is not available yet '
'in this release cycle. Use one of {}.'.format(
checkpoint,
[k for k, v in CHECKPOINT_URLS.items() if v is not PRETRAINED_UNAVAILABLE],
)
)
checkpoint = CHECKPOINT_URLS.get(checkpoint, checkpoint)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=torch.serialization.SourceChangeWarning)
if checkpoint.startswith('http'):
checkpoint = torch.hub.load_state_dict_from_url(
checkpoint,
check_hash=not checkpoint.startswith('https'),
progress=self.download_progress)
else:
checkpoint = torch.load(checkpoint)
net_cpu: nets.Shell = checkpoint['model']
epoch = checkpoint['epoch']
# normalize for backwards compatibility
nets.model_migration(net_cpu)
return net_cpu, epoch
def from_scratch(self, head_metas) -> nets.Shell:
if self.base_name not in BASE_FACTORIES:
raise Exception('basenet {} unknown'.format(self.base_name))
basenet = BASE_FACTORIES[self.base_name]()
headnets = [HEADS[h.__class__](h, basenet.out_features) for h in head_metas]
net_cpu = nets.Shell(basenet, headnets)
nets.model_defaults(net_cpu)
return net_cpu
def init_net(self, net_cpu):
if self.cross_talk:
net_cpu.process_input = nets.CrossTalk(self.cross_talk)
# initialize for eval
net_cpu.eval()
LOG.debug(net_cpu)
return net_cpu
| [
"os.path.basename",
"warnings.filterwarnings",
"torch.load",
"torch.hub.get_dir",
"os.path.exists",
"warnings.catch_warnings",
"os.path.join",
"os.getenv",
"logging.getLogger"
] | [((3844, 3871), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3861, 3871), False, 'import logging\n'), ((3920, 3946), 'os.path.exists', 'os.path.exists', (['checkpoint'], {}), '(checkpoint)\n', (3934, 3946), False, 'import os\n'), ((4614, 4639), 'os.path.exists', 'os.path.exists', (['file_name'], {}), '(file_name)\n', (4628, 4639), False, 'import os\n'), ((4206, 4225), 'torch.hub.get_dir', 'torch.hub.get_dir', ([], {}), '()\n', (4223, 4225), False, 'import torch\n'), ((4239, 4262), 'os.getenv', 'os.getenv', (['"""TORCH_HOME"""'], {}), "('TORCH_HOME')\n", (4248, 4262), False, 'import os\n'), ((4580, 4601), 'os.path.basename', 'os.path.basename', (['url'], {}), '(url)\n', (4596, 4601), False, 'import os\n'), ((10573, 10598), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (10596, 10598), False, 'import warnings\n'), ((10612, 10700), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'torch.serialization.SourceChangeWarning'}), "('ignore', category=torch.serialization.\n SourceChangeWarning)\n", (10635, 10700), False, 'import warnings\n'), ((4287, 4310), 'os.getenv', 'os.getenv', (['"""TORCH_HOME"""'], {}), "('TORCH_HOME')\n", (4296, 4310), False, 'import os\n'), ((4324, 4351), 'os.getenv', 'os.getenv', (['"""XDG_CACHE_HOME"""'], {}), "('XDG_CACHE_HOME')\n", (4333, 4351), False, 'import os\n'), ((11007, 11029), 'torch.load', 'torch.load', (['checkpoint'], {}), '(checkpoint)\n', (11017, 11029), False, 'import torch\n'), ((4389, 4416), 'os.getenv', 'os.getenv', (['"""XDG_CACHE_HOME"""'], {}), "('XDG_CACHE_HOME')\n", (4398, 4416), False, 'import os\n'), ((4483, 4519), 'os.path.join', 'os.path.join', (['"""~"""', '""".cache"""', '"""torch"""'], {}), "('~', '.cache', 'torch')\n", (4495, 4519), False, 'import os\n')] |
# coding=utf-8
from random import shuffle
from bs4 import BeautifulSoup
from bandcamp_parser.request import Request
class AlbumResult(object):
""" Just for autocompletion and more 'static' structure instead of json/soup """
def __init__(self, soup):
self.title = soup.attrs['title']
self.href = soup.attrs['href']
def __repr__(self) -> str:
return '\n<BandcampAlbumResult: title: {0} href: {1}>'.format(self.title, self.href)
class Album(object):
""" Album object provides access to its tracks """
def __init__(self, url):
self.url = url
def page(self) -> str:
""" :returns: album's page html """
return Request.get(self.url).content
def tracks(self) -> list:
""" :returns: list of urls of tracks in album """
soup = BeautifulSoup(self.page(), "html.parser")
results = soup.find('table', attrs={'id': 'track_table'}).find_all('a')
results = [item.attrs['href'] for item in results if item.has_attr('href')]
results = [item for item in results if '#lyrics' not in item]
return [self.url[:self.url.find('/album')] + item for item in results]
def track_random(self) -> str:
""" :returns: link to random track """
tracks = self.tracks()
shuffle(tracks)
return tracks[0]
| [
"random.shuffle",
"bandcamp_parser.request.Request.get"
] | [((1295, 1310), 'random.shuffle', 'shuffle', (['tracks'], {}), '(tracks)\n', (1302, 1310), False, 'from random import shuffle\n'), ((684, 705), 'bandcamp_parser.request.Request.get', 'Request.get', (['self.url'], {}), '(self.url)\n', (695, 705), False, 'from bandcamp_parser.request import Request\n')] |
from pytibrv.status import *
from pytibrv.api import *
from pytibrv.disp import *
import unittest
class DispatcherTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
status = tibrv_Open()
assert status == TIBRV_OK, tibrvStatus_GetText(status)
@classmethod
def tearDownClass(cls):
tibrv_Close()
def test_create(self):
que = TIBRV_DEFAULT_QUEUE
status, disp = tibrvDispatcher_Create(que, 1.0)
self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
status = tibrvDispatcher_SetName(disp, 'TEST')
self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
status, name = tibrvDispatcher_GetName(disp)
self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
self.assertEqual('TEST', name)
status = tibrvDispatcher_Destroy(disp)
self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
if __name__ == "__main__" :
unittest.main(verbosity=2)
| [
"unittest.main"
] | [((984, 1010), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (997, 1010), False, 'import unittest\n')] |
from packerpy import PackerExecutable
from jinja2 import Environment, FileSystemLoader
from modules import aws_service
import sys
class PackerController():
def __init__(self, config, log):
self.config = config
self.log = log
self.p = PackerExecutable("/usr/local/bin/packer")
self.packer_amis = []
self.packer_amis.append('splunk-server')
if self.config['phantom_server']=='1':
self.packer_amis.append('phantom-server')
if self.config['kali_machine']=='1':
self.packer_amis.append('kali_machine')
if self.config['windows_domain_controller']=='1':
self.read_and_write_userdata_file()
self.packer_amis.append('windows-domain-controller')
if self.config['windows_server']=='1':
self.read_and_write_userdata_file()
self.packer_amis.append('windows-server')
if self.config['windows_client']=='1':
self.read_and_write_userdata_file()
self.packer_amis.append('windows-client')
def build_amis(self):
self.log.info("[action] > build AMIs\n")
for packer_ami in self.packer_amis:
self.log.info("Generate new Packer AMI packer-" + packer_ami + "-" + self.config['key_name'] + ". This can take some time.")
template = 'packer/' + packer_ami +'/packer.json'
template_vars = self.config
template_vars['splunk_indexer_ip'] = self.config['splunk_server_private_ip']
(ret, out, err) = self.p.build(template,var=template_vars)
if ret:
self.log.error("ERROR: " + str(out))
sys.exit(1)
self.log.info("successfully generated Packer AMI packer-" + packer_ami + "-" + self.config['key_name'])
def read_and_write_userdata_file(self):
j2_env = Environment(loader=FileSystemLoader('packer/script'),trim_blocks=True)
template = j2_env.get_template('userdata.ps1.j2')
userdata_file = template.render(self.config)
with open('packer/script/userdata.ps1', 'w') as file:
file.write(userdata_file)
def destroy_amis(self):
aws_service.deregister_images(self.packer_amis, self.config, self.log)
| [
"modules.aws_service.deregister_images",
"packerpy.PackerExecutable",
"sys.exit",
"jinja2.FileSystemLoader"
] | [((267, 308), 'packerpy.PackerExecutable', 'PackerExecutable', (['"""/usr/local/bin/packer"""'], {}), "('/usr/local/bin/packer')\n", (283, 308), False, 'from packerpy import PackerExecutable\n'), ((2174, 2244), 'modules.aws_service.deregister_images', 'aws_service.deregister_images', (['self.packer_amis', 'self.config', 'self.log'], {}), '(self.packer_amis, self.config, self.log)\n', (2203, 2244), False, 'from modules import aws_service\n'), ((1663, 1674), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1671, 1674), False, 'import sys\n'), ((1873, 1906), 'jinja2.FileSystemLoader', 'FileSystemLoader', (['"""packer/script"""'], {}), "('packer/script')\n", (1889, 1906), False, 'from jinja2 import Environment, FileSystemLoader\n')] |
import sys
import requests
import logging
from splunklib.modularinput import *
class MI(Script):
def get_scheme(self):
scheme = Scheme("Twitch Input")
scheme.description = 'Collect Twitch channel stats'
scheme.use_external_validation = True
scheme.use_single_instance = True
username_arg = Argument('username')
username_arg.data_type = Argument.data_type_string
username_arg.description = 'Your Twitch username'
scheme.add_argument(username_arg)
# TODO: Add password
return scheme
def validate_input(self, validation_definition):
logging.info("Validating input")
username = validation_definition.parameters['username']
logging.debug(f'username: {username}')
if len(username) < 1:
raise ValueError('Username is required')
# TODO: Validate username
# TODO: Validate credentials
def stream_events(self, inputs, ew):
for input_name, input_item in inputs.inputs.iteritems():
username = str(input_item['username'])
do_work(input_name, ew, username)
def do_work(input_name, ew, username):
EventWriter.log(ew, EventWriter.INFO,
f'Started Twitch queries for {username}')
# TODO: Make queries
# TODO: Write events
if __name__ == '__main__':
MI().run(sys.argv)
| [
"logging.info",
"logging.debug"
] | [((633, 665), 'logging.info', 'logging.info', (['"""Validating input"""'], {}), "('Validating input')\n", (645, 665), False, 'import logging\n'), ((738, 776), 'logging.debug', 'logging.debug', (['f"""username: {username}"""'], {}), "(f'username: {username}')\n", (751, 776), False, 'import logging\n')] |
"""
Plotting facilities, based on Python's MPL library.
The L{Chart} object is a facade which provides intuitive access to MPL's plotting
objects. The following examples show a typical use of L{Chart}:
1. Instantiation
>>> chart = Chart() # a chart with a single plot
>>> chart = Chart(rows=2, columns=2) # a chart with 2x2=4 plots
2. Accessing plots (equivalent to MPL's subplots)
>>> chart.plots[0] # first plot (at row=0, column=0)
Plot (matplotlib.axes.AxesSubplot)
>>> chart.plots[0, 1]
Plot (matplotlib.axes.AxesSubplot) # plot at row=0, column=1
3. Plotting
>>> chart.plots[0].hist(...)
>>> chart.plots[0].set_title('T')
>>> chart.plots[1].scatter(...)
>>> chart.plots[1].set_xlabel('X')
4. Using the GUI
>>> chart.show()
>>> chart.hide()
5. Saving as image
>>> chart.save(filename, format=chart.formats.PDF)
If using the GUI, do not forget to dispose the chart at the end:
>>> chart.dispose()
or simply use the chart in a context manager:
>>> with Chart() as chart:
chart...
"""
import time
import csb.core
from abc import ABCMeta, abstractmethod
from threading import Thread, Event
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
class Backend(Thread):
    """
    Abstract class defining the behavior of all Chart GUI backends.

    Each backend is a 'daemon' that runs in a new thread and handles
    all GUI requests from any L{Chart} instance. A backend service must
    behave as a singleton - only one service of a given kind may exist at a
    given point in time. L{Chart} clients will request GUI operations
    on specific figures, the Backend therefore must keep track of all
    windows opened, as well as the figure-to-window mapping.
    """

    __metaclass__ = ABCMeta

    # Singleton registry: maps each concrete Backend subclass to its
    # one-and-only running instance (see Backend.get / Backend.query).
    _instances = {}

    @staticmethod
    def get(backend, started=True):
        """
        Backend factory, ensures one instance per subclass.

        @param backend: one of the L{Backend} subclasses
        @type backend: type
        @param started: if True, ensure that the service is running
        @type started: bool

        @return: an instance of the backend. The returned service
        instance may need to be started.
        @rtype: L{Backend}
        """
        if not issubclass(backend, Backend):
            raise TypeError(backend)

        if backend in Backend._instances:
            instance = Backend._instances[backend]
        else:
            # Subclass __init__ registers the new instance in _instances.
            instance = backend()

        if started and not instance.started:
            instance.start()

        return instance

    @staticmethod
    def query(backend):
        """
        @param backend: one of the L{Backend} subclasses
        @type backend: type

        @return: True if a service of type C{backend} is running
        @rtype: bool
        """
        if not issubclass(backend, Backend):
            raise TypeError(backend)

        if backend in Backend._instances:
            instance = Backend._instances[backend]
            return instance.started
        else:
            return False

    def __init__(self):
        # Enforce the singleton contract: one live instance per subclass.
        name = self.__class__

        if name in Backend._instances:
            raise RuntimeError('Backend {0} has already been initialized'.format(name))
        else:
            Backend._instances[name] = self

        super(Backend, self).__init__()

        # figure -> native window mapping, maintained by the subclass hooks
        self._figures = {}
        self._started = Event()
        self._running = Event()
        # NOTE(review): Thread.setDaemon is deprecated in modern Python in
        # favor of the 'daemon' attribute; kept as-is here.
        self.setDaemon(True)

    @property
    def started(self):
        """
        True if the service had been started
        @rtype: bool
        """
        return self._started.isSet()

    @property
    def running(self):
        """
        True if the service had been started and is currently running
        @rtype: bool
        """
        return self._running.isSet()

    @abstractmethod
    def _initapp(self):
        """
        Create an instance of the GUI application.
        """
        pass

    @abstractmethod
    def _mainloop(self):
        """
        Enter the GUI main loop.
        """
        pass

    @abstractmethod
    def _exit(self):
        """
        Delete all frames, exit the GUI main loop and perform any cleanup
        needed in order to unblock the thread that started the main loop.
        """
        pass

    @abstractmethod
    def _add(self, figure):
        """
        Handle an 'Add new figure' event
        """
        pass

    @abstractmethod
    def _show(self, figure):
        """
        Handle a 'Show existing figure' event
        """
        pass

    @abstractmethod
    def _resize(self, figure):
        """
        Handle a 'Resize existing figure' event
        """
        pass

    @abstractmethod
    def _hide(self, figure):
        """
        Handle a 'Hide existing figure' event
        """
        pass

    @abstractmethod
    def _destroy(self, figure):
        """
        Handle a 'Delete existing figure' event
        """
        pass

    @abstractmethod
    def _invoke(self, callable, *args):
        """
        Pass a GUI message: invoke C{callable} in a thread-safe way
        """
        pass

    def invoke(self, callable, *args):
        """
        Invoke an asynchronous GUI operation (in a thread-safe way)
        """
        if not self._running.isSet():
            raise RuntimeError('The backend service is not running')
        else:
            self._invoke(callable, *args)

    def add(self, figure):
        """
        Add a new figure.
        """
        self.invoke(self._add, figure)

    def show(self, figure):
        """
        Show existing figure.
        """
        self.invoke(self._show, figure)

    def resize(self, figure):
        """
        Resize existing figure.
        """
        self.invoke(self._resize, figure)

    def hide(self, figure):
        """
        Hide existing figure.
        """
        self.invoke(self._hide, figure)

    def destroy(self, figure, wait=False):
        """
        Destroy existing figure. If C{wait} is True, make sure the asynchronous
        figure deletion is complete before returning from the method.
        """
        has_figure = (figure in self._figures)
        self.invoke(self._destroy, figure)

        if has_figure and wait:
            # NOTE(review): this is a busy-wait spin on the GUI thread's
            # removal of the figure; a small sleep or an Event would be
            # kinder to the CPU — verify before changing.
            while figure in self._figures:
                pass

    def start(self):
        """
        Start the Backend service. This method can be called only once.
        """
        try:
            super(Backend, self).start()

            # Block the caller until run() has signalled that the GUI
            # application is initialized and the main loop is entered.
            while not self._running.isSet():
                time.sleep(0.05)

        except BaseException:
            raise RuntimeError("Failed to start the backend service")

    def run(self):
        """
        Main service method, automatically called by C{start}.
        """
        self._started.set()

        # _mainloop() blocks for the lifetime of the GUI application;
        # everything after it is shutdown bookkeeping.
        self._initapp()
        self._running.set()
        self._mainloop()

        self._running.clear()
        self._started.clear()

    def stop(self):
        """
        Stop the Backend service. The Backend object can be safely
        disposed afterwards.
        """
        self._exit()
        self._figures = {}
        self.join()
        self._running.clear()
        self._started.clear()
        # Unregister so a fresh instance of this subclass can be created.
        del Backend._instances[self.__class__]

    def client_disposed(self, client):
        """
        Fired when a client is being deleted. Will stop the service if no
        active clients are remaining.
        """
        if self._figures is None or len(self._figures) == 0:
            self.stop()

    def __del__(self):
        if self._started.isSet():
            self.stop()
class WxBackendImpl(Backend):
    """
    WxPython L{Backend} implementor.

    @note: not meant to be instantiated directly, use L{Backend.get} instead.
    """

    # Process-wide wx application object, created lazily by the _app property.
    _wxapp = None

    def __init__(self):
        # Imports are deferred so the module can be loaded without wx installed.
        import wx
        from matplotlib.backends.backend_wx import FigureFrameWx

        self._wx = wx
        self._FigureFrameWx = FigureFrameWx

        super(WxBackendImpl, self).__init__()

    @property
    def _app(self):
        # NOTE(review): wx.PySimpleApp is deprecated/removed in modern
        # wxPython (use wx.App) — verify against the wx version in use.
        if WxBackendImpl._wxapp is None:
            WxBackendImpl._wxapp = self._wx.PySimpleApp()
        return WxBackendImpl._wxapp

    def _initapp(self):
        # Touch the app to force its creation, then flash a throwaway frame
        # so the wx event machinery is fully initialized.
        dummy = self._app
        frame = self._wx.Frame(None)
        frame.Show()
        frame.Hide()

    def _mainloop(self):
        # Blocks until the application exits (see _exit).
        self._app.MainLoop()

    def _add(self, figure):
        wx = self._wx
        FigureFrameWx = self._FigureFrameWx

        if figure not in self._figures:
            frame = FigureFrameWx(figure._figure_number, figure)
            frame.Show()
            # Re-layout on activation; translate window close into a hide
            # request so the figure object survives the window.
            frame.Bind(wx.EVT_ACTIVATE, lambda e: e.GetEventObject().Layout())
            frame.Bind(wx.EVT_CLOSE, lambda e: self.invoke(self._hide, figure))

            self._figures[figure] = frame

    def _show(self, figure):
        if figure not in self._figures:
            self._add(figure)

        self._figures[figure].Show()

    def _resize(self, figure):
        if figure in self._figures:
            frame = self._figures[figure]
            # Convert the figure's size in inches to device pixels.
            w = figure.get_figwidth() * figure.get_dpi()
            h = figure.get_figheight() * figure.get_dpi()
            size = self._wx.Size(w, h)
            frame.canvas.SetInitialSize(size)
            frame.GetSizer().Fit(frame)

    def _hide(self, figure):
        if figure in self._figures:
            self._figures[figure].Hide()

    def _destroy(self, figure):
        if figure in self._figures:
            frame = self._figures[figure]
            if not frame.IsBeingDeleted():
                frame.Destroy()
            # Removing the mapping also releases the busy-wait in
            # Backend.destroy(wait=True).
            del self._figures[figure]

    def _invoke(self, callable, *args):
        wx = self._wx
        # wx.CallAfter marshals the call onto the GUI thread.
        wx.CallAfter(callable, *args)

    def _exit(self):
        for frame in self._figures.values():
            if not frame.IsBeingDeleted():
                frame.Destroy()
        self._app.Exit()
class Backends(object):
    """
    Enumeration of chart backends.

    Each attribute names a concrete L{Backend} subclass; pass one of these
    as the C{backend} argument of L{Chart}.
    """
    WX_WIDGETS = WxBackendImpl
class PlotsCollection(object):
    """
    A list-like collection of all plots in the chart (0-based).

    Subplots are created lazily: the first access to a given index asks the
    underlying figure for a new subplot at that grid position. Iteration and
    C{len} cover only the subplots that have actually been created.
    """

    def __init__(self, figure, rows=1, columns=1):
        assert rows >= 1 and columns >= 1
        self._figure = figure
        self._rows = int(rows)
        self._columns = int(columns)
        # One slot per grid cell; None marks a not-yet-created subplot.
        self._plots = [None] * (self._rows * self._columns)

    @property
    def _active_plots(self):
        # Only the subplots that have been materialized so far.
        return [plot for plot in self._plots if plot is not None]

    def _add(self, index=1):
        assert 0 <= index < len(self._plots)
        # Matplotlib subplot positions are 1-based, hence the index + 1.
        subplot = self._figure.add_subplot(self._rows, self._columns, index + 1)
        self._plots[index] = subplot
        return subplot

    def __getitem__(self, location):
        # Accept either a flat index or a (row, column) pair.
        if isinstance(location, tuple):
            row, col = location
            position = row * self._columns + col
        else:
            position = int(location)

        if position < 0 or position >= len(self._plots):
            raise IndexError("No such plot: {0}".format(location))

        existing = self._plots[position]
        return existing if existing is not None else self._add(position)

    def __len__(self):
        return len(self._active_plots)

    def __iter__(self):
        return iter(self._active_plots)
class Chart(object):
    """
    Simple and clean facade to Matplotlib's plotting API.

    A chart instance abstracts a plotting device, on which one or
    multiple related plots can be drawn. Charts can be exported as images, or
    visualized interactively. Each chart instance will always open in its own
    GUI window, and this window will never block the execution of the rest of
    the program, or interfere with other L{Chart}s.
    The GUI can be safely opened in the background and closed infinite number
    of times, as long as the client program is still running.

    By default, a chart contains a single plot:

    >>> chart.plot
    matplotlib.axes.AxesSubplot
    >>> chart.plot.hist(...)

    If C{rows} and C{columns} are defined, the chart will contain
    C{rows} x C{columns} number of plots (equivalent to MPL's sub-plots).
    Each plot can be accessed by its index:

    >>> chart.plots[0]
    first plot

    or by its position in the grid:

    >>> chart.plots[0, 1]
    plot at row=0, column=1

    @param number: chart number; by default this a L{Chart.AUTONUMBER}
    @type number: int or None
    @param title: chart master title
    @type title: str
    @param rows: number of rows in the chart window
    @type rows: int
    @param columns: number of columns in the chart window
    @type columns: int

    @note: additional arguments are passed directly to Matplotlib's Figure
           constructor.
    """

    AUTONUMBER = None

    # Class-wide counter used to auto-number charts created with AUTONUMBER.
    _serial = 0

    def __init__(self, number=None, title='', rows=1, columns=1, backend=Backends.WX_WIDGETS, *fa, **fk):

        if number == Chart.AUTONUMBER:
            Chart._serial += 1
            number = Chart._serial

        # Clamp the grid to at least 1x1.
        if rows < 1:
            rows = 1
        if columns < 1:
            columns = 1

        self._rows = int(rows)
        self._columns = int(columns)
        self._number = int(number)
        self._title = str(title)
        self._figure = Figure(*fa, **fk)
        self._figure._figure_number = self._number
        self._figure.suptitle(self._title)
        self._beclass = backend
        self._hasgui = False
        self._plots = PlotsCollection(self._figure, self._rows, self._columns)
        # Agg canvas is always attached so save() works without any GUI.
        self._canvas = FigureCanvasAgg(self._figure)

        # Build an enum of supported output formats, e.g. PNG='png'.
        formats = [ (f.upper(), f) for f in self._canvas.get_supported_filetypes() ]
        self._formats = csb.core.Enum.create('OutputFormats', **dict(formats))

    def __getitem__(self, i):
        # NOTE(review): PlotsCollection defines no __contains__, so this
        # membership test iterates the *plot objects* and compares them to
        # C{i}; an integer index will therefore never match and chart[0]
        # likely raises KeyError. Verify intent — chart.plots[i] is the
        # documented access path.
        if i in self._plots:
            return self._plots[i]
        else:
            raise KeyError('No such plot number: {0}'.format(i))

    def __enter__(self):
        return self

    def __exit__(self, *a, **k):
        # Context-manager exit guarantees the GUI backend is released.
        self.dispose()

    @property
    def _backend(self):
        # Lazily obtains (and starts) the singleton backend service.
        return Backend.get(self._beclass, started=True)

    @property
    def _backend_started(self):
        return Backend.query(self._beclass)

    @property
    def title(self):
        """
        Chart title
        @rtype: str
        """
        return self._title

    @property
    def number(self):
        """
        Chart number
        @rtype: int
        """
        return self._number

    @property
    def plots(self):
        """
        Index-based access to the plots in this chart
        @rtype: L{PlotsCollection}
        """
        return self._plots

    @property
    def plot(self):
        """
        First plot
        @rtype: matplotlib.AxesSubplot
        """
        return self._plots[0]

    @property
    def rows(self):
        """
        Number of rows in this chart
        @rtype: int
        """
        return self._rows

    @property
    def columns(self):
        """
        Number of columns in this chart
        @rtype: int
        """
        return self._columns

    @property
    def width(self):
        """
        Chart's width in inches
        @rtype: int
        """
        return self._figure.get_figwidth()
    @width.setter
    def width(self, inches):
        self._figure.set_figwidth(inches)
        # Keep any open GUI window in sync with the new size.
        if self._backend_started:
            self._backend.resize(self._figure)

    @property
    def height(self):
        """
        Chart's height in inches
        @rtype: int
        """
        return self._figure.get_figheight()
    @height.setter
    def height(self, inches):
        self._figure.set_figheight(inches)
        if self._backend_started:
            self._backend.resize(self._figure)

    @property
    def dpi(self):
        """
        Chart's DPI
        @rtype: int
        """
        return self._figure.get_dpi()
    @dpi.setter
    def dpi(self, dpi):
        self._figure.set_dpi(dpi)
        # NOTE(review): unlike the width/height setters this does not guard
        # on _backend_started, so it may start the backend as a side effect.
        self._backend.resize(self._figure)

    @property
    def formats(self):
        """
        Supported output file formats
        @rtype: L{csb.core.enum}
        """
        return self._formats

    def show(self):
        """
        Show the GUI window (non-blocking).
        """
        if not self._hasgui:
            self._backend.add(self._figure)
            self._hasgui = True

        self._backend.show(self._figure)

    def hide(self):
        """
        Hide (but do not dispose) the GUI window.
        """
        self._backend.hide(self._figure)

    def dispose(self):
        """
        Dispose the GUI interface. Must be called at the end if any
        chart.show() calls have been made. Automatically called if using
        the chart in context manager ("with" statement).

        @note: Failing to call this method if show() has been called at least
        once may cause backend-related errors.
        """
        if self._backend_started:

            service = self._backend

            if service and service.running:
                service.destroy(self._figure, wait=True)
                service.client_disposed(self)

    def save(self, file, format='png', crop=False, dpi=None, *a, **k):
        """
        Save all plots to an image.

        @param file: destination file name
        @type file: str
        @param format: output image format; see C{chart.formats} for enumeration
        @type format: str or L{csb.core.EnumItem}
        @param crop: if True, crop the image (equivalent to MPL's bbox=tight)
        @type crop: bool

        @note: additional arguments are passed directly to MPL's savefig method
        """
        # An explicit bbox_inches kwarg wins over the crop flag.
        if 'bbox_inches' in k:
            bbox = k['bbox_inches']
            del k['bbox_inches']
        else:
            if crop:
                bbox = 'tight'
            else:
                bbox = None

        self._canvas.print_figure(file, format=str(format), bbox_inches=bbox, dpi=dpi, *a, **k)
| [
"matplotlib.backends.backend_agg.FigureCanvasAgg",
"time.sleep",
"wx.CallAfter",
"matplotlib.figure.Figure",
"threading.Event",
"matplotlib.backends.backend_wx.FigureFrameWx"
] | [((3701, 3708), 'threading.Event', 'Event', ([], {}), '()\n', (3706, 3708), False, 'from threading import Thread, Event\n'), ((3741, 3748), 'threading.Event', 'Event', ([], {}), '()\n', (3746, 3748), False, 'from threading import Thread, Event\n'), ((10566, 10595), 'wx.CallAfter', 'wx.CallAfter', (['callable', '*args'], {}), '(callable, *args)\n', (10578, 10595), False, 'import wx\n'), ((14386, 14403), 'matplotlib.figure.Figure', 'Figure', (['*fa'], {}), '(*fa, **fk)\n', (14392, 14403), False, 'from matplotlib.figure import Figure\n'), ((14669, 14698), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvasAgg', (['self._figure'], {}), '(self._figure)\n', (14684, 14698), False, 'from matplotlib.backends.backend_agg import FigureCanvasAgg\n'), ((9252, 9296), 'matplotlib.backends.backend_wx.FigureFrameWx', 'FigureFrameWx', (['figure._figure_number', 'figure'], {}), '(figure._figure_number, figure)\n', (9265, 9296), False, 'from matplotlib.backends.backend_wx import FigureFrameWx\n'), ((7090, 7106), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (7100, 7106), False, 'import time\n')] |
"""Author: <NAME>."""
from typing import Optional, Tuple
import distutils.version
import uuid
from datetime import datetime
from pathlib import Path
import warnings
import numpy as np
import neo.io.baseio
import pynwb
from hdmf.backends.hdf5 import H5DataIO
from ..nwb_helpers import add_device_from_metadata
from ...utils import OptionalFilePathType
# Map each supported icephys experiment type to the pynwb series class used
# to store the recorded *response* traces.
response_classes = dict(
    voltage_clamp=pynwb.icephys.VoltageClampSeries,
    current_clamp=pynwb.icephys.CurrentClampSeries,
    izero=pynwb.icephys.IZeroClampSeries,
)

# Map each experiment type to the pynwb series class used to store the
# *stimulus* (command) traces. 'izero' has no stimulus, hence no entry.
stim_classes = dict(
    voltage_clamp=pynwb.icephys.VoltageClampStimulusSeries,
    current_clamp=pynwb.icephys.CurrentClampStimulusSeries,
)
# TODO - get electrodes metadata
def get_electrodes_metadata(neo_reader, electrodes_ids: list, block: int = 0) -> list:
    """
    Get electrodes metadata from a Neo reader.

    This is currently a stub (TODO): it always returns an empty list.
    Once implemented it should collect, for each requested electrode, the
    fields accepted by pynwb.icephys.IntracellularElectrode:

    - name: the name of this electrode
    - device: the device that was used to record from this electrode
    - description: recording description (e.g. whole-cell, sharp, etc.)
    - comment: free-form text (can be from Methods)
    - slice: information about the slice used for recording
    - seal: information about the seal used for recording
    - location: area, layer, stereotaxic coordinates, etc.
    - resistance: electrode resistance (unit: Ohm)
    - filtering: electrode-specific filtering
    - initial_access_resistance: initial access resistance

    Parameters
    ----------
    neo_reader : neo.io.baseio
        Neo reader object.
    electrodes_ids : list
        List of electrode ids.
    block : int, optional
        Block index. Defaults to 0.

    Returns
    -------
    list
        List of dictionaries containing electrodes metadata (currently
        always empty).
    """
    return []
def get_number_of_electrodes(neo_reader) -> int:
    """Return the number of electrodes (signal channels) exposed by the Neo reader."""
    # TODO - take in account the case with multiple streams.
    signal_channels = neo_reader.header["signal_channels"]
    return len(signal_channels)
def get_number_of_segments(neo_reader, block: int = 0) -> int:
    """Return how many segments the given block of the Neo reader contains."""
    segments_per_block = neo_reader.header["nb_segment"]
    return segments_per_block[block]
def get_command_traces(neo_reader, segment: int = 0, cmd_channel: int = 0) -> Tuple[list, str, str]:
    """
    Get command traces (e.g. voltage clamp command traces).

    Parameters
    ----------
    neo_reader : neo.io.baseio
        Neo reader object. Must support ``read_raw_protocol()``, which is
        only implemented for the AxonIO interface.
    segment : int, optional
        Segment index. Defaults to 0.
    cmd_channel : int, optional
        ABF command channel (0 to 7). Defaults to 0.

    Returns
    -------
    Tuple[list, str, str]
        The command trace, its title and its unit for the requested
        segment and channel.

    Raises
    ------
    Exception
        Re-raises any error from the reader, with a note appended that this
        function only works for the AxonIO interface.
    """
    try:
        traces, titles, units = neo_reader.read_raw_protocol()
        return traces[segment][cmd_channel], titles[segment][cmd_channel], units[segment][cmd_channel]
    except Exception as e:
        # Bug fix: the original code *returned* the exception object here,
        # silently handing callers an Exception instance where a
        # (trace, title, unit) tuple was expected. Re-raise instead, with
        # extra context in the message.
        msg = ".\n\n WARNING - get_command_traces() only works for AxonIO interface."
        e.args = (str(e) + msg,)
        raise
def get_conversion_from_unit(unit: str) -> float:
    """
    Get conversion (to Volt or Ampere) from unit in string format.

    Parameters
    ----------
    unit : str
        Unit as string. E.g. pA, mV, uV, etc.

    Returns
    -------
    float
        Conversion factor to Ampere or Volt. Unknown units yield 1.0 and
        emit a warning.
    """
    # Exact lookup table: each recognized unit maps to its SI scale factor.
    scale_factors = {
        "pA": 1e-12, "pV": 1e-12,
        "nA": 1e-9, "nV": 1e-9,
        "uA": 1e-6, "uV": 1e-6,
        "mA": 1e-3, "mV": 1e-3,
        "A": 1.0, "V": 1.0,
    }
    conversion = scale_factors.get(unit)
    if conversion is None:
        # Unrecognized unit: fall back to a gain of 1 and warn the caller.
        conversion = 1.0
        warnings.warn("No valid units found for traces in the current file. Gain is set to 1, but this might be wrong.")
    return float(conversion)
def get_nwb_metadata(neo_reader, metadata: dict = None):
    """
    Return default metadata for all recording fields.

    Parameters
    ----------
    neo_reader : Neo reader object
        Currently unused; kept for interface compatibility.
    metadata : dict, optional
        Accepted but ignored: the returned dict always contains the
        defaults, with a freshly generated NWBFile identifier.

    Returns
    -------
    dict
        Metadata with 'NWBFile' and 'Icephys' sections.
    """
    default_description = "Auto-generated by NwbRecordingExtractor without description."
    metadata = dict(
        NWBFile=dict(
            session_description=default_description,
            # Each call produces a unique identifier.
            identifier=str(uuid.uuid4()),
        ),
        Icephys=dict(Device=[dict(name="Device", description="no description")]),
    )
    return metadata
def add_icephys_electrode(neo_reader, nwbfile, metadata: dict = None):
    """
    Add icephys electrodes to nwbfile object.

    Will always ensure nwbfile has at least one icephys electrode.
    Will auto-generate a linked device if the specified name does not exist in the nwbfile.

    Parameters
    ----------
    neo_reader : neo.io.baseio
    nwbfile : NWBFile
        NWBFile object to add the icephys electrode to.
    metadata : dict, optional
        Metadata info for constructing the nwb file.
        Should be of the format
            metadata['Icephys']['Electrodes'] = [
                {
                    'name': my_name,
                    'description': my_description,
                    'device_name': my_device_name
                },
                ...
            ]
    """
    assert isinstance(nwbfile, pynwb.NWBFile), "'nwbfile' should be of type pynwb.NWBFile"

    # An electrode must link to a device; auto-create one if none exists yet.
    if len(nwbfile.devices) == 0:
        warnings.warn("When adding Icephys Electrode, no Devices were found on nwbfile. Creating a Device now...")
        add_device_from_metadata(nwbfile=nwbfile, modality="Icephys", metadata=metadata)

    if metadata is None:
        metadata = dict()

    if "Icephys" not in metadata:
        metadata["Icephys"] = dict()

    # One default entry per channel reported by the reader; all defaults link
    # to the first device on the file.
    defaults = [
        dict(
            name=f"icephys_electrode_{elec_id}",
            description="no description",
            device_name=[i.name for i in nwbfile.devices.values()][0],
        )
        for elec_id in range(get_number_of_electrodes(neo_reader))
    ]

    if "Electrodes" not in metadata["Icephys"] or len(metadata["Icephys"]["Electrodes"]) == 0:
        metadata["Icephys"]["Electrodes"] = defaults

    assert all(
        [isinstance(x, dict) for x in metadata["Icephys"]["Electrodes"]]
    ), "Expected metadata['Icephys']['Electrodes'] to be a list of dictionaries!"

    # Create Icephys electrode from metadata
    for elec in metadata["Icephys"]["Electrodes"]:
        if elec.get("name", defaults[0]["name"]) not in nwbfile.icephys_electrodes:
            device_name = elec.get("device_name", defaults[0]["device_name"])
            if device_name not in nwbfile.devices:
                # Requested device does not exist yet: create it and warn.
                new_device_metadata = dict(Ecephys=dict(Device=[dict(name=device_name)]))
                add_device_from_metadata(nwbfile, modality="Icephys", metadata=new_device_metadata)
                warnings.warn(
                    f"Device '{device_name}' not detected in "
                    "attempted link to icephys electrode! Automatically generating."
                )
            electrode_kwargs = dict(
                name=elec.get("name", defaults[0]["name"]),
                description=elec.get("description", defaults[0]["description"]),
                device=nwbfile.devices[device_name],
            )
            nwbfile.create_icephys_electrode(**electrode_kwargs)
def add_icephys_recordings(
    neo_reader,
    nwbfile: pynwb.NWBFile,
    metadata: dict = None,
    icephys_experiment_type: Optional[str] = None,
    stimulus_type: Optional[str] = None,
    skip_electrodes: Tuple[int] = (),
    compression: str = "gzip",
):
    """
    Add icephys recordings (stimulus/response pairs) to nwbfile object.

    Parameters
    ----------
    neo_reader : neo.io.baseio
        Axon (ABF) reader; the protocol section is only read for ABF2 files.
    nwbfile : NWBFile
        File to append intracellular recordings to.
    metadata : dict, optional
        Must contain metadata['Icephys']['Sessions'], a list with one entry
        per session providing 'relative_session_start_time' and
        'stimulus_type'.
    icephys_experiment_type : str, optional
        Type of Icephys experiment. Allowed types are: 'voltage_clamp', 'current_clamp' and 'izero'.
        The default is 'voltage_clamp'.
    stimulus_type : str
        Stimulus-type label for the sequential-recordings table.
    skip_electrodes : tuple
        Channel indices to skip.
    compression : str | bool
        Compression passed to H5DataIO for response data.
    """
    n_segments = get_number_of_segments(neo_reader, block=0)

    if icephys_experiment_type is None:
        icephys_experiment_type = "voltage_clamp"
    if stimulus_type is None:
        stimulus_type = "not described"

    # Check for protocol data (only ABF2), necessary for stimuli data
    if neo_reader._axon_info["fFileVersionNumber"] < 2:
        n_commands = 0
        warnings.warn(
            f"Protocol section is only present in ABF2 files. {neo_reader.filename} has version "
            f"{neo_reader._axon_info['fFileVersionNumber']}. Saving experiment as 'i_zero'..."
        )
    else:
        protocol = neo_reader.read_raw_protocol()
        n_commands = len(protocol[0])

    # Without command traces the experiment degrades to an i-zero recording.
    if n_commands == 0:
        icephys_experiment_type = "izero"
        warnings.warn(
            f"No command data found by neo reader in file {neo_reader.filename}. Saving experiment as 'i_zero'..."
        )
    else:
        assert (
            n_commands == n_segments
        ), f"File contains inconsistent number of segments ({n_segments}) and commands ({n_commands})"

    assert icephys_experiment_type in ["voltage_clamp", "current_clamp", "izero"], (
        f"'icephys_experiment_type' should be 'voltage_clamp', 'current_clamp' or 'izero', but received value "
        f"{icephys_experiment_type}"
    )

    # Check and auto-create electrodes, in case they don't exist yet in nwbfile
    if len(nwbfile.icephys_electrodes) == 0:
        warnings.warn(
            "When adding Icephys Recording, no Icephys Electrodes were found on nwbfile. Creating Electrodes now..."
        )
        add_icephys_electrode(
            neo_reader=neo_reader,
            nwbfile=nwbfile,
            metadata=metadata,
        )

    # Offsets so repeated calls append after any recordings already present.
    if getattr(nwbfile, "intracellular_recordings", None):
        ri = max(nwbfile.intracellular_recordings["responses"].index)
    else:
        ri = -1
    if getattr(nwbfile, "icephys_simultaneous_recordings", None):
        simultaneous_recordings_offset = len(nwbfile.icephys_simultaneous_recordings)
    else:
        simultaneous_recordings_offset = 0
    if getattr(nwbfile, "icephys_sequential_recordings", None):
        sessions_offset = len(nwbfile.icephys_sequential_recordings)
    else:
        sessions_offset = 0

    relative_session_start_time = metadata["Icephys"]["Sessions"][sessions_offset]["relative_session_start_time"]
    session_stimulus_type = metadata["Icephys"]["Sessions"][sessions_offset]["stimulus_type"]

    # Sequential icephys recordings
    simultaneous_recordings = list()
    for si in range(n_segments):
        # Parallel icephys recordings
        recordings = list()
        for ei, electrode in enumerate(
            list(nwbfile.icephys_electrodes.values())[: len(neo_reader.header["signal_channels"]["units"])]
        ):
            if ei in skip_electrodes:
                continue
            # Starting time is the signal starting time within .abf file + time
            # relative to first session (first .abf file)
            ri += 1
            starting_time = neo_reader.get_signal_t_start(block_index=0, seg_index=si)
            starting_time = starting_time + relative_session_start_time
            sampling_rate = neo_reader.get_signal_sampling_rate()
            # Responses are stored in SI units: conversion = unit scale * channel gain.
            response_unit = neo_reader.header["signal_channels"]["units"][ei]
            response_conversion = get_conversion_from_unit(unit=response_unit)
            response_gain = neo_reader.header["signal_channels"]["gain"][ei]
            response_name = f"{icephys_experiment_type}-response-{si + 1 + simultaneous_recordings_offset:02}-ch-{ei}"
            response = response_classes[icephys_experiment_type](
                name=response_name,
                description=f"Response to: {session_stimulus_type}",
                electrode=electrode,
                data=H5DataIO(
                    data=neo_reader.get_analogsignal_chunk(block_index=0, seg_index=si, channel_indexes=ei),
                    compression=compression,
                ),
                starting_time=starting_time,
                rate=sampling_rate,
                conversion=response_conversion * response_gain,
                gain=np.nan,
            )
            if icephys_experiment_type != "izero":
                # Pair the response with its command (stimulus) trace.
                stim_unit = protocol[2][ei]
                stim_conversion = get_conversion_from_unit(unit=stim_unit)
                stimulus = stim_classes[icephys_experiment_type](
                    name=f"stimulus-{si + 1 + simultaneous_recordings_offset:02}-ch-{ei}",
                    description=f"Stim type: {session_stimulus_type}",
                    electrode=electrode,
                    data=protocol[0][si][ei],
                    rate=sampling_rate,
                    starting_time=starting_time,
                    conversion=stim_conversion,
                    gain=np.nan,
                )
                icephys_recording = nwbfile.add_intracellular_recording(
                    electrode=electrode, response=response, stimulus=stimulus
                )
            else:
                icephys_recording = nwbfile.add_intracellular_recording(electrode=electrode, response=response)
            recordings.append(icephys_recording)

        sim_rec = nwbfile.add_icephys_simultaneous_recording(recordings=recordings)
        simultaneous_recordings.append(sim_rec)

    nwbfile.add_icephys_sequential_recording(
        simultaneous_recordings=simultaneous_recordings, stimulus_type=stimulus_type
    )

    # TODO
    # # Add a list of sequential recordings table indices as a repetition
    # run_index = nwbfile.add_icephys_repetition(
    #     sequential_recordings=[
    #         seq_rec,
    #     ]
    # )
    # # Add a list of repetition table indices as a experimental condition
    # nwbfile.add_icephys_experimental_condition(
    #     repetitions=[
    #         run_index,
    #     ]
    # )
def add_all_to_nwbfile(
    neo_reader,
    nwbfile=None,
    metadata: dict = None,
    compression: Optional[str] = "gzip",
    icephys_experiment_type: Optional[str] = "voltage_clamp",
    stimulus_type: Optional[str] = None,
    skip_electrodes: Tuple[int] = (),
):
    """
    Auxiliary static method for nwbextractor.

    Adds all recording related information from recording object and metadata to the nwbfile object.

    Parameters
    ----------
    neo_reader: Neo reader object
    nwbfile: NWBFile
        nwb file to which the recording information is to be added
    metadata: dict
        metadata info for constructing the nwb file (optional).
        Check the auxiliary function docstrings for more information
        about metadata format.
    compression: str (optional, defaults to "gzip")
        Type of compression to use. Valid types are "gzip" and "lzf".
        Set to None to disable all compression.
    icephys_experiment_type: str (optional)
        Type of Icephys experiment. Allowed types are: 'voltage_clamp', 'current_clamp' and 'izero'.
        If no value is passed, 'voltage_clamp' is used as default.
    stimulus_type: str, optional
    skip_electrodes: str, optional
    """
    if nwbfile is not None:
        assert isinstance(nwbfile, pynwb.NWBFile), "'nwbfile' should be of type pynwb.NWBFile"

    # Order matters: devices -> electrodes -> recordings, each step creating
    # the objects the next one links to.
    add_device_from_metadata(nwbfile=nwbfile, modality="Icephys", metadata=metadata)

    add_icephys_electrode(
        neo_reader=neo_reader,
        nwbfile=nwbfile,
        metadata=metadata,
    )

    add_icephys_recordings(
        neo_reader=neo_reader,
        nwbfile=nwbfile,
        metadata=metadata,
        icephys_experiment_type=icephys_experiment_type,
        stimulus_type=stimulus_type,
        skip_electrodes=skip_electrodes,
        compression=compression,
    )
def write_neo_to_nwb(
    neo_reader: neo.io.baseio.BaseIO,
    save_path: OptionalFilePathType = None,
    overwrite: bool = False,
    nwbfile=None,
    metadata: dict = None,
    compression: Optional[str] = "gzip",
    icephys_experiment_type: Optional[str] = None,
    stimulus_type: Optional[str] = None,
    skip_electrodes: Optional[tuple] = (),
):
    """
    Primary method for writing a Neo reader object to an NWBFile.

    Parameters
    ----------
    neo_reader: Neo reader
    save_path: PathType
        Required if an nwbfile is not passed. Must be the path to the nwbfile
        being appended, otherwise one is created and written.
    overwrite: bool
        If using save_path, whether to overwrite the NWBFile if it already exists.
    nwbfile: NWBFile
        Required if a save_path is not specified. If passed, this function
        will fill the relevant fields within the nwbfile.
    metadata: dict
        metadata info for constructing the nwb file (optional). Should be of the format
            metadata['Ecephys'] = {}
        with keys of the forms
            metadata['Ecephys']['Device'] = [
                {
                    'name': my_name,
                    'description': my_description
                },
                ...
            ]
            metadata['Ecephys']['ElectrodeGroup'] = [
                {
                    'name': my_name,
                    'description': my_description,
                    'location': electrode_location,
                    'device': my_device_name
                },
                ...
            ]
            metadata['Ecephys']['Electrodes'] = [
                {
                    'name': my_name,
                    'description': my_description
                },
                ...
            ]
            metadata['Ecephys']['ElectricalSeries'] = {
                'name': my_name,
                'description': my_description
            }
        Note that data intended to be added to the electrodes table of the NWBFile should be set as channel
        properties in the RecordingExtractor object.
    compression: str (optional, defaults to "gzip")
        Type of compression to use. Valid types are "gzip" and "lzf".
        Set to None to disable all compression.
    icephys_experiment_type: str (optional)
        Type of Icephys experiment. Allowed types are: 'voltage_clamp', 'current_clamp' and 'izero'.
        If no value is passed, 'voltage_clamp' is used as default.
    """
    if nwbfile is not None:
        assert isinstance(nwbfile, pynwb.NWBFile), "'nwbfile' should be of type pynwb.NWBFile"

    assert (
        distutils.version.LooseVersion(pynwb.__version__) >= "1.3.3"
    ), "'write_neo_to_nwb' not supported for version < 1.3.3. Run pip install --upgrade pynwb"

    # The two output targets are mutually exclusive.
    assert save_path is None or nwbfile is None, "Either pass a save_path location, or nwbfile object, but not both!"

    if metadata is None:
        metadata = get_nwb_metadata(neo_reader=neo_reader)

    kwargs = dict(
        neo_reader=neo_reader,
        metadata=metadata,
        compression=compression,
        icephys_experiment_type=icephys_experiment_type,
        stimulus_type=stimulus_type,
        skip_electrodes=skip_electrodes,
    )

    if nwbfile is None:
        # Append to an existing file unless overwrite was requested.
        if Path(save_path).is_file() and not overwrite:
            read_mode = "r+"
        else:
            read_mode = "w"

        with pynwb.NWBHDF5IO(str(save_path), mode=read_mode) as io:
            if read_mode == "r+":
                nwbfile = io.read()
            else:
                # Fresh file: defaults, optionally overridden by metadata['NWBFile'].
                nwbfile_kwargs = dict(
                    session_description="Auto-generated by NwbRecordingExtractor without description.",
                    identifier=str(uuid.uuid4()),
                )
                if metadata is not None and "NWBFile" in metadata:
                    nwbfile_kwargs.update(metadata["NWBFile"])
                nwbfile = pynwb.NWBFile(**nwbfile_kwargs)
            add_all_to_nwbfile(nwbfile=nwbfile, **kwargs)
            io.write(nwbfile)
    else:
        # Caller owns the nwbfile and is responsible for writing it.
        add_all_to_nwbfile(nwbfile=nwbfile, **kwargs)
| [
"warnings.warn",
"pathlib.Path",
"uuid.uuid4",
"pynwb.NWBFile"
] | [((5267, 5383), 'warnings.warn', 'warnings.warn', (['"""When adding Icephys Electrode, no Devices were found on nwbfile. Creating a Device now..."""'], {}), "(\n 'When adding Icephys Electrode, no Devices were found on nwbfile. Creating a Device now...'\n )\n", (5280, 5383), False, 'import warnings\n'), ((8304, 8493), 'warnings.warn', 'warnings.warn', (['f"""Protocol section is only present in ABF2 files. {neo_reader.filename} has version {neo_reader._axon_info[\'fFileVersionNumber\']}. Saving experiment as \'i_zero\'..."""'], {}), '(\n f"Protocol section is only present in ABF2 files. {neo_reader.filename} has version {neo_reader._axon_info[\'fFileVersionNumber\']}. Saving experiment as \'i_zero\'..."\n )\n', (8317, 8493), False, 'import warnings\n'), ((8695, 8822), 'warnings.warn', 'warnings.warn', (['f"""No command data found by neo reader in file {neo_reader.filename}. Saving experiment as \'i_zero\'..."""'], {}), '(\n f"No command data found by neo reader in file {neo_reader.filename}. Saving experiment as \'i_zero\'..."\n )\n', (8708, 8822), False, 'import warnings\n'), ((9377, 9506), 'warnings.warn', 'warnings.warn', (['"""When adding Icephys Recording, no Icephys Electrodes were found on nwbfile. Creating Electrodes now..."""'], {}), "(\n 'When adding Icephys Recording, no Icephys Electrodes were found on nwbfile. Creating Electrodes now...'\n )\n", (9390, 9506), False, 'import warnings\n'), ((6701, 6830), 'warnings.warn', 'warnings.warn', (['f"""Device \'{device_name}\' not detected in attempted link to icephys electrode! Automatically generating."""'], {}), '(\n f"Device \'{device_name}\' not detected in attempted link to icephys electrode! 
Automatically generating."\n )\n', (6714, 6830), False, 'import warnings\n'), ((19608, 19639), 'pynwb.NWBFile', 'pynwb.NWBFile', ([], {}), '(**nwbfile_kwargs)\n', (19621, 19639), False, 'import pynwb\n'), ((18968, 18983), 'pathlib.Path', 'Path', (['save_path'], {}), '(save_path)\n', (18972, 18983), False, 'from pathlib import Path\n'), ((3606, 3728), 'warnings.warn', 'warnings.warn', (['"""No valid units found for traces in the current file. Gain is set to 1, but this might be wrong."""'], {}), "(\n 'No valid units found for traces in the current file. Gain is set to 1, but this might be wrong.'\n )\n", (3619, 3728), False, 'import warnings\n'), ((4190, 4202), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4200, 4202), False, 'import uuid\n'), ((19419, 19431), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (19429, 19431), False, 'import uuid\n')] |
import os
import torch
import torch.distributed as dist
from megatron import get_args
from megatron import print_rank_0
from megatron.indexer_emdr2 import IndexBuilder
from megatron.checkpointing import get_checkpoint_tracker_filename
from megatron.initialize import init_distributed, _init_autoresume, _set_random_seed, \
_write_args_to_tensorboard, _initialize_mem_buffs
from megatron.mpu.initialize import set_data_parallel_group, set_model_parallel_group, init_emdr2_groups, \
get_train_group, get_index_group, get_data_parallel_group, get_new_chkpt_ready, get_new_index_ready, \
get_gloo_comm_group, get_exit_interval, initialize_mips_group, mips_is_initialized
# Module-level handles for the coordination tensors broadcast over the gloo
# group; populated in AsyncIndexBuilder.__init__ from the mpu getters.
NEW_INDEX_READY = None
NEW_CHKPT_READY = None
EXIT_INTERVAL = None
def pprint(*args):
    """print() that always flushes stdout, so messages from multi-process
    runs appear promptly instead of sitting in a buffer."""
    print(*args, flush=True)
def initialize_and_run_async_megatron(allow_no_cuda=False):
    """Initialise torch.distributed with the world split into a trainer group
    and an indexer group.  Trainer ranks return to the caller after the
    barrier; indexer ranks enter the asynchronous index-building loop and
    never return.

    :param allow_no_cuda: skip the CUDA availability assertion when True.
    """
    if not allow_no_cuda:
        assert torch.cuda.is_available(), "Megatron required CUDA."
    args = get_args()
    # The async indexer requires a configured trainer/indexer rank split.
    assert args.async_indexer and args.max_training_rank is not None
    init_distributed()
    setup_emdr2_groups_and_vars()
    pprint("finished setting up EMDR2 groups")
    if mips_is_initialized():
        print('MIPS group is already initialized')
    else:
        initialize_mips_group()
    _initialize_mem_buffs()
    # _init_autoresume()
    # pprint("finished setting up autoresume")
    # Random seeds for reproducibility.
    if args.rank == 0:
        pprint('> setting random seeds to {} ...'.format(args.seed))
    _set_random_seed(args.seed)
    # Write arguments to tensorboard.
    _write_args_to_tensorboard()
    pprint('finished writing args to tensorboard')
    torch.distributed.barrier()
    # Ranks below max_training_rank are trainers; the remaining ranks build
    # the index asynchronously.
    if torch.distributed.get_rank() < args.max_training_rank:
        torch.distributed.barrier(get_data_parallel_group())
        print_rank_0("Trainer Group: All trainers ready.")
        return
    else:
        runner = AsyncIndexBuilder(args.rank)
        torch.distributed.barrier(get_data_parallel_group())
        print_rank_0("Indexer Group: All indexers ready.")
        runner.run_async()
def setup_emdr2_groups_and_vars():
    """Split the world into a training group (ranks below max_training_rank)
    and an index-building group, and register the matching process groups."""
    args = get_args()
    world_size = dist.get_world_size()
    boundary = args.max_training_rank
    # Assuming no model parallelism: every rank forms its own group.
    set_model_parallel_group(dist.new_group([args.rank]))
    init_emdr2_groups(boundary, world_size)
    is_trainer = args.rank < boundary
    set_data_parallel_group(get_train_group() if is_trainer else get_index_group())
class AsyncIndexBuilder(IndexBuilder):
    """Index builder that runs on the indexer rank group and rebuilds the
    retrieval index asynchronously each time the trainers publish a new
    checkpoint (coordinated via gloo broadcasts of the READY tensors)."""

    def __init__(self, rank):
        super().__init__(call_load_attributes_func=False)
        self.rank = rank
        args = get_args()
        # The first indexer rank (== max_training_rank) coordinates broadcasts.
        self.main_builder_idx = args.max_training_rank
        self.exit_handle = None
        # Get the path of the correct model to load
        iteration = 0
        tracker_filename = get_checkpoint_tracker_filename(args.load)
        if os.path.isfile(tracker_filename):
            with open(tracker_filename, 'r') as f:
                iteration = int(f.read().strip())
        # Resume from the training checkpoint when one exists; otherwise fall
        # back to the pretrained DPR retriever weights.
        if iteration > 0:
            model_load_path = args.load
            key_list = ['retriever/biencoder_model']
        else:
            model_load_path = args.pretrained_dpr_load
            key_list = None
        # Load the context encoder weights
        self.load_attributes(custom_load_path=model_load_path, key_list=key_list)
        global NEW_INDEX_READY
        NEW_INDEX_READY = get_new_index_ready()
        global NEW_CHKPT_READY
        NEW_CHKPT_READY = get_new_chkpt_ready()

    def run_async(self):
        """Wait for the first checkpoint signal, then loop forever: rebuild
        and save the index, signal readiness, and reload the newest weights."""
        args = get_args()
        global NEW_CHKPT_READY
        # When the indexing starts, wait for the NEW_CHKPT_READY signal from trainer process of rank=0
        dist.broadcast(NEW_CHKPT_READY, 0, group=get_gloo_comm_group())
        while True:
            if self.is_main_builder:
                print("Starting Indexing again!", flush=True)
            self.build_and_save_index()
            self.send_index_ready_signal()
            self.load_attributes(custom_load_path=args.load,
                                 key_list=['retriever/biencoder_model'])

    def send_index_ready_signal(self):
        """Broadcast NEW_INDEX_READY from the main builder, then block until
        the trainers broadcast NEW_CHKPT_READY back (per-iteration handshake)."""
        global NEW_INDEX_READY
        global NEW_CHKPT_READY
        # send handle
        if self.is_main_builder:
            print("indexer group: broadcasting NEW INDEX READY MESSAGE", flush=True)
        dist.broadcast(NEW_INDEX_READY,
                       self.main_builder_idx,
                       group=get_gloo_comm_group(),
                       async_op=True)
        # recv handle
        dist.broadcast(NEW_CHKPT_READY, 0, group=get_gloo_comm_group())
| [
"megatron.checkpointing.get_checkpoint_tracker_filename",
"megatron.mpu.initialize.get_data_parallel_group",
"os.path.isfile",
"torch.distributed.get_world_size",
"megatron.initialize._set_random_seed",
"megatron.mpu.initialize.mips_is_initialized",
"torch.distributed.get_rank",
"megatron.mpu.initiali... | [((968, 978), 'megatron.get_args', 'get_args', ([], {}), '()\n', (976, 978), False, 'from megatron import get_args\n'), ((1053, 1071), 'megatron.initialize.init_distributed', 'init_distributed', ([], {}), '()\n', (1069, 1071), False, 'from megatron.initialize import init_distributed, _init_autoresume, _set_random_seed, _write_args_to_tensorboard, _initialize_mem_buffs\n'), ((1161, 1182), 'megatron.mpu.initialize.mips_is_initialized', 'mips_is_initialized', ([], {}), '()\n', (1180, 1182), False, 'from megatron.mpu.initialize import set_data_parallel_group, set_model_parallel_group, init_emdr2_groups, get_train_group, get_index_group, get_data_parallel_group, get_new_chkpt_ready, get_new_index_ready, get_gloo_comm_group, get_exit_interval, initialize_mips_group, mips_is_initialized\n'), ((1282, 1305), 'megatron.initialize._initialize_mem_buffs', '_initialize_mem_buffs', ([], {}), '()\n', (1303, 1305), False, 'from megatron.initialize import init_distributed, _init_autoresume, _set_random_seed, _write_args_to_tensorboard, _initialize_mem_buffs\n'), ((1516, 1543), 'megatron.initialize._set_random_seed', '_set_random_seed', (['args.seed'], {}), '(args.seed)\n', (1532, 1543), False, 'from megatron.initialize import init_distributed, _init_autoresume, _set_random_seed, _write_args_to_tensorboard, _initialize_mem_buffs\n'), ((1587, 1615), 'megatron.initialize._write_args_to_tensorboard', '_write_args_to_tensorboard', ([], {}), '()\n', (1613, 1615), False, 'from megatron.initialize import init_distributed, _init_autoresume, _set_random_seed, _write_args_to_tensorboard, _initialize_mem_buffs\n'), ((1672, 1699), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (1697, 1699), False, 'import torch\n'), ((2149, 2159), 'megatron.get_args', 'get_args', ([], {}), '()\n', (2157, 2159), False, 'from megatron import get_args\n'), ((2177, 2198), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (2196, 
2198), True, 'import torch.distributed as dist\n'), ((2355, 2403), 'megatron.mpu.initialize.init_emdr2_groups', 'init_emdr2_groups', (['max_training_rank', 'world_size'], {}), '(max_training_rank, world_size)\n', (2372, 2403), False, 'from megatron.mpu.initialize import set_data_parallel_group, set_model_parallel_group, init_emdr2_groups, get_train_group, get_index_group, get_data_parallel_group, get_new_chkpt_ready, get_new_index_ready, get_gloo_comm_group, get_exit_interval, initialize_mips_group, mips_is_initialized\n'), ((903, 928), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (926, 928), False, 'import torch\n'), ((1253, 1276), 'megatron.mpu.initialize.initialize_mips_group', 'initialize_mips_group', ([], {}), '()\n', (1274, 1276), False, 'from megatron.mpu.initialize import set_data_parallel_group, set_model_parallel_group, init_emdr2_groups, get_train_group, get_index_group, get_data_parallel_group, get_new_chkpt_ready, get_new_index_ready, get_gloo_comm_group, get_exit_interval, initialize_mips_group, mips_is_initialized\n'), ((1708, 1736), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (1734, 1736), False, 'import torch\n'), ((1832, 1882), 'megatron.print_rank_0', 'print_rank_0', (['"""Trainer Group: All trainers ready."""'], {}), "('Trainer Group: All trainers ready.')\n", (1844, 1882), False, 'from megatron import print_rank_0\n'), ((2023, 2073), 'megatron.print_rank_0', 'print_rank_0', (['"""Indexer Group: All indexers ready."""'], {}), "('Indexer Group: All indexers ready.')\n", (2035, 2073), False, 'from megatron import print_rank_0\n'), ((2322, 2349), 'torch.distributed.new_group', 'dist.new_group', (['[args.rank]'], {}), '([args.rank])\n', (2336, 2349), True, 'import torch.distributed as dist\n'), ((2725, 2735), 'megatron.get_args', 'get_args', ([], {}), '()\n', (2733, 2735), False, 'from megatron import get_args\n'), ((2925, 2967), 'megatron.checkpointing.get_checkpoint_tracker_filename', 
'get_checkpoint_tracker_filename', (['args.load'], {}), '(args.load)\n', (2956, 2967), False, 'from megatron.checkpointing import get_checkpoint_tracker_filename\n'), ((2979, 3011), 'os.path.isfile', 'os.path.isfile', (['tracker_filename'], {}), '(tracker_filename)\n', (2993, 3011), False, 'import os\n'), ((3515, 3536), 'megatron.mpu.initialize.get_new_index_ready', 'get_new_index_ready', ([], {}), '()\n', (3534, 3536), False, 'from megatron.mpu.initialize import set_data_parallel_group, set_model_parallel_group, init_emdr2_groups, get_train_group, get_index_group, get_data_parallel_group, get_new_chkpt_ready, get_new_index_ready, get_gloo_comm_group, get_exit_interval, initialize_mips_group, mips_is_initialized\n'), ((3594, 3615), 'megatron.mpu.initialize.get_new_chkpt_ready', 'get_new_chkpt_ready', ([], {}), '()\n', (3613, 3615), False, 'from megatron.mpu.initialize import set_data_parallel_group, set_model_parallel_group, init_emdr2_groups, get_train_group, get_index_group, get_data_parallel_group, get_new_chkpt_ready, get_new_index_ready, get_gloo_comm_group, get_exit_interval, initialize_mips_group, mips_is_initialized\n'), ((3658, 3668), 'megatron.get_args', 'get_args', ([], {}), '()\n', (3666, 3668), False, 'from megatron import get_args\n'), ((1797, 1822), 'megatron.mpu.initialize.get_data_parallel_group', 'get_data_parallel_group', ([], {}), '()\n', (1820, 1822), False, 'from megatron.mpu.initialize import set_data_parallel_group, set_model_parallel_group, init_emdr2_groups, get_train_group, get_index_group, get_data_parallel_group, get_new_chkpt_ready, get_new_index_ready, get_gloo_comm_group, get_exit_interval, initialize_mips_group, mips_is_initialized\n'), ((1988, 2013), 'megatron.mpu.initialize.get_data_parallel_group', 'get_data_parallel_group', ([], {}), '()\n', (2011, 2013), False, 'from megatron.mpu.initialize import set_data_parallel_group, set_model_parallel_group, init_emdr2_groups, get_train_group, get_index_group, get_data_parallel_group, 
get_new_chkpt_ready, get_new_index_ready, get_gloo_comm_group, get_exit_interval, initialize_mips_group, mips_is_initialized\n'), ((2475, 2492), 'megatron.mpu.initialize.get_train_group', 'get_train_group', ([], {}), '()\n', (2490, 2492), False, 'from megatron.mpu.initialize import set_data_parallel_group, set_model_parallel_group, init_emdr2_groups, get_train_group, get_index_group, get_data_parallel_group, get_new_chkpt_ready, get_new_index_ready, get_gloo_comm_group, get_exit_interval, initialize_mips_group, mips_is_initialized\n'), ((2536, 2553), 'megatron.mpu.initialize.get_index_group', 'get_index_group', ([], {}), '()\n', (2551, 2553), False, 'from megatron.mpu.initialize import set_data_parallel_group, set_model_parallel_group, init_emdr2_groups, get_train_group, get_index_group, get_data_parallel_group, get_new_chkpt_ready, get_new_index_ready, get_gloo_comm_group, get_exit_interval, initialize_mips_group, mips_is_initialized\n'), ((3853, 3874), 'megatron.mpu.initialize.get_gloo_comm_group', 'get_gloo_comm_group', ([], {}), '()\n', (3872, 3874), False, 'from megatron.mpu.initialize import set_data_parallel_group, set_model_parallel_group, init_emdr2_groups, get_train_group, get_index_group, get_data_parallel_group, get_new_chkpt_ready, get_new_index_ready, get_gloo_comm_group, get_exit_interval, initialize_mips_group, mips_is_initialized\n'), ((4571, 4592), 'megatron.mpu.initialize.get_gloo_comm_group', 'get_gloo_comm_group', ([], {}), '()\n', (4590, 4592), False, 'from megatron.mpu.initialize import set_data_parallel_group, set_model_parallel_group, init_emdr2_groups, get_train_group, get_index_group, get_data_parallel_group, get_new_chkpt_ready, get_new_index_ready, get_gloo_comm_group, get_exit_interval, initialize_mips_group, mips_is_initialized\n'), ((4704, 4725), 'megatron.mpu.initialize.get_gloo_comm_group', 'get_gloo_comm_group', ([], {}), '()\n', (4723, 4725), False, 'from megatron.mpu.initialize import set_data_parallel_group, 
set_model_parallel_group, init_emdr2_groups, get_train_group, get_index_group, get_data_parallel_group, get_new_chkpt_ready, get_new_index_ready, get_gloo_comm_group, get_exit_interval, initialize_mips_group, mips_is_initialized\n')] |
import cv2
import numpy as np
import mediapipe as mp
# MediaPipe handles — imported but unused by the motion-detection code below.
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_pose = mp.solutions.pose
# ---------------------------------------------------------------------
# 3x3 sharpening kernel (centre weight 5, 4-neighbours -1).
# Renamed from `filter`, which shadowed the Python built-in of the same name.
SHARPEN_KERNEL = np.array(
    [
        [0, -1, 0],
        [-1, 5, -1],
        [0, -1, 0],
    ]
)


def sharpen(img):
    """Return a sharpened copy of *img* (2-D convolution, same bit depth)."""
    return cv2.filter2D(img, -1, SHARPEN_KERNEL)
# ---------------------------------------------------------------
dim = (720, 385)

# Perspective mapping: four manually picked corner points in the source
# footage (pts1) are mapped onto the full output rectangle (pts2).
pts1 = np.float32([[502, 57], [218, 57], [690, 320], [30, 320]])
pts2 = np.float32([[0, 0], [dim[0], 0], [0, dim[1]], [dim[0], dim[1]]])
matrix = cv2.getPerspectiveTransform(pts1, pts2)


def _next_frame(capture):
    """Read one frame and preprocess it (resize, warp to the top-down view,
    sharpen).  Returns None when the stream is exhausted — the original code
    never checked the read flag and crashed at end of video."""
    ok, frame = capture.read()
    if not ok:
        return None
    frame = cv2.resize(frame, dim)
    frame = cv2.warpPerspective(frame, matrix, dim)
    return sharpen(frame)


cap = cv2.VideoCapture('../video_file/Hackathon_high_home_1_Trim.mp4')
frame1 = _next_frame(cap)
frame2 = _next_frame(cap)

# Simple motion detection: threshold the difference between consecutive
# frames and box moving blobs whose area falls in a plausible range.
while frame1 is not None and frame2 is not None:
    diff = cv2.absdiff(frame1, frame2)
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    blur = cv2.bilateralFilter(gray, 10, 510, 50)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=3)
    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        (x, y, w, h) = cv2.boundingRect(contour)
        print(x, y)
        # only blobs between 100 and 450 px^2 are treated as motion
        if 100 < cv2.contourArea(contour) < 450:
            cv2.rectangle(frame1, (x, y), (x + w, y + h), (255, 255, 0), 1)
    cv2.imshow('video', frame1)

    frame1 = frame2
    frame2 = _next_frame(cap)
    if (cv2.waitKey(27) & 0xFF) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
# import cv2
# import sys
# (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
# if __name__ == '__main__' :
# # Set up tracker.
# # Instead of MIL, you can also use
# tracker_types = ['BOOSTING', 'MIL','KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
# tracker_type = tracker_types[-1]
# if int(minor_ver) < 3:
# tracker = cv2.Tracker_create(tracker_type)
# else:
# if tracker_type == 'BOOSTING':
# tracker = cv2.TrackerBoosting_create()
# if tracker_type == 'MIL':
# tracker = cv2.TrackerMIL_create()
# if tracker_type == 'KCF':
# tracker = cv2.TrackerKCF_create()
# if tracker_type == 'TLD':
# tracker = cv2.TrackerTLD_create()
# if tracker_type == 'MEDIANFLOW':
# tracker = cv2.TrackerMedianFlow_create()
# if tracker_type == 'GOTURN':
# tracker = cv2.TrackerGOTURN_create()
# if tracker_type == 'MOSSE':
# tracker = cv2.TrackerMOSSE_create()
# if tracker_type == "CSRT":
# tracker = cv2.TrackerCSRT_create()
# # Read video
# video = cv2.VideoCapture("../video_file/Hackathon_high_home_1_Trim.mp4")
# # Exit if video not opened.
# if not video.isOpened():
# print("Could not open video")
# sys.exit()
# # Read first frame.
# ok, frame = video.read()
# if not ok:
# print('Cannot read video file')
# sys.exit()
# # Define an initial bounding box
# bbox = (287, 23, 86, 320)
# # Uncomment the line below to select a different bounding box
# bbox = cv2.selectROI(frame)
# # Initialize tracker with first frame and bounding box
# ok = tracker.init(frame, bbox)
# while True:
# # Read a new frame
# ok, frame = video.read()
# if not ok:
# break
# # Start timer
# timer = cv2.getTickCount()
# # Update tracker
# ok, bbox = tracker.update(frame)
# # Calculate Frames per second (FPS)
# fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer);
# # Draw bounding box
# if ok:
# # Tracking success
# p1 = (int(bbox[0]), int(bbox[1]))
# p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
# cv2.rectangle(frame, p1, p2, (255,0,0), 2, 1)
# else :
# # Tracking failure
# cv2.putText(frame, "Tracking failure detected", (100,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)
# # Display tracker type on frame
# cv2.putText(frame, tracker_type + " Tracker", (100,20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50),2);
# # Display FPS on frame
# cv2.putText(frame, "FPS : " + str(int(fps)), (100,50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2);
# # Display result
# cv2.imshow("Tracking", frame)
# # Exit if ESC pressed
# k = cv2.waitKey(1) & 0xff
# if k == 27 : break | [
"cv2.getPerspectiveTransform",
"cv2.bilateralFilter",
"cv2.rectangle",
"cv2.absdiff",
"cv2.imshow",
"cv2.warpPerspective",
"cv2.contourArea",
"cv2.filter2D",
"cv2.dilate",
"cv2.cvtColor",
"cv2.boundingRect",
"cv2.destroyAllWindows",
"cv2.resize",
"cv2.waitKey",
"numpy.float32",
"cv2.th... | [((250, 297), 'numpy.array', 'np.array', (['[[0, -1, 0], [-1, 5, -1], [0, -1, 0]]'], {}), '([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])\n', (258, 297), True, 'import numpy as np\n'), ((514, 578), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""../video_file/Hackathon_high_home_1_Trim.mp4"""'], {}), "('../video_file/Hackathon_high_home_1_Trim.mp4')\n", (530, 578), False, 'import cv2\n'), ((614, 637), 'cv2.resize', 'cv2.resize', (['frame1', 'dim'], {}), '(frame1, dim)\n', (624, 637), False, 'import cv2\n'), ((645, 702), 'numpy.float32', 'np.float32', (['[[502, 57], [218, 57], [690, 320], [30, 320]]'], {}), '([[502, 57], [218, 57], [690, 320], [30, 320]])\n', (655, 702), True, 'import numpy as np\n'), ((706, 770), 'numpy.float32', 'np.float32', (['[[0, 0], [dim[0], 0], [0, dim[1]], [dim[0], dim[1]]]'], {}), '([[0, 0], [dim[0], 0], [0, dim[1]], [dim[0], dim[1]]])\n', (716, 770), True, 'import numpy as np\n'), ((778, 817), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (805, 817), False, 'import cv2\n'), ((827, 867), 'cv2.warpPerspective', 'cv2.warpPerspective', (['frame1', 'matrix', 'dim'], {}), '(frame1, matrix, dim)\n', (846, 867), False, 'import cv2\n'), ((903, 926), 'cv2.resize', 'cv2.resize', (['frame2', 'dim'], {}), '(frame2, dim)\n', (913, 926), False, 'import cv2\n'), ((936, 975), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (963, 975), False, 'import cv2\n'), ((985, 1025), 'cv2.warpPerspective', 'cv2.warpPerspective', (['frame2', 'matrix', 'dim'], {}), '(frame2, matrix, dim)\n', (1004, 1025), False, 'import cv2\n'), ((2214, 2237), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2235, 2237), False, 'import cv2\n'), ((370, 399), 'cv2.filter2D', 'cv2.filter2D', (['img', '(-1)', 'filter'], {}), '(img, -1, filter)\n', (382, 399), False, 'import cv2\n'), ((1102, 1129), 'cv2.absdiff', 'cv2.absdiff', (['frame1', 'frame2'], {}), 
'(frame1, frame2)\n', (1113, 1129), False, 'import cv2\n'), ((1141, 1179), 'cv2.cvtColor', 'cv2.cvtColor', (['diff', 'cv2.COLOR_BGR2GRAY'], {}), '(diff, cv2.COLOR_BGR2GRAY)\n', (1153, 1179), False, 'import cv2\n'), ((1191, 1229), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['gray', '(10)', '(510)', '(50)'], {}), '(gray, 10, 510, 50)\n', (1210, 1229), False, 'import cv2\n'), ((1247, 1294), 'cv2.threshold', 'cv2.threshold', (['blur', '(20)', '(255)', 'cv2.THRESH_BINARY'], {}), '(blur, 20, 255, cv2.THRESH_BINARY)\n', (1260, 1294), False, 'import cv2\n'), ((1309, 1347), 'cv2.dilate', 'cv2.dilate', (['thresh', 'None'], {'iterations': '(3)'}), '(thresh, None, iterations=3)\n', (1319, 1347), False, 'import cv2\n'), ((1367, 1432), 'cv2.findContours', 'cv2.findContours', (['dilated', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (1383, 1432), False, 'import cv2\n'), ((1893, 1920), 'cv2.imshow', 'cv2.imshow', (['"""video"""', 'frame1'], {}), "('video', frame1)\n", (1903, 1920), False, 'import cv2\n'), ((1983, 2006), 'cv2.resize', 'cv2.resize', (['frame2', 'dim'], {}), '(frame2, dim)\n', (1993, 2006), False, 'import cv2\n'), ((2021, 2060), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (2048, 2060), False, 'import cv2\n'), ((2074, 2114), 'cv2.warpPerspective', 'cv2.warpPerspective', (['frame2', 'matrix', 'dim'], {}), '(frame2, matrix, dim)\n', (2093, 2114), False, 'import cv2\n'), ((1487, 1512), 'cv2.boundingRect', 'cv2.boundingRect', (['contour'], {}), '(contour)\n', (1503, 1512), False, 'import cv2\n'), ((1622, 1685), 'cv2.rectangle', 'cv2.rectangle', (['frame1', '(x, y)', '(x + w, y + h)', '(255, 255, 0)', '(1)'], {}), '(frame1, (x, y), (x + w, y + h), (255, 255, 0), 1)\n', (1635, 1685), False, 'import cv2\n'), ((2151, 2166), 'cv2.waitKey', 'cv2.waitKey', (['(27)'], {}), '(27)\n', (2162, 2166), False, 'import cv2\n'), ((1543, 1567), 'cv2.contourArea', 
'cv2.contourArea', (['contour'], {}), '(contour)\n', (1558, 1567), False, 'import cv2\n'), ((1578, 1602), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (1593, 1602), False, 'import cv2\n')] |
# Written by: <NAME>, @dataoutsider
# Viz: "Good Read", enjoy!
import pandas as pd
import numpy as np
import requests
from bs4 import BeautifulSoup
import re
import matplotlib.pyplot as plt
import os
#region Site 1: https://mostrecommendedbooks.com/best-books
def crawl(url):
    """Fetch *url* and return its HTML parsed into a BeautifulSoup tree."""
    response = requests.get(url)
    return BeautifulSoup(response.text, 'html.parser')
main_page = 'https://mostrecommendedbooks.com/best-books'
# Collect the per-category page links from the category list on the main page.
links = []
for tag in crawl(main_page).find_all('ul', {'class':'styles_sub-best-books__1VZwz'}):
    for attribute in tag.find_all('a'):
        element = attribute.get('href')
        # NOTE(review): replace(element, ...) always yields main_page+element[1:];
        # the hrefs appear to be site-relative ('./...') — confirm.
        link = element.replace(element, main_page+element[1:])
        links.append(link)
# Scrape every category page into books: title -> {title, author, category list}.
books = {}
for page in links:
    link = page.replace('-booksbest','')
    # the category name is embedded in the URL as best-<category>-books
    category_s = re.search('best-(.*)-books', link)
    category = category_s.group(1).replace('-', ' ')
    soup = crawl(link)
    titles = []
    authors = []
    for tag in soup.find_all('div', {'class':'styles_book-category-text__272Fl'}):
        book = ''
        for name in tag.find_all('h2'):
            book = name.text
            if book in books:
                books[book]['category'].append(category)
            else:
                books[book] = {}
                books[book]['category'] = [category]
                books[book]['title'] = book
            # the first h3 following the title is taken as the author name
            for a in tag.find_all('h3'):
                books[book]['author'] = a.text
                break
            break
df = pd.DataFrame.from_dict({(i): books[i] for i in books.keys() }, orient='index')
print(df)
df.to_csv(os.path.dirname(__file__) + '/mostrecommendedbooks.csv', encoding='utf-8', index=False)
#endregion
#region example
# Reshape: one row per (title, author), with all recommenders joined by '|'.
recommended_books = pd.read_csv(os.path.dirname(__file__) + '/most_recommended.csv', header=0, names=['recommender', 'title', 'author'])
recommended_books_reshaped = recommended_books.groupby(['title', 'author'])['recommender'].apply(lambda x: '|'.join(x)).reset_index()
# strip stray double quotes from titles
recommended_books_reshaped['title'] = recommended_books_reshaped['title'].str.replace('"','')
query_books = recommended_books_reshaped['title'].tolist()
query_authors = recommended_books_reshaped['author'].tolist()
#endregion | [
"bs4.BeautifulSoup",
"os.path.dirname",
"re.search",
"requests.get"
] | [((294, 311), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (306, 311), False, 'import requests\n'), ((323, 365), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content.text', '"""html.parser"""'], {}), "(content.text, 'html.parser')\n", (336, 365), False, 'from bs4 import BeautifulSoup\n'), ((801, 835), 're.search', 're.search', (['"""best-(.*)-books"""', 'link'], {}), "('best-(.*)-books', link)\n", (810, 835), False, 'import re\n'), ((1614, 1639), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1629, 1639), False, 'import os\n'), ((1762, 1787), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1777, 1787), False, 'import os\n')] |
import tensorflow as tf
import numpy as np
import pandas as pd
import re
import nltk
import string
import random
# Fix all RNG seeds for reproducibility.
random.seed(0)
np.random.seed(0)
tf.random.set_seed(42)
# NOTE(review): duplicated call — the second set_seed(42) is redundant.
tf.random.set_seed(42)
from nltk.tokenize import word_tokenize
from nltk.tokenize.treebank import TreebankWordDetokenizer
df = pd.read_csv("imdb.csv")
def preprocess(x):
    """Normalise a raw review string for tokenisation.

    Lower-cases, strips non-ASCII characters, removes URLs, @mentions,
    hashtags, apostrophe suffixes ("it's" -> "it"), punctuation, any word
    containing a digit, and collapses runs of whitespace to a single space.

    Raw strings are used for the regex patterns: the original literals
    ("\\S", "\\w", ...) were invalid escape sequences (SyntaxWarning).
    """
    x = x.lower()
    x = x.encode("ascii", "ignore").decode()
    x = re.sub(r"https*\S+", " ", x)   # URLs (http or https)
    x = re.sub(r"@\S+", " ", x)        # @mentions
    x = re.sub(r"#\S+", " ", x)        # hashtags
    x = re.sub(r"'\w+", "", x)         # apostrophe suffixes: "it's" -> "it"
    x = re.sub("[%s]" % re.escape(string.punctuation), " ", x)
    x = re.sub(r"\w*\d+\w*", "", x)     # words containing digits
    x = re.sub(r"\s{2,}", " ", x)       # collapse whitespace runs
    return x
# Clean every raw review string before tokenisation; a comprehension
# replaces the index-based append loop.
data_to_list = df["review"].values.tolist()
temp = [preprocess(review) for review in data_to_list]
def tokenize(y):
    """Lazily yield the NLTK word tokens of each document in *y*."""
    for document in y:
        yield word_tokenize(str(document))
# Materialise the token lists for every cleaned review.
data_words = list(tokenize(temp))
def detokenize(txt):
    """Join a token list back into one plain string using Treebank rules."""
    detokenizer = TreebankWordDetokenizer()
    return detokenizer.detokenize(txt)
# Detokenise each token list back into a normalised string, then stack the
# corpus into a numpy array; a comprehension replaces the index loop.
final_data = [detokenize(tokens) for tokens in data_words]
print(final_data[:5])
final_data = np.array(final_data)
import pickle
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
# Tokenizer / padding hyper-parameters.
max_words = 20000
max_len = 200
tokenizer = Tokenizer(num_words = max_words)
tokenizer.fit_on_texts(final_data)
sequences = tokenizer.texts_to_sequences(final_data)
tweets = pad_sequences(sequences,maxlen=max_len)
# Persist the fitted tokenizer so inference can reuse the same vocabulary.
with open("tokenizer.pickle","wb") as handle:
    pickle.dump(tokenizer,handle,protocol=pickle.HIGHEST_PROTOCOL)
print(tweets)
# Encode the string labels as integers: negative -> 0, positive -> 1.
labels = np.array(df["sentiment"])
l = []
for i in range(len(labels)):
    if labels[i]=="negative":
        l.append(0)
    elif labels[i]=="positive":
        l.append(1)
l = np.array(l)
labels = tf.keras.utils.to_categorical(l,2,dtype="int32")
del l
# Train/test split, then a further 25% of train held out for validation.
x_train,x_test,y_train,y_test = train_test_split(tweets,labels,random_state=42)
x_train,x_val,y_train,y_val = train_test_split(x_train,y_train,test_size=0.25,random_state=42)
# Two stacked GRU layers over a 128-d embedding; 2-way sigmoid output.
inputs = tf.keras.Input(shape=(None,),dtype="int32")
x = layers.Embedding(max_words,128)(inputs)
x = layers.GRU(64,return_sequences=True)(x)
x = layers.GRU(64)(x)
outputs = layers.Dense(2,activation="sigmoid")(x)
model = tf.keras.Model(inputs,outputs)
model.summary()
model.compile(optimizer="adam",loss="binary_crossentropy",metrics=["accuracy"])
# Keep only the weights of the best validation-accuracy epoch.
checkpoint = ModelCheckpoint("model_gru.hdf5",monitor="val_accuracy",verbose=1,save_best_only=True,save_weights_only=False)
model.fit(x_train,y_train,batch_size=32,epochs=5,validation_data=(x_val,y_val),callbacks=[checkpoint])
# Reload the best checkpoint and report held-out performance.
best = tf.keras.models.load_model("model_gru.hdf5")
loss,acc = best.evaluate(x_test,y_test,verbose=2)
# NOTE(review): evaluate(x_test) without labels looks wrong here; this was
# presumably meant to be best.predict(x_test) — confirm.
predictions = best.evaluate(x_test)
print("Test acc: {:.2f} %".format(100*acc))
print("Test loss: {:.2f} %".format(100*loss))
| [
"tensorflow.random.set_seed",
"pickle.dump",
"numpy.random.seed",
"tensorflow.keras.layers.Dense",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.callbacks.ModelCheckpoint",
"nltk.tokenize.treebank.TreebankWordDetokenizer",
"tensorflow.keras.preprocessing.text.Token... | [((114, 128), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (125, 128), False, 'import random\n'), ((129, 146), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (143, 146), True, 'import numpy as np\n'), ((147, 169), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(42)'], {}), '(42)\n', (165, 169), True, 'import tensorflow as tf\n'), ((170, 192), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(42)'], {}), '(42)\n', (188, 192), True, 'import tensorflow as tf\n'), ((299, 322), 'pandas.read_csv', 'pd.read_csv', (['"""imdb.csv"""'], {}), "('imdb.csv')\n", (310, 322), True, 'import pandas as pd\n'), ((1072, 1092), 'numpy.array', 'np.array', (['final_data'], {}), '(final_data)\n', (1080, 1092), True, 'import numpy as np\n'), ((1464, 1494), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'max_words'}), '(num_words=max_words)\n', (1473, 1494), False, 'from tensorflow.keras.preprocessing.text import Tokenizer\n'), ((1594, 1634), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['sequences'], {'maxlen': 'max_len'}), '(sequences, maxlen=max_len)\n', (1607, 1634), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((1768, 1793), 'numpy.array', 'np.array', (["df['sentiment']"], {}), "(df['sentiment'])\n", (1776, 1793), True, 'import numpy as np\n'), ((1918, 1929), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (1926, 1929), True, 'import numpy as np\n'), ((1939, 1989), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['l', '(2)'], {'dtype': '"""int32"""'}), "(l, 2, dtype='int32')\n", (1968, 1989), True, 'import tensorflow as tf\n'), ((2027, 2076), 'sklearn.model_selection.train_test_split', 'train_test_split', (['tweets', 'labels'], {'random_state': '(42)'}), '(tweets, labels, random_state=42)\n', (2043, 2076), False, 'from sklearn.model_selection import train_test_split\n'), 
((2105, 2172), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_train', 'y_train'], {'test_size': '(0.25)', 'random_state': '(42)'}), '(x_train, y_train, test_size=0.25, random_state=42)\n', (2121, 2172), False, 'from sklearn.model_selection import train_test_split\n'), ((2180, 2224), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(None,)', 'dtype': '"""int32"""'}), "(shape=(None,), dtype='int32')\n", (2194, 2224), True, 'import tensorflow as tf\n'), ((2392, 2423), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (2406, 2423), True, 'import tensorflow as tf\n'), ((2533, 2651), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""model_gru.hdf5"""'], {'monitor': '"""val_accuracy"""', 'verbose': '(1)', 'save_best_only': '(True)', 'save_weights_only': '(False)'}), "('model_gru.hdf5', monitor='val_accuracy', verbose=1,\n save_best_only=True, save_weights_only=False)\n", (2548, 2651), False, 'from tensorflow.keras.callbacks import ModelCheckpoint\n'), ((2754, 2798), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""model_gru.hdf5"""'], {}), "('model_gru.hdf5')\n", (2780, 2798), True, 'import tensorflow as tf\n'), ((404, 432), 're.sub', 're.sub', (['"""https*\\\\S+"""', '""" """', 'x'], {}), "('https*\\\\S+', ' ', x)\n", (410, 432), False, 'import re\n'), ((435, 458), 're.sub', 're.sub', (['"""@\\\\S+"""', '""" """', 'x'], {}), "('@\\\\S+', ' ', x)\n", (441, 458), False, 'import re\n'), ((461, 484), 're.sub', 're.sub', (['"""#\\\\S+"""', '""" """', 'x'], {}), "('#\\\\S+', ' ', x)\n", (467, 484), False, 'import re\n'), ((487, 509), 're.sub', 're.sub', (['"""\'\\\\w+"""', '""""""', 'x'], {}), '("\'\\\\w+", \'\', x)\n', (493, 509), False, 'import re\n'), ((572, 601), 're.sub', 're.sub', (['"""\\\\w*\\\\d+\\\\w*"""', '""""""', 'x'], {}), "('\\\\w*\\\\d+\\\\w*', '', x)\n", (578, 601), False, 'import re\n'), ((602, 627), 're.sub', 're.sub', 
(['"""\\\\s{2,}"""', '""" """', 'x'], {}), "('\\\\s{2,}', ' ', x)\n", (608, 627), False, 'import re\n'), ((1681, 1745), 'pickle.dump', 'pickle.dump', (['tokenizer', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (1692, 1745), False, 'import pickle\n'), ((2228, 2260), 'tensorflow.keras.layers.Embedding', 'layers.Embedding', (['max_words', '(128)'], {}), '(max_words, 128)\n', (2244, 2260), False, 'from tensorflow.keras import layers\n'), ((2272, 2309), 'tensorflow.keras.layers.GRU', 'layers.GRU', (['(64)'], {'return_sequences': '(True)'}), '(64, return_sequences=True)\n', (2282, 2309), False, 'from tensorflow.keras import layers\n'), ((2316, 2330), 'tensorflow.keras.layers.GRU', 'layers.GRU', (['(64)'], {}), '(64)\n', (2326, 2330), False, 'from tensorflow.keras import layers\n'), ((2344, 2381), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2)'], {'activation': '"""sigmoid"""'}), "(2, activation='sigmoid')\n", (2356, 2381), False, 'from tensorflow.keras import layers\n'), ((529, 558), 're.escape', 're.escape', (['string.punctuation'], {}), '(string.punctuation)\n', (538, 558), False, 'import re\n'), ((898, 923), 'nltk.tokenize.treebank.TreebankWordDetokenizer', 'TreebankWordDetokenizer', ([], {}), '()\n', (921, 923), False, 'from nltk.tokenize.treebank import TreebankWordDetokenizer\n')] |
from core.consumable import Consumable
from core.events import *
from core.engine import Engine
class Port:
    """A switch port: tracks flows entering through it plus arrival statistics."""

    def __init__(self, id):
        self.id = id
        self.target = None      # neighbour node this port connects to
        self.link_out = None
        self.link_in = None
        self.delegated = None
        # Per-decision-cycle flow counters; the engine resets them via
        # reset_arrival_counter().
        self.cnt_flows_arrived = 0
        self.cnt_flows_removed = 0
        self.flows = {}             # flow.id -> flow object
        self.arrival_data = []      # history of per-cycle arrival counts
        self.arrival_rate = 0
        self.arrival_last = 0
        self.arrival_remove = 0

    def register_flow(self, flow):
        """Record a flow entering via this port (each flow id counted once)."""
        if self.flows.get(flow.id):
            return
        self.cnt_flows_arrived += 1
        self.flows[flow.id] = flow

    def unregister_flow(self, flow):
        """Remove a previously registered flow and bump the removal counter."""
        entry = self.flows.get(flow.id)
        if entry:
            del self.flows[flow.id]
            self.cnt_flows_removed += 1

    def get_flows(self):
        """Return an iterator of (flow_id, flow) pairs for unfinished flows."""
        return filter(lambda item: item[1].is_finished == False,
                      self.flows.items())

    def reset_arrival_counter(self, history=10):
        """Archive the cycle counters and refresh the moving-average rate."""
        self.arrival_data.append(self.cnt_flows_arrived)
        self.arrival_last = self.cnt_flows_arrived
        self.cnt_flows_arrived = 0
        self.arrival_remove = self.cnt_flows_removed
        self.cnt_flows_removed = 0
        window = list(self.arrival_data[-history:])
        self.arrival_rate = sum(window) / float(len(window)) if window else 0
        # keep at most `history` entries so the stored data stays bounded
        self.arrival_data = self.arrival_data[-history:]
class FlowTable:
    """Per-switch flow table holder."""
    def __init__(self, switch):
        self.switch = switch  # back-reference to the owning Switch
        # flow-entry counter; not updated in this chunk — presumably
        # maintained by the engine (TODO confirm against Engine code)
        self.cnt_flows = 0
class Switch(Consumable):
    """A network switch node in the simulated topology.

    Builds one Port per neighbouring graph edge, owns a FlowTable, and
    forwards all simulation events to an Engine (comparable to a switch
    talking to its controller).
    """
    def __init__(self, ctx, **kwargs):
        super().__init__(ctx, **kwargs);
        self.id = kwargs.get("id") # networkx node id
        self.label = kwargs.get("label", "NoLabelSet"); # name in topology
        self.x = kwargs.get("x"); # coordinates in topology
        self.y = kwargs.get("y"); # coordinates in topology
        # create a port object for each port of the switch; these are used
        # to store and access port related statistics
        cnt = 0
        self.ports = {}
        for n in ctx.topo.graph.neighbors(self.id):
            port = Port(cnt)
            cnt += 1
            port.target = n
            # the '_link' on edge (n, self) is stored as the incoming link,
            # the one on edge (self, n) as the outgoing link
            port.link_in = ctx.topo.graph.edges[n, self.id]['_link']
            port.link_out = ctx.topo.graph.edges[self.id, n]['_link']
            # ports are keyed by the (neighbour, self) edge tuple
            self.ports[(n, self.id)] = port
        # create a flow table object for this switch
        self.flowtable = FlowTable(self)
        # logic of the switch is implemented inside the engine; This is
        # similar to connecting a switch to a controller
        self.engine = kwargs.get("engine", Engine(self.ctx, **kwargs)) # routing engine
        self.cnt_backdelegations = 0
        self.cnt_adddelegations = 0
    def reset_counter(self):
        """Reset the per-interval delegation counters."""
        self.cnt_backdelegations = 0
        self.cnt_adddelegations = 0
    def on_event(self, ev):
        """Dispatch a simulation event to the matching engine handler."""
        # periodic counter for statistics
        if isinstance(ev, EVStats):
            return self.engine.on_EVSwitchStats(self, ev)
        # a new flow arrives at the switch
        if isinstance(ev, EVSwitchNewFlow):
            return self.engine.on_EVSwitchNewFlow(self, ev)
        # the last packet of a flow arrives at the switch
        if isinstance(ev, EVSwitchLastPacketOfFlowArrived):
            return self.engine.on_EVSwitchLastPacketOfFlowArrived(self, ev)
| [
"core.engine.Engine"
] | [((3077, 3103), 'core.engine.Engine', 'Engine', (['self.ctx'], {}), '(self.ctx, **kwargs)\n', (3083, 3103), False, 'from core.engine import Engine\n')] |
# ---
# jupyter:
# jupytext_format_version: '1.2'
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# language_info:
# codemirror_mode:
# name: ipython
# version: 3
# file_extension: .py
# mimetype: text/x-python
# name: python
# nbconvert_exporter: python
# pygments_lexer: ipython3
# version: 3.6.4
# ---
# +
import sys
sys.path.insert(0, './../')
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import torch
from torchvision import datasets, transforms
import numpy as np
from matplotlib import pyplot as plt
import foolbox
from foolbox import attacks as fa
# own modules
from abs_models import utils as u
from abs_models import models as mz
from abs_models import attack_utils as au
# -
# Pick one model variant; the rest of the notebook is model-agnostic.
model = mz.get_VAE(n_iter=10)  # ABS, do n_iter=50 for original model
# model = mz.get_VAE(binary=True)  # ABS with scaling and binaryzation
# model = mz.get_binary_CNN()  # Binary CNN
# model = mz.get_CNN()  # Vanilla CNN
# model = mz.get_NearestNeighbor()  # Nearest Neighbor, "nearest L2 dist to each class"=logits
# model = mz.get_madry()  # Robust network from Madry et al. in tf
# code is agnostic of pytorch/ tensorflow model --> foolbox model
if model.code_base == 'tensorflow':
    fmodel = foolbox.models.TensorFlowModel(model.x_input, model.pre_softmax, (0., 1.),
                                            channel_axis=3)
elif model.code_base == 'pytorch':
    model.eval()
    fmodel = foolbox.models.PyTorchModel(model,   # return logits in shape (bs, n_classes)
                                        bounds=(0., 1.), num_classes=10,
                                        device=u.dev())
else:
    print('not implemented')
# test model: sanity-check clean accuracy on a large random batch
b, l = u.get_batch(bs=10000)  # returns random batch as np.array
pred_label = np.argmax(fmodel.batch_predictions(b), axis=1)
print('score', float(np.sum(pred_label == l)) / b.shape[0])
# # Decision based attacks
# Note that this is only demo code. All experiments were optimized to our compute architecture.
b, l = u.get_batch(bs=1)  # returns random batch
# +
import time
start = time.time()
att = fa.DeepFoolL2Attack(fmodel)
metric = foolbox.distances.MSE
criterion = foolbox.criteria.Misclassification()
plt.imshow(b[0, 0], cmap='gray')
plt.title('orig')
plt.axis('off')
plt.show()
# Estimate gradients from scores when the model exposes no analytic gradient
if not model.has_grad:
    GE = foolbox.gradient_estimators.CoordinateWiseGradientEstimator(0.1)
    fmodel = foolbox.models.ModelWithEstimatedGradients(fmodel, GE)
# generate Adversarial
a = foolbox.adversarial.Adversarial(fmodel, criterion, b[0], l[0], distance=metric)
att(a)
print('runtime', time.time() - start, 'seconds')
print('pred', np.argmax(fmodel.predictions(a.image)))
if a.image is not None:  # attack was successful
    plt.imshow(a.image[0], cmap='gray')
    plt.title('adv')
    plt.axis('off')
    plt.show()
# -
# # get Trash Adversarials
# Start from random noise and push it towards high confidence for a fixed class.
from foolbox.gradient_estimators import CoordinateWiseGradientEstimator as CWGE
a = np.random.random((1, 28, 28)).astype(np.float32)
a_helper = torch.tensor(torch.from_numpy(a.copy()), requires_grad=True)
fixed_class = 1
GE = CWGE(1.)
opti = torch.optim.SGD([a_helper], lr=1, momentum=0.95)
# +
confidence_level = model.confidence_level  # abs 0.0000031, CNN 1439000, madry 60, 1-NN 0.000000000004
logits_scale = model.logit_scale  # ABS 430, madry 1, CNN 1, 1-NN 5
a_orig = a
plt.imshow(u.t2n(a[0]), cmap='gray')
plt.show()
# Ascend the estimated gradient until the target-class probability reaches 0.9.
for i in range(10000):
    logits = fmodel.predictions(a)
    probs = u.t2n(u.confidence_softmax(logits_scale*torch.from_numpy(logits[None, :]), dim=1,
                                       const=confidence_level))[0]
    pred_class = np.argmax(u.t2n(logits).squeeze())
    if probs[fixed_class]>= 0.9:
        break
    grads = GE(fmodel.batch_predictions, a, fixed_class, (0,1))
    a = au.update_distal_adv(a, a_helper, grads, opti)
    if i % 1000 == 0:
        print(f'probs {probs[pred_class]:.3f} class', pred_class)
        fig, ax = plt.subplots(1,3, squeeze=False, figsize=(10, 4))
        ax[0, 0].imshow(u.t2n(a[0]), cmap='gray')
        ax[0, 1].imshow(u.t2n(grads[0]), cmap='gray')
        ax[0, 2].imshow(np.sign(grads[0]), cmap='gray')
        plt.show()
plt.imshow(u.t2n(a[0]), cmap='gray')
plt.show()
# -
# # Latent Descent Attack
# +
# only for abs
att = au.LineSearchAttack(model)  # BinaryLineSearchAttack
b, l = u.get_batch(bs=200)
advs = att(b, l, n_coarse_steps=50+1, n_ft_steps=2)
for adv in advs:
    adv['img'] = adv['img'].cpu().numpy()
# Show the first few original/adversarial pairs side by side.
for i, (a_i, b_i) in enumerate(zip(advs, b)):
    l2 = np.sqrt(a_i['distance'] * 784)  # convert from MSE
    fig, ax = plt.subplots(1, 2, squeeze=False)
    ax[0, 0].set_title(str(a_i['original_label']))
    ax[0, 0].imshow(u.t2n(b_i[0]), cmap='gray')
    ax[0, 1].set_title(str(a_i['adversarial_label']))
    ax[0, 1].imshow(u.t2n(a_i['img'][0]), cmap='gray')
    plt.show()
    if i ==10:
        break
print('mean L2', np.mean([np.sqrt(a_i['distance'] * 784) for a_i in advs]))
| [
"matplotlib.pyplot.title",
"numpy.sum",
"abs_models.attack_utils.LineSearchAttack",
"numpy.sqrt",
"foolbox.criteria.Misclassification",
"foolbox.models.TensorFlowModel",
"matplotlib.pyplot.imshow",
"abs_models.utils.get_batch",
"foolbox.adversarial.Adversarial",
"abs_models.models.get_VAE",
"abs... | [((405, 432), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./../"""'], {}), "(0, './../')\n", (420, 432), False, 'import sys\n'), ((800, 821), 'abs_models.models.get_VAE', 'mz.get_VAE', ([], {'n_iter': '(10)'}), '(n_iter=10)\n', (810, 821), True, 'from abs_models import models as mz\n'), ((1840, 1861), 'abs_models.utils.get_batch', 'u.get_batch', ([], {'bs': '(10000)'}), '(bs=10000)\n', (1851, 1861), True, 'from abs_models import utils as u\n'), ((2151, 2168), 'abs_models.utils.get_batch', 'u.get_batch', ([], {'bs': '(1)'}), '(bs=1)\n', (2162, 2168), True, 'from abs_models import utils as u\n'), ((2218, 2229), 'time.time', 'time.time', ([], {}), '()\n', (2227, 2229), False, 'import time\n'), ((2236, 2263), 'foolbox.attacks.DeepFoolL2Attack', 'fa.DeepFoolL2Attack', (['fmodel'], {}), '(fmodel)\n', (2255, 2263), True, 'from foolbox import attacks as fa\n'), ((2307, 2343), 'foolbox.criteria.Misclassification', 'foolbox.criteria.Misclassification', ([], {}), '()\n', (2341, 2343), False, 'import foolbox\n'), ((2345, 2377), 'matplotlib.pyplot.imshow', 'plt.imshow', (['b[0, 0]'], {'cmap': '"""gray"""'}), "(b[0, 0], cmap='gray')\n", (2355, 2377), True, 'from matplotlib import pyplot as plt\n'), ((2378, 2395), 'matplotlib.pyplot.title', 'plt.title', (['"""orig"""'], {}), "('orig')\n", (2387, 2395), True, 'from matplotlib import pyplot as plt\n'), ((2396, 2411), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2404, 2411), True, 'from matplotlib import pyplot as plt\n'), ((2412, 2422), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2420, 2422), True, 'from matplotlib import pyplot as plt\n'), ((2650, 2729), 'foolbox.adversarial.Adversarial', 'foolbox.adversarial.Adversarial', (['fmodel', 'criterion', 'b[0]', 'l[0]'], {'distance': 'metric'}), '(fmodel, criterion, b[0], l[0], distance=metric)\n', (2681, 2729), False, 'import foolbox\n'), ((3250, 3259), 'foolbox.gradient_estimators.CoordinateWiseGradientEstimator', 'CWGE', 
(['(1.0)'], {}), '(1.0)\n', (3254, 3259), True, 'from foolbox.gradient_estimators import CoordinateWiseGradientEstimator as CWGE\n'), ((3267, 3315), 'torch.optim.SGD', 'torch.optim.SGD', (['[a_helper]'], {'lr': '(1)', 'momentum': '(0.95)'}), '([a_helper], lr=1, momentum=0.95)\n', (3282, 3315), False, 'import torch\n'), ((3562, 3572), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3570, 3572), True, 'from matplotlib import pyplot as plt\n'), ((4372, 4382), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4380, 4382), True, 'from matplotlib import pyplot as plt\n'), ((4440, 4466), 'abs_models.attack_utils.LineSearchAttack', 'au.LineSearchAttack', (['model'], {}), '(model)\n', (4459, 4466), True, 'from abs_models import attack_utils as au\n'), ((4501, 4520), 'abs_models.utils.get_batch', 'u.get_batch', ([], {'bs': '(200)'}), '(bs=200)\n', (4512, 4520), True, 'from abs_models import utils as u\n'), ((1374, 1470), 'foolbox.models.TensorFlowModel', 'foolbox.models.TensorFlowModel', (['model.x_input', 'model.pre_softmax', '(0.0, 1.0)'], {'channel_axis': '(3)'}), '(model.x_input, model.pre_softmax, (0.0, 1.0),\n channel_axis=3)\n', (1404, 1470), False, 'import foolbox\n'), ((2490, 2554), 'foolbox.gradient_estimators.CoordinateWiseGradientEstimator', 'foolbox.gradient_estimators.CoordinateWiseGradientEstimator', (['(0.1)'], {}), '(0.1)\n', (2549, 2554), False, 'import foolbox\n'), ((2568, 2622), 'foolbox.models.ModelWithEstimatedGradients', 'foolbox.models.ModelWithEstimatedGradients', (['fmodel', 'GE'], {}), '(fmodel, GE)\n', (2610, 2622), False, 'import foolbox\n'), ((2898, 2933), 'matplotlib.pyplot.imshow', 'plt.imshow', (['a.image[0]'], {'cmap': '"""gray"""'}), "(a.image[0], cmap='gray')\n", (2908, 2933), True, 'from matplotlib import pyplot as plt\n'), ((2938, 2954), 'matplotlib.pyplot.title', 'plt.title', (['"""adv"""'], {}), "('adv')\n", (2947, 2954), True, 'from matplotlib import pyplot as plt\n'), ((2959, 2974), 'matplotlib.pyplot.axis', 
'plt.axis', (['"""off"""'], {}), "('off')\n", (2967, 2974), True, 'from matplotlib import pyplot as plt\n'), ((2979, 2989), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2987, 2989), True, 'from matplotlib import pyplot as plt\n'), ((3536, 3547), 'abs_models.utils.t2n', 'u.t2n', (['a[0]'], {}), '(a[0])\n', (3541, 3547), True, 'from abs_models import utils as u\n'), ((3953, 3999), 'abs_models.attack_utils.update_distal_adv', 'au.update_distal_adv', (['a', 'a_helper', 'grads', 'opti'], {}), '(a, a_helper, grads, opti)\n', (3973, 3999), True, 'from abs_models import attack_utils as au\n'), ((4346, 4357), 'abs_models.utils.t2n', 'u.t2n', (['a[0]'], {}), '(a[0])\n', (4351, 4357), True, 'from abs_models import utils as u\n'), ((4690, 4720), 'numpy.sqrt', 'np.sqrt', (["(a_i['distance'] * 784)"], {}), "(a_i['distance'] * 784)\n", (4697, 4720), True, 'import numpy as np\n'), ((4756, 4789), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'squeeze': '(False)'}), '(1, 2, squeeze=False)\n', (4768, 4789), True, 'from matplotlib import pyplot as plt\n'), ((5002, 5012), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5010, 5012), True, 'from matplotlib import pyplot as plt\n'), ((2758, 2769), 'time.time', 'time.time', ([], {}), '()\n', (2767, 2769), False, 'import time\n'), ((3108, 3137), 'numpy.random.random', 'np.random.random', (['(1, 28, 28)'], {}), '((1, 28, 28))\n', (3124, 3137), True, 'import numpy as np\n'), ((4106, 4156), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'squeeze': '(False)', 'figsize': '(10, 4)'}), '(1, 3, squeeze=False, figsize=(10, 4))\n', (4118, 4156), True, 'from matplotlib import pyplot as plt\n'), ((4324, 4334), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4332, 4334), True, 'from matplotlib import pyplot as plt\n'), ((4861, 4874), 'abs_models.utils.t2n', 'u.t2n', (['b_i[0]'], {}), '(b_i[0])\n', (4866, 4874), True, 'from abs_models import utils as u\n'), ((4963, 4983), 
'abs_models.utils.t2n', 'u.t2n', (["a_i['img'][0]"], {}), "(a_i['img'][0])\n", (4968, 4983), True, 'from abs_models import utils as u\n'), ((1979, 2002), 'numpy.sum', 'np.sum', (['(pred_label == l)'], {}), '(pred_label == l)\n', (1985, 2002), True, 'import numpy as np\n'), ((4180, 4191), 'abs_models.utils.t2n', 'u.t2n', (['a[0]'], {}), '(a[0])\n', (4185, 4191), True, 'from abs_models import utils as u\n'), ((4230, 4245), 'abs_models.utils.t2n', 'u.t2n', (['grads[0]'], {}), '(grads[0])\n', (4235, 4245), True, 'from abs_models import utils as u\n'), ((4284, 4301), 'numpy.sign', 'np.sign', (['grads[0]'], {}), '(grads[0])\n', (4291, 4301), True, 'import numpy as np\n'), ((5068, 5098), 'numpy.sqrt', 'np.sqrt', (["(a_i['distance'] * 784)"], {}), "(a_i['distance'] * 784)\n", (5075, 5098), True, 'import numpy as np\n'), ((1774, 1781), 'abs_models.utils.dev', 'u.dev', ([], {}), '()\n', (1779, 1781), True, 'from abs_models import utils as u\n'), ((3800, 3813), 'abs_models.utils.t2n', 'u.t2n', (['logits'], {}), '(logits)\n', (3805, 3813), True, 'from abs_models import utils as u\n'), ((3684, 3717), 'torch.from_numpy', 'torch.from_numpy', (['logits[None, :]'], {}), '(logits[None, :])\n', (3700, 3717), False, 'import torch\n')] |
#
# Copyright (C) 2021 <NAME> <<EMAIL>>
# License: MIT
#
# pylint: disable=missing-docstring
import unittest
import anyconfig.api
from . import common
# Loader backends available in this anyconfig installation; each test case
# below is skipped when its backend's loader is missing.
LOADER_TYPES = frozenset(anyconfig.api.list_types())
@pytest.mark.skipif('yaml' not in LOADER_TYPES,
                reason='yaml loader is not available') if False else unittest.skipIf('yaml' not in LOADER_TYPES,
                'yaml loader is not available')
class YamlTestCase(common.TestCase):
    # Run the shared common.TestCase battery against YAML fixtures.
    kind = 'yaml'
    pattern = '*.yml'
@unittest.skipIf('toml' not in LOADER_TYPES,
                'toml loader is not available')
class TomlTestCase(YamlTestCase):
    # Same battery as YamlTestCase, run against TOML fixtures.
    kind = 'toml'
    pattern = '*.toml'
# vim:sw=4:ts=4:et:
| [
"unittest.skipIf"
] | [((211, 286), 'unittest.skipIf', 'unittest.skipIf', (["('yaml' not in LOADER_TYPES)", '"""yaml loader is not available"""'], {}), "('yaml' not in LOADER_TYPES, 'yaml loader is not available')\n", (226, 286), False, 'import unittest\n'), ((384, 459), 'unittest.skipIf', 'unittest.skipIf', (["('toml' not in LOADER_TYPES)", '"""toml loader is not available"""'], {}), "('toml' not in LOADER_TYPES, 'toml loader is not available')\n", (399, 459), False, 'import unittest\n')] |
from entities.cache.cache_options import CacheOptions
import os
from services.file_service import FileService
from torch._C import dtype
import numpy as np
import torch
from tqdm import tqdm
from typing import List, Tuple
from enums.ocr_output_type import OCROutputType
from enums.language import Language
from services.process.process_service_base import ProcessServiceBase
from services.download.ocr_download_service import OCRDownloadService
from services.arguments.ocr_quality_non_context_arguments_service import OCRQualityNonContextArgumentsService
from services.cache_service import CacheService
from services.log_service import LogService
from services.vocabulary_service import VocabularyService
from services.tokenize.base_tokenize_service import BaseTokenizeService
class ICDARProcessService(ProcessServiceBase):
    """Prepares ICDAR OCR / ground-truth data and the matching vocabulary.

    On construction it tries to load a cached vocabulary keyed by the
    configured datasets and OCR output type; if none is cached, it downloads
    the raw data, tokenizes it and builds the vocabulary from scratch.
    """
    def __init__(
            self,
            ocr_download_service: OCRDownloadService,
            arguments_service: OCRQualityNonContextArgumentsService,
            cache_service: CacheService,
            vocabulary_service: VocabularyService,
            tokenize_service: BaseTokenizeService,
            log_service: LogService):
        self._arguments_service = arguments_service
        self._cache_service = cache_service
        self._ocr_download_service = ocr_download_service
        self._vocabulary_service = vocabulary_service
        self._tokenize_service = tokenize_service
        self._log_service = log_service
        self._min_occurrence_limit = self._arguments_service.minimal_occurrence_limit
        # cache key for the vocabulary, specific to datasets + output type
        self._vocab_key = f'vocab-{self._get_dataset_string()}-{arguments_service.ocr_output_type.value}'
        if not self._vocabulary_service.load_cached_vocabulary(self._vocab_key):
            self._log_service.log_debug(
                'Vocabulary was not loaded. Attempting to initialize...')
            self._initialize_vocabulary()
        else:
            self._log_service.log_debug('Vocabulary loaded successfully')
    def _initialize_vocabulary(self):
        """Download the raw data, tokenize the configured side (OCR or GT)
        and build the vocabulary from it."""
        self._ocr_download_service.download_data(
            self._arguments_service.language)
        ocr_data, gs_data = self._read_data()
        tokenized_data = self._tokenize_service.tokenize_sequences(
            gs_data if self._arguments_service.ocr_output_type == OCROutputType.GroundTruth else ocr_data
        )
        self._log_service.log_debug(
            f'Tokenized {len(tokenized_data)} strings successfully')
        self._vocabulary_service.initialize_vocabulary_from_corpus(
            tokenized_data,
            min_occurrence_limit=self._min_occurrence_limit,
            vocab_key=self._vocab_key)
    def _generate_ocr_corpora(self):
        """Tokenize both sides, cache their common tokens, and turn the
        configured side into id sequences passed to _generate_corpora_entries."""
        ocr_data, gs_data = self._read_data()
        tokenized_ocr_data = self._tokenize_service.tokenize_sequences(
            ocr_data)
        tokenized_gs_data = self._tokenize_service.tokenize_sequences(gs_data)
        self._save_common_tokens(tokenized_ocr_data, tokenized_gs_data)
        ocr_output_type = self._arguments_service.ocr_output_type
        data_ids = [self._vocabulary_service.string_to_ids(
            x) for x in (tokenized_ocr_data if ocr_output_type == OCROutputType.Raw else tokenized_gs_data)]
        result = self._generate_corpora_entries(data_ids)
        return result
    def _generate_corpora_entries(self, data_ids):
        # Base implementation yields nothing; presumably overridden by
        # subclasses — TODO confirm against the concrete process services.
        return None
    def _save_common_tokens(self, tokenized_ocr_data: List[List[str]], tokenized_gs_data: List[List[str]]):
        """Saves the intersection of the tokens from both output types, as well as the ids of these tokens for the current output type

        :param tokenized_ocr_data: The tokenized data for OCR output type
        :type tokenized_ocr_data: List[List[str]]
        :param tokenized_gs_data: The tokenized data for GT output type
        :type tokenized_gs_data: List[List[str]]
        """
        self._log_service.log_debug('Saving common tokens')
        token_pairs_cache_key = f'common-t-pairs-{self._get_dataset_string()}-{self._arguments_service.ocr_output_type.value}-lim-{self._arguments_service.minimal_occurrence_limit}'
        # skip the work if the pairs were already cached for this config
        if self._cache_service.item_exists(CacheOptions(token_pairs_cache_key)):
            return
        common_tokens = self._cache_service.get_item_from_cache(
            CacheOptions(
                f'common-tokens-{self._get_dataset_string()}',
                configuration_specific=False),
            callback_function=lambda: self._combine_common_words(tokenized_ocr_data, tokenized_gs_data))
        token_id_pairs = []
        for common_token in common_tokens:
            token_ids = [self._vocabulary_service.string_to_id(common_token)]
            # tokens missing from the vocabulary are stored with ids = None
            if token_ids[0] == self._vocabulary_service.unk_token:
                token_ids = None
            token_id_pairs.append((common_token, token_ids))
        self._cache_service.cache_item(
            token_id_pairs,
            CacheOptions(token_pairs_cache_key))
        self._log_service.log_debug(
            f'Saved {len(token_id_pairs)} common token pairs successfully')
    def _combine_common_words(self, tokenized_ocr_data: List[List[str]], tokenized_gs_data: List[List[str]]):
        """Return the tokens that occur in both the OCR and the GT corpora."""
        ocr_unique_tokens = set(
            [item for sublist in tokenized_ocr_data for item in sublist])
        gs_unique_tokens = set(
            [item for sublist in tokenized_gs_data for item in sublist])
        common_tokens = list(ocr_unique_tokens & gs_unique_tokens)
        return common_tokens
    def _load_file_data(self):
        """Concatenate the downloaded (ocr, gs) sequences of all configured
        datasets, skipping datasets that were not downloaded."""
        number_of_files = len(self._arguments_service.datasets)
        ocr_file_data = []
        gs_file_data = []
        for i, dataset in enumerate(self._arguments_service.datasets):
            print(f'{i}/{number_of_files}             \r', end='')
            result = self._ocr_download_service.get_downloaded_dataset(dataset)
            if result is None:
                self._log_service.log_debug(
                    f'Did not find \'{dataset}\' dataset to load')
                continue
            else:
                self._log_service.log_debug(f'Loading \'{dataset}\' data')
            ocr_file_data.extend(result[0])
            gs_file_data.extend(result[1])
        return ocr_file_data, gs_file_data
    def _read_data(self):
        """Return (ocr, gs) file data, loading from cache when available."""
        ocr_file_data, gs_file_data = self._cache_service.get_item_from_cache(
            CacheOptions(
                f'ocr-gs-file-data-{self._get_dataset_string()}',
                configuration_specific=False),
            callback_function=self._load_file_data)
        return ocr_file_data, gs_file_data
    def _get_dataset_string(self):
        """Deterministic identifier for the configured dataset combination."""
        return '-'.join(sorted(self._arguments_service.datasets))
"entities.cache.cache_options.CacheOptions"
] | [((4164, 4199), 'entities.cache.cache_options.CacheOptions', 'CacheOptions', (['token_pairs_cache_key'], {}), '(token_pairs_cache_key)\n', (4176, 4199), False, 'from entities.cache.cache_options import CacheOptions\n'), ((4921, 4956), 'entities.cache.cache_options.CacheOptions', 'CacheOptions', (['token_pairs_cache_key'], {}), '(token_pairs_cache_key)\n', (4933, 4956), False, 'from entities.cache.cache_options import CacheOptions\n')] |
"""
# T: maturity
# n: # option periods
# N: # futures periods
# S: initial stock price
# r: continuously-compounded interest rate
# c: dividend yield
# sigma: annualized volatility
# K: strike price
# cp: +1/-1 with regards to call/put
"""
from __future__ import division
from math import exp, sqrt
import numpy as np
import math
# Global model parameters (see the module docstring for each symbol).
T = 0.25 # maturity in years
n = 15 # option periods
N = 15 # futures periods
S = 100 #initial stock price
r = 0.02 #continuously-compounded interest rate
c = 0.01 #dividend yield
sigma = 0.3 #annualized volatility
K = 110 #strike price
cp = -1 #with regards to call/put
def Parameter(T,n,sigma,r,c):
    """Derive the CRR lattice quantities for one time step.

    Returns (u, d, q1, q2, R): the up/down factors, the risk-neutral
    up/down probabilities, and the per-period growth factor exp(r*dt).
    """
    step = T / n
    up = exp(sigma * sqrt(step))
    down = 1 / up
    prob_up = (exp((r - c) * step) - down) / (up - down)
    return (up, down, prob_up, 1 - prob_up, exp(r * step))
# =============================================================================
def GenerateTree(T,n,S,sigma,r,c):
    """Build the (n+1)x(n+1) binomial stock-price lattice.

    Column i holds the prices after i steps; row j counts down-moves, so
    tree[j, i] = S * u**(i-j) * d**j for j <= i (entries below the
    diagonal remain zero).
    """
    u, d, _, _, _ = Parameter(T, n, sigma, r, c)
    tree = np.zeros((n + 1, n + 1))
    tree[0, 0] = S
    for col in range(1, n + 1):
        # top row: pure up-moves
        tree[0, col] = tree[0, col - 1] * u
        for row in range(1, n + 1):
            # every other node: one extra down-move from the previous column
            tree[row, col] = tree[row - 1, col - 1] * d
    return tree
# =============================================================================
def StockOptionAM(T,n,S,r,c,sigma,K,cp):
    """Price an American option on the binomial lattice.

    Returns (price, when): the root option value and the earliest period
    at which immediate exercise is optimal somewhere in the tree; ``when``
    is ``n`` if early exercise never pays (always the case for American
    calls on a non-dividend-paying stock).

    :param cp: +1 for a call, -1 for a put
    """
    u, d, q1, q2, R = Parameter(T, n, sigma, r, c)
    stockTree = GenerateTree(T, n, S, sigma, r, c)
    optionTree = np.zeros((n + 1, n + 1))
    # terminal payoffs at maturity
    for j in range(n + 1):
        optionTree[j, n] = max(0, cp * (stockTree[j, n] - K))
    # Collect every period where immediate exercise ties/beats continuation.
    # (The original used the name `list`, shadowing the builtin.)
    exercise_periods = []
    for i in range(n - 1, -1, -1):
        for j in range(i + 1):
            continuation = (q1 * optionTree[j, i + 1] + q2 * optionTree[j + 1, i + 1]) / R
            exercise = cp * (stockTree[j, i] - K)
            optionTree[j, i] = max(continuation, exercise)
            # tolerance guards against floating-point ties
            if (optionTree[j, i] - exercise) < 1e-10:
                exercise_periods.append(i)
    # periods were appended in descending order, so the last is the earliest
    when = exercise_periods[-1] if exercise_periods else n
    print(optionTree, when)  # debug output kept from the original implementation
    return (optionTree[0, 0], when)
# Demo run: price the American option defined by the module-level parameters.
z = StockOptionAM(T,n,S,r,c,sigma,K,cp)
# Expiry period (in lattice steps) for the option-on-futures examples below.
option_maturity = 10
class bs_bin_tree:
    """Cox-Ross-Rubinstein binomial tree for European/American options,
    futures prices, options on futures and chooser options.

    ``u`` is the up factor (down factor is 1/u), ``q`` the risk-neutral
    up-probability and ``R`` the per-period growth factor exp(r*T/n).
    Lattices are stored as lists of lists: level i has i+1 node values in
    ascending price order.
    """
    def __init__(self,T,s0,r,sigma,c,K,n):
        self.T = T
        self.r = r
        self.c = c
        self.sigma = sigma
        self.K = K
        self.s0 = s0
        self.n = n
        # CRR up factor for one step of length T/n
        self.u = math.exp(self.sigma*np.sqrt(self.T/self.n))
        # risk-neutral up-probability, dividend-adjusted drift
        self.q = (math.exp((self.r-self.c)*T/self.n)-(1/self.u))/(self.u-(1/self.u))
        # per-period growth/discount factor
        self.R = math.exp(self.r*self.T/self.n)
        self.__print_param__()
    def __print_param__(self):
        """Print the model parameters (called once from __init__)."""
        print('Time',self.T)
        print('Starting Price',self.s0)
        print('r',self.r)
        print('volatility',self.sigma)
        print('dividend yield',self.c)
        print('strike',self.K)
        print('# period',self.n)
    def generate_price(self):
        """Build the stock-price lattice; arr[i][j] = s0 * u**(2*j - i)."""
        arr=[[self.s0]]
        for i in range(self.n):
            arr_to_add=[]
            for j in range(len(arr[i])):
                arr_to_add.append(arr[i][j]/self.u)
                # the highest node additionally spawns an up-move child
                if j == (len(arr[i])-1):
                    arr_to_add.append(arr[i][j]*self.u)
            arr.append(arr_to_add)
        return arr
    def neutral_pricing(self,p1,p2):
        """One-step risk-neutral value: p1 = down-child, p2 = up-child."""
        price = ((1-self.q)*p1 + (self.q)*p2)/self.R
        return price
    def eu_put(self):
        """Backward-induct European put values; returns the value lattice
        (same layout as generate_price())."""
        arr = self.generate_price()
        arr_rev = arr[::-1]
        res=[]
        for i in range(len(arr_rev)):
            res_to_add = []
            for j in range(len(arr_rev[i])):
                if i == 0:
                    # terminal payoff
                    a = max(self.K-arr_rev[i][j],0)
                    res_to_add.append(a)
                else:
                    price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
                    #a = max(arr_rev[i][j]-strike,0)
                    #a = max(a,price)
                    a = price
                    res_to_add.append(a)
            res.append(res_to_add)
        return res[::-1]
    def eu_call(self):
        """Backward-induct European call values; returns the value lattice."""
        arr = self.generate_price()
        arr_rev = arr[::-1]
        res=[]
        for i in range(len(arr_rev)):
            res_to_add = []
            for j in range(len(arr_rev[i])):
                if i == 0:
                    # terminal payoff
                    a = max(arr_rev[i][j]-self.K,0)
                    res_to_add.append(a)
                else:
                    price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
                    #a = max(arr_rev[i][j]-strike,0)
                    #a = max(a,price)
                    a = price
                    res_to_add.append(a)
            res.append(res_to_add)
        return res[::-1]
    def us_call(self):
        """American call value lattice: max(exercise, continuation) per node."""
        arr = self.generate_price()
        arr_rev = arr[::-1]
        res=[]
        for i in range(len(arr_rev)):
            res_to_add = []
            for j in range(len(arr_rev[i])):
                if i == 0:
                    a = max(arr_rev[i][j]-self.K,0)
                    res_to_add.append(a)
                else:
                    price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
                    a1 = max(arr_rev[i][j]-self.K,0)
                    a = max(a1,price)
                    res_to_add.append(a)
            res.append(res_to_add)
        return res[::-1]
    def us_call_price(self):
        """Root value of the American call."""
        return self.us_call()[0][0]
    def us_put(self):
        """American put value lattice: max(exercise, continuation) per node."""
        arr = self.generate_price()
        arr_rev = arr[::-1]
        res=[]
        for i in range(len(arr_rev)):
            res_to_add = []
            for j in range(len(arr_rev[i])):
                if i == 0:
                    a = max(self.K-arr_rev[i][j],0)
                    res_to_add.append(a)
                else:
                    price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
                    a1 = max(self.K - arr_rev[i][j],0)
                    a = max(a1,price)
                    res_to_add.append(a)
            res.append(res_to_add)
        return res[::-1]
    def us_put_price(self):
        """Root value of the American put."""
        return self.us_put()[0][0]
    def us_put_early_ex(self):
        """Return {earliest_exercise_period: max_exercise_payoff_then} when
        early exercise of the American put is ever optimal, else False."""
        early_ex = False
        early_ex_earning = 0
        early_ex_time = self.n
        arr = self.generate_price()
        arr_rev = arr[::-1]
        res=[]
        for i in range(len(arr_rev)):
            res_to_add = []
            for j in range(len(arr_rev[i])):
                if i == 0:
                    a = max(self.K-arr_rev[i][j],0)
                    res_to_add.append(a)
                else:
                    price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
                    a1 = max(self.K-arr_rev[i][j],0)
                    if a1 > price:
                        # keep the max payoff within a period, restart on a new one
                        if early_ex_time == self.n - i:
                            early_ex_earning = max(early_ex_earning,a1)
                        else:
                            early_ex_earning = a1
                        early_ex =True
                        early_ex_time = self.n - i
                    a = max(a1,price)
                    res_to_add.append(a)
            res.append(res_to_add)
        return {early_ex_time:early_ex_earning} if early_ex == True else False
    def us_put_call_parity(self):
        """Check P + S0*e^(-cT) == C + K*e^(-rT).

        NOTE(review): exact float equality of American option values — this
        comparison is virtually always False; kept as in the original.
        """
        LHS = self.us_put_price() + self.s0 * math.exp(-self.c * self.T)
        RHS = self.us_call_price() + self.K * math.exp(-self.r * self.T)
        print('Put Side',LHS)
        print('Call Side',RHS)
        return LHS==RHS
    def generate_future_price(self):
        """Futures-price lattice: terminal prices equal spot, earlier nodes
        are undiscounted risk-neutral expectations (note the *self.R)."""
        arr = self.generate_price()
        arr_rev = arr[::-1]
        res=[]
        for i in range(len(arr_rev)):
            res_to_add = []
            for j in range(len(arr_rev[i])):
                if i == 0:
                    res_to_add.append(arr_rev[i][j])
                else:
                    price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])*self.R
                    res_to_add.append(price)
            res.append(res_to_add)
        return res[::-1]
    def option_on_future(self,option_maturity):
        """American call on the futures price, expiring at option_maturity;
        returns the value lattice up to that period."""
        arr = self.generate_future_price()[0:option_maturity+1]
        arr_rev = arr[::-1]
        res=[]
        for i in range(option_maturity+1):
            res_to_add = []
            for j in range(len(arr_rev[i])):
                if i == 0:
                    a = max(arr_rev[i][j]-self.K,0)
                    res_to_add.append(a)
                else:
                    price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
                    a1 = max(arr_rev[i][j]-self.K,0)
                    a = max(a1,price)
                    res_to_add.append(a)
            res.append(res_to_add)
        return res[::-1]
    def option_price_on_future(self,option_maturity):
        """Root value of the American call on the futures price."""
        return self.option_on_future(option_maturity)[0][0]
    def option_on_future_early_ex(self,option_maturity):
        """Like us_put_early_ex, for the American call on futures: returns
        {earliest_exercise_period: payoff} or False.

        NOTE(review): the period is stored as len(arr_rev)-i-1 but compared
        against option_maturity - i; these agree only because the sliced
        lattice has option_maturity+1 levels — verify before changing.
        """
        arr = self.generate_future_price()[0:option_maturity+1]
        arr_rev = arr[::-1]
        res=[]
        early_ex = False
        early_ex_earning = 0
        early_ex_time = self.n
        for i in range(option_maturity+1):
            res_to_add = []
            for j in range(len(arr_rev[i])):
                if i == 0:
                    a = max(arr_rev[i][j]-self.K,0)
                    res_to_add.append(a)
                else:
                    price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
                    a1 = max(arr_rev[i][j]-self.K,0)
                    if a1 > price:
                        if early_ex_time == option_maturity - i:
                            early_ex_earning = max(early_ex_earning,a1)
                        else:
                            early_ex_earning = a1
                        early_ex =True
                        early_ex_time = len(arr_rev) - i -1
                    a = max(a1,price)
                    res_to_add.append(a)
            res.append(res_to_add)
        return {early_ex_time:early_ex_earning} if early_ex == True else False
    def nCr(self,n,r):
        """Binomial coefficient n choose r (computed via factorials; float)."""
        f = math.factorial
        return f(n) / f(r) / f(n-r)
    def chooser_option_price(self,option_expire):
        """Value of the right to choose, at option_expire, between the
        European call and put: discounted binomial expectation of
        max(call, put) over the nodes at that period."""
        call = self.eu_call()[option_expire]
        put = self.eu_put()[option_expire]
        res=[]
        for i in range(len(call)):
            res.append(max(call[i],put[i]))
        result=0
        for j in range(0,len(res)):
            # node j is reached with probability C(m, j) q^j (1-q)^(m-j)
            result += self.nCr(option_expire,j)* (self.q**(j)) * (1-self.q)**(option_expire-j) * res[j]
        return (result/self.R**(option_expire))
# Smoke-test the binomial tree implementation with the module parameters.
tree = bs_bin_tree(T, 100, r, sigma, c, K, n)
print(tree.us_call())
print(tree.us_call_price())
print(tree.us_put())
print(tree.us_put_price())
print(tree.us_put_early_ex())
print(tree.us_put_call_parity())
print(tree.option_on_future(option_maturity))
print(tree.option_price_on_future(option_maturity))
print(tree.option_on_future_early_ex(option_maturity))
print(tree.chooser_option_price(10))
| [
"math.exp",
"numpy.zeros",
"math.sqrt",
"numpy.sqrt"
] | [((771, 782), 'math.exp', 'exp', (['(r * dt)'], {}), '(r * dt)\n', (774, 782), False, 'from math import exp, sqrt\n'), ((1034, 1058), 'numpy.zeros', 'np.zeros', (['(n + 1, n + 1)'], {}), '((n + 1, n + 1))\n', (1042, 1058), True, 'import numpy as np\n'), ((1755, 1779), 'numpy.zeros', 'np.zeros', (['(n + 1, n + 1)'], {}), '((n + 1, n + 1))\n', (1763, 1779), True, 'import numpy as np\n'), ((2850, 2884), 'math.exp', 'math.exp', (['(self.r * self.T / self.n)'], {}), '(self.r * self.T / self.n)\n', (2858, 2884), False, 'import math\n'), ((689, 697), 'math.sqrt', 'sqrt', (['dt'], {}), '(dt)\n', (693, 697), False, 'from math import exp, sqrt\n'), ((726, 743), 'math.exp', 'exp', (['((r - c) * dt)'], {}), '((r - c) * dt)\n', (729, 743), False, 'from math import exp, sqrt\n'), ((2724, 2748), 'numpy.sqrt', 'np.sqrt', (['(self.T / self.n)'], {}), '(self.T / self.n)\n', (2731, 2748), True, 'import numpy as np\n'), ((2766, 2806), 'math.exp', 'math.exp', (['((self.r - self.c) * T / self.n)'], {}), '((self.r - self.c) * T / self.n)\n', (2774, 2806), False, 'import math\n'), ((7626, 7652), 'math.exp', 'math.exp', (['(-self.c * self.T)'], {}), '(-self.c * self.T)\n', (7634, 7652), False, 'import math\n'), ((7699, 7725), 'math.exp', 'math.exp', (['(-self.r * self.T)'], {}), '(-self.r * self.T)\n', (7707, 7725), False, 'import math\n')] |
from fenics import *
from matplotlib.pyplot import show
from dolfin_adjoint import *
from ufl_dnn.neural_network import ANN
class BoundaryOuter(SubDomain):
    # FEniCS SubDomain that marks the entire outer boundary of the mesh.
    def inside(self, x, on_boundary):
        # Every point FEniCS flags as lying on the boundary belongs here.
        return on_boundary
def a_u(u, v):
    # Elasticity bilinear form (gradient + transposed-gradient term).
    # NOTE(review): `my` is a free variable that is only defined *inside*
    # test_SteadyBiot_dnn(), not at module scope -- calling a_u() directly
    # would raise NameError; verify intended usage.
    return my * (inner(grad(u), grad(v)) + inner(grad(u), nabla_grad(v))) * dx
def a_p(K, p, q):
    # Pressure diffusion bilinear form with permeability K.
    return K * dot(grad(p), grad(q)) * dx
def b(s, v):
    # Coupling form: scalar s against the divergence of displacement v.
    return s * div(v) * dx
def c(alpha, p, q):
    # Scaled pressure mass form.
    # NOTE(review): `Lambda` is likewise a free variable only defined inside
    # the test function (same concern as `my` in a_u above).
    return alpha / Lambda * dot(p, q) * dx
def F(f, v):
    # Right-hand-side load form.
    return dot(f, v) * dx
def test_SteadyBiot_dnn():
    """Manufactured-solution test for a steady Biot poroelasticity problem
    where an unknown source term is recovered by a small neural network
    (ufl_dnn.ANN) trained via dolfin-adjoint / L-BFGS.
    """
    import sympy as sym
    # Symbolic manufactured solution for displacement (u, v) and pressures.
    x, y = sym.symbols("x[0], x[1]")
    my = 1 / 3
    Lambda = 16666
    alpha = 1.0
    c = 1.0
    K = 1.0
    u = (
        sym.sin(2 * sym.pi * y) * (-1 + sym.cos(2 * sym.pi * x))
        + 1 / (my + Lambda) * sym.sin(sym.pi * x) * sym.sin(sym.pi * y)
    )
    v = (
        sym.sin(2 * sym.pi * x) * (1 - sym.cos(2 * sym.pi * y))
        + 1 / (my + Lambda) * sym.sin(sym.pi * x) * sym.sin(sym.pi * y)
    )
    p1 = -1 * sym.sin(sym.pi * x) * sym.sin(sym.pi * y)  # p Network1
    # Total pressure derived from the displacement divergence.
    p0 = Lambda * (sym.diff(u, x, 1) + sym.diff(v, y, 1)) - alpha * p1
    fx,fy = 0.0,0.0 #force term
    # Exact source term the network is supposed to learn: -K * laplacian(p1).
    g_ex = -K * (sym.diff(p1, x, 2) + sym.diff(p1, y, 2))
    variables = [
        u,
        v,
        p0,
        p1,
        my,
        Lambda,
        alpha,
        c,
        K,
        fx,
        fy,
        g_ex,
    ]
    variables = [sym.printing.ccode(var) for var in variables]  # Generate C++ code
    UFLvariables = [Expression(var, degree=2) for var in variables]
    (
        u,
        v,
        p0,
        p1,
        my,
        Lambda,
        alpha,
        c,
        K,
        fx,
        fy,
        g_ex,
    ) = UFLvariables
    f = as_vector((fx, fy))
    mesh = UnitSquareMesh(10, 10)
    g = [g_ex]
    # NOTE(review): alpha/c/K are re-bound to lists here; `E = K * ...` below
    # then multiplies a *list* into the form -- confirm this is intentional.
    alpha = [1, alpha]
    c = [c]
    K = [K]
    # Generate function space
    V = VectorElement("CG", triangle, 2)  # Displacement
    Q_0 = FiniteElement("CG", triangle, 1)  # Total pressure
    Q_1 = FiniteElement("CG", triangle, 1) #Network 1
    mixedElement = []
    mixedElement.append(V)
    mixedElement.append(Q_0)
    mixedElement.append(Q_1)
    W_element = MixedElement(mixedElement)
    W = FunctionSpace(mesh, W_element)
    test = TestFunction(W)
    q = split(test)  # q[0] = v, q[1],q[2],... = q_0,q_1,...
    trial = TrialFunction(W)
    p_ = split(trial)  # p_[0] = u_, p_[1],p_[2],... = p_0,p_1,...
    up_n = Function(W)
    p_n = split(up_n)  # p_n[0] = u_n, p_n[1],p_n[2],... = p0_n,p1_n,...
    # variational formulation
    sources = []  # Contains the source term for each network
    innerProdP = (
        []
    )  # Contains the inner product of the gradient of p_j for each network
    dotProdP = []  # Contains the dot product of alpha_j & p_j,
    bcs_D = []  # Contains the terms for the Dirichlet boundaries
    integrals_N = []  # Contains the integrals for the Neumann boundaries
    x, y = SpatialCoordinate(mesh)
    layers = [4, 10, 1]
    g_ex = project(g_ex, V)
    # NOTE(review): `u_ex` is never defined in this file -- this line raises
    # NameError as written; presumably the displacement expression was meant.
    obs = project(u_ex, V)
    plot(g_ex)
    show()
    bias = [True, True]
    x, y = SpatialCoordinate(mesh)
    # Neural network standing in for the unknown source term.
    net = ANN(layers, bias=bias, mesh=mesh)
    E = K * inner(grad(u), grad(v)) * dx + net(x, y) * v * dx
    bcs = DirichletBC(V, Constant(0.0), "on_boundary")
    hat_u = Function(V)
    # Solve PDE
    solve(lhs(E) == rhs(E), hat_u, bcs)
    # L ^ 2 error as loss
    loss = assemble((hat_u - obs) ** 2 * dx)  # Loss function
    # Define reduced formulation of problem
    hat_loss = ReducedFunctional(loss, net.weights_ctrls())
    # Use scipy L - BFGS optimiser
    opt_theta = minimize(
        hat_loss, options={"disp": True, "gtol": 1e-12, "ftol": 1e-12, "maxiter": 80}
    )
    net.set_weights(opt_theta)
    #assert assemble(net(x, y) ** 2 * dx) < 1e-6
    # u_test = Function(V)
    # E_test = K * inner(grad(u), grad(v)) * dx + net(x, y) * v * dx
    # solve(lhs(E_test) == rhs(E_test),u_test,bcs)
    # f_pred = project(net(x,y),W.sub(2))
    # plot(f_pred)
    # show()
    # plot(u_test)
    # show()
    # Post-processing: project exact fields and write VTK output.
    u_e = Expression((variables[0], variables[1]), degree=2)
    V_e = VectorFunctionSpace(mesh, "P", 2)
    Q_e = FunctionSpace(mesh, "P", 1)
    u_e = project(u_e, V_e)
    p_e1 = project(UFLvariables[3], Q_e)
    vtkUfile = File("solution_steady/u.pvd")
    vtkPfile = File("solution_steady/p1.pvd")
    vtkUfile << u
    # NOTE(review): `p` is never defined in this function -- `p[1]` here and
    # below raises NameError; likely `p_` or a solution split was intended.
    vtkPfile << p[1]
    vtkUEfile = File("solution_steady/u_e.pvd")
    vtkPEfile = File("solution_steady/p_e1.pvd")
    vtkUEfile << u_e
    vtkPEfile << p_e1
    er2U = errornorm(u_e, u, "L2")
    print("Error L2 for velocity = ", er2U)
    er2P = errornorm(p_e1, p[1], "L2")
    print("Error L2 for pressure = ", er2P)
    plot(p[1])
    show()
if __name__ == "__main__":
    # Run the steady Biot/DNN regression test when executed as a script.
    test_SteadyBiot_dnn()
| [
"sympy.symbols",
"sympy.printing.ccode",
"matplotlib.pyplot.show",
"sympy.diff",
"sympy.cos",
"ufl_dnn.neural_network.ANN",
"sympy.sin"
] | [((591, 616), 'sympy.symbols', 'sym.symbols', (['"""x[0], x[1]"""'], {}), "('x[0], x[1]')\n", (602, 616), True, 'import sympy as sym\n'), ((3084, 3090), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (3088, 3090), False, 'from matplotlib.pyplot import show\n'), ((3170, 3203), 'ufl_dnn.neural_network.ANN', 'ANN', (['layers'], {'bias': 'bias', 'mesh': 'mesh'}), '(layers, bias=bias, mesh=mesh)\n', (3173, 3203), False, 'from ufl_dnn.neural_network import ANN\n'), ((4793, 4799), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (4797, 4799), False, 'from matplotlib.pyplot import show\n'), ((1034, 1053), 'sympy.sin', 'sym.sin', (['(sym.pi * y)'], {}), '(sym.pi * y)\n', (1041, 1053), True, 'import sympy as sym\n'), ((1422, 1445), 'sympy.printing.ccode', 'sym.printing.ccode', (['var'], {}), '(var)\n', (1440, 1445), True, 'import sympy as sym\n'), ((710, 733), 'sympy.sin', 'sym.sin', (['(2 * sym.pi * y)'], {}), '(2 * sym.pi * y)\n', (717, 733), True, 'import sympy as sym\n'), ((819, 838), 'sympy.sin', 'sym.sin', (['(sym.pi * y)'], {}), '(sym.pi * y)\n', (826, 838), True, 'import sympy as sym\n'), ((864, 887), 'sympy.sin', 'sym.sin', (['(2 * sym.pi * x)'], {}), '(2 * sym.pi * x)\n', (871, 887), True, 'import sympy as sym\n'), ((972, 991), 'sympy.sin', 'sym.sin', (['(sym.pi * y)'], {}), '(sym.pi * y)\n', (979, 991), True, 'import sympy as sym\n'), ((1012, 1031), 'sympy.sin', 'sym.sin', (['(sym.pi * x)'], {}), '(sym.pi * x)\n', (1019, 1031), True, 'import sympy as sym\n'), ((1190, 1208), 'sympy.diff', 'sym.diff', (['p1', 'x', '(2)'], {}), '(p1, x, 2)\n', (1198, 1208), True, 'import sympy as sym\n'), ((1211, 1229), 'sympy.diff', 'sym.diff', (['p1', 'y', '(2)'], {}), '(p1, y, 2)\n', (1219, 1229), True, 'import sympy as sym\n'), ((742, 765), 'sympy.cos', 'sym.cos', (['(2 * sym.pi * x)'], {}), '(2 * sym.pi * x)\n', (749, 765), True, 'import sympy as sym\n'), ((797, 816), 'sympy.sin', 'sym.sin', (['(sym.pi * x)'], {}), '(sym.pi * x)\n', (804, 816), True, 'import sympy as 
sym\n'), ((895, 918), 'sympy.cos', 'sym.cos', (['(2 * sym.pi * y)'], {}), '(2 * sym.pi * y)\n', (902, 918), True, 'import sympy as sym\n'), ((950, 969), 'sympy.sin', 'sym.sin', (['(sym.pi * x)'], {}), '(sym.pi * x)\n', (957, 969), True, 'import sympy as sym\n'), ((1087, 1104), 'sympy.diff', 'sym.diff', (['u', 'x', '(1)'], {}), '(u, x, 1)\n', (1095, 1104), True, 'import sympy as sym\n'), ((1107, 1124), 'sympy.diff', 'sym.diff', (['v', 'y', '(1)'], {}), '(v, y, 1)\n', (1115, 1124), True, 'import sympy as sym\n')] |
import click
import logging
import os
import requests
import xml.etree.ElementTree as ET
from multiprocessing.dummy import Pool
class OctocatsDownloader:
    """Download all Octocat images listed in the Octodex RSS feed into a
    local directory, fetching images concurrently on a thread pool."""
    # Feedburner RSS feed listing every Octocat entry.
    feeds_url = "http://feeds.feedburner.com/Octocats"
    def __init__(self, output="octocats", max_threads=5, force=False, logger=None):
        # Create the output directory on first use.
        if not os.path.exists(output):
            os.mkdir(output)
        self.output = output
        # One shared HTTP session for feed + image requests (keep-alive).
        self.session = requests.Session()
        # multiprocessing.dummy.Pool => a *thread* pool, suited to I/O work.
        self.pool = Pool(max_threads)
        self.force = force
        # NOTE(review): these counters are incremented from pool threads
        # without a lock -- fine under CPython in practice, but not guaranteed.
        self.skip_count = 0
        self.update_count = 0
        self.feeds = None  # parsed feed XML, populated lazily by fetch_feeds()
        self.logger = logger or logging.getLogger()
    def join_path(self, path):
        # Resolve a filename relative to the output directory.
        return os.path.join(self.output, path)
    def download_job(self, img_element):
        """Download one image (an <img> element from the feed) to disk,
        skipping files that already exist unless ``force`` is set."""
        src = img_element.get("src")
        # Use the last URL path segment as the local filename.
        filename = src.rsplit("/", 1)[-1]
        path = self.join_path(filename)
        if not self.force and os.path.exists(path):
            self.skip_count += 1
            self.logger.info("%s already exists! skip downloading ...", filename)
            return
        img = self.session.get(src).content
        with click.open_file(path, "wb") as fp:
            fp.write(img)
        self.update_count += 1
        self.logger.info("%s successfully downloaded.", filename)
    def fetch_feeds(self):
        """Fetch the RSS feed, cache a copy on disk, parse and return it."""
        self.logger.info("fetching RSS feeds ...")
        response = self.session.get(self.feeds_url)
        with click.open_file(self.join_path("Octocats.xml"), "w") as fp:
            fp.write(response.text)
        self.feeds = ET.fromstring(response.text)
        return self.feeds
    def download(self):
        """Dispatch one download job per image found in the feed and report
        updated/skipped counts when the pool finishes."""
        feeds = self.feeds or self.fetch_feeds()
        # http://www.w3school.com.cn/xml/xml_namespaces.asp
        img_elements = feeds.iterfind(
            ".//atom:entry/atom:content/atom:div/atom:a/atom:img",
            {"atom": "http://www.w3.org/2005/Atom"},
        )
        self.logger.info("dispatching download jobs ...")
        # pool.map blocks until every job has completed.
        self.pool.map(self.download_job, img_elements)
        self.logger.info(
            "all task done, %d updated, %d skipped, enjoy!",
            self.update_count,
            self.skip_count,
        )
@click.command()
@click.pass_context
@click.option(
    "-o",
    "--output",
    type=click.Path(file_okay=False, writable=True),
    default="octocats",
    help="The directory to save images.",
)
@click.option(
    "-m",
    "--max-threads",
    type=click.IntRange(1, 10),
    default=5,
    help="Maximum number of threads used to download images.",
)
@click.option("-p", "--proxy", type=str, help="HTTP Proxy")
@click.option(
    "-f", "--force", is_flag=True, help="Force download images even if they exist."
)
def cli(ctx, output, max_threads, proxy, force):
    """
    Download Octocats from https://octodex.github.com
    """
    # NOTE(review): ctx.obj.logger requires a parent command group to have
    # populated ctx.obj -- confirm the group sets it before this runs.
    o = OctocatsDownloader(output, max_threads, force, logger=ctx.obj.logger)
    if proxy:
        # Only HTTP traffic is proxied; the feed URL above is plain HTTP.
        o.session.proxies = {"http": proxy}
    o.download()
| [
"os.mkdir",
"xml.etree.ElementTree.fromstring",
"multiprocessing.dummy.Pool",
"requests.Session",
"click.option",
"os.path.exists",
"click.command",
"click.IntRange",
"click.open_file",
"click.Path",
"os.path.join",
"logging.getLogger"
] | [((2179, 2194), 'click.command', 'click.command', ([], {}), '()\n', (2192, 2194), False, 'import click\n'), ((2530, 2588), 'click.option', 'click.option', (['"""-p"""', '"""--proxy"""'], {'type': 'str', 'help': '"""HTTP Proxy"""'}), "('-p', '--proxy', type=str, help='HTTP Proxy')\n", (2542, 2588), False, 'import click\n'), ((2590, 2685), 'click.option', 'click.option', (['"""-f"""', '"""--force"""'], {'is_flag': '(True)', 'help': '"""Fore download images even they exists."""'}), "('-f', '--force', is_flag=True, help=\n 'Fore download images even they exists.')\n", (2602, 2685), False, 'import click\n'), ((417, 435), 'requests.Session', 'requests.Session', ([], {}), '()\n', (433, 435), False, 'import requests\n'), ((456, 473), 'multiprocessing.dummy.Pool', 'Pool', (['max_threads'], {}), '(max_threads)\n', (460, 473), False, 'from multiprocessing.dummy import Pool\n'), ((685, 716), 'os.path.join', 'os.path.join', (['self.output', 'path'], {}), '(self.output, path)\n', (697, 716), False, 'import os\n'), ((1548, 1576), 'xml.etree.ElementTree.fromstring', 'ET.fromstring', (['response.text'], {}), '(response.text)\n', (1561, 1576), True, 'import xml.etree.ElementTree as ET\n'), ((2265, 2307), 'click.Path', 'click.Path', ([], {'file_okay': '(False)', 'writable': '(True)'}), '(file_okay=False, writable=True)\n', (2275, 2307), False, 'import click\n'), ((2432, 2453), 'click.IntRange', 'click.IntRange', (['(1)', '(10)'], {}), '(1, 10)\n', (2446, 2453), False, 'import click\n'), ((311, 333), 'os.path.exists', 'os.path.exists', (['output'], {}), '(output)\n', (325, 333), False, 'import os\n'), ((347, 363), 'os.mkdir', 'os.mkdir', (['output'], {}), '(output)\n', (355, 363), False, 'import os\n'), ((618, 637), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (635, 637), False, 'import logging\n'), ((908, 928), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (922, 928), False, 'import os\n'), ((1121, 1148), 'click.open_file', 'click.open_file', 
(['path', '"""wb"""'], {}), "(path, 'wb')\n", (1136, 1148), False, 'import click\n')] |
from pyflink.table import *
from pyflink.table.expressions import col, lit, concat
from pyflink.table.window import Tumble
def demo01():
    """Walk through basic PyFlink batch Table API operations: filesystem
    source/sink DDL, group-by insert, and assorted table transformations."""
    # environment configuration
    t_env = BatchTableEnvironment.create(environment_settings=EnvironmentSettings.new_instance().in_batch_mode().use_blink_planner().build())
    # register Orders table and Result table sink in table environment
    source_data_path = "/path/to/source/directory/"
    result_data_path = "/path/to/result/directory/"
    source_ddl = f"""
            create table Orders(
                a VARCHAR,
                b BIGINT,
                c BIGINT,
                rowtime TIMESTAMP(3),
                WATERMARK FOR rowtime AS rowtime - INTERVAL '1' SECOND
            ) with (
                'connector' = 'filesystem',
                'format' = 'csv',
                'path' = '{source_data_path}'
            )
            """
    t_env.execute_sql(source_ddl)
    sink_ddl = f"""
            create table `Result`(
                a VARCHAR,
                cnt BIGINT
            ) with (
                'connector' = 'filesystem',
                'format' = 'csv',
                'path' = '{result_data_path}'
            )
            """
    t_env.execute_sql(sink_ddl)
    # specify table program
    orders = t_env.from_path("Orders")  # schema (a, b, c, rowtime)
    # NOTE(review): the sink was created as `Result` (quoted, capital R) but
    # the insert targets "result" -- verify the identifier case matches.
    orders.group_by("a").select(orders.a, orders.b.count.alias('cnt')).execute_insert("result").wait()
    # NOTE(review): Table operations return *new* tables; all of the results
    # below are discarded -- these lines only demonstrate the API.
    orders.where(orders.a == 'red')
    orders.filter(orders.b % 2 == 0)
    orders.add_columns(concat(orders.c, 'sunny'))
    orders.add_or_replace_columns(concat(orders.c, 'sunny').alias('desc'))
    orders.drop_columns(orders.b, orders.c)
    orders.rename_columns(orders.b.alias('b2'), orders.c.alias('c2'))
    orders.group_by(orders.a).select(orders.a, orders.b.sum.alias('d'))
    # tab.group_by(tab.key).select(tab.key, tab.value.avg.alias('average'))
    # tab.group_by("key").select("key, value.avg as average")
    # Hourly tumbling-window aggregation over the event-time column.
    result = orders.filter(orders.a.is_not_null & orders.b.is_not_null & orders.c.is_not_null) \
        .select(orders.a.lower_case.alias('a'), orders.b, orders.rowtime) \
        .window(Tumble.over(lit(1).hour).on(orders.rowtime).alias("hourly_window")) \
        .group_by(col('hourly_window'), col('a')) \
        .select(col('a'), col('hourly_window').end.alias('hour'), col('b').avg.alias('avg_billing_amount'))
    """
    SELECT user, SUM(amount)
    FROM Orders
    GROUP BY TUMBLE(rowtime, INTERVAL '1' DAY), user
    """
    # SQL内置函数:https://ci.apache.org/projects/flink/flink-docs-release-1.12/dev/table/functions/systemFunctions.html
    # SQL Data类型:https://ci.apache.org/projects/flink/flink-docs-release-1.12/dev/table/types.html
    # table operator对应的sql:https://ci.apache.org/projects/flink/flink-docs-release-1.12/dev/table/sql/queries.html
    # 各种Window 写法;https://ci.apache.org/projects/flink/flink-docs-release-1.12/dev/table/tableApi.html#group-windows
    # https://ci.apache.org/projects/flink/flink-docs-release-1.12/dev/table/tableApi.html
    # https://ci.apache.org/projects/flink/flink-docs-release-1.12/dev/python/table-api-users-guide/operations.html
if __name__ == '__main__':
    # Run the batch Table API demo when executed as a script.
    demo01()
"pyflink.table.expressions.col",
"pyflink.table.expressions.lit",
"pyflink.table.expressions.concat"
] | [((1520, 1545), 'pyflink.table.expressions.concat', 'concat', (['orders.c', '"""sunny"""'], {}), "(orders.c, 'sunny')\n", (1526, 1545), False, 'from pyflink.table.expressions import col, lit, concat\n'), ((2274, 2282), 'pyflink.table.expressions.col', 'col', (['"""a"""'], {}), "('a')\n", (2277, 2282), False, 'from pyflink.table.expressions import col, lit, concat\n'), ((1581, 1606), 'pyflink.table.expressions.concat', 'concat', (['orders.c', '"""sunny"""'], {}), "(orders.c, 'sunny')\n", (1587, 1606), False, 'from pyflink.table.expressions import col, lit, concat\n'), ((2224, 2244), 'pyflink.table.expressions.col', 'col', (['"""hourly_window"""'], {}), "('hourly_window')\n", (2227, 2244), False, 'from pyflink.table.expressions import col, lit, concat\n'), ((2246, 2254), 'pyflink.table.expressions.col', 'col', (['"""a"""'], {}), "('a')\n", (2249, 2254), False, 'from pyflink.table.expressions import col, lit, concat\n'), ((2284, 2304), 'pyflink.table.expressions.col', 'col', (['"""hourly_window"""'], {}), "('hourly_window')\n", (2287, 2304), False, 'from pyflink.table.expressions import col, lit, concat\n'), ((2324, 2332), 'pyflink.table.expressions.col', 'col', (['"""b"""'], {}), "('b')\n", (2327, 2332), False, 'from pyflink.table.expressions import col, lit, concat\n'), ((2148, 2154), 'pyflink.table.expressions.lit', 'lit', (['(1)'], {}), '(1)\n', (2151, 2154), False, 'from pyflink.table.expressions import col, lit, concat\n')] |
# -*- coding: utf-8 -*-
"""
PID Control Class
"""
# Author: <NAME> <<EMAIL>>
# License: MIT
from collections import deque
import math
import numpy as np
import carla
class Controller:
    """
    PID Controller implementation for combined longitudinal (throttle/brake)
    and lateral (steering) vehicle control.

    Parameters
    ----------
    args : dict
        The configuration dictionary parsed from yaml file. Expected keys:
        'max_brake', 'max_throttle', 'max_steering', 'dt', 'dynamic', plus
        'lon' and 'lat' sub-dicts each holding 'k_p', 'k_d', 'k_i'.

    Attributes
    ----------
    _lon_ebuffer : deque
        A deque buffer that stores longitudinal control errors.
    _lat_ebuffer : deque
        A deque buffer that stores latitudinal control errors.
    current_transform : carla.transform
        Current ego vehicle transformation in CARLA world.
    current_speed : float
        Current ego vehicle speed.
    past_steering : float
        Steering angle from previous control step.
    """

    def __init__(self, args):
        # longitudinal related
        self.max_brake = args['max_brake']
        self.max_throttle = args['max_throttle']

        self._lon_k_p = args['lon']['k_p']
        self._lon_k_d = args['lon']['k_d']
        self._lon_k_i = args['lon']['k_i']

        self._lon_ebuffer = deque(maxlen=10)

        # lateral related
        self.max_steering = args['max_steering']

        self._lat_k_p = args['lat']['k_p']
        self._lat_k_d = args['lat']['k_d']
        self._lat_k_i = args['lat']['k_i']

        self._lat_ebuffer = deque(maxlen=10)

        # simulation time-step
        self.dt = args['dt']

        # current speed and localization retrieved from sensing layer
        self.current_transform = None
        self.current_speed = 0.
        # past steering
        self.past_steering = 0.

        self.dynamic = args['dynamic']

    def dynamic_pid(self):
        """
        Compute kp, kd, ki based on current speed. Placeholder -- not yet
        implemented.
        """
        pass

    def update_info(self, ego_pos, ego_spd):
        """
        Update ego position and speed to controller.

        Parameters
        ----------
        ego_pos : carla.location
            Position of the ego vehicle.

        ego_spd : float
            Speed of the ego vehicle

        Returns
        -------
        """
        self.current_transform = ego_pos
        self.current_speed = ego_spd
        if self.dynamic:
            self.dynamic_pid()

    def _pid_term(self, ebuffer, error, k_p, k_d, k_i):
        """
        Append *error* to *ebuffer*, compute the discrete PID output and
        clip it to [-1, 1]. Shared by both control axes.
        """
        ebuffer.append(error)
        if len(ebuffer) >= 2:
            # Backward-difference derivative and rectangle-rule integral.
            _de = (ebuffer[-1] - ebuffer[-2]) / self.dt
            _ie = sum(ebuffer) * self.dt
        else:
            _de = 0.0
            _ie = 0.0
        return np.clip(k_p * error + k_d * _de + k_i * _ie, -1.0, 1.0)

    def lon_run_step(self, target_speed):
        """
        Parameters
        ----------
        target_speed : float
            Target speed of the ego vehicle.

        Returns
        -------
        acceleration : float
            Desired acceleration value for the current step
            to achieve target speed.
        """
        error = target_speed - self.current_speed
        # BUGFIX: previously this accumulated the speed error in the *lateral*
        # buffer and applied the *lateral* PID gains, so the configured
        # 'lon' gains were never used.
        return self._pid_term(self._lon_ebuffer, error,
                              self._lon_k_p, self._lon_k_d, self._lon_k_i)

    def lat_run_step(self, target_location):
        """
        Generate the throttle command based on current speed and target speed

        Parameters
        ----------
        target_location : carla.location
            Target location.

        Returns
        -------
        current_steering : float
            Desired steering angle value for the current step to
            achieve target location.
        """
        # Unit heading vector of the vehicle in the world frame.
        v_begin = self.current_transform.location
        v_end = v_begin + carla.Location(
            x=math.cos(
                math.radians(
                    self.current_transform.rotation.yaw)), y=math.sin(
                math.radians(
                    self.current_transform.rotation.yaw)))

        v_vec = np.array([v_end.x - v_begin.x, v_end.y - v_begin.y, 0.0])
        w_vec = np.array([target_location.x -
                          v_begin.x, target_location.y -
                          v_begin.y, 0.0])
        # Signed heading error: angle between heading and target direction,
        # sign taken from the z component of the cross product.
        _dot = math.acos(np.clip(np.dot(
            w_vec, v_vec) / (np.linalg.norm(w_vec) * np.linalg.norm(v_vec)),
            -1.0, 1.0))
        _cross = np.cross(v_vec, w_vec)
        if _cross[2] < 0:
            _dot *= -1.0

        # BUGFIX: previously the heading error was accumulated in the
        # *longitudinal* buffer; it now uses the lateral buffer/gains.
        return self._pid_term(self._lat_ebuffer, _dot,
                              self._lat_k_p, self._lat_k_d, self._lat_k_i)

    def run_step(self, target_speed, waypoint):
        """
        Execute one step of control invoking both lateral and longitudinal
        PID controllers to reach a target waypoint at a given target_speed.

        Parameters
        ----------
        target_speed : float
            Target speed of the ego vehicle.

        waypoint : carla.location
            Target location.

        Returns
        -------
        control : carla.VehicleControl
            Desired vehicle control command for the current step.
        """
        # control class for carla vehicle
        control = carla.VehicleControl()

        # emergency stop
        if target_speed == 0 or waypoint is None:
            control.steer = 0.0
            control.throttle = 0.0
            control.brake = 1.0
            control.hand_brake = False
            return control

        acceleration = self.lon_run_step(target_speed)
        current_steering = self.lat_run_step(waypoint)

        # Positive PID output accelerates, negative output brakes.
        if acceleration >= 0.0:
            control.throttle = min(acceleration, self.max_throttle)
            control.brake = 0.0
        else:
            control.throttle = 0.0
            control.brake = min(abs(acceleration), self.max_brake)

        # Steering regulation: changes cannot happen abruptly, can't steer too
        # much.
        if current_steering > self.past_steering + 0.2:
            current_steering = self.past_steering + 0.2
        elif current_steering < self.past_steering - 0.2:
            current_steering = self.past_steering - 0.2

        if current_steering >= 0:
            steering = min(self.max_steering, current_steering)
        else:
            steering = max(-self.max_steering, current_steering)

        control.steer = steering
        control.hand_brake = False
        control.manual_gear_shift = False
        self.past_steering = steering

        return control
| [
"math.radians",
"numpy.cross",
"numpy.clip",
"numpy.array",
"numpy.linalg.norm",
"numpy.dot",
"carla.VehicleControl",
"collections.deque"
] | [((1110, 1126), 'collections.deque', 'deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (1115, 1126), False, 'from collections import deque\n'), ((1362, 1378), 'collections.deque', 'deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (1367, 1378), False, 'from collections import deque\n'), ((2932, 3022), 'numpy.clip', 'np.clip', (['(self._lat_k_p * error + self._lat_k_d * _de + self._lat_k_i * _ie)', '(-1.0)', '(1.0)'], {}), '(self._lat_k_p * error + self._lat_k_d * _de + self._lat_k_i * _ie, \n -1.0, 1.0)\n', (2939, 3022), True, 'import numpy as np\n'), ((4128, 4185), 'numpy.array', 'np.array', (['[v_end.x - v_begin.x, v_end.y - v_begin.y, 0.0]'], {}), '([v_end.x - v_begin.x, v_end.y - v_begin.y, 0.0])\n', (4136, 4185), True, 'import numpy as np\n'), ((4202, 4279), 'numpy.array', 'np.array', (['[target_location.x - v_begin.x, target_location.y - v_begin.y, 0.0]'], {}), '([target_location.x - v_begin.x, target_location.y - v_begin.y, 0.0])\n', (4210, 4279), True, 'import numpy as np\n'), ((4512, 4534), 'numpy.cross', 'np.cross', (['v_vec', 'w_vec'], {}), '(v_vec, w_vec)\n', (4520, 4534), True, 'import numpy as np\n'), ((4868, 4957), 'numpy.clip', 'np.clip', (['(self._lat_k_p * _dot + self._lat_k_d * _de + self._lat_k_i * _ie)', '(-1.0)', '(1.0)'], {}), '(self._lat_k_p * _dot + self._lat_k_d * _de + self._lat_k_i * _ie, -\n 1.0, 1.0)\n', (4875, 4957), True, 'import numpy as np\n'), ((5582, 5604), 'carla.VehicleControl', 'carla.VehicleControl', ([], {}), '()\n', (5602, 5604), False, 'import carla\n'), ((4365, 4385), 'numpy.dot', 'np.dot', (['w_vec', 'v_vec'], {}), '(w_vec, v_vec)\n', (4371, 4385), True, 'import numpy as np\n'), ((3938, 3987), 'math.radians', 'math.radians', (['self.current_transform.rotation.yaw'], {}), '(self.current_transform.rotation.yaw)\n', (3950, 3987), False, 'import math\n'), ((4039, 4088), 'math.radians', 'math.radians', (['self.current_transform.rotation.yaw'], {}), '(self.current_transform.rotation.yaw)\n', (4051, 4088), False, 'import 
math\n'), ((4402, 4423), 'numpy.linalg.norm', 'np.linalg.norm', (['w_vec'], {}), '(w_vec)\n', (4416, 4423), True, 'import numpy as np\n'), ((4426, 4447), 'numpy.linalg.norm', 'np.linalg.norm', (['v_vec'], {}), '(v_vec)\n', (4440, 4447), True, 'import numpy as np\n')] |
from abc_type import IdaTypes
from ida_types import IDA_TYPES
class IdaTStr(IdaTypes):
    """Decoder for IDA's string type descriptor.

    ``ida_type['value']`` accumulates the decoded textual representation of
    the string's hybrid-typed elements.
    """
    def __init__(self, ida_type=IDA_TYPES['str']):
        self.ida_type = {'idt': ida_type, 'value': ''}

    def decode(self, data):
        """Decode the element count and each hybrid-typed element from
        *data*; return the total number of bytes consumed."""
        count = ord(data[0])
        offset = 1
        # Hoisted out of the loop; kept method-local to avoid a circular
        # import at module load time.
        import ida_decoder
        for _ in range(count):
            rbyte, value = ida_decoder.decode_hybrid_type(ida_type=data[offset:])
            offset += rbyte
            # BUGFIX: 'value' is a str, which has no .append(); the original
            # `self.ida_type['value'].append(value)` raised AttributeError on
            # the first element. Concatenation matches to_string()'s usage.
            # NOTE(review): assumes decode_hybrid_type returns str -- confirm.
            self.ida_type['value'] += value
        return offset

    def get_type(self):
        """Return the raw type dict ({'idt': ..., 'value': ...})."""
        return self.ida_type

    def to_string(self, session):
        # *session* is unused but kept for interface compatibility.
        return self.ida_type['value'] + '{ptr} {name}'

    def from_dict(self, data):
        """Restore the type dict from a previously serialized form."""
        self.ida_type = data
| [
"ida_decoder.decode_hybrid_type"
] | [((365, 419), 'ida_decoder.decode_hybrid_type', 'ida_decoder.decode_hybrid_type', ([], {'ida_type': 'data[offset:]'}), '(ida_type=data[offset:])\n', (395, 419), False, 'import ida_decoder\n')] |
"""
Test with multiple nodes, and multiple PoP endorsements, checking to make sure nodes stay in sync.
"""
import time
from ..framework.test_framework import PopIntegrationTestFramework
from ..framework.pop_util import endorse_block, mine_until_pop_enabled
from ..framework.sync_util import start_all, connect_all, sync_all
class PopE2E(PopIntegrationTestFramework):
    """End-to-end PoP test: two nodes, many VBK blocks/VTBs and multiple
    endorsements, verifying the nodes stay in sync without ban-score."""
    def set_test_params(self):
        self.num_nodes = 2
    def setup_nodes(self):
        # Start nodes, mine until PoP activates on node0, then connect/sync.
        start_all(self.nodes)
        mine_until_pop_enabled(self.nodes[0])
        connect_all(self.nodes)
        sync_all(self.nodes)
    def run_test(self):
        from pypoptools.pypopminer import MockMiner, PublicationData
        apm = MockMiner()
        self._assert_nodes_peer_info()
        # Mine a batch of VBK blocks with the mock PoP miner.
        vbk_blocks_amount = 100
        self.log.info("generate vbk blocks on node0, amount {}".format(vbk_blocks_amount))
        vbk_blocks = []
        for i in range(vbk_blocks_amount):
            vbk_blocks.append(apm.mineVbkBlocks(1))
        assert len(vbk_blocks) == vbk_blocks_amount
        # Create VTBs by endorsing the VBK tip against the BTC tip.
        vtbs_amount = 20
        self.log.info("generate vtbs on node0, amount {}".format(vtbs_amount))
        for i in range(vtbs_amount):
            apm.endorseVbkBlock(apm.vbkTip, apm.btcTip.getHash(), 1)
        self.nodes[0].generate(nblocks=10)
        last_block = self.nodes[0].getblockcount()
        assert last_block >= 5
        self.log.info("endorse {} alt block".format(last_block - 5))
        endorse_block(self.nodes[0], apm, last_block - 5)
        self._assert_nodes_peer_info()
        # The next mined block must contain all queued VTBs and VBK blocks
        # (+1 for the VBK block carrying the ATV publication).
        containing_block_hash = self.nodes[0].generate(nblocks=1)[0]
        containing_block = self.nodes[0].getblock(containing_block_hash)
        assert len(containing_block.containingVTBs) == vtbs_amount
        assert len(containing_block.containingVBKs) == vbk_blocks_amount + vtbs_amount + 1
        assert last_block >= 6
        self.log.info("endorse {} alt block".format(last_block - 6))
        endorse_block(self.nodes[0], apm, last_block - 6)
        self._assert_nodes_peer_info()
        self.nodes[0].generate(nblocks=1)
        # Give the nodes a moment to propagate before the final sync check.
        time.sleep(5)
        self.log.info("sync all nodes")
        sync_all(self.nodes)
        self._assert_nodes_peer_info()
    def _assert_nodes_peer_info(self):
        # Both nodes must see exactly one well-behaved peer.
        self._assert_node_peer_info(self.nodes[0])
        self._assert_node_peer_info(self.nodes[1])
    def _assert_node_peer_info(self, node):
        peer_info = node.getpeerinfo()
        assert len(peer_info) == 1
        assert peer_info[0].banscore == 0
| [
"pypoptools.pypopminer.MockMiner",
"time.sleep"
] | [((701, 712), 'pypoptools.pypopminer.MockMiner', 'MockMiner', ([], {}), '()\n', (710, 712), False, 'from pypoptools.pypopminer import MockMiner, PublicationData\n'), ((2105, 2118), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2115, 2118), False, 'import time\n')] |
""" rct cnf fs because don't knwo where else to put it and avoid
circular imports
"""
from mechanalyzer.inf import thy as tinfo
from mechlib.filesys._build import build_fs
from mechlib.filesys.mincnf import min_energy_conformer_locators
def rcts_cnf_fs(rct_infos, thy_dct, es_keyword_dct, run_prefix, save_prefix):
    """Build the conformer filesystem objects for each reactant.

    Returns a tuple with one entry per reactant:
    (cnf_run_fs, cnf_save_fs, min_cnf_locs, min_cnf_path).
    """
    method_dct = thy_dct.get(es_keyword_dct['inplvl'])
    base_thy_info = tinfo.from_dct(method_dct)

    def _reactant_fs(rct_info):
        # Orbital-label-modified theory info for this reactant.
        mod_thy_info = tinfo.modify_orb_label(base_thy_info, rct_info)
        # Conformer filesystem at the initial level of theory.
        cnf_run_fs, cnf_save_fs = build_fs(
            run_prefix, save_prefix, 'CONFORMER',
            spc_locs=rct_info,
            thy_locs=mod_thy_info[1:])
        min_locs, min_path = min_energy_conformer_locators(
            cnf_save_fs, mod_thy_info)
        # Recreate the run-directory leaf in case it was deleted.
        cnf_run_fs[-1].create(min_locs)
        return cnf_run_fs, cnf_save_fs, min_locs, min_path

    return tuple(_reactant_fs(info) for info in rct_infos)
# NOTE(review): several names listed here ('prefix_fs', 'root_locs', 'mincnf',
# 'models', 'read', 'save') are not defined in this module chunk -- presumably
# re-exported submodules/helpers; verify against the package layout.
__all__ = [
    'build_fs',
    'prefix_fs',
    'root_locs',
    'mincnf',
    'models',
    'read',
    'save'
]
| [
"mechanalyzer.inf.thy.from_dct",
"mechlib.filesys._build.build_fs",
"mechlib.filesys.mincnf.min_energy_conformer_locators",
"mechanalyzer.inf.thy.modify_orb_label"
] | [((448, 478), 'mechanalyzer.inf.thy.from_dct', 'tinfo.from_dct', (['ini_method_dct'], {}), '(ini_method_dct)\n', (462, 478), True, 'from mechanalyzer.inf import thy as tinfo\n'), ((560, 606), 'mechanalyzer.inf.thy.modify_orb_label', 'tinfo.modify_orb_label', (['ini_thy_info', 'rct_info'], {}), '(ini_thy_info, rct_info)\n', (582, 606), True, 'from mechanalyzer.inf import thy as tinfo\n'), ((704, 805), 'mechlib.filesys._build.build_fs', 'build_fs', (['run_prefix', 'save_prefix', '"""CONFORMER"""'], {'spc_locs': 'rct_info', 'thy_locs': 'mod_ini_thy_info[1:]'}), "(run_prefix, save_prefix, 'CONFORMER', spc_locs=rct_info, thy_locs=\n mod_ini_thy_info[1:])\n", (712, 805), False, 'from mechlib.filesys._build import build_fs\n'), ((862, 926), 'mechlib.filesys.mincnf.min_energy_conformer_locators', 'min_energy_conformer_locators', (['ini_cnf_save_fs', 'mod_ini_thy_info'], {}), '(ini_cnf_save_fs, mod_ini_thy_info)\n', (891, 926), False, 'from mechlib.filesys.mincnf import min_energy_conformer_locators\n')] |
import sys
import os
try:
import sublime
except Exception:
pass
NEW_ACCOUNT_TXT = '''Welcome {username}!\n\nYou're all set to collaborate. You should check out our docs at https://{host}/help/plugins/sublime#usage.
You must run 'Floobits - Complete Sign Up' so you can log in to the website.'''
LINKED_ACCOUNT_TXT = '''Welcome {username}!\n\nYou are all set to collaborate.
You may want to check out our docs at https://{host}/help/plugins/sublime#usage'''
def name():
    """Return the host editor name tagged with the running Python major
    version, e.g. "Sublime Text 3"."""
    major = 2 if sys.version_info < (3, 0) else 3
    return 'Sublime Text %s' % major
def codename():
    """Short lowercase identifier for this editor integration."""
    return 'sublime'
# Thin wrappers around the Sublime Text API so the shared plugin core can
# call editor-agnostic function names.
def ok_cancel_dialog(dialog):
    """Show a modal OK/Cancel dialog; return True if OK was pressed."""
    return sublime.ok_cancel_dialog(dialog)
def error_message(msg):
    """Show a modal error dialog."""
    sublime.error_message(msg)
def status_message(msg):
    """Show a transient message in the status bar."""
    sublime.status_message(msg)
def platform():
    """Return the OS name as reported by Sublime ('osx'/'linux'/'windows')."""
    return sublime.platform()
def set_timeout(f, timeout):
    """Schedule f() on the main thread after *timeout* milliseconds."""
    sublime.set_timeout(f, timeout)
def call_timeouts():
    # No-op: Sublime drives its own timeout queue.
    return
def message_dialog(msg):
    """Show a modal informational dialog."""
    sublime.message_dialog(msg)
def open_file(file):
    """Open *file* in the active window, if there is one."""
    win = sublime.active_window()
    if win:
        win.open_file(file)
def get_line_endings(path=None):
    """Return the newline string configured in the user's preferences.

    *path* is accepted for interface compatibility but is not used.
    """
    prefs = sublime.load_settings('Preferences.sublime-settings')
    ending = prefs.get('default_line_ending')
    translations = {'system': os.linesep, 'windows': '\r\n'}
    # Anything else (including 'unix' or an unset value) maps to "\n".
    return translations.get(ending, '\n')
def select_auth(*args):
    """Pick one stored credential set and hand it to *cb*.

    With no auths, cb(None) is called immediately; with exactly one, that
    entry is used; otherwise a quick panel lets the user choose.
    """
    window, auths, cb = args
    if not auths:
        return cb(None)
    by_host = dict(auths)
    # Tag each credential dict with its host key (mutates entries in place).
    for host, creds in by_host.items():
        creds['host'] = host
    if len(by_host) == 1:
        only_auth = next(iter(by_host.values()))
        return cb(only_auth)
    opts = [[host, 'Connect as %s' % creds.get('username')]
            for host, creds in by_host.items()]
    opts.append(['Cancel', ''])

    def on_account(index):
        # An out-of-range index (or the trailing Cancel row) means no pick.
        if 0 <= index < len(by_host):
            return cb(by_host[opts[index][0]])
        return cb(None)

    flags = 0
    if hasattr(sublime, 'KEEP_OPEN_ON_FOCUS_LOST'):
        flags |= sublime.KEEP_OPEN_ON_FOCUS_LOST
    return window.show_quick_panel(opts, on_account, flags)
| [
"sublime.message_dialog",
"sublime.platform",
"sublime.set_timeout",
"sublime.active_window",
"sublime.status_message",
"sublime.load_settings",
"sublime.error_message",
"sublime.ok_cancel_dialog"
] | [((699, 731), 'sublime.ok_cancel_dialog', 'sublime.ok_cancel_dialog', (['dialog'], {}), '(dialog)\n', (723, 731), False, 'import sublime\n'), ((762, 788), 'sublime.error_message', 'sublime.error_message', (['msg'], {}), '(msg)\n', (783, 788), False, 'import sublime\n'), ((820, 847), 'sublime.status_message', 'sublime.status_message', (['msg'], {}), '(msg)\n', (842, 847), False, 'import sublime\n'), ((877, 895), 'sublime.platform', 'sublime.platform', ([], {}), '()\n', (893, 895), False, 'import sublime\n'), ((931, 962), 'sublime.set_timeout', 'sublime.set_timeout', (['f', 'timeout'], {}), '(f, timeout)\n', (950, 962), False, 'import sublime\n'), ((1028, 1055), 'sublime.message_dialog', 'sublime.message_dialog', (['msg'], {}), '(msg)\n', (1050, 1055), False, 'import sublime\n'), ((1089, 1112), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (1110, 1112), False, 'import sublime\n'), ((1201, 1254), 'sublime.load_settings', 'sublime.load_settings', (['"""Preferences.sublime-settings"""'], {}), "('Preferences.sublime-settings')\n", (1222, 1254), False, 'import sublime\n')] |
from __future__ import print_function
import os
from supervisord_dependent_startup.supervisord_dependent_startup import (DependentStartup,
DependentStartupError,
get_all_configs,
process_states, Service,
ServiceOptions,
ServicesHandler, xmlrpclib)
from .log_utils import setup_tests_logging
# Re-export the names under test so `from <package> import X` works in tests.
__all__ = ['DependentStartup', 'ServiceOptions', 'DependentStartupError', 'Service',
           'ServicesHandler', 'get_all_configs', 'process_states', 'xmlrpclib']
# Configure logging once for the whole test package at import time.
setup_tests_logging()
# Strings accepted as "true" when read from boolean environment variables.
valid_booleans = {'true': True, 'True': True, 'TRUE': True, 't': True, '1': True}
# Whether temporary directories created by tests are removed afterwards
# (controlled by the CLEANUP_TESTS environment variable, default: clean up).
cleanup_tmp_dir = os.environ.get('CLEANUP_TESTS', "True") in valid_booleans
# Name of directory to store supervisor config files. If unset, a random value is used
test_tmp_dir = os.environ.get('TEST_TMP_DIR', None)
| [
"os.environ.get"
] | [((1112, 1148), 'os.environ.get', 'os.environ.get', (['"""TEST_TMP_DIR"""', 'None'], {}), "('TEST_TMP_DIR', None)\n", (1126, 1148), False, 'import os\n'), ((950, 989), 'os.environ.get', 'os.environ.get', (['"""CLEANUP_TESTS"""', '"""True"""'], {}), "('CLEANUP_TESTS', 'True')\n", (964, 989), False, 'import os\n')] |
#!/usr/bin/env python
# Copyright 2019-2022 AstroLab Software
# Author: <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simulate batches of alerts coming from ZTF or ELaSTICC.
"""
import argparse
import os
import sys
import glob
import time
import asyncio
import gzip
import numpy as np
from fink_alert_simulator import alertProducer
from fink_alert_simulator import avroUtils
from fink_alert_simulator.parser import getargs
def main():
    """Entry point: publish batches ("observations") of avro alerts to Kafka.

    Reads CLI arguments (Kafka servers/topic, path to avro files, number of
    observations, alerts per observation, publication interval), splits the
    alert files into observations, and schedules their publication on an
    asyncio event loop at a fixed interval.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    args = getargs(parser)
    # Configure producer connection to Kafka broker
    conf = {'bootstrap.servers': args.servers}
    streamproducer = alertProducer.AlertProducer(
        args.topic, schema_files=None, **conf)
    # Scan for avro files
    root = args.datasimpath
    # Grab data stored on disk (both .avro and .avro.gz match "*.avro*")
    files = glob.glob(os.path.join(root, "*.avro*"))
    # Number of observations, and total number of alerts to send.
    nobs = args.nobservations
    poolsize = args.nalerts_per_obs * nobs
    if nobs == -1:
        # Take all alerts available
        nobs = int(len(files) / float(args.nalerts_per_obs)) + 1
        poolsize = args.nalerts_per_obs * nobs
        msg = """
    All {} alerts to be sent (nobservations=-1), corresponding
    to {} observations ({} alerts each).
    """.format(len(files), nobs, args.nalerts_per_obs)
        print(msg)
    elif len(files) < poolsize:
        # Send only available alerts
        # NOTE(review): nobs is reduced here but poolsize is NOT recomputed,
        # so the "Total alert to be sent" print below may overstate the
        # actual count -- confirm intended behavior.
        nobs = int(len(files) / float(args.nalerts_per_obs)) + 1
        msg = """
    You ask for more data than you have!
    Number of alerts on disk ({}): {}
    Number of alerts required (nalerts_per_obs * nobservations): {}
    Hence, we reduced the number of observations to {}.
    """.format(root, len(files), poolsize, nobs)
        print(msg)
    print('Total alert available ({}): {}'.format(root, len(files)))
    print('Total alert to be sent: {}'.format(poolsize))
    # Break the alert list into observations (one sub-array per observation)
    files = np.array_split(files[:poolsize], nobs)[:nobs]
    # Starting time, used to timestamp each observation relative to launch
    t0 = time.time()
    print("t0: {}".format(t0))
    def send_visit(list_of_files):
        """ Send all alerts of an observation for publication in Kafka
        Parameters
        ----------
        list_of_files: list of str
            List with filenames containing the alert (avro file). Alerts
            can be gzipped, but the extension should be
            explicit (`avro` or `avro.gz`).
        """
        print('Observation start: t0 + : {:.2f} seconds'.format(
            time.time() - t0))
        # Load alert contents
        startstop = []
        for index, fn in enumerate(list_of_files):
            # Pick a plain or gzip opener based on the file extension.
            if fn.endswith('avro'):
                copen = lambda x: open(x, mode='rb')
            elif fn.endswith('avro.gz'):
                copen = lambda x: gzip.open(x, mode='rb')
            else:
                msg = """
                Alert filename should end with `avro` or `avro.gz`.
                Currently trying to read: {}
                """.format(fn)
                raise NotImplementedError(msg)
            with copen(fn) as file_data:
                # Read the data
                data = avroUtils.readschemadata(file_data)
                # Read the Schema
                schema = data.schema
                # assuming one record per data
                record = next(data)
                # Remember a display field of the first and last alert so the
                # summary print below can show the covered range.
                if index == 0 or index == len(list_of_files) - 1:
                    if args.to_display != 'None':
                        fields = args.to_display.split(',')
                        to_display = record[fields[0]]
                        for field_ in fields[1:]:
                            to_display = to_display[field_]
                        startstop.append(to_display)
                streamproducer.send(record, alert_schema=schema, encode=True)
        if args.to_display != 'None':
            print('{} alerts sent ({} to {})'.format(len(
                list_of_files),
                startstop[0],
                startstop[1]))
        # Trigger the producer
        streamproducer.flush()
    # NOTE(review): asyncio.get_event_loop() is deprecated when no loop is
    # running (Python 3.10+) -- consider asyncio.new_event_loop(); verify
    # the supported Python versions before changing.
    loop = asyncio.get_event_loop()
    asyncio.ensure_future(
        alertProducer.schedule_delays(
            loop,
            send_visit,
            files,
            interval=args.tinterval_kafka))
    loop.run_forever()
    loop.close()
if __name__ == "__main__":
main()
| [
"asyncio.get_event_loop",
"argparse.ArgumentParser",
"gzip.open",
"fink_alert_simulator.alertProducer.schedule_delays",
"time.time",
"fink_alert_simulator.parser.getargs",
"fink_alert_simulator.avroUtils.readschemadata",
"fink_alert_simulator.alertProducer.AlertProducer",
"numpy.array_split",
"os.... | [((962, 1006), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (985, 1006), False, 'import argparse\n'), ((1018, 1033), 'fink_alert_simulator.parser.getargs', 'getargs', (['parser'], {}), '(parser)\n', (1025, 1033), False, 'from fink_alert_simulator.parser import getargs\n'), ((1155, 1221), 'fink_alert_simulator.alertProducer.AlertProducer', 'alertProducer.AlertProducer', (['args.topic'], {'schema_files': 'None'}), '(args.topic, schema_files=None, **conf)\n', (1182, 1221), False, 'from fink_alert_simulator import alertProducer\n'), ((2592, 2603), 'time.time', 'time.time', ([], {}), '()\n', (2601, 2603), False, 'import time\n'), ((4650, 4674), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (4672, 4674), False, 'import asyncio\n'), ((1340, 1369), 'os.path.join', 'os.path.join', (['root', '"""*.avro*"""'], {}), "(root, '*.avro*')\n", (1352, 1369), False, 'import os\n'), ((2516, 2554), 'numpy.array_split', 'np.array_split', (['files[:poolsize]', 'nobs'], {}), '(files[:poolsize], nobs)\n', (2530, 2554), True, 'import numpy as np\n'), ((4710, 4800), 'fink_alert_simulator.alertProducer.schedule_delays', 'alertProducer.schedule_delays', (['loop', 'send_visit', 'files'], {'interval': 'args.tinterval_kafka'}), '(loop, send_visit, files, interval=args.\n tinterval_kafka)\n', (4739, 4800), False, 'from fink_alert_simulator import alertProducer\n'), ((3721, 3756), 'fink_alert_simulator.avroUtils.readschemadata', 'avroUtils.readschemadata', (['file_data'], {}), '(file_data)\n', (3745, 3756), False, 'from fink_alert_simulator import avroUtils\n'), ((3078, 3089), 'time.time', 'time.time', ([], {}), '()\n', (3087, 3089), False, 'import time\n'), ((3365, 3388), 'gzip.open', 'gzip.open', (['x'], {'mode': '"""rb"""'}), "(x, mode='rb')\n", (3374, 3388), False, 'import gzip\n')] |
"""Implementation of Rule L001."""
from sqlfluff.core.rules.base import BaseRule, LintResult, LintFix
from sqlfluff.core.rules.doc_decorators import document_fix_compatible
@document_fix_compatible
class Rule_L001(BaseRule):
    """Unnecessary trailing whitespace.

    | **Anti-pattern**
    | The • character represents a space.

    .. code-block::

        SELECT
            a
        FROM foo••

    | **Best practice**
    | Remove trailing spaces.

    .. code-block::

        SELECT
            a
        FROM foo
    """

    def _eval(self, segment, raw_stack, **kwargs):
        """Unnecessary trailing whitespace.

        Look for newline segments, and then evaluate what
        it was preceded by.
        """
        # We only trigger on newlines preceded by at least one whitespace
        # segment.
        if (
            segment.is_type("newline")
            and len(raw_stack) > 0
            and raw_stack[-1].is_type("whitespace")
        ):
            # Walk backwards and collect the whole run of trailing
            # whitespace segments so they can all be deleted.
            deletions = []
            idx = -1
            # Bound the walk so we never index past the start of raw_stack:
            # an unbounded `raw_stack[idx]` raised IndexError when the
            # entire stack consisted of whitespace segments.
            while idx >= -len(raw_stack) and raw_stack[idx].is_type("whitespace"):
                deletions.append(raw_stack[idx])
                idx -= 1
            return LintResult(
                anchor=deletions[-1], fixes=[LintFix("delete", d) for d in deletions]
            )
        # No violation at this segment.
        return LintResult()
| [
"sqlfluff.core.rules.base.LintResult",
"sqlfluff.core.rules.base.LintFix"
] | [((1319, 1331), 'sqlfluff.core.rules.base.LintResult', 'LintResult', ([], {}), '()\n', (1329, 1331), False, 'from sqlfluff.core.rules.base import BaseRule, LintResult, LintFix\n'), ((1249, 1269), 'sqlfluff.core.rules.base.LintFix', 'LintFix', (['"""delete"""', 'd'], {}), "('delete', d)\n", (1256, 1269), False, 'from sqlfluff.core.rules.base import BaseRule, LintResult, LintFix\n')] |
#==================================================================================
# PROGRAM: "Hannibal.py"
# LOCATION: beluga>examples>Mansell
# Author: <NAME> (2016)
#
# Description: Preliminary test of a track path optimization using a user-defined
# terrain elevation profile.
#==================================================================================
#Import Necessary Modules
import numpy as np
import beluga.bvpsol as bvpsol
import beluga.bvpsol.algorithms as algorithms
import beluga.optim.Problem
from beluga.optim.problem import *
from beluga.continuation import *
from math import *
import functools
def get_problem():
    """A simple test of optimal surface track planning.

    Builds and returns a beluga optimal-control Problem that trades off
    path time against terrain elevation (weighted by constant `w`) using a
    hand-written analytic elevation profile.
    """
    # Rename this and/or move to optim package?
    problem = beluga.optim.Problem('Hannibal')

    #Define independent variables
    problem.independent('t', 's')

    # Define equations of motion: planar motion at constant speed V with
    # heading angle `hdg` as the control.
    # NOTE(review): the trailing backslash on the second .state() line joins
    # the following comment line into this statement -- harmless but
    # confusing; confirm no third chained call was intended.
    problem.state('x','V*cos(hdg)','m')   \
           .state('y','V*sin(hdg)','m') \

    # Define controls
    problem.control('hdg','rad')

    # Define Cost Functional: (1-w) penalises time, w weights the analytic
    # terrain-elevation surface evaluated along the path.
    problem.cost['path'] = Expression('(1-w)+w*V*conv*elev*(-0.3*exp(-0.5*((x-2.7)^2+1.5*(y-2.1)^2))+2.6*exp(-0.55*(0.87*(x-6.7)^2+(y-2.2)^2))+2.1*exp(-0.27*(0.2*(x-5.5)^2+(y-7.2)^2))+1.6*(cos(0.8*y))^2*(sin(0.796*x))^2)', 's')

    #Define constraints: fixed start and end points.
    problem.constraints().initial('x-x_0','m') \
                        .initial('y-y_0','m') \
                        .terminal('x-x_f','m')  \
                        .terminal('y-y_f','m')

    #Define constants
    problem.constant('w',0.0,'1') #Initial Terrain weighting factor
    problem.constant('conv',1,'s/m^2') #Integral conversion factor
    problem.constant('V',1,'m/s') #Vehicle speed
    problem.constant('elev',1,'m') #Initial Elevation

    #Unit scaling
    problem.scale.unit('m',1) \
                 .unit('s',1) \
                 .unit('rad',1)

    #Configure solver
    problem.bvp_solver = algorithms.MultipleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=1000, verbose = True, cached = False, number_arcs=4)

    #Initial Guess
    problem.guess.setup('auto',start=[4.9,0.4], costate_guess=[0.1,-0.1]) #City A

    #Add Continuation Steps: first sweep the terminal point, then ramp up
    #the terrain weighting.
    problem.steps.add_step().num_cases(30) \
                .terminal('x', 7.2) \
                .terminal('y', 8.5)

    problem.steps.add_step().num_cases(30) \
                .const('w',0.5) #Final Terrain weighting factor

    return problem
if __name__ == '__main__':
import beluga.Beluga as Beluga
problem = get_problem()
sol = Beluga.run(problem)
| [
"beluga.Beluga.run",
"beluga.bvpsol.algorithms.MultipleShooting"
] | [((2001, 2138), 'beluga.bvpsol.algorithms.MultipleShooting', 'algorithms.MultipleShooting', ([], {'derivative_method': '"""fd"""', 'tolerance': '(0.0001)', 'max_iterations': '(1000)', 'verbose': '(True)', 'cached': '(False)', 'number_arcs': '(4)'}), "(derivative_method='fd', tolerance=0.0001,\n max_iterations=1000, verbose=True, cached=False, number_arcs=4)\n", (2028, 2138), True, 'import beluga.bvpsol.algorithms as algorithms\n'), ((2703, 2722), 'beluga.Beluga.run', 'Beluga.run', (['problem'], {}), '(problem)\n', (2713, 2722), True, 'import beluga.Beluga as Beluga\n')] |
import typing as tp
from dataclasses import dataclass
from datetime import datetime, timedelta
@dataclass
class ChannelUploadInfo:
    """Upload bookkeeping for a single feed channel.

    Attributes:
        uuid: uuid of the DB entry to feed.
        interval: time between consecutive readings.
        factor: multiplier applied to original values, e.g. to convert
            kWh to Wh.
        last_upload: timestamp of the most recent upload to the middleware.
        last_value: most recent value stored in the middleware.
    """

    uuid: str
    interval: timedelta
    factor: float
    last_upload: datetime
    last_value: tp.Union[int, float]
| [
"dataclasses.dataclass"
] | [((98, 109), 'dataclasses.dataclass', 'dataclass', ([], {}), '()\n', (107, 109), False, 'from dataclasses import dataclass\n')] |
from django.db import models
from django.utils import timezone
import datetime
# Create your models here.
class PurchaseOrder(models.Model):
    """Record of a completed Razorpay payment and the points credited for it."""
    # Identifiers returned by Razorpay after checkout; the signature is used
    # to verify the payment server-side.
    razorpay_payment_id=models.CharField(max_length=100)
    razorpay_order_id=models.CharField(max_length=100)
    razorpay_signature=models.CharField(max_length=500)
    # Purchasing user, tracked by an app-level unique id plus email.
    user_unique_id=models.CharField(max_length=40)
    user_email=models.EmailField(max_length=30)
    # Amount charged (currency units) and loyalty points granted for it.
    # NOTE(review): max_length has no effect on FloatField -- confirm whether
    # DecimalField(max_digits=..., decimal_places=...) was intended for money.
    amount_debited=models.FloatField(max_length=20)
    points_added=models.IntegerField()
    # NOTE(review): the purchase timestamp is stored as a string
    # (CharField with default=timezone.now stringifies the datetime);
    # a DateTimeField would allow date queries -- confirm before migrating.
    date=models.CharField(max_length=50,default=timezone.now)

    def __str__(self):
        # Admin/listing display: identify the order by purchaser email.
        return self.user_email
| [
"django.db.models.CharField",
"django.db.models.IntegerField",
"django.db.models.FloatField",
"django.db.models.EmailField"
] | [((164, 196), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (180, 196), False, 'from django.db import models\n'), ((216, 248), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (232, 248), False, 'from django.db import models\n'), ((269, 301), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (285, 301), False, 'from django.db import models\n'), ((318, 349), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (334, 349), False, 'from django.db import models\n'), ((362, 394), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (379, 394), False, 'from django.db import models\n'), ((411, 443), 'django.db.models.FloatField', 'models.FloatField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (428, 443), False, 'from django.db import models\n'), ((458, 479), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (477, 479), False, 'from django.db import models\n'), ((486, 539), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'default': 'timezone.now'}), '(max_length=50, default=timezone.now)\n', (502, 539), False, 'from django.db import models\n')] |
# -*- coding: UTF-8 -*-
import cv2
import matplotlib.pyplot as plt
import numpy as np
class PROIE():
def __init__(self):
#####
pass
# PRIVATE METHODS
def _threshold(self):
#####
self.blur_img = cv2.GaussianBlur(self.in_img_g, (5, 5), 0)
_, self.thresh_img = cv2.threshold(
self.blur_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
def _contours(self):
#####
self.contours, _ = cv2.findContours(
self.thresh_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
self.contours = self.contours[0]
self.contour_img = self.in_img_c.copy()
self.contour_img = cv2.drawContours(
self.contour_img, [self.contours], 0, (255, 0, 0), 2)
    def _landmarks(self):
        """Detect candidate landmarks (local minima of the hand's radial profile).

        Sets ``self.center_point`` (mask centroid) and ``self.landmarks``
        (x/y lists of contour points where the smoothed centroid-distance
        profile has a local minimum -- presumably finger valleys; verify
        against sample images).
        """
        # Centroid of the binary hand mask from image moments.
        M = cv2.moments(self.thresh_img)
        x_c = M['m10'] // M['m00']
        y_c = M['m01'] // M['m00']
        self.center_point = {"x": x_c, "y": y_c}
        self.contours = self.contours.reshape(-1, 2)
        # Rotate the contour so it starts at its top-left-most point
        # (smallest x+y), making the distance profile start position stable.
        left_id = np.argmin(self.contours.sum(-1))
        self.contours = np.concatenate(
            [self.contours[left_id:, :], self.contours[:left_id, :]])
        # Radial distance of every contour point from the centroid.
        dist_c = np.sqrt(np.square(
            self.contours-[self.center_point["x"], self.center_point["y"]]).sum(-1))
        # Low-pass filter the profile: keep only the first 15 Fourier terms
        # to suppress contour noise before extrema detection.
        f = np.fft.rfft(dist_c)
        cutoff = 15
        f_new = np.concatenate([f[:cutoff], 0*f[cutoff:]])
        dist_c_1 = np.fft.irfft(f_new)
        # A -1 -> +1 flip of the derivative's sign marks a local minimum.
        derivative = np.diff(dist_c_1)
        sign_change = np.diff(np.sign(derivative))/2
        self.landmarks = {"x": [], "y": []}
        for landmark in self.contours[np.where(sign_change > 0)[0]]:
            self.landmarks["x"].append(landmark[0])
            self.landmarks["y"].append(landmark[1])
def _landmarks_select(self):
#####
y_rank = np.array(np.argsort(self.landmarks["y"]))
self.landmarks_selected = {"x": np.array(self.landmarks["x"])[
y_rank][:3], "y": np.array(self.landmarks["y"])[y_rank][:3]}
x_rank = np.array(np.argsort(self.landmarks_selected["x"]))
self.landmarks_selected = {
"x": self.landmarks_selected["x"][x_rank][[0, 2]], "y": self.landmarks_selected["y"][x_rank][[0, 2]]}
def _alignement(self):
#####
h, w = self.in_img_g.shape
theta = np.arctan2((self.landmarks_selected["y"][1] - self.landmarks_selected["y"][0]), (
self.landmarks_selected["x"][1] - self.landmarks_selected["x"][0]))*180/np.pi
R = cv2.getRotationMatrix2D(
(self.landmarks_selected["x"][1], self.landmarks_selected["y"][1]), theta, 1)
self.align_img = cv2.warpAffine(self.in_img_g, R, (w, h))
point_1 = [self.landmarks_selected["x"]
[0], self.landmarks_selected["y"][0]]
point_2 = [self.landmarks_selected["x"]
[1], self.landmarks_selected["y"][1]]
point_1 = (R[:, :2] @ point_1 + R[:, -1]).astype(np.int)
point_2 = (R[:, :2] @ point_2 + R[:, -1]).astype(np.int)
self.landmarks_selected_align = {
"x": [point_1[0], point_2[0]], "y": [point_1[1], point_2[1]]}
    def _roi_extract(self):
        """Crop the palm ROI just below the two aligned landmarks.

        Sets the ROI bounds (``ux``/``uy`` upper-left, ``lx``/``ly``
        lower-right), ``self.roi_zone_img`` (colour preview with the ROI
        rectangle drawn) and ``self.roi_img`` (the cropped grayscale ROI).
        """
        point_1 = np.array([self.landmarks_selected_align["x"]
                           [0], self.landmarks_selected_align["y"][0]])
        point_2 = np.array([self.landmarks_selected_align["x"]
                           [1], self.landmarks_selected_align["y"][1]])
        # The ROI spans the landmark width horizontally and sits between
        # 1/3 and 4/3 of that width below the landmark line.
        self.ux = point_1[0]
        self.uy = point_1[1] + (point_2-point_1)[0]//3
        self.lx = point_2[0]
        self.ly = point_2[1] + 4*(point_2-point_1)[0]//3
        self.roi_zone_img = cv2.cvtColor(self.align_img, cv2.COLOR_GRAY2BGR)
        cv2.rectangle(self.roi_zone_img, (self.lx, self.ly),
                      (self.ux, self.uy), (0, 255, 0), 2)
        self.roi_img = self.align_img[self.uy:self.ly, self.ux:self.lx]
# PUBLIC METHODS
def extract_roi(self, path_in_img, rotate=False):
#####
self.in_img_c = cv2.imread(path_in_img)
if(rotate):
self.in_img_c = cv2.rotate(self.in_img_c, cv2.ROTATE_90_CLOCKWISE)
if len(self.in_img_c.shape) == 3:
self.in_img_g = cv2.cvtColor(self.in_img_c, cv2.COLOR_BGR2GRAY)
else:
self.in_img_g = self.in_img_c
self._threshold()
self._contours()
self._landmarks()
self._landmarks_select()
self._alignement()
self._roi_extract()
def save(self, path_out_img):
#####
cv2.imwrite(path_out_img, self.roi_img)
    def show_result(self):
        """Visualise every stage of the pipeline in a 2x4 matplotlib grid.

        Panels: original, threshold, contour+centroid, all landmarks,
        selected landmarks, aligned image, ROI zone, extracted ROI.
        """
        plt.figure()
        plt.subplot(241)
        plt.imshow(self.in_img_g, cmap="gray")
        plt.title("original")
        plt.subplot(242)
        plt.imshow(self.thresh_img, cmap="gray")
        plt.title("threshold")
        plt.subplot(243)
        plt.imshow(self.contour_img, cmap="gray")
        plt.plot(self.center_point["x"], self.center_point["y"], 'bx')
        plt.title("contours")
        plt.subplot(244)
        plt.imshow(self.in_img_c, cmap="gray")
        # Mark every candidate landmark with a red cross.
        for idx in range(len(self.landmarks["x"])):
            plt.plot(self.landmarks["x"][idx], self.landmarks["y"][idx], 'rx')
        plt.title("landmarks")
        plt.subplot(245)
        plt.imshow(self.in_img_c, cmap="gray")
        plt.plot(self.landmarks_selected["x"][0],
                 self.landmarks_selected["y"][0], 'rx')
        plt.plot(self.landmarks_selected["x"][1],
                 self.landmarks_selected["y"][1], 'rx')
        plt.title("selected")
        plt.subplot(246)
        plt.imshow(self.align_img, cmap="gray")
        plt.plot(self.landmarks_selected_align["x"][0],
                 self.landmarks_selected_align["y"][0], 'rx')
        plt.plot(self.landmarks_selected_align["x"][1],
                 self.landmarks_selected_align["y"][1], 'rx')
        plt.title("alignement")
        plt.subplot(247)
        plt.imshow(self.roi_zone_img, cmap="gray")
        plt.title("roi zone")
        plt.subplot(248)
        plt.imshow(self.roi_img, cmap="gray")
        plt.title("extraction")
plt.show() | [
"matplotlib.pyplot.title",
"cv2.GaussianBlur",
"numpy.fft.rfft",
"numpy.arctan2",
"numpy.argsort",
"cv2.warpAffine",
"matplotlib.pyplot.figure",
"cv2.rectangle",
"cv2.getRotationMatrix2D",
"numpy.fft.irfft",
"cv2.cvtColor",
"cv2.imwrite",
"matplotlib.pyplot.imshow",
"cv2.drawContours",
"... | [((244, 286), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['self.in_img_g', '(5, 5)', '(0)'], {}), '(self.in_img_g, (5, 5), 0)\n', (260, 286), False, 'import cv2\n'), ((316, 389), 'cv2.threshold', 'cv2.threshold', (['self.blur_img', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(self.blur_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (329, 389), False, 'import cv2\n'), ((470, 541), 'cv2.findContours', 'cv2.findContours', (['self.thresh_img', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_NONE'], {}), '(self.thresh_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n', (486, 541), False, 'import cv2\n'), ((671, 741), 'cv2.drawContours', 'cv2.drawContours', (['self.contour_img', '[self.contours]', '(0)', '(255, 0, 0)', '(2)'], {}), '(self.contour_img, [self.contours], 0, (255, 0, 0), 2)\n', (687, 741), False, 'import cv2\n'), ((808, 836), 'cv2.moments', 'cv2.moments', (['self.thresh_img'], {}), '(self.thresh_img)\n', (819, 836), False, 'import cv2\n'), ((1084, 1156), 'numpy.concatenate', 'np.concatenate', (['[self.contours[left_id:, :], self.contours[:left_id, :]]'], {}), '([self.contours[left_id:, :], self.contours[:left_id, :]])\n', (1098, 1156), True, 'import numpy as np\n'), ((1303, 1322), 'numpy.fft.rfft', 'np.fft.rfft', (['dist_c'], {}), '(dist_c)\n', (1314, 1322), True, 'import numpy as np\n'), ((1359, 1403), 'numpy.concatenate', 'np.concatenate', (['[f[:cutoff], 0 * f[cutoff:]]'], {}), '([f[:cutoff], 0 * f[cutoff:]])\n', (1373, 1403), True, 'import numpy as np\n'), ((1421, 1440), 'numpy.fft.irfft', 'np.fft.irfft', (['f_new'], {}), '(f_new)\n', (1433, 1440), True, 'import numpy as np\n'), ((1462, 1479), 'numpy.diff', 'np.diff', (['dist_c_1'], {}), '(dist_c_1)\n', (1469, 1479), True, 'import numpy as np\n'), ((2497, 2603), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (["(self.landmarks_selected['x'][1], self.landmarks_selected['y'][1])", 'theta', '(1)'], {}), "((self.landmarks_selected['x'][1], self.\n landmarks_selected['y'][1]), theta, 
1)\n", (2520, 2603), False, 'import cv2\n'), ((2637, 2677), 'cv2.warpAffine', 'cv2.warpAffine', (['self.in_img_g', 'R', '(w, h)'], {}), '(self.in_img_g, R, (w, h))\n', (2651, 2677), False, 'import cv2\n'), ((3198, 3291), 'numpy.array', 'np.array', (["[self.landmarks_selected_align['x'][0], self.landmarks_selected_align['y'][0]]"], {}), "([self.landmarks_selected_align['x'][0], self.\n landmarks_selected_align['y'][0]])\n", (3206, 3291), True, 'import numpy as np\n'), ((3334, 3427), 'numpy.array', 'np.array', (["[self.landmarks_selected_align['x'][1], self.landmarks_selected_align['y'][1]]"], {}), "([self.landmarks_selected_align['x'][1], self.\n landmarks_selected_align['y'][1]])\n", (3342, 3427), True, 'import numpy as np\n'), ((3652, 3700), 'cv2.cvtColor', 'cv2.cvtColor', (['self.align_img', 'cv2.COLOR_GRAY2BGR'], {}), '(self.align_img, cv2.COLOR_GRAY2BGR)\n', (3664, 3700), False, 'import cv2\n'), ((3709, 3801), 'cv2.rectangle', 'cv2.rectangle', (['self.roi_zone_img', '(self.lx, self.ly)', '(self.ux, self.uy)', '(0, 255, 0)', '(2)'], {}), '(self.roi_zone_img, (self.lx, self.ly), (self.ux, self.uy), (0,\n 255, 0), 2)\n', (3722, 3801), False, 'import cv2\n'), ((4008, 4031), 'cv2.imread', 'cv2.imread', (['path_in_img'], {}), '(path_in_img)\n', (4018, 4031), False, 'import cv2\n'), ((4530, 4569), 'cv2.imwrite', 'cv2.imwrite', (['path_out_img', 'self.roi_img'], {}), '(path_out_img, self.roi_img)\n', (4541, 4569), False, 'import cv2\n'), ((4620, 4632), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4630, 4632), True, 'import matplotlib.pyplot as plt\n'), ((4642, 4658), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(241)'], {}), '(241)\n', (4653, 4658), True, 'import matplotlib.pyplot as plt\n'), ((4667, 4705), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.in_img_g'], {'cmap': '"""gray"""'}), "(self.in_img_g, cmap='gray')\n", (4677, 4705), True, 'import matplotlib.pyplot as plt\n'), ((4714, 4735), 'matplotlib.pyplot.title', 'plt.title', 
(['"""original"""'], {}), "('original')\n", (4723, 4735), True, 'import matplotlib.pyplot as plt\n'), ((4745, 4761), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(242)'], {}), '(242)\n', (4756, 4761), True, 'import matplotlib.pyplot as plt\n'), ((4770, 4810), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.thresh_img'], {'cmap': '"""gray"""'}), "(self.thresh_img, cmap='gray')\n", (4780, 4810), True, 'import matplotlib.pyplot as plt\n'), ((4819, 4841), 'matplotlib.pyplot.title', 'plt.title', (['"""threshold"""'], {}), "('threshold')\n", (4828, 4841), True, 'import matplotlib.pyplot as plt\n'), ((4851, 4867), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(243)'], {}), '(243)\n', (4862, 4867), True, 'import matplotlib.pyplot as plt\n'), ((4876, 4917), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.contour_img'], {'cmap': '"""gray"""'}), "(self.contour_img, cmap='gray')\n", (4886, 4917), True, 'import matplotlib.pyplot as plt\n'), ((4926, 4988), 'matplotlib.pyplot.plot', 'plt.plot', (["self.center_point['x']", "self.center_point['y']", '"""bx"""'], {}), "(self.center_point['x'], self.center_point['y'], 'bx')\n", (4934, 4988), True, 'import matplotlib.pyplot as plt\n'), ((4997, 5018), 'matplotlib.pyplot.title', 'plt.title', (['"""contours"""'], {}), "('contours')\n", (5006, 5018), True, 'import matplotlib.pyplot as plt\n'), ((5028, 5044), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(244)'], {}), '(244)\n', (5039, 5044), True, 'import matplotlib.pyplot as plt\n'), ((5053, 5091), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.in_img_c'], {'cmap': '"""gray"""'}), "(self.in_img_c, cmap='gray')\n", (5063, 5091), True, 'import matplotlib.pyplot as plt\n'), ((5231, 5253), 'matplotlib.pyplot.title', 'plt.title', (['"""landmarks"""'], {}), "('landmarks')\n", (5240, 5253), True, 'import matplotlib.pyplot as plt\n'), ((5263, 5279), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(245)'], {}), '(245)\n', (5274, 5279), True, 'import matplotlib.pyplot as plt\n'), ((5288, 
5326), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.in_img_c'], {'cmap': '"""gray"""'}), "(self.in_img_c, cmap='gray')\n", (5298, 5326), True, 'import matplotlib.pyplot as plt\n'), ((5335, 5420), 'matplotlib.pyplot.plot', 'plt.plot', (["self.landmarks_selected['x'][0]", "self.landmarks_selected['y'][0]", '"""rx"""'], {}), "(self.landmarks_selected['x'][0], self.landmarks_selected['y'][0], 'rx'\n )\n", (5343, 5420), True, 'import matplotlib.pyplot as plt\n'), ((5441, 5526), 'matplotlib.pyplot.plot', 'plt.plot', (["self.landmarks_selected['x'][1]", "self.landmarks_selected['y'][1]", '"""rx"""'], {}), "(self.landmarks_selected['x'][1], self.landmarks_selected['y'][1], 'rx'\n )\n", (5449, 5526), True, 'import matplotlib.pyplot as plt\n'), ((5547, 5568), 'matplotlib.pyplot.title', 'plt.title', (['"""selected"""'], {}), "('selected')\n", (5556, 5568), True, 'import matplotlib.pyplot as plt\n'), ((5578, 5594), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(246)'], {}), '(246)\n', (5589, 5594), True, 'import matplotlib.pyplot as plt\n'), ((5603, 5642), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.align_img'], {'cmap': '"""gray"""'}), "(self.align_img, cmap='gray')\n", (5613, 5642), True, 'import matplotlib.pyplot as plt\n'), ((5651, 5748), 'matplotlib.pyplot.plot', 'plt.plot', (["self.landmarks_selected_align['x'][0]", "self.landmarks_selected_align['y'][0]", '"""rx"""'], {}), "(self.landmarks_selected_align['x'][0], self.\n landmarks_selected_align['y'][0], 'rx')\n", (5659, 5748), True, 'import matplotlib.pyplot as plt\n'), ((5769, 5866), 'matplotlib.pyplot.plot', 'plt.plot', (["self.landmarks_selected_align['x'][1]", "self.landmarks_selected_align['y'][1]", '"""rx"""'], {}), "(self.landmarks_selected_align['x'][1], self.\n landmarks_selected_align['y'][1], 'rx')\n", (5777, 5866), True, 'import matplotlib.pyplot as plt\n'), ((5887, 5910), 'matplotlib.pyplot.title', 'plt.title', (['"""alignement"""'], {}), "('alignement')\n", (5896, 5910), True, 'import 
matplotlib.pyplot as plt\n'), ((5920, 5936), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(247)'], {}), '(247)\n', (5931, 5936), True, 'import matplotlib.pyplot as plt\n'), ((5945, 5987), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.roi_zone_img'], {'cmap': '"""gray"""'}), "(self.roi_zone_img, cmap='gray')\n", (5955, 5987), True, 'import matplotlib.pyplot as plt\n'), ((5996, 6017), 'matplotlib.pyplot.title', 'plt.title', (['"""roi zone"""'], {}), "('roi zone')\n", (6005, 6017), True, 'import matplotlib.pyplot as plt\n'), ((6027, 6043), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(248)'], {}), '(248)\n', (6038, 6043), True, 'import matplotlib.pyplot as plt\n'), ((6052, 6089), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.roi_img'], {'cmap': '"""gray"""'}), "(self.roi_img, cmap='gray')\n", (6062, 6089), True, 'import matplotlib.pyplot as plt\n'), ((6098, 6121), 'matplotlib.pyplot.title', 'plt.title', (['"""extraction"""'], {}), "('extraction')\n", (6107, 6121), True, 'import matplotlib.pyplot as plt\n'), ((6131, 6141), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6139, 6141), True, 'import matplotlib.pyplot as plt\n'), ((1824, 1855), 'numpy.argsort', 'np.argsort', (["self.landmarks['y']"], {}), "(self.landmarks['y'])\n", (1834, 1855), True, 'import numpy as np\n'), ((2028, 2068), 'numpy.argsort', 'np.argsort', (["self.landmarks_selected['x']"], {}), "(self.landmarks_selected['x'])\n", (2038, 2068), True, 'import numpy as np\n'), ((4081, 4131), 'cv2.rotate', 'cv2.rotate', (['self.in_img_c', 'cv2.ROTATE_90_CLOCKWISE'], {}), '(self.in_img_c, cv2.ROTATE_90_CLOCKWISE)\n', (4091, 4131), False, 'import cv2\n'), ((4203, 4250), 'cv2.cvtColor', 'cv2.cvtColor', (['self.in_img_c', 'cv2.COLOR_BGR2GRAY'], {}), '(self.in_img_c, cv2.COLOR_BGR2GRAY)\n', (4215, 4250), False, 'import cv2\n'), ((5156, 5222), 'matplotlib.pyplot.plot', 'plt.plot', (["self.landmarks['x'][idx]", "self.landmarks['y'][idx]", '"""rx"""'], {}), "(self.landmarks['x'][idx], 
self.landmarks['y'][idx], 'rx')\n", (5164, 5222), True, 'import matplotlib.pyplot as plt\n'), ((1510, 1529), 'numpy.sign', 'np.sign', (['derivative'], {}), '(derivative)\n', (1517, 1529), True, 'import numpy as np\n'), ((1615, 1640), 'numpy.where', 'np.where', (['(sign_change > 0)'], {}), '(sign_change > 0)\n', (1623, 1640), True, 'import numpy as np\n'), ((2313, 2462), 'numpy.arctan2', 'np.arctan2', (["(self.landmarks_selected['y'][1] - self.landmarks_selected['y'][0])", "(self.landmarks_selected['x'][1] - self.landmarks_selected['x'][0])"], {}), "(self.landmarks_selected['y'][1] - self.landmarks_selected['y'][0\n ], self.landmarks_selected['x'][1] - self.landmarks_selected['x'][0])\n", (2323, 2462), True, 'import numpy as np\n'), ((1195, 1270), 'numpy.square', 'np.square', (["(self.contours - [self.center_point['x'], self.center_point['y']])"], {}), "(self.contours - [self.center_point['x'], self.center_point['y']])\n", (1204, 1270), True, 'import numpy as np\n'), ((1897, 1926), 'numpy.array', 'np.array', (["self.landmarks['x']"], {}), "(self.landmarks['x'])\n", (1905, 1926), True, 'import numpy as np\n'), ((1958, 1987), 'numpy.array', 'np.array', (["self.landmarks['y']"], {}), "(self.landmarks['y'])\n", (1966, 1987), True, 'import numpy as np\n')] |
import numpy as np
from pandas.testing import assert_frame_equal
import warnings
from hdmf.backends.hdf5 import HDF5IO
from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable
from hdmf.testing import TestCase, remove_test_file
class TestAlignedDynamicTableContainer(TestCase):
"""
Test the AlignedDynamicTable Container class.
"""
    def setUp(self):
        # Make every warning fire on each occurrence so tests can assert on them.
        warnings.simplefilter("always")  # Trigger all warnings
        # Scratch HDF5 file some tests write; removed again in tearDown.
        self.path = 'test_icephys_meta_intracellularrecording.h5'
    def tearDown(self):
        # Remove the scratch HDF5 file if a test created it.
        remove_test_file(self.path)
    def test_init(self):
        """Test that just checks that populating the tables with data works correctly"""
        # Constructing with only name/description (no categories) must not raise.
        AlignedDynamicTable(
            name='test_aligned_table',
            description='Test aligned container')
    def test_init_categories_without_category_tables_error(self):
        # Test raise error if categories is given without category_tables
        with self.assertRaisesWith(ValueError, "Categories provided but no category_tables given"):
            AlignedDynamicTable(
                name='test_aligned_table',
                description='Test aligned container',
                categories=['cat1', 'cat2'])
def test_init_length_mismatch_between_categories_and_category_tables(self):
# Test length mismatch between categories and category_tables
with self.assertRaisesWith(ValueError, "0 category_tables given but 2 categories specified"):
AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
categories=['cat1', 'cat2'],
category_tables=[])
def test_init_category_table_names_do_not_match_categories(self):
# Construct some categories for testing
category_names = ['test1', 'test2', 'test3']
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=val+t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
) for val in category_names]
# Test add category_table that is not listed in the categories list
with self.assertRaisesWith(ValueError,
"DynamicTable test3 does not appear in categories ['test1', 'test2', 't3']"):
AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
categories=['test1', 'test2', 't3'], # bad name for 'test3'
category_tables=categories)
def test_init_duplicate_category_table_name(self):
# Test duplicate table name
with self.assertRaisesWith(ValueError, "Duplicate table name test1 found in input dynamic_tables"):
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=val+t,
description=val+t+' description',
data=np.arange(10)) for t in ['c1', 'c2', 'c3']]
) for val in ['test1', 'test1', 'test3']]
AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
categories=['test1', 'test2', 'test3'],
category_tables=categories)
def test_init_misaligned_category_tables(self):
"""Test misaligned category tables"""
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=val+t,
description=val+t+' description',
data=np.arange(10)) for t in ['c1', 'c2', 'c3']]
) for val in ['test1', 'test2']]
categories.append(DynamicTable(name='test3',
description="test3 description",
columns=[VectorData(name='test3 '+t,
description='test3 '+t+' description',
data=np.arange(8)) for t in ['c1', 'c2', 'c3']]))
with self.assertRaisesWith(ValueError,
"Category DynamicTable test3 does not align, it has 8 rows expected 10"):
AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
categories=['test1', 'test2', 'test3'],
category_tables=categories)
def test_init_with_custom_empty_categories(self):
"""Test that we can create an empty table with custom categories"""
category_names = ['test1', 'test2', 'test3']
categories = [DynamicTable(name=val, description=val+" description") for val in category_names]
AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories)
def test_init_with_custom_nonempty_categories(self):
"""Test that we can create an empty table with custom categories"""
category_names = ['test1', 'test2', 'test3']
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=val+t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
) for val in category_names]
temp = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories)
self.assertEqual(temp.categories, category_names)
def test_init_with_custom_nonempty_categories_and_main(self):
"""
Test that we can create a non-empty table with custom non-empty categories
"""
category_names = ['test1', 'test2', 'test3']
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
) for val in category_names]
temp = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories,
columns=[VectorData(name='main_' + t,
description='main_'+t+'_description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']])
self.assertEqual(temp.categories, category_names)
self.assertTrue('test1' in temp) # test that contains category works
self.assertTrue(('test1', 'c1') in temp) # test that contains a column works
# test the error case of a tuple with len !=2
with self.assertRaisesWith(ValueError, "Expected tuple of strings of length 2 got tuple of length 3"):
('test1', 'c1', 't3') in temp
self.assertTupleEqual(temp.colnames, ('main_c1', 'main_c2', 'main_c3')) # confirm column names
def test_init_with_custom_misaligned_categories(self):
"""Test that we cannot create an empty table with custom categories"""
num_rows = 10
val1 = 'test1'
val2 = 'test2'
categories = [DynamicTable(name=val1,
description=val1+" description",
columns=[VectorData(name=val1+t,
description=val1+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]),
DynamicTable(name=val2,
description=val2+" description",
columns=[VectorData(name=val2+t,
description=val2+t+' description',
data=np.arange(num_rows+1)) for t in ['c1', 'c2', 'c3']])
]
with self.assertRaisesWith(ValueError,
"Category DynamicTable test2 does not align, it has 11 rows expected 10"):
AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories)
def test_init_with_duplicate_custom_categories(self):
"""Test that we can create an empty table with custom categories"""
category_names = ['test1', 'test1']
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=val+t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
) for val in category_names]
with self.assertRaisesWith(ValueError, "Duplicate table name test1 found in input dynamic_tables"):
AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories)
def test_init_with_bad_custom_categories(self):
"""Test that we cannot provide a category that is not a DynamicTable"""
num_rows = 10
categories = [ # good category
DynamicTable(name='test1',
description="test1 description",
columns=[VectorData(name='test1'+t,
description='test1' + t + ' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
),
# use a list as a bad category example
[0, 1, 2]]
with self.assertRaisesWith(ValueError, "Category table with index 1 is not a DynamicTable"):
AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories)
def test_round_trip_container(self):
"""Test read and write the container by itself"""
category_names = ['test1', 'test2', 'test3']
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
) for val in category_names]
curr = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories)
with HDF5IO(self.path, manager=get_manager(), mode='w') as io:
io.write(curr)
with HDF5IO(self.path, manager=get_manager(), mode='r') as io:
incon = io.read()
self.assertListEqual(incon.categories, curr.categories)
for n in category_names:
assert_frame_equal(incon[n], curr[n])
def test_add_category(self):
"""Test that we can correct a non-empty category to an existing table"""
category_names = ['test1', 'test2', 'test3']
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=val+t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
) for val in category_names]
adt = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories[0:2])
self.assertListEqual(adt.categories, category_names[0:2])
adt.add_category(categories[-1])
self.assertListEqual(adt.categories, category_names)
def test_add_category_misaligned_rows(self):
"""Test that we can correct a non-empty category to an existing table"""
category_names = ['test1', 'test2']
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=val+t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
) for val in category_names]
adt = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories)
self.assertListEqual(adt.categories, category_names)
with self.assertRaisesWith(ValueError, "New category DynamicTable does not align, it has 8 rows expected 10"):
adt.add_category(DynamicTable(name='test3',
description='test3_description',
columns=[VectorData(name='test3_'+t,
description='test3 '+t+' description',
data=np.arange(num_rows - 2)) for t in ['c1', 'c2', 'c3']
]))
def test_add_category_already_in_table(self):
category_names = ['test1', 'test2', 'test2']
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=val+t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
) for val in category_names]
adt = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories[0:2])
self.assertListEqual(adt.categories, category_names[0:2])
with self.assertRaisesWith(ValueError, "Category test2 already in the table"):
adt.add_category(categories[-1])
def test_add_column(self):
adt = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
columns=[VectorData(name='test_'+t,
description='test_'+t+' description',
data=np.arange(10)) for t in ['c1', 'c2', 'c3']])
# Test successful add
adt.add_column(name='testA', description='testA', data=np.arange(10))
self.assertTupleEqual(adt.colnames, ('test_c1', 'test_c2', 'test_c3', 'testA'))
def test_add_column_bad_category(self):
"""Test add column with bad category"""
adt = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
columns=[VectorData(name='test_'+t,
description='test_'+t+' description',
data=np.arange(10)) for t in ['c1', 'c2', 'c3']])
with self.assertRaisesWith(KeyError, "'Category mycat not in table'"):
adt.add_column(category='mycat', name='testA', description='testA', data=np.arange(10))
def test_add_column_bad_length(self):
"""Test add column that is too short"""
adt = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
columns=[VectorData(name='test_'+t,
description='test_'+t+' description',
data=np.arange(10)) for t in ['c1', 'c2', 'c3']])
# Test successful add
with self.assertRaisesWith(ValueError, "column must have the same number of rows as 'id'"):
adt.add_column(name='testA', description='testA', data=np.arange(8))
def test_add_column_to_subcategory(self):
"""Test adding a column to a subcategory"""
category_names = ['test1', 'test2', 'test3']
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=val+t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
) for val in category_names]
adt = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories)
self.assertListEqual(adt.categories, category_names)
# Test successful add
adt.add_column(category='test2', name='testA', description='testA', data=np.arange(10))
self.assertTupleEqual(adt.get_category('test2').colnames, ('test2c1', 'test2c2', 'test2c3', 'testA'))
def test_add_row(self):
"""Test adding a row to a non_empty table"""
category_names = ['test1', ]
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2']]
) for val in category_names]
temp = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories,
columns=[VectorData(name='main_' + t,
description='main_'+t+'_description',
data=np.arange(num_rows)) for t in ['c1', 'c2']])
self.assertListEqual(temp.categories, category_names)
# Test successful add
temp.add_row(test1=dict(c1=1, c2=2), main_c1=3, main_c2=5)
self.assertListEqual(temp[10].iloc[0].tolist(), [3, 5, 10, 1, 2])
# Test successful add version 2
temp.add_row(data=dict(test1=dict(c1=1, c2=2), main_c1=4, main_c2=5))
self.assertListEqual(temp[11].iloc[0].tolist(), [4, 5, 11, 1, 2])
# Test missing categories data
with self.assertRaises(KeyError) as ke:
temp.add_row(main_c1=3, main_c2=5)
self.assertTrue("row data keys do not match" in str(ke.exception))
def test_get_item(self):
"""Test getting elements from the table"""
category_names = ['test1', ]
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=t,
description=val+t+' description',
data=np.arange(num_rows) + i + 3)
for i, t in enumerate(['c1', 'c2'])]
) for val in category_names]
temp = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories,
columns=[VectorData(name='main_' + t,
description='main_'+t+'_description',
data=np.arange(num_rows)+2) for t in ['c1', 'c2']])
self.assertListEqual(temp.categories, category_names)
# Test slicing with a single index
self.assertListEqual(temp[5].iloc[0].tolist(), [7, 7, 5, 8, 9])
# Test slice with list
self.assertListEqual(temp[[5, 7]].iloc[0].tolist(), [7, 7, 5, 8, 9])
self.assertListEqual(temp[[5, 7]].iloc[1].tolist(), [9, 9, 7, 10, 11])
# Test slice with slice
self.assertListEqual(temp[5:7].iloc[0].tolist(), [7, 7, 5, 8, 9])
self.assertListEqual(temp[5:7].iloc[1].tolist(), [8, 8, 6, 9, 10])
# Test slice with numpy index arrya
self.assertListEqual(temp[np.asarray([5, 8])].iloc[0].tolist(), [7, 7, 5, 8, 9])
self.assertListEqual(temp[np.asarray([5, 8])].iloc[1].tolist(), [10, 10, 8, 11, 12])
# Test slicing for a single column
self.assertListEqual(temp['main_c1'][:].tolist(), (np.arange(num_rows)+2).tolist())
# Test slicing for a single category
assert_frame_equal(temp['test1'], categories[0].to_dataframe())
# Test getting the main table
assert_frame_equal(temp[None], temp.to_dataframe())
# Test getting a specific column
self.assertListEqual(temp['test1', 'c1'][:].tolist(), (np.arange(num_rows) + 3).tolist())
# Test getting a specific cell
self.assertEqual(temp[None, 'main_c1', 1], 3)
# Test bad selection tuple
with self.assertRaisesWith(ValueError,
"Expected tuple of length 2 or 3 with (category, column, row) as value."):
temp[('main_c1',)]
def test_to_dataframe(self):
"""Test that the to_dataframe method works"""
category_names = ['test1', 'test2', 'test3']
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
) for val in category_names]
adt = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories,
columns=[VectorData(name='main_' + t,
description='main_'+t+'_description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']])
# Test the to_dataframe method with default settings
tdf = adt.to_dataframe()
self.assertListEqual(tdf.index.tolist(), list(range(10)))
self.assertTupleEqual(tdf.index.name, ('test_aligned_table', 'id'))
expected_cols = [('test_aligned_table', 'main_c1'),
('test_aligned_table', 'main_c2'),
('test_aligned_table', 'main_c3'),
('test1', 'id'), ('test1', 'c1'), ('test1', 'c2'), ('test1', 'c3'),
('test2', 'id'), ('test2', 'c1'), ('test2', 'c2'), ('test2', 'c3'),
('test3', 'id'), ('test3', 'c1'), ('test3', 'c2'), ('test3', 'c3')]
tdf_cols = tdf.columns.tolist()
for v in zip(expected_cols, tdf_cols):
self.assertTupleEqual(v[0], v[1])
# test the to_dataframe method with ignore_category_ids set to True
tdf = adt.to_dataframe(ignore_category_ids=True)
self.assertListEqual(tdf.index.tolist(), list(range(10)))
self.assertTupleEqual(tdf.index.name, ('test_aligned_table', 'id'))
expected_cols = [('test_aligned_table', 'main_c1'),
('test_aligned_table', 'main_c2'),
('test_aligned_table', 'main_c3'),
('test1', 'c1'), ('test1', 'c2'), ('test1', 'c3'),
('test2', 'c1'), ('test2', 'c2'), ('test2', 'c3'),
('test3', 'c1'), ('test3', 'c2'), ('test3', 'c3')]
tdf_cols = tdf.columns.tolist()
for v in zip(expected_cols, tdf_cols):
self.assertTupleEqual(v[0], v[1])
def test_nested_aligned_dynamic_table_not_allowed(self):
"""
Test that using and AlignedDynamicTable as category for an AlignedDynamicTable is not allowed
"""
# create an AlignedDynamicTable as category
subsubcol1 = VectorData(name='sub_sub_column1', description='test sub sub column', data=['test11', 'test12'])
sub_category = DynamicTable(name='sub_category1', description='test subcategory table', columns=[subsubcol1, ])
subcol1 = VectorData(name='sub_column1', description='test-subcolumn', data=['test1', 'test2'])
adt_category = AlignedDynamicTable(
name='category1',
description='test using AlignedDynamicTable as a category',
columns=[subcol1, ],
category_tables=[sub_category, ])
# Create a regular column for our main AlignedDynamicTable
col1 = VectorData(name='column1', description='regular test column', data=['test1', 'test2'])
# test 1: Make sure we can't add the AlignedDynamicTable category on init
msg = ("Category table with index %i is an AlignedDynamicTable. "
"Nesting of AlignedDynamicTable is currently not supported." % 0)
with self.assertRaisesWith(ValueError, msg):
# create the nested AlignedDynamicTable with our adt_category as a sub-category
AlignedDynamicTable(
name='nested_adt',
description='test nesting AlignedDynamicTable',
columns=[col1, ],
category_tables=[adt_category, ])
# test 2: Make sure we can't add the AlignedDynamicTable category via add_category
adt = AlignedDynamicTable(
name='nested_adt',
description='test nesting AlignedDynamicTable',
columns=[col1, ])
msg = "Category is an AlignedDynamicTable. Nesting of AlignedDynamicTable is currently not supported."
with self.assertRaisesWith(ValueError, msg):
adt.add_category(adt_category)
| [
"pandas.testing.assert_frame_equal",
"warnings.simplefilter",
"hdmf.common.VectorData",
"numpy.asarray",
"hdmf.common.DynamicTable",
"hdmf.common.AlignedDynamicTable",
"numpy.arange",
"hdmf.common.get_manager",
"hdmf.testing.remove_test_file"
] | [((402, 433), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (423, 433), False, 'import warnings\n'), ((557, 584), 'hdmf.testing.remove_test_file', 'remove_test_file', (['self.path'], {}), '(self.path)\n', (573, 584), False, 'from hdmf.testing import TestCase, remove_test_file\n'), ((708, 797), 'hdmf.common.AlignedDynamicTable', 'AlignedDynamicTable', ([], {'name': '"""test_aligned_table"""', 'description': '"""Test aligned container"""'}), "(name='test_aligned_table', description=\n 'Test aligned container')\n", (727, 797), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((5354, 5471), 'hdmf.common.AlignedDynamicTable', 'AlignedDynamicTable', ([], {'name': '"""test_aligned_table"""', 'description': '"""Test aligned container"""', 'category_tables': 'categories'}), "(name='test_aligned_table', description=\n 'Test aligned container', category_tables=categories)\n", (5373, 5471), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((6170, 6287), 'hdmf.common.AlignedDynamicTable', 'AlignedDynamicTable', ([], {'name': '"""test_aligned_table"""', 'description': '"""Test aligned container"""', 'category_tables': 'categories'}), "(name='test_aligned_table', description=\n 'Test aligned container', category_tables=categories)\n", (6189, 6287), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((11853, 11970), 'hdmf.common.AlignedDynamicTable', 'AlignedDynamicTable', ([], {'name': '"""test_aligned_table"""', 'description': '"""Test aligned container"""', 'category_tables': 'categories'}), "(name='test_aligned_table', description=\n 'Test aligned container', category_tables=categories)\n", (11872, 11970), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((13009, 13131), 'hdmf.common.AlignedDynamicTable', 'AlignedDynamicTable', ([], {'name': 
'"""test_aligned_table"""', 'description': '"""Test aligned container"""', 'category_tables': 'categories[0:2]'}), "(name='test_aligned_table', description=\n 'Test aligned container', category_tables=categories[0:2])\n", (13028, 13131), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((13985, 14102), 'hdmf.common.AlignedDynamicTable', 'AlignedDynamicTable', ([], {'name': '"""test_aligned_table"""', 'description': '"""Test aligned container"""', 'category_tables': 'categories'}), "(name='test_aligned_table', description=\n 'Test aligned container', category_tables=categories)\n", (14004, 14102), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((15383, 15505), 'hdmf.common.AlignedDynamicTable', 'AlignedDynamicTable', ([], {'name': '"""test_aligned_table"""', 'description': '"""Test aligned container"""', 'category_tables': 'categories[0:2]'}), "(name='test_aligned_table', description=\n 'Test aligned container', category_tables=categories[0:2])\n", (15402, 15505), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((18141, 18258), 'hdmf.common.AlignedDynamicTable', 'AlignedDynamicTable', ([], {'name': '"""test_aligned_table"""', 'description': '"""Test aligned container"""', 'category_tables': 'categories'}), "(name='test_aligned_table', description=\n 'Test aligned container', category_tables=categories)\n", (18160, 18258), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((25625, 25726), 'hdmf.common.VectorData', 'VectorData', ([], {'name': '"""sub_sub_column1"""', 'description': '"""test sub sub column"""', 'data': "['test11', 'test12']"}), "(name='sub_sub_column1', description='test sub sub column', data=\n ['test11', 'test12'])\n", (25635, 25726), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((25745, 25843), 
'hdmf.common.DynamicTable', 'DynamicTable', ([], {'name': '"""sub_category1"""', 'description': '"""test subcategory table"""', 'columns': '[subsubcol1]'}), "(name='sub_category1', description='test subcategory table',\n columns=[subsubcol1])\n", (25757, 25843), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((25860, 25949), 'hdmf.common.VectorData', 'VectorData', ([], {'name': '"""sub_column1"""', 'description': '"""test-subcolumn"""', 'data': "['test1', 'test2']"}), "(name='sub_column1', description='test-subcolumn', data=['test1',\n 'test2'])\n", (25870, 25949), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((25969, 26126), 'hdmf.common.AlignedDynamicTable', 'AlignedDynamicTable', ([], {'name': '"""category1"""', 'description': '"""test using AlignedDynamicTable as a category"""', 'columns': '[subcol1]', 'category_tables': '[sub_category]'}), "(name='category1', description=\n 'test using AlignedDynamicTable as a category', columns=[subcol1],\n category_tables=[sub_category])\n", (25988, 26126), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((26254, 26344), 'hdmf.common.VectorData', 'VectorData', ([], {'name': '"""column1"""', 'description': '"""regular test column"""', 'data': "['test1', 'test2']"}), "(name='column1', description='regular test column', data=['test1',\n 'test2'])\n", (26264, 26344), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((27046, 27153), 'hdmf.common.AlignedDynamicTable', 'AlignedDynamicTable', ([], {'name': '"""nested_adt"""', 'description': '"""test nesting AlignedDynamicTable"""', 'columns': '[col1]'}), "(name='nested_adt', description=\n 'test nesting AlignedDynamicTable', columns=[col1])\n", (27065, 27153), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((1071, 1189), 
'hdmf.common.AlignedDynamicTable', 'AlignedDynamicTable', ([], {'name': '"""test_aligned_table"""', 'description': '"""Test aligned container"""', 'categories': "['cat1', 'cat2']"}), "(name='test_aligned_table', description=\n 'Test aligned container', categories=['cat1', 'cat2'])\n", (1090, 1189), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((1500, 1638), 'hdmf.common.AlignedDynamicTable', 'AlignedDynamicTable', ([], {'name': '"""test_aligned_table"""', 'description': '"""Test aligned container"""', 'categories': "['cat1', 'cat2']", 'category_tables': '[]'}), "(name='test_aligned_table', description=\n 'Test aligned container', categories=['cat1', 'cat2'], category_tables=[])\n", (1519, 1638), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((2583, 2741), 'hdmf.common.AlignedDynamicTable', 'AlignedDynamicTable', ([], {'name': '"""test_aligned_table"""', 'description': '"""Test aligned container"""', 'categories': "['test1', 'test2', 't3']", 'category_tables': 'categories'}), "(name='test_aligned_table', description=\n 'Test aligned container', categories=['test1', 'test2', 't3'],\n category_tables=categories)\n", (2602, 2741), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((3507, 3668), 'hdmf.common.AlignedDynamicTable', 'AlignedDynamicTable', ([], {'name': '"""test_aligned_table"""', 'description': '"""Test aligned container"""', 'categories': "['test1', 'test2', 'test3']", 'category_tables': 'categories'}), "(name='test_aligned_table', description=\n 'Test aligned container', categories=['test1', 'test2', 'test3'],\n category_tables=categories)\n", (3526, 3668), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((4840, 5001), 'hdmf.common.AlignedDynamicTable', 'AlignedDynamicTable', ([], {'name': '"""test_aligned_table"""', 'description': '"""Test aligned container"""', 
'categories': "['test1', 'test2', 'test3']", 'category_tables': 'categories'}), "(name='test_aligned_table', description=\n 'Test aligned container', categories=['test1', 'test2', 'test3'],\n category_tables=categories)\n", (4859, 5001), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((5264, 5320), 'hdmf.common.DynamicTable', 'DynamicTable', ([], {'name': 'val', 'description': "(val + ' description')"}), "(name=val, description=val + ' description')\n", (5276, 5320), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((9141, 9258), 'hdmf.common.AlignedDynamicTable', 'AlignedDynamicTable', ([], {'name': '"""test_aligned_table"""', 'description': '"""Test aligned container"""', 'category_tables': 'categories'}), "(name='test_aligned_table', description=\n 'Test aligned container', category_tables=categories)\n", (9160, 9258), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((10066, 10183), 'hdmf.common.AlignedDynamicTable', 'AlignedDynamicTable', ([], {'name': '"""test_aligned_table"""', 'description': '"""Test aligned container"""', 'category_tables': 'categories'}), "(name='test_aligned_table', description=\n 'Test aligned container', category_tables=categories)\n", (10085, 10183), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((11063, 11180), 'hdmf.common.AlignedDynamicTable', 'AlignedDynamicTable', ([], {'name': '"""test_aligned_table"""', 'description': '"""Test aligned container"""', 'category_tables': 'categories'}), "(name='test_aligned_table', description=\n 'Test aligned container', category_tables=categories)\n", (11082, 11180), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((26736, 26880), 'hdmf.common.AlignedDynamicTable', 'AlignedDynamicTable', ([], {'name': '"""nested_adt"""', 'description': '"""test nesting 
AlignedDynamicTable"""', 'columns': '[col1]', 'category_tables': '[adt_category]'}), "(name='nested_adt', description=\n 'test nesting AlignedDynamicTable', columns=[col1], category_tables=[\n adt_category])\n", (26755, 26880), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((12325, 12362), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['incon[n]', 'curr[n]'], {}), '(incon[n], curr[n])\n', (12343, 12362), False, 'from pandas.testing import assert_frame_equal\n'), ((16185, 16198), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (16194, 16198), True, 'import numpy as np\n'), ((18463, 18476), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (18472, 18476), True, 'import numpy as np\n'), ((12043, 12056), 'hdmf.common.get_manager', 'get_manager', ([], {}), '()\n', (12054, 12056), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((12142, 12155), 'hdmf.common.get_manager', 'get_manager', ([], {}), '()\n', (12153, 12155), False, 'from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable\n'), ((16870, 16883), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (16879, 16883), True, 'import numpy as np\n'), ((17497, 17509), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (17506, 17509), True, 'import numpy as np\n'), ((22042, 22061), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (22051, 22061), True, 'import numpy as np\n'), ((22394, 22413), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (22403, 22413), True, 'import numpy as np\n'), ((7387, 7406), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (7396, 7406), True, 'import numpy as np\n'), ((16047, 16060), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (16056, 16060), True, 'import numpy as np\n'), ((16661, 16674), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (16670, 16674), True, 'import numpy as 
np\n'), ((17255, 17268), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (17264, 17268), True, 'import numpy as np\n'), ((19483, 19502), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (19492, 19502), True, 'import numpy as np\n'), ((23667, 23686), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (23676, 23686), True, 'import numpy as np\n'), ((2221, 2240), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (2230, 2240), True, 'import numpy as np\n'), ((4152, 4165), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (4161, 4165), True, 'import numpy as np\n'), ((4627, 4639), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (4636, 4639), True, 'import numpy as np\n'), ((6041, 6060), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (6050, 6060), True, 'import numpy as np\n'), ((6951, 6970), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (6960, 6970), True, 'import numpy as np\n'), ((8511, 8530), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (8520, 8530), True, 'import numpy as np\n'), ((8895, 8918), 'numpy.arange', 'np.arange', (['(num_rows + 1)'], {}), '(num_rows + 1)\n', (8904, 8918), True, 'import numpy as np\n'), ((9832, 9851), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (9841, 9851), True, 'import numpy as np\n'), ((10768, 10787), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (10777, 10787), True, 'import numpy as np\n'), ((11724, 11743), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (11733, 11743), True, 'import numpy as np\n'), ((12881, 12900), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (12890, 12900), True, 'import numpy as np\n'), ((13857, 13876), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (13866, 13876), True, 'import numpy as np\n'), ((15255, 15274), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (15264, 15274), True, 
'import numpy as np\n'), ((18013, 18032), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (18022, 18032), True, 'import numpy as np\n'), ((19053, 19072), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (19062, 19072), True, 'import numpy as np\n'), ((21122, 21141), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (21131, 21141), True, 'import numpy as np\n'), ((21792, 21810), 'numpy.asarray', 'np.asarray', (['[5, 8]'], {}), '([5, 8])\n', (21802, 21810), True, 'import numpy as np\n'), ((21881, 21899), 'numpy.asarray', 'np.asarray', (['[5, 8]'], {}), '([5, 8])\n', (21891, 21899), True, 'import numpy as np\n'), ((23232, 23251), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (23241, 23251), True, 'import numpy as np\n'), ((3370, 3383), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (3379, 3383), True, 'import numpy as np\n'), ((14693, 14716), 'numpy.arange', 'np.arange', (['(num_rows - 2)'], {}), '(num_rows - 2)\n', (14702, 14716), True, 'import numpy as np\n'), ((20626, 20645), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (20635, 20645), True, 'import numpy as np\n')] |
from __future__ import unicode_literals
import logging
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.forms.forms import ErrorDict
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils import six
from django.utils.decorators import method_decorator
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from django.views.generic.base import TemplateView
from djblets.auth.views import register
from djblets.configforms.views import ConfigPagesView
from djblets.features.decorators import feature_required
from djblets.forms.fieldsets import filter_fieldsets
from djblets.siteconfig.models import SiteConfiguration
from djblets.util.compat.django.shortcuts import render
from djblets.util.decorators import augment_method_from
from djblets.views.generic.etag import ETagViewMixin
from reviewboard.accounts.backends import get_enabled_auth_backends
from reviewboard.accounts.forms.registration import RegistrationForm
from reviewboard.accounts.mixins import CheckLoginRequiredViewMixin
from reviewboard.accounts.models import Profile
from reviewboard.accounts.pages import AccountPage, OAuth2Page, PrivacyPage
from reviewboard.accounts.privacy import is_consent_missing
from reviewboard.admin.decorators import check_read_only
from reviewboard.avatars import avatar_services
from reviewboard.notifications.email.decorators import preview_email
from reviewboard.notifications.email.message import \
prepare_password_changed_mail
from reviewboard.oauth.features import oauth2_service_feature
from reviewboard.oauth.forms import (UserApplicationChangeForm,
UserApplicationCreationForm)
from reviewboard.oauth.models import Application
from reviewboard.site.mixins import CheckLocalSiteAccessViewMixin
from reviewboard.site.urlresolvers import local_site_reverse
class UserInfoboxView(CheckLoginRequiredViewMixin,
                      CheckLocalSiteAccessViewMixin,
                      ETagViewMixin,
                      TemplateView):
    """Displays information on a user, for use in user pop-up infoboxes.

    This is meant to be embedded in other pages, rather than being
    a standalone page.
    """

    template_name = 'accounts/user_infobox.html'

    def __init__(self, **kwargs):
        """Initialize a view for the request.

        Args:
            **kwargs (dict):
                Keyword arguments passed to :py:meth:`as_view`.
        """
        super(UserInfoboxView, self).__init__(**kwargs)

        # Lookups shared between get_etag_data() and get_context_data().
        # get_etag_data() (driven by ETagViewMixin) populates these so
        # get_context_data() does not have to repeat the queries.
        self._lookup_user = None
        self._show_profile = None
        self._timezone = None

    def get_etag_data(self, request, username, *args, **kwargs):
        """Return an ETag for the view.

        This will look up some state needed for the request and generate a
        suitable ETag.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

            username (unicode):
                The username of the user being looked up.

            *args (tuple):
                Positional arguments to pass to the handler.

            **kwargs (tuple):
                Keyword arguments to pass to the handler.

                These will be arguments provided by the URL pattern.

        Returns:
            unicode:
            The ETag for the page.
        """
        # NOTE(review): imported at call time rather than module scope,
        # presumably to avoid an import cycle -- confirm.
        from reviewboard.extensions.hooks import UserInfoboxHook

        user = get_object_or_404(User, username=username)
        self._lookup_user = user
        profile = user.get_profile()
        self._show_profile = user.is_profile_visible(request.user)
        self._timezone = profile.timezone

        # Every value that can change the rendered infobox must contribute
        # to the ETag so stale cached copies are invalidated.
        etag_data = [
            user.first_name,
            user.last_name,
            user.email,
            six.text_type(user.last_login),
            six.text_type(settings.TEMPLATE_SERIAL),
            six.text_type(self._show_profile),
            self._timezone,
        ]

        if avatar_services.avatars_enabled:
            avatar_service = avatar_services.for_user(user)

            if avatar_service:
                etag_data.extend(avatar_service.get_etag_data(user))

        local_site = self.local_site

        for hook in UserInfoboxHook.hooks:
            try:
                etag_data.append(hook.get_etag_data(
                    user=user,
                    request=request,
                    local_site=local_site))
            except Exception as e:
                # A broken extension must not take the infobox down; log
                # and continue with the remaining hooks.
                logging.exception('Error when running UserInfoboxHook.'
                                  'get_etag_data method in extension "%s": %s',
                                  hook.extension.id, e)

        return ':'.join(etag_data)

    def get_context_data(self, **kwargs):
        """Return data for the template.

        This will return information on the user, along with information from
        any extension hooks used for the page.

        Args:
            **kwargs (tuple):
                Additional keyword arguments from the URL pattern.

        Returns:
            dict:
            Context data for the template.
        """
        from reviewboard.extensions.hooks import UserInfoboxHook

        # These are accessed several times, so bring them in to reduce
        # attribute lookups.
        user = self._lookup_user
        username = user.username
        local_site = self.local_site

        extra_content = []

        for hook in UserInfoboxHook.hooks:
            try:
                extra_content.append(hook.render(
                    user=user,
                    request=self.request,
                    local_site=local_site))
            except Exception as e:
                # Same policy as get_etag_data(): log and skip failing
                # extension hooks.
                logging.exception('Error when running UserInfoboxHook.'
                                  'render method in extension "%s": %s',
                                  hook.extension.id, e)

        review_requests_url = local_site_reverse('user', local_site=local_site,
                                                args=[username])
        reviews_url = local_site_reverse('user-grid', local_site=local_site,
                                         args=[username, 'reviews'])

        has_avatar = (
            avatar_services.avatars_enabled and
            avatar_services.for_user(user) is not None
        )

        return {
            'extra_content': mark_safe(''.join(extra_content)),
            'full_name': user.get_full_name(),
            'has_avatar': has_avatar,
            'infobox_user': user,
            'review_requests_url': review_requests_url,
            'reviews_url': reviews_url,
            'show_profile': self._show_profile,
            'timezone': self._timezone,
        }
@csrf_protect
def account_register(request, next_url='dashboard'):
    """Display the appropriate registration page.

    If registration is enabled and the selected authentication backend
    supports creation of users, this returns the registration page.
    Otherwise the user is redirected to the login view.
    """
    siteconfig = SiteConfiguration.objects.get_current()
    auth_backends = get_enabled_auth_backends()

    # Registration is only offered when the primary auth backend can
    # create users, the site allows it, and the site is not read-only.
    registration_allowed = (
        auth_backends[0].supports_registration and
        siteconfig.get('auth_enable_registration') and
        not siteconfig.get('site_read_only'))

    if registration_allowed:
        return register(request, next_page=reverse(next_url),
                        form_class=RegistrationForm)

    return HttpResponseRedirect(reverse("login"))
class MyAccountView(ConfigPagesView):
    """Displays the My Account page containing user preferences.

    The page will be built based on registered pages and forms. This makes
    it easy to plug in new bits of UI for the page, which is handy for
    extensions that want to offer customization for users.
    """

    title = _('My Account')

    css_bundle_names = [
        'account-page',
    ]

    js_bundle_names = [
        '3rdparty-jsonlint',
        'config-forms',
        'account-page',
    ]

    @method_decorator(login_required)
    @method_decorator(check_read_only)
    @augment_method_from(ConfigPagesView)
    def dispatch(self, *args, **kwargs):
        """Handle the view.

        This just falls back to the djblets ConfigPagesView.dispatch
        implementation.
        """
        pass

    @property
    def nav_title(self):
        """Get the title for the navigation section."""
        return self.request.user.username

    @property
    def page_classes(self):
        """The list of page classes for this view.

        If the user is missing any consent requirements or has not accepted
        the privacy policy/terms of service, only the privacy page will be
        shown.
        """
        if self.is_user_missing_consent:
            # Force the user through the privacy page before exposing any
            # other account settings pages.
            return [AccountPage.registry.get('page_id', PrivacyPage.page_id)]

        return list(AccountPage.registry)

    @cached_property
    def ordered_user_local_sites(self):
        """Get the user's local sites, ordered by name."""
        return self.request.user.local_site.order_by('name')

    @property
    def render_sidebar(self):
        """Whether or not to render the sidebar.

        If the user is missing any consent requirements or has not accepted
        the privacy policy/terms of service, the sidebar will not render.
        This is to prevent the user from navigating away from the privacy page
        before making decisions.
        """
        return not self.is_user_missing_consent

    @cached_property
    def is_user_missing_consent(self):
        """Whether or not the user is missing consent."""
        return is_consent_missing(self.request.user)
@login_required
@preview_email(prepare_password_changed_mail)
def preview_password_changed_email(request):
    """Render a preview of the password-changed e-mail for the current user."""
    context = {
        'user': request.user,
    }
    return context
@login_required
@feature_required(oauth2_service_feature)
def edit_oauth_app(request, app_id=None):
    """Create or edit an OAuth2 application.

    Args:
        request (django.http.HttpRequest):
            The current HTTP request.

        app_id (int, optional):
            The ID of the application to edit.

            If this argument is ``None`` a new application will be edited.

    Returns:
        django.http.HttpResponse:
        The rendered view.
    """
    # Imported here because a module-scope import would interfere with
    # automatic admin site registration.
    from reviewboard.oauth.admin import ApplicationAdmin

    if app_id:
        # Editing: the application must exist and belong to the requester.
        app = get_object_or_404(
            Application,
            pk=app_id,
            user=request.user,
        )
        form_class = UserApplicationChangeForm
        fieldsets = ApplicationAdmin.fieldsets
    else:
        # Creating a brand-new application.
        app = None
        form_class = UserApplicationCreationForm
        fieldsets = ApplicationAdmin.add_fieldsets

    if request.method == 'POST':
        form = form_class(user=request.user,
                          data=request.POST.copy(),
                          initial=None,
                          instance=app)

        if form.is_valid():
            app = form.save()

            if app_id is None:
                # Send the user to the edit page of the new application.
                next_url = reverse('edit-oauth-app', args=(app.pk,))
            else:
                next_url = OAuth2Page.get_absolute_url()

            return HttpResponseRedirect(next_url)
    else:
        form = form_class(user=request.user,
                          data=None,
                          initial=None,
                          instance=app)

    if app and app.is_disabled_for_security:
        # Show a warning at the top of the form when the form is disabled
        # for security. full_clean() will never run for this form (it will
        # never be saved), so the errors are injected directly.
        form._errors = ErrorDict({
            '__all__': form.error_class(
                [form.DISABLED_FOR_SECURITY_ERROR],
            ),
        })

    return render(
        request=request,
        template_name='accounts/edit_oauth_app.html',
        context={
            'app': app,
            'form': form,
            'fieldsets': filter_fieldsets(form=form_class,
                                         fieldsets=fieldsets),
            'oauth2_page_url': OAuth2Page.get_absolute_url(),
            'request': request,
        })
| [
"django.http.HttpResponseRedirect",
"django.utils.decorators.method_decorator",
"djblets.forms.fieldsets.filter_fieldsets",
"djblets.siteconfig.models.SiteConfiguration.objects.get_current",
"django.core.urlresolvers.reverse",
"django.utils.six.text_type",
"reviewboard.notifications.email.decorators.pre... | [((9993, 10037), 'reviewboard.notifications.email.decorators.preview_email', 'preview_email', (['prepare_password_changed_mail'], {}), '(prepare_password_changed_mail)\n', (10006, 10037), False, 'from reviewboard.notifications.email.decorators import preview_email\n'), ((10151, 10191), 'djblets.features.decorators.feature_required', 'feature_required', (['oauth2_service_feature'], {}), '(oauth2_service_feature)\n', (10167, 10191), False, 'from djblets.features.decorators import feature_required\n'), ((7369, 7408), 'djblets.siteconfig.models.SiteConfiguration.objects.get_current', 'SiteConfiguration.objects.get_current', ([], {}), '()\n', (7406, 7408), False, 'from djblets.siteconfig.models import SiteConfiguration\n'), ((7429, 7456), 'reviewboard.accounts.backends.get_enabled_auth_backends', 'get_enabled_auth_backends', ([], {}), '()\n', (7454, 7456), False, 'from reviewboard.accounts.backends import get_enabled_auth_backends\n'), ((8142, 8157), 'django.utils.translation.ugettext_lazy', '_', (['"""My Account"""'], {}), "('My Account')\n", (8143, 8157), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8328, 8360), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {}), '(login_required)\n', (8344, 8360), False, 'from django.utils.decorators import method_decorator\n'), ((8366, 8399), 'django.utils.decorators.method_decorator', 'method_decorator', (['check_read_only'], {}), '(check_read_only)\n', (8382, 8399), False, 'from django.utils.decorators import method_decorator\n'), ((8405, 8441), 'djblets.util.decorators.augment_method_from', 'augment_method_from', (['ConfigPagesView'], {}), '(ConfigPagesView)\n', (8424, 8441), False, 'from djblets.util.decorators import augment_method_from\n'), ((3736, 3778), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['User'], {'username': 'username'}), '(User, username=username)\n', (3753, 3778), False, 'from 
django.shortcuts import get_object_or_404\n'), ((6199, 6265), 'reviewboard.site.urlresolvers.local_site_reverse', 'local_site_reverse', (['"""user"""'], {'local_site': 'local_site', 'args': '[username]'}), "('user', local_site=local_site, args=[username])\n", (6217, 6265), False, 'from reviewboard.site.urlresolvers import local_site_reverse\n'), ((6337, 6423), 'reviewboard.site.urlresolvers.local_site_reverse', 'local_site_reverse', (['"""user-grid"""'], {'local_site': 'local_site', 'args': "[username, 'reviews']"}), "('user-grid', local_site=local_site, args=[username,\n 'reviews'])\n", (6355, 6423), False, 'from reviewboard.site.urlresolvers import local_site_reverse\n'), ((7792, 7808), 'django.core.urlresolvers.reverse', 'reverse', (['"""login"""'], {}), "('login')\n", (7799, 7808), False, 'from django.core.urlresolvers import reverse\n'), ((9936, 9973), 'reviewboard.accounts.privacy.is_consent_missing', 'is_consent_missing', (['self.request.user'], {}), '(self.request.user)\n', (9954, 9973), False, 'from reviewboard.accounts.privacy import is_consent_missing\n'), ((10814, 10874), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Application'], {'pk': 'app_id', 'user': 'request.user'}), '(Application, pk=app_id, user=request.user)\n', (10831, 10874), False, 'from django.shortcuts import get_object_or_404\n'), ((4075, 4105), 'django.utils.six.text_type', 'six.text_type', (['user.last_login'], {}), '(user.last_login)\n', (4088, 4105), False, 'from django.utils import six\n'), ((4119, 4158), 'django.utils.six.text_type', 'six.text_type', (['settings.TEMPLATE_SERIAL'], {}), '(settings.TEMPLATE_SERIAL)\n', (4132, 4158), False, 'from django.utils import six\n'), ((4172, 4205), 'django.utils.six.text_type', 'six.text_type', (['self._show_profile'], {}), '(self._show_profile)\n', (4185, 4205), False, 'from django.utils import six\n'), ((4319, 4349), 'reviewboard.avatars.avatar_services.for_user', 'avatar_services.for_user', (['user'], {}), '(user)\n', (4343, 
4349), False, 'from reviewboard.avatars import avatar_services\n'), ((11586, 11616), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['next_url'], {}), '(next_url)\n', (11606, 11616), False, 'from django.http import HttpResponseRedirect\n'), ((6545, 6575), 'reviewboard.avatars.avatar_services.for_user', 'avatar_services.for_user', (['user'], {}), '(user)\n', (6569, 6575), False, 'from reviewboard.avatars import avatar_services\n'), ((7658, 7675), 'django.core.urlresolvers.reverse', 'reverse', (['next_url'], {}), '(next_url)\n', (7665, 7675), False, 'from django.core.urlresolvers import reverse\n'), ((9102, 9158), 'reviewboard.accounts.pages.AccountPage.registry.get', 'AccountPage.registry.get', (['"""page_id"""', 'PrivacyPage.page_id'], {}), "('page_id', PrivacyPage.page_id)\n", (9126, 9158), False, 'from reviewboard.accounts.pages import AccountPage, OAuth2Page, PrivacyPage\n'), ((11449, 11478), 'reviewboard.accounts.pages.OAuth2Page.get_absolute_url', 'OAuth2Page.get_absolute_url', ([], {}), '()\n', (11476, 11478), False, 'from reviewboard.accounts.pages import AccountPage, OAuth2Page, PrivacyPage\n'), ((11524, 11565), 'django.core.urlresolvers.reverse', 'reverse', (['"""edit-oauth-app"""'], {'args': '(app.pk,)'}), "('edit-oauth-app', args=(app.pk,))\n", (11531, 11565), False, 'from django.core.urlresolvers import reverse\n'), ((12431, 12483), 'djblets.forms.fieldsets.filter_fieldsets', 'filter_fieldsets', ([], {'form': 'form_cls', 'fieldsets': 'fieldsets'}), '(form=form_cls, fieldsets=fieldsets)\n', (12447, 12483), False, 'from djblets.forms.fieldsets import filter_fieldsets\n'), ((12558, 12587), 'reviewboard.accounts.pages.OAuth2Page.get_absolute_url', 'OAuth2Page.get_absolute_url', ([], {}), '()\n', (12585, 12587), False, 'from reviewboard.accounts.pages import AccountPage, OAuth2Page, PrivacyPage\n'), ((4766, 4896), 'logging.exception', 'logging.exception', (['"""Error when running UserInfoboxHook.get_etag_data method in extension "%s": %s"""', 
'hook.extension.id', 'e'], {}), '(\n \'Error when running UserInfoboxHook.get_etag_data method in extension "%s": %s\'\n , hook.extension.id, e)\n', (4783, 4896), False, 'import logging\n'), ((5983, 6105), 'logging.exception', 'logging.exception', (['"""Error when running UserInfoboxHook.render method in extension "%s": %s"""', 'hook.extension.id', 'e'], {}), '(\n \'Error when running UserInfoboxHook.render method in extension "%s": %s\',\n hook.extension.id, e)\n', (6000, 6105), False, 'import logging\n')] |
# -*- coding: utf-8 -*-
"""
main module for cluster_based_k_anon
"""
import operator
import random
import time
from functools import cmp_to_key
from basic_mondrian.models.numrange import NumRange
from basic_mondrian.utils.utility import (cmp_str, get_num_list_from_str,
qid_to_key)
# Extra debug printing when True.
__DEBUG = False
# ATT_TREES stores the generalization-hierarchy root node (or NumRange for
# numeric attributes) for each attribute; populated by init().
ATT_TREES = []
# Number of records in the dataset being anonymized (set by init()).
LEN_DATA = 0
# Number of quasi-identifier attributes per record.
QI_LEN = 0
# Per-attribute full range (numeric span or taxonomy root size), used to
# normalize NCP values.
QI_RANGE = []
# Per-attribute flag: True = categorical, False = numeric.
IS_CAT = []
# get_LCA, gen_result and NCP require huge running time, while most of the
# calls are duplicates, so results are memoized in these caches.
LCA_CACHE = []
NCP_CACHE = {}
class Cluster(object):
    """Cluster is for cluster based k-anonymity.

    self.member: record list in cluster
    self.gen_result: generalized value covering every member of the cluster
    self.center: per-attribute centroid -- equal to gen_result for
        categorical attributes, and the mean value (kept as a string) for
        numeric attributes
    self.information_loss: len(self.member) * NCP(self.gen_result)
    """
    def __init__(self, member, gen_result, information_loss=0.0):
        self.information_loss = information_loss
        self.member = member
        self.gen_result = gen_result[:]
        self.center = gen_result[:]
        for i in range(QI_LEN):
            if IS_CAT[i] is False:
                # Numeric attribute: the centroid is the mean of the member
                # values, stored as a string like all record values.
                self.center[i] = str(sum([float(t[i]) for t in self.member]) * 1.0 / len(self.member))
    def add_record(self, record):
        """
        add record to cluster
        """
        self.member.append(record)
        # A single record is both its own generalization and its own center.
        self.update_gen_result(record, record)
    def update_cluster(self):
        """update cluster information when member is changed

        Recomputes gen_result, center and information_loss from scratch.
        """
        self.gen_result = cluster_generalization(self.member)
        for i in range(QI_LEN):
            if IS_CAT[i]:
                self.center[i] = self.gen_result[i]
            else:
                self.center[i] = str(sum([float(t[i]) for t in self.member]) * 1.0 / len(self.member))
        self.information_loss = len(self.member) * NCP(self.gen_result)
    def update_gen_result(self, merge_gen_result, center, num=1):
        """
        update gen_result and information_loss after adding record or merging cluster

        :param merge_gen_result: generalized values being merged in
        :param center: centroid of the record(s) being merged in
        :param num: how many records the merged-in values represent
        :return: None
        """
        self.gen_result = generalization(self.gen_result, merge_gen_result)
        current_len = len(self.member)
        for i in range(QI_LEN):
            if IS_CAT[i]:
                self.center[i] = self.gen_result[i]
            else:
                # Incremental weighted mean; callers append/extend
                # self.member *before* calling, so current_len already
                # includes the `num` new records.
                self.center[i] = str((float(self.center[i]) * (current_len - num) +
                                      float(center[i]) * num) / current_len)
        self.information_loss = len(self.member) * NCP(self.gen_result)
    def add_same_record(self, record):
        """
        add record with same qid to cluster

        gen_result and center are left untouched because the record's QID
        is identical to the cluster's.
        """
        self.member.append(record)
    def merge_cluster(self, cluster):
        """merge cluster into self and do not delete cluster elements.
        update self.gen_result
        """
        self.member.extend(cluster.member)
        self.update_gen_result(cluster.gen_result, cluster.center, len(cluster))
    def __getitem__(self, item):
        """
        :param item: index number
        :return: gen_result[item]
        """
        return self.gen_result[item]
    def __len__(self):
        """
        return number of records in cluster
        """
        return len(self.member)
    def __str__(self):
        return str(self.gen_result)
def r_distance(source, target):
    """
    Return distance between source (cluster or record)
    and target (cluster or record). The distance is based on
    NCP (Normalized Certainty Penalty) on relational part.
    If source or target are cluster, func need to multiply
    source_len (or target_len).
    """
    def _unpack(item):
        # A Cluster contributes its generalized value and its size; a bare
        # record contributes itself with weight 1.
        if isinstance(item, Cluster):
            return item.gen_result, len(item)
        return item, 1

    source_gen, source_len = _unpack(source)
    target_gen, target_len = _unpack(target)

    # Identical generalizations cost nothing.
    if source_gen == target_gen:
        return 0

    merged = generalization(source_gen, target_gen)
    # The penalty is weighted by the total number of records involved.
    return (source_len + target_len) * NCP(merged)
def diff_distance(record, cluster):
    """
    Return IL(cluster and record) - IL(cluster).
    """
    merged_gen = generalization(record, cluster.gen_result)
    enlarged_loss = NCP(merged_gen) * (len(cluster) + 1)
    return enlarged_loss - cluster.information_loss
def NCP(record):
    """Compute NCP (Normalized Certainty Penalty)
    when generate record to gen_result.

    Numeric attribute values are either a single number (penalty 0) or an
    interval 'low,high'; categorical values are nodes in the attribute
    taxonomy. Results are memoized in NCP_CACHE keyed on the QID values.
    """
    ncp = 0.0
    # exclude SA values(last one type [])
    list_key = qid_to_key(record)
    try:
        # Memoized result from a previous call with the same QID values.
        return NCP_CACHE[list_key]
    except KeyError:
        pass
    for i in range(QI_LEN):
        # if leaf_num of numerator is 1, then NCP is 0
        width = 0.0
        if IS_CAT[i] is False:
            try:
                # A single numeric value parses cleanly; width stays 0.
                float(record[i])
            except ValueError:
                # Generalized interval 'low,high': width is its length.
                temp = record[i].split(',')
                width = float(temp[1]) - float(temp[0])
        else:
            # Categorical: width is the size of the subtree covered by the
            # generalized value in the attribute taxonomy.
            width = len(ATT_TREES[i][record[i]]) * 1.0
        # Normalize by the attribute's full range.
        width /= QI_RANGE[i]
        ncp += width
    NCP_CACHE[list_key] = ncp
    return ncp
def get_LCA(index, item1, item2):
    """Get lowest common ancestor (including themselves).

    :param index: attribute index into ATT_TREES/LCA_CACHE
    :param item1: first attribute value
    :param item2: second attribute value
    :return: value of the lowest common ancestor node
    """
    # get parent list from
    if item1 == item2:
        return item1
    try:
        # Memoized; the cache key is the concatenation of both values.
        return LCA_CACHE[index][item1 + item2]
    except KeyError:
        pass
    parent1 = ATT_TREES[index][item1].parent[:]
    parent2 = ATT_TREES[index][item2].parent[:]
    # Prepend the nodes themselves so they count as their own ancestors.
    parent1.insert(0, ATT_TREES[index][item1])
    parent2.insert(0, ATT_TREES[index][item2])
    min_len = min(len(parent1), len(parent2))
    last_LCA = parent1[-1]
    # note here: when trying to access list reversely, take care of -0
    # Walk both ancestor chains from the root downwards; the deepest
    # position where they still agree is the LCA.
    for i in range(1, min_len + 1):
        if parent1[-i].value == parent2[-i].value:
            last_LCA = parent1[-i]
        else:
            break
    LCA_CACHE[index][item1 + item2] = last_LCA.value
    return last_LCA.value
def generalization(record1, record2):
    """
    Compute relational generalization result of record1 and record2.

    Numeric attributes become the covering interval 'min,max' (or a single
    value when both records agree); categorical attributes become the LCA
    of the two values in the attribute taxonomy.
    """
    gen = []
    for i in range(QI_LEN):
        if IS_CAT[i] is False:
            split_number = []
            # Each value may itself be an interval 'low,high'; collect all
            # endpoint numbers from both records.
            split_number.extend(get_num_list_from_str(record1[i]))
            split_number.extend(get_num_list_from_str(record2[i]))
            split_number = list(set(split_number))
            if len(split_number) == 1:
                # Both records share a single value; nothing to widen.
                gen.append(split_number[0])
            else:
                # Sort the (string) numbers numerically and keep the span.
                split_number.sort(key=cmp_to_key(cmp_str))
                gen.append(split_number[0] + ',' + split_number[-1])
        else:
            gen.append(get_LCA(i, record1[i], record2[i]))
    return gen
def cluster_generalization(records):
    """
    calculat gen_result of records(list) recursively.
    Compute both relational gen_result for records (list).
    """
    # Fold generalization() over the list, seeded with the first record.
    gen = records[0]
    for record in records[1:]:
        gen = generalization(gen, record)
    return gen
def find_best_knn(index, k, data):
    """key fuction of KNN. Find k nearest neighbors of record, remove them from data

    Returns the new Cluster and the list of indices (into *data*) of the
    records it absorbed, so the caller can remove them.
    """
    seed = data[index]
    # Distance from the seed record to every other record.
    distances = [(i, r_distance(seed, candidate))
                 for i, candidate in enumerate(data)
                 if i != index]
    distances.sort(key=operator.itemgetter(1))
    # Keep the k-1 closest neighbors plus the seed itself (distance 0).
    chosen = distances[:k - 1] + [(index, 0)]
    record_index = [pair[0] for pair in chosen]
    elements = [data[pair[0]] for pair in chosen]
    gen = cluster_generalization(elements)
    return Cluster(elements, gen, k * NCP(gen)), record_index
def find_best_cluster_iloss(record, clusters):
    """Residual assignment: find the best cluster for *record*.

    Returns the index of the cluster whose generalization is closest to
    the record under r_distance.

    Improvements over the original: the unused ``best_cluster`` local is
    removed and the arbitrary ``1000000000000`` sentinel is replaced with
    ``float('inf')``, which can never be undercut by a real distance.

    :param record: the record to place.
    :param clusters: non-empty list of Cluster objects.
    :return: int index into *clusters* of the minimum-distance cluster.
    """
    min_distance = float('inf')
    min_index = 0
    for i, cluster in enumerate(clusters):
        distance = r_distance(record, cluster.gen_result)
        if distance < min_distance:
            min_distance = distance
            min_index = i
    return min_index
def find_best_cluster_iloss_increase(record, clusters):
    """Residual assignment: find the best cluster for *record*.

    Returns the index of the cluster for which adding the record causes
    the smallest increase in information loss.

    Bug fix: the original assigned the running minimum to ``min_distance``
    instead of ``min_diff``, so the comparison never tightened and the
    *last* cluster whose diff was under the sentinel was always chosen
    rather than the true minimum.

    :param record: the record to place.
    :param clusters: non-empty list of Cluster objects.
    :return: int index into *clusters* of the minimum-increase cluster.
    """
    min_diff = float('inf')
    min_index = 0
    for i, cluster in enumerate(clusters):
        il_increase = diff_distance(record, cluster)
        if il_increase < min_diff:
            min_diff = il_increase  # was: min_distance = IF_diff (bug)
            min_index = i
    return min_index
def find_furthest_record(record, data):
    """Return the index of the record in *data* furthest from *record*.

    Idiom fix: iterates with enumerate() instead of ``range(len(data))``
    with repeated indexing.

    Ties are broken in favor of the later index (``>=`` comparison),
    matching the original scan order. Returns -1 when *data* is empty.

    :param record: the reference record (the one most recently clustered).
    :param data: remaining records.
    :return: index of the furthest record, or -1 for empty input.
    """
    max_distance = 0
    max_index = -1
    for index, candidate in enumerate(data):
        current_distance = r_distance(record, candidate)
        if current_distance >= max_distance:
            max_distance = current_distance
            max_index = index
    return max_index
def find_best_record_iloss_increase(cluster, data):
    """
    :param cluster: current cluster being grown
    :param data: remain dataset
    :return: index of record with min diff on information loss
    """
    best_diff = 1000000000000
    best_index = 0
    for index, record in enumerate(data):
        # IL(cluster + record) with the constant |cluster| + 1 factored in
        # is fully captured by diff_distance().
        candidate_diff = diff_distance(record, cluster)
        if candidate_diff < best_diff:
            best_diff = candidate_diff
            best_index = index
    return best_index
def clustering_knn(data, k=25):
    """
    Group record according to QID distance. KNN

    A random seed record and its k-1 nearest neighbors form each cluster;
    the leftover (< k) records are assigned to the cheapest cluster.
    """
    clusters = []
    # Phase 1: carve out clusters of exactly k records while enough remain.
    while len(data) >= k:
        seed_index = random.randrange(len(data))
        cluster, used_indices = find_best_knn(seed_index, k, data)
        used = set(used_indices)
        data = [record for i, record in enumerate(data) if i not in used]
        clusters.append(cluster)
    # Phase 2: residual assignment of the remaining records.
    while data:
        record = data.pop()
        best = find_best_cluster_iloss(record, clusters)
        clusters[best].add_record(record)
    return clusters
def clustering_kmember(data, k=25):
    """
    Group record according to NCP. K-member

    Each round seeds a new cluster with the record furthest from the one
    added last, then greedily grows it to size k with the records whose
    addition increases information loss the least.
    """
    clusters = []
    # Start from a random record (not removed from data yet).
    r_i = data[random.randrange(len(data))]
    while len(data) >= k:
        furthest_pos = find_furthest_record(r_i, data)
        r_i = data.pop(furthest_pos)
        cluster = Cluster([r_i], r_i)
        # Grow the cluster one cheapest record at a time until size k.
        while len(cluster) < k:
            best_pos = find_best_record_iloss_increase(cluster, data)
            cluster.add_record(data.pop(best_pos))
        clusters.append(cluster)
    # Residual assignment of the remaining < k records.
    while data:
        record = data.pop()
        best = find_best_cluster_iloss_increase(record, clusters)
        clusters[best].add_record(record)
    return clusters
def adjust_cluster(cluster, residual, k):
    """Shrink *cluster* down to its k members closest to the center.

    The members furthest from the cluster center are moved into *residual*
    (for later re-assignment) and the cluster statistics are recomputed.

    Bug fix: ``dict.iteritems()`` is Python 2 only and raised
    AttributeError under Python 3 (the rest of this module already uses
    Python 3 ``print()``); replaced with ``dict.items()``.

    :param cluster: an oversized Cluster (len > k).
    :param residual: list that receives the evicted records (mutated).
    :param k: target cluster size.
    :return: None
    """
    center = cluster.center
    dist_dict = {}
    for i, member in enumerate(cluster.member):
        dist_dict[i] = r_distance(center, member)
    # Sort members by distance to the center; everything past the first k
    # is evicted.
    sorted_dist = sorted(dist_dict.items(), key=operator.itemgetter(1))
    need_adjust_index = [pair[0] for pair in sorted_dist[k:]]
    residual.extend(cluster.member[i] for i in need_adjust_index)
    evicted = set(need_adjust_index)
    cluster.member = [member for i, member in enumerate(cluster.member)
                      if i not in evicted]
    cluster.update_cluster()
def clustering_oka(data, k=25):
    """
    Group record according to NCP. OKA: one time pass k-means

    len(data) // k random seeds are drawn, every remaining record is
    assigned to its nearest seed cluster, oversized clusters are trimmed
    back to k, and the trimmed residue is re-assigned.

    Bug fixes for Python 3:
    * ``random.sample(..., len(data) / k)`` passed a float sample size,
      which raises TypeError; floor division is used instead.
    * ``less_clusters[index] >= k`` compared a Cluster object to an int
      (TypeError); the intent was to compare the cluster's *size*.
    """
    clusters = []
    can_clusters = []
    less_clusters = []
    # Draw len(data) // k random records as initial cluster seeds.
    seed_index = random.sample(range(len(data)), len(data) // k)
    for index in seed_index:
        record = data[index]
        can_clusters.append(Cluster([record], record))
    data = [t for i, t in enumerate(data[:]) if i not in set(seed_index)]
    # One pass: assign every remaining record to the closest seed cluster.
    while len(data) > 0:
        record = data.pop()
        index = find_best_cluster_iloss(record, can_clusters)
        can_clusters[index].add_record(record)
    residual = []
    for cluster in can_clusters:
        if len(cluster) < k:
            less_clusters.append(cluster)
        else:
            if len(cluster) > k:
                # Evict the members furthest from the center into residual.
                adjust_cluster(cluster, residual, k)
            clusters.append(cluster)
    # Re-assign evicted records, preferring still-undersized clusters.
    while len(residual) > 0:
        record = residual.pop()
        if len(less_clusters) > 0:
            index = find_best_cluster_iloss(record, less_clusters)
            less_clusters[index].add_record(record)
            if len(less_clusters[index]) >= k:
                clusters.append(less_clusters.pop(index))
        else:
            index = find_best_cluster_iloss(record, clusters)
            clusters[index].add_record(record)
    # NOTE(review): clusters still below size k when residual runs out are
    # dropped (their records never reach the result) -- matches the
    # original behavior, but looks like a data-loss bug; confirm intent.
    return clusters
def init(att_trees, data, SA_num, QI_num=-1):
    """
    init global variables

    :param att_trees: generalization hierarchies (or NumRange objects for
        numeric attributes), one per QI attribute
    :param data: the dataset, one record per row
    :param SA_num: sensitive-attribute specification; stored in SA_INDEX
        (only its length is read elsewhere in this module -- presumably a
        list of SA indices; confirm with caller)
    :param QI_num: number of QI attributes; when <= 0, defaults to all
        columns except the last
    """
    global ATT_TREES, DATA_BACKUP, LEN_DATA, QI_RANGE, IS_CAT, QI_LEN, LCA_CACHE, NCP_CACHE, SA_INDEX
    SA_INDEX = SA_num
    ATT_TREES = att_trees
    QI_RANGE = []
    IS_CAT = []
    LEN_DATA = len(data)
    # Reset the memoization caches for a fresh run.
    LCA_CACHE = []
    NCP_CACHE = {}
    if QI_num <= 0:
        QI_LEN = len(data[0]) - 1
    else:
        QI_LEN = QI_num
    for i in range(QI_LEN):
        LCA_CACHE.append(dict())
        if isinstance(ATT_TREES[i], NumRange):
            # Numeric attribute: normalize NCP by the full numeric range.
            IS_CAT.append(False)
            QI_RANGE.append(ATT_TREES[i].range)
        else:
            # Categorical attribute: normalize by the taxonomy root size.
            IS_CAT.append(True)
            QI_RANGE.append(len(ATT_TREES[i]['*']))
def clustering_based_k_anon(att_trees, data, k, QI_num, SA_num, type_alg):
    """
    the main function of clustering based k-anon

    Returns ``(result, (ncp, rtime))`` where *result* is the generalized
    dataset with the sensitive attributes appended, *ncp* the percentage
    information loss and *rtime* the wall-clock running time in seconds.
    Returns ``(0, (0, 0))`` for an unknown *type_alg*.
    """
    init(att_trees, data, SA_num, QI_num)
    start_time = time.time()
    if type_alg == 'knn':
        print("Begin to KNN Cluster based on NCP")
        clusters = clustering_knn(data, k)
    elif type_alg == 'kmember':
        print("Begin to K-Member Cluster based on NCP")
        clusters = clustering_kmember(data, k)
    elif type_alg == 'oka':
        print("Begin to OKA Cluster based on NCP")
        clusters = clustering_oka(data, k)
    else:
        print("Please choose merge algorithm types")
        print("knn | kmember")
        return (0, (0, 0))
    rtime = float(time.time() - start_time)
    result = []
    ncp = 0.0
    for cluster in clusters:
        for member in cluster.member:
            # Keep the trailing sensitive-attribute values untouched and
            # prepend the cluster's generalized QI values.
            sa_start = len(member) - len(SA_INDEX)
            result.append(cluster.gen_result + member[sa_start:])
        ncp += cluster.information_loss
    ncp /= LEN_DATA
    ncp /= QI_LEN
    ncp *= 100
    if __DEBUG:
        print("NCP=", ncp)
    return (result, (ncp, rtime))
| [
"basic_mondrian.utils.utility.qid_to_key",
"basic_mondrian.utils.utility.get_num_list_from_str",
"time.time",
"operator.itemgetter",
"functools.cmp_to_key"
] | [((4657, 4675), 'basic_mondrian.utils.utility.qid_to_key', 'qid_to_key', (['record'], {}), '(record)\n', (4667, 4675), False, 'from basic_mondrian.utils.utility import cmp_str, get_num_list_from_str, qid_to_key\n'), ((14217, 14228), 'time.time', 'time.time', ([], {}), '()\n', (14226, 14228), False, 'import time\n'), ((7483, 7505), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (7502, 7505), False, 'import operator\n'), ((11551, 11573), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (11570, 11573), False, 'import operator\n'), ((14745, 14756), 'time.time', 'time.time', ([], {}), '()\n', (14754, 14756), False, 'import time\n'), ((6316, 6349), 'basic_mondrian.utils.utility.get_num_list_from_str', 'get_num_list_from_str', (['record1[i]'], {}), '(record1[i])\n', (6337, 6349), False, 'from basic_mondrian.utils.utility import cmp_str, get_num_list_from_str, qid_to_key\n'), ((6383, 6416), 'basic_mondrian.utils.utility.get_num_list_from_str', 'get_num_list_from_str', (['record2[i]'], {}), '(record2[i])\n', (6404, 6416), False, 'from basic_mondrian.utils.utility import cmp_str, get_num_list_from_str, qid_to_key\n'), ((6608, 6627), 'functools.cmp_to_key', 'cmp_to_key', (['cmp_str'], {}), '(cmp_str)\n', (6618, 6627), False, 'from functools import cmp_to_key\n')] |
from django.conf.urls import patterns, url
from pyramidal import views
urlpatterns = patterns('',
    # Index page.
    url(r'^$',views.index,name='index'),
    # Gene-set view: gene_list is a +/,-separated list of identifiers.
    url(r'^geneset/(?P<gene_list>[a-zA-Z0-9_\-\.\+]+)/?$',views.geneset,name='gene_set'),
    # Isoform views (list and detail for a gene's isoforms).
    url(r'^genes?/(?P<gene_id>[\w.-]+)/isoforms?/?$',views.geneIsoforms,name='isoform_index'),
    url(r'^genes?/(?P<gene_id>[\w.-]+)/isoforms?/(?P<isoform_id>[\w.]+)/?$',views.isoformDetail,name='isoform_show'),
    # Isoform data endpoints (hive-plot data and expression values).
    url(r'^genes?/(?P<gene_id>[\w.-]+)/isoforms?/(?P<isoform_id>[\w.]+)/hivedata/?$',views.isoformHiveData,name='isoform_hive_data'),
    url(r'^genes?/(?P<gene_id>[\w.-]+)/isoforms?/(?P<isoform_id>[\w.]+)/expression/?$',views.isoformExpression,name='isoform_expression'),
    # Gene detail view.
    url(r'^genes?/(?P<gene_id>[\w.-]+)/?$',views.geneShow,name='gene_show'),
    # Gene data endpoints (hive-plot data and expression values).
    url(r'^genes?/(?P<gene_id>[\w.-]+)/hivedata/?$',views.geneHiveData,name='gene_hive_data'),
    url(r'^genes?/(?P<gene_id>[\w.-]+)/expression/?$',views.geneExpression,name='gene_expression'),
    # #Gene Data
    # url(r'^genes?/(?P<gene_id>[\w.-]+)/hivedata/?$',views.geneHiveData,name='gene_hive_data'),
    # url(r'^genes?/(?P<gene_id>[\w.-]+)/expression/?$',views.geneExpression,name='gene_expression'),
    # Gene listing.
    url(r'^genes/?$',views.geneIndex,name='gene_index'),
    # Cluster views (listing and per-cluster detail).
    url(r'^clusters/?$',views.clusterIndex,name='cluster_index'),
    url(r'^clusters/(?P<cluster_id>\d+)/?$',views.clusterShow,name='cluster_show'),
    # Search.
    url(r'^search/?$', views.search, name = 'search'),
    # Development scratch page (unnamed on purpose).
    url(r'^dev/$',views.dev),
    # Markers page.
    url(r'^markers/?$',views.markers,name = 'markers'),
    # Supplemental material.
    url(r'^supp/?$',views.supplement,name = 'supplement'),
    # Transcription-factor binding sites.
    url(r'^tfbs/?$',views.tfbs,name = 'tfbs'),
    # Help page.
    url(r'^help/?$',views.help,name = 'help'),
    # Devel page.
    url(r'^devel/?$',views.devel,name='devel'),
    # About page.
    url(r'^about/?$',views.about,name='about'),
)
| [
"django.conf.urls.url"
] | [((109, 145), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.index'], {'name': '"""index"""'}), "('^$', views.index, name='index')\n", (112, 145), False, 'from django.conf.urls import patterns, url\n'), ((164, 256), 'django.conf.urls.url', 'url', (['"""^geneset/(?P<gene_list>[a-zA-Z0-9_\\\\-\\\\.\\\\+]+)/?$"""', 'views.geneset'], {'name': '"""gene_set"""'}), "('^geneset/(?P<gene_list>[a-zA-Z0-9_\\\\-\\\\.\\\\+]+)/?$', views.geneset,\n name='gene_set')\n", (167, 256), False, 'from django.conf.urls import patterns, url\n'), ((268, 364), 'django.conf.urls.url', 'url', (['"""^genes?/(?P<gene_id>[\\\\w.-]+)/isoforms?/?$"""', 'views.geneIsoforms'], {'name': '"""isoform_index"""'}), "('^genes?/(?P<gene_id>[\\\\w.-]+)/isoforms?/?$', views.geneIsoforms, name=\n 'isoform_index')\n", (271, 364), False, 'from django.conf.urls import patterns, url\n'), ((360, 479), 'django.conf.urls.url', 'url', (['"""^genes?/(?P<gene_id>[\\\\w.-]+)/isoforms?/(?P<isoform_id>[\\\\w.]+)/?$"""', 'views.isoformDetail'], {'name': '"""isoform_show"""'}), "('^genes?/(?P<gene_id>[\\\\w.-]+)/isoforms?/(?P<isoform_id>[\\\\w.]+)/?$',\n views.isoformDetail, name='isoform_show')\n", (363, 479), False, 'from django.conf.urls import patterns, url\n'), ((498, 634), 'django.conf.urls.url', 'url', (['"""^genes?/(?P<gene_id>[\\\\w.-]+)/isoforms?/(?P<isoform_id>[\\\\w.]+)/hivedata/?$"""', 'views.isoformHiveData'], {'name': '"""isoform_hive_data"""'}), "('^genes?/(?P<gene_id>[\\\\w.-]+)/isoforms?/(?P<isoform_id>[\\\\w.]+)/hivedata/?$'\n , views.isoformHiveData, name='isoform_hive_data')\n", (501, 634), False, 'from django.conf.urls import patterns, url\n'), ((629, 770), 'django.conf.urls.url', 'url', (['"""^genes?/(?P<gene_id>[\\\\w.-]+)/isoforms?/(?P<isoform_id>[\\\\w.]+)/expression/?$"""', 'views.isoformExpression'], {'name': '"""isoform_expression"""'}), "('^genes?/(?P<gene_id>[\\\\w.-]+)/isoforms?/(?P<isoform_id>[\\\\w.]+)/expression/?$'\n , views.isoformExpression, name='isoform_expression')\n", (632, 
770), False, 'from django.conf.urls import patterns, url\n'), ((785, 858), 'django.conf.urls.url', 'url', (['"""^genes?/(?P<gene_id>[\\\\w.-]+)/?$"""', 'views.geneShow'], {'name': '"""gene_show"""'}), "('^genes?/(?P<gene_id>[\\\\w.-]+)/?$', views.geneShow, name='gene_show')\n", (788, 858), False, 'from django.conf.urls import patterns, url\n'), ((879, 975), 'django.conf.urls.url', 'url', (['"""^genes?/(?P<gene_id>[\\\\w.-]+)/hivedata/?$"""', 'views.geneHiveData'], {'name': '"""gene_hive_data"""'}), "('^genes?/(?P<gene_id>[\\\\w.-]+)/hivedata/?$', views.geneHiveData, name=\n 'gene_hive_data')\n", (882, 975), False, 'from django.conf.urls import patterns, url\n'), ((971, 1071), 'django.conf.urls.url', 'url', (['"""^genes?/(?P<gene_id>[\\\\w.-]+)/expression/?$"""', 'views.geneExpression'], {'name': '"""gene_expression"""'}), "('^genes?/(?P<gene_id>[\\\\w.-]+)/expression/?$', views.geneExpression,\n name='gene_expression')\n", (974, 1071), False, 'from django.conf.urls import patterns, url\n'), ((1296, 1348), 'django.conf.urls.url', 'url', (['"""^genes/?$"""', 'views.geneIndex'], {'name': '"""gene_index"""'}), "('^genes/?$', views.geneIndex, name='gene_index')\n", (1299, 1348), False, 'from django.conf.urls import patterns, url\n'), ((1367, 1428), 'django.conf.urls.url', 'url', (['"""^clusters/?$"""', 'views.clusterIndex'], {'name': '"""cluster_index"""'}), "('^clusters/?$', views.clusterIndex, name='cluster_index')\n", (1370, 1428), False, 'from django.conf.urls import patterns, url\n'), ((1430, 1515), 'django.conf.urls.url', 'url', (['"""^clusters/(?P<cluster_id>\\\\d+)/?$"""', 'views.clusterShow'], {'name': '"""cluster_show"""'}), "('^clusters/(?P<cluster_id>\\\\d+)/?$', views.clusterShow, name='cluster_show'\n )\n", (1433, 1515), False, 'from django.conf.urls import patterns, url\n'), ((1521, 1567), 'django.conf.urls.url', 'url', (['"""^search/?$"""', 'views.search'], {'name': '"""search"""'}), "('^search/?$', views.search, name='search')\n", (1524, 1567), False, 
'from django.conf.urls import patterns, url\n'), ((1580, 1604), 'django.conf.urls.url', 'url', (['"""^dev/$"""', 'views.dev'], {}), "('^dev/$', views.dev)\n", (1583, 1604), False, 'from django.conf.urls import patterns, url\n'), ((1618, 1667), 'django.conf.urls.url', 'url', (['"""^markers/?$"""', 'views.markers'], {'name': '"""markers"""'}), "('^markers/?$', views.markers, name='markers')\n", (1621, 1667), False, 'from django.conf.urls import patterns, url\n'), ((1685, 1737), 'django.conf.urls.url', 'url', (['"""^supp/?$"""', 'views.supplement'], {'name': '"""supplement"""'}), "('^supp/?$', views.supplement, name='supplement')\n", (1688, 1737), False, 'from django.conf.urls import patterns, url\n'), ((1749, 1789), 'django.conf.urls.url', 'url', (['"""^tfbs/?$"""', 'views.tfbs'], {'name': '"""tfbs"""'}), "('^tfbs/?$', views.tfbs, name='tfbs')\n", (1752, 1789), False, 'from django.conf.urls import patterns, url\n'), ((1801, 1841), 'django.conf.urls.url', 'url', (['"""^help/?$"""', 'views.help'], {'name': '"""help"""'}), "('^help/?$', views.help, name='help')\n", (1804, 1841), False, 'from django.conf.urls import patterns, url\n'), ((1854, 1897), 'django.conf.urls.url', 'url', (['"""^devel/?$"""', 'views.devel'], {'name': '"""devel"""'}), "('^devel/?$', views.devel, name='devel')\n", (1857, 1897), False, 'from django.conf.urls import patterns, url\n'), ((1908, 1951), 'django.conf.urls.url', 'url', (['"""^about/?$"""', 'views.about'], {'name': '"""about"""'}), "('^about/?$', views.about, name='about')\n", (1911, 1951), False, 'from django.conf.urls import patterns, url\n')] |