code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
from dagster import ModeDefinition, pipeline
from .database_resources import postgres_database, sqlite_database
from .solids_with_resources import generate_table_1, generate_table_2
@pipeline(
    mode_defs=[
        # "local_dev" binds the "database" resource to SQLite; "prod" to Postgres.
        ModeDefinition("local_dev", resource_defs={"database": sqlite_database}),
        ModeDefinition("prod", resource_defs={"database": postgres_database}),
    ],
)
def generate_tables_pipeline():
    """Dagster pipeline that runs both table-generating solids against the
    mode-selected "database" resource."""
    generate_table_1()
    generate_table_2()
| [
"dagster.ModeDefinition"
] | [((220, 292), 'dagster.ModeDefinition', 'ModeDefinition', (['"""local_dev"""'], {'resource_defs': "{'database': sqlite_database}"}), "('local_dev', resource_defs={'database': sqlite_database})\n", (234, 292), False, 'from dagster import ModeDefinition, pipeline\n'), ((302, 371), 'dagster.ModeDefinition', 'ModeDefinition', (['"""prod"""'], {'resource_defs': "{'database': postgres_database}"}), "('prod', resource_defs={'database': postgres_database})\n", (316, 371), False, 'from dagster import ModeDefinition, pipeline\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-15 12:13
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``can_archive`` flag to user profiles and constrain recipient
    name fields to a GSM-compatible character set."""

    dependencies = [("apostello", "0006_userprofile_show_tour")]
    operations = [
        # New permission flag, enabled by default for existing profiles.
        migrations.AddField(model_name="userprofile", name="can_archive", field=models.BooleanField(default=True)),
        migrations.AlterField(
            model_name="recipient",
            name="first_name",
            field=models.CharField(
                db_index=True,
                max_length=16,
                validators=[
                    # Restrict names to GSM characters (see the validator
                    # message) so they survive SMS encoding.
                    django.core.validators.RegexValidator(
                        "^[\\s\\w@?£!1$\"¥#è?¤é%ù&ì\\ò(Ç)*:Ø+;ÄäøÆ,<LÖlöæ\\-=ÑñÅß.>ÜüåÉ/§à¡¿']+$",
                        message="You can only use GSM characters.",
                    )
                ],
                verbose_name="First Name",
            ),
        ),
        migrations.AlterField(
            model_name="recipient",
            name="last_name",
            field=models.CharField(
                db_index=True,
                max_length=40,
                validators=[
                    # Same GSM-character restriction as first_name.
                    django.core.validators.RegexValidator(
                        "^[\\s\\w@?£!1$\"¥#è?¤é%ù&ì\\ò(Ç)*:Ø+;ÄäøÆ,<LÖlöæ\\-=ÑñÅß.>ÜüåÉ/§à¡¿']+$",
                        message="You can only use GSM characters.",
                    )
                ],
                verbose_name="Last Name",
            ),
        ),
    ]
| [
"django.db.models.BooleanField"
] | [((391, 424), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (410, 424), False, 'from django.db import migrations, models\n')] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 eminga
# Licensed under MIT License
import datetime, pytz, re, helper
def grab(channel, timespan):
    """Scrape the ZDF live-TV EPG for `channel`.

    Returns a list of show dicts with keys "start", "stop", "title" and
    optionally "details-url", covering up to `timespan` hours from now.
    """
    tz = pytz.timezone("Europe/Berlin")
    now = datetime.datetime.now(tz)
    shows = []
    a = 0
    # Before 07:00 local time also fetch the previous day's page, since the
    # current night's schedule can still belong to yesterday's listing.
    if now.time().hour < 7:
        a = -1
    for i in range(a, 14):
        date = now + datetime.timedelta(days=i)
        text = helper.download("http://www.zdf.de/live-tv?airtimeDate=" + date.strftime("%Y-%m-%d"))
        if text is None:
            continue
        text = helper.cut(text, "<section class=\"b-epg-timeline timeline-" + channel, "</section>")
        sections = helper.split(text, "<li", "</li>")
        laststart = datetime.datetime.min.replace(tzinfo=tz)
        for section in sections:
            show = {}
            temp = helper.cut(section, "<span class=\"time\">", "</span>")
            temp = re.search("(\d\d):(\d\d) - (\d\d):(\d\d)", temp)
            show["start"] = date.replace(hour=int(temp.group(1)), minute=int(temp.group(2)), second=0, microsecond=0)
            # Start times on a page are ascending; a drop means the listing
            # crossed midnight, so advance to the next day.
            if show["start"] < laststart:
                date += datetime.timedelta(days=1)
                show["start"] += datetime.timedelta(days=1)
            if (show["start"] - now).total_seconds() / 3600 > timespan:
                return shows
            laststart = show["start"]
            show["stop"] = date.replace(hour=int(temp.group(3)), minute=int(temp.group(4)), second=0, microsecond=0)
            # A stop earlier than the start likewise crosses midnight.
            if show["stop"] < show["start"]:
                show["stop"] += datetime.timedelta(days=1)
            temp = re.search("<span class=\"overlay-link-category\">(.*?)<span class=\"visuallyhidden\">:</span></span>\s*(?:<.*>)*\s*(.*?)\s*?</a>", section)
            if temp.group(1):
                show["title"] = helper.cleanup(temp.group(1) + " - " + temp.group(2))
            else:
                show["title"] = helper.cleanup(temp.group(2))
            temp = re.search("contentUrl\": \"(.*)\"", section)
            if temp is not None:
                show["details-url"] = "http://www.zdf.de" + temp.group(1)
            shows.append(show)
    return shows
def grabdetails(url):
    """Fetch a programme detail page and extract extra metadata.

    Returns None when the page cannot be downloaded; otherwise a dict
    with the optional keys "sub-title", "desc" and "subtitles".
    """
    text = helper.download(url)
    if text is None:
        return None
    show = {}
    sub = helper.cut(text, "<h3 class=\"overlay-subtitle\">", "</h3>")
    if sub:
        show["sub-title"] = helper.cleanup(sub)
    desc = helper.cut(text, "<p class=\"overlay-text\">", "</p>")
    if desc:
        show["desc"] = helper.cleanup(desc)
    if text.find("Untertitel für Hörgeschädigte") != -1:
        show["subtitles"] = True
    return show
| [
"pytz.timezone",
"helper.cleanup",
"datetime.datetime.min.replace",
"datetime.datetime.now",
"helper.cut",
"datetime.timedelta",
"helper.download",
"helper.split",
"re.search"
] | [((154, 184), 'pytz.timezone', 'pytz.timezone', (['"""Europe/Berlin"""'], {}), "('Europe/Berlin')\n", (167, 184), False, 'import datetime, pytz, re, helper\n'), ((192, 217), 'datetime.datetime.now', 'datetime.datetime.now', (['tz'], {}), '(tz)\n', (213, 217), False, 'import datetime, pytz, re, helper\n'), ((1877, 1897), 'helper.download', 'helper.download', (['url'], {}), '(url)\n', (1892, 1897), False, 'import datetime, pytz, re, helper\n'), ((1953, 2011), 'helper.cut', 'helper.cut', (['text', '"""<h3 class="overlay-subtitle">"""', '"""</h3>"""'], {}), '(text, \'<h3 class="overlay-subtitle">\', \'</h3>\')\n', (1963, 2011), False, 'import datetime, pytz, re, helper\n'), ((2116, 2168), 'helper.cut', 'helper.cut', (['text', '"""<p class="overlay-text">"""', '"""</p>"""'], {}), '(text, \'<p class="overlay-text">\', \'</p>\')\n', (2126, 2168), False, 'import datetime, pytz, re, helper\n'), ((474, 562), 'helper.cut', 'helper.cut', (['text', '(\'<section class="b-epg-timeline timeline-\' + channel)', '"""</section>"""'], {}), '(text, \'<section class="b-epg-timeline timeline-\' + channel,\n \'</section>\')\n', (484, 562), False, 'import datetime, pytz, re, helper\n'), ((574, 608), 'helper.split', 'helper.split', (['text', '"""<li"""', '"""</li>"""'], {}), "(text, '<li', '</li>')\n", (586, 608), False, 'import datetime, pytz, re, helper\n'), ((623, 663), 'datetime.datetime.min.replace', 'datetime.datetime.min.replace', ([], {'tzinfo': 'tz'}), '(tzinfo=tz)\n', (652, 663), False, 'import datetime, pytz, re, helper\n'), ((2075, 2099), 'helper.cleanup', 'helper.cleanup', (['subtitle'], {}), '(subtitle)\n', (2089, 2099), False, 'import datetime, pytz, re, helper\n'), ((2233, 2260), 'helper.cleanup', 'helper.cleanup', (['description'], {}), '(description)\n', (2247, 2260), False, 'import datetime, pytz, re, helper\n'), ((311, 337), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'i'}), '(days=i)\n', (329, 337), False, 'import datetime, pytz, re, helper\n'), ((715, 
768), 'helper.cut', 'helper.cut', (['section', '"""<span class="time">"""', '"""</span>"""'], {}), '(section, \'<span class="time">\', \'</span>\')\n', (725, 768), False, 'import datetime, pytz, re, helper\n'), ((781, 837), 're.search', 're.search', (['"""(\\\\d\\\\d):(\\\\d\\\\d) - (\\\\d\\\\d):(\\\\d\\\\d)"""', 'temp'], {}), "('(\\\\d\\\\d):(\\\\d\\\\d) - (\\\\d\\\\d):(\\\\d\\\\d)', temp)\n", (790, 837), False, 'import datetime, pytz, re, helper\n'), ((1372, 1520), 're.search', 're.search', (['"""<span class="overlay-link-category">(.*?)<span class="visuallyhidden">:</span></span>\\\\s*(?:<.*>)*\\\\s*(.*?)\\\\s*?</a>"""', 'section'], {}), '(\n \'<span class="overlay-link-category">(.*?)<span class="visuallyhidden">:</span></span>\\\\s*(?:<.*>)*\\\\s*(.*?)\\\\s*?</a>\'\n , section)\n', (1381, 1520), False, 'import datetime, pytz, re, helper\n'), ((1677, 1718), 're.search', 're.search', (['"""contentUrl": "(.*)\\""""', 'section'], {}), '(\'contentUrl": "(.*)"\', section)\n', (1686, 1718), False, 'import datetime, pytz, re, helper\n'), ((985, 1011), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1003, 1011), False, 'import datetime, pytz, re, helper\n'), ((1033, 1059), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1051, 1059), False, 'import datetime, pytz, re, helper\n'), ((1335, 1361), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1353, 1361), False, 'import datetime, pytz, re, helper\n')] |
from genericpath import exists
import math
import numpy as np
import os
import re
from PIL import Image
import matplotlib.pyplot as plt
from matplotlib import cm
# append line to log file
def log(file, line, doPrint=True):
    """Append `line` (plus a newline) to the log file `file`.

    Fixes the original ``f.wrtite`` typo, which raised AttributeError on
    every call. Uses a context manager so the handle is closed even when
    the write fails.

    Args:
        file: path of the log file (created if missing).
        line: text to append; a newline is added automatically.
        doPrint: when True, also echo the line to stdout.
    """
    # "a+" kept from the original for backward compatibility ("a" would do).
    with open(file, "a+") as f:
        f.write(line + "\n")
    if doPrint:
        print(line)
# reset log file
def resetLog(file):
    """Truncate the log file so the next run starts from an empty log."""
    # Opening in "w" mode truncates (or creates) the file; nothing to write.
    with open(file, "w"):
        pass
def plot_loss(history_L1, history_L1val):
    """Show training vs. validation L1 loss curves on a single figure."""
    train_curve = np.asarray(history_L1)
    val_curve = np.asarray(history_L1val)
    plt.figure()
    plt.plot(np.arange(len(train_curve)), train_curve, "b", label="Training loss")
    plt.plot(np.arange(len(val_curve)), val_curve, "g", label="Validation loss")
    plt.legend()
    plt.show()
def computeLR(i, epochs, minLR, maxLR):
    """Learning-rate schedule: hold maxLR for the first half of training,
    then decay exponentially towards minLR over the second half."""
    if i < epochs * 0.5:
        return maxLR
    # Progress through the second half, mapped onto the exponent range [0, 6].
    progress = (i / float(epochs) - 0.5) * 2.0
    exponent = 0.0 + progress * (6.0 - 0.0)
    decay = math.pow(0.5, exponent)
    return minLR + (maxLR - minLR) * decay
def makeDirs(directoryList):
    """Create every directory in `directoryList` (parents included),
    silently skipping ones that already exist.

    ``exist_ok=True`` removes the check-then-create race present in the
    original ``os.path.exists`` guard.
    """
    for directory in directoryList:
        os.makedirs(directory, exist_ok=True)
def imageOut(filename, _outputs, _targets, saveTargets=False, normalize=False, saveMontage=True):
    """Write prediction/target channel images, either as separate PNGs or
    as a montage (`filename`.png colour-mapped, `filename`_bw.png raw).

    NOTE(review): this definition is shadowed by the later 2-argument
    ``imageOut`` below; callers that need the montage path (see
    ``saveOutput``) will actually hit the later definition.

    Channels 0/1/2 are written with the suffixes _pressure/_velX/_velY
    when saveMontage is False.
    """
    # Work on copies so the caller's arrays are not mutated.
    outputs = np.copy(_outputs)
    targets = np.copy(_targets)
    s = outputs.shape[1]
    if saveMontage:
        # 3 columns (one per channel) with a 10px gutter; 2 rows for the
        # colour montage, 3 rows (incl. error row) for the b/w montage.
        new_img = Image.new("RGB", ((s + 10) * 3, s * 2), color=(255, 255, 255))
        BW_img = Image.new("RGB", ((s + 10) * 3, s * 3), color=(255, 255, 255))
    for i in range(3):
        # Transpose + flip to match image orientation.
        outputs[i] = np.flipud(outputs[i].transpose())
        targets[i] = np.flipud(targets[i].transpose())
        min_value = min(np.min(outputs[i]), np.min(targets[i]))
        max_value = max(np.max(outputs[i]), np.max(targets[i]))
        if normalize:
            # Joint min/max normalization so prediction and target share a scale.
            outputs[i] -= min_value
            targets[i] -= min_value
            max_value -= min_value
            outputs[i] /= max_value
            targets[i] /= max_value
        else:
            # Assumes data lies in [-1, 1]; map it to [0, 1].
            outputs[i] -= -1.0
            targets[i] -= -1.0
            outputs[i] /= 2.0
            targets[i] /= 2.0
        if not saveMontage:
            suffix = ""
            if i == 0:
                suffix = "_pressure"
            elif i == 1:
                suffix = "_velX"
            else:
                suffix = "_velY"
            im = Image.fromarray(cm.magma(outputs[i], bytes=True))
            im = im.resize((512, 512))
            im.save(filename + suffix + "_pred.png")
            im = Image.fromarray(cm.magma(targets[i], bytes=True))
            if saveTargets:
                im = im.resize((512, 512))
                im.save(filename + suffix + "_target.png")
        else:
            # Colour montage: row 0 targets, row 1 predictions.
            im = Image.fromarray(cm.magma(targets[i], bytes=True))
            new_img.paste(im, ((s + 10) * i, s * 0))
            im = Image.fromarray(cm.magma(outputs[i], bytes=True))
            new_img.paste(im, ((s + 10) * i, s * 1))
            # B/W montage: row 0 targets, row 1 predictions, row 2 |error|*10.
            im = Image.fromarray(targets[i] * 256.0)
            BW_img.paste(im, ((s + 10) * i, s * 0))
            im = Image.fromarray(outputs[i] * 256.0)
            BW_img.paste(im, ((s + 10) * i, s * 1))
            im = Image.fromarray(np.abs(targets[i] - outputs[i]) * 10.0 * 256.0)
            BW_img.paste(im, ((s + 10) * i, s * 2))
    if saveMontage:
        new_img.save(filename + ".png")
        BW_img.save(filename + "_bw.png")
def imageOut(filename, _outputs, saveTargets=True, normalize=False):
    """Write the three prediction channels as colour-mapped 128x128 PNGs.

    NOTE(review): this redefines (shadows) the montage-capable ``imageOut``
    above — after this point only this 2-array-free variant exists, which
    breaks ``saveOutput``'s target branch. Consider renaming one of them.
    """
    # Copy so the caller's array is not mutated.
    outputs = np.copy(_outputs)
    for i in range(3):
        outputs[i] = np.flipud(outputs[i].transpose())
        min_value = np.min(outputs[i])
        max_value = np.max(outputs[i])
        if normalize:
            outputs[i] -= min_value
            max_value -= min_value
            outputs[i] /= max_value
        else:  # from -1,1 to 0,1
            outputs[i] -= -1.0
            outputs[i] /= 2.0
        suffix = ""
        if i == 0:
            suffix = "_pressure"
        elif i == 1:
            suffix = "_velX"
        else:
            suffix = "_velY"
        im = Image.fromarray(cm.magma(outputs[i], bytes=True))
        im = im.resize((128, 128))
        im.save(filename + suffix + "_pred.png")
def saveOutput(output_arr, target_arr):
    """Save result images under ./results/, with or without targets.

    NOTE(review): the ``else`` branch will raise TypeError at runtime —
    the later 4-parameter ``imageOut`` definition has shadowed the
    montage-capable one, and it accepts no ``saveMontage`` keyword.
    """
    if target_arr is None:
        imageOut("./results/result", output_arr)
    else:
        imageOut(
            "./results/result", output_arr, target_arr, normalize=False, saveMontage=True
        )  # write normalized with error
class InputData:
    """Wraps a stacked npz array of flow-solver channels and normalizes it.

    Channels 0-2 are treated as the network input, channels 3-5 (when
    present) as the target. The ``max_*`` constants are fixed
    normalization factors applied channel-wise.
    """

    def __init__(self, npz_arr, removePOffset=True, makeDimLess=True):
        """Split `npz_arr` into input/target and normalize in place.

        Args:
            npz_arr: array with 3 (input only) or 6 (input + target)
                leading channels.
            removePOffset: subtract the mean pressure offset from the target.
            makeDimLess: scale target channels by the input velocity
                magnitude to make them dimensionless.
        """
        self.input = None
        self.target = None
        # Fixed per-channel normalization constants.
        self.max_inputs_0 = 100.0
        self.max_inputs_1 = 38.12
        self.max_inputs_2 = 1.0
        self.max_targets_0 = 4.65
        self.max_targets_1 = 2.04
        self.max_targets_2 = 2.37
        if npz_arr.shape[0] >= 3:
            self.input = npz_arr[0:3]
        if npz_arr.shape[0] == 6:
            self.target = npz_arr[3:6]
        self.removePOffset = removePOffset
        self.makeDimLess = makeDimLess
        self.normalize()

    def normalize(self):
        """Normalize input and target channels in place (order matters:
        offset removal, then dimensionless scaling, then max scaling)."""
        if self.target is not None:
            if self.removePOffset:
                self.target[0, :, :] -= np.mean(self.target[0, :, :])  # remove offset
                self.target[0, :, :] -= self.target[0, :, :] * self.input[2, :, :]  # pressure * mask
            if self.makeDimLess:
                # Velocity magnitude from input channels 0/1 — presumably the
                # freestream velocity components; confirm against the caller.
                v_norm = (np.max(np.abs(self.input[0, :, :])) ** 2 + np.max(np.abs(self.input[1, :, :])) ** 2) ** 0.5
                # Pressure scales with v^2, velocities with v.
                self.target[0, :, :] /= v_norm ** 2
                self.target[1, :, :] /= v_norm
                self.target[2, :, :] /= v_norm
            self.target[0, :, :] *= 1.0 / self.max_targets_0
            self.target[1, :, :] *= 1.0 / self.max_targets_1
            self.target[2, :, :] *= 1.0 / self.max_targets_2
        if self.input is not None:
            self.input[0, :, :] *= 1 / self.max_inputs_0
            self.input[1, :, :] *= 1 / self.max_inputs_1

    def denormalize(self, data, v_norm):
        """Invert `normalize()` for a target-shaped array.

        Args:
            data: normalized (3, H, W) array; not modified (a copy is made).
            v_norm: velocity magnitude used during normalization.

        Returns:
            The denormalized copy.
        """
        a = data.copy()
        a[0, :, :] /= 1.0 / self.max_targets_0
        a[1, :, :] /= 1.0 / self.max_targets_1
        a[2, :, :] /= 1.0 / self.max_targets_2
        if self.makeDimLess:
            a[0, :, :] *= v_norm ** 2
            a[1, :, :] *= v_norm
            a[2, :, :] *= v_norm
        return a
| [
"numpy.copy",
"os.path.exists",
"PIL.Image.fromarray",
"numpy.mean",
"numpy.abs",
"os.makedirs",
"numpy.arange",
"math.pow",
"PIL.Image.new",
"matplotlib.cm.magma",
"numpy.asarray",
"numpy.max",
"matplotlib.pyplot.figure",
"numpy.min",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.sho... | [((460, 482), 'numpy.asarray', 'np.asarray', (['history_L1'], {}), '(history_L1)\n', (470, 482), True, 'import numpy as np\n'), ((496, 521), 'numpy.asarray', 'np.asarray', (['history_L1val'], {}), '(history_L1val)\n', (506, 521), True, 'import numpy as np\n'), ((527, 539), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (537, 539), True, 'import matplotlib.pyplot as plt\n'), ((702, 714), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (712, 714), True, 'import matplotlib.pyplot as plt\n'), ((719, 729), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (727, 729), True, 'import matplotlib.pyplot as plt\n'), ((930, 946), 'math.pow', 'math.pow', (['(0.5)', 'e'], {}), '(0.5, e)\n', (938, 946), False, 'import math\n'), ((1244, 1261), 'numpy.copy', 'np.copy', (['_outputs'], {}), '(_outputs)\n', (1251, 1261), True, 'import numpy as np\n'), ((1276, 1293), 'numpy.copy', 'np.copy', (['_targets'], {}), '(_targets)\n', (1283, 1293), True, 'import numpy as np\n'), ((3467, 3484), 'numpy.copy', 'np.copy', (['_outputs'], {}), '(_outputs)\n', (3474, 3484), True, 'import numpy as np\n'), ((553, 580), 'numpy.arange', 'np.arange', (['l1train.shape[0]'], {}), '(l1train.shape[0])\n', (562, 580), True, 'import numpy as np\n'), ((632, 658), 'numpy.arange', 'np.arange', (['l1vali.shape[0]'], {}), '(l1vali.shape[0])\n', (641, 658), True, 'import numpy as np\n'), ((1358, 1420), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '((s + 10) * 3, s * 2)'], {'color': '(255, 255, 255)'}), "('RGB', ((s + 10) * 3, s * 2), color=(255, 255, 255))\n", (1367, 1420), False, 'from PIL import Image\n'), ((1438, 1500), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '((s + 10) * 3, s * 3)'], {'color': '(255, 255, 255)'}), "('RGB', ((s + 10) * 3, s * 3), color=(255, 255, 255))\n", (1447, 1500), False, 'from PIL import Image\n'), ((3583, 3601), 'numpy.min', 'np.min', (['outputs[i]'], {}), '(outputs[i])\n', (3589, 3601), True, 'import numpy as np\n'), ((3622, 
3640), 'numpy.max', 'np.max', (['outputs[i]'], {}), '(outputs[i])\n', (3628, 3640), True, 'import numpy as np\n'), ((1068, 1093), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (1082, 1093), False, 'import os\n'), ((1107, 1129), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (1118, 1129), False, 'import os\n'), ((1659, 1677), 'numpy.min', 'np.min', (['outputs[i]'], {}), '(outputs[i])\n', (1665, 1677), True, 'import numpy as np\n'), ((1679, 1697), 'numpy.min', 'np.min', (['targets[i]'], {}), '(targets[i])\n', (1685, 1697), True, 'import numpy as np\n'), ((1723, 1741), 'numpy.max', 'np.max', (['outputs[i]'], {}), '(outputs[i])\n', (1729, 1741), True, 'import numpy as np\n'), ((1743, 1761), 'numpy.max', 'np.max', (['targets[i]'], {}), '(targets[i])\n', (1749, 1761), True, 'import numpy as np\n'), ((2953, 2988), 'PIL.Image.fromarray', 'Image.fromarray', (['(targets[i] * 256.0)'], {}), '(targets[i] * 256.0)\n', (2968, 2988), False, 'from PIL import Image\n'), ((3058, 3093), 'PIL.Image.fromarray', 'Image.fromarray', (['(outputs[i] * 256.0)'], {}), '(outputs[i] * 256.0)\n', (3073, 3093), False, 'from PIL import Image\n'), ((4061, 4093), 'matplotlib.cm.magma', 'cm.magma', (['outputs[i]'], {'bytes': '(True)'}), '(outputs[i], bytes=True)\n', (4069, 4093), False, 'from matplotlib import cm\n'), ((2356, 2388), 'matplotlib.cm.magma', 'cm.magma', (['outputs[i]'], {'bytes': '(True)'}), '(outputs[i], bytes=True)\n', (2364, 2388), False, 'from matplotlib import cm\n'), ((2516, 2548), 'matplotlib.cm.magma', 'cm.magma', (['targets[i]'], {'bytes': '(True)'}), '(targets[i], bytes=True)\n', (2524, 2548), False, 'from matplotlib import cm\n'), ((2728, 2760), 'matplotlib.cm.magma', 'cm.magma', (['targets[i]'], {'bytes': '(True)'}), '(targets[i], bytes=True)\n', (2736, 2760), False, 'from matplotlib import cm\n'), ((2848, 2880), 'matplotlib.cm.magma', 'cm.magma', (['outputs[i]'], {'bytes': '(True)'}), '(outputs[i], bytes=True)\n', (2856, 
2880), False, 'from matplotlib import cm\n'), ((5195, 5224), 'numpy.mean', 'np.mean', (['self.target[0, :, :]'], {}), '(self.target[0, :, :])\n', (5202, 5224), True, 'import numpy as np\n'), ((3179, 3210), 'numpy.abs', 'np.abs', (['(targets[i] - outputs[i])'], {}), '(targets[i] - outputs[i])\n', (3185, 3210), True, 'import numpy as np\n'), ((5411, 5438), 'numpy.abs', 'np.abs', (['self.input[0, :, :]'], {}), '(self.input[0, :, :])\n', (5417, 5438), True, 'import numpy as np\n'), ((5454, 5481), 'numpy.abs', 'np.abs', (['self.input[1, :, :]'], {}), '(self.input[1, :, :])\n', (5460, 5481), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Copyright (c) 2002-2005 ActiveState Corp.
# See LICENSE.txt for license details.
# Author:
# <NAME> (<EMAIL>)
# Home:
# http://trentm.com/projects/px/
"""Test p4lib.py's interface to 'p4 delete'."""
import os
import sys
import unittest
import types
import pprint
import testsupport
from p4lib import P4, P4LibError
class DeleteTestCase(unittest.TestCase):
    """Integration tests for p4lib's 'p4 delete' wrapper.

    Requires a live Perforce server and the fixtures in ``testsupport``
    (each test runs inside the 'andrew' client workspace). Uses
    Python-2-era APIs (``failUnless``, ``types.IntType``).
    """

    def test_delete(self):
        # Delete a freshly submitted file and check the open-for-delete state.
        p4 = P4()
        top = os.getcwd()
        andrew = testsupport.users['andrew']
        try:
            os.chdir(andrew['home'])
            # First add and submit a file.
            fname = 'test_delete.txt'
            fout = open(fname, 'w')
            fout.write('Hello there.\n')
            fout.close()
            p4.add(fname)
            p4.submit(fname, 'add this file to be deleted')
            # Now delete the file.
            result = p4.delete(fname)
            self.failUnless(result[0]['comment'] == 'opened for delete')
            self.failUnless(result[0]['depotFile']\
                            == p4.where(fname)[0]['depotFile'])
            self.failUnless(type(result[0]['rev']) == types.IntType)
            opened = p4.opened(fname)
            self.failUnless(opened[0]['action'] == 'delete')
            self.failUnless(opened[0]['depotFile'] == result[0]['depotFile'])
            # cleanup
            p4.revert(fname)
        finally:
            os.chdir(top)

    def test_delete_multiple_files(self):
        # Delete several files in one call; every result entry must report
        # 'opened for delete'.
        p4 = P4()
        top = os.getcwd()
        andrew = testsupport.users['andrew']
        try:
            os.chdir(andrew['home'])
            # First add and submit some files.
            fname1 = 'test_delete_multiple_files_1.txt'
            fname2 = 'test_delete_multiple_files_2.txt'
            open(fname1, 'w').write('Hello there 1.\n')
            open(fname2, 'w').write('Hello there 2.\n')
            p4.add([fname1, fname2])
            p4.submit([fname1, fname2], 'add files to be deleted')
            # Now delete the files.
            results = p4.delete([fname1, fname2])
            for result in results:
                self.failUnless(result['comment'] == 'opened for delete')
                self.failUnless(type(result['rev']) == types.IntType)
            # cleanup
            p4.revert([fname1, fname2])
        finally:
            os.chdir(top)

    def test_delete_already_opened(self):
        # Deleting a file already opened for edit must fail (rev is None).
        p4 = P4()
        top = os.getcwd()
        andrew = testsupport.users['andrew']
        try:
            os.chdir(andrew['home'])
            # First add and submit a file.
            fname = 'test_delete_already_opened.txt'
            fout = open(fname, 'w')
            fout.write('Hello there.\n')
            fout.close()
            p4.add(fname)
            p4.submit(fname, 'add this file to be deleted')
            # Now open it and then try to delete it.
            p4.edit(fname)
            result = p4.delete(fname)
            self.failUnless(result[0]['comment'] != 'opened for delete')
            self.failUnless(result[0]['rev'] is None)
            # cleanup
            p4.revert(fname)
        finally:
            os.chdir(top)

    def test_delete_specify_change(self):
        # The delete must land in the given pending changelist.
        p4 = P4()
        top = os.getcwd()
        andrew = testsupport.users['andrew']
        try:
            os.chdir(andrew['home'])
            # First add and submit a file.
            fname = 'test_delete_specify_change.txt'
            fout = open(fname, 'w')
            fout.write('Hello there.\n')
            fout.close()
            p4.add(fname)
            p4.submit(fname, 'add this file to be deleted')
            # Now delete the file (specifying an existing pending
            # change).
            c = p4.change([], 'empty pending change for deleted files')
            cnum = c['change']
            result = p4.delete(fname, change=cnum)
            self.failUnless(result[0]['depotFile']\
                            == p4.where(fname)[0]['depotFile'])
            self.failUnless(type(result[0]['rev']) == types.IntType)
            c = p4.change(change=cnum)
            self.failUnless(c['files'][0]['depotFile']\
                            == result[0]['depotFile'])
            self.failUnless(c['files'][0]['action'] == 'delete')
            # cleanup
            p4.change(files=[], change=cnum)
            p4.change(change=cnum, delete=1)
            p4.revert(fname)
        finally:
            os.chdir(top)

    def test_delete_specify_bogus_change(self):
        # Submitted or non-existent changelist numbers must raise P4LibError.
        p4 = P4()
        top = os.getcwd()
        andrew = testsupport.users['andrew']
        try:
            os.chdir(andrew['home'])
            # First add and submit a file.
            fname = 'test_delete_specify_bogus_change.txt'
            fout = open(fname, 'w')
            fout.write('Hello there.\n')
            fout.close()
            p4.add(fname)
            p4.submit(fname, 'add this file to be deleted')
            latestCnum = p4.changes(max=1)[0]['change']
            # Specify an already submitted change.
            self.failUnlessRaises(P4LibError, p4.delete, fname,
                                  change=latestCnum)
            # Specify a non-existant change.
            self.failUnlessRaises(P4LibError, p4.delete, fname,
                                  change=latestCnum+1)
            # cleanup
            p4.revert(fname)
        finally:
            os.chdir(top)
def suite():
    """Return a unittest.TestSuite to be used by test.py."""
    # unittest.makeSuite() is deprecated (and removed in Python 3.13);
    # TestLoader.loadTestsFromTestCase() is the supported equivalent and
    # is available on older Pythons as well.
    return unittest.TestLoader().loadTestsFromTestCase(DeleteTestCase)
| [
"os.chdir",
"unittest.makeSuite",
"p4lib.P4",
"os.getcwd"
] | [((5507, 5541), 'unittest.makeSuite', 'unittest.makeSuite', (['DeleteTestCase'], {}), '(DeleteTestCase)\n', (5525, 5541), False, 'import unittest\n'), ((429, 433), 'p4lib.P4', 'P4', ([], {}), '()\n', (431, 433), False, 'from p4lib import P4, P4LibError\n'), ((448, 459), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (457, 459), False, 'import os\n'), ((1484, 1488), 'p4lib.P4', 'P4', ([], {}), '()\n', (1486, 1488), False, 'from p4lib import P4, P4LibError\n'), ((1503, 1514), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1512, 1514), False, 'import os\n'), ((2413, 2417), 'p4lib.P4', 'P4', ([], {}), '()\n', (2415, 2417), False, 'from p4lib import P4, P4LibError\n'), ((2432, 2443), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2441, 2443), False, 'import os\n'), ((3220, 3224), 'p4lib.P4', 'P4', ([], {}), '()\n', (3222, 3224), False, 'from p4lib import P4, P4LibError\n'), ((3239, 3250), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3248, 3250), False, 'import os\n'), ((4521, 4525), 'p4lib.P4', 'P4', ([], {}), '()\n', (4523, 4525), False, 'from p4lib import P4, P4LibError\n'), ((4540, 4551), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4549, 4551), False, 'import os\n'), ((530, 554), 'os.chdir', 'os.chdir', (["andrew['home']"], {}), "(andrew['home'])\n", (538, 554), False, 'import os\n'), ((1414, 1427), 'os.chdir', 'os.chdir', (['top'], {}), '(top)\n', (1422, 1427), False, 'import os\n'), ((1585, 1609), 'os.chdir', 'os.chdir', (["andrew['home']"], {}), "(andrew['home'])\n", (1593, 1609), False, 'import os\n'), ((2343, 2356), 'os.chdir', 'os.chdir', (['top'], {}), '(top)\n', (2351, 2356), False, 'import os\n'), ((2514, 2538), 'os.chdir', 'os.chdir', (["andrew['home']"], {}), "(andrew['home'])\n", (2522, 2538), False, 'import os\n'), ((3150, 3163), 'os.chdir', 'os.chdir', (['top'], {}), '(top)\n', (3158, 3163), False, 'import os\n'), ((3321, 3345), 'os.chdir', 'os.chdir', (["andrew['home']"], {}), "(andrew['home'])\n", (3329, 3345), False, 'import os\n'), ((4445, 4458), 
'os.chdir', 'os.chdir', (['top'], {}), '(top)\n', (4453, 4458), False, 'import os\n'), ((4622, 4646), 'os.chdir', 'os.chdir', (["andrew['home']"], {}), "(andrew['home'])\n", (4630, 4646), False, 'import os\n'), ((5407, 5420), 'os.chdir', 'os.chdir', (['top'], {}), '(top)\n', (5415, 5420), False, 'import os\n')] |
from typing import Any, Generic, Protocol, Type, TypeVar
from loguru import logger
from sqlalchemy import select, func
from sqlalchemy.ext.asyncio import AsyncSession
class ModelBase(Protocol):
    """Structural type for ORM models: anything exposing an ``id``
    attribute and accepting arbitrary constructor arguments."""

    # Primary-key attribute; concrete type depends on the model.
    id: Any

    def __init__(*args, **kwargs):
        # NOTE(review): no explicit `self` parameter; harmless for a
        # Protocol stub (never instantiated), but unconventional.
        ...
# Generic placeholder used so FactoryMixin.__call__ returns the caller's type.
T = TypeVar("T")
# Any ORM model satisfying the ModelBase protocol.
ModelT = TypeVar("ModelT", bound=ModelBase)
class FactoryMixin:
    """Makes a repository instance callable: calling it binds an
    AsyncSession and returns the same instance, enabling chained use."""

    def __call__(self: T, session: AsyncSession) -> T:
        logger.debug("装填Item session")  # debug message: "loading item session"
        self._session = session
        return self
class RepoBase(Generic[ModelT], FactoryMixin):
    """Generic async read-only repository over a single SQLAlchemy model.

    Subclasses set ``model``; ``_session`` is injected by calling the
    instance (see FactoryMixin) before any query method is used.
    """

    model: Type[ModelT]
    _session: AsyncSession

    async def get(self, /, id: Any) -> ModelT | None:
        """Fetch one row whose ``id`` matches, or None if absent."""
        return (
            await self._session.execute(select(self.model).where(self.model.id == id))
        ).scalar()

    async def get_multi(self, /, offset: int = 0, limit: int = 100) -> list[ModelT]:
        """Fetch up to `limit` rows starting at `offset`."""
        return (
            (
                await self._session.execute(
                    select(self.model).offset(offset).limit(limit)
                )
            )
            .scalars()
            .all()
        )

    async def count(self) -> int:
        """Return the total number of rows for this model."""
        return (
            (await self._session.execute(select(func.count(self.model.id))))
            .scalars()
            .one()
        )
| [
"sqlalchemy.func.count",
"sqlalchemy.select",
"loguru.logger.debug",
"typing.TypeVar"
] | [((263, 275), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (270, 275), False, 'from typing import Any, Generic, Protocol, Type, TypeVar\n'), ((286, 320), 'typing.TypeVar', 'TypeVar', (['"""ModelT"""'], {'bound': 'ModelBase'}), "('ModelT', bound=ModelBase)\n", (293, 320), False, 'from typing import Any, Generic, Protocol, Type, TypeVar\n'), ((406, 436), 'loguru.logger.debug', 'logger.debug', (['"""装填Item session"""'], {}), "('装填Item session')\n", (418, 436), False, 'from loguru import logger\n'), ((701, 719), 'sqlalchemy.select', 'select', (['self.model'], {}), '(self.model)\n', (707, 719), False, 'from sqlalchemy import select, func\n'), ((1180, 1205), 'sqlalchemy.func.count', 'func.count', (['self.model.id'], {}), '(self.model.id)\n', (1190, 1205), False, 'from sqlalchemy import select, func\n'), ((949, 967), 'sqlalchemy.select', 'select', (['self.model'], {}), '(self.model)\n', (955, 967), False, 'from sqlalchemy import select, func\n')] |
from __future__ import annotations
from .configs import *
from . import shared as td
import hashlib
# if TYPE_CHECKING:
# from ..opentele import *
class AuthKeyType(IntEnum):
    """
    Type of `AuthKey`
    ### Attributes:
        Generated (`IntEnum`):
            Generated key
        Temporary (`IntEnum`):
            Temporary key
        ReadFromFile (`IntEnum`):
            Key red from file
        Local (`IntEnum`):
            Local key
    """

    Generated = 0  # freshly generated key
    Temporary = 1  # temporary key
    ReadFromFile = 2  # key read from a file
    Local = 3  # local key
class AuthKey(BaseObject):
    """
    Authorization key used for [MTProto](https://core.telegram.org/mtproto)
    It's also used to encrypt and decrypt local tdata
    ### Attributes:
        DcId (DcId):
            Data Center ID (from 1 to 5).
        type (AuthKeyType):
            Type of the key.
        key (bytes):
            The actual key, 256 `bytes` in length.
    """

    # Key length in bytes.
    kSize = 256

    def __init__(self, key: bytes = bytes(), type: AuthKeyType = AuthKeyType.Generated, dcId: DcId = DcId.Invalid) -> None:  # type: ignore
        self.__type = type
        self.__dcId = dcId
        self.__key = key
        # if (type == self.Type.Generated) or (type == self.Type.Temporary):
        #     self.__creationtime = ...
        self.__countKeyId()

    @property
    def dcId(self) -> DcId:
        return self.__dcId

    @property
    def type(self) -> AuthKeyType:
        return self.__type

    @property
    def key(self) -> bytes:
        return self.__key

    def write(self, to: QDataStream) -> None:
        """Serialize the raw key bytes into the stream."""
        to.writeRawData(self.key)

    def __countKeyId(self) -> None:
        # keyId = bytes 12..20 of SHA-1(key), read little-endian.
        hash = hashlib.sha1(self.__key).digest()
        self.__keyId = int.from_bytes(hash[12 : 12 + 8], "little")

    def prepareAES_oldmtp(
        self, msgKey: bytes, send: bool
    ) -> typing.Tuple[bytes, bytes]:
        """Derive the (aesKey, aesIv) pair from `msgKey`.

        `send` selects the key-slice offset (0 for outgoing, 8 for
        incoming). The SHA-1 slicing appears to follow the old MTProto
        key-derivation scheme — verify against the protocol spec before
        changing any offset.
        """
        x = 0 if send else 8
        sha1_a = hashlib.sha1(msgKey[:16] + self.__key[x : x + 32]).digest()
        sha1_b = hashlib.sha1(
            self.__key[x + 32 : x + 32 + 16]
            + msgKey[:16]
            + self.__key[x + 48 : x + 48 + 16]
        ).digest()
        sha1_c = hashlib.sha1(self.__key[x + 64 : x + 64 + 32] + msgKey[:16]).digest()
        sha1_d = hashlib.sha1(msgKey[:16] + self.__key[x + 96 : x + 96 + 32]).digest()
        aesKey = sha1_a[:8] + sha1_b[8 : 8 + 12] + sha1_c[4 : 4 + 12]
        aesIv = sha1_a[8 : 8 + 12] + sha1_b[:8] + sha1_c[16 : 16 + 4] + sha1_d[:8]
        return aesKey, aesIv

    @staticmethod
    def FromStream(
        stream: QDataStream,
        type: AuthKeyType = AuthKeyType.ReadFromFile,
        dcId: DcId = DcId(0),
    ) -> AuthKey:
        """Read kSize raw bytes from `stream` and wrap them in an AuthKey."""
        keyData = stream.readRawData(AuthKey.kSize)
        return AuthKey(keyData, type, dcId)
| [
"hashlib.sha1"
] | [((1662, 1686), 'hashlib.sha1', 'hashlib.sha1', (['self.__key'], {}), '(self.__key)\n', (1674, 1686), False, 'import hashlib\n'), ((1914, 1962), 'hashlib.sha1', 'hashlib.sha1', (['(msgKey[:16] + self.__key[x:x + 32])'], {}), '(msgKey[:16] + self.__key[x:x + 32])\n', (1926, 1962), False, 'import hashlib\n'), ((1992, 2088), 'hashlib.sha1', 'hashlib.sha1', (['(self.__key[x + 32:x + 32 + 16] + msgKey[:16] + self.__key[x + 48:x + 48 + 16])'], {}), '(self.__key[x + 32:x + 32 + 16] + msgKey[:16] + self.__key[x + \n 48:x + 48 + 16])\n', (2004, 2088), False, 'import hashlib\n'), ((2161, 2219), 'hashlib.sha1', 'hashlib.sha1', (['(self.__key[x + 64:x + 64 + 32] + msgKey[:16])'], {}), '(self.__key[x + 64:x + 64 + 32] + msgKey[:16])\n', (2173, 2219), False, 'import hashlib\n'), ((2248, 2306), 'hashlib.sha1', 'hashlib.sha1', (['(msgKey[:16] + self.__key[x + 96:x + 96 + 32])'], {}), '(msgKey[:16] + self.__key[x + 96:x + 96 + 32])\n', (2260, 2306), False, 'import hashlib\n')] |
import os
from pandas import DataFrame
import time
# Parallel series of (elapsed seconds, total bytes) samples; seeded with 0.
times = [0]
sizes = [0]
# Number of 1-second samples to record once growth is detected.
run_time_seconds = 200
def run_storage_analysis(path='temporary'):
    """Return the total size in bytes of all files under `path`.

    The scanned directory is now a parameter (defaulting to the original
    hard-coded ``'temporary'``) so the helper can be reused on other trees.

    The original retried ``os.path.getsize`` forever inside a
    ``while True`` loop, so a file deleted between the walk and the stat
    would hang the process; files that vanish or become unreadable
    mid-scan are now simply skipped.
    """
    total_size = 0
    # Navigate the whole directory tree under `path`.
    for dirpath, dirnames, filenames in os.walk(path):
        for name in filenames:
            try:
                total_size += os.path.getsize(os.path.join(dirpath, name))
            except OSError:
                # File disappeared or is unreadable; ignore it.
                continue
    return total_size
def upload_analysis():
    """Write the collected (time, size) samples to Storage_analysis.xlsx."""
    columns = {'Time': times, 'Size (bytes)': sizes}
    report = DataFrame(columns)
    report.to_excel('Storage_analysis.xlsx', sheet_name='sheet1', index=False)
# Seed the size series with the current on-disk total so the comparison
# below only fires on a change.
past_run_file_size = run_storage_analysis()
sizes[-1] = past_run_file_size
print("Storage analysis started.")
while True:
    current_file_size = run_storage_analysis()
    if current_file_size == sizes[-1]:
        # No change yet; poll again in one second.
        time.sleep(1)
    else:
        # Change detected: sample once per second for run_time_seconds
        # iterations, then export the series and stop.
        for i in range(run_time_seconds):
            times.append(times[-1] + 1)
            sizes.append(current_file_size)
            time.sleep(1)
            current_file_size = run_storage_analysis()
        upload_analysis()
        break
| [
"os.path.getsize",
"os.path.join",
"time.sleep",
"pandas.DataFrame",
"os.walk"
] | [((315, 328), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (322, 328), False, 'import os\n'), ((846, 895), 'pandas.DataFrame', 'DataFrame', (["{'Time': times, 'Size (bytes)': sizes}"], {}), "({'Time': times, 'Size (bytes)': sizes})\n", (855, 895), False, 'from pandas import DataFrame\n'), ((1199, 1212), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1209, 1212), False, 'import time\n'), ((1366, 1379), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1376, 1379), False, 'import time\n'), ((508, 535), 'os.path.join', 'os.path.join', (['dirpath', 'name'], {}), '(dirpath, name)\n', (520, 535), False, 'import os\n'), ((661, 679), 'os.path.getsize', 'os.path.getsize', (['f'], {}), '(f)\n', (676, 679), False, 'import os\n'), ((768, 784), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (778, 784), False, 'import time\n')] |
"""
Test multilevel overriding of java methods in jythonc.
"""
from java.util import Date
class SubDate(Date):
    """First-level override of ``java.util.Date.toString``."""

    def toString(self):
        # Call the superclass implementation purely to exercise the Java
        # super-call path; the returned string is deliberately discarded.
        # (The original bound it to an unused local `s`.)
        Date.toString(self)
        return 'SubDate -> Date'
class SubSubDate(SubDate):
    """Second-level override that chains onto SubDate's string."""

    def toString(self):
        parent_repr = SubDate.toString(self)
        return 'SubSubDate -> ' + parent_repr
# Verify that each level of the override chain delegates correctly.
assert SubDate().toString() == 'SubDate -> Date'
assert SubSubDate().toString() == 'SubSubDate -> SubDate -> Date'
| [
"java.util.Date.toString"
] | [((150, 169), 'java.util.Date.toString', 'Date.toString', (['self'], {}), '(self)\n', (163, 169), False, 'from java.util import Date\n')] |
# -*- coding: utf-8 -*-
"""
Created on 2017-8-24
@author: cheng.li
"""
import bisect
import datetime as dt
from typing import Iterable
from typing import Union
import numpy as np
import pandas as pd
from simpleutils.asserts import require
from PyFin.DateUtilities import Period
from PyFin.api import BizDayConventions
from PyFin.api import DateGeneration
from PyFin.api import advanceDateByCalendar
from PyFin.api import makeSchedule
from alphamind.data.engines.sqlengine import SqlEngine
from alphamind.data.engines.sqlengine import total_risk_factors
from alphamind.data.engines.universe import Universe
from alphamind.data.processing import factor_processing
from alphamind.data.transformer import Transformer
from alphamind.utilities import alpha_logger
from alphamind.utilities import map_freq
def _merge_df(engine, names, factor_df, target_df, universe, dates, risk_model, neutralized_risk):
    """Join factor and target frames with risk-model exposures.

    Returns the tuple ``(target_df, dates, date_label, risk_exp, x_values,
    y_values, train_x, train_y, codes)`` consumed by the batch preparation
    helpers below.
    """
    # fetch_risk_model_range returns a pair; element [1] is the exposure frame.
    risk_df = engine.fetch_risk_model_range(universe, dates=dates, risk_model=risk_model)[1]
    # Keep only risk factors that are not already among the feature names.
    used_neutralized_risk = list(set(total_risk_factors).difference(names))
    risk_df = risk_df[['trade_date', 'code'] + used_neutralized_risk].dropna()
    # Restrict targets to rows with complete risk information.
    target_df = pd.merge(target_df, risk_df, on=['trade_date', 'code']).dropna()
    if neutralized_risk:
        train_x = pd.merge(factor_df, risk_df, on=['trade_date', 'code'])
        train_y = target_df.copy()
        risk_exp = train_x[neutralized_risk].values.astype(float)
        x_values = train_x[names].values.astype(float)
        y_values = train_y[['dx']].values
    else:
        # No neutralization requested: expose the raw factor matrix as-is.
        risk_exp = None
        train_x = factor_df.copy()
        train_y = target_df.copy()
        x_values = train_x[names].values.astype(float)
        y_values = train_y[['dx']].values
    codes = train_x['code'].values
    date_label = pd.DatetimeIndex(factor_df.trade_date).to_pydatetime()
    # Unique, sorted trade dates actually present in the factor data.
    dates = np.unique(date_label)
    return target_df, dates, date_label, risk_exp, x_values, y_values, train_x, train_y, codes
def prepare_data(engine: SqlEngine,
                 factors: Union[Transformer, Iterable[object]],
                 start_date: str,
                 end_date: str,
                 frequency: str,
                 universe: Universe,
                 benchmark: int,
                 warm_start: int = 0,
                 fit_target: Union[Transformer, object] = None):
    """Load factor, target, industry and benchmark data for a date range.

    :param engine: data engine used for all database fetches
    :param factors: factor expressions, or an already-built Transformer
    :param start_date: first schedule date (``YYYY-MM-DD``)
    :param end_date: last schedule date (``YYYY-MM-DD``)
    :param frequency: rebalance frequency string understood by PyFin
    :param universe: stock universe to query
    :param benchmark: benchmark index code used for portfolio weights
    :param warm_start: number of extra periods to prepend before start_date
    :param fit_target: optional custom fit target; defaults to forward dx return
    :return: ``(dates, target frame, factor/meta frame)``
    """
    if warm_start > 0:
        # Shift the start back by `warm_start` periods so models have history.
        p = Period(frequency)
        p = Period(length=-warm_start * p.length(), units=p.units())
        start_date = advanceDateByCalendar('china.sse', start_date, p).strftime('%Y-%m-%d')
    dates = makeSchedule(start_date,
                         end_date,
                         frequency,
                         calendar='china.sse',
                         dateRule=BizDayConventions.Following,
                         dateGenerationRule=DateGeneration.Forward)
    dates = [d.strftime('%Y-%m-%d') for d in dates]
    horizon = map_freq(frequency)
    if isinstance(factors, Transformer):
        transformer = factors
    else:
        transformer = Transformer(factors)
    factor_df = engine.fetch_factor_range(universe,
                                          factors=transformer,
                                          dates=dates).sort_values(['trade_date', 'code'])
    alpha_logger.info("factor data loading finished")
    if fit_target is None:
        target_df = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon)
    else:
        # Custom target: fetch one extra period forward, then forward-fill per code.
        one_more_date = advanceDateByCalendar('china.sse', dates[-1], frequency)
        target_df = engine.fetch_factor_range_forward(universe, factors=fit_target,
                                                      dates=dates + [one_more_date])
        target_df = target_df[target_df.trade_date.isin(dates)]
        target_df = target_df.groupby('code').apply(lambda x: x.fillna(method='pad'))
    alpha_logger.info("fit target data loading finished")
    industry_df = engine.fetch_industry_range(universe, dates=dates)
    alpha_logger.info("industry data loading finished")
    benchmark_df = engine.fetch_benchmark_range(benchmark, dates=dates)
    alpha_logger.info("benchmark data loading finished")
    df = pd.merge(factor_df, target_df, on=['trade_date', 'code']).dropna()
    # Benchmark weights are optional (left join); missing weights become 0 below.
    df = pd.merge(df, benchmark_df, on=['trade_date', 'code'], how='left')
    df = pd.merge(df, industry_df, on=['trade_date', 'code'])
    df['weight'] = df['weight'].fillna(0.)
    df.dropna(inplace=True)
    return dates, df[['trade_date', 'code', 'dx']], df[
        ['trade_date', 'code', 'weight', 'industry_code', 'industry'] + transformer.names]
def batch_processing(names,
                     x_values,
                     y_values,
                     groups,
                     group_label,
                     batch,
                     risk_exp,
                     pre_process,
                     post_process,
                     codes):
    """Slice the full sample into per-date train/predict buckets.

    For each window of ``batch`` consecutive group dates, the rows strictly
    before the window end form the training bucket and the rows at the window
    end form the prediction bucket.  All buckets are keyed by the window-end
    date.  ``group_label`` is assumed sorted (bisect is used throughout).
    """
    train_x_buckets = {}
    train_y_buckets = {}
    train_risk_buckets = {}
    predict_x_buckets = {}
    predict_y_buckets = {}
    predict_risk_buckets = {}
    predict_codes_bucket = {}
    for i, start in enumerate(groups[:-batch]):
        end = groups[i + batch]
        # Training slice: rows in [start, end) — bisect_left excludes `end`.
        left_index = bisect.bisect_left(group_label, start)
        right_index = bisect.bisect_left(group_label, end)
        this_raw_x = x_values[left_index:right_index]
        this_raw_y = y_values[left_index:right_index]
        if risk_exp is not None:
            this_risk_exp = risk_exp[left_index:right_index]
        else:
            this_risk_exp = None
        train_x_buckets[end] = pd.DataFrame(factor_processing(this_raw_x,
                                                          pre_process=pre_process,
                                                          risk_factors=this_risk_exp,
                                                          post_process=post_process),
                                        columns=names)
        train_y_buckets[end] = factor_processing(this_raw_y,
                                                 pre_process=pre_process,
                                                 risk_factors=this_risk_exp,
                                                 post_process=post_process)
        train_risk_buckets[end] = this_risk_exp
        # Prediction slice: rows in (start, end] — bisect_right includes `end`.
        left_index = bisect.bisect_right(group_label, start)
        right_index = bisect.bisect_right(group_label, end)
        sub_dates = group_label[left_index:right_index]
        this_raw_x = x_values[left_index:right_index]
        this_codes = codes[left_index:right_index]
        if risk_exp is not None:
            this_risk_exp = risk_exp[left_index:right_index]
        else:
            this_risk_exp = None
        ne_x = factor_processing(this_raw_x,
                                 pre_process=pre_process,
                                 risk_factors=this_risk_exp,
                                 post_process=post_process)
        # Within the slice, keep only the rows exactly at `end`.
        inner_left_index = bisect.bisect_left(sub_dates, end)
        inner_right_index = bisect.bisect_right(sub_dates, end)
        predict_x_buckets[end] = pd.DataFrame(ne_x[inner_left_index:inner_right_index],
                                              columns=names)
        if risk_exp is not None:
            predict_risk_buckets[end] = this_risk_exp[inner_left_index:inner_right_index]
        else:
            # NOTE(review): this rebinds the whole dict to None on every
            # iteration; it works only because `risk_exp` is loop-invariant.
            predict_risk_buckets = None
        predict_codes_bucket[end] = this_codes[inner_left_index:inner_right_index]
        this_raw_y = y_values[left_index:right_index]
        if len(this_raw_y) > 0:
            ne_y = factor_processing(this_raw_y,
                                     pre_process=pre_process,
                                     risk_factors=this_risk_exp,
                                     post_process=post_process)
            predict_y_buckets[end] = ne_y[inner_left_index:inner_right_index]
    return train_x_buckets, \
           train_y_buckets, \
           train_risk_buckets, \
           predict_x_buckets, \
           predict_y_buckets, \
           predict_risk_buckets, \
           predict_codes_bucket
def fetch_data_package(engine: SqlEngine,
                       alpha_factors: Iterable[object],
                       start_date: str,
                       end_date: str,
                       frequency: str,
                       universe: Universe,
                       benchmark: int,
                       warm_start: int = 0,
                       batch: int = 1,
                       neutralized_risk: Iterable[str] = None,
                       risk_model: str = 'short',
                       pre_process: Iterable[object] = None,
                       post_process: Iterable[object] = None,
                       fit_target: Union[Transformer, object] = None) -> dict:
    """Build the full train/predict data package for a backtest.

    Loads factors and targets via :func:`prepare_data`, merges in risk
    exposures via :func:`_merge_df`, buckets the sample per rebalance date via
    :func:`batch_processing`, and returns a dict with keys ``'x_names'``,
    ``'settlement'``, ``'train'`` and ``'predict'``.
    """
    alpha_logger.info("Starting data package fetching ...")
    transformer = Transformer(alpha_factors)
    names = transformer.names
    # Load `warm_start + batch` extra periods so the first window has history.
    dates, target_df, factor_df = prepare_data(engine,
                                               transformer,
                                               start_date,
                                               end_date,
                                               frequency,
                                               universe,
                                               benchmark,
                                               warm_start + batch,
                                               fit_target=fit_target)
    target_df, dates, date_label, risk_exp, x_values, y_values, train_x, train_y, codes = \
        _merge_df(engine, names, factor_df, target_df, universe, dates, risk_model,
                  neutralized_risk)
    alpha_logger.info("data merging finished")
    target_df['weight'] = train_x['weight']
    target_df['industry'] = train_x['industry']
    target_df['industry_code'] = train_x['industry_code']
    if neutralized_risk:
        # Attach each neutralized risk exposure column to the settlement frame.
        for i, name in enumerate(neutralized_risk):
            target_df.loc[:, name] = risk_exp[:, i]
    alpha_logger.info("Loading data is finished")
    train_x_buckets, train_y_buckets, train_risk_buckets, predict_x_buckets, predict_y_buckets, predict_risk_buckets, predict_codes_bucket \
        = batch_processing(names,
                           x_values,
                           y_values,
                           dates,
                           date_label,
                           batch,
                           risk_exp,
                           pre_process,
                           post_process,
                           codes)
    alpha_logger.info("Data processing is finished")
    ret = dict()
    ret['x_names'] = names
    ret['settlement'] = target_df[target_df.trade_date >= start_date]
    # Drop warm-up buckets: keep only window-end dates on/after start_date.
    train_x_buckets = {k: train_x_buckets[k] for k in train_x_buckets if
                       k.strftime('%Y-%m-%d') >= start_date}
    train_y_buckets = {k: train_y_buckets[k] for k in train_y_buckets if
                       k.strftime('%Y-%m-%d') >= start_date}
    train_risk_buckets = {k: train_risk_buckets[k] for k in train_risk_buckets if
                          k.strftime('%Y-%m-%d') >= start_date}
    predict_x_buckets = {k: predict_x_buckets[k] for k in predict_x_buckets if
                         k.strftime('%Y-%m-%d') >= start_date}
    predict_y_buckets = {k: predict_y_buckets[k] for k in predict_y_buckets if
                         k.strftime('%Y-%m-%d') >= start_date}
    if neutralized_risk:
        predict_risk_buckets = {k: predict_risk_buckets[k] for k in predict_risk_buckets if
                                k.strftime('%Y-%m-%d') >= start_date}
    else:
        predict_risk_buckets = None
    predict_codes_bucket = {k: predict_codes_bucket[k] for k in predict_codes_bucket if
                            k.strftime('%Y-%m-%d') >= start_date}
    ret['train'] = {'x': train_x_buckets, 'y': train_y_buckets, 'risk': train_risk_buckets}
    ret['predict'] = {'x': predict_x_buckets, 'y': predict_y_buckets, 'risk': predict_risk_buckets,
                      'code': predict_codes_bucket}
    return ret
def fetch_train_phase(engine,
                      alpha_factors: Union[Transformer, Iterable[object]],
                      ref_date,
                      frequency,
                      universe,
                      batch=1,
                      neutralized_risk: Iterable[str] = None,
                      risk_model: str = 'short',
                      pre_process: Iterable[object] = None,
                      post_process: Iterable[object] = None,
                      warm_start: int = 0,
                      fit_target: Union[Transformer, object] = None) -> dict:
    """Assemble the training sample for a single reference date.

    Builds a schedule of the last ``warm_start + batch`` periods ending at
    ``ref_date`` and returns ``{'x_names': ..., 'train': {'x', 'y', 'code'}}``
    with factors processed by :func:`factor_processing`.
    """
    if isinstance(alpha_factors, Transformer):
        transformer = alpha_factors
    else:
        transformer = Transformer(alpha_factors)
    # Walk back (warm_start + batch) periods from ref_date to get the window start.
    p = Period(frequency)
    p = Period(length=-(warm_start + batch) * p.length(), units=p.units())
    start_date = advanceDateByCalendar('china.sse', ref_date, p, BizDayConventions.Following)
    dates = makeSchedule(start_date,
                         ref_date,
                         frequency,
                         calendar='china.sse',
                         dateRule=BizDayConventions.Following,
                         dateGenerationRule=DateGeneration.Backward)
    horizon = map_freq(frequency)
    factor_df = engine.fetch_factor_range(universe, factors=transformer, dates=dates)
    if fit_target is None:
        target_df = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon)
    else:
        # Custom target: fetch one extra period forward, then forward-fill per code.
        one_more_date = advanceDateByCalendar('china.sse', dates[-1], frequency)
        target_df = engine.fetch_factor_range_forward(universe, factors=fit_target,
                                                      dates=dates + [one_more_date])
        target_df = target_df[target_df.trade_date.isin(dates)]
        target_df = target_df.groupby('code').apply(lambda x: x.fillna(method='pad'))
    df = pd.merge(factor_df, target_df, on=['trade_date', 'code']).dropna()
    target_df, factor_df = df[['trade_date', 'code', 'dx']], df[
        ['trade_date', 'code'] + transformer.names]
    target_df, dates, date_label, risk_exp, x_values, y_values, _, _, codes = \
        _merge_df(engine, transformer.names, factor_df, target_df, universe, dates, risk_model,
                  neutralized_risk)
    if dates[-1] == dt.datetime.strptime(ref_date, '%Y-%m-%d'):
        # ref_date itself is in the sample: train strictly on earlier dates.
        require(len(dates) >= 2, ValueError,
                "No previous data for training for the date {0}".format(ref_date))
        end = dates[-2]
        start = dates[-batch - 1] if batch <= len(dates) - 1 else dates[0]
    else:
        end = dates[-1]
        start = dates[-batch] if batch <= len(dates) else dates[0]
    # Boolean mask selecting the rows inside [start, end].
    index = (date_label >= start) & (date_label <= end)
    this_raw_x = x_values[index]
    this_raw_y = y_values[index]
    this_code = codes[index]
    if risk_exp is not None:
        this_risk_exp = risk_exp[index]
    else:
        this_risk_exp = None
    ne_x = factor_processing(this_raw_x,
                             pre_process=pre_process,
                             risk_factors=this_risk_exp,
                             post_process=post_process)
    ne_y = factor_processing(this_raw_y,
                             pre_process=pre_process,
                             risk_factors=this_risk_exp,
                             post_process=post_process)
    ret = dict()
    ret['x_names'] = transformer.names
    ret['train'] = {'x': pd.DataFrame(ne_x, columns=transformer.names), 'y': ne_y,
                    'code': this_code}
    return ret
def fetch_predict_phase(engine,
                        alpha_factors: Union[Transformer, Iterable[object]],
                        ref_date,
                        frequency,
                        universe,
                        batch=1,
                        neutralized_risk: Iterable[str] = None,
                        risk_model: str = 'short',
                        pre_process: Iterable[object] = None,
                        post_process: Iterable[object] = None,
                        warm_start: int = 0,
                        fillna: str = None,
                        fit_target: Union[Transformer, object] = None):
    """Assemble the prediction sample for a single reference date.

    Returns ``{'x_names': ..., 'predict': {'x', 'code', 'y'}}`` where the
    prediction rows are those whose trade date equals ``ref_date``.  When
    ``ref_date`` is not the last available date, the prediction slots are
    ``None`` / empty.

    Fix vs. original: when no data exists for ``ref_date`` the original set
    ``ne_y = None`` and then unconditionally called ``ne_y.flatten()``,
    raising ``AttributeError``; the 'y' entry is now guarded.
    """
    if isinstance(alpha_factors, Transformer):
        transformer = alpha_factors
    else:
        transformer = Transformer(alpha_factors)
    # Walk back (warm_start + batch - 1) periods so the window ends at ref_date.
    p = Period(frequency)
    p = Period(length=-(warm_start + batch - 1) * p.length(), units=p.units())
    start_date = advanceDateByCalendar('china.sse', ref_date, p, BizDayConventions.Following)
    dates = makeSchedule(start_date,
                         ref_date,
                         frequency,
                         calendar='china.sse',
                         dateRule=BizDayConventions.Following,
                         dateGenerationRule=DateGeneration.Backward)
    horizon = map_freq(frequency)
    factor_df = engine.fetch_factor_range(universe, factors=transformer, dates=dates)
    if fillna:
        # Fill missing factor values with the cross-sectional median per date.
        factor_df = factor_df.groupby('trade_date').apply(
            lambda x: x.fillna(x.median())).reset_index(
            drop=True).dropna()
    else:
        factor_df = factor_df.dropna()
    if fit_target is None:
        target_df = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon)
    else:
        # Custom target: fetch one extra period forward, then forward-fill per code.
        one_more_date = advanceDateByCalendar('china.sse', dates[-1], frequency)
        target_df = engine.fetch_factor_range_forward(universe, factors=fit_target,
                                                      dates=dates + [one_more_date])
        target_df = target_df[target_df.trade_date.isin(dates)]
        target_df = target_df.groupby('code').apply(lambda x: x.fillna(method='pad'))
    names = transformer.names
    if neutralized_risk:
        risk_df = engine.fetch_risk_model_range(universe, dates=dates, risk_model=risk_model)[1]
        used_neutralized_risk = list(set(neutralized_risk).difference(names))
        risk_df = risk_df[['trade_date', 'code'] + used_neutralized_risk].dropna()
        train_x = pd.merge(factor_df, risk_df, on=['trade_date', 'code'])
        train_x = pd.merge(train_x, target_df, on=['trade_date', 'code'], how='left')
        risk_exp = train_x[neutralized_risk].values.astype(float)
    else:
        train_x = pd.merge(factor_df, target_df, on=['trade_date', 'code'], how='left')
        risk_exp = None
    # Targets (last column) may be NaN on the prediction date; keep those rows.
    train_x.dropna(inplace=True, subset=train_x.columns[:-1])
    x_values = train_x[names].values.astype(float)
    y_values = train_x[['dx']].values.astype(float)
    date_label = pd.DatetimeIndex(train_x.trade_date).to_pydatetime()
    dates = np.unique(date_label)
    if dates[-1] == dt.datetime.strptime(ref_date, '%Y-%m-%d'):
        end = dates[-1]
        start = dates[-batch] if batch <= len(dates) else dates[0]
        left_index = bisect.bisect_left(date_label, start)
        right_index = bisect.bisect_right(date_label, end)
        this_raw_x = x_values[left_index:right_index]
        this_raw_y = y_values[left_index:right_index]
        sub_dates = date_label[left_index:right_index]
        if risk_exp is not None:
            this_risk_exp = risk_exp[left_index:right_index]
        else:
            this_risk_exp = None
        ne_x = factor_processing(this_raw_x,
                                 pre_process=pre_process,
                                 risk_factors=this_risk_exp,
                                 post_process=post_process)
        ne_y = factor_processing(this_raw_y,
                                 pre_process=pre_process,
                                 risk_factors=this_risk_exp,
                                 post_process=post_process)
        # Keep only the rows exactly at the reference date.
        inner_left_index = bisect.bisect_left(sub_dates, end)
        inner_right_index = bisect.bisect_right(sub_dates, end)
        ne_x = ne_x[inner_left_index:inner_right_index]
        ne_y = ne_y[inner_left_index:inner_right_index]
        left_index = bisect.bisect_left(date_label, end)
        right_index = bisect.bisect_right(date_label, end)
        codes = train_x.code.values[left_index:right_index]
    else:
        # No row matches ref_date: return empty prediction slots.
        ne_x = None
        ne_y = None
        codes = None
    ret = dict()
    ret['x_names'] = transformer.names
    ret['predict'] = {'x': pd.DataFrame(ne_x, columns=transformer.names, index=codes), 'code': codes,
                      'y': ne_y.flatten() if ne_y is not None else None}
    return ret
| [
"alphamind.data.processing.factor_processing",
"numpy.unique",
"pandas.DataFrame",
"datetime.datetime.strptime",
"pandas.DatetimeIndex",
"pandas.merge",
"PyFin.api.makeSchedule",
"PyFin.DateUtilities.Period",
"bisect.bisect_right",
"alphamind.utilities.alpha_logger.info",
"PyFin.api.advanceDateB... | [((1853, 1874), 'numpy.unique', 'np.unique', (['date_label'], {}), '(date_label)\n', (1862, 1874), True, 'import numpy as np\n'), ((2571, 2728), 'PyFin.api.makeSchedule', 'makeSchedule', (['start_date', 'end_date', 'frequency'], {'calendar': '"""china.sse"""', 'dateRule': 'BizDayConventions.Following', 'dateGenerationRule': 'DateGeneration.Forward'}), "(start_date, end_date, frequency, calendar='china.sse',\n dateRule=BizDayConventions.Following, dateGenerationRule=DateGeneration\n .Forward)\n", (2583, 2728), False, 'from PyFin.api import makeSchedule\n'), ((2913, 2932), 'alphamind.utilities.map_freq', 'map_freq', (['frequency'], {}), '(frequency)\n', (2921, 2932), False, 'from alphamind.utilities import map_freq\n'), ((3269, 3318), 'alphamind.utilities.alpha_logger.info', 'alpha_logger.info', (['"""factor data loading finished"""'], {}), "('factor data loading finished')\n", (3286, 3318), False, 'from alphamind.utilities import alpha_logger\n'), ((3850, 3903), 'alphamind.utilities.alpha_logger.info', 'alpha_logger.info', (['"""fit target data loading finished"""'], {}), "('fit target data loading finished')\n", (3867, 3903), False, 'from alphamind.utilities import alpha_logger\n'), ((3978, 4029), 'alphamind.utilities.alpha_logger.info', 'alpha_logger.info', (['"""industry data loading finished"""'], {}), "('industry data loading finished')\n", (3995, 4029), False, 'from alphamind.utilities import alpha_logger\n'), ((4106, 4158), 'alphamind.utilities.alpha_logger.info', 'alpha_logger.info', (['"""benchmark data loading finished"""'], {}), "('benchmark data loading finished')\n", (4123, 4158), False, 'from alphamind.utilities import alpha_logger\n'), ((4245, 4310), 'pandas.merge', 'pd.merge', (['df', 'benchmark_df'], {'on': "['trade_date', 'code']", 'how': '"""left"""'}), "(df, benchmark_df, on=['trade_date', 'code'], how='left')\n", (4253, 4310), True, 'import pandas as pd\n'), ((4320, 4372), 'pandas.merge', 'pd.merge', (['df', 
'industry_df'], {'on': "['trade_date', 'code']"}), "(df, industry_df, on=['trade_date', 'code'])\n", (4328, 4372), True, 'import pandas as pd\n'), ((8805, 8860), 'alphamind.utilities.alpha_logger.info', 'alpha_logger.info', (['"""Starting data package fetching ..."""'], {}), "('Starting data package fetching ...')\n", (8822, 8860), False, 'from alphamind.utilities import alpha_logger\n'), ((8879, 8905), 'alphamind.data.transformer.Transformer', 'Transformer', (['alpha_factors'], {}), '(alpha_factors)\n', (8890, 8905), False, 'from alphamind.data.transformer import Transformer\n'), ((9695, 9737), 'alphamind.utilities.alpha_logger.info', 'alpha_logger.info', (['"""data merging finished"""'], {}), "('data merging finished')\n", (9712, 9737), False, 'from alphamind.utilities import alpha_logger\n'), ((10024, 10069), 'alphamind.utilities.alpha_logger.info', 'alpha_logger.info', (['"""Loading data is finished"""'], {}), "('Loading data is finished')\n", (10041, 10069), False, 'from alphamind.utilities import alpha_logger\n'), ((10584, 10632), 'alphamind.utilities.alpha_logger.info', 'alpha_logger.info', (['"""Data processing is finished"""'], {}), "('Data processing is finished')\n", (10601, 10632), False, 'from alphamind.utilities import alpha_logger\n'), ((12834, 12851), 'PyFin.DateUtilities.Period', 'Period', (['frequency'], {}), '(frequency)\n', (12840, 12851), False, 'from PyFin.DateUtilities import Period\n'), ((12945, 13021), 'PyFin.api.advanceDateByCalendar', 'advanceDateByCalendar', (['"""china.sse"""', 'ref_date', 'p', 'BizDayConventions.Following'], {}), "('china.sse', ref_date, p, BizDayConventions.Following)\n", (12966, 13021), False, 'from PyFin.api import advanceDateByCalendar\n'), ((13034, 13192), 'PyFin.api.makeSchedule', 'makeSchedule', (['start_date', 'ref_date', 'frequency'], {'calendar': '"""china.sse"""', 'dateRule': 'BizDayConventions.Following', 'dateGenerationRule': 'DateGeneration.Backward'}), "(start_date, ref_date, frequency, 
calendar='china.sse',\n dateRule=BizDayConventions.Following, dateGenerationRule=DateGeneration\n .Backward)\n", (13046, 13192), False, 'from PyFin.api import makeSchedule\n'), ((13324, 13343), 'alphamind.utilities.map_freq', 'map_freq', (['frequency'], {}), '(frequency)\n', (13332, 13343), False, 'from alphamind.utilities import map_freq\n'), ((15030, 15144), 'alphamind.data.processing.factor_processing', 'factor_processing', (['this_raw_x'], {'pre_process': 'pre_process', 'risk_factors': 'this_risk_exp', 'post_process': 'post_process'}), '(this_raw_x, pre_process=pre_process, risk_factors=\n this_risk_exp, post_process=post_process)\n', (15047, 15144), False, 'from alphamind.data.processing import factor_processing\n'), ((15239, 15353), 'alphamind.data.processing.factor_processing', 'factor_processing', (['this_raw_y'], {'pre_process': 'pre_process', 'risk_factors': 'this_risk_exp', 'post_process': 'post_process'}), '(this_raw_y, pre_process=pre_process, risk_factors=\n this_risk_exp, post_process=post_process)\n', (15256, 15353), False, 'from alphamind.data.processing import factor_processing\n'), ((16430, 16447), 'PyFin.DateUtilities.Period', 'Period', (['frequency'], {}), '(frequency)\n', (16436, 16447), False, 'from PyFin.DateUtilities import Period\n'), ((16545, 16621), 'PyFin.api.advanceDateByCalendar', 'advanceDateByCalendar', (['"""china.sse"""', 'ref_date', 'p', 'BizDayConventions.Following'], {}), "('china.sse', ref_date, p, BizDayConventions.Following)\n", (16566, 16621), False, 'from PyFin.api import advanceDateByCalendar\n'), ((16634, 16792), 'PyFin.api.makeSchedule', 'makeSchedule', (['start_date', 'ref_date', 'frequency'], {'calendar': '"""china.sse"""', 'dateRule': 'BizDayConventions.Following', 'dateGenerationRule': 'DateGeneration.Backward'}), "(start_date, ref_date, frequency, calendar='china.sse',\n dateRule=BizDayConventions.Following, dateGenerationRule=DateGeneration\n .Backward)\n", (16646, 16792), False, 'from PyFin.api import 
makeSchedule\n'), ((16924, 16943), 'alphamind.utilities.map_freq', 'map_freq', (['frequency'], {}), '(frequency)\n', (16932, 16943), False, 'from alphamind.utilities import map_freq\n'), ((18683, 18704), 'numpy.unique', 'np.unique', (['date_label'], {}), '(date_label)\n', (18692, 18704), True, 'import numpy as np\n'), ((1277, 1332), 'pandas.merge', 'pd.merge', (['factor_df', 'risk_df'], {'on': "['trade_date', 'code']"}), "(factor_df, risk_df, on=['trade_date', 'code'])\n", (1285, 1332), True, 'import pandas as pd\n'), ((2379, 2396), 'PyFin.DateUtilities.Period', 'Period', (['frequency'], {}), '(frequency)\n', (2385, 2396), False, 'from PyFin.DateUtilities import Period\n'), ((3037, 3057), 'alphamind.data.transformer.Transformer', 'Transformer', (['factors'], {}), '(factors)\n', (3048, 3057), False, 'from alphamind.data.transformer import Transformer\n'), ((3470, 3526), 'PyFin.api.advanceDateByCalendar', 'advanceDateByCalendar', (['"""china.sse"""', 'dates[-1]', 'frequency'], {}), "('china.sse', dates[-1], frequency)\n", (3491, 3526), False, 'from PyFin.api import advanceDateByCalendar\n'), ((5199, 5237), 'bisect.bisect_left', 'bisect.bisect_left', (['group_label', 'start'], {}), '(group_label, start)\n', (5217, 5237), False, 'import bisect\n'), ((5260, 5296), 'bisect.bisect_left', 'bisect.bisect_left', (['group_label', 'end'], {}), '(group_label, end)\n', (5278, 5296), False, 'import bisect\n'), ((5981, 6095), 'alphamind.data.processing.factor_processing', 'factor_processing', (['this_raw_y'], {'pre_process': 'pre_process', 'risk_factors': 'this_risk_exp', 'post_process': 'post_process'}), '(this_raw_y, pre_process=pre_process, risk_factors=\n this_risk_exp, post_process=post_process)\n', (5998, 6095), False, 'from alphamind.data.processing import factor_processing\n'), ((6309, 6348), 'bisect.bisect_right', 'bisect.bisect_right', (['group_label', 'start'], {}), '(group_label, start)\n', (6328, 6348), False, 'import bisect\n'), ((6371, 6408), 'bisect.bisect_right', 
'bisect.bisect_right', (['group_label', 'end'], {}), '(group_label, end)\n', (6390, 6408), False, 'import bisect\n'), ((6729, 6843), 'alphamind.data.processing.factor_processing', 'factor_processing', (['this_raw_x'], {'pre_process': 'pre_process', 'risk_factors': 'this_risk_exp', 'post_process': 'post_process'}), '(this_raw_x, pre_process=pre_process, risk_factors=\n this_risk_exp, post_process=post_process)\n', (6746, 6843), False, 'from alphamind.data.processing import factor_processing\n'), ((6966, 7000), 'bisect.bisect_left', 'bisect.bisect_left', (['sub_dates', 'end'], {}), '(sub_dates, end)\n', (6984, 7000), False, 'import bisect\n'), ((7029, 7064), 'bisect.bisect_right', 'bisect.bisect_right', (['sub_dates', 'end'], {}), '(sub_dates, end)\n', (7048, 7064), False, 'import bisect\n'), ((7098, 7167), 'pandas.DataFrame', 'pd.DataFrame', (['ne_x[inner_left_index:inner_right_index]'], {'columns': 'names'}), '(ne_x[inner_left_index:inner_right_index], columns=names)\n', (7110, 7167), True, 'import pandas as pd\n'), ((12798, 12824), 'alphamind.data.transformer.Transformer', 'Transformer', (['alpha_factors'], {}), '(alpha_factors)\n', (12809, 12824), False, 'from alphamind.data.transformer import Transformer\n'), ((13581, 13637), 'PyFin.api.advanceDateByCalendar', 'advanceDateByCalendar', (['"""china.sse"""', 'dates[-1]', 'frequency'], {}), "('china.sse', dates[-1], frequency)\n", (13602, 13637), False, 'from PyFin.api import advanceDateByCalendar\n'), ((14386, 14428), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['ref_date', '"""%Y-%m-%d"""'], {}), "(ref_date, '%Y-%m-%d')\n", (14406, 14428), True, 'import datetime as dt\n'), ((15518, 15563), 'pandas.DataFrame', 'pd.DataFrame', (['ne_x'], {'columns': 'transformer.names'}), '(ne_x, columns=transformer.names)\n', (15530, 15563), True, 'import pandas as pd\n'), ((16394, 16420), 'alphamind.data.transformer.Transformer', 'Transformer', (['alpha_factors'], {}), '(alpha_factors)\n', (16405, 16420), False, 'from 
alphamind.data.transformer import Transformer\n'), ((17395, 17451), 'PyFin.api.advanceDateByCalendar', 'advanceDateByCalendar', (['"""china.sse"""', 'dates[-1]', 'frequency'], {}), "('china.sse', dates[-1], frequency)\n", (17416, 17451), False, 'from PyFin.api import advanceDateByCalendar\n'), ((18104, 18159), 'pandas.merge', 'pd.merge', (['factor_df', 'risk_df'], {'on': "['trade_date', 'code']"}), "(factor_df, risk_df, on=['trade_date', 'code'])\n", (18112, 18159), True, 'import pandas as pd\n'), ((18178, 18245), 'pandas.merge', 'pd.merge', (['train_x', 'target_df'], {'on': "['trade_date', 'code']", 'how': '"""left"""'}), "(train_x, target_df, on=['trade_date', 'code'], how='left')\n", (18186, 18245), True, 'import pandas as pd\n'), ((18340, 18409), 'pandas.merge', 'pd.merge', (['factor_df', 'target_df'], {'on': "['trade_date', 'code']", 'how': '"""left"""'}), "(factor_df, target_df, on=['trade_date', 'code'], how='left')\n", (18348, 18409), True, 'import pandas as pd\n'), ((18726, 18768), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['ref_date', '"""%Y-%m-%d"""'], {}), "(ref_date, '%Y-%m-%d')\n", (18746, 18768), True, 'import datetime as dt\n'), ((18883, 18920), 'bisect.bisect_left', 'bisect.bisect_left', (['date_label', 'start'], {}), '(date_label, start)\n', (18901, 18920), False, 'import bisect\n'), ((18943, 18979), 'bisect.bisect_right', 'bisect.bisect_right', (['date_label', 'end'], {}), '(date_label, end)\n', (18962, 18979), False, 'import bisect\n'), ((19301, 19415), 'alphamind.data.processing.factor_processing', 'factor_processing', (['this_raw_x'], {'pre_process': 'pre_process', 'risk_factors': 'this_risk_exp', 'post_process': 'post_process'}), '(this_raw_x, pre_process=pre_process, risk_factors=\n this_risk_exp, post_process=post_process)\n', (19318, 19415), False, 'from alphamind.data.processing import factor_processing\n'), ((19526, 19640), 'alphamind.data.processing.factor_processing', 'factor_processing', (['this_raw_y'], {'pre_process': 
'pre_process', 'risk_factors': 'this_risk_exp', 'post_process': 'post_process'}), '(this_raw_y, pre_process=pre_process, risk_factors=\n this_risk_exp, post_process=post_process)\n', (19543, 19640), False, 'from alphamind.data.processing import factor_processing\n'), ((19763, 19797), 'bisect.bisect_left', 'bisect.bisect_left', (['sub_dates', 'end'], {}), '(sub_dates, end)\n', (19781, 19797), False, 'import bisect\n'), ((19826, 19861), 'bisect.bisect_right', 'bisect.bisect_right', (['sub_dates', 'end'], {}), '(sub_dates, end)\n', (19845, 19861), False, 'import bisect\n'), ((19997, 20032), 'bisect.bisect_left', 'bisect.bisect_left', (['date_label', 'end'], {}), '(date_label, end)\n', (20015, 20032), False, 'import bisect\n'), ((20055, 20091), 'bisect.bisect_right', 'bisect.bisect_right', (['date_label', 'end'], {}), '(date_label, end)\n', (20074, 20091), False, 'import bisect\n'), ((20308, 20366), 'pandas.DataFrame', 'pd.DataFrame', (['ne_x'], {'columns': 'transformer.names', 'index': 'codes'}), '(ne_x, columns=transformer.names, index=codes)\n', (20320, 20366), True, 'import pandas as pd\n'), ((1168, 1223), 'pandas.merge', 'pd.merge', (['target_df', 'risk_df'], {'on': "['trade_date', 'code']"}), "(target_df, risk_df, on=['trade_date', 'code'])\n", (1176, 1223), True, 'import pandas as pd\n'), ((1786, 1824), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['factor_df.trade_date'], {}), '(factor_df.trade_date)\n', (1802, 1824), True, 'import pandas as pd\n'), ((4169, 4226), 'pandas.merge', 'pd.merge', (['factor_df', 'target_df'], {'on': "['trade_date', 'code']"}), "(factor_df, target_df, on=['trade_date', 'code'])\n", (4177, 4226), True, 'import pandas as pd\n'), ((5593, 5707), 'alphamind.data.processing.factor_processing', 'factor_processing', (['this_raw_x'], {'pre_process': 'pre_process', 'risk_factors': 'this_risk_exp', 'post_process': 'post_process'}), '(this_raw_x, pre_process=pre_process, risk_factors=\n this_risk_exp, post_process=post_process)\n', (5610, 5707), 
False, 'from alphamind.data.processing import factor_processing\n'), ((7580, 7694), 'alphamind.data.processing.factor_processing', 'factor_processing', (['this_raw_y'], {'pre_process': 'pre_process', 'risk_factors': 'this_risk_exp', 'post_process': 'post_process'}), '(this_raw_y, pre_process=pre_process, risk_factors=\n this_risk_exp, post_process=post_process)\n', (7597, 7694), False, 'from alphamind.data.processing import factor_processing\n'), ((13967, 14024), 'pandas.merge', 'pd.merge', (['factor_df', 'target_df'], {'on': "['trade_date', 'code']"}), "(factor_df, target_df, on=['trade_date', 'code'])\n", (13975, 14024), True, 'import pandas as pd\n'), ((18618, 18654), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['train_x.trade_date'], {}), '(train_x.trade_date)\n', (18634, 18654), True, 'import pandas as pd\n'), ((2487, 2536), 'PyFin.api.advanceDateByCalendar', 'advanceDateByCalendar', (['"""china.sse"""', 'start_date', 'p'], {}), "('china.sse', start_date, p)\n", (2508, 2536), False, 'from PyFin.api import advanceDateByCalendar\n')] |
import logging
import sys
from AppleFluenza.bot import auto_load_cogs, bot
from utils.getenv import getenv
from utils.cli import header, option_parser
if __name__ == "__main__":
    # Print the CLI banner, then register all bot cogs before start-up.
    header()
    auto_load_cogs(bot)

    parser = option_parser()
    options, _args = parser.parse_args(sys.argv)

    # Default to the production token; debug mode swaps in the test token.
    token = getenv("TOKEN")
    if options.debug is not None:
        logging.getLogger().setLevel(logging.DEBUG)
        bot.logger.info("WARNING: AppleFluenza is now in debug mode.")
        token = getenv("TEST_TOKEN")
    if options.override is not None:
        bot.logger.info("Overriding token.")
        token = options.override

    bot.run(token)
| [
"logging.getLogger",
"utils.getenv.getenv",
"utils.cli.option_parser",
"AppleFluenza.bot.auto_load_cogs",
"AppleFluenza.bot.bot.run",
"AppleFluenza.bot.bot.logger.info",
"utils.cli.header"
] | [((186, 194), 'utils.cli.header', 'header', ([], {}), '()\n', (192, 194), False, 'from utils.cli import header, option_parser\n'), ((200, 219), 'AppleFluenza.bot.auto_load_cogs', 'auto_load_cogs', (['bot'], {}), '(bot)\n', (214, 219), False, 'from AppleFluenza.bot import auto_load_cogs, bot\n'), ((237, 252), 'utils.cli.option_parser', 'option_parser', ([], {}), '()\n', (250, 252), False, 'from utils.cli import header, option_parser\n'), ((319, 334), 'utils.getenv.getenv', 'getenv', (['"""TOKEN"""'], {}), "('TOKEN')\n", (325, 334), False, 'from utils.getenv import getenv\n'), ((651, 665), 'AppleFluenza.bot.bot.run', 'bot.run', (['token'], {}), '(token)\n', (658, 665), False, 'from AppleFluenza.bot import auto_load_cogs, bot\n'), ((430, 492), 'AppleFluenza.bot.bot.logger.info', 'bot.logger.info', (['"""WARNING: AppleFluenza is now in debug mode."""'], {}), "('WARNING: AppleFluenza is now in debug mode.')\n", (445, 492), False, 'from AppleFluenza.bot import auto_load_cogs, bot\n'), ((509, 529), 'utils.getenv.getenv', 'getenv', (['"""TEST_TOKEN"""'], {}), "('TEST_TOKEN')\n", (515, 529), False, 'from utils.getenv import getenv\n'), ((576, 612), 'AppleFluenza.bot.bot.logger.info', 'bot.logger.info', (['"""Overriding token."""'], {}), "('Overriding token.')\n", (591, 612), False, 'from AppleFluenza.bot import auto_load_cogs, bot\n'), ((378, 397), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (395, 397), False, 'import logging\n')] |
# %% Packages
import os
import pickle
from pyhocon import ConfigTree
# %% Functions
def load_pickle(loading_path: str):
    """This method loads the file at the specified path.

    Bug fix: the original opened the file and never closed it (resource
    leak); a ``with`` block now guarantees the handle is released even if
    unpickling raises.

    :param loading_path: Path at which object is saved (without the
        ``.pickle`` extension, which is appended here)
    :type loading_path: str
    :return: Desired file
    :rtype: Could be basically anything
    """
    with open(f"{loading_path}.pickle", "rb") as file:
        return pickle.load(file)
def check_scrapping_task(task, config: ConfigTree) -> None:
    """Run the scraping task and sanity-check its outputs.

    Verifies that every row of the meta dataframe has a matching image file
    on disk, that no values are missing, and that ages fall in a sane range.

    :param task: The task we would like to do
    :type task: self-written class
    :param config: Configuration file for the class
    :type config: ConfigTree
    """
    scraper = task(config=config, re_scrape_data=False)
    scraper.run()

    # Resolve this task's output locations from the config tree.
    output_paths = (
        config.get_config("paths")
        .get_config(scraper.name)
        .get_config("path_output")
    )
    image_dir = output_paths.get_string("image_data")
    meta_df = load_pickle(output_paths.get_string("processed_meta_information"))

    # Every athlete number in the meta data must have exactly one image file.
    expected_images = sorted(
        f"athlete_{number}.png" for number in meta_df.loc[:, "number"].tolist()
    )
    found_images = sorted(os.listdir(image_dir))
    assert (
        expected_images == found_images
    ), "We have a mismatch between meta information and images"

    # No missing values anywhere in the dataframe.
    assert meta_df.isna().sum().sum() == 0, "We have missing observations"

    # Ages must be plausible.
    ages = meta_df.loc[:, "age"]
    assert ages.min() >= 0 and ages.max() <= 100, "The age range seems questionable"
def check_preprocessing(task, config: ConfigTree) -> None:
    """Run the image preprocessing task end to end.

    :param task: Image classification task
    :type task: self-written class
    :param config: Corresponding Configuration file
    :type config: ConfigTree
    """
    pipeline = task(config=config)
    pipeline.run()

    # Resolve the configured paths; kept for parity with the other checks
    # (exercised only for their config-lookup effects).
    task_paths = config.get_config("paths").get_config(pipeline.name)
    path_input = task_paths.get_config("path_input")
    path_output = task_paths.get_config("path_output")
def check_image_classifer(task, config: ConfigTree) -> None:
    """Run the image classification task end to end.

    :param task: Image classification task
    :type task: self-written class
    :param config: Corresponding Configuration file
    :type config: ConfigTree
    """
    classifier = task(config=config)
    classifier.run()

    # Resolve the configured output path (lookup side effect only).
    task_paths = config.get_config("paths").get_config(classifier.name)
    path_output = task_paths.get_config("path_output")
| [
"os.listdir",
"pickle.load"
] | [((397, 414), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (408, 414), False, 'import pickle\n'), ((1449, 1471), 'os.listdir', 'os.listdir', (['image_path'], {}), '(image_path)\n', (1459, 1471), False, 'import os\n')] |
import logging
import blueforge.apis.telegram as tg
import requests
from blueforge.apis.facebook import Message, ImageAttachment, QuickReply, QuickReplyTextItem, TemplateAttachment, \
GenericTemplate, Element, PostBackButton
from chatbrick.util import get_items_from_xml, UNKNOWN_ERROR_MSG
import time
logger = logging.getLogger(__name__)
BRICK_DEFAULT_IMAGE = 'https://www.chatbrick.io/api/static/brick/img_brick_13_001.png'
GATE_INFO = {
'0': '원할',
'1': '보통',
'2': '혼잡',
'3': '매우혼잡',
'9': '종료'
}
class Icn(object):
    """Chat brick for Incheon International Airport departure-gate congestion.

    Renders the openapi.airport.kr "StatusOfDepartures" congestion feed for
    both Facebook Messenger and Telegram. ``fb`` is the messaging client used
    to send replies; ``brick_db`` stores the per-user brick input (the service
    key is read from ``data['api_key']``).
    """

    def __init__(self, fb, brick_db):
        self.brick_db = brick_db
        self.fb = fb

    async def facebook(self, command):
        """Handle a Facebook Messenger event for this brick.

        ``command`` is ``'get_started'`` (send the two-terminal menu and
        persist the brick state) or a terminal number ``'1'``/``'2'`` (fetch
        and render that terminal's gate congestion). Always returns ``None``.
        """
        if command == 'get_started':
            # send_message = [
            #     Message(
            #         attachment=ImageAttachment(
            #             url=BRICK_DEFAULT_IMAGE
            #         )
            #     ),
            #     Message(
            #         text='인천국제공항공사에서 제공하는 "출국장 대기인원 조회 서비스"에요.'
            #     ),
            #     Message(
            #         attachment=TemplateAttachment(
            #             payload=GenericTemplate(
            #                 elements=[
            #                     Element(
            #                         image_url='https://www.chatbrick.io/api/static/brick/img_brick_13_002.png',
            #                         title='제 1여객터미널',
            #                         subtitle='제 1여객터미널의 게이트별 대기인원을 알려드려요.',
            #                         buttons=[
            #                             PostBackButton(
            #                                 title='1여객터미널 조회',
            #                                 payload='brick|icn|1'
            #                             )
            #                         ]
            #                     ),
            #                     Element(
            #                         image_url='https://www.chatbrick.io/api/static/brick/img_brick_13_002.png',
            #                         title='제 2여객터미널',
            #                         subtitle='제 2여객터미널의 게이트별 대기인원을 알려드려요.',
            #                         buttons=[
            #                             PostBackButton(
            #                                 title='2여객터미널 조회',
            #                                 payload='brick|icn|2'
            #                             )
            #                         ]
            #                     )
            #                 ]
            #             )
            #         )
            #     )
            # ]
            send_message = [
                Message(
                    attachment=TemplateAttachment(
                        payload=GenericTemplate(
                            elements=[
                                Element(image_url=BRICK_DEFAULT_IMAGE,
                                        title='출국장 대기인원 조회 서비스',
                                        subtitle='인천국제공항공사에서 제공하는 "출국장 대기인원 조회 서비스"에요.')
                            ]
                        )
                    )
                ),
                Message(
                    attachment=TemplateAttachment(
                        payload=GenericTemplate(
                            elements=[
                                Element(
                                    image_url='https://www.chatbrick.io/api/static/brick/img_brick_13_002.png',
                                    title='제 1여객터미널',
                                    subtitle='제 1여객터미널의 게이트별 대기인원을 알려드려요.',
                                    buttons=[
                                        PostBackButton(
                                            title='1여객터미널 조회',
                                            payload='brick|icn|1'
                                        )
                                    ]
                                ),
                                Element(
                                    image_url='https://www.chatbrick.io/api/static/brick/img_brick_13_002.png',
                                    title='제 2여객터미널',
                                    subtitle='제 2여객터미널의 게이트별 대기인원을 알려드려요.',
                                    buttons=[
                                        PostBackButton(
                                            title='2여객터미널 조회',
                                            payload='brick|icn|2'
                                        )
                                    ]
                                )
                            ]
                        )
                    )
                )
            ]
            await self.fb.send_messages(send_message)
            await self.brick_db.save()
        elif command == '1' or command == '2':
            input_data = await self.brick_db.get()
            # Query gate congestion for the requested terminal using the
            # user's stored service key.
            res = requests.get(
                url='http://openapi.airport.kr/openapi/service/StatusOfDepartures/getDeparturesCongestion?serviceKey=%s&terno=%s' % (
                    input_data['data']['api_key'], command), headers={
                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'})
            items = get_items_from_xml(res)
            # A dict result is an error envelope; codes '99'/'30' indicate a
            # rejected service key. A list carries the actual readings.
            if type(items) is dict:
                if items.get('code', '00') == '99' or items.get('code', '00') == '30':
                    send_message = [
                        Message(
                            text='chatbrick 홈페이지에 올바르지 않은 API key를 입력했어요. 다시 한번 확인해주세요.',
                        )
                    ]
                else:
                    send_message = [
                        Message(
                            text=UNKNOWN_ERROR_MSG
                        )
                    ]
            else:
                # `the_other` is the terminal NOT being shown; it powers the
                # "switch terminal" quick reply below.
                if command == '1':
                    the_other = '2'
                else:
                    the_other = '1'
                raw_data = items[0]
                sending_message = '제 {terno} 여객터미널\n조회날짜 : {cgtdt}\n조회시간 : {cgthm}'.format(**raw_data)
                # Terminal 1 reports four departure gates (2-5), terminal 2
                # reports two (1-2); GATE_INFO maps the status code to text.
                if command == '1':
                    sending_message += '\n2번 출국장: %s명 (%s)' % (raw_data['gateinfo1'], GATE_INFO[raw_data['gate1']])
                    sending_message += '\n3번 출국장: %s명 (%s)' % (raw_data['gateinfo2'], GATE_INFO[raw_data['gate2']])
                    sending_message += '\n4번 출국장: %s명 (%s)' % (raw_data['gateinfo3'], GATE_INFO[raw_data['gate3']])
                    sending_message += '\n5번 출국장: %s명 (%s)' % (raw_data['gateinfo4'], GATE_INFO[raw_data['gate4']])
                elif command == '2':
                    sending_message += '\n1번 출국장: %s명 (%s)' % (raw_data['gateinfo1'], GATE_INFO[raw_data['gate1']])
                    sending_message += '\n2번 출국장: %s명 (%s)' % (raw_data['gateinfo2'], GATE_INFO[raw_data['gate2']])
                send_message = [
                    Message(
                        text=sending_message,
                        quick_replies=QuickReply(
                            quick_reply_items=[
                                QuickReplyTextItem(
                                    title='새로고침',
                                    payload='brick|icn|%s' % command
                                ),
                                QuickReplyTextItem(
                                    title='제%s여객터미널 조회' % the_other,
                                    payload='brick|icn|%s' % the_other
                                )
                            ]
                        )
                    )
                ]
            await self.fb.send_messages(send_message)
        return None

    async def telegram(self, command):
        """Telegram twin of :meth:`facebook`: same commands, rendered with
        blueforge's Telegram types (inline keyboards, Markdown). Note that
        replies are still sent through ``self.fb`` — presumably the client
        is platform-specific per brick instance; confirm with the caller.
        """
        if command == 'get_started':
            send_message = [
                tg.SendPhoto(
                    photo=BRICK_DEFAULT_IMAGE
                ),
                tg.SendMessage(
                    text='인천국제공항공사에서 제공하는 "출국장 대기인원 조회 서비스"에요.',
                    reply_markup=tg.MarkUpContainer(
                        inline_keyboard=[
                            [
                                tg.CallbackButton(
                                    text='제1여객터미널',
                                    callback_data='BRICK|icn|1'
                                ),
                                tg.CallbackButton(
                                    text='제2여객터미널',
                                    callback_data='BRICK|icn|2'
                                )
                            ]
                        ]
                    )
                )
            ]
            await self.fb.send_messages(send_message)
            await self.brick_db.save()
        elif command == '1' or command == '2':
            input_data = await self.brick_db.get()
            # Same congestion endpoint as the Facebook path.
            res = requests.get(
                url='http://openapi.airport.kr/openapi/service/StatusOfDepartures/getDeparturesCongestion?serviceKey=%s&terno=%s' % (
                    input_data['data']['api_key'], command), headers={
                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'})
            items = get_items_from_xml(res)
            # dict -> error envelope, list -> congestion readings (see above).
            if type(items) is dict:
                if items.get('code', '00') == '99' or items.get('code', '00') == '30':
                    send_message = [
                        tg.SendMessage(
                            text='chatbrick 홈페이지에 올바르지 않은 API key를 입력했어요. 다시 한번 확인해주세요.',
                        )
                    ]
                else:
                    send_message = [
                        tg.SendMessage(
                            text=UNKNOWN_ERROR_MSG
                        )
                    ]
            else:
                if command == '1':
                    the_other = '2'
                else:
                    the_other = '1'
                raw_data = items[0]
                sending_message = '*제 {terno} 여객터미널*\n조회날짜 : {cgtdt}\n조회시간 : {cgthm}'.format(**raw_data)
                if command == '1':
                    sending_message += '\n2번 출국장: %s명 (%s)' % (raw_data['gateinfo1'], GATE_INFO[raw_data['gate1']])
                    sending_message += '\n3번 출국장: %s명 (%s)' % (raw_data['gateinfo2'], GATE_INFO[raw_data['gate2']])
                    sending_message += '\n4번 출국장: %s명 (%s)' % (raw_data['gateinfo3'], GATE_INFO[raw_data['gate3']])
                    sending_message += '\n5번 출국장: %s명 (%s)' % (raw_data['gateinfo4'], GATE_INFO[raw_data['gate4']])
                elif command == '2':
                    sending_message += '\n1번 출국장: %s명 (%s)' % (raw_data['gateinfo1'], GATE_INFO[raw_data['gate1']])
                    sending_message += '\n2번 출국장: %s명 (%s)' % (raw_data['gateinfo2'], GATE_INFO[raw_data['gate2']])
                send_message = [
                    tg.SendMessage(
                        text=sending_message,
                        parse_mode='Markdown',
                        reply_markup=tg.MarkUpContainer(
                            inline_keyboard=[
                                [
                                    tg.CallbackButton(
                                        text='새로고침',
                                        callback_data='BRICK|icn|%s' % command
                                    )
                                ],
                                [
                                    tg.CallbackButton(
                                        text='제%s여객터미널 조회' % the_other,
                                        callback_data='BRICK|icn|%s' % the_other
                                    )
                                ]
                            ]
                        )
                    )
                ]
            await self.fb.send_messages(send_message)
        return None
| [
"logging.getLogger",
"blueforge.apis.telegram.CallbackButton",
"blueforge.apis.facebook.Message",
"blueforge.apis.telegram.SendPhoto",
"blueforge.apis.facebook.PostBackButton",
"chatbrick.util.get_items_from_xml",
"requests.get",
"blueforge.apis.facebook.QuickReplyTextItem",
"blueforge.apis.telegram... | [((318, 345), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (335, 345), False, 'import logging\n'), ((4882, 5220), 'requests.get', 'requests.get', ([], {'url': "('http://openapi.airport.kr/openapi/service/StatusOfDepartures/getDeparturesCongestion?serviceKey=%s&terno=%s'\n % (input_data['data']['api_key'], command))", 'headers': "{'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'\n }"}), "(url=\n 'http://openapi.airport.kr/openapi/service/StatusOfDepartures/getDeparturesCongestion?serviceKey=%s&terno=%s'\n % (input_data['data']['api_key'], command), headers={'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'\n })\n", (4894, 5220), False, 'import requests\n'), ((5282, 5305), 'chatbrick.util.get_items_from_xml', 'get_items_from_xml', (['res'], {}), '(res)\n', (5300, 5305), False, 'from chatbrick.util import get_items_from_xml, UNKNOWN_ERROR_MSG\n'), ((7817, 7856), 'blueforge.apis.telegram.SendPhoto', 'tg.SendPhoto', ([], {'photo': 'BRICK_DEFAULT_IMAGE'}), '(photo=BRICK_DEFAULT_IMAGE)\n', (7829, 7856), True, 'import blueforge.apis.telegram as tg\n'), ((8842, 9180), 'requests.get', 'requests.get', ([], {'url': "('http://openapi.airport.kr/openapi/service/StatusOfDepartures/getDeparturesCongestion?serviceKey=%s&terno=%s'\n % (input_data['data']['api_key'], command))", 'headers': "{'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'\n }"}), "(url=\n 'http://openapi.airport.kr/openapi/service/StatusOfDepartures/getDeparturesCongestion?serviceKey=%s&terno=%s'\n % (input_data['data']['api_key'], command), headers={'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 
Safari/537.36'\n })\n", (8854, 9180), False, 'import requests\n'), ((9242, 9265), 'chatbrick.util.get_items_from_xml', 'get_items_from_xml', (['res'], {}), '(res)\n', (9260, 9265), False, 'from chatbrick.util import get_items_from_xml, UNKNOWN_ERROR_MSG\n'), ((5491, 5560), 'blueforge.apis.facebook.Message', 'Message', ([], {'text': '"""chatbrick 홈페이지에 올바르지 않은 API key를 입력했어요. 다시 한번 확인해주세요."""'}), "(text='chatbrick 홈페이지에 올바르지 않은 API key를 입력했어요. 다시 한번 확인해주세요.')\n", (5498, 5560), False, 'from blueforge.apis.facebook import Message, ImageAttachment, QuickReply, QuickReplyTextItem, TemplateAttachment, GenericTemplate, Element, PostBackButton\n'), ((5721, 5752), 'blueforge.apis.facebook.Message', 'Message', ([], {'text': 'UNKNOWN_ERROR_MSG'}), '(text=UNKNOWN_ERROR_MSG)\n', (5728, 5752), False, 'from blueforge.apis.facebook import Message, ImageAttachment, QuickReply, QuickReplyTextItem, TemplateAttachment, GenericTemplate, Element, PostBackButton\n'), ((9451, 9527), 'blueforge.apis.telegram.SendMessage', 'tg.SendMessage', ([], {'text': '"""chatbrick 홈페이지에 올바르지 않은 API key를 입력했어요. 다시 한번 확인해주세요."""'}), "(text='chatbrick 홈페이지에 올바르지 않은 API key를 입력했어요. 
다시 한번 확인해주세요.')\n", (9465, 9527), True, 'import blueforge.apis.telegram as tg\n'), ((9688, 9726), 'blueforge.apis.telegram.SendMessage', 'tg.SendMessage', ([], {'text': 'UNKNOWN_ERROR_MSG'}), '(text=UNKNOWN_ERROR_MSG)\n', (9702, 9726), True, 'import blueforge.apis.telegram as tg\n'), ((8150, 8212), 'blueforge.apis.telegram.CallbackButton', 'tg.CallbackButton', ([], {'text': '"""제1여객터미널"""', 'callback_data': '"""BRICK|icn|1"""'}), "(text='제1여객터미널', callback_data='BRICK|icn|1')\n", (8167, 8212), True, 'import blueforge.apis.telegram as tg\n'), ((8352, 8414), 'blueforge.apis.telegram.CallbackButton', 'tg.CallbackButton', ([], {'text': '"""제2여객터미널"""', 'callback_data': '"""BRICK|icn|2"""'}), "(text='제2여객터미널', callback_data='BRICK|icn|2')\n", (8369, 8414), True, 'import blueforge.apis.telegram as tg\n'), ((2851, 2968), 'blueforge.apis.facebook.Element', 'Element', ([], {'image_url': 'BRICK_DEFAULT_IMAGE', 'title': '"""출국장 대기인원 조회 서비스"""', 'subtitle': '"""인천국제공항공사에서 제공하는 "출국장 대기인원 조회 서비스"에요."""'}), '(image_url=BRICK_DEFAULT_IMAGE, title=\'출국장 대기인원 조회 서비스\', subtitle=\n \'인천국제공항공사에서 제공하는 "출국장 대기인원 조회 서비스"에요.\')\n', (2858, 2968), False, 'from blueforge.apis.facebook import Message, ImageAttachment, QuickReply, QuickReplyTextItem, TemplateAttachment, GenericTemplate, Element, PostBackButton\n'), ((7124, 7190), 'blueforge.apis.facebook.QuickReplyTextItem', 'QuickReplyTextItem', ([], {'title': '"""새로고침"""', 'payload': "('brick|icn|%s' % command)"}), "(title='새로고침', payload='brick|icn|%s' % command)\n", (7142, 7190), False, 'from blueforge.apis.facebook import Message, ImageAttachment, QuickReply, QuickReplyTextItem, TemplateAttachment, GenericTemplate, Element, PostBackButton\n'), ((7330, 7421), 'blueforge.apis.facebook.QuickReplyTextItem', 'QuickReplyTextItem', ([], {'title': "('제%s여객터미널 조회' % the_other)", 'payload': "('brick|icn|%s' % the_other)"}), "(title='제%s여객터미널 조회' % the_other, payload='brick|icn|%s' %\n the_other)\n", (7348, 7421), False, 'from 
blueforge.apis.facebook import Message, ImageAttachment, QuickReply, QuickReplyTextItem, TemplateAttachment, GenericTemplate, Element, PostBackButton\n'), ((11197, 11267), 'blueforge.apis.telegram.CallbackButton', 'tg.CallbackButton', ([], {'text': '"""새로고침"""', 'callback_data': "('BRICK|icn|%s' % command)"}), "(text='새로고침', callback_data='BRICK|icn|%s' % command)\n", (11214, 11267), True, 'import blueforge.apis.telegram as tg\n'), ((11491, 11587), 'blueforge.apis.telegram.CallbackButton', 'tg.CallbackButton', ([], {'text': "('제%s여객터미널 조회' % the_other)", 'callback_data': "('BRICK|icn|%s' % the_other)"}), "(text='제%s여객터미널 조회' % the_other, callback_data=\n 'BRICK|icn|%s' % the_other)\n", (11508, 11587), True, 'import blueforge.apis.telegram as tg\n'), ((3674, 3730), 'blueforge.apis.facebook.PostBackButton', 'PostBackButton', ([], {'title': '"""1여객터미널 조회"""', 'payload': '"""brick|icn|1"""'}), "(title='1여객터미널 조회', payload='brick|icn|1')\n", (3688, 3730), False, 'from blueforge.apis.facebook import Message, ImageAttachment, QuickReply, QuickReplyTextItem, TemplateAttachment, GenericTemplate, Element, PostBackButton\n'), ((4303, 4359), 'blueforge.apis.facebook.PostBackButton', 'PostBackButton', ([], {'title': '"""2여객터미널 조회"""', 'payload': '"""brick|icn|2"""'}), "(title='2여객터미널 조회', payload='brick|icn|2')\n", (4317, 4359), False, 'from blueforge.apis.facebook import Message, ImageAttachment, QuickReply, QuickReplyTextItem, TemplateAttachment, GenericTemplate, Element, PostBackButton\n')] |
from django.db import models
from django.contrib.auth.models import User
class Flight(models.Model):
    """A scheduled flight with origin/destination and a live status."""

    STATUSES = (
        ('SCHEDULED', 'SCHEDULED'),
        ('DELAYED', 'DELAYED'),
        ('ON_TIME', 'ON TIME'),
        ('ARRIVED', 'ARRIVED'),
        ('LATE', 'LATE')
    )

    number = models.CharField(max_length=10)
    departure_time = models.DateTimeField()
    arrival_time = models.DateTimeField()
    origin = models.CharField(max_length=150)
    destination = models.CharField(max_length=150)
    status = models.CharField(choices=STATUSES, max_length=100)

    @property
    def duration(self):
        """Scheduled flight time in whole hours."""
        timespan = self.arrival_time - self.departure_time
        days, seconds = timespan.days, timespan.seconds
        return days * 24 + seconds // 3600  # return hours

    @property
    def available_seats(self):
        """Seats on this flight that are still free.

        Bug fix: the original returned ``self.seats.all()`` — every seat,
        reserved or not — despite the name. Filter on ``is_available``
        (the reverse manager comes from Seat's ``related_name='seats'``).
        """
        return self.seats.filter(is_available=True)

    def __str__(self):
        return self.number
class Seat(models.Model):
    """A single seat on a flight."""
    # `related_name='seats'` is what Flight.available_seats relies on.
    flight = models.ForeignKey(
        Flight, on_delete=models.CASCADE, related_name='seats')
    number = models.CharField(max_length=50)
    # presumably cleared when the seat gets reserved — confirm in the booking flow
    is_available = models.BooleanField(default=True)

    def __str__(self):
        return self.number
class Reservation(models.Model):
    """A user's booking of one seat on one flight."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    flight = models.ForeignKey(Flight, on_delete=models.CASCADE)
    seat = models.ForeignKey(Seat, on_delete=models.CASCADE)
    # NOTE(review): presumably set once a notification about this reservation
    # has been sent — confirm against the notifier code.
    is_notified = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True)
| [
"django.db.models.DateTimeField",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.db.models.ForeignKey"
] | [((296, 327), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (312, 327), False, 'from django.db import models\n'), ((349, 371), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (369, 371), False, 'from django.db import models\n'), ((391, 413), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (411, 413), False, 'from django.db import models\n'), ((427, 459), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (443, 459), False, 'from django.db import models\n'), ((478, 510), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (494, 510), False, 'from django.db import models\n'), ((524, 574), 'django.db.models.CharField', 'models.CharField', ([], {'choices': 'STATUSES', 'max_length': '(100)'}), '(choices=STATUSES, max_length=100)\n', (540, 574), False, 'from django.db import models\n'), ((958, 1031), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Flight'], {'on_delete': 'models.CASCADE', 'related_name': '"""seats"""'}), "(Flight, on_delete=models.CASCADE, related_name='seats')\n", (975, 1031), False, 'from django.db import models\n'), ((1054, 1085), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1070, 1085), False, 'from django.db import models\n'), ((1105, 1138), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1124, 1138), False, 'from django.db import models\n'), ((1236, 1285), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (1253, 1285), False, 'from django.db import models\n'), ((1299, 1350), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Flight'], {'on_delete': 'models.CASCADE'}), '(Flight, on_delete=models.CASCADE)\n', (1316, 1350), False, 
'from django.db import models\n'), ((1362, 1411), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Seat'], {'on_delete': 'models.CASCADE'}), '(Seat, on_delete=models.CASCADE)\n', (1379, 1411), False, 'from django.db import models\n'), ((1430, 1464), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1449, 1464), False, 'from django.db import models\n'), ((1479, 1518), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1499, 1518), False, 'from django.db import models\n')] |
import geopandas as gpd
# not used anymore - converts esri jobs shapefile to a csv
# see assign_jobs_lat_lng.py

# Load the ESRI establishments shapefile and reproject to WGS84 lat/lng.
gdf = gpd.GeoDataFrame.from_file("est10_esri_gt1.shp")
gdf = gdf.to_crs(epsg=4326)

# Map the truncated shapefile field names onto readable CSV column names.
fname_map = {
    'Duns_Numbe': 'duns_number',
    'Business_N': 'business_name',
    'Emp_Total': 'total_employment',
    'Emp_Here': 'local_employment',
    'Year_Start': 'start_year',
    'sixcat': 'PBA_category',
    'remi70': 'REMI_category',
    'steelhead': 'steelhead_category',
    'naics2': 'NAICS'
}

keep_columns = ['Duns_Numbe', 'Business_N', 'geometry', 'Emp_Total', 'Emp_Here',
                'Year_Start', 'sixcat', 'remi70', 'steelhead', 'naics2']
out_gdf = gdf[keep_columns].rename(columns=fname_map)

# see the bigger establishments
# NOTE: the sorted result is discarded (no assignment, no inplace=True), so
# this line only matters when pasted into an interactive session.
out_gdf.sort_values('total_employment', ascending=False)

out_gdf.to_csv("jobs.csv", index=False)
| [
"geopandas.GeoDataFrame.from_file"
] | [((120, 168), 'geopandas.GeoDataFrame.from_file', 'gpd.GeoDataFrame.from_file', (['"""est10_esri_gt1.shp"""'], {}), "('est10_esri_gt1.shp')\n", (146, 168), True, 'import geopandas as gpd\n')] |
import argparse
import random
import operator
import os
def parse_grammar(file_path):
    """
    Generate a grammar from a file describing the production rules.
    Note that the symbols are inferred from the production rules.
    For more information on the format of the file, please reffer to
    the README.md or the the sample grammars provided in this repository.

    :param file_path: Path to the file containing the description of the grammar.
    :returns: the grammar object and the starting symbol.
    """
    with open(file_path) as handle:
        lines = handle.read().splitlines()

    # Need the starting symbol plus at least one rule.
    if len(lines) <= 1:
        raise Exception('Grammar should have at least one production rule and a starting symbol')

    # First line should be the starting symbol.
    start_symbol = lines[0]

    grammar = {}
    for rule_line in lines[1:]:
        # Each line should be in the format: X -> A B ... C
        tokens = rule_line.split()
        if len(tokens) <= 2 or tokens[1] != '->':
            raise Exception('Each production line should be in the format: X -> A B ... C')
        grammar.setdefault(tokens[0], []).append(tokens[2:])

    if start_symbol not in grammar:
        raise Exception('Grammar should have at leats one production rule with the start_symbol.')
    return grammar, start_symbol
def find_terminals(grammar):
    """
    For a given grammar, return a set of the terminal symbols.

    A symbol is terminal exactly when it never appears on the left-hand
    side of any production rule (i.e. it is not a key of the grammar).

    :param grammar: The grammar (set of productions rules).
    :return: set of terminal symbols.
    """
    return {
        symbol
        for productions in grammar.values()
        for rule in productions
        for symbol in rule
        if symbol not in grammar
    }
def analyze_stats(sentences):
    """
    For a given set of sentences, print how many times each symbol appears,
    printing statistics sorted by occurrance (ascending).

    :param sentences: List of sentences.
    """
    counts = {}
    for sentence in sentences:
        for token in sentence.split():
            counts[token] = counts.get(token, 0) + 1

    # Print one "count symbol" line per symbol, least frequent first.
    for symbol, count in sorted(counts.items(), key=lambda item: item[1]):
        print("%5d %s" % (count, symbol))
def generate_random_sentence(grammar, start_symbol, print_sentence=True):
    """
    For a given grammar (set of production rules) and a starting symbol,
    randomly generate a sentence using the production rules.

    Bug fix: the original consulted the module-level global ``terminals``,
    which is only defined inside the ``__main__`` block, so calling this
    function from an importing module raised NameError. A symbol is terminal
    exactly when it has no production rule, so we test membership in
    ``grammar`` directly instead — same behavior, no hidden global.

    :param grammar: The grammar (set of productions rules).
    :param start_symbol: The starting symbol.
    :param print_sentence: Wether to print the generated sentence. Defaults to true.
    :returns: A randomly generated sentence.
    """
    # Starting symbol must be a part of the grammar
    assert start_symbol in grammar
    sentence = [start_symbol]
    idx = 0
    while idx < len(sentence):
        if sentence[idx] not in grammar:
            # Terminal symbol: nothing to expand, move on.
            idx += 1
        else:
            # Non-terminal: replace it in place with one random production.
            choice = random.choice(grammar[sentence[idx]])
            sentence = sentence[:idx] + choice + sentence[idx + 1:]
    sentence = " ".join([word.upper() for word in sentence])
    if print_sentence:
        print(sentence)
    return sentence
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Grammar utils')
    parser.add_argument('--grammar', type=str, default='simple_grammar.txt',
                        help='Path to grammar file.')
    # Bug fix: `type=bool` is broken with argparse — bool("False") is True,
    # so every non-empty value enabled the option. A store_true flag is the
    # correct way to expose an on/off switch.
    parser.add_argument('--print_terminal_symbols', action='store_true',
                        help='Print the terminal symbols of the grammar.')
    parser.add_argument('--num_sentences', type=int, default=0,
                        help='The number of random sentences to generate.')
    args = parser.parse_args()

    grammar, start_symbol = parse_grammar(args.grammar)
    terminals = find_terminals(grammar)

    if args.print_terminal_symbols:
        for terminal in sorted(terminals):
            print(terminal)
        print('-----------------')
        print('There are', len(terminals), 'terminals')

    # Generate first, then print with stable numbering.
    sentences = [generate_random_sentence(grammar, start_symbol, False)
                 for _ in range(args.num_sentences)]
    for i, sentence in enumerate(sentences):
        print("%d. %s" % (i, sentence))
"operator.itemgetter",
"random.choice",
"argparse.ArgumentParser"
] | [((3060, 3112), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Grammar utils"""'}), "(description='Grammar utils')\n", (3083, 3112), False, 'import argparse\n'), ((2052, 2074), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (2071, 2074), False, 'import operator\n'), ((2829, 2851), 'random.choice', 'random.choice', (['choices'], {}), '(choices)\n', (2842, 2851), False, 'import random\n')] |
#TODO: use only one (RGB) channel
import numpy as np
import pandas as pd
import os
from torch.utils import data
from torch.utils.data.dataloader import DataLoader as DataLoader
import torch
from torchvision import transforms
from natsort import natsorted, ns
import cv2
from PIL import Image
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
# Root folder that holds the scraped frames and their per-frame label files.
dataset_path = "C:\\Users\\User\\Documents\\GitHub\\Csgo-NeuralNetwork\\output\\"
#train_split and test_split 0.1 > x > 0.9 and must add up to 1
train_split = 0.7
test_split = 0.3
num_epochs = 10
batch_size = 100

# Prefer the first CUDA device when available, otherwise fall back to CPU.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
    print("Running on: %s"%(torch.cuda.get_device_name(device)))
else:
    device = torch.device("cpu")
    print('running on: CPU')
print('running on: CPU')
class CsgoPersonNoPersonDataset(data.Dataset):
    """Frames scraped from CSGO footage with a binary person/no-person label.

    The label for frame N is derived from `<folder>frame#N.txt`: a non-empty
    annotation file means "person present" (1.0), an empty file means 0.0.
    """
    # Sentinel; overwritten per-instance in __init__.
    length = -1

    def __init__(self, root_dir, transform=None):
        """
        Args:
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample's image.
        """
        self.root_dir = root_dir
        self.transform = transform
        self.length = 0
        # dictionary that marks what the last frame of each folder is
        # ie. number of examples in specific folder
        self.folder_system = {2426: 'CSGOraw2'}
        for folder_index in self.folder_system:
            self.length += folder_index

    # returns name of folder that contains specific frame
    # (returns None if idx exceeds every folder's frame count)
    def find_folder(self, idx):
        for num_frames in self.folder_system:
            if num_frames >= idx:
                return str(self.folder_system[num_frames])

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # sets path and gets txt/jpg files
        img_path = self.find_folder(idx)
        img_name = "%sframe#%s" % (img_path, idx)
        img_path = os.path.join(self.root_dir,
                    img_path, img_name)
        img_path_ext = img_path + '.jpg'
        img = Image.open((img_path_ext))
        # img = np.array(img)
        label_path = str(img_path) + '.txt'
        label = 0
        # loads label from disk, converts csv to tensor
        # (label is 1.0 iff the annotation file is non-empty; shape (1,))
        label = torch.as_tensor(os.stat(label_path).st_size != 0, dtype=torch.float).reshape((1,))
        sample = {'image': img, 'label': label}
        # apply transforms
        # TODO: messy workaround here ("farofa aqui hein")
        if self.transform:
            img = self.transform(sample['image'])
            # img = img.reshape(172800)
            sample['image'] = img
        return sample
# Defining the NN layers.
class Net(nn.Module):
    """Small CNN binary classifier: two conv/pool stages followed by three
    fully connected layers producing a single output (paired with
    BCEWithLogitsLoss at the call site).

    Expects 3-channel input of 256x144 (the Resize target used by the
    dataset transform): 256x144 -> conv 5x5 -> 252x140 -> pool -> 126x70
    -> conv 5x5 -> 122x66 -> pool -> 61x33, hence the 16 * 61 * 33 flatten.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(16 * 61 * 33, 120)
        self.fc2 = nn.Linear(120, 60)
        self.fc3 = nn.Linear(60, 1)
        # Bug fix: fc4/fc5/fc6 were defined but never used in forward();
        # they only inflated the parameter count and optimizer state, so
        # they have been removed. NOTE: checkpoints saved with the old
        # layers will carry extra (unused) keys.

    def forward(self, x):
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool2(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 61 * 33)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # NOTE(review): the final ReLU clamps the output to >= 0, which is
        # unusual before BCEWithLogitsLoss — confirm this is intended.
        x = F.relu(self.fc3(x))
        return x
def weights_init(m):
    """Xavier-initialise the weights of Conv* and Linear* modules.

    Intended for use with ``Module.apply``; all other module types are
    left untouched.
    """
    module_name = type(m).__name__
    if 'Conv' in module_name or 'Linear' in module_name:
        torch.nn.init.xavier_uniform_(m.weight.data)
# Runs the NN in training mode for `num_epochs` passes over `train_loader`.
def train_run(train_loader, criterion, optimizer, device):
    """Train the module-level `net` and return the per-batch loss history.

    Relies on the module-level globals `net` and `num_epochs`. Prints the
    running loss (and the raw label/output tensors) every 10 mini-batches.

    :param train_loader: DataLoader yielding {'image': ..., 'label': ...} batches
    :param criterion: loss function (BCEWithLogitsLoss at the call site)
    :param optimizer: optimizer over net.parameters()
    :param device: torch.device the batches are moved to
    :returns: list of float losses, one per processed batch
    """
    losses = []
    print(len(train_loader.dataset))
    for epoch in range(num_epochs):  # loop over the dataset multiple times
        running_loss = 0.0
        for i, data in enumerate(train_loader):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data['image'], data['label']
            #if labels[0].item() == -1:
            #    continue
            #sends batch to gpu
            inputs, labels = inputs.to(device), labels.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            #print(f"{epoch}, {i}")
            outputs = net(inputs)
            #print(f"Labels: {labels.shape}, {labels.dtype}")
            #print(f"Outputs: {outputs.shape}, {outputs.dtype}")
            loss = criterion(outputs, labels)
            losses.append(loss.item())
            running_loss += loss.item()
            if (i + 1) % 10 == 0:    # print every 10 mini-batches
                print(f"Labels: {torch.transpose(labels, 0, 1)}")
                print(f"Outputs: {torch.transpose(outputs, 0, 1)}")
                print('[%d, %5d] loss: %.3f' %
                    (epoch + 1, i + 1, running_loss / 10))
                running_loss = 0.0
                print("-------------------------------------")
            # backward + step happen after the logging above
            loss.backward()
            optimizer.step()
    print('Finished Training')
    return losses
# Build the model on the chosen device and Xavier-initialise conv/linear layers.
net = Net().to(device)
net.apply(weights_init)

# Resize every frame to 256x144 before tensor conversion (Net's FC sizing
# depends on this exact resolution).
transform = transforms.Compose([
    transforms.Resize([256, 144]),
    # transforms.Resize([57600, 1]),
    transforms.ToTensor(),
    ])
dataset = CsgoPersonNoPersonDataset(dataset_path, transform)
dataset_len = len(dataset)
train_split = int(np.floor(dataset_len * train_split))
test_split = int(np.floor(dataset_len * test_split))
# flooring can drop samples; push any remainder into the training split so
# the two splits cover the whole dataset
while train_split + test_split != dataset_len:
    train_split += 1
train_set, test_set = torch.utils.data.random_split(\
    dataset, [train_split, test_split])
train_loader = DataLoader(dataset=train_set, batch_size=batch_size, shuffle=False, drop_last=True)
test_loader = DataLoader(dataset=test_set, batch_size=batch_size, shuffle=True, drop_last=True)
def my_binary_loss(output, target):
    """Mean element-wise logical-AND of two binary tensors.

    Bug fix: the original body was ``return (output and target).mean``,
    which raises (`and` on a multi-element tensor is ambiguous) and never
    called ``.mean``. The function is unused by the training loop below;
    this version keeps the written intent — the fraction of positions where
    both `output` and `target` are truthy — and returns a scalar tensor.
    Note it is not differentiable, so it is a metric, not a trainable loss.
    """
    return torch.logical_and(output.bool(), target.bool()).float().mean()
# BCEWithLogitsLoss fuses the sigmoid with binary cross entropy, matching
# Net's single raw output unit. (A dead `criterion = nn.MSELoss()` that was
# immediately overwritten has been removed.)
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(net.parameters())

# for i in range(500):
#     image, label = dataset[i]['image'], dataset[i]['label']
#     print(label)

# Train and dump the per-batch loss history.
losses = train_run(train_loader, criterion, optimizer, device)
print("------------------------------------------------------------")
print("Losses")
for loss in losses:
    print(loss)
print("------------------------------------------------------------")
"torch.nn.MSELoss",
"torch.cuda.is_available",
"torch.nn.init.xavier_uniform_",
"torch.utils.data.dataloader.DataLoader",
"torchvision.transforms.ToTensor",
"torch.utils.data.random_split",
"numpy.floor",
"torch.transpose",
"torch.is_tensor",
"torchvision.transforms.Resize",
"torch.nn.BCEWithLog... | [((647, 672), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (670, 672), False, 'import torch\n'), ((5829, 5894), 'torch.utils.data.random_split', 'torch.utils.data.random_split', (['dataset', '[train_split, test_split]'], {}), '(dataset, [train_split, test_split])\n', (5858, 5894), False, 'import torch\n'), ((5921, 6008), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', ([], {'dataset': 'train_set', 'batch_size': 'batch_size', 'shuffle': '(False)', 'drop_last': '(True)'}), '(dataset=train_set, batch_size=batch_size, shuffle=False,\n drop_last=True)\n', (5931, 6008), True, 'from torch.utils.data.dataloader import DataLoader as DataLoader\n'), ((6019, 6105), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', ([], {'dataset': 'test_set', 'batch_size': 'batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(dataset=test_set, batch_size=batch_size, shuffle=True, drop_last\n =True)\n', (6029, 6105), True, 'from torch.utils.data.dataloader import DataLoader as DataLoader\n'), ((6187, 6199), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (6197, 6199), True, 'import torch.nn as nn\n'), ((6212, 6234), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (6232, 6234), True, 'import torch.nn as nn\n'), ((687, 709), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (699, 709), False, 'import torch\n'), ((794, 813), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (806, 813), False, 'import torch\n'), ((5649, 5684), 'numpy.floor', 'np.floor', (['(dataset_len * train_split)'], {}), '(dataset_len * train_split)\n', (5657, 5684), True, 'import numpy as np\n'), ((5703, 5737), 'numpy.floor', 'np.floor', (['(dataset_len * test_split)'], {}), '(dataset_len * test_split)\n', (5711, 5737), True, 'import numpy as np\n'), ((1874, 1894), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (1889, 1894), False, 'import torch\n'), ((2081, 2128), 
'os.path.join', 'os.path.join', (['self.root_dir', 'img_path', 'img_name'], {}), '(self.root_dir, img_path, img_name)\n', (2093, 2128), False, 'import os\n'), ((2216, 2240), 'PIL.Image.open', 'Image.open', (['img_path_ext'], {}), '(img_path_ext)\n', (2226, 2240), False, 'from PIL import Image\n'), ((2900, 2918), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(6)', '(5)'], {}), '(3, 6, 5)\n', (2909, 2918), True, 'import torch.nn as nn\n'), ((2940, 2958), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (2952, 2958), True, 'import torch.nn as nn\n'), ((2980, 2999), 'torch.nn.Conv2d', 'nn.Conv2d', (['(6)', '(16)', '(5)'], {}), '(6, 16, 5)\n', (2989, 2999), True, 'import torch.nn as nn\n'), ((3021, 3039), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (3033, 3039), True, 'import torch.nn as nn\n'), ((3059, 3087), 'torch.nn.Linear', 'nn.Linear', (['(16 * 61 * 33)', '(120)'], {}), '(16 * 61 * 33, 120)\n', (3068, 3087), True, 'import torch.nn as nn\n'), ((3107, 3125), 'torch.nn.Linear', 'nn.Linear', (['(120)', '(60)'], {}), '(120, 60)\n', (3116, 3125), True, 'import torch.nn as nn\n'), ((3145, 3161), 'torch.nn.Linear', 'nn.Linear', (['(60)', '(1)'], {}), '(60, 1)\n', (3154, 3161), True, 'import torch.nn as nn\n'), ((3181, 3198), 'torch.nn.Linear', 'nn.Linear', (['(30)', '(15)'], {}), '(30, 15)\n', (3190, 3198), True, 'import torch.nn as nn\n'), ((3218, 3234), 'torch.nn.Linear', 'nn.Linear', (['(15)', '(7)'], {}), '(15, 7)\n', (3227, 3234), True, 'import torch.nn as nn\n'), ((3254, 3269), 'torch.nn.Linear', 'nn.Linear', (['(7)', '(1)'], {}), '(7, 1)\n', (3263, 3269), True, 'import torch.nn as nn\n'), ((3776, 3820), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', (['m.weight.data'], {}), '(m.weight.data)\n', (3805, 3820), False, 'import torch\n'), ((5442, 5471), 'torchvision.transforms.Resize', 'transforms.Resize', (['[256, 144]'], {}), '([256, 144])\n', (5459, 5471), False, 'from torchvision import 
transforms\n'), ((5514, 5535), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5533, 5535), False, 'from torchvision import transforms\n'), ((738, 772), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['device'], {}), '(device)\n', (764, 772), False, 'import torch\n'), ((2424, 2443), 'os.stat', 'os.stat', (['label_path'], {}), '(label_path)\n', (2431, 2443), False, 'import os\n'), ((4941, 4970), 'torch.transpose', 'torch.transpose', (['labels', '(0)', '(1)'], {}), '(labels, 0, 1)\n', (4956, 4970), False, 'import torch\n'), ((5008, 5038), 'torch.transpose', 'torch.transpose', (['outputs', '(0)', '(1)'], {}), '(outputs, 0, 1)\n', (5023, 5038), False, 'import torch\n')] |
# imports - module imports
from deeply.exception import (
    DeeplyError
)
# imports - test imports
import pytest
# Verify that DeeplyError is a raisable exception and that pytest can catch it.
def test_deeply_error():
    with pytest.raises(DeeplyError):
raise DeeplyError | [
"pytest.raises"
] | [((151, 177), 'pytest.raises', 'pytest.raises', (['DeeplyError'], {}), '(DeeplyError)\n', (164, 177), False, 'import pytest\n')] |
""" Common setup and patching for tests """
#pylint: disable=wrong-import-order
from datetime import datetime as orig_datetime, timedelta
from mock import patch
import threading
#pylint: disable=W0401,W0614
from test.fixtures import *
_thread_state = threading.local()
def _new_utcnow():
    """Return the frozen UTC datetime, seeding it from the real clock on first use."""
    try:
        return _thread_state.utcnow
    except AttributeError:
        # First access in this thread: freeze the clock at the real current time.
        _thread_state.utcnow = orig_datetime.utcnow()
        return _thread_state.utcnow
def _new_now():
    """Derive the local-time counterpart of the mocked UTC clock."""
    frozen_utc = _new_utcnow()
    local_offset = orig_datetime.now() - orig_datetime.utcnow()
    return frozen_utc + local_offset
def clock_load(utcnow):
    """Freeze the mocked clock at *utcnow* and return the new value."""
    _thread_state.utcnow = utcnow
    return utcnow
def clock_tick(delta=timedelta()):
    """Advance the mocked clock by *delta* (default: no movement) and return it."""
    advanced = _new_utcnow() + delta
    return clock_load(advanced)
def clock_reset():
    """Discard the frozen datetime so the next read re-seeds from the real clock."""
    if hasattr(_thread_state, 'utcnow'):
        del _thread_state.utcnow
# Patch datetime.datetime globally for the test run: utcnow()/now() delegate to
# the thread-local mocked clock above.  The patch is started at import time and
# intentionally never stopped (it lives for the whole test session).
_config = {'utcnow.side_effect': _new_utcnow,
           'now.side_effect': _new_now}
_patcher = patch('datetime.datetime', **_config)
_mocker = _patcher.start()
| [
"threading.local",
"mock.patch",
"datetime.datetime.utcnow",
"datetime.datetime.now",
"datetime.timedelta"
] | [((253, 270), 'threading.local', 'threading.local', ([], {}), '()\n', (268, 270), False, 'import threading\n'), ((1088, 1125), 'mock.patch', 'patch', (['"""datetime.datetime"""'], {}), "('datetime.datetime', **_config)\n", (1093, 1125), False, 'from mock import patch\n'), ((774, 785), 'datetime.timedelta', 'timedelta', ([], {}), '()\n', (783, 785), False, 'from datetime import datetime as orig_datetime, timedelta\n'), ((446, 468), 'datetime.datetime.utcnow', 'orig_datetime.utcnow', ([], {}), '()\n', (466, 468), True, 'from datetime import datetime as orig_datetime, timedelta\n'), ((590, 609), 'datetime.datetime.now', 'orig_datetime.now', ([], {}), '()\n', (607, 609), True, 'from datetime import datetime as orig_datetime, timedelta\n'), ((612, 634), 'datetime.datetime.utcnow', 'orig_datetime.utcnow', ([], {}), '()\n', (632, 634), True, 'from datetime import datetime as orig_datetime, timedelta\n')] |
import re
import numpy as np
import pandas as pd
import scipy.stats as stats
# Genomic region of the form "chrom:start-end", e.g. "chr1:10-20".
R_REGEX = re.compile('(.*):(.*)-(.*)')
# Genomic region with strand: "chrom:start-end:strand", e.g. "chr1:10-20:+".
R_REGEX_STRAND = re.compile('(.*):(.*)-(.*):(.*)')
def chunks(l, n):
    """Yield successive n-sized chunks from l (the last chunk may be shorter)."""
    # https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
    start = 0
    total = len(l)
    while start < total:
        yield l[start:start + n]
        start += n
def estimate_allele_frequency(ac, an, a=1, b=100):
    """
    Estimate allele frequencies with a beta(a, b) prior on the binomial
    allele-count model, returning the posterior mean, the MLE, and a 95%
    credible interval for each variant.
    Parameters:
    -----------
    ac : array-like
        Array-like object with the observed allele counts for each variant. If
        ac is a pandas Series, the output dataframe will have the same index as
        ac.
    an : array-like
        Array-like object with the number of haplotypes that were genotyped.
    a : float
        Parameter for prior distribution beta(a, b).
    b : float
        Parameter for prior distribution beta(a, b).
    Returns
    -------
    out : pandas.DataFrame
        Pandas dataframe with columns ``af_mle`` (ac/an), ``af`` (posterior
        mean), ``ci_lower`` and ``ci_upper`` (95% credible interval bounds).
    """
    # 95% equal-tailed credible interval from the beta(a+ac, b+an-ac)
    # posterior.  NOTE(review): scipy's ``interval`` is equal-tailed
    # (2.5%/97.5% quantiles), not a highest-posterior-density interval.
    td = dict(zip(['ci_lower', 'ci_upper'],
                  stats.beta(a + ac, b + an - ac).interval(0.95)))
    # Posterior mean of beta(a+ac, b+an-ac).
    td['af'] = (a + ac) / (a + b + an)
    # Maximum-likelihood estimate: observed count over total haplotypes.
    td['af_mle'] = np.array(ac).astype(float) / np.array(an)
    out = pd.DataFrame(td)[['af_mle', 'af', 'ci_lower', 'ci_upper']]
    if type(ac) == pd.Series:
        out.index = ac.index
    return(out)
def transform_standard_normal(df):
    """Transform a series or the rows of a dataframe to the values of a standard
    normal based on rank.

    Ranks are mapped to (0, 1) via rank/(n+1) and passed through the normal
    inverse CDF (a rank-based inverse normal transform).

    Parameters
    ----------
    df : pandas.DataFrame or pandas.Series
        For a DataFrame, each row is transformed independently.

    Returns
    -------
    pandas.DataFrame or pandas.Series
        Same shape/index (and columns) as the input.

    Raises
    ------
    TypeError
        If *df* is neither a DataFrame nor a Series.  (The original fell
        through and raised a confusing NameError.)
    """
    import pandas as pd
    import scipy.stats as stats
    # isinstance instead of type(...) == ... so DataFrame/Series subclasses
    # are handled too.
    if isinstance(df, pd.DataFrame):
        gc_ranks = df.rank(axis=1)
        gc_ranks = gc_ranks / (gc_ranks.shape[1] + 1)
        std_norm = stats.norm.ppf(gc_ranks)
        std_norm = pd.DataFrame(std_norm, index=gc_ranks.index,
                                columns=gc_ranks.columns)
    elif isinstance(df, pd.Series):
        gc_ranks = df.rank()
        gc_ranks = gc_ranks / (gc_ranks.shape[0] + 1)
        std_norm = stats.norm.ppf(gc_ranks)
        std_norm = pd.Series(std_norm, index=df.index)
    else:
        raise TypeError('df must be a pandas DataFrame or Series, got '
                        '{}'.format(type(df).__name__))
    return std_norm
def read_gzipped_text_url(url):
    """Read a gzipped text file from a URL and return
    contents as a string.

    NOTE(review): this function is Python 2 only (urllib2, StringIO,
    implicit-str semantics); it will raise ImportError on Python 3.
    """
    import urllib2
    import zlib
    from StringIO import StringIO
    # Advertise gzip support so the server sends compressed data.
    opener = urllib2.build_opener()
    request = urllib2.Request(url)
    request.add_header('Accept-encoding', 'gzip')
    respond = opener.open(request)
    compressedData = respond.read()
    respond.close()
    opener.close()
    compressedDataBuf = StringIO(compressedData)
    # 16 + MAX_WBITS tells zlib to expect a gzip header/trailer.
    d = zlib.decompressobj(16+zlib.MAX_WBITS)
    buffer = compressedDataBuf.read(1024)
    #saveFile = open('/tmp/test.txt', "wb")
    s = []
    # Decompress in 1 KiB chunks and join once at the end (avoids quadratic
    # string concatenation).
    while buffer:
        s.append(d.decompress(buffer))
        buffer = compressedDataBuf.read(1024)
    s = ''.join(s)
    return s
def parse_region(region):
    """
    Parse region of type chr1:10-20 or chr1:10-20:+
    Parameters:
    -----------
    region : str
        Region of type chr1:10-20 or chr1:10-20:+.
    Returns
    -------
    groups : tuple
        Tuple of groups from regex e.g. (chr1, 10, 20) or (chr1, 10, 20, +),
        or None when the string matches neither pattern.
    """
    # Try the stranded pattern first; fall back to the strandless one.
    match = R_REGEX_STRAND.search(region) or R_REGEX.search(region)
    if match is None:
        return None
    return match.groups()
def _sample_names(files, kwargs):
"""
Make sample (or other) names.
Parameters:
-----------
files : list of string
Typically a list of file paths although could be any list of strings
that you want to make names for. If neither names nor define_sample_name
are provided, then files is returned as is.
kwargs : dict
kwargs from another function. Can include the following keys with
appropriate arguments.
names : list of strings
Names to use. Overrides define_sample_name if provided.
define_sample_name : function that takes string as input
Function mapping string to name. For instance, you may have a sample
name in a file path and use a regex to extract it.
"""
if 'define_sample_name' not in kwargs.keys():
define_sample_name = lambda x: x
else:
define_sample_name = kwargs['define_sample_name']
if 'names' in kwargs.keys():
names = kwargs['names']
else:
names = [define_sample_name(f) for f in files]
assert len(names) == len(files)
return names
| [
"StringIO.StringIO",
"pandas.Series",
"zlib.decompressobj",
"re.compile",
"scipy.stats.norm.ppf",
"urllib2.Request",
"numpy.array",
"scipy.stats.beta",
"pandas.DataFrame",
"urllib2.build_opener"
] | [((89, 117), 're.compile', 're.compile', (['"""(.*):(.*)-(.*)"""'], {}), "('(.*):(.*)-(.*)')\n", (99, 117), False, 'import re\n'), ((135, 168), 're.compile', 're.compile', (['"""(.*):(.*)-(.*):(.*)"""'], {}), "('(.*):(.*)-(.*):(.*)')\n", (145, 168), False, 'import re\n'), ((2426, 2448), 'urllib2.build_opener', 'urllib2.build_opener', ([], {}), '()\n', (2446, 2448), False, 'import urllib2\n'), ((2464, 2484), 'urllib2.Request', 'urllib2.Request', (['url'], {}), '(url)\n', (2479, 2484), False, 'import urllib2\n'), ((2669, 2693), 'StringIO.StringIO', 'StringIO', (['compressedData'], {}), '(compressedData)\n', (2677, 2693), False, 'from StringIO import StringIO\n'), ((2702, 2741), 'zlib.decompressobj', 'zlib.decompressobj', (['(16 + zlib.MAX_WBITS)'], {}), '(16 + zlib.MAX_WBITS)\n', (2720, 2741), False, 'import zlib\n'), ((1321, 1333), 'numpy.array', 'np.array', (['an'], {}), '(an)\n', (1329, 1333), True, 'import numpy as np\n'), ((1344, 1360), 'pandas.DataFrame', 'pd.DataFrame', (['td'], {}), '(td)\n', (1356, 1360), True, 'import pandas as pd\n'), ((1832, 1856), 'scipy.stats.norm.ppf', 'stats.norm.ppf', (['gc_ranks'], {}), '(gc_ranks)\n', (1846, 1856), True, 'import scipy.stats as stats\n'), ((1876, 1946), 'pandas.DataFrame', 'pd.DataFrame', (['std_norm'], {'index': 'gc_ranks.index', 'columns': 'gc_ranks.columns'}), '(std_norm, index=gc_ranks.index, columns=gc_ranks.columns)\n', (1888, 1946), True, 'import pandas as pd\n'), ((2126, 2150), 'scipy.stats.norm.ppf', 'stats.norm.ppf', (['gc_ranks'], {}), '(gc_ranks)\n', (2140, 2150), True, 'import scipy.stats as stats\n'), ((2170, 2205), 'pandas.Series', 'pd.Series', (['std_norm'], {'index': 'df.index'}), '(std_norm, index=df.index)\n', (2179, 2205), True, 'import pandas as pd\n'), ((1292, 1304), 'numpy.array', 'np.array', (['ac'], {}), '(ac)\n', (1300, 1304), True, 'import numpy as np\n'), ((1185, 1216), 'scipy.stats.beta', 'stats.beta', (['(a + ac)', '(b + an - ac)'], {}), '(a + ac, b + an - ac)\n', (1195, 1216), 
True, 'import scipy.stats as stats\n')] |
import os
import sys
import argparse
import difflib
import json
import logging
import pluggy
import colorama
import boto3
from pathlib import Path
from . lib.autoawsume import create_autoawsume_profile
from ..autoawsume.process import kill, kill_autoawsume
from . lib.profile import aggregate_profiles, get_role_chain, get_profile_name
from . lib.config_management import load_config
from . lib.aws_files import get_aws_files, add_section, get_section
from . lib.profile import credentials_to_profile, is_mutable_profile
from . lib import exceptions
from . lib.logger import logger
from . lib.safe_print import safe_print
from . lib import constants
from . lib import saml as saml
from . lib import aws as aws_lib
from . import hookspec
from . import default_plugins
class Awsume(object):
    """Orchestrates the awsume credential flow.

    Wires together the pluggy plugin manager, argument parsing, profile
    collection, credential retrieval, and the final export of credentials to
    the shell wrapper (or as a boto3 session for library use).
    """

    def __init__(self, is_interactive: bool = True):
        """Build the plugin manager, load config, and init terminal colors.

        :param is_interactive: True when invoked from the shell wrapper;
            False for library use (suppresses the stdout export line).
        """
        logger.debug('Initializing app')
        self.plugin_manager = self.get_plugin_manager()
        self.config = load_config()
        self.config['is_interactive'] = is_interactive
        self.is_interactive = is_interactive
        colorama.init(autoreset=True)

    def get_plugin_manager(self) -> pluggy.PluginManager:
        """Create the pluggy manager and register built-in and installed plugins."""
        logger.debug('Creating plugin manager')
        pm = pluggy.PluginManager('awsume')
        pm.add_hookspecs(hookspec)
        logger.debug('Loading plugins')
        pm.register(default_plugins)
        pm.load_setuptools_entrypoints('awsume')
        return pm

    def parse_args(self, system_arguments: list) -> argparse.Namespace:
        """Parse CLI arguments, letting plugins add and post-process options.

        Raises :class:`exceptions.EarlyExit` for flags that are fully handled
        here (``--refresh-autocomplete``, ``--list-plugins``).
        """
        logger.debug('Gathering arguments')
        epilog = """Thank you for using AWSume! Check us out at https://trek10.com"""
        description = """Awsume - A cli that makes using AWS IAM credentials easy"""
        argument_parser = argparse.ArgumentParser(
            prog='awsume',
            description=description,
            epilog=epilog,
            formatter_class=lambda prog: (argparse.RawDescriptionHelpFormatter(prog, max_help_position=80, width=80)),  # pragma: no cover
        )
        self.plugin_manager.hook.pre_add_arguments(
            config=self.config,
        )
        self.plugin_manager.hook.add_arguments(
            config=self.config,
            parser=argument_parser,
        )
        logger.debug('Parsing arguments')
        args = argument_parser.parse_args(system_arguments)
        logger.debug('Handling arguments')
        if args.refresh_autocomplete:
            autocomplete_file = Path('~/.awsume/autocomplete.json').expanduser()
            result = self.plugin_manager.hook.get_profile_names(
                config=self.config,
                arguments=args,
            )
            profile_names = [y for x in result for y in x]
            # Use a context manager so the file handle is closed
            # deterministically (a bare open() passed to json.dump leaks it).
            with open(autocomplete_file, 'w') as f:
                json.dump({'profile-names': profile_names}, f)
            raise exceptions.EarlyExit()
        if args.list_plugins:
            for plugin_name, _ in self.plugin_manager.list_name_plugin():
                if 'default_plugins' not in plugin_name:
                    safe_print(plugin_name, color=colorama.Fore.LIGHTCYAN_EX)
            raise exceptions.EarlyExit()
        self.plugin_manager.hook.post_add_arguments(
            config=self.config,
            arguments=args,
            parser=argument_parser,
        )
        args.system_arguments = system_arguments
        return args

    def get_profiles(self, args: argparse.Namespace) -> dict:
        """Collect AWS profiles from all plugins and merge them into one dict."""
        logger.debug('Gathering profiles')
        config_file, credentials_file = get_aws_files(args, self.config)
        self.plugin_manager.hook.pre_collect_aws_profiles(
            config=self.config,
            arguments=args,
            credentials_file=credentials_file,
            config_file=config_file,
        )
        aws_profiles_result = self.plugin_manager.hook.collect_aws_profiles(
            config=self.config,
            arguments=args,
            credentials_file=credentials_file,
            config_file=config_file,
        )
        profiles = aggregate_profiles(aws_profiles_result)
        self.plugin_manager.hook.post_collect_aws_profiles(
            config=self.config,
            arguments=args,
            profiles=profiles,
        )
        return profiles

    def get_saml_credentials(self, args: argparse.Namespace, profiles: dict) -> dict:
        """Fetch a SAML assertion via plugins, pick a role, and assume it.

        Roles in the assertion are ``"principal_arn,role_arn"`` strings; the
        chosen one is split back apart before calling STS.
        """
        assertion = self.plugin_manager.hook.get_credentials_with_saml(
            config=self.config,
            arguments=args,
        )
        assertion = next((_ for _ in assertion if _), None)  # pragma: no cover
        if not assertion:
            raise exceptions.SAMLAssertionNotFoundError('No assertion to use!')
        roles = saml.parse_assertion(assertion)
        if not roles:
            raise exceptions.SAMLAssertionMissingRoleError('No roles found in the saml assertion')
        role_arn = None
        principal_arn = None
        role_duration = args.role_duration or int(self.config.get('role-duration', '0'))
        if len(roles) > 1:
            if args.role_arn and args.principal_arn:
                # BUGFIX: str.join takes a single iterable; the previous
                # ','.join(args.role_arn, args.principal_arn) raised TypeError.
                # Principal comes first to match the "principal,role" format of
                # the parsed assertion entries.
                principal_plus_role_arn = ','.join([args.principal_arn, args.role_arn])
                if self.config.get('fuzzy-match'):
                    choice = difflib.get_close_matches(principal_plus_role_arn, roles, cutoff=0)[0]
                    safe_print('Closest match: {}'.format(choice))
                else:
                    if principal_plus_role_arn not in roles:
                        raise exceptions.SAMLRoleNotFoundError(args.principal_arn, args.role_arn)
                    else:
                        choice = principal_plus_role_arn
            elif args.profile_name:
                profile_role_arn = profiles.get(args.profile_name, {}).get('role_arn')
                principal_arn = profiles.get(args.profile_name, {}).get('principal_arn')
                if profile_role_arn is None or principal_arn is None:
                    raise exceptions.InvalidProfileError(args.profile_name, 'both role_arn and principal_arn are necessary for saml profiles')
                principal_plus_profile_role_arn = ','.join([principal_arn, profile_role_arn])
                if principal_plus_profile_role_arn in roles:
                    choice = principal_plus_profile_role_arn
                else:
                    raise exceptions.SAMLRoleNotFoundError(principal_arn, profile_role_arn)
                safe_print('Match: {}'.format(choice))
            else:
                # No selection criteria given: prompt the user interactively.
                for index, choice in enumerate(roles):
                    safe_print('{}) {}'.format(index, choice), color=colorama.Fore.LIGHTYELLOW_EX)
                safe_print('Which role do you want to assume? > ', end='', color=colorama.Fore.LIGHTCYAN_EX)
                response = input()
                if response.isnumeric():
                    choice = roles[int(response)]
                else:
                    choice = difflib.get_close_matches(response, roles, cutoff=0)[0]
            role_arn = choice.split(',')[1]
            principal_arn = choice.split(',')[0]
        else:
            role_arn = roles[0].split(',')[1]
            principal_arn = roles[0].split(',')[0]
        safe_print('Assuming role: {},{}'.format(principal_arn, role_arn), color=colorama.Fore.GREEN)
        credentials = aws_lib.assume_role_with_saml(
            role_arn,
            principal_arn,
            assertion,
            region=None,
            role_duration=role_duration,
        )
        return credentials

    def get_credentials(self, args: argparse.Namespace, profiles: dict) -> dict:
        """Resolve credentials via JSON input, SAML, web identity, or plugins.

        Plugin exception-handling hooks are given a chance to react before
        the exception is re-raised.

        :raises exceptions.NoCredentialsError: when no source produced
            credentials.
        """
        logger.debug('Getting credentials')
        self.plugin_manager.hook.pre_get_credentials(
            config=self.config,
            arguments=args,
            profiles=profiles,
        )
        try:
            if not args.auto_refresh and args.json:  # sending credentials to awsume directly
                logger.debug('Pulling credentials from json parameter')
                args.target_profile_name = 'json'
                credentials = json.loads(args.json)
                if 'Credentials' in credentials:
                    credentials = credentials['Credentials']
            elif args.with_saml:
                logger.debug('Pulling credentials from saml')
                credentials = self.get_saml_credentials(args, profiles)
            elif args.with_web_identity:
                logger.debug('Pulling credentials from web identity')
                credentials = self.plugin_manager.hook.get_credentials_with_web_identity(
                    config=self.config,
                    arguments=args,
                )
            else:
                logger.debug('Pulling credentials from default awsume flow')
                credentials = self.plugin_manager.hook.get_credentials(config=self.config, arguments=args, profiles=profiles)
                credentials = next((_ for _ in credentials if _), {})
            if args.auto_refresh:
                create_autoawsume_profile(self.config, args, profiles, credentials)
            if self.config.get('is_interactive'):
                logger.debug('Interactive execution, killing existing autoawsume processes')
                kill_autoawsume()
        except exceptions.ProfileNotFoundError as e:
            self.plugin_manager.hook.catch_profile_not_found_exception(config=self.config, arguments=args, profiles=profiles, error=e)
            raise
        except exceptions.InvalidProfileError as e:
            self.plugin_manager.hook.catch_invalid_profile_exception(config=self.config, arguments=args, profiles=profiles, error=e)
            raise
        except exceptions.UserAuthenticationError as e:
            self.plugin_manager.hook.catch_user_authentication_error(config=self.config, arguments=args, profiles=profiles, error=e)
            raise
        except exceptions.RoleAuthenticationError as e:
            self.plugin_manager.hook.catch_role_authentication_error(config=self.config, arguments=args, profiles=profiles, error=e)
            raise
        # Some hooks return a list of results; unwrap the first truthy one.
        if isinstance(credentials, list):  # pragma: no cover
            credentials = next((_ for _ in credentials if _), {})  # pragma: no cover
        self.plugin_manager.hook.post_get_credentials(
            config=self.config,
            arguments=args,
            profiles=profiles,
            credentials=credentials,
        )
        if not credentials:
            safe_print('No credentials to awsume', colorama.Fore.RED)
            raise exceptions.NoCredentialsError()
        return credentials

    def export_data(self, arguments: argparse.Namespace, profiles: dict, credentials: dict, awsume_flag: str, awsume_list: list):
        """Emit the shell-wrapper export line and build a boto3 session.

        Optionally persists the credentials to ``--output-profile`` (only for
        awsume-managed profiles).  Returns the boto3 ``Session`` with the raw
        credentials attached as ``session.awsume_credentials``.
        """
        logger.debug('Exporting data')
        if self.is_interactive:
            # The shell wrapper parses this line to set environment variables.
            print(awsume_flag, end=' ')
            print(' '.join(awsume_list))
        session = boto3.Session(
            aws_access_key_id=credentials.get('AccessKeyId'),
            aws_secret_access_key=credentials.get('SecretAccessKey'),
            aws_session_token=credentials.get('SessionToken'),
            profile_name=credentials.get('AwsProfile'),
            region_name=credentials.get('Region'),
        )
        if arguments.output_profile and not arguments.auto_refresh:
            if not is_mutable_profile(profiles, arguments.output_profile):
                raise exceptions.ImmutableProfileError(arguments.output_profile, 'not awsume-managed')
            _, credentials_file = get_aws_files(arguments, self.config)
            awsumed_profile = credentials_to_profile(credentials)
            if 'Expiration' in credentials:
                awsumed_profile['expiration'] = credentials['Expiration'].strftime('%Y-%m-%d %H:%M:%S')
            add_section(arguments.output_profile, awsumed_profile, credentials_file, True)
        session.awsume_credentials = credentials
        return session

    def run(self, system_arguments: list):
        """Full awsume flow: parse args, collect profiles, get and export creds.

        Awsume-level exceptions are swallowed (with a message when
        interactive) so the CLI exits cleanly; in library mode they
        propagate to the caller.
        """
        try:
            args = self.parse_args(system_arguments)
            profiles = self.get_profiles(args)
            credentials = self.get_credentials(args, profiles)
            if args.auto_refresh:
                return self.export_data(args, profiles, credentials, 'Auto', [
                    args.output_profile or 'autoawsume-{}'.format(args.target_profile_name),
                    credentials.get('Region'),
                    args.target_profile_name,
                ])
            else:
                return self.export_data(args, profiles, credentials, 'Awsume', [
                    str(credentials.get('AccessKeyId')),
                    str(credentials.get('SecretAccessKey')),
                    str(credentials.get('SessionToken')),
                    str(credentials.get('Region')),
                    str(args.target_profile_name),
                    str(credentials.get('AwsProfile')),
                    str(credentials['Expiration'].strftime('%Y-%m-%dT%H:%M:%S') if 'Expiration' in credentials else None),
                ])
        except exceptions.EarlyExit:
            logger.debug('', exc_info=True)
            logger.debug('EarlyExit exception raised, no more work to do')
        except exceptions.AwsumeException as e:
            logger.debug('', exc_info=True)
            if self.is_interactive:
                safe_print('Awsume error: {}'.format(e), color=colorama.Fore.RED)
            else:
                raise
| [
"argparse.RawDescriptionHelpFormatter",
"json.loads",
"pathlib.Path",
"pluggy.PluginManager",
"difflib.get_close_matches",
"colorama.init"
] | [((1085, 1114), 'colorama.init', 'colorama.init', ([], {'autoreset': '(True)'}), '(autoreset=True)\n', (1098, 1114), False, 'import colorama\n'), ((1236, 1266), 'pluggy.PluginManager', 'pluggy.PluginManager', (['"""awsume"""'], {}), "('awsume')\n", (1256, 1266), False, 'import pluggy\n'), ((7989, 8010), 'json.loads', 'json.loads', (['args.json'], {}), '(args.json)\n', (7999, 8010), False, 'import json\n'), ((1917, 1991), 'argparse.RawDescriptionHelpFormatter', 'argparse.RawDescriptionHelpFormatter', (['prog'], {'max_help_position': '(80)', 'width': '(80)'}), '(prog, max_help_position=80, width=80)\n', (1953, 1991), False, 'import argparse\n'), ((2458, 2493), 'pathlib.Path', 'Path', (['"""~/.awsume/autocomplete.json"""'], {}), "('~/.awsume/autocomplete.json')\n", (2462, 2493), False, 'from pathlib import Path\n'), ((5189, 5256), 'difflib.get_close_matches', 'difflib.get_close_matches', (['principal_plus_role_arn', 'roles'], {'cutoff': '(0)'}), '(principal_plus_role_arn, roles, cutoff=0)\n', (5214, 5256), False, 'import difflib\n'), ((6859, 6911), 'difflib.get_close_matches', 'difflib.get_close_matches', (['response', 'roles'], {'cutoff': '(0)'}), '(response, roles, cutoff=0)\n', (6884, 6911), False, 'import difflib\n')] |
# Generated by Django 3.0.5 on 2020-04-28 15:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the boolean `is_approved` flag
    # (default False) to the `book` model.

    dependencies = [
        ("meadow", "0006_mmake_isbn_charfield"),
    ]
    operations = [
        migrations.AddField(model_name="book", name="is_approved", field=models.BooleanField(default=False),),
    ]
| [
"django.db.models.BooleanField"
] | [((301, 335), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (320, 335), False, 'from django.db import migrations, models\n')] |
import cv2 as cv
"""
Choose background substractor
"""
# Pick the background-subtraction algorithm and the input video file.
algo = 'MOG2'
input = 'videos/shine.mp4'
if algo == 'MOG2':
    backSub = cv.createBackgroundSubtractorMOG2()
else:
    backSub = cv.createBackgroundSubtractorKNN()
capture = cv.VideoCapture(input)
if not capture.isOpened():
    print('Unable to open: ' + input)
    exit(0)
# Process the video frame by frame: compute the foreground mask, stamp the
# current frame number onto the frame, and display both windows.
while True:
    ret, frame = capture.read()
    if frame is None:
        # End of stream (or read failure).
        break
    fgMask = backSub.apply(frame)
    # White box in the top-left corner as a background for the frame counter.
    cv.rectangle(frame, (10, 2), (100,20), (255,255,255), -1)
    cv.putText(frame, str(capture.get(cv.CAP_PROP_POS_FRAMES)), (15, 15),
               cv.FONT_HERSHEY_SIMPLEX, 0.5 , (0,0,0))
    cv.imshow('Frame', frame)
    cv.imshow('FG Mask', fgMask)
    # ~30 ms delay per frame; also pumps the GUI event loop.
    keyboard = cv.waitKey(30)
if keyboard == 'q' or keyboard == 27:
break | [
"cv2.createBackgroundSubtractorMOG2",
"cv2.rectangle",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.createBackgroundSubtractorKNN",
"cv2.waitKey"
] | [((235, 257), 'cv2.VideoCapture', 'cv.VideoCapture', (['input'], {}), '(input)\n', (250, 257), True, 'import cv2 as cv\n'), ((132, 167), 'cv2.createBackgroundSubtractorMOG2', 'cv.createBackgroundSubtractorMOG2', ([], {}), '()\n', (165, 167), True, 'import cv2 as cv\n'), ((188, 222), 'cv2.createBackgroundSubtractorKNN', 'cv.createBackgroundSubtractorKNN', ([], {}), '()\n', (220, 222), True, 'import cv2 as cv\n'), ((468, 528), 'cv2.rectangle', 'cv.rectangle', (['frame', '(10, 2)', '(100, 20)', '(255, 255, 255)', '(-1)'], {}), '(frame, (10, 2), (100, 20), (255, 255, 255), -1)\n', (480, 528), True, 'import cv2 as cv\n'), ((668, 693), 'cv2.imshow', 'cv.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (677, 693), True, 'import cv2 as cv\n'), ((698, 726), 'cv2.imshow', 'cv.imshow', (['"""FG Mask"""', 'fgMask'], {}), "('FG Mask', fgMask)\n", (707, 726), True, 'import cv2 as cv\n'), ((747, 761), 'cv2.waitKey', 'cv.waitKey', (['(30)'], {}), '(30)\n', (757, 761), True, 'import cv2 as cv\n')] |
"Common functions that may be used everywhere"
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os
import sys
from distutils.util import strtobool
# Python 2 compatibility: alias input() to raw_input() so user input is
# returned as a string instead of being eval'd; on Python 3 raw_input does
# not exist and input() already behaves this way.
try:
    input = raw_input
except NameError:
    pass
def yes_no_query(question):
    """Prompt the user with *question* until a valid yes/no answer is given.

    Returns a truthy value (1) for 'y', 'yes', 't', 'true', 'on' or '1', and
    a falsy value (0) for 'n', 'no', 'f', 'false', 'off' or '0'.
    """
    print("{} (y/n)".format(question), end=" ")
    while True:
        answer = input().lower()
        try:
            return strtobool(answer)
        except ValueError:
            # Invalid answer: tell the user and ask again.
            print("Please respond with 'y' or 'n'.")
def ask_overwrite(dest):
    """If *dest* exists, ask before deleting it to make room for an overwrite.

    Exits the program when the user declines; does nothing when the file
    does not exist.
    """
    if not os.path.exists(dest):
        return
    prompt = "File '{}' already exists. Overwrite file?".format(dest)
    if not yes_no_query(prompt):
        sys.exit("Cancelling operation...")
    os.remove(dest)
| [
"os.path.exists",
"sys.exit",
"os.remove"
] | [((1004, 1024), 'os.path.exists', 'os.path.exists', (['dest'], {}), '(dest)\n', (1018, 1024), False, 'import os\n'), ((1068, 1083), 'os.remove', 'os.remove', (['dest'], {}), '(dest)\n', (1077, 1083), False, 'import os\n'), ((1110, 1145), 'sys.exit', 'sys.exit', (['"""Cancelling operation..."""'], {}), "('Cancelling operation...')\n", (1118, 1145), False, 'import sys\n')] |
import inspect
from typing import Union, Callable, Any, Iterable
from pytest_lambda.exceptions import DisabledFixtureError, NotImplementedFixtureError
from pytest_lambda.impl import LambdaFixture
__all__ = ['lambda_fixture', 'static_fixture', 'error_fixture',
'disabled_fixture', 'not_implemented_fixture']
def lambda_fixture(fixture_name_or_lambda: Union[str, Callable]=None,
                   *other_fixture_names: Iterable[str],
                   bind=False,
                   scope="function", params=None, autouse=False, ids=None, name=None):
    """Declare a fixture from a fixture name (or names) or a lambda.

    Examples:

        class DescribeMyTests:
            url = lambda_fixture('list_url')
            updated_name = lambda_fixture(lambda vendor: vendor.name + ' updated')

    :param fixture_name_or_lambda: The name of another fixture to alias, or a
        lambda whose parameters request other fixtures. When None, the fixture
        resolves to the name of the attribute it is assigned to.
    :param other_fixture_names: Additional fixture names; when given, the
        fixture evaluates to a tuple of all the named fixtures' values.
    :param bind: When True, ``self`` is passed as the lambda's first
        parameter. Incompatible with fixture-name aliasing.
    """
    spec = (
        (fixture_name_or_lambda, *other_fixture_names)
        if other_fixture_names
        else fixture_name_or_lambda
    )
    return LambdaFixture(spec, bind=bind, scope=scope,
                         params=params, autouse=autouse, ids=ids, name=name)
def static_fixture(value: Any, **fixture_kwargs):
    """Compact method for defining a fixture that returns a static value

    Equivalent to ``lambda_fixture(lambda: value)``; extra keyword arguments
    (e.g. ``scope``, ``autouse``) are forwarded to :func:`lambda_fixture`.
    """
    return lambda_fixture(lambda: value, **fixture_kwargs)
# Source template for the dynamically built fixture function. The args/kwargs
# placeholders are filled with error_fn's own parameter names so pytest sees
# (and injects) the same fixture dependencies.
RAISE_EXCEPTION_FIXTURE_FUNCTION_FORMAT = '''
def raise_exception({args}):
    exc = error_fn({kwargs})
    if exc is not None:
        raise exc
'''
def error_fixture(error_fn: Callable, **fixture_kwargs):
    """Fixture whose usage results in the raising of an exception
    Usage:
        class DescribeMyTests:
            url = error_fixture(lambda request: Exception(
                f'Please override the {request.fixturename} fixture!'))
    :param error_fn: fixture method which returns an exception to raise. It may
        request pytest fixtures in its arguments
    """
    # Mirror error_fn's signature, so the generated fixture requests exactly
    # the same pytest fixtures that error_fn declares.
    proto = tuple(inspect.signature(error_fn).parameters)
    args = ', '.join(proto)
    kwargs = ', '.join(f'{arg}={arg}' for arg in proto)
    source = RAISE_EXCEPTION_FIXTURE_FUNCTION_FORMAT.format(
        args=args,
        kwargs=kwargs,
    )
    # exec the generated source with error_fn in scope, then wrap the
    # resulting function as a regular lambda fixture.
    ctx = {'error_fn': error_fn}
    exec(source, ctx)
    raise_exception = ctx['raise_exception']
    return lambda_fixture(raise_exception, **fixture_kwargs)
def disabled_fixture(**fixture_kwargs):
    """Mark a fixture as disabled: any request of it raises an error.

    Useful when any use of the fixture in the current context would be a
    mistake — pytest fails fast instead of silently running on a faulty
    assumption.

    Usage:

        class DescribeMyListOnlyViewSet(ViewSetTest):
            list_route = lambda_fixture(lambda: reverse('...'))
            detail_route = disabled_fixture()

            class DescribeRetrieve(UsesDetailRoute):
                def test_that_should_throw_error():
                    print('I should never be executed!')
    """
    def _build_error(request):
        # Delay message construction until the fixture is actually requested,
        # so the real fixture name is available.
        return DisabledFixtureError(
            f'Usage of the {request.fixturename} fixture has been disabled '
            f'in the current context.'
        )
    return error_fixture(_build_error, **fixture_kwargs)
def not_implemented_fixture(**fixture_kwargs):
    """Mark a fixture as abstract: subclasses/users must override it.

    Useful in abstract base test classes that require concrete definitions
    before they can run.

    Usage:

        class MyBaseTest:
            list_route = not_implemented_fixture()

        class TestThings(MyBaseTest):
            list_route = lambda_fixture(lambda: reverse(...))
    """
    def _build_error(request):
        # Delay message construction until the fixture is actually requested,
        # so the real fixture name is available.
        return NotImplementedFixtureError(
            f'Please define/override the {request.fixturename} fixture in '
            f'the current context.'
        )
    return error_fixture(_build_error, **fixture_kwargs)
| [
"pytest_lambda.exceptions.NotImplementedFixtureError",
"pytest_lambda.impl.LambdaFixture",
"inspect.signature",
"pytest_lambda.exceptions.DisabledFixtureError"
] | [((1430, 1549), 'pytest_lambda.impl.LambdaFixture', 'LambdaFixture', (['fixture_names_or_lambda'], {'bind': 'bind', 'scope': 'scope', 'params': 'params', 'autouse': 'autouse', 'ids': 'ids', 'name': 'name'}), '(fixture_names_or_lambda, bind=bind, scope=scope, params=\n params, autouse=autouse, ids=ids, name=name)\n', (1443, 1549), False, 'from pytest_lambda.impl import LambdaFixture\n'), ((3663, 3688), 'pytest_lambda.exceptions.DisabledFixtureError', 'DisabledFixtureError', (['msg'], {}), '(msg)\n', (3683, 3688), False, 'from pytest_lambda.exceptions import DisabledFixtureError, NotImplementedFixtureError\n'), ((4385, 4416), 'pytest_lambda.exceptions.NotImplementedFixtureError', 'NotImplementedFixtureError', (['msg'], {}), '(msg)\n', (4411, 4416), False, 'from pytest_lambda.exceptions import DisabledFixtureError, NotImplementedFixtureError\n'), ((2371, 2398), 'inspect.signature', 'inspect.signature', (['error_fn'], {}), '(error_fn)\n', (2388, 2398), False, 'import inspect\n')] |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import hashlib
import sys
from datetime import datetime
import sentry_sdk
from authlib.oauth2 import OAuth2Error
from flask import flash, g, has_request_context, jsonify, render_template, request, session
from itsdangerous import Signer
from markupsafe import Markup
from werkzeug.exceptions import BadRequest, Forbidden, ImATeapot
from werkzeug.urls import url_decode, url_encode, url_parse, url_unparse
from indico.util.caching import memoize_request
from indico.util.i18n import _
from indico.web.flask.templating import get_template_module
def inject_js(js):
    """Queue a JavaScript snippet for injection into the current page.

    :param js: Code wrapped in a ``<script>`` tag.
    """
    try:
        bucket = g.injected_js
    except AttributeError:
        bucket = g.injected_js = []
    bucket.append(Markup(js))
def _pop_injected_js():
    """Return and clear the JS queued via `inject_js` (or None if empty)."""
    if 'injected_js' not in g:
        return None
    js = g.injected_js
    del g.injected_js
    return js
def jsonify_form(form, fields=None, submit=None, back=None, back_url=None, back_button=True, disabled_until_change=True,
                 disabled_fields=(), form_header_kwargs=None, skip_labels=False, save_reminder=False,
                 footer_align_right=False, disable_if_locked=True, message=None):
    """Render a WTForm through the ``simple_form`` jinja macro as JSON.

    This is a shortcut that avoids adding templates which do nothing
    besides importing and calling that macro.

    :param form: A WTForms `Form` instance
    :param fields: A list of fields to be displayed on the form
    :param submit: The title of the submit button (defaults to "Save")
    :param back: The title of the back button (defaults to "Cancel")
    :param back_url: The URL the back button redirects to
    :param back_button: Whether to show a back button
    :param disabled_until_change: Whether to disable form submission
                                  until a field is changed
    :param disabled_fields: List of field names to disable
    :param form_header_kwargs: Keyword arguments passed to the
                               ``form_header`` macro
    :param skip_labels: Whether to skip rendering the field labels
    :param save_reminder: Whether to show a message when the form has
                          been modified and the save button is not
                          visible
    :param footer_align_right: Whether the buttons in the event footer
                               should be aligned to the right
    :param disable_if_locked: Whether the form should be disabled when
                              the associated event is locked (based on
                              a CSS class in the DOM structure)
    :param message: An optional message passed through to the form macro
    """
    macro_opts = {
        'fields': fields,
        'submit': _('Save') if submit is None else submit,
        'back': _('Cancel') if back is None else back,
        'back_url': back_url,
        'back_button': back_button,
        'disabled_until_change': disabled_until_change,
        'disabled_fields': disabled_fields,
        'form_header_kwargs': {} if form_header_kwargs is None else form_header_kwargs,
        'skip_labels': skip_labels,
        'save_reminder': save_reminder,
        'footer_align_right': footer_align_right,
        'disable_if_locked': disable_if_locked,
        'message': message,
    }
    tpl = get_template_module('forms/_form.html')
    # Render first: the macro may queue JS which we pop afterwards.
    return jsonify(html=tpl.simple_form(form, **macro_opts), js=_pop_injected_js())
def jsonify_template(template, _render_func=render_template, _success=None, **context):
    """Return a json response containing a rendered template."""
    # Render first so any JS injected during rendering is picked up below.
    payload = {'html': _render_func(template, **context)}
    payload['js'] = _pop_injected_js()
    if _success is not None:
        payload['success'] = _success
    return jsonify(**payload)
def jsonify_data(flash=True, **json_data):
    """Return a json response with some sensible defaults.

    This behaves similar to :func:`~flask.jsonify`, but adds
    ``success=True`` (unless overridden) and, optionally, the currently
    flashed messages.

    :param flash: if the json data should contain flashed messages
    :param json_data: the data to include in the json response
    """
    if 'success' not in json_data:
        json_data['success'] = True
    if flash:
        json_data['flashed_messages'] = render_template('flashed_messages.html')
    return jsonify(**json_data)
class ExpectedError(ImATeapot):
    """An error that client-side code is guaranteed to handle.

    Use this class in new react-based code together with the AJAX
    actions when a failure is anticipated and should be handled more
    nicely than with the usual error dialog.

    :param message: A short message describing the error
    :param data: Any additional data to return
    """

    def __init__(self, message, **data):
        super().__init__(message if message else 'Something went wrong')
        self.data = {**data, 'message': message}
def _format_request_data(data, hide_passwords=False):
if not hasattr(data, 'lists'):
data = ((k, [v]) for k, v in data.items())
else:
data = data.lists()
rv = {}
for key, values in data:
if hide_passwords and 'password' in key:
values = [v if not v else f'<{len(v)} chars hidden>' for v in values]
rv[key] = values if len(values) != 1 else values[0]
return rv
def get_request_info(hide_passwords=True):
    """Collect various information about the current HTTP request.

    This is especially useful for logging purposes where you want
    as much information as possible.

    :param hide_passwords: Hides the actual value of POST fields
                           if their name contains ``password``.
    :return: a dictionary containing request information, or ``None``
             when called outside a request context
    """
    if not has_request_context():
        return None
    try:
        user = session.user
        user_info = {'id': user.id, 'name': user.full_name, 'email': user.email} if user else None
    except Exception as exc:
        user_info = f'ERROR: {exc}'
    request_data = {
        'url': _format_request_data(request.view_args) if request.view_args is not None else None,
        'get': _format_request_data(request.args),
        'post': _format_request_data(request.form, hide_passwords=hide_passwords),
        'json': request.get_json(silent=True),
        'headers': _format_request_data(request.headers, False),
    }
    return {
        'id': request.id,
        'time': datetime.now().isoformat(),
        'url': request.url,
        'endpoint': request.url_rule.endpoint if request.url_rule else None,
        'method': request.method,
        'rh': g.rh.__class__.__name__ if 'rh' in g else None,
        'user': user_info,
        'ip': request.remote_addr,
        'user_agent': str(request.user_agent),
        'referrer': request.referrer,
        'data': request_data,
    }
def url_for_index(_external=False, _anchor=None):
    """Return the URL of the category index, i.e. the site's landing page."""
    from indico.web.flask.util import url_for
    url_kwargs = {'_external': _external, '_anchor': _anchor}
    return url_for('categories.display', **url_kwargs)
def is_legacy_signed_url_valid(user, url):
    """Check whether a legacy signed URL is valid for a user.

    This util is deprecated and only exists because people may be actively
    using URLs with the old style token.  New code should use
    :func:`signed_url_for_user` / :func:`verify_signed_user_url`, which
    encode the user id within the signature.
    """
    parsed = url_parse(url)
    query = url_decode(parsed.query)
    if 'token' not in query:
        return False
    signature = query.pop('token')
    unsigned_url = url_unparse((
        '',
        '',
        parsed.path,
        url_encode(query, sort=False),
        parsed.fragment
    ))
    return Signer(user.signing_secret, salt='url-signing').verify_signature(unsigned_url.encode(), signature)
def _get_user_url_signer(user):
    """Return the Signer used for per-user signed URLs (SHA-256 based)."""
    salt = 'user-url-signing'
    return Signer(user.signing_secret, salt=salt, digest_method=hashlib.sha256)
def signed_url_for_user(user, endpoint, /, *args, **kwargs):
    """Get a URL for an endpoint, signed with a user's signing secret.

    The signature covers the HTTP method (hardcoded to GET for now), the
    user id and the relative URL (path + query string); the host is never
    part of the signed data.
    """
    from indico.web.flask.util import url_for
    external = kwargs.pop('_external', False)
    relative_url = url_for(endpoint, *args, **kwargs)
    # The plain user id is embedded in the token so the verifier knows which
    # signing secret to load; tampering with it invalidates the signature.
    # Including the method guards against accidentally signing a URL where a
    # more powerful non-GET request (with an unsigned body) exists.
    signer = _get_user_url_signer(user)
    signature = signer.get_signature(f'GET:{user.id}:{relative_url}').decode()
    token = f'{user.id}_{signature}'
    # Final URL including the signature ('user_token' parameter); only here
    # the `_external` flag is honored so the host never enters the signature.
    return url_for(endpoint, *args, **kwargs, _external=external, user_token=token)
def verify_signed_user_url(url, method):
    """Verify a signed URL and extract the associated user.

    :param url: the full relative URL of the request, including the query string
    :param method: the HTTP method of the request
    :return: the user associated with the signed link or `None` if no token was provided
    :raise BadRequest: if a token is present but malformed or invalid
    """
    from indico.modules.users import User
    parsed = url_parse(url)
    query = url_decode(parsed.query)
    try:
        user_id, signature = query.pop('user_token').split('_', 1)
        user_id = int(user_id)
    except KeyError:
        # no token at all -> unauthenticated, but not an error
        return None
    except ValueError:
        raise BadRequest(_('The persistent link you used is invalid.'))
    unsigned_url = url_unparse((
        '',
        '',
        parsed.path,
        url_encode(query, sort=False),
        parsed.fragment
    ))
    user = User.get(user_id)
    if not user:
        raise BadRequest(_('The persistent link you used is invalid.'))
    signer = _get_user_url_signer(user)
    if not signer.verify_signature(f'{method}:{user.id}:{unsigned_url}'.encode(), signature):
        raise BadRequest(_('The persistent link you used is invalid.'))
    return user
def get_oauth_user(scopes):
    """Return the user of a valid OAuth bearer token, or None.

    Non-bearer authorization headers and service tokens (identified by
    their prefix) are ignored.
    """
    from indico.core.oauth import require_oauth
    from indico.core.oauth.util import TOKEN_PREFIX_SERVICE
    auth_header = request.headers.get('Authorization', '').lower()
    if not auth_header.startswith('bearer ') or auth_header.startswith(f'bearer {TOKEN_PREFIX_SERVICE}'):
        return None
    try:
        oauth_token = require_oauth.acquire_token(scopes)
    except OAuth2Error as exc:
        require_oauth.raise_error_response(exc)
    return oauth_token.user
def _lookup_request_user(allow_signed_url=False, oauth_scope_hint=None):
    """Resolve the current request's user from all supported auth sources.

    Possible sources are a signed URL token, an OAuth bearer token and
    the session cookie; mixing an OAuth token with either of the other
    two is rejected.

    :return: a ``(user, source)`` tuple, both ``None`` if unauthenticated
    """
    oauth_scopes = [oauth_scope_hint] if oauth_scope_hint else []
    if request.method == 'GET':
        oauth_scopes.append('read:everything')
    oauth_scopes.append('full:everything')
    signed_url_user = verify_signed_user_url(request.full_path, request.method)
    oauth_user = get_oauth_user(oauth_scopes)
    session_user = session.get_session_user()
    if oauth_user and signed_url_user:
        raise BadRequest('OAuth tokens and signed URLs cannot be mixed')
    if oauth_user and session_user:
        raise BadRequest('OAuth tokens and session cookies cannot be mixed')
    if signed_url_user and not allow_signed_url:
        raise BadRequest('Signature auth is not allowed for this URL')
    # Precedence: signed URL > OAuth > session (matches the checks above)
    for candidate, source in ((signed_url_user, 'signed_url'),
                              (oauth_user, 'oauth'),
                              (session_user, 'session')):
        if candidate:
            return candidate, source
    return None, None
def _request_likely_seen_by_user():
    """Whether the response is likely rendered directly to a human.

    XHR/JSON requests and asset requests are assumed to be machine-consumed.
    """
    return not (request.is_xhr or request.is_json) and request.blueprint != 'assets'
def _check_request_user(user, source):
    """Validate a user resolved by `_lookup_request_user`.

    Deleted/merged/blocked users are rejected.  For non-session auth
    sources this raises `Forbidden`; for session auth the user is
    silently discarded and, if the response is likely to be seen by a
    human, the session is cleared and a message is flashed.

    :param user: the resolved user (or ``None``)
    :param source: the auth source string (or ``None``)
    :return: a ``(user, source)`` tuple; both ``None`` if rejected
    """
    if not user:
        return None, None
    elif user.is_deleted:
        merged_into_user = user.merged_into_user
        if source != 'session':
            # Token-based auth gets a hard failure instead of a flashed message
            if merged_into_user:
                raise Forbidden('User has been merged into another user')
            else:
                raise Forbidden('User has been deleted')
        user = source = None
        # If the user is deleted and the request is likely to be seen by
        # the user, we forcefully log him out and inform him about it.
        if _request_likely_seen_by_user():
            session.clear()
            if merged_into_user:
                msg = _('Your profile has been merged into <strong>{}</strong>. Please log in using that profile.')
                flash(Markup(msg).format(merged_into_user.full_name), 'warning')
            else:
                flash(_('Your profile has been deleted.'), 'error')
    elif user.is_blocked:
        if source != 'session':
            raise Forbidden('User has been blocked')
        user = source = None
        if _request_likely_seen_by_user():
            session.clear()
            flash(_('Your profile has been blocked.'), 'error')
    return user, source
@memoize_request
def get_request_user():
    """Get the user associated with the current request.

    This looks up the user using all ways of authentication that are
    supported on the current endpoint. In most cases that's the user
    from the active session (via a session cookie), but it may also be
    set (or even overridden if there is a session as well) through other
    means, such as:

    - an OAuth token
    - a signature for a persistent url

    :return: a ``(user, source)`` tuple; both are ``None`` when the
             request is unauthenticated or when authentication failed
             while another exception was already being handled
    """
    if g.get('get_request_user_failed'):
        # If getting the current user failed, we abort early in case something
        # tries again since that code may be in logging or error handling, and
        # we don't want that code to fail because of an invalid token in the URL
        return None, None
    # Exception (if any) that is currently being handled; used below to decide
    # whether an auth failure should be swallowed in favor of the original error.
    current_exc = sys.exc_info()[1]
    rh = type(g.rh) if 'rh' in g else None
    oauth_scope_hint = getattr(rh, '_OAUTH_SCOPE', None)
    allow_signed_url = getattr(rh, '_ALLOW_SIGNED_URL', False)
    try:
        user, source = _lookup_request_user(allow_signed_url, oauth_scope_hint)
        user, source = _check_request_user(user, source)
    except Exception as exc:
        g.get_request_user_failed = True
        if current_exc:
            # If we got here while handling another exception, we silently ignore
            # any failure related to authenticating the current user and pretend
            # there is no user so we can continue handling the original exception.
            # one case when this happens is passing a `user_token` arg to a page
            # that 404s. of course the token is not valid there, but the 404 error
            # is the more interesting one.
            from indico.core.logger import Logger
            Logger.get('auth').info('Discarding exception "%s" while authenticating request user during handling of '
                                    'exception "%s"', exc, current_exc)
            return None, None
        raise
    if user:
        # Attach the authenticated user to Sentry error reports
        sentry_sdk.set_user({
            'id': user.id,
            'email': user.email,
            'name': user.full_name,
            'source': source
        })
    return user, source
| [
"flask.render_template",
"indico.web.flask.templating.get_template_module",
"indico.util.i18n._",
"indico.core.oauth.require_oauth.raise_error_response",
"sys.exc_info",
"werkzeug.exceptions.BadRequest",
"flask.session.clear",
"flask.request.headers.get",
"flask.jsonify",
"flask.g.get",
"werkzeu... | [((3012, 3051), 'indico.web.flask.templating.get_template_module', 'get_template_module', (['"""forms/_form.html"""'], {}), "('forms/_form.html')\n", (3031, 3051), False, 'from indico.web.flask.templating import get_template_module\n'), ((4433, 4453), 'flask.jsonify', 'jsonify', ([], {}), '(**json_data)\n', (4440, 4453), False, 'from flask import flash, g, has_request_context, jsonify, render_template, request, session\n'), ((7183, 7250), 'indico.web.flask.util.url_for', 'url_for', (['"""categories.display"""'], {'_external': '_external', '_anchor': '_anchor'}), "('categories.display', _external=_external, _anchor=_anchor)\n", (7190, 7250), False, 'from indico.web.flask.util import url_for\n'), ((7653, 7667), 'werkzeug.urls.url_parse', 'url_parse', (['url'], {}), '(url)\n', (7662, 7667), False, 'from werkzeug.urls import url_decode, url_encode, url_parse, url_unparse\n'), ((7681, 7705), 'werkzeug.urls.url_decode', 'url_decode', (['parsed.query'], {}), '(parsed.query)\n', (7691, 7705), False, 'from werkzeug.urls import url_decode, url_encode, url_parse, url_unparse\n'), ((7951, 7998), 'itsdangerous.Signer', 'Signer', (['user.signing_secret'], {'salt': '"""url-signing"""'}), "(user.signing_secret, salt='url-signing')\n", (7957, 7998), False, 'from itsdangerous import Signer\n'), ((8104, 8191), 'itsdangerous.Signer', 'Signer', (['user.signing_secret'], {'salt': '"""user-url-signing"""', 'digest_method': 'hashlib.sha256'}), "(user.signing_secret, salt='user-url-signing', digest_method=hashlib.\n sha256)\n", (8110, 8191), False, 'from itsdangerous import Signer\n'), ((8517, 8551), 'indico.web.flask.util.url_for', 'url_for', (['endpoint', '*args'], {}), '(endpoint, *args, **kwargs)\n', (8524, 8551), False, 'from indico.web.flask.util import url_for\n'), ((9562, 9640), 'indico.web.flask.util.url_for', 'url_for', (['endpoint', '*args'], {'_external': '_external', 'user_token': 'user_token'}), '(endpoint, *args, **kwargs, _external=_external, 
user_token=user_token)\n', (9569, 9640), False, 'from indico.web.flask.util import url_for\n'), ((10085, 10099), 'werkzeug.urls.url_parse', 'url_parse', (['url'], {}), '(url)\n', (10094, 10099), False, 'from werkzeug.urls import url_decode, url_encode, url_parse, url_unparse\n'), ((10113, 10137), 'werkzeug.urls.url_decode', 'url_decode', (['parsed.query'], {}), '(parsed.query)\n', (10123, 10137), False, 'from werkzeug.urls import url_decode, url_encode, url_parse, url_unparse\n'), ((10535, 10552), 'indico.modules.users.User.get', 'User.get', (['user_id'], {}), '(user_id)\n', (10543, 10552), False, 'from indico.modules.users import User\n'), ((11043, 11083), 'flask.request.headers.get', 'request.headers.get', (['"""Authorization"""', '""""""'], {}), "('Authorization', '')\n", (11062, 11083), False, 'from flask import flash, g, has_request_context, jsonify, render_template, request, session\n'), ((11824, 11850), 'flask.session.get_session_user', 'session.get_session_user', ([], {}), '()\n', (11848, 11850), False, 'from flask import flash, g, has_request_context, jsonify, render_template, request, session\n'), ((14253, 14285), 'flask.g.get', 'g.get', (['"""get_request_user_failed"""'], {}), "('get_request_user_failed')\n", (14258, 14285), False, 'from flask import flash, g, has_request_context, jsonify, render_template, request, session\n'), ((972, 982), 'markupsafe.Markup', 'Markup', (['js'], {}), '(js)\n', (978, 982), False, 'from markupsafe import Markup\n'), ((2877, 2886), 'indico.util.i18n._', '_', (['"""Save"""'], {}), "('Save')\n", (2878, 2886), False, 'from indico.util.i18n import _\n'), ((2923, 2934), 'indico.util.i18n._', '_', (['"""Cancel"""'], {}), "('Cancel')\n", (2924, 2934), False, 'from indico.util.i18n import _\n'), ((4381, 4421), 'flask.render_template', 'render_template', (['"""flashed_messages.html"""'], {}), "('flashed_messages.html')\n", (4396, 4421), False, 'from flask import flash, g, has_request_context, jsonify, render_template, request, 
session\n'), ((5950, 5971), 'flask.has_request_context', 'has_request_context', ([], {}), '()\n', (5969, 5971), False, 'from flask import flash, g, has_request_context, jsonify, render_template, request, session\n'), ((11245, 11280), 'indico.core.oauth.require_oauth.acquire_token', 'require_oauth.acquire_token', (['scopes'], {}), '(scopes)\n', (11272, 11280), False, 'from indico.core.oauth import require_oauth\n'), ((12146, 12202), 'werkzeug.exceptions.BadRequest', 'BadRequest', (['"""Signature auth is not allowed for this URL"""'], {}), "('Signature auth is not allowed for this URL')\n", (12156, 12202), False, 'from werkzeug.exceptions import BadRequest, Forbidden, ImATeapot\n'), ((14571, 14585), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (14583, 14585), False, 'import sys\n'), ((15752, 15856), 'sentry_sdk.set_user', 'sentry_sdk.set_user', (["{'id': user.id, 'email': user.email, 'name': user.full_name, 'source': source}"], {}), "({'id': user.id, 'email': user.email, 'name': user.\n full_name, 'source': source})\n", (15771, 15856), False, 'import sentry_sdk\n'), ((6958, 6987), 'flask.request.get_json', 'request.get_json', ([], {'silent': '(True)'}), '(silent=True)\n', (6974, 6987), False, 'from flask import flash, g, has_request_context, jsonify, render_template, request, session\n'), ((7875, 7905), 'werkzeug.urls.url_encode', 'url_encode', (['params'], {'sort': '(False)'}), '(params, sort=False)\n', (7885, 7905), False, 'from werkzeug.urls import url_decode, url_encode, url_parse, url_unparse\n'), ((10460, 10490), 'werkzeug.urls.url_encode', 'url_encode', (['params'], {'sort': '(False)'}), '(params, sort=False)\n', (10470, 10490), False, 'from werkzeug.urls import url_decode, url_encode, url_parse, url_unparse\n'), ((10595, 10640), 'indico.util.i18n._', '_', (['"""The persistent link you used is invalid."""'], {}), "('The persistent link you used is invalid.')\n", (10596, 10640), False, 'from indico.util.i18n import _\n'), ((10829, 10874), 
'indico.util.i18n._', '_', (['"""The persistent link you used is invalid."""'], {}), "('The persistent link you used is invalid.')\n", (10830, 10874), False, 'from indico.util.i18n import _\n'), ((11320, 11359), 'indico.core.oauth.require_oauth.raise_error_response', 'require_oauth.raise_error_response', (['exc'], {}), '(exc)\n', (11354, 11359), False, 'from indico.core.oauth import require_oauth\n'), ((11917, 11975), 'werkzeug.exceptions.BadRequest', 'BadRequest', (['"""OAuth tokens and signed URLs cannot be mixed"""'], {}), "('OAuth tokens and signed URLs cannot be mixed')\n", (11927, 11975), False, 'from werkzeug.exceptions import BadRequest, Forbidden, ImATeapot\n'), ((12019, 12081), 'werkzeug.exceptions.BadRequest', 'BadRequest', (['"""OAuth tokens and session cookies cannot be mixed"""'], {}), "('OAuth tokens and session cookies cannot be mixed')\n", (12029, 12081), False, 'from werkzeug.exceptions import BadRequest, Forbidden, ImATeapot\n'), ((6299, 6313), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6311, 6313), False, 'from datetime import datetime\n'), ((10335, 10380), 'indico.util.i18n._', '_', (['"""The persistent link you used is invalid."""'], {}), "('The persistent link you used is invalid.')\n", (10336, 10380), False, 'from indico.util.i18n import _\n'), ((13141, 13156), 'flask.session.clear', 'session.clear', ([], {}), '()\n', (13154, 13156), False, 'from flask import flash, g, has_request_context, jsonify, render_template, request, session\n'), ((12786, 12837), 'werkzeug.exceptions.Forbidden', 'Forbidden', (['"""User has been merged into another user"""'], {}), "('User has been merged into another user')\n", (12795, 12837), False, 'from werkzeug.exceptions import BadRequest, Forbidden, ImATeapot\n'), ((12878, 12912), 'werkzeug.exceptions.Forbidden', 'Forbidden', (['"""User has been deleted"""'], {}), "('User has been deleted')\n", (12887, 12912), False, 'from werkzeug.exceptions import BadRequest, Forbidden, ImATeapot\n'), ((13212, 
13310), 'indico.util.i18n._', '_', (['"""Your profile has been merged into <strong>{}</strong>. Please log in using that profile."""'], {}), "('Your profile has been merged into <strong>{}</strong>. Please log in using that profile.'\n )\n", (13213, 13310), False, 'from indico.util.i18n import _\n'), ((13549, 13583), 'werkzeug.exceptions.Forbidden', 'Forbidden', (['"""User has been blocked"""'], {}), "('User has been blocked')\n", (13558, 13583), False, 'from werkzeug.exceptions import BadRequest, Forbidden, ImATeapot\n'), ((13668, 13683), 'flask.session.clear', 'session.clear', ([], {}), '()\n', (13681, 13683), False, 'from flask import flash, g, has_request_context, jsonify, render_template, request, session\n'), ((13427, 13462), 'indico.util.i18n._', '_', (['"""Your profile has been deleted."""'], {}), "('Your profile has been deleted.')\n", (13428, 13462), False, 'from indico.util.i18n import _\n'), ((13702, 13737), 'indico.util.i18n._', '_', (['"""Your profile has been blocked."""'], {}), "('Your profile has been blocked.')\n", (13703, 13737), False, 'from indico.util.i18n import _\n'), ((15508, 15526), 'indico.core.logger.Logger.get', 'Logger.get', (['"""auth"""'], {}), "('auth')\n", (15518, 15526), False, 'from indico.core.logger import Logger\n'), ((13328, 13339), 'markupsafe.Markup', 'Markup', (['msg'], {}), '(msg)\n', (13334, 13339), False, 'from markupsafe import Markup\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Created on Mon Aug 17 11:31:32 2020
Distance-Controlled Boundaries Coefficient (DCBC) evaluation
for a functional parcellation of brain cortex
INPUTS:
sn: The list of subject numbers to be evaluated
hems: Hemisphere to test. 'L' - left hemisphere; 'R' - right hemisphere; 'all' - both hemispheres
binWidth: The spatial binning width in mm, default 1 mm
maxDist: The maximum distance for vertices pairs
parcels: The cortical parcellation labels (integer value) to be evaluated, shape is (N,)
N is the number of vertices, 0 - medial wall
condType: The condition type for evaluating
'unique' - evaluation will be done by using unique task conditions of the task set
'all' - evaluation will be done by all task conditions of the task set
taskSet: The task set of MDTB to use for evaluating. 1 - taskset A; 2 - taskset B; [1,2] - both
resolution: The resolution of surface space, either 32k or 164k, 32k as default
distType: The distance metric of vertices pairs, for example Dijkstra's distance, GOD distance
Euclidean distance. Dijkstra's distance as default
icoRes: Icosahedron resolution, 42, 162, 362, 642, 1002, ... default to use 2562
mwallFile: The medial wall to be excluded from the evaluation
OUTPUT:
M: Gifti object- can be saved as a *.func.gii or *.label.gii file
Author: <NAME>
'''
import os
import numpy as np
import pandas as pd
import scipy.io as spio
from scipy.sparse import find
import nibabel as nb
def eval_DCBC(sn=None, subj_name=None, hems='L', maxDist=35, binWidth=1, parcels='',
              condType='unique', taskSet=None, resolution='32k', distType='Dijkstra',
              icoRes=162, mWallFile='icos_162'):
    """Distance-Controlled Boundaries Coefficient (DCBC) evaluation of a
    cortical parcellation.

    :param sn: subject numbers to evaluate (defaults to [2])
    :param subj_name: subject folder names indexed by subject number (defaults to ['s02'])
    :param hems: hemisphere to test: 'L', 'R' or 'all' for both
    :param maxDist: maximum distance (mm) for vertex pairs
    :param binWidth: spatial binning width in mm
    :param parcels: parcellation label per vertex (0 = medial wall)
    :param condType: 'unique' or 'all' task conditions of the task set
    :param taskSet: MDTB task sets to use (defaults to [1])
    :param resolution: surface resolution, '32k' or '164k'
    :param distType: 'Dijkstra' or 'Sphere' vertex-pair distance metric
    :param icoRes: icosahedron resolution (kept for API compatibility)
    :param mWallFile: medial wall file name (kept for API compatibility)
    :return: dict of per-bin evaluation results
    """
    # Avoid shared mutable default arguments (previously sn=[2], etc. were
    # module-level lists shared across calls).
    if sn is None:
        sn = [2]
    if subj_name is None:
        subj_name = ['s02']
    if taskSet is None:
        taskSet = [1]

    taskConds = pd.read_table('DCBC/sc1_sc2_taskConds.txt', delim_whitespace=True)
    numBins = int(np.floor(maxDist / binWidth))

    # BUG FIX: `is` compared string identity (unreliable); use equality.
    if distType == 'Dijkstra':
        dist = spio.loadmat("DCBC/distAvrg_sp.mat")['avrgDs']
    elif distType == 'Sphere':
        dist = spio.loadmat("DCBC/distSphere_sp.mat")['avrgDs']
    else:
        raise TypeError("Distance type cannot be recognized!")

    # BUG FIX: `hems is 'L' or 'R'` was always truthy, so invalid values
    # silently fell through instead of raising.
    if hems == 'all':
        hems = ['L', 'R']
    elif hems in ('L', 'R'):
        hems = [hems]
    else:
        raise TypeError("Hemisphere type cannot be recognized!")

    # Result buffers (converted to numpy arrays by np.append below)
    studyNum, SN, hem = [], [], []
    N, bwParcel, distmin, distmax, meanCorr, weightedCorr = [], [], [], [], [], []

    for h in hems:
        # BUG FIX: work on per-hemisphere copies; the original destroyed
        # `parcels` (del) and rebound `dist` in place, so evaluating both
        # hemispheres crashed on the second iteration.
        mWall = np.where(parcels == 0)[0]
        parc = np.delete(parcels, mWall)  # remove medial wall
        labelDiff = np.abs(parc - parc[:, np.newaxis])
        d = np.delete(np.delete(dist.todense(), mWall, 0), mWall, 1)
        row, col, d = find(d)
        # 1 = vertex pair within the same parcel, 2 = between parcels
        sameRegion = np.where(labelDiff[row, col] == 0, 1, 2)
        del parc, labelDiff

        for ts in taskSet:
            # BUG FIX: do not clobber the full condition table; a second task
            # set would otherwise be filtered from an already-filtered frame.
            conds = taskConds[taskConds['StudyNum'] == ts]
            if condType == 'unique':  # unique task conditions of taskset ts
                condIdx = conds['condNum'][conds['overlap'] == 0]
            elif condType == 'all':  # all task conditions of taskset ts
                condIdx = conds['condNum']
            else:
                raise TypeError("Invalid condition type input!")

            for s in sn:
                this_wcon = nb.load("DCBC/%s/%s.%s.sc%s.con.%s.func.gii" %
                                    (subj_name[s-1], subj_name[s-1], h, ts, resolution))
                this_wcon = [x.data for x in this_wcon.darrays]
                this_wcon = np.reshape(this_wcon, (len(this_wcon), len(this_wcon[0]))).transpose()
                res = np.sqrt(this_wcon[:, -1])  # per-vertex residual scale
                this_wcon = np.delete(this_wcon, [0, this_wcon.shape[1] - 1], axis=1)  # remove instruction
                this_wcon = np.concatenate((this_wcon, np.zeros((this_wcon.shape[0], 1))), axis=1)  # add rest
                this_wcon = this_wcon / res[:, np.newaxis]  # noise normalize (vectorized)
                this_wcon = np.delete(this_wcon, mWall, axis=0)
                this_wcon = this_wcon[:, condIdx - 1]  # take the right subset
                this_wcon = this_wcon - this_wcon.mean(1)[:, np.newaxis]  # remove vertex mean
                this_wcon = this_wcon.astype('float32').transpose()
                K = this_wcon.shape[0]
                del res

                SD = np.sqrt(np.sum(np.square(this_wcon), axis=0) / K)  # standard deviation
                SD = np.reshape(SD, (SD.shape[0], 1))
                VAR = np.matmul(SD, SD.transpose())[row, col]
                COV = (np.matmul(this_wcon.transpose(), this_wcon) / K)[row, col]
                del SD, this_wcon

                print("\n")
                for bw in range(1, 3):  # 1 = within-parcel, 2 = between-parcel
                    for i in range(numBins):
                        print(".")
                        # Vectorized bin membership (the original looped in Python)
                        inBin = (d > i * binWidth) & (d <= (i + 1) * binWidth) & (sameRegion == bw)
                        N = np.append(N, np.count_nonzero(inBin))
                        studyNum = np.append(studyNum, ts)
                        SN = np.append(SN, s)
                        hem = np.append(hem, h)
                        bwParcel = np.append(bwParcel, bw - 1)
                        distmin = np.append(distmin, i * binWidth)
                        distmax = np.append(distmax, (i + 1) * binWidth)
                        meanCorr = np.append(meanCorr, np.nanmean(COV[inBin]) / np.nanmean(VAR[inBin]))
                del VAR, COV

                num_w = N[bwParcel == 0]
                num_b = N[bwParcel == 1]
                # Bin weights: harmonic-mean-style combination of the within
                # and between pair counts, normalized to sum to 1.
                weight = 1 / (1 / num_w + 1 / num_b)
                weight = weight / np.sum(weight)
                # BUG FIX: the original called np.append() with a single argument,
                # which raises TypeError (np.append requires arr AND values).
                # Per the DCBC definition the weighted quantity is the per-bin
                # (within - between) correlation difference.
                # NOTE(review): confirm against the reference implementation.
                weightedCorr = (meanCorr[bwParcel == 0] - meanCorr[bwParcel == 1]) * weight
                print("\n")

    struct = {
        "SN": SN,
        "hem": hem,
        "studyNum": studyNum,
        "N": N,
        "bwParcel": bwParcel,
        "distmin": distmin,
        "distmax": distmax,
        "meanCorr": meanCorr,
        "weightedCorr": weightedCorr
    }
    return struct
| [
"numpy.abs",
"numpy.sqrt",
"numpy.reshape",
"nibabel.load",
"numpy.where",
"numpy.delete",
"scipy.io.loadmat",
"numpy.floor",
"numpy.square",
"numpy.append",
"numpy.sum",
"numpy.zeros",
"numpy.count_nonzero",
"numpy.nanmean",
"pandas.read_table",
"scipy.sparse.find"
] | [((1943, 2009), 'pandas.read_table', 'pd.read_table', (['"""DCBC/sc1_sc2_taskConds.txt"""'], {'delim_whitespace': '(True)'}), "('DCBC/sc1_sc2_taskConds.txt', delim_whitespace=True)\n", (1956, 2009), True, 'import pandas as pd\n'), ((2028, 2056), 'numpy.floor', 'np.floor', (['(maxDist / binWidth)'], {}), '(maxDist / binWidth)\n', (2036, 2056), True, 'import numpy as np\n'), ((2788, 2813), 'numpy.delete', 'np.delete', (['parcels', 'mWall'], {}), '(parcels, mWall)\n', (2797, 2813), True, 'import numpy as np\n'), ((2853, 2893), 'numpy.abs', 'np.abs', (['(parcels - parcels[:, np.newaxis])'], {}), '(parcels - parcels[:, np.newaxis])\n', (2859, 2893), True, 'import numpy as np\n'), ((2938, 2963), 'numpy.delete', 'np.delete', (['dist', 'mWall', '(0)'], {}), '(dist, mWall, 0)\n', (2947, 2963), True, 'import numpy as np\n'), ((2979, 3004), 'numpy.delete', 'np.delete', (['dist', 'mWall', '(1)'], {}), '(dist, mWall, 1)\n', (2988, 3004), True, 'import numpy as np\n'), ((3030, 3040), 'scipy.sparse.find', 'find', (['dist'], {}), '(dist)\n', (3034, 3040), False, 'from scipy.sparse import find\n'), ((3062, 3099), 'numpy.zeros', 'np.zeros', (['(dist.shape[0],)'], {'dtype': 'int'}), '((dist.shape[0],), dtype=int)\n', (3070, 3099), True, 'import numpy as np\n'), ((2105, 2141), 'scipy.io.loadmat', 'spio.loadmat', (['"""DCBC/distAvrg_sp.mat"""'], {}), "('DCBC/distAvrg_sp.mat')\n", (2117, 2141), True, 'import scipy.io as spio\n'), ((2744, 2766), 'numpy.where', 'np.where', (['(parcels == 0)'], {}), '(parcels == 0)\n', (2752, 2766), True, 'import numpy as np\n'), ((2198, 2236), 'scipy.io.loadmat', 'spio.loadmat', (['"""DCBC/distSphere_sp.mat"""'], {}), "('DCBC/distSphere_sp.mat')\n", (2210, 2236), True, 'import scipy.io as spio\n'), ((3807, 3915), 'nibabel.load', 'nb.load', (["('DCBC/%s/%s.%s.sc%s.con.%s.func.gii' % (subj_name[s - 1], subj_name[s - 1],\n h, ts, resolution))"], {}), "('DCBC/%s/%s.%s.sc%s.con.%s.func.gii' % (subj_name[s - 1], subj_name\n [s - 1], h, ts, resolution))\n", 
(3814, 3915), True, 'import nibabel as nb\n'), ((4127, 4152), 'numpy.sqrt', 'np.sqrt', (['this_wcon[:, -1]'], {}), '(this_wcon[:, -1])\n', (4134, 4152), True, 'import numpy as np\n'), ((4180, 4237), 'numpy.delete', 'np.delete', (['this_wcon', '[0, this_wcon.shape[1] - 1]'], {'axis': '(1)'}), '(this_wcon, [0, this_wcon.shape[1] - 1], axis=1)\n', (4189, 4237), True, 'import numpy as np\n'), ((4532, 4567), 'numpy.delete', 'np.delete', (['this_wcon', 'mWall'], {'axis': '(0)'}), '(this_wcon, mWall, axis=0)\n', (4541, 4567), True, 'import numpy as np\n'), ((5064, 5096), 'numpy.reshape', 'np.reshape', (['SD', '(SD.shape[0], 1)'], {}), '(SD, (SD.shape[0], 1))\n', (5074, 5096), True, 'import numpy as np\n'), ((6737, 6765), 'numpy.append', 'np.append', (['(meanCorr * weight)'], {}), '(meanCorr * weight)\n', (6746, 6765), True, 'import numpy as np\n'), ((6691, 6705), 'numpy.sum', 'np.sum', (['weight'], {}), '(weight)\n', (6697, 6705), True, 'import numpy as np\n'), ((4314, 4347), 'numpy.zeros', 'np.zeros', (['(this_wcon.shape[0], 1)'], {}), '((this_wcon.shape[0], 1))\n', (4322, 4347), True, 'import numpy as np\n'), ((5502, 5539), 'numpy.zeros', 'np.zeros', (['(dist.shape[0],)'], {'dtype': 'int'}), '((dist.shape[0],), dtype=int)\n', (5510, 5539), True, 'import numpy as np\n'), ((6029, 6052), 'numpy.append', 'np.append', (['studyNum', 'ts'], {}), '(studyNum, ts)\n', (6038, 6052), True, 'import numpy as np\n'), ((6082, 6098), 'numpy.append', 'np.append', (['SN', 's'], {}), '(SN, s)\n', (6091, 6098), True, 'import numpy as np\n'), ((6129, 6146), 'numpy.append', 'np.append', (['hem', 'h'], {}), '(hem, h)\n', (6138, 6146), True, 'import numpy as np\n'), ((6182, 6209), 'numpy.append', 'np.append', (['bwParcel', '(bw - 1)'], {}), '(bwParcel, bw - 1)\n', (6191, 6209), True, 'import numpy as np\n'), ((6244, 6276), 'numpy.append', 'np.append', (['distmin', '(i * binWidth)'], {}), '(distmin, i * binWidth)\n', (6253, 6276), True, 'import numpy as np\n'), ((6311, 6349), 'numpy.append', 
'np.append', (['distmax', '((i + 1) * binWidth)'], {}), '(distmax, (i + 1) * binWidth)\n', (6320, 6349), True, 'import numpy as np\n'), ((4989, 5009), 'numpy.square', 'np.square', (['this_wcon'], {}), '(this_wcon)\n', (4998, 5009), True, 'import numpy as np\n'), ((5964, 5992), 'numpy.count_nonzero', 'np.count_nonzero', (['(inBin == 1)'], {}), '(inBin == 1)\n', (5980, 5992), True, 'import numpy as np\n'), ((6405, 6432), 'numpy.nanmean', 'np.nanmean', (['COV[inBin == 1]'], {}), '(COV[inBin == 1])\n', (6415, 6432), True, 'import numpy as np\n'), ((6435, 6462), 'numpy.nanmean', 'np.nanmean', (['VAR[inBin == 1]'], {}), '(VAR[inBin == 1])\n', (6445, 6462), True, 'import numpy as np\n')] |
# coding: utf-8
"""
Container Security API
# Authentication You must authenticate to the Qualys Cloud Platform using Qualys account credentials (user name and password) and get the JSON Web Token (JWT) before you can start using the Container Security APIs. Use the Qualys Authentication API to get the JWT. **Example Authentication Curl Request**: curl -X POST https://gateway/auth -H 'Content-Type: application/x-www-form-urlencoded' -d 'username=value1&password=<PASSWORD>&token=true' where - gateway is the base URL to the Qualys API server where your account is located. - **username** and **password** are the credentials of the user account for which you want to fetch Container Security data. - **token** should be **true** - **Content-Type** should be **application/x-www-form-urlencoded** # noqa: E501
OpenAPI spec version: v1.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Repository(object):
    """Model of a container-image repository summary.

    Originally produced by the swagger code generator; mirrors the API
    schema keys repoName / totalImages / totalScannedImages /
    totalVulnerableImages.
    """

    # attribute name -> attribute type
    swagger_types = {
        'repo_name': 'str',
        'total_images': 'int',
        'total_scanned_images': 'int',
        'total_vulnerable_images': 'int'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'repo_name': 'repoName',
        'total_images': 'totalImages',
        'total_scanned_images': 'totalScannedImages',
        'total_vulnerable_images': 'totalVulnerableImages'
    }

    def __init__(self, repo_name=None, total_images=None, total_scanned_images=None, total_vulnerable_images=None):  # noqa: E501
        """Repository - a model defined in Swagger"""  # noqa: E501
        self._repo_name = None
        self._total_images = None
        self._total_scanned_images = None
        self._total_vulnerable_images = None
        self.discriminator = None
        # Route each supplied argument through its property setter;
        # None arguments leave the backing field untouched.
        if repo_name is not None:
            self.repo_name = repo_name
        if total_images is not None:
            self.total_images = total_images
        if total_scanned_images is not None:
            self.total_scanned_images = total_scanned_images
        if total_vulnerable_images is not None:
            self.total_vulnerable_images = total_vulnerable_images

    @property
    def repo_name(self):
        """str: name of this repository."""
        return self._repo_name

    @repo_name.setter
    def repo_name(self, repo_name):
        """Set the repository name."""
        self._repo_name = repo_name

    @property
    def total_images(self):
        """int: total number of images in the repository."""
        return self._total_images

    @total_images.setter
    def total_images(self, total_images):
        """Set the total image count."""
        self._total_images = total_images

    @property
    def total_scanned_images(self):
        """int: number of images that have been scanned."""
        return self._total_scanned_images

    @total_scanned_images.setter
    def total_scanned_images(self, total_scanned_images):
        """Set the scanned image count."""
        self._total_scanned_images = total_scanned_images

    @property
    def total_vulnerable_images(self):
        """int: number of images with known vulnerabilities."""
        return self._total_vulnerable_images

    @total_vulnerable_images.setter
    def total_vulnerable_images(self, total_vulnerable_images):
        """Set the vulnerable image count."""
        self._total_vulnerable_images = total_vulnerable_images

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, 'to_dict') else item
                    for item in value
                ]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: (v.to_dict() if hasattr(v, 'to_dict') else v)
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        if issubclass(Repository, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, Repository) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"six.iteritems"
] | [((4957, 4990), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (4970, 4990), False, 'import six\n')] |
import torch.nn as nn
import torch.nn.functional as F
from torchvision.transforms import functional
import numpy as np
class Rotate(nn.Module):
    """Noise layer that rotates the noised image by a random angle.

    The angle is drawn uniformly from [-degrees, degrees] on every
    forward pass.
    """

    def __init__(self, degrees, interpolation_method='nearest'):
        super(Rotate, self).__init__()
        self.degrees = degrees
        self.interpolation_method = interpolation_method

    def forward(self, noised_and_cover):
        # Pick a fresh random angle per call and rotate only the noised
        # image (index 0); the cover image is passed through untouched.
        angle = np.random.uniform(-self.degrees, self.degrees)
        noised_and_cover[0] = functional.rotate(noised_and_cover[0], angle)
        return noised_and_cover
| [
"torchvision.transforms.functional.rotate",
"numpy.random.uniform"
] | [((488, 534), 'numpy.random.uniform', 'np.random.uniform', (['(-self.degrees)', 'self.degrees'], {}), '(-self.degrees, self.degrees)\n', (505, 534), True, 'import numpy as np\n'), ((608, 655), 'torchvision.transforms.functional.rotate', 'functional.rotate', (['noised_image', 'rotation_angle'], {}), '(noised_image, rotation_angle)\n', (625, 655), False, 'from torchvision.transforms import functional\n')] |
import random
import string
def random_string_digits(string_length=10):
    """Build a pseudo-random alphanumeric string.

    Args:
        string_length: number of characters to generate (default 10).

    Returns:
        A string of ``string_length`` characters drawn (with repetition)
        from ASCII letters and digits via ``random.choice``.
    """
    alphabet = string.ascii_letters + string.digits
    chars = [random.choice(alphabet) for _ in range(string_length)]
    return ''.join(chars)
| [
"random.choice"
] | [((213, 246), 'random.choice', 'random.choice', (['letters_and_digits'], {}), '(letters_and_digits)\n', (226, 246), False, 'import random\n')] |
#!/usr/bin/env python3
import sys
import json
import time
import subprocess
# Speedrun.com category table: short alias -> API category id, plus the
# JSON files holding the raw runs ("output_file") and the filtered runs
# ("output_file2") for that category.
cats = {
    "any": { "id": "vdoq4xvk", "output_file": "all.json", "output_file2": "all2.json", },
    "100": { "id": "xk9jv4gd", "output_file": "100.json", "output_file2": "1002.json", },
    'amq': { "id": "n2yj3r82", "output_file": "amq.json", "output_file2": "amq2.json", },
    "as": { "id": "wkpqmw8d", "output_file": "as.json", "output_file2": "as2.json", },
    "ad": { "id": "9d8jgv7k", "output_file": "ad.json", "output_file2": "ad2.json", }
}
def get_next_link(v):
    """Return the 'next' pagination URI from an API response dict.

    Scans ``v['pagination']['links']`` for the link whose ``rel`` is
    ``'next'``; returns its ``uri``, or None when there is no next page.
    """
    for link in v['pagination']['links']:
        if link['rel'] == 'next':
            return link['uri']
    return None
def get_player_ids_from_runs(out):
    """Collect the unique player ids referenced by a list of runs.

    Args:
        out: iterable of run dicts; each has a ``'players'`` list whose
            entries may lack an ``'id'`` key (guest players carry none).

    Returns:
        Sorted list of unique player ids.
    """
    ids = set()
    for run in out:
        ids.update(p['id'] for p in run['players'] if 'id' in p)
    # Sort AFTER deduplicating. The original `list(set(sorted(ids)))`
    # discarded the sort again when building the set, so the returned
    # order was nondeterministic.
    return sorted(ids)
def get_url(URL):
    """Fetch *URL* with curl and decode the response body as JSON."""
    proc = subprocess.run(
        ['curl', URL], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    return json.loads(proc.stdout)
def get_runs_at_offset(category, offset):
    """Fetch a single page of runs for *category* starting at *offset*."""
    url = (
        'https://www.speedrun.com/api/v1/runs'
        '?category={}&orderby=submitted&direction=asc&offset={}'
    ).format(category, offset)
    return get_url(url)
def get_all_runs( category, offset = 0 ):
    """Download every run page for *category* starting at *offset*.

    Follows the API's 'next' pagination links until exhausted, sleeping
    one second between requests to rate-limit against the server.
    Returns the concatenated list of run dicts.
    """
    out = []
    URL="https://www.speedrun.com/api/v1/runs?category={}&orderby=submitted&direction=asc&offset={}".format( category, offset )
    n=0  # NOTE(review): unused; left in place for this doc-only pass
    while URL is not None:
        v = get_url( URL )
        out.extend( v['data'] )
        URL = get_next_link( v )
        # Progress: running total of collected runs and the next page URL.
        print("{:5} {}".format(len(out), URL))
        time.sleep(1)
    return out
def load_runs( filename ):
    """Refresh a cached runs file with any newly submitted runs.

    Loads the cached JSON, re-downloads the last (possibly partial)
    page of ``chunk`` runs, and if its first id still matches the cache,
    replaces that tail and appends anything newer from the API.

    Raises:
        ValueError: if the re-downloaded page no longer lines up with the
            cached data (e.g. runs were removed or reordered upstream).
    """
    chunk = 20  # the API pages runs in blocks of 20
    runs = json.load(open(filename,'r'))
    category = runs[0]['category']
    n = len(runs)
    n20 = (n // chunk) * chunk  # offset of the last chunk boundary
    # NOTE(review): if n is an exact multiple of chunk, runs[n20] below
    # raises IndexError; presumably that never happens in practice -- verify.
    tmp = get_runs_at_offset(category, n20)
    if tmp['data'][0]['id'] == runs[n20]['id']: # Check if first id in downloaded data matches the same in the existing data
        print(" Last ID matches, just update runs", len(runs))
        runs = runs[:n20] # Truncate runs at beginning of chunk
        runs.extend(tmp['data']) # Add "new" runs
        URL = get_next_link( tmp )
        if URL is not None:
            print(" ... Last chunk has more runs, get 'em")
            r = get_all_runs( category, offset = n20 + chunk )
            runs.extend( r )
        return runs
    else :
        raise ValueError("IDs do not match")
    return runs  # unreachable: both branches above exit first
def get_all_players( ids ):
    """Fetch the full API record for every player id in *ids*.

    Returns a dict mapping player id -> decoded JSON response. Sleeps
    one second between requests to rate-limit against the API.
    """
    pid = {}
    for xid in ids:
        #print(id)
        url = "https://www.speedrun.com/api/v1/users/{}".format(xid)
        #print(url)
        args = ['curl', '--silent', url]
        p = subprocess.run(args, stdout=subprocess.PIPE)
        v = json.loads(p.stdout.decode())
        pid[xid] = v
        print("ID {}".format(xid))
        time.sleep(1)
    return pid
def update_players_list( runs, players_file = 'players.json'):
    """Merge players referenced by *runs* into the players cache file.

    Downloads only the player records not already present in
    *players_file*, then rewrites the file in place.
    """
    ids = get_player_ids_from_runs( runs )
    players = json.load(open(players_file,'r'))
    new_ids = [ key for key in ids if key not in players ]
    if len(new_ids) > 0:
        players_new = get_all_players( new_ids )
        players.update( players_new )
    with open(players_file, 'w') as fp:
        json.dump( players, fp )
if __name__ == '__main__':
    # CLI driver: get_data.py [-a] [-u] [-p] [-f] <category|all>
    players_file = "players.json"
    players_file2 = "players2.json"
    if len(sys.argv) < 2:
        print("Usage: get_data.py -a -p -r -f category")
        print("    -a     Get all runs (starts over)")
        print("    -f     Filter runs and players")
        print("    -u     Update runs")
        print("    -p     Update players")
        print("  Ex: next.py -u -p -f all    # Update all categories ")
        print("      next.py -a -p -f all    # Get all categories ")
        sys.exit(0)
    # The category alias is always the last argument.
    cat = sys.argv[-1]
    if cat not in cats and cat != "all":
        print("Please specify a category")
        for key in cats.keys() :
            print("   ", key)
        print("   all")
        sys.exit(-1)
    if cat == "all":
        categories = list(cats.keys())
    else:
        categories = [ cat ]
    # Flag parsing: presence anywhere among the arguments enables the mode.
    get_all = any([arg == '-a' for arg in sys.argv[1:]])
    update_players = any([arg == '-p' for arg in sys.argv[1:]])
    update_runs = any([arg == '-u' for arg in sys.argv[1:]])
    filter_data = any([arg == '-f' for arg in sys.argv[1:]])
    runs = []
    if get_all:
        # Full (re)download of every run in each selected category.
        for cat in categories:
            c = cats[cat]
            runs = get_all_runs( c['id'] )
            out = c['output_file']
            with open(out, 'w') as fp:
                json.dump( runs, fp )
            if update_players:
                update_players_list( runs )
    if update_runs:
        # Incremental update of the cached runs files.
        for cat in categories:
            print("Updating category:", cat)
            c = cats[cat]
            out = c['output_file']
            runs = load_runs( out )
            with open(out, 'w') as fp:
                json.dump( runs, fp )
            if update_players:
                print("   Updating players ...")
                update_players_list( runs )
    if filter_data:
        # Strip both caches down to the fields actually consumed downstream.
        for cat in categories:
            c = cats[cat]
            out = c['output_file']
            out2 = c['output_file2']
            runs = json.load(open(out,'r'))
            rout = []
            # Remove extra field we are not using
            for r in runs:
                r2 = {}
                # Skip runs with no players or with id-less (guest) runners.
                if len(r['players']) == 0:
                    continue
                if 'id' not in r['players'][0]:
                    continue
                for k in ['id','weblink','submitted']:
                    r2[k] = r[k]
                r2['status'] = { 'status': r['status']['status'] }
                r2['times'] = {'primary_t': r['times']['primary_t'] }
                r2['players'] = [ {'id': r['players'][0]['id'] } ]
                r2['values'] = r['values']
                rout.append(r2)
            json.dump( rout, open(out2,'w'))
            # Players file: keep only weblink / names / name-style per player.
            players = json.load(open(players_file, 'r'))
            pout = {}
            for k,v in players.items():
                pout[k] = {
                    'data': {
                        'weblink': v['data']['weblink'],
                        'names': v['data']['names'],
                        'name-style': v['data']['name-style'],
                    }
                }
            json.dump( pout, open(players_file2,'w'))
| [
"json.loads",
"subprocess.run",
"time.sleep",
"sys.exit",
"json.dump"
] | [((941, 991), 'subprocess.run', 'subprocess.run', (["['curl', URL]"], {'capture_output': '(True)'}), "(['curl', URL], capture_output=True)\n", (955, 991), False, 'import subprocess\n'), ((1005, 1025), 'json.loads', 'json.loads', (['p.stdout'], {}), '(p.stdout)\n', (1015, 1025), False, 'import json\n'), ((1595, 1608), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1605, 1608), False, 'import time\n'), ((2671, 2715), 'subprocess.run', 'subprocess.run', (['args'], {'stdout': 'subprocess.PIPE'}), '(args, stdout=subprocess.PIPE)\n', (2685, 2715), False, 'import subprocess\n'), ((2822, 2835), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2832, 2835), False, 'import time\n'), ((3768, 3779), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3776, 3779), False, 'import sys\n'), ((3981, 3993), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (3989, 3993), False, 'import sys\n'), ((3235, 3257), 'json.dump', 'json.dump', (['players', 'fp'], {}), '(players, fp)\n', (3244, 3257), False, 'import json\n'), ((4564, 4583), 'json.dump', 'json.dump', (['runs', 'fp'], {}), '(runs, fp)\n', (4573, 4583), False, 'import json\n'), ((4910, 4929), 'json.dump', 'json.dump', (['runs', 'fp'], {}), '(runs, fp)\n', (4919, 4929), False, 'import json\n')] |
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright (C) 2006 Fluendo, S.A. (www.fluendo.com).
# Copyright 2006, <NAME> <<EMAIL>>
# Copyright 2018, <NAME> <<EMAIL>>
'''
Devices
=======
This module contains two classes describing UPnP devices.
:class:`Device`
---------------
The base class for all devices.
:class:`RootDevice`
-------------------
A device representing a root device.
'''
import time
from lxml import etree
from eventdispatcher import EventDispatcher, Property, ListProperty
from twisted.internet import defer
from coherence import log
from coherence.upnp.core import utils
from coherence.upnp.core.service import Service
from . import xml_constants
ns = xml_constants.UPNP_DEVICE_NS
class Device(EventDispatcher, log.LogAble):
'''
Represents a UPnP's device, but this is not a root device, it's the base
class used for any device. See :class:`RootDevice` if you want a root
device.
.. versionchanged:: 0.9.0
* Migrated from louie/dispatcher to EventDispatcher
* The emitted events changed:
- Coherence.UPnP.Device.detection_completed =>
device_detection_completed
- Coherence.UPnP.Device.remove_client =>
device_remove_client
* New events: device_service_notified, device_got_client
* Changes some class variables to benefit from the EventDispatcher's
properties:
- :attr:`client`
- :attr:`devices`
- :attr:`services`
- :attr:`client`
- :attr:`detection_completed`
'''
logCategory = 'device'
client = Property(None)
'''
Defined by :class:`~coherence.upnp.devices.controlpoint.ControlPoint`.
It should be one of:
- Initialized instance of a class
:class:`~coherence.upnp.devices.media_server_client.MediaServerClient`
- Initialized instance of a class
:class:`~coherence.upnp.devices.media_renderer_client.MediaRendererClient`
- Initialized instance of a class
:class:`~coherence.upnp.devices.internet_gateway_device_client.InternetGatewayDeviceClient`
Whenever a client is set an event will be sent notifying it by
:meth:`on_client`.
''' # noqa
icons = ListProperty([])
'''A list of the device icons.'''
devices = ListProperty([])
'''A list of the device devices.'''
services = ListProperty([])
'''A list of the device services.'''
detection_completed = Property(False)
'''
To know whenever the device detection has completed. Defaults to `False`
and it will be set automatically to `True` by the class method
:meth:`receiver`.
'''
def __init__(self, parent=None, udn=None):
log.LogAble.__init__(self)
EventDispatcher.__init__(self)
self.register_event(
'device_detection_completed',
'device_remove_client',
'device_service_notified',
'device_got_client',
)
self.parent = parent
self.udn = udn
# self.uid = self.usn[:-len(self.st)-2]
self.friendly_name = ''
self.device_type = ''
self.upnp_version = 'n/a'
self.friendly_device_type = '[unknown]'
self.device_type_version = 0
def __repr__(self):
return (
f'embedded device {self.friendly_name} '
+ f'{self.device_type}, parent {self.parent}'
)
# def __del__(self):
# # print('Device removal completed')
# pass
def as_dict(self):
d = {
'device_type': self.get_device_type(),
'friendly_name': self.get_friendly_name(),
'udn': self.get_id(),
'services': [x.as_dict() for x in self.services],
}
icons = []
for icon in self.icons:
icons.append(
{
'mimetype': icon['mimetype'],
'url': icon['url'],
'height': icon['height'],
'width': icon['width'],
'depth': icon['depth'],
}
)
d['icons'] = icons
return d
def remove(self, *args):
self.info(f'removal of {self.friendly_name} {self.udn}')
while len(self.devices) > 0:
device = self.devices.pop()
self.debug(f'try to remove {device}')
device.remove()
while len(self.services) > 0:
service = self.services.pop()
self.debug(f'try to remove {service}')
service.remove()
if self.client is not None:
self.dispatch_event('device_remove_client', self.udn, self.client)
self.client = None
# del self
return True
    def receiver(self, *args, **kwargs):
        """Check whether this device's detection has completed.

        Invoked when one of our services finishes detection (and after
        parsing). Bails out while any service is still pending; once all
        services and the udn are known, marks the device complete and
        dispatches ``device_detection_completed`` -- to the parent for an
        embedded device, to ourselves for a root device.

        NOTE(review): ``device_service_notified`` is re-dispatched for every
        already-completed service on each invocation, so listeners may
        observe it more than once per service.
        """
        if self.detection_completed:
            return
        for s in self.services:
            if not s.detection_completed:
                return
            self.dispatch_event('device_service_notified', service=s)
        if self.udn is None:
            return
        self.detection_completed = True
        if self.parent is not None:
            self.info(
                f'embedded device {self.friendly_name} '
                + f'{self.device_type} initialized, parent {self.parent}'
            )
        if self.parent is not None:
            self.dispatch_event('device_detection_completed', self.parent)
        else:
            self.dispatch_event('device_detection_completed', self)
def service_detection_failed(self, device):
self.remove()
def get_id(self):
return self.udn
def get_uuid(self):
return self.udn[5:]
def get_embedded_devices(self):
return self.devices
def get_embedded_device_by_type(self, type):
r = []
for device in self.devices:
if type == device.friendly_device_type:
r.append(device)
return r
def get_services(self):
return self.services
def get_service_by_type(self, service_type):
if not isinstance(service_type, (tuple, list)):
service_type = [service_type]
for service in self.services:
_, _, _, service_class, version = service.service_type.split(':')
if service_class in service_type:
return service
def add_service(self, service):
'''
Add a service to the device. Also we check if service already notified,
and trigger the callback if needed. We also connect the device to
service in case the service still not completed his detection in order
that the device knows when the service has completed his detection.
Args:
service (object): A service which should be an initialized instance
of :class:`~coherence.upnp.core.service.Service`
'''
self.debug(f'add_service {service}')
if service.detection_completed:
self.receiver(service)
service.bind(
service_detection_completed=self.receiver,
service_detection_failed=self.service_detection_failed,
)
self.services.append(service)
# fixme: This fails as Service.get_usn() is not implemented.
def remove_service_with_usn(self, service_usn):
for service in self.services:
if service.get_usn() == service_usn:
service.unbind(
service_detection_completed=self.receiver,
service_detection_failed=self.service_detection_failed,
)
self.services.remove(service)
service.remove()
break
def add_device(self, device):
self.debug(f'Device add_device {device}')
self.devices.append(device)
def get_friendly_name(self):
return self.friendly_name
def get_device_type(self):
return self.device_type
def get_friendly_device_type(self):
return self.friendly_device_type
def get_markup_name(self):
try:
return self._markup_name
except AttributeError:
self._markup_name = (
f'{self.friendly_device_type}:{self.device_type_version} '
+ f'{self.friendly_name}'
)
return self._markup_name
def get_device_type_version(self):
return self.device_type_version
def set_client(self, client):
self.client = client
def get_client(self):
return self.client
def on_client(self, *args):
'''
Automatically triggered whenever a client is set or changed. Emmit
an event notifying that the client has changed.
.. versionadded:: 0.9.0
'''
self.dispatch_event('device_got_client', self, client=self.client)
    def renew_service_subscriptions(self):
        ''' iterate over device's services and renew subscriptions

        Renews any active subscription expiring within the next 30
        seconds, then recurses into embedded devices.
        '''
        self.info(f'renew service subscriptions for {self.friendly_name}')
        now = time.time()
        for service in self.services:
            self.info(
                f'check service {service.id} {service.get_sid()} '
                + f'{service.get_timeout()} {now}'
            )
            if service.get_sid() is not None:
                # Already past the timeout: the subscription lapsed before
                # this loop ran again.
                if service.get_timeout() < now:
                    self.debug(
                        f'wow, we lost an event subscription for '
                        + f'{self.friendly_name} {service.get_id()}, '
                        + f'maybe we need to rethink the loop time and '
                        + f'timeout calculation?'
                    )
                # Renew early (30s margin) so the subscription never lapses.
                if service.get_timeout() < now + 30:
                    service.renew_subscription()
        for device in self.devices:
            device.renew_service_subscriptions()
def unsubscribe_service_subscriptions(self):
'''Iterate over device's services and unsubscribe subscriptions '''
sl = []
for service in self.get_services():
if service.get_sid() is not None:
sl.append(service.unsubscribe())
dl = defer.DeferredList(sl)
return dl
def parse_device(self, d):
self.info(f'parse_device {d}')
self.device_type = d.findtext(f'./{{{ns}}}deviceType')
(
self.friendly_device_type, self.device_type_version,
) = self.device_type.split(':')[-2:]
self.friendly_name = d.findtext(f'./{{{ns}}}friendlyName')
self.udn = d.findtext(f'./{{{ns}}}UDN')
self.info(f'found udn {self.udn} {self.friendly_name}')
try:
self.manufacturer = d.findtext(f'./{{{ns}}}manufacturer')
except Exception:
pass
try:
self.manufacturer_url = d.findtext(f'./{{{ns}}}manufacturerURL')
except Exception:
pass
try:
self.model_name = d.findtext(f'./{{{ns}}}modelName')
except Exception:
pass
try:
self.model_description = d.findtext(f'./{{{ns}}}modelDescription')
except Exception:
pass
try:
self.model_number = d.findtext(f'./{{{ns}}}modelNumber')
except Exception:
pass
try:
self.model_url = d.findtext(f'./{{{ns}}}modelURL')
except Exception:
pass
try:
self.serial_number = d.findtext(f'./{{{ns}}}serialNumber')
except Exception:
pass
try:
self.upc = d.findtext(f'./{{{ns}}}UPC')
except Exception:
pass
try:
self.presentation_url = d.findtext(f'./{{{ns}}}presentationURL')
except Exception:
pass
try:
for dlna_doc in d.findall(
'./{urn:schemas-dlna-org:device-1-0}X_DLNADOC'
):
try:
self.dlna_dc.append(dlna_doc.text)
except AttributeError:
self.dlna_dc = []
self.dlna_dc.append(dlna_doc.text)
except Exception:
pass
try:
for dlna_cap in d.findall(
'./{urn:schemas-dlna-org:device-1-0}X_DLNACAP'
):
for cap in dlna_cap.text.split(','):
try:
self.dlna_cap.append(cap)
except AttributeError:
self.dlna_cap = []
self.dlna_cap.append(cap)
except Exception:
pass
icon_list = d.find(f'./{{{ns}}}iconList')
if icon_list is not None:
from urllib.parse import urlparse
url_base = '%s://%s' % urlparse(self.get_location())[:2]
for icon in icon_list.findall(f'./{{{ns}}}icon'):
try:
i = {}
i['mimetype'] = icon.find(f'./{{{ns}}}mimetype').text
i['width'] = icon.find(f'./{{{ns}}}width').text
i['height'] = icon.find(f'./{{{ns}}}height').text
i['depth'] = icon.find(f'./{{{ns}}}depth').text
i['realurl'] = icon.find(f'./{{{ns}}}url').text
i['url'] = self.make_fullyqualified(i['realurl']).decode(
'utf-8'
)
self.icons.append(i)
self.debug(f'adding icon {i} for {self.friendly_name}')
except Exception as e:
import traceback
self.debug(traceback.format_exc())
self.warning(
f'device {self.friendly_name} seems to have an invalid'
+ f' icon description, ignoring that icon [error: {e}]'
)
serviceList = d.find(f'./{{{ns}}}serviceList')
if serviceList is not None:
for service in serviceList.findall(f'./{{{ns}}}service'):
serviceType = service.findtext(f'{{{ns}}}serviceType')
serviceId = service.findtext(f'{{{ns}}}serviceId')
controlUrl = service.findtext(f'{{{ns}}}controlURL')
eventSubUrl = service.findtext(f'{{{ns}}}eventSubURL')
presentationUrl = service.findtext(f'{{{ns}}}presentationURL')
scpdUrl = service.findtext(f'{{{ns}}}SCPDURL')
# check if values are somehow reasonable
if len(scpdUrl) == 0:
self.warning('service has no uri for its description')
continue
if len(eventSubUrl) == 0:
self.warning('service has no uri for eventing')
continue
if len(controlUrl) == 0:
self.warning('service has no uri for controling')
continue
try:
self.add_service(
Service(
serviceType,
serviceId,
self.get_location(),
controlUrl,
eventSubUrl,
presentationUrl,
scpdUrl,
self,
)
)
except Exception as e:
self.error(
f'Error on adding service: {service} [ERROR: {e}]'
)
# now look for all sub devices
embedded_devices = d.find(f'./{{{ns}}}deviceList')
if embedded_devices is not None:
for d in embedded_devices.findall(f'./{{{ns}}}device'):
embedded_device = Device(self)
self.add_device(embedded_device)
embedded_device.parse_device(d)
self.receiver()
def get_location(self):
return self.parent.get_location()
def get_usn(self):
return self.parent.get_usn()
def get_upnp_version(self):
return self.parent.get_upnp_version()
def get_urlbase(self):
return self.parent.get_urlbase()
def get_presentation_url(self):
try:
return self.make_fullyqualified(self.presentation_url)
except Exception:
return ''
def get_parent_id(self):
try:
return self.parent.get_id()
except Exception:
return ''
def make_fullyqualified(self, url):
return self.parent.make_fullyqualified(url)
def as_tuples(self):
r = []
def append(name, attribute):
try:
if isinstance(attribute, tuple):
if callable(attribute[0]):
v1 = attribute[0]()
else:
v1 = getattr(self, attribute[0])
if v1 in [None, 'None']:
return
if callable(attribute[1]):
v2 = attribute[1]()
else:
v2 = getattr(self, attribute[1])
if v2 in [None, 'None']:
return
r.append((name, (v1, v2)))
return
elif callable(attribute):
v = attribute()
else:
v = getattr(self, attribute)
if v not in [None, 'None']:
r.append((name, v))
except Exception as e:
self.error(f'Device.as_tuples: {e}')
import traceback
self.debug(traceback.format_exc())
try:
r.append(('Location', (self.get_location(), self.get_location())))
except Exception:
pass
try:
append('URL base', self.get_urlbase)
except Exception:
pass
try:
r.append(('UDN', self.get_id()))
except Exception:
pass
try:
r.append(('Type', self.device_type))
except Exception:
pass
try:
r.append(('UPnP Version', self.upnp_version))
except Exception:
pass
try:
r.append(('DLNA Device Class', ','.join(self.dlna_dc)))
except Exception:
pass
try:
r.append(('DLNA Device Capability', ','.join(self.dlna_cap)))
except Exception:
pass
try:
r.append(('Friendly Name', self.friendly_name))
except Exception:
pass
try:
append('Manufacturer', 'manufacturer')
except Exception:
pass
try:
append(
'Manufacturer URL', ('manufacturer_url', 'manufacturer_url')
)
except Exception:
pass
try:
append('Model Description', 'model_description')
except Exception:
pass
try:
append('Model Name', 'model_name')
except Exception:
pass
try:
append('Model Number', 'model_number')
except Exception:
pass
try:
append('Model URL', ('model_url', 'model_url'))
except Exception:
pass
try:
append('Serial Number', 'serial_number')
except Exception:
pass
try:
append('UPC', 'upc')
except Exception:
pass
try:
append(
'Presentation URL',
(
'presentation_url',
lambda: self.make_fullyqualified(
getattr(self, 'presentation_url')
),
),
)
except Exception:
pass
for icon in self.icons:
r.append(
(
'Icon',
(
icon['realurl'],
self.make_fullyqualified(icon['realurl']),
{
'Mimetype': icon['mimetype'],
'Width': icon['width'],
'Height': icon['height'],
'Depth': icon['depth'],
},
),
)
)
return r
class RootDevice(Device):
'''
Description for a root device.
.. versionchanged:: 0.9.0
* Migrated from louie/dispatcher to EventDispatcher
* The emitted events changed:
- Coherence.UPnP.RootDevice.detection_completed =>
root_device_detection_completed
- Coherence.UPnP.RootDevice.removed => root_device_removed
'''
root_detection_completed = Property(False)
'''
To know whenever the root device detection has completed. Defaults to
`False` and it will be set automatically to `True` by the class method
:meth:`device_detect`.
'''
def __init__(self, infos):
self.usn = infos['USN']
self.udn = infos.get('UDN', '')
self.server = infos['SERVER']
self.st = infos['ST']
self.location = infos['LOCATION']
self.manifestation = infos['MANIFESTATION']
self.host = infos['HOST']
Device.__init__(self, None)
self.register_event(
'root_device_detection_completed', 'root_device_removed'
)
self.bind(detection_completed=self.device_detect)
# we need to handle root device completion
# these events could be our self or our children.
self.parse_description()
self.debug(f'RootDevice initialized: {self.location}')
def __repr__(self):
return (
f'rootdevice {self.friendly_name} {self.udn} {self.st} '
f'{self.host}, manifestation {self.manifestation}'
)
def remove(self, *args):
result = Device.remove(self, *args)
self.dispatch_event('root_device_removed', self, usn=self.get_usn())
return result
def get_usn(self):
return self.usn
def get_st(self):
return self.st
def get_location(self):
return (
self.location
if isinstance(self.location, bytes)
else self.location.encode('ascii')
if self.location
else None
)
def get_upnp_version(self):
return self.upnp_version
def get_urlbase(self):
return (
self.urlbase
if isinstance(self.urlbase, bytes)
else self.urlbase.encode('ascii')
if self.urlbase
else None
)
def get_host(self):
return self.host
def is_local(self):
if self.manifestation == 'local':
return True
return False
def is_remote(self):
if self.manifestation != 'local':
return True
return False
def device_detect(self, *args, **kwargs):
'''
This method is automatically triggered whenever the property of the
base class :attr:`Device.detection_completed` is set to `True`. Here we
perform some more operations, before the :class:`RootDevice` emits
an event notifying that the root device detection has completed.
'''
self.debug(f'device_detect {kwargs}')
self.debug(f'root_detection_completed {self.root_detection_completed}')
if self.root_detection_completed:
return
# our self is not complete yet
self.debug(f'detection_completed {self.detection_completed}')
if not self.detection_completed:
return
# now check child devices.
self.debug(f'self.devices {self.devices}')
for d in self.devices:
self.debug(f'check device {d.detection_completed} {d}')
if not d.detection_completed:
return
# now must be done, so notify root done
self.root_detection_completed = True
self.info(
f'rootdevice {self.friendly_name} {self.st} {self.host} '
+ f'initialized, manifestation {self.manifestation}'
)
self.dispatch_event('root_device_detection_completed', self)
def add_device(self, device):
self.debug(f'RootDevice add_device {device}')
self.devices.append(device)
def get_devices(self):
self.debug(f'RootDevice get_devices: {self.devices}')
return self.devices
def parse_description(self):
def gotPage(x):
self.debug(f'got device description from {self.location}')
self.debug(f'data is {x}')
data, headers = x
xml_data = None
try:
xml_data = etree.fromstring(data)
except Exception:
self.warning(
f'Invalid device description received from {self.location}'
)
import traceback
self.debug(traceback.format_exc())
if xml_data is not None:
tree = xml_data
major = tree.findtext(f'./{{{ns}}}specVersion/{{{ns}}}major')
minor = tree.findtext(f'./{{{ns}}}specVersion/{{{ns}}}minor')
try:
self.upnp_version = '.'.join((major, minor))
except Exception:
self.upnp_version = 'n/a'
try:
self.urlbase = tree.findtext(f'./{{{ns}}}URLBase')
except Exception:
import traceback
self.debug(traceback.format_exc())
d = tree.find(f'./{{{ns}}}device')
if d is not None:
self.parse_device(d) # root device
self.debug(f'device parsed successfully {self.location}')
def gotError(failure, url):
self.warning(f'error getting device description from {url}')
self.info(failure)
try:
utils.getPage(self.location).addCallbacks(
gotPage, gotError, None, None, [self.location], None
)
except Exception as e:
self.error(f'Error on parsing device description: {e}')
def make_fullyqualified(self, url):
'''Be aware that this function returns a byte string'''
self.info(f'make_fullyqualified: {url} [{type(url)}]')
if isinstance(url, str):
url = url.encode('ascii')
if url.startswith(b'http://'):
return url
from urllib.parse import urljoin
base = self.get_urlbase()
if isinstance(base, str):
base = base.encode('ascii')
if base is not None:
if base[-1] != b'/':
base += b'/'
r = urljoin(base, url)
else:
loc = self.get_location()
if isinstance(loc, str):
loc = loc.encode('ascii')
r = urljoin(loc, url)
return r
| [
"traceback.format_exc",
"eventdispatcher.EventDispatcher.__init__",
"eventdispatcher.Property",
"coherence.log.LogAble.__init__",
"coherence.upnp.core.utils.getPage",
"twisted.internet.defer.DeferredList",
"urllib.parse.urljoin",
"lxml.etree.fromstring",
"time.time",
"eventdispatcher.ListProperty"... | [((1665, 1679), 'eventdispatcher.Property', 'Property', (['None'], {}), '(None)\n', (1673, 1679), False, 'from eventdispatcher import EventDispatcher, Property, ListProperty\n'), ((2303, 2319), 'eventdispatcher.ListProperty', 'ListProperty', (['[]'], {}), '([])\n', (2315, 2319), False, 'from eventdispatcher import EventDispatcher, Property, ListProperty\n'), ((2373, 2389), 'eventdispatcher.ListProperty', 'ListProperty', (['[]'], {}), '([])\n', (2385, 2389), False, 'from eventdispatcher import EventDispatcher, Property, ListProperty\n'), ((2446, 2462), 'eventdispatcher.ListProperty', 'ListProperty', (['[]'], {}), '([])\n', (2458, 2462), False, 'from eventdispatcher import EventDispatcher, Property, ListProperty\n'), ((2531, 2546), 'eventdispatcher.Property', 'Property', (['(False)'], {}), '(False)\n', (2539, 2546), False, 'from eventdispatcher import EventDispatcher, Property, ListProperty\n'), ((20945, 20960), 'eventdispatcher.Property', 'Property', (['(False)'], {}), '(False)\n', (20953, 20960), False, 'from eventdispatcher import EventDispatcher, Property, ListProperty\n'), ((2785, 2811), 'coherence.log.LogAble.__init__', 'log.LogAble.__init__', (['self'], {}), '(self)\n', (2805, 2811), False, 'from coherence import log\n'), ((2820, 2850), 'eventdispatcher.EventDispatcher.__init__', 'EventDispatcher.__init__', (['self'], {}), '(self)\n', (2844, 2850), False, 'from eventdispatcher import EventDispatcher, Property, ListProperty\n'), ((9099, 9110), 'time.time', 'time.time', ([], {}), '()\n', (9108, 9110), False, 'import time\n'), ((10195, 10217), 'twisted.internet.defer.DeferredList', 'defer.DeferredList', (['sl'], {}), '(sl)\n', (10213, 10217), False, 'from twisted.internet import defer\n'), ((26964, 26982), 'urllib.parse.urljoin', 'urljoin', (['base', 'url'], {}), '(base, url)\n', (26971, 26982), False, 'from urllib.parse import urljoin\n'), ((27130, 27147), 'urllib.parse.urljoin', 'urljoin', (['loc', 'url'], {}), '(loc, 
url)\n', (27137, 27147), False, 'from urllib.parse import urljoin\n'), ((24925, 24947), 'lxml.etree.fromstring', 'etree.fromstring', (['data'], {}), '(data)\n', (24941, 24947), False, 'from lxml import etree\n'), ((26181, 26209), 'coherence.upnp.core.utils.getPage', 'utils.getPage', (['self.location'], {}), '(self.location)\n', (26194, 26209), False, 'from coherence.upnp.core import utils\n'), ((17742, 17764), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (17762, 17764), False, 'import traceback\n'), ((25167, 25189), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (25187, 25189), False, 'import traceback\n'), ((13633, 13655), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (13653, 13655), False, 'import traceback\n'), ((25778, 25800), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (25798, 25800), False, 'import traceback\n')] |
"""
DO NOT MODIFY
Dataloder for parts 2 and 3
We will also call this file when loading test data
"""
import os
import glob
import io
from torchtext import data
class IMDB(data.Dataset):
    """torchtext Dataset over the aclImdb movie-review corpus.

    Each example is the first line of a ``*.txt`` review file found under
    the ``pos``/``neg`` sub-directories of *path*, labelled with the name
    of the sub-directory it came from ('pos' or 'neg').
    """
    # Dataset name and extracted-archive directory name (torchtext conventions).
    name = 'imdb'
    dirname = 'aclImdb'
    def __init__(self, path, text_field, label_field, **kwargs):
        """Build examples from path/pos/*.txt and path/neg/*.txt.

        Args:
            path: root directory containing 'pos' and 'neg' sub-directories.
            text_field: torchtext Field applied to the review text.
            label_field: torchtext Field applied to the 'pos'/'neg' label.
            **kwargs: forwarded to ``data.Dataset``.
        """
        fields = [('text', text_field), ('label', label_field)]
        examples = []
        for label in ['pos', 'neg']:
            for fname in glob.iglob(os.path.join(path, label, '*.txt')):
                # Only the first line of each review file is used as the text.
                with io.open(fname, 'r', encoding="utf-8") as f:
                    text = f.readline()
                examples.append(data.Example.fromlist([text, label], fields))
        super(IMDB, self).__init__(examples, fields, **kwargs)
    @classmethod
    def splits(cls, text_field, label_field, root='data',
               train=None, test=None, validation=None, **kwargs):
        """Create train/validation/test splits via the torchtext base class."""
        return super(IMDB, cls).splits(
            root=root, text_field=text_field, label_field=label_field,
            train=train, validation=validation, test=test, **kwargs)
| [
"torchtext.data.Example.fromlist",
"os.path.join",
"io.open"
] | [((456, 490), 'os.path.join', 'os.path.join', (['path', 'label', '"""*.txt"""'], {}), "(path, label, '*.txt')\n", (468, 490), False, 'import os\n'), ((514, 551), 'io.open', 'io.open', (['fname', '"""r"""'], {'encoding': '"""utf-8"""'}), "(fname, 'r', encoding='utf-8')\n", (521, 551), False, 'import io\n'), ((630, 674), 'torchtext.data.Example.fromlist', 'data.Example.fromlist', (['[text, label]', 'fields'], {}), '([text, label], fields)\n', (651, 674), False, 'from torchtext import data\n')] |
#!/usr/bin/env python3
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tool to print GPOS and GSUB features supported by font file(s).
"""
from __future__ import print_function
import contextlib
import os
import sys
from fontTools.ttLib import TTFont
from gftools.util import google_fonts as fonts
from absl import app
def ListFeatures(font):
  """List features for specified font. Table assumed structured like GPOS/GSUB.

  Args:
    font: a TTFont.
  Returns:
    List of 3-tuples of ('GPOS', tag, name) of the features in the font.
  """
  found = []
  for table_tag in ('GPOS', 'GSUB'):
    if table_tag not in font.keys():
      continue
    for record in font[table_tag].table.FeatureList.FeatureRecord:
      lookups = ", ".join(str(idx) for idx in record.Feature.LookupListIndex)
      found.append((table_tag, record.FeatureTag,
                    "lookups: [{}]".format(lookups)))
  return found
def main(path):
  """Print GPOS/GSUB features of the .ttf at *path*, or of every .ttf in it.

  Args:
    path: a path to a single .ttf file, or to a directory containing .ttf
        files. Any other path results in no output.
  """
  from glob import glob  # stdlib; was referenced below without ever being imported
  # Initialize so a path that is neither a .ttf nor a directory no longer
  # raises NameError in the loop below.
  font_files = []
  if path.endswith(".ttf"):
    font_files = [path]
  elif os.path.isdir(path):
    font_files = glob(path + "/*.ttf")
  for font_file in font_files:
    features = []
    with TTFont(font_file) as font:
      features += ListFeatures(font)
    for (table, tag, lookup_name) in features:
      print('{:32s} {:4s} {:8s} {:15s}'.format(
          os.path.basename(font_file), table, str(tag), lookup_name))
if __name__ == '__main__':
  # Expect exactly one CLI argument: a .ttf file or a directory of .ttf files.
  if len(sys.argv) != 2:
    print("Please include either a path to a ttf or a path to a dir "
          "containing ttfs")
  else:
    main(sys.argv[1])
| [
"os.path.isdir",
"fontTools.ttLib.TTFont",
"os.path.basename"
] | [((1471, 1490), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1484, 1490), False, 'import os\n'), ((1590, 1607), 'fontTools.ttLib.TTFont', 'TTFont', (['font_file'], {}), '(font_file)\n', (1596, 1607), False, 'from fontTools.ttLib import TTFont\n'), ((1756, 1783), 'os.path.basename', 'os.path.basename', (['font_file'], {}), '(font_file)\n', (1772, 1783), False, 'import os\n')] |
import io
from banner import print_banner
def test_print_banner(monkeypatch) -> None:
    """Feed canned answers to print_banner's interactive stdin prompts."""
    answers = [
        "1",  # horizontal
        "1",  # vertical
        "1",  # centered
        "*",  # banner character
        "O",  # statement (only capital letters)
        "2",  # set_page
    ]
    monkeypatch.setattr("sys.stdin", io.StringIO("\n".join(answers)))
    print_banner()
| [
"banner.print_banner",
"io.StringIO"
] | [((399, 413), 'banner.print_banner', 'print_banner', ([], {}), '()\n', (411, 413), False, 'from banner import print_banner\n'), ((280, 369), 'io.StringIO', 'io.StringIO', (['f"""{horizontal}\n{vertical}\n{centered}\n{char}\n{statement}\n{set_page}"""'], {}), '(\n f"""{horizontal}\n{vertical}\n{centered}\n{char}\n{statement}\n{set_page}""")\n', (291, 369), False, 'import io\n')] |
#
# Copyright 2019 BrainPad Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
import os
# Name of the directory in which scenario files are placed
SCENARIO_DIR_NAME = "scenario"
# Scenario file name, excluding the extension
SCENARIO_FILE_NAME = "scenario"
# cliboa project directory path (the current working directory)
BASE_DIR = os.getcwd()
# Project directory path. Customization is available
PROJECT_DIR = os.path.join(BASE_DIR, "project")
# Common directory path. Customization is available
COMMON_DIR = os.path.join(BASE_DIR, "common")
# Common scenario directory path. Customization is available
COMMON_SCENARIO_DIR = os.path.join(COMMON_DIR, "scenario")
# The paths below are appended to Python's sys.path
SYSTEM_APPEND_PATHS = [COMMON_SCENARIO_DIR]
# Common custom classes to make available (module.ClassName strings)
COMMON_CUSTOM_CLASSES = ["sample_step.SampleStep", "sample_step.SampleStepSub"]
# Project-specific custom classes to make available
PROJECT_CUSTOM_CLASSES = []
| [
"os.path.join",
"os.getcwd"
] | [((843, 854), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (852, 854), False, 'import os\n'), ((923, 956), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""project"""'], {}), "(BASE_DIR, 'project')\n", (935, 956), False, 'import os\n'), ((1032, 1064), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""common"""'], {}), "(BASE_DIR, 'common')\n", (1044, 1064), False, 'import os\n'), ((1149, 1185), 'os.path.join', 'os.path.join', (['COMMON_DIR', '"""scenario"""'], {}), "(COMMON_DIR, 'scenario')\n", (1161, 1185), False, 'import os\n')] |
import copy
import os
import time
from collections import OrderedDict
from sklearn.model_selection import train_test_split
from torchvision import models
import torch
from torch.utils.tensorboard import SummaryWriter
import pandas as pd
from skimage.io import imread
from self_driving_ai.utils import *
"""
Credit: https://www.kaggle.com/gxkok21/resnet50-with-pytorch
"""
class DrivingDataset(torch.utils.data.Dataset):
    """Dataset of driving images, optionally paired with labels from a CSV.

    When ``labels_csv_file`` is given, image paths come from the first CSV
    column and labels from the second; otherwise every ``*.jpg`` file in
    ``img_dir`` is listed and samples carry no label.
    """

    def __init__(self, img_dir, labels_csv_file=None, transform=None):
        self.img_dir = img_dir
        if labels_csv_file:
            self.labels_df = pd.read_csv(labels_csv_file)
        else:
            self.images = [
                os.path.join(img_dir, name)
                for name in os.listdir(img_dir)
                if name.endswith(".jpg")
            ]
        self.transform = transform

    def __getitem__(self, idx):
        has_labels = hasattr(self, "labels_df")
        path = self.labels_df.iloc[idx, 0] if has_labels else self.images[idx]
        image = imread(path)
        if self.transform:
            image = self.transform(image)
        sample = {
            "image": image,
        }
        if has_labels:
            sample["label"] = self.labels_df.iloc[idx, 1]
            sample["id"] = idx
        return sample

    def __len__(self):
        if hasattr(self, "labels_df"):
            return self.labels_df.shape[0]
        return len(self.images)
if __name__ == '__main__':
    # Fine-tune a ResNet-50 regression head on the driving dataset.
    EPOCHS = 20
    USE_GPU = torch.cuda.is_available()
    device = torch.device("cuda:0" if USE_GPU else "cpu")
    writer = SummaryWriter("runs/self_driving_ai")

    IMG_DIR = "../Data/Training_Images"
    LABELS_PATH = "../Data/Training_Data"
    labels_df = pd.read_csv(LABELS_PATH, header=None)
    train_indices, test_indices = train_test_split(labels_df.index - 1, test_size=0.20)
    train_dataset = DrivingDataset(IMG_DIR, LABELS_PATH, transform_pipe)

    model = models.resnet50(pretrained=True)
    # Freeze the early layers; only fine-tune the deeper blocks and the head.
    freeze_layers = 6
    for i, child in enumerate(model.children()):
        if i <= freeze_layers:
            for param in child.parameters():
                param.requires_grad = False
    # Replace the 1000-way classifier with a single-output regression head.
    model.fc = torch.nn.Sequential(
        torch.nn.Linear(
            in_features=2048,
            out_features=1
        ),
    )
    model.to(device)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=32,
        sampler=torch.utils.data.SubsetRandomSampler(
            train_indices
        ))
    test_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=32,
        sampler=torch.utils.data.SubsetRandomSampler(
            test_indices
        ))
    optimizer = torch.optim.Adam(model.parameters())
    criterion = torch.nn.MSELoss()

    best_model_wts = copy.deepcopy(model.state_dict())
    # BUG FIX: lower MSE is better -- start from +inf and compare with "<"
    # (previously initialized to 0.0 and compared with ">", which saved
    # checkpoints on *worse* validation loss).
    best_epoch_mse_loss = float("inf")
    phases = OrderedDict([("train", train_loader), ("test", test_loader)])
    start = time.time()
    for i in range(EPOCHS):
        epoch = i + 1
        for phase, loader in phases.items():
            # BUG FIX: reset the running statistics for every phase so test
            # metrics are no longer contaminated by the training-phase sums.
            samples = 0
            mse_loss_sum = 0
            # Switch dropout / batch-norm behaviour between phases.
            model.train(phase == "train")
            for j, batch in enumerate(loader):
                X = batch["image"]
                labels = batch["label"]
                if USE_GPU:
                    X = X.cuda()
                    labels = labels.cuda()
                optimizer.zero_grad()
                with torch.set_grad_enabled(phase == 'train'):
                    y = model(X)
                    loss = criterion(
                        y,
                        labels.view(-1, 1).float()
                    )
                    if phase == "train":
                        loss.backward()
                        optimizer.step()
                # loss is the batch mean, so weight it by the batch size
                mse_loss_sum += loss.item() * X.shape[0]
                samples += X.shape[0]
                # Print batch statistics every 50 batches
                if j % 50 == 49 and phase == "train":
                    print("{}:{} - MSE_loss: {}".format(
                        epoch,
                        j + 1,
                        float(mse_loss_sum) / float(samples)
                    ))
            # Print epoch statistics
            epoch_mse_loss = float(mse_loss_sum) / float(samples)
            print("epoch: {} - {} MSE_loss:{:.4f}".format(epoch, phase, epoch_mse_loss))
            if phase == "train":
                writer.add_scalar('training MSE loss', epoch_mse_loss, epoch)
            # Keep the weights that achieve the lowest held-out MSE.
            if phase == "test" and epoch_mse_loss < best_epoch_mse_loss:
                best_epoch_mse_loss = epoch_mse_loss
                best_model_wts = copy.deepcopy(model.state_dict())
                torch.save(best_model_wts, "resnet50-optimal.pth")
    writer.close()
    end = time.time()
    train_time = end - start
    print("Total Training Time: {} seconds".format(train_time))
    print("Training Time Per Epoch: {} seconds".format(train_time / EPOCHS))
| [
"torch.utils.tensorboard.SummaryWriter",
"collections.OrderedDict",
"os.listdir",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"os.path.join",
"torch.utils.data.SubsetRandomSampler",
"torch.nn.MSELoss",
"skimage.io.imread",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch... | [((1923, 1967), 'torch.device', 'torch.device', (["('cuda:0' if USE_GPU else 'cpu')"], {}), "('cuda:0' if USE_GPU else 'cpu')\n", (1935, 1967), False, 'import torch\n'), ((1981, 2018), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['"""runs/self_driving_ai"""'], {}), "('runs/self_driving_ai')\n", (1994, 2018), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((2118, 2155), 'pandas.read_csv', 'pd.read_csv', (['LABELS_PATH'], {'header': 'None'}), '(LABELS_PATH, header=None)\n', (2129, 2155), True, 'import pandas as pd\n'), ((2190, 2242), 'sklearn.model_selection.train_test_split', 'train_test_split', (['(labels_df.index - 1)'], {'test_size': '(0.2)'}), '(labels_df.index - 1, test_size=0.2)\n', (2206, 2242), False, 'from sklearn.model_selection import train_test_split\n'), ((2331, 2363), 'torchvision.models.resnet50', 'models.resnet50', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2346, 2363), False, 'from torchvision import models\n'), ((3253, 3271), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (3269, 3271), False, 'import torch\n'), ((3371, 3432), 'collections.OrderedDict', 'OrderedDict', (["[('train', train_loader), ('test', test_loader)]"], {}), "([('train', train_loader), ('test', test_loader)])\n", (3382, 3432), False, 'from collections import OrderedDict\n'), ((3446, 3457), 'time.time', 'time.time', ([], {}), '()\n', (3455, 3457), False, 'import time\n'), ((5431, 5442), 'time.time', 'time.time', ([], {}), '()\n', (5440, 5442), False, 'import time\n'), ((1149, 1165), 'skimage.io.imread', 'imread', (['img_path'], {}), '(img_path)\n', (1155, 1165), False, 'from skimage.io import imread\n'), ((1873, 1898), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1896, 1898), False, 'import torch\n'), ((2703, 2752), 'torch.nn.Linear', 'torch.nn.Linear', ([], {'in_features': '(2048)', 'out_features': '(1)'}), '(in_features=2048, out_features=1)\n', (2718, 2752), False, 'import torch\n'), 
((736, 764), 'pandas.read_csv', 'pd.read_csv', (['labels_csv_file'], {}), '(labels_csv_file)\n', (747, 764), True, 'import pandas as pd\n'), ((2925, 2976), 'torch.utils.data.SubsetRandomSampler', 'torch.utils.data.SubsetRandomSampler', (['train_indices'], {}), '(train_indices)\n', (2961, 2976), False, 'import torch\n'), ((3109, 3159), 'torch.utils.data.SubsetRandomSampler', 'torch.utils.data.SubsetRandomSampler', (['test_indices'], {}), '(test_indices)\n', (3145, 3159), False, 'import torch\n'), ((806, 830), 'os.path.join', 'os.path.join', (['img_dir', 'f'], {}), '(img_dir, f)\n', (818, 830), False, 'import os\n'), ((5350, 5400), 'torch.save', 'torch.save', (['best_model_wts', '"""resnet50-optimal.pth"""'], {}), "(best_model_wts, 'resnet50-optimal.pth')\n", (5360, 5400), False, 'import torch\n'), ((840, 859), 'os.listdir', 'os.listdir', (['img_dir'], {}), '(img_dir)\n', (850, 859), False, 'import os\n'), ((3909, 3949), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (["(phase == 'train')"], {}), "(phase == 'train')\n", (3931, 3949), False, 'import torch\n')] |
#!/usr/bin/env python
# -------- BEGIN LICENSE BLOCK --------
# Copyright 2022 FZI Forschungszentrum Informatik
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the {copyright_holder} nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -------- END LICENSE BLOCK --------
from threading import Lock
import unittest
import rospy
from std_msgs.msg import Int32
from ros_bt_py_msgs.msg import Node as NodeMsg
from ros_bt_py.node_config import NodeConfig
from ros_bt_py.nodes.topic import TopicPublisher
PKG = 'ros_bt_py'
class TestTopicPublisherLeaf(unittest.TestCase):
    """This expects a test_topics_node.py instance running alongside

    That node will "reflect" anything we publish to /numbers_in - it's a
    separate node to avoid threading shenanigans in here.
    """
    def setUp(self):
        self.publisher_leaf = TopicPublisher(options={
            'topic_name': '/numbers_in',
            'topic_type': Int32
        })
        self.publisher_leaf.setup()
        self._lock = Lock()
        self.msg = None
        self.subscriber = rospy.Subscriber('/numbers_out', Int32, self.cb)
        rospy.wait_for_message('/ready', Int32)

    def tearDown(self):
        self.publisher_leaf.shutdown()

    def cb(self, msg):
        with self._lock:
            self.msg = msg

    def _tick_and_expect(self, value):
        """Publish *value* through the leaf and assert it is echoed back."""
        self.publisher_leaf.inputs['message'] = Int32(data=value)
        self.publisher_leaf.tick()
        # This should basically never fail - anything that can go wrong should
        # go wrong in the setup() method
        self.assertEqual(self.publisher_leaf.state, NodeMsg.SUCCEEDED)
        rospy.sleep(0.1)
        self.assertEqual(self.msg.data, value)

    def testSendsNumber(self):
        self.assertIsNone(self.msg)
        self._tick_and_expect(1)
        self._tick_and_expect(42)
        # Unticking and resetting must not break subsequent publishes.
        self.assertEqual(self.publisher_leaf.untick(), NodeMsg.IDLE)
        self.publisher_leaf.reset()
        self._tick_and_expect(23)
if __name__ == '__main__':
    rospy.init_node('test_topic_publish_leaf')
    import rostest
    import sys
    import os
    # Give this test run its own coverage data file.
    os.environ['COVERAGE_FILE'] = '%s.%s.coverage' % (PKG, 'test_topic_publish_leaf')
    rostest.rosrun(PKG, 'test_topic_publish_leaf', TestTopicPublisherLeaf,
                   sysargs=sys.argv + ['--cov'])
| [
"ros_bt_py.nodes.topic.TopicPublisher",
"rostest.rosrun",
"rospy.init_node",
"threading.Lock",
"std_msgs.msg.Int32",
"rospy.wait_for_message",
"rospy.sleep",
"rospy.Subscriber"
] | [((3967, 4009), 'rospy.init_node', 'rospy.init_node', (['"""test_topic_publish_leaf"""'], {}), "('test_topic_publish_leaf')\n", (3982, 4009), False, 'import rospy\n'), ((4148, 4252), 'rostest.rosrun', 'rostest.rosrun', (['PKG', '"""test_topic_publish_leaf"""', 'TestTopicPublisherLeaf'], {'sysargs': "(sys.argv + ['--cov'])"}), "(PKG, 'test_topic_publish_leaf', TestTopicPublisherLeaf,\n sysargs=sys.argv + ['--cov'])\n", (4162, 4252), False, 'import rostest\n'), ((2222, 2296), 'ros_bt_py.nodes.topic.TopicPublisher', 'TopicPublisher', ([], {'options': "{'topic_name': '/numbers_in', 'topic_type': Int32}"}), "(options={'topic_name': '/numbers_in', 'topic_type': Int32})\n", (2236, 2296), False, 'from ros_bt_py.nodes.topic import TopicPublisher\n'), ((2388, 2394), 'threading.Lock', 'Lock', ([], {}), '()\n', (2392, 2394), False, 'from threading import Lock\n'), ((2445, 2493), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/numbers_out"""', 'Int32', 'self.cb'], {}), "('/numbers_out', Int32, self.cb)\n", (2461, 2493), False, 'import rospy\n'), ((2502, 2541), 'rospy.wait_for_message', 'rospy.wait_for_message', (['"""/ready"""', 'Int32'], {}), "('/ready', Int32)\n", (2524, 2541), False, 'import rospy\n'), ((2799, 2812), 'std_msgs.msg.Int32', 'Int32', ([], {'data': '(1)'}), '(data=1)\n', (2804, 2812), False, 'from std_msgs.msg import Int32\n'), ((3048, 3064), 'rospy.sleep', 'rospy.sleep', (['(0.1)'], {}), '(0.1)\n', (3059, 3064), False, 'import rospy\n'), ((3157, 3171), 'std_msgs.msg.Int32', 'Int32', ([], {'data': '(42)'}), '(data=42)\n', (3162, 3171), False, 'from std_msgs.msg import Int32\n'), ((3407, 3423), 'rospy.sleep', 'rospy.sleep', (['(0.1)'], {}), '(0.1)\n', (3418, 3423), False, 'import rospy\n'), ((3623, 3637), 'std_msgs.msg.Int32', 'Int32', ([], {'data': '(23)'}), '(data=23)\n', (3628, 3637), False, 'from std_msgs.msg import Int32\n'), ((3873, 3889), 'rospy.sleep', 'rospy.sleep', (['(0.1)'], {}), '(0.1)\n', (3884, 3889), False, 'import rospy\n')] |
# -*- coding: utf-8 -*-
import functools
import click
import tensorflow as tf
from tensorflow.contrib.framework import arg_scope, add_arg_scope
from tfsnippet.bayes import BayesianNet
from tfsnippet.distributions import Normal, Bernoulli
from tfsnippet.examples.datasets import load_mnist, bernoulli_flow
from tfsnippet.examples.nn import (l2_regularizer,
regularization_loss,
dense)
from tfsnippet.examples.utils import (MLConfig,
MLResults,
save_images_collection,
config_options,
pass_global_config,
bernoulli_as_pixel,
print_with_title)
from tfsnippet.scaffold import TrainLoop
from tfsnippet.trainer import AnnealingDynamicValue, Trainer, Evaluator
from tfsnippet.utils import global_reuse, flatten, unflatten, create_session
class ExpConfig(MLConfig):
    """Experiment configuration; exposed on the CLI via `config_options`."""
    # model parameters
    z_dim = 40   # dimensionality of the latent variable z
    x_dim = 784  # dimensionality of the observed x (flattened 28x28 image)
    # training parameters
    write_summary = False        # write TensorBoard summaries if True
    max_epoch = 3000
    max_step = None              # no global step limit by default
    batch_size = 128
    l2_reg = 0.0001              # L2 regularization weight on dense kernels
    initial_lr = 0.001
    lr_anneal_factor = 0.5       # learning rate is multiplied by this on anneal
    lr_anneal_epoch_freq = 300   # anneal every this many epochs
    lr_anneal_step_freq = None
    # evaluation parameters
    test_n_z = 500               # number of z samples per x at evaluation time
    test_batch_size = 128
@global_reuse
@add_arg_scope
@pass_global_config
def q_net(config, x, observed=None, n_z=None, is_training=True):
    """Build the variational posterior q(z|x) as a BayesianNet."""
    net = BayesianNet(observed=observed)
    # encoder hidden features
    with arg_scope([dense],
                   activation_fn=tf.nn.leaky_relu,
                   kernel_regularizer=l2_regularizer(config.l2_reg)):
        hidden = dense(dense(tf.to_float(x), 500), 500)
    # posterior parameters and the latent sample z ~ q(z|x)
    z_mean = dense(hidden, config.z_dim, name='z_mean')
    z_logstd = dense(hidden, config.z_dim, name='z_logstd')
    net.add('z', Normal(mean=z_mean, logstd=z_logstd), n_samples=n_z,
            group_ndims=1)
    return net
@global_reuse
@add_arg_scope
@pass_global_config
def p_net(config, observed=None, n_z=None, is_training=True):
    """Build the generative model p(z) p(x|z) as a BayesianNet."""
    net = BayesianNet(observed=observed)
    # latent prior z ~ N(0, I)
    prior_mean = tf.zeros([1, config.z_dim])
    prior_logstd = tf.zeros([1, config.z_dim])
    z = net.add('z', Normal(mean=prior_mean, logstd=prior_logstd),
                group_ndims=1, n_samples=n_z)
    # decoder hidden features
    with arg_scope([dense],
                   activation_fn=tf.nn.leaky_relu,
                   kernel_regularizer=l2_regularizer(config.l2_reg)):
        flat_z, s1, s2 = flatten(z, 2)
        hidden = dense(dense(flat_z, 500), 500)
    # observation x ~ Bernoulli(logits)
    x_logits = unflatten(dense(hidden, config.x_dim, name='x_logits'), s1, s2)
    net.add('x', Bernoulli(logits=x_logits), group_ndims=1)
    return net
@click.command()
@click.option('--result-dir', help='The result directory.', metavar='PATH',
              required=False, type=str)
@config_options(ExpConfig)
@pass_global_config
def main(config, result_dir):
    """Train a VAE on binarized MNIST and record metrics/plots to result_dir."""
    # print the config
    print_with_title('Configurations', config.format_config(), after='\n')

    # open the result object and prepare for result directories
    results = MLResults(result_dir)
    results.make_dirs('plotting', exist_ok=True)
    results.make_dirs('train_summary', exist_ok=True)

    # input placeholders
    input_x = tf.placeholder(
        dtype=tf.int32, shape=(None, config.x_dim), name='input_x')
    is_training = tf.placeholder(
        dtype=tf.bool, shape=(), name='is_training')
    learning_rate = tf.placeholder(shape=(), dtype=tf.float32)
    learning_rate_var = AnnealingDynamicValue(config.initial_lr,
                                              config.lr_anneal_factor)

    # build the model
    with arg_scope([q_net, p_net], is_training=is_training):
        # derive the loss and lower-bound for training
        train_q_net = q_net(input_x)
        train_chain = train_q_net.chain(
            p_net, latent_names=['z'], latent_axis=0, observed={'x': input_x})
        vae_loss = tf.reduce_mean(train_chain.vi.training.sgvb())
        loss = vae_loss + regularization_loss()

        # derive the nll and logits output for testing
        test_q_net = q_net(input_x, n_z=config.test_n_z)
        test_chain = test_q_net.chain(
            p_net, latent_names=['z'], latent_axis=0, observed={'x': input_x})
        test_nll = -tf.reduce_mean(test_chain.vi.evaluation.is_loglikelihood())
        test_lb = tf.reduce_mean(test_chain.vi.lower_bound.elbo())

    # derive the optimizer
    optimizer = tf.train.AdamOptimizer(learning_rate)
    params = tf.trainable_variables()
    grads = optimizer.compute_gradients(loss, var_list=params)
    # apply gradients after any pending update ops (e.g. batch-norm updates)
    with tf.control_dependencies(
            tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        train_op = optimizer.apply_gradients(grads)

    # derive the plotting function: a 10x10 grid of samples from p(x)
    with tf.name_scope('plot_x'):
        plot_p_net = p_net(n_z=100, is_training=is_training)
        x_plots = tf.reshape(bernoulli_as_pixel(plot_p_net['x']), (-1, 28, 28))

    def plot_samples(loop):
        # save a sample grid image for the current epoch
        with loop.timeit('plot_time'):
            images = session.run(x_plots, feed_dict={is_training: False})
            save_images_collection(
                images=images,
                filename='plotting/{}.png'.format(loop.epoch),
                grid_size=(10, 10),
                results=results
            )

    # prepare for training and testing data
    (x_train, y_train), (x_test, y_test) = load_mnist()
    train_flow = bernoulli_flow(
        x_train, config.batch_size, shuffle=True, skip_incomplete=True)
    test_flow = bernoulli_flow(
        x_test, config.test_batch_size, sample_now=True)

    with create_session().as_default() as session, \
            train_flow.threaded(5) as train_flow:
        # train the network
        with TrainLoop(params,
                       var_groups=['q_net', 'p_net'],
                       max_epoch=config.max_epoch,
                       max_step=config.max_step,
                       summary_dir=(results.system_path('train_summary')
                                    if config.write_summary else None),
                       summary_graph=tf.get_default_graph(),
                       early_stopping=False) as loop:
            trainer = Trainer(
                loop, train_op, [input_x], train_flow,
                feed_dict={learning_rate: learning_rate_var, is_training: True},
                metrics={'loss': loss}
            )
            trainer.anneal_after(
                learning_rate_var,
                epochs=config.lr_anneal_epoch_freq,
                steps=config.lr_anneal_step_freq
            )
            evaluator = Evaluator(
                loop,
                metrics={'test_nll': test_nll, 'test_lb': test_lb},
                inputs=[input_x],
                data_flow=test_flow,
                feed_dict={is_training: False},
                time_metric_name='test_time'
            )
            evaluator.after_run.add_hook(
                lambda: results.update_metrics(evaluator.last_metrics_dict))
            # evaluate and plot every 10 epochs, log every epoch
            trainer.evaluate_after_epochs(evaluator, freq=10)
            trainer.evaluate_after_epochs(
                functools.partial(plot_samples, loop), freq=10)
            trainer.log_after_epochs(freq=1)
            trainer.run()

    # print the final metrics and close the results object
    print_with_title('Results', results.format_metrics(), before='\n')
    results.close()
if __name__ == '__main__':
    main()  # CLI argument parsing is handled by the click decorators on main()
| [
"tfsnippet.examples.datasets.load_mnist",
"tfsnippet.bayes.BayesianNet",
"tfsnippet.examples.nn.dense",
"click.option",
"tensorflow.placeholder",
"tensorflow.trainable_variables",
"tensorflow.train.AdamOptimizer",
"click.command",
"tfsnippet.utils.flatten",
"tensorflow.zeros",
"tfsnippet.utils.c... | [((2935, 2950), 'click.command', 'click.command', ([], {}), '()\n', (2948, 2950), False, 'import click\n'), ((2952, 3056), 'click.option', 'click.option', (['"""--result-dir"""'], {'help': '"""The result directory."""', 'metavar': '"""PATH"""', 'required': '(False)', 'type': 'str'}), "('--result-dir', help='The result directory.', metavar='PATH',\n required=False, type=str)\n", (2964, 3056), False, 'import click\n'), ((3068, 3093), 'tfsnippet.examples.utils.config_options', 'config_options', (['ExpConfig'], {}), '(ExpConfig)\n', (3082, 3093), False, 'from tfsnippet.examples.utils import MLConfig, MLResults, save_images_collection, config_options, pass_global_config, bernoulli_as_pixel, print_with_title\n'), ((1561, 1591), 'tfsnippet.bayes.BayesianNet', 'BayesianNet', ([], {'observed': 'observed'}), '(observed=observed)\n', (1572, 1591), False, 'from tfsnippet.bayes import BayesianNet\n'), ((1903, 1942), 'tfsnippet.examples.nn.dense', 'dense', (['h_x', 'config.z_dim'], {'name': '"""z_mean"""'}), "(h_x, config.z_dim, name='z_mean')\n", (1908, 1942), False, 'from tfsnippet.examples.nn import l2_regularizer, regularization_loss, dense\n'), ((1958, 1999), 'tfsnippet.examples.nn.dense', 'dense', (['h_x', 'config.z_dim'], {'name': '"""z_logstd"""'}), "(h_x, config.z_dim, name='z_logstd')\n", (1963, 1999), False, 'from tfsnippet.examples.nn import l2_regularizer, regularization_loss, dense\n'), ((2244, 2274), 'tfsnippet.bayes.BayesianNet', 'BayesianNet', ([], {'observed': 'observed'}), '(observed=observed)\n', (2255, 2274), False, 'from tfsnippet.bayes import BayesianNet\n'), ((3321, 3342), 'tfsnippet.examples.utils.MLResults', 'MLResults', (['result_dir'], {}), '(result_dir)\n', (3330, 3342), False, 'from tfsnippet.examples.utils import MLConfig, MLResults, save_images_collection, config_options, pass_global_config, bernoulli_as_pixel, print_with_title\n'), ((3486, 3560), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 
'shape': '(None, config.x_dim)', 'name': '"""input_x"""'}), "(dtype=tf.int32, shape=(None, config.x_dim), name='input_x')\n", (3500, 3560), True, 'import tensorflow as tf\n'), ((3588, 3647), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.bool', 'shape': '()', 'name': '"""is_training"""'}), "(dtype=tf.bool, shape=(), name='is_training')\n", (3602, 3647), True, 'import tensorflow as tf\n'), ((3677, 3719), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '()', 'dtype': 'tf.float32'}), '(shape=(), dtype=tf.float32)\n', (3691, 3719), True, 'import tensorflow as tf\n'), ((3744, 3809), 'tfsnippet.trainer.AnnealingDynamicValue', 'AnnealingDynamicValue', (['config.initial_lr', 'config.lr_anneal_factor'], {}), '(config.initial_lr, config.lr_anneal_factor)\n', (3765, 3809), False, 'from tfsnippet.trainer import AnnealingDynamicValue, Trainer, Evaluator\n'), ((4689, 4726), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (4711, 4726), True, 'import tensorflow as tf\n'), ((4740, 4764), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (4762, 4764), True, 'import tensorflow as tf\n'), ((5624, 5636), 'tfsnippet.examples.datasets.load_mnist', 'load_mnist', ([], {}), '()\n', (5634, 5636), False, 'from tfsnippet.examples.datasets import load_mnist, bernoulli_flow\n'), ((5654, 5732), 'tfsnippet.examples.datasets.bernoulli_flow', 'bernoulli_flow', (['x_train', 'config.batch_size'], {'shuffle': '(True)', 'skip_incomplete': '(True)'}), '(x_train, config.batch_size, shuffle=True, skip_incomplete=True)\n', (5668, 5732), False, 'from tfsnippet.examples.datasets import load_mnist, bernoulli_flow\n'), ((5758, 5821), 'tfsnippet.examples.datasets.bernoulli_flow', 'bernoulli_flow', (['x_test', 'config.test_batch_size'], {'sample_now': '(True)'}), '(x_test, config.test_batch_size, sample_now=True)\n', (5772, 5821), False, 'from tfsnippet.examples.datasets import load_mnist, 
bernoulli_flow\n'), ((1790, 1804), 'tensorflow.to_float', 'tf.to_float', (['x'], {}), '(x)\n', (1801, 1804), True, 'import tensorflow as tf\n'), ((1819, 1834), 'tfsnippet.examples.nn.dense', 'dense', (['h_x', '(500)'], {}), '(h_x, 500)\n', (1824, 1834), False, 'from tfsnippet.examples.nn import l2_regularizer, regularization_loss, dense\n'), ((1849, 1864), 'tfsnippet.examples.nn.dense', 'dense', (['h_x', '(500)'], {}), '(h_x, 500)\n', (1854, 1864), False, 'from tfsnippet.examples.nn import l2_regularizer, regularization_loss, dense\n'), ((2021, 2057), 'tfsnippet.distributions.Normal', 'Normal', ([], {'mean': 'z_mean', 'logstd': 'z_logstd'}), '(mean=z_mean, logstd=z_logstd)\n', (2027, 2057), False, 'from tfsnippet.distributions import Normal, Bernoulli\n'), ((2677, 2690), 'tfsnippet.utils.flatten', 'flatten', (['z', '(2)'], {}), '(z, 2)\n', (2684, 2690), False, 'from tfsnippet.utils import global_reuse, flatten, unflatten, create_session\n'), ((2705, 2720), 'tfsnippet.examples.nn.dense', 'dense', (['h_z', '(500)'], {}), '(h_z, 500)\n', (2710, 2720), False, 'from tfsnippet.examples.nn import l2_regularizer, regularization_loss, dense\n'), ((2735, 2750), 'tfsnippet.examples.nn.dense', 'dense', (['h_z', '(500)'], {}), '(h_z, 500)\n', (2740, 2750), False, 'from tfsnippet.examples.nn import l2_regularizer, regularization_loss, dense\n'), ((2801, 2842), 'tfsnippet.examples.nn.dense', 'dense', (['h_z', 'config.x_dim'], {'name': '"""x_logits"""'}), "(h_z, config.x_dim, name='x_logits')\n", (2806, 2842), False, 'from tfsnippet.examples.nn import l2_regularizer, regularization_loss, dense\n'), ((2873, 2899), 'tfsnippet.distributions.Bernoulli', 'Bernoulli', ([], {'logits': 'x_logits'}), '(logits=x_logits)\n', (2882, 2899), False, 'from tfsnippet.distributions import Normal, Bernoulli\n'), ((3888, 3938), 'tensorflow.contrib.framework.arg_scope', 'arg_scope', (['[q_net, p_net]'], {'is_training': 'is_training'}), '([q_net, p_net], is_training=is_training)\n', (3897, 3938), 
False, 'from tensorflow.contrib.framework import arg_scope, add_arg_scope\n'), ((5016, 5039), 'tensorflow.name_scope', 'tf.name_scope', (['"""plot_x"""'], {}), "('plot_x')\n", (5029, 5039), True, 'import tensorflow as tf\n'), ((4245, 4266), 'tfsnippet.examples.nn.regularization_loss', 'regularization_loss', ([], {}), '()\n', (4264, 4266), False, 'from tfsnippet.examples.nn import l2_regularizer, regularization_loss, dense\n'), ((4874, 4916), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (4891, 4916), True, 'import tensorflow as tf\n'), ((5131, 5166), 'tfsnippet.examples.utils.bernoulli_as_pixel', 'bernoulli_as_pixel', (["plot_p_net['x']"], {}), "(plot_p_net['x'])\n", (5149, 5166), False, 'from tfsnippet.examples.utils import MLConfig, MLResults, save_images_collection, config_options, pass_global_config, bernoulli_as_pixel, print_with_title\n'), ((6430, 6569), 'tfsnippet.trainer.Trainer', 'Trainer', (['loop', 'train_op', '[input_x]', 'train_flow'], {'feed_dict': '{learning_rate: learning_rate_var, is_training: True}', 'metrics': "{'loss': loss}"}), "(loop, train_op, [input_x], train_flow, feed_dict={learning_rate:\n learning_rate_var, is_training: True}, metrics={'loss': loss})\n", (6437, 6569), False, 'from tfsnippet.trainer import AnnealingDynamicValue, Trainer, Evaluator\n'), ((6836, 7013), 'tfsnippet.trainer.Evaluator', 'Evaluator', (['loop'], {'metrics': "{'test_nll': test_nll, 'test_lb': test_lb}", 'inputs': '[input_x]', 'data_flow': 'test_flow', 'feed_dict': '{is_training: False}', 'time_metric_name': '"""test_time"""'}), "(loop, metrics={'test_nll': test_nll, 'test_lb': test_lb}, inputs=\n [input_x], data_flow=test_flow, feed_dict={is_training: False},\n time_metric_name='test_time')\n", (6845, 7013), False, 'from tfsnippet.trainer import AnnealingDynamicValue, Trainer, Evaluator\n'), ((1744, 1773), 'tfsnippet.examples.nn.l2_regularizer', 'l2_regularizer', (['config.l2_reg'], {}), 
'(config.l2_reg)\n', (1758, 1773), False, 'from tfsnippet.examples.nn import l2_regularizer, regularization_loss, dense\n'), ((2331, 2358), 'tensorflow.zeros', 'tf.zeros', (['[1, config.z_dim]'], {}), '([1, config.z_dim])\n', (2339, 2358), True, 'import tensorflow as tf\n'), ((2395, 2422), 'tensorflow.zeros', 'tf.zeros', (['[1, config.z_dim]'], {}), '([1, config.z_dim])\n', (2403, 2422), True, 'import tensorflow as tf\n'), ((2623, 2652), 'tfsnippet.examples.nn.l2_regularizer', 'l2_regularizer', (['config.l2_reg'], {}), '(config.l2_reg)\n', (2637, 2652), False, 'from tfsnippet.examples.nn import l2_regularizer, regularization_loss, dense\n'), ((5841, 5857), 'tfsnippet.utils.create_session', 'create_session', ([], {}), '()\n', (5855, 5857), False, 'from tfsnippet.utils import global_reuse, flatten, unflatten, create_session\n'), ((7355, 7392), 'functools.partial', 'functools.partial', (['plot_samples', 'loop'], {}), '(plot_samples, loop)\n', (7372, 7392), False, 'import functools\n'), ((6330, 6352), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (6350, 6352), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import time
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import copy
# In[22]:
# helps from: https://www.geeksforgeeks.org/merge-sort/
def RecursiveMergeSort(input_array, is_first=True):
    """Top-down merge sort that reports statistics instead of the result.

    Returns ``(comparison_count, elapsed_seconds)``.

    Note: when ``is_first`` is True the input is deep-copied first, so the
    caller's sequence is left untouched and the sorted copy is discarded;
    only the statistics are returned (this mirrors the original script's
    behaviour).  When ``is_first`` is False the given sequence is sorted
    in place.
    """
    time_start = time.time()
    compare_time = 0
    # Work on a copy at the top level so the caller's data is preserved.
    sort_array = copy.deepcopy(input_array) if is_first else input_array
    if len(sort_array) > 1:  # base case: 0/1 elements are already sorted
        mid = len(sort_array) // 2
        left_array = sort_array[:mid]
        right_array = sort_array[mid:]
        # Recursively sort each half in place; each call always returns a
        # (count, time) tuple, so the counts can be accumulated directly
        # (the old `!= None` guards were dead code).
        compare_time += RecursiveMergeSort(left_array, is_first=False)[0]
        compare_time += RecursiveMergeSort(right_array, is_first=False)[0]
        # Merge the two sorted halves back into sort_array, counting each
        # element-to-element comparison.
        i = j = k = 0
        while i < len(left_array) and j < len(right_array):
            compare_time += 1
            if left_array[i] < right_array[j]:
                sort_array[k] = left_array[i]
                i += 1
            else:
                sort_array[k] = right_array[j]
                j += 1
            k += 1
        # Copy any leftover tail; exactly one of these loops runs.
        while i < len(left_array):
            sort_array[k] = left_array[i]
            i += 1
            k += 1
        while j < len(right_array):
            sort_array[k] = right_array[j]
            j += 1
            k += 1
    return compare_time, time.time() - time_start
# iterative merge sort
# helps: https://www.geeksforgeeks.org/iterative-merge-sort/
def IterativeMergeSort(input_array):
    """Bottom-up (iterative) merge sort that reports statistics.

    Sorts a deep copy of ``input_array`` (the caller's sequence is left
    untouched and the sorted copy is discarded) and returns
    ``(comparison_count, elapsed_seconds)``, mirroring
    ``RecursiveMergeSort``.
    """
    time_start = time.time()
    compare_time = 0
    current_size = 1
    sort_array = copy.deepcopy(input_array)  # sort a copy, keep the original intact
    # BUG FIX: the pass loop must run while current_size < len(sort_array).
    # The previous bound (len - 1) skipped the final merge pass for arrays
    # of length 2 and 3, leaving them unsorted.  For the power-of-two sizes
    # used in this script the number of passes is unchanged.
    while current_size < len(sort_array):
        left = 0
        while left < len(sort_array) - 1:
            mid = left + current_size - 1
            # Clamp the right edge of the second run to the end of the array.
            right = min(2 * current_size + left - 1, len(sort_array) - 1)
            # Merge each pair of adjacent runs of length current_size.
            compare_time += merge(sort_array, left, mid, right)
            left = left + current_size * 2
        # Runs merged at this size are twice as long for the next pass.
        current_size = 2 * current_size
    time_finish = time.time()
    time_run = time_finish - time_start
    return compare_time, time_run


def merge(input_array, left, mid, right):
    """Merge the sorted runs input_array[left:mid+1] and
    input_array[mid+1:right+1] in place; return the number of element
    comparisons performed."""
    compare_time = 0
    n1 = mid - left + 1
    n2 = right - mid
    # Copy the two runs out (list() makes a real element copy even for
    # numpy arrays, where a bare slice would be a view) so the merged
    # region can be overwritten safely.
    L = list(input_array[left:mid + 1])
    R = list(input_array[mid + 1:right + 1])
    i, j, k = 0, 0, left
    while i < n1 and j < n2:
        compare_time += 1
        if L[i] > R[j]:
            input_array[k] = R[j]
            j += 1
        else:
            input_array[k] = L[i]
            i += 1
        k += 1
    # Copy whichever run still has elements left.
    while i < n1:
        input_array[k] = L[i]
        i += 1
        k += 1
    while j < n2:
        input_array[k] = R[j]
        j += 1
        k += 1
    return compare_time
# In[23]:
# Load the benchmark datasets: 'data0.*' files hold one ordering of integers,
# 'data1.*' files another (the plot labels call them "sorted" and "random");
# sizes are powers of two from 1024 to 32768.
# NOTE(review): np.loadtxt returns numpy arrays, so the sort routines below
# operate on ndarrays, not Python lists.
input_1024_0 = np.loadtxt('./data/data0.1024', int)
input_2048_0 = np.loadtxt('./data/data0.2048', int)
input_4096_0 = np.loadtxt('./data/data0.4096', int)
input_8192_0 = np.loadtxt('./data/data0.8192', int)
input_16384_0 = np.loadtxt('./data/data0.16384', int)
input_32768_0 = np.loadtxt('./data/data0.32768', int)
input_1024_1 = np.loadtxt('./data/data1.1024', int)
input_2048_1 = np.loadtxt('./data/data1.2048', int)
input_4096_1 = np.loadtxt('./data/data1.4096', int)
input_8192_1 = np.loadtxt('./data/data1.8192', int)
input_16384_1 = np.loadtxt('./data/data1.16384', int)
input_32768_1 = np.loadtxt('./data/data1.32768', int)
# Interleave the two variants so even indices are the '0' datasets and odd
# indices the '1' datasets, ordered by increasing size.
input_data = [input_1024_0, input_1024_1, input_2048_0, input_2048_1, input_4096_0, input_4096_1, input_8192_0, input_8192_1, input_16384_0, input_16384_1, input_32768_0, input_32768_1]
# In[24]:
# Run the recursive merge sort over every dataset; each entry of `result`
# is a (comparison_count, runtime_seconds) tuple.
result = []
for i in input_data:
    result.append(RecursiveMergeSort(i))
print(result)
# In[8]:
# Split the interleaved results back into per-variant series.
recursive_merge_compare_0 = []
recursive_merge_compare_1 = []
recursive_merge_runtime_0 = []
recursive_merge_runtime_1 = []
for i in range(0, len(result), 2):
    recursive_merge_compare_0.append(result[i][0])
    recursive_merge_runtime_0.append(result[i][1])
    recursive_merge_compare_1.append(result[i+1][0])
    recursive_merge_runtime_1.append(result[i+1][1])
print(recursive_merge_compare_1)
# In[9]:
# Same measurement for the iterative (bottom-up) merge sort.
result = []
for i in input_data:
    result.append(IterativeMergeSort(i))
print(result)
# In[10]:
iterative_merge_compare_0 = []
iterative_merge_compare_1 = []
iterative_merge_runtime_0 = []
iterative_merge_runtime_1 = []
for i in range(0, len(result), 2):
    iterative_merge_compare_0.append(result[i][0])
    iterative_merge_runtime_0.append(result[i][1])
    iterative_merge_compare_1.append(result[i+1][0])
    iterative_merge_runtime_1.append(result[i+1][1])
print(iterative_merge_compare_1)
# In[11]:
# Persist the raw series.  NOTE(review): 'recursice' is a typo in the output
# file names; left as-is because downstream tooling may expect these paths.
np.savetxt('./result/recursice compare 0.txt', recursive_merge_compare_0, fmt='%f')
np.savetxt('./result/recursice compare 1.txt', recursive_merge_compare_1, fmt='%f')
np.savetxt('./result/recursice runtime 0.txt', recursive_merge_runtime_0, fmt='%f')
np.savetxt('./result/recursice runtime 1.txt', recursive_merge_runtime_1, fmt='%f')
np.savetxt('./result/iterative compare 0.txt', iterative_merge_compare_0, fmt='%f')
np.savetxt('./result/iterative compare 1.txt', iterative_merge_compare_1, fmt='%f')
np.savetxt('./result/iterative runtime 0.txt', iterative_merge_runtime_0, fmt='%f')
np.savetxt('./result/iterative runtime 1.txt', iterative_merge_runtime_1, fmt='%f')
# In[12]:
# Plot comparison counts vs. input size for all four series.
input_size = [1024, 2048, 4096, 8192, 16384, 32768]
plt.figure()
plt.plot(input_size, recursive_merge_compare_0, label = 'recursive compare times with sorted data')
plt.plot(input_size, recursive_merge_compare_1, label = 'recursive compare times with random data')
plt.plot(input_size, iterative_merge_compare_0, label = 'iterative compare times with sorted data')
plt.plot(input_size, iterative_merge_compare_1, label = 'iterative compare times with random data')
plt.legend(loc='upper left')
plt.title('Compare times as function of input size')
plt.xlabel('input size')
plt.ylabel('compare times')
#ax = plt.gca()
#ax.get_yaxis().get_major_formatter().set_scientific(False)
plt.savefig('./result/compare times vs input.jpg')
plt.show()
# In[13]:
# Plot wall-clock runtimes vs. input size for all four series.
plt.figure()
plt.plot(input_size, recursive_merge_runtime_0, label = 'recursive runtime with sorted data')
plt.plot(input_size, recursive_merge_runtime_1, label = 'recursive runtime with random data')
plt.plot(input_size, iterative_merge_runtime_0, label = 'iterative runtime with sorted data')
plt.plot(input_size, iterative_merge_runtime_1, label = 'iterative runtime with random data')
plt.legend(loc='upper left')
plt.title('Runtime as function of input size')
plt.xlabel('input size')
plt.ylabel('runtime(s)')
#ax = plt.gca()
#ax.get_yaxis().get_major_formatter().set_scientific(False)
plt.savefig('./result/runtime vs input.jpg')
plt.show()
# In[ ]:
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.savetxt",
"copy.deepcopy",
"matplotlib.pyplot.title",
"numpy.loadtxt",
"time.time",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((3620, 3656), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data0.1024"""', 'int'], {}), "('./data/data0.1024', int)\n", (3630, 3656), True, 'import numpy as np\n'), ((3672, 3708), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data0.2048"""', 'int'], {}), "('./data/data0.2048', int)\n", (3682, 3708), True, 'import numpy as np\n'), ((3724, 3760), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data0.4096"""', 'int'], {}), "('./data/data0.4096', int)\n", (3734, 3760), True, 'import numpy as np\n'), ((3776, 3812), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data0.8192"""', 'int'], {}), "('./data/data0.8192', int)\n", (3786, 3812), True, 'import numpy as np\n'), ((3829, 3866), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data0.16384"""', 'int'], {}), "('./data/data0.16384', int)\n", (3839, 3866), True, 'import numpy as np\n'), ((3883, 3920), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data0.32768"""', 'int'], {}), "('./data/data0.32768', int)\n", (3893, 3920), True, 'import numpy as np\n'), ((3937, 3973), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data1.1024"""', 'int'], {}), "('./data/data1.1024', int)\n", (3947, 3973), True, 'import numpy as np\n'), ((3989, 4025), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data1.2048"""', 'int'], {}), "('./data/data1.2048', int)\n", (3999, 4025), True, 'import numpy as np\n'), ((4041, 4077), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data1.4096"""', 'int'], {}), "('./data/data1.4096', int)\n", (4051, 4077), True, 'import numpy as np\n'), ((4093, 4129), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data1.8192"""', 'int'], {}), "('./data/data1.8192', int)\n", (4103, 4129), True, 'import numpy as np\n'), ((4146, 4183), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data1.16384"""', 'int'], {}), "('./data/data1.16384', int)\n", (4156, 4183), True, 'import numpy as np\n'), ((4200, 4237), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data1.32768"""', 'int'], {}), "('./data/data1.32768', int)\n", (4210, 4237), True, 'import numpy as np\n'), 
((5482, 5569), 'numpy.savetxt', 'np.savetxt', (['"""./result/recursice compare 0.txt"""', 'recursive_merge_compare_0'], {'fmt': '"""%f"""'}), "('./result/recursice compare 0.txt', recursive_merge_compare_0,\n fmt='%f')\n", (5492, 5569), True, 'import numpy as np\n'), ((5566, 5653), 'numpy.savetxt', 'np.savetxt', (['"""./result/recursice compare 1.txt"""', 'recursive_merge_compare_1'], {'fmt': '"""%f"""'}), "('./result/recursice compare 1.txt', recursive_merge_compare_1,\n fmt='%f')\n", (5576, 5653), True, 'import numpy as np\n'), ((5650, 5737), 'numpy.savetxt', 'np.savetxt', (['"""./result/recursice runtime 0.txt"""', 'recursive_merge_runtime_0'], {'fmt': '"""%f"""'}), "('./result/recursice runtime 0.txt', recursive_merge_runtime_0,\n fmt='%f')\n", (5660, 5737), True, 'import numpy as np\n'), ((5734, 5821), 'numpy.savetxt', 'np.savetxt', (['"""./result/recursice runtime 1.txt"""', 'recursive_merge_runtime_1'], {'fmt': '"""%f"""'}), "('./result/recursice runtime 1.txt', recursive_merge_runtime_1,\n fmt='%f')\n", (5744, 5821), True, 'import numpy as np\n'), ((5818, 5905), 'numpy.savetxt', 'np.savetxt', (['"""./result/iterative compare 0.txt"""', 'iterative_merge_compare_0'], {'fmt': '"""%f"""'}), "('./result/iterative compare 0.txt', iterative_merge_compare_0,\n fmt='%f')\n", (5828, 5905), True, 'import numpy as np\n'), ((5902, 5989), 'numpy.savetxt', 'np.savetxt', (['"""./result/iterative compare 1.txt"""', 'iterative_merge_compare_1'], {'fmt': '"""%f"""'}), "('./result/iterative compare 1.txt', iterative_merge_compare_1,\n fmt='%f')\n", (5912, 5989), True, 'import numpy as np\n'), ((5986, 6073), 'numpy.savetxt', 'np.savetxt', (['"""./result/iterative runtime 0.txt"""', 'iterative_merge_runtime_0'], {'fmt': '"""%f"""'}), "('./result/iterative runtime 0.txt', iterative_merge_runtime_0,\n fmt='%f')\n", (5996, 6073), True, 'import numpy as np\n'), ((6070, 6157), 'numpy.savetxt', 'np.savetxt', (['"""./result/iterative runtime 1.txt"""', 'iterative_merge_runtime_1'], 
{'fmt': '"""%f"""'}), "('./result/iterative runtime 1.txt', iterative_merge_runtime_1,\n fmt='%f')\n", (6080, 6157), True, 'import numpy as np\n'), ((6220, 6232), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6230, 6232), True, 'import matplotlib.pyplot as plt\n'), ((6233, 6335), 'matplotlib.pyplot.plot', 'plt.plot', (['input_size', 'recursive_merge_compare_0'], {'label': '"""recursive compare times with sorted data"""'}), "(input_size, recursive_merge_compare_0, label=\n 'recursive compare times with sorted data')\n", (6241, 6335), True, 'import matplotlib.pyplot as plt\n'), ((6333, 6435), 'matplotlib.pyplot.plot', 'plt.plot', (['input_size', 'recursive_merge_compare_1'], {'label': '"""recursive compare times with random data"""'}), "(input_size, recursive_merge_compare_1, label=\n 'recursive compare times with random data')\n", (6341, 6435), True, 'import matplotlib.pyplot as plt\n'), ((6433, 6535), 'matplotlib.pyplot.plot', 'plt.plot', (['input_size', 'iterative_merge_compare_0'], {'label': '"""iterative compare times with sorted data"""'}), "(input_size, iterative_merge_compare_0, label=\n 'iterative compare times with sorted data')\n", (6441, 6535), True, 'import matplotlib.pyplot as plt\n'), ((6533, 6635), 'matplotlib.pyplot.plot', 'plt.plot', (['input_size', 'iterative_merge_compare_1'], {'label': '"""iterative compare times with random data"""'}), "(input_size, iterative_merge_compare_1, label=\n 'iterative compare times with random data')\n", (6541, 6635), True, 'import matplotlib.pyplot as plt\n'), ((6633, 6661), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (6643, 6661), True, 'import matplotlib.pyplot as plt\n'), ((6662, 6714), 'matplotlib.pyplot.title', 'plt.title', (['"""Compare times as function of input size"""'], {}), "('Compare times as function of input size')\n", (6671, 6714), True, 'import matplotlib.pyplot as plt\n'), ((6715, 6739), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""input size"""'], {}), "('input size')\n", (6725, 6739), True, 'import matplotlib.pyplot as plt\n'), ((6740, 6767), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""compare times"""'], {}), "('compare times')\n", (6750, 6767), True, 'import matplotlib.pyplot as plt\n'), ((6844, 6894), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./result/compare times vs input.jpg"""'], {}), "('./result/compare times vs input.jpg')\n", (6855, 6894), True, 'import matplotlib.pyplot as plt\n'), ((6895, 6905), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6903, 6905), True, 'import matplotlib.pyplot as plt\n'), ((6920, 6932), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6930, 6932), True, 'import matplotlib.pyplot as plt\n'), ((6933, 7029), 'matplotlib.pyplot.plot', 'plt.plot', (['input_size', 'recursive_merge_runtime_0'], {'label': '"""recursive runtime with sorted data"""'}), "(input_size, recursive_merge_runtime_0, label=\n 'recursive runtime with sorted data')\n", (6941, 7029), True, 'import matplotlib.pyplot as plt\n'), ((7027, 7123), 'matplotlib.pyplot.plot', 'plt.plot', (['input_size', 'recursive_merge_runtime_1'], {'label': '"""recursive runtime with random data"""'}), "(input_size, recursive_merge_runtime_1, label=\n 'recursive runtime with random data')\n", (7035, 7123), True, 'import matplotlib.pyplot as plt\n'), ((7121, 7217), 'matplotlib.pyplot.plot', 'plt.plot', (['input_size', 'iterative_merge_runtime_0'], {'label': '"""iterative runtime with sorted data"""'}), "(input_size, iterative_merge_runtime_0, label=\n 'iterative runtime with sorted data')\n", (7129, 7217), True, 'import matplotlib.pyplot as plt\n'), ((7215, 7311), 'matplotlib.pyplot.plot', 'plt.plot', (['input_size', 'iterative_merge_runtime_1'], {'label': '"""iterative runtime with random data"""'}), "(input_size, iterative_merge_runtime_1, label=\n 'iterative runtime with random data')\n", (7223, 7311), True, 'import matplotlib.pyplot as plt\n'), ((7309, 7337), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (7319, 7337), True, 'import matplotlib.pyplot as plt\n'), ((7338, 7384), 'matplotlib.pyplot.title', 'plt.title', (['"""Runtime as function of input size"""'], {}), "('Runtime as function of input size')\n", (7347, 7384), True, 'import matplotlib.pyplot as plt\n'), ((7385, 7409), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""input size"""'], {}), "('input size')\n", (7395, 7409), True, 'import matplotlib.pyplot as plt\n'), ((7410, 7434), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""runtime(s)"""'], {}), "('runtime(s)')\n", (7420, 7434), True, 'import matplotlib.pyplot as plt\n'), ((7511, 7555), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./result/runtime vs input.jpg"""'], {}), "('./result/runtime vs input.jpg')\n", (7522, 7555), True, 'import matplotlib.pyplot as plt\n'), ((7556, 7566), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7564, 7566), True, 'import matplotlib.pyplot as plt\n'), ((310, 321), 'time.time', 'time.time', ([], {}), '()\n', (319, 321), False, 'import time\n'), ((1943, 1954), 'time.time', 'time.time', ([], {}), '()\n', (1952, 1954), False, 'import time\n'), ((2019, 2045), 'copy.deepcopy', 'copy.deepcopy', (['input_array'], {}), '(input_array)\n', (2032, 2045), False, 'import copy\n'), ((2683, 2694), 'time.time', 'time.time', ([], {}), '()\n', (2692, 2694), False, 'import time\n'), ((381, 407), 'copy.deepcopy', 'copy.deepcopy', (['input_array'], {}), '(input_array)\n', (394, 407), False, 'import copy\n'), ((1601, 1612), 'time.time', 'time.time', ([], {}), '()\n', (1610, 1612), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 8 19:47:52 2019
@author: Zhou
"""
import torch
from Utils import load
from Data import load_data
from Modules import BasicDecoder, RNNEncoder
from Models import Model, MetaTranslator
from Train import MetaTrainer
import warnings
warnings.filterwarnings("ignore")
device = 'cuda' if torch.cuda.is_available() else 'cpu'
meta_epoches = 15
meta_batch_size = 20
adapt_lr = 0.4
n = 5
if __name__ == '__main__':
    # Evaluation metrics are only needed when run as a script.
    from Utils import batch_bleu, batch_meteor, batch_rouge
    # Build meta-learning data generators for train/validation.
    # NOTE(review): load_data's k/epsilon semantics are project-specific —
    # presumably k is tasks-per-episode and epsilon a sampling parameter;
    # confirm against Data.load_data.
    fields, train_gen = load_data('train', 'meta', batch_size=meta_batch_size, k=1, epsilon=0.7,
                                  device=device, meta_weights=True)
    _, val_gen = load_data('valid', 'meta', batch_size=meta_batch_size, k=10, epsilon=0.7,
                           device=device, meta_weights=True)
    # Seq2seq model: bidirectional RNN encoder + attentional decoder,
    # warm-started from the pre-trained NMT checkpoint.
    e = RNNEncoder(fields[0], bidirectional=True)
    d = BasicDecoder(fields[1], memory_dim=e.units * 2, glob_attn='mul')
    model = Model(e, d)
    model = model.to(device)
    checkpoint = torch.load('checkpoints/nmt.pt')
    model.load_state_dict(checkpoint['model'])
    # First-order meta-training (MAML-style) with BLEU-based validation;
    # best weights are written to checkpoints/nmt_meta.pt.
    trainer = MetaTrainer(model, epoches=meta_epoches, temperature=0,
                          metrics=['bleu'], smooth=0, patience=4, save_per_epoch=False,
                          beam_width=5, length_penalty=1, val_metric='bleu',
                          adapt_lr=adapt_lr, first_order=True,
                          save_path='checkpoints/nmt_meta.pt')
    reports = trainer(train_gen, val_gen)
    ####################################################################################
    # Test-time: reload the meta-trained weights and adapt for n steps per
    # task with a scaled-down inner learning rate before translating.
    _, test_gen = load_data('test', 'meta', batch_size=meta_batch_size, k=10, epsilon=0.7, meta_weights=True)
    trainer = MetaTrainer(model, adapt_lr=adapt_lr / (n - 1) if n > 2 else 2/3 * adapt_lr,
                          load_path='checkpoints/nmt_meta.pt')
    evaluator = MetaTranslator(trainer.model, metrics=[], adapt_steps=n, unk_replace=False)
    predicts, reports = evaluator(test_gen, save_path='predicts/nmt_meta.txt')
    ####################################################################################
    # Score the predictions: BLEU on tokenised output, ROUGE/METEOR on
    # whitespace-joined strings.
    hyp = [s.split() for s in predicts]
    ref = load('data/preprocessed/test.nl.json', is_json=True)
    bleu_4 = batch_bleu(hyp, ref, smooth_method=0)
    print('BLEU-4: {:.2f}'.format(bleu_4 * 100))
    bleu_s = batch_bleu(hyp, ref, smooth_method=3)
    print('Smoothed BLEU-4: {:.2f}'.format(bleu_s * 100))
    hyp = predicts
    ref = [' '.join(s) for s in ref]
    rouge = batch_rouge(hyp, ref)
    print('ROUGE-L: {:.2f}'.format(rouge['rouge-l']['f'] * 100))
    meteor = batch_meteor(hyp, ref)
    print('METEOR: {:.2f}'.format(meteor * 100))
| [
"Modules.BasicDecoder",
"torch.load",
"Models.MetaTranslator",
"Utils.batch_bleu",
"torch.cuda.is_available",
"Utils.batch_rouge",
"Utils.batch_meteor",
"Train.MetaTrainer",
"Modules.RNNEncoder",
"Utils.load",
"Data.load_data",
"warnings.filterwarnings",
"Models.Model"
] | [((291, 324), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (314, 324), False, 'import warnings\n'), ((345, 370), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (368, 370), False, 'import torch\n'), ((570, 680), 'Data.load_data', 'load_data', (['"""train"""', '"""meta"""'], {'batch_size': 'meta_batch_size', 'k': '(1)', 'epsilon': '(0.7)', 'device': 'device', 'meta_weights': '(True)'}), "('train', 'meta', batch_size=meta_batch_size, k=1, epsilon=0.7,\n device=device, meta_weights=True)\n", (579, 680), False, 'from Data import load_data\n'), ((730, 841), 'Data.load_data', 'load_data', (['"""valid"""', '"""meta"""'], {'batch_size': 'meta_batch_size', 'k': '(10)', 'epsilon': '(0.7)', 'device': 'device', 'meta_weights': '(True)'}), "('valid', 'meta', batch_size=meta_batch_size, k=10, epsilon=0.7,\n device=device, meta_weights=True)\n", (739, 841), False, 'from Data import load_data\n'), ((877, 918), 'Modules.RNNEncoder', 'RNNEncoder', (['fields[0]'], {'bidirectional': '(True)'}), '(fields[0], bidirectional=True)\n', (887, 918), False, 'from Modules import BasicDecoder, RNNEncoder\n'), ((928, 992), 'Modules.BasicDecoder', 'BasicDecoder', (['fields[1]'], {'memory_dim': '(e.units * 2)', 'glob_attn': '"""mul"""'}), "(fields[1], memory_dim=e.units * 2, glob_attn='mul')\n", (940, 992), False, 'from Modules import BasicDecoder, RNNEncoder\n'), ((1006, 1017), 'Models.Model', 'Model', (['e', 'd'], {}), '(e, d)\n', (1011, 1017), False, 'from Models import Model, MetaTranslator\n'), ((1066, 1098), 'torch.load', 'torch.load', (['"""checkpoints/nmt.pt"""'], {}), "('checkpoints/nmt.pt')\n", (1076, 1098), False, 'import torch\n'), ((1168, 1423), 'Train.MetaTrainer', 'MetaTrainer', (['model'], {'epoches': 'meta_epoches', 'temperature': '(0)', 'metrics': "['bleu']", 'smooth': '(0)', 'patience': '(4)', 'save_per_epoch': '(False)', 'beam_width': '(5)', 'length_penalty': '(1)', 'val_metric': '"""bleu"""', 
'adapt_lr': 'adapt_lr', 'first_order': '(True)', 'save_path': '"""checkpoints/nmt_meta.pt"""'}), "(model, epoches=meta_epoches, temperature=0, metrics=['bleu'],\n smooth=0, patience=4, save_per_epoch=False, beam_width=5,\n length_penalty=1, val_metric='bleu', adapt_lr=adapt_lr, first_order=\n True, save_path='checkpoints/nmt_meta.pt')\n", (1179, 1423), False, 'from Train import MetaTrainer\n'), ((1669, 1764), 'Data.load_data', 'load_data', (['"""test"""', '"""meta"""'], {'batch_size': 'meta_batch_size', 'k': '(10)', 'epsilon': '(0.7)', 'meta_weights': '(True)'}), "('test', 'meta', batch_size=meta_batch_size, k=10, epsilon=0.7,\n meta_weights=True)\n", (1678, 1764), False, 'from Data import load_data\n'), ((1782, 1901), 'Train.MetaTrainer', 'MetaTrainer', (['model'], {'adapt_lr': '(adapt_lr / (n - 1) if n > 2 else 2 / 3 * adapt_lr)', 'load_path': '"""checkpoints/nmt_meta.pt"""'}), "(model, adapt_lr=adapt_lr / (n - 1) if n > 2 else 2 / 3 *\n adapt_lr, load_path='checkpoints/nmt_meta.pt')\n", (1793, 1901), False, 'from Train import MetaTrainer\n'), ((1940, 2015), 'Models.MetaTranslator', 'MetaTranslator', (['trainer.model'], {'metrics': '[]', 'adapt_steps': 'n', 'unk_replace': '(False)'}), '(trainer.model, metrics=[], adapt_steps=n, unk_replace=False)\n', (1954, 2015), False, 'from Models import Model, MetaTranslator\n'), ((2240, 2292), 'Utils.load', 'load', (['"""data/preprocessed/test.nl.json"""'], {'is_json': '(True)'}), "('data/preprocessed/test.nl.json', is_json=True)\n", (2244, 2292), False, 'from Utils import load\n'), ((2307, 2344), 'Utils.batch_bleu', 'batch_bleu', (['hyp', 'ref'], {'smooth_method': '(0)'}), '(hyp, ref, smooth_method=0)\n', (2317, 2344), False, 'from Utils import batch_bleu, batch_meteor, batch_rouge\n'), ((2409, 2446), 'Utils.batch_bleu', 'batch_bleu', (['hyp', 'ref'], {'smooth_method': '(3)'}), '(hyp, ref, smooth_method=3)\n', (2419, 2446), False, 'from Utils import batch_bleu, batch_meteor, batch_rouge\n'), ((2577, 2598), 
'Utils.batch_rouge', 'batch_rouge', (['hyp', 'ref'], {}), '(hyp, ref)\n', (2588, 2598), False, 'from Utils import batch_bleu, batch_meteor, batch_rouge\n'), ((2679, 2701), 'Utils.batch_meteor', 'batch_meteor', (['hyp', 'ref'], {}), '(hyp, ref)\n', (2691, 2701), False, 'from Utils import batch_bleu, batch_meteor, batch_rouge\n')] |
# Generated by Django 4.0 on 2021-12-13 17:54
from django.db import migrations
_CAR_GOODS = 'Автотовары'
_APPLIANCES = 'Бытовая техника'


def _create_categories(apps, schema_editor) -> None:
    """Seed the two initial product categories (idempotent)."""
    # noinspection PyPep8Naming
    category_model = apps.get_model('shop', 'Category')
    for category_name in (_CAR_GOODS, _APPLIANCES):
        category_model.objects.get_or_create(name=category_name)


def _create_products(apps, schema_editor) -> None:
    """Seed one demo product in each category (idempotent)."""
    # noinspection PyPep8Naming
    product_model = apps.get_model('shop', 'Product')
    # noinspection PyPep8Naming
    category_model = apps.get_model('shop', 'Category')
    seed_rows = (
        ('Зимняя резина', _CAR_GOODS, 4990.00),
        ('Холодильник', _APPLIANCES, 49990.00),
    )
    for product_name, category_name, product_price in seed_rows:
        product_model.objects.get_or_create(
            name=product_name,
            category=category_model.objects.get(name=category_name),
            price=product_price,
        )
class Migration(migrations.Migration):
    """Data migration that seeds the initial categories and demo products."""
    dependencies = [
        ('shop', '0001_initial'),
    ]
    operations = [
        # Reverse migrations are deliberate no-ops: unapplying this
        # migration leaves the seeded rows in place.
        migrations.RunPython(
            code=_create_categories,
            reverse_code=migrations.RunPython.noop,
        ),
        migrations.RunPython(
            code=_create_products,
            reverse_code=migrations.RunPython.noop,
        ),
    ]
| [
"django.db.migrations.RunPython"
] | [((1091, 1181), 'django.db.migrations.RunPython', 'migrations.RunPython', ([], {'code': '_create_categories', 'reverse_code': 'migrations.RunPython.noop'}), '(code=_create_categories, reverse_code=migrations.\n RunPython.noop)\n', (1111, 1181), False, 'from django.db import migrations\n'), ((1221, 1309), 'django.db.migrations.RunPython', 'migrations.RunPython', ([], {'code': '_create_products', 'reverse_code': 'migrations.RunPython.noop'}), '(code=_create_products, reverse_code=migrations.\n RunPython.noop)\n', (1241, 1309), False, 'from django.db import migrations\n')] |
from ibapi.client import EClient
from ibapi.wrapper import EWrapper
from ibapi.contract import Contract
from ibapi.order import Order
from ibapi.scanner import ScannerSubscription
from ibapi.ticktype import TickTypeEnum
from ibapi.common import *
from ibapi.tag_value import TagValue
from ibapi.execution import ExecutionFilter
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
from datetime import datetime
from time import sleep, strftime, localtime, time
sleeptime = 5
class AccountManagement:
    def read_nextvalidid(self):
        """Connect to TWS/IB Gateway on 127.0.0.1:7497 and return the next
        valid order id announced by the API."""
        class TestApp(EWrapper, EClient):
            # Throwaway client: records the nextValidId callback and
            # disconnects as soon as it arrives, which ends app.run().
            def __init__(self):
                EClient.__init__(self, self)
                self.nextValidOrderId = []
            def error(self, reqId: TickerId, errorCode: int, errorString: str):
                # reqId <= -1 messages are connection-level notices; only
                # print request-specific errors.
                if reqId > -1:
                    print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
            def nextValidId(self, orderId):
                super().nextValidId(orderId)
                self.nextValidOrderId.append(orderId)
                print("NextValidId:", orderId)
                self.disconnect()
        app = TestApp()
        app.connect('127.0.0.1', 7497, 0)
        sleep(sleeptime)  # give the socket time to come up before requesting ids
        app.reqIds(-1)
        nid = app.nextValidOrderId
        app.run()  # blocks until nextValidId() calls disconnect()
        return nid[0]
    def placing_orders(self, symbol, sec_type, exch, prim_exch, curr, order_type, quantity, action):
        """Build a Contract/Order pair from the arguments, submit it to TWS
        and return ``(order, contract)``.

        NOTE(review): ``orderId`` used in placeOrder below is not defined in
        this method or anywhere visible - this raises NameError unless a
        global ``orderId`` is set elsewhere; it should probably be a
        parameter (cf. read_nextvalidid()).
        """
        contract = Contract()
        contract.symbol = symbol
        contract.secType = sec_type
        contract.exchange = exch
        contract.primaryExchange = prim_exch
        contract.currency = curr
        order = Order()
        order.orderType = order_type
        order.totalQuantity = quantity
        order.action = action
        class TestApp(EWrapper, EClient):
            def __init__(self):
                EClient.__init__(self, self)
            def error(self, reqId: TickerId, errorCode: int, errorString: str):
                if reqId > -1:
                    print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
        app = TestApp()
        app.connect('127.0.0.1', 7497, 0)
        app.placeOrder(orderId=orderId, contract=contract, order=order)
        print('order quantity placed for {} is: {} '.format(contract.symbol, order.totalQuantity))
        sleep(sleeptime)
        return order, contract
        # NOTE(review): the two statements below are unreachable (after
        # return) - the connection is never closed and run() never starts.
        app.disconnect()
        app.run()
    def read_positions(self, subscribe, acctCode):
        """Subscribe to account updates and return the current portfolio as
        a DataFrame indexed by symbol (zero-position rows are dropped)."""
        class TestApp(EWrapper, EClient):
            def __init__(self):
                EClient.__init__(self, self)
                # One row per symbol, filled by updatePortfolio callbacks.
                self.up = pd.DataFrame([], columns=['Position', 'marketPrice', 'marketValue', 'averageCost',
                                               'unrealizedPNL', 'realizedPNL'])
            def error(self, reqId: TickerId, errorCode: int, errorString: str):
                if reqId > -1:
                    print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
            def updatePortfolio(self, contract, position, marketPrice, marketValue, averageCost, unrealizedPNL,
                                realizedPNL, accountName):
                self.up.index.name = 'Symbol'
                self.up.loc[
                    contract.symbol] = position, marketPrice, marketValue, averageCost, unrealizedPNL, realizedPNL
            def positionEnd(self):
                # All positions delivered: stop the subscription and the
                # message loop.
                super().positionEnd()
                print("PositionEnd")
                self.cancelPositions()
                self.disconnect()
        app = TestApp()
        app.connect('127.0.0.1', 7497, 0)
        sleep(sleeptime)
        app.reqAccountUpdates(subscribe=subscribe, acctCode=acctCode)
        app.reqPositions()
        update = app.up
        app.run()  # blocks until positionEnd() disconnects
        print('Reading Portfolio')
        # Drop closed positions (quantity 0) before returning.
        rows = update[update['Position'] == 0].index
        update.drop(rows, axis=0, inplace=True)
        return update
    def read_account(self, subscribe, acctCode):
        """Return account key/value data (net liquidation, cash, etc.) as a
        one-column DataFrame indexed by key."""
        class TestApp(EWrapper, EClient):
            def __init__(self):
                EClient.__init__(self, self)
                # Filled by updateAccountValue callbacks: one row per key.
                self.up = pd.DataFrame([], columns=['Values'])
            def error(self, reqId: TickerId, errorCode: int, errorString: str):
                if reqId > -1:
                    print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
            def updateAccountValue(self, key, value, currency, accountName):
                self.up.index.name = 'Keys'
                self.up.loc[key] = value
            def accountDownloadEnd(self, account):
                # Initial account snapshot complete: stop the message loop.
                print("AccountDownloadEnd. Account:", account)
                self.disconnect()
        app = TestApp()
        app.connect('127.0.0.1', 7497, 0)
        sleep(sleeptime)
        app.reqAccountUpdates(subscribe=subscribe, acctCode=acctCode)
        update = app.up
        app.reqAccountUpdates(False, acctCode)  # unsubscribe again
        app.run()  # blocks until accountDownloadEnd() disconnects
        print('Reading Account')
        return update
    def cancel_openorders(self):
        """Snapshot all open orders into a DataFrame, then globally cancel
        them; returns the pre-cancellation snapshot."""
        class TestApp(EWrapper, EClient):
            def __init__(self):
                EClient.__init__(self, self)
                # One row per symbol, captured from openOrder callbacks
                # before the global cancel is issued.
                self.open_orders = pd.DataFrame(columns=['action', 'quantity',
                                                      'type', 'algoStrategy',
                                                      'algoParams', 'pre_status'])
            def error(self, reqId: TickerId, errorCode: int, errorString: str):
                if reqId > -1:
                    print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
            def cancelOrder(self, orderId):
                super().cancelOrder(orderId)
                print('cancel order ended')
            def openOrder(self, orderId, Contract, Order, OrderState):
                super().openOrder(orderId, Contract, Order, OrderState)
                self.open_orders.loc[Contract.symbol, :] = [Order.action,
                                                            Order.totalQuantity,
                                                            Order.orderType,
                                                            Order.algoStrategy,
                                                            Order.algoParams[0],
                                                            OrderState.status]
            def openOrderEnd(self):
                # Snapshot finished: stop the message loop.
                super().openOrderEnd()
                print('open order ended')
                self.disconnect()
        app = TestApp()
        app.connect('127.0.0.1', 7497, 0)
        sleep(sleeptime)
        app.reqIds(-1)
        app.reqAllOpenOrders()
        open_orders = app.open_orders
        app.reqGlobalCancel()  # cancel everything that was just listed
        app.run()  # blocks until openOrderEnd() disconnects
        return open_orders
    def get_openorders(self):
        """Return all currently open orders as a DataFrame indexed by
        symbol (read-only; nothing is cancelled)."""
        class TestApp(EWrapper, EClient):
            def __init__(self):
                EClient.__init__(self, self)
                # One row per symbol, filled from openOrder callbacks.
                self.open_orders = pd.DataFrame(columns=['action', 'open orders',
                                                      'type', 'algoStrategy',
                                                      'algoParams', 'status'])
            def error(self, reqId: TickerId, errorCode: int, errorString: str):
                if reqId > -1:
                    print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
            def openOrder(self, orderId, Contract, Order, OrderState):
                super().openOrder(orderId, Contract, Order, OrderState)
                self.open_orders.loc[Contract.symbol, :] = [Order.action,
                                                            Order.totalQuantity,
                                                            Order.orderType,
                                                            Order.algoStrategy,
                                                            Order.algoParams[0],
                                                            OrderState.status]
            def openOrderEnd(self):
                # Snapshot finished: stop the message loop.
                super().openOrderEnd()
                print('open order ended')
                self.disconnect()
        app = TestApp()
        app.connect('127.0.0.1', 7497, 0)
        app.reqIds(-1)
        app.reqAllOpenOrders()
        sleep(sleeptime)
        open_orders = app.open_orders
        app.run()  # blocks until openOrderEnd() disconnects
        return open_orders
    def closing_positions(self, portfolio, order_id, ordersPriority, transmit):
        """Flatten positions with adaptive market orders and return the next
        free order id.

        For each symbol in ``stock_to_close``, places a MKT order opposite
        to the current position sign (SELL for longs, BUY for shorts) using
        the IB 'Adaptive' algo with the given priority.

        NOTE(review): ``stock_to_close`` is not defined in this method or
        its parameters - it must be a module-level global set elsewhere;
        consider passing it in explicitly.
        """
        class TestApp(EWrapper, EClient):
            def __init__(self):
                EClient.__init__(self, self)
            def error(self, reqId: TickerId, errorCode: int, errorString: str):
                if reqId > -1:
                    print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
        app = TestApp()
        app.connect('127.0.0.1', 7497, 0)
        if app.isConnected():
            print('app is running ...')
            print('closing {} positions which are not present in action'.format(len(stock_to_close)))
            # Closing Position
            for i in stock_to_close:
                contract = Contract()
                contract.symbol = i
                contract.secType = 'STK'
                contract.exchange = 'SMART'
                # contract.primaryExchange = 'ISLAND'
                contract.currency = 'USD'
                order = Order()
                order.orderType = 'MKT'
                # Close the whole position: order size = |current position|.
                order.totalQuantity = int(np.abs(portfolio.loc[i, 'Position']))
                order.transmit = transmit
                if portfolio.loc[i, 'Position'] > 0:
                    order.action = 'SELL'
                    # order.cashQty = weigth * 1.5 * net_liq
                    order.algoStrategy = 'Adaptive'
                    order.algoParams = []
                    order.algoParams.append(TagValue("adaptivePriority", ordersPriority))
                    app.placeOrder(orderId=order_id, contract=contract, order=order)
                    sleep(sleeptime)
                    order_id = order_id + 1
                    print('closing position for {} is: {} '.format(contract.symbol, order.totalQuantity))
                elif portfolio.loc[i, 'Position'] < 0:
                    order.action = 'BUY'
                    # order.cashQty = weigth * 1.5 * net_liq
                    order.algoStrategy = 'Adaptive'
                    order.algoParams = []
                    order.algoParams.append(TagValue("adaptivePriority", ordersPriority))
                    app.placeOrder(orderId=order_id, contract=contract, order=order)
                    sleep(sleeptime)
                    order_id = order_id + 1
                    print('closing position for {} is: {} '.format(contract.symbol, order.totalQuantity))
        else:
            print('app not connected')
        app.disconnect()
        return order_id + 1
    def rebalancing_to_leverage(self, order_id, ordersPriority, transmit):
        """Place market orders sized by the module-level ``action_balance`` table.

        Positive ``shares`` -> BUY, negative -> SELL; all orders use IB's
        'Adaptive' algo with the given priority.  ``transmit=False`` stages
        the orders in TWS without sending them.

        Relies on module-level names: ``action_balance``, ``sleeptime``.
        NOTE(review): unlike ``closing_positions`` this does not return the
        next order id -- callers must track it themselves; confirm intended.
        """
        # Minimal IB client: only needed to place orders and surface errors.
        class TestApp(EWrapper, EClient):
            def __init__(self):
                EClient.__init__(self, self)
            def error(self, reqId: TickerId, errorCode: int, errorString: str):
                if reqId > -1:
                    print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
        app = TestApp()
        app.connect('127.0.0.1', 7497, 0)  # 7497 = default TWS paper-trading port
        if app.isConnected():
            print('app is running ...')
            print('balancing {} positions'.format(len(action_balance.index)))
            # One adaptive market order per ticker in the balance table.
            for i in action_balance.index:
                contract = Contract()
                contract.symbol = i
                contract.secType = 'STK'
                contract.exchange = 'SMART'
                contract.currency = 'USD'
                order = Order()
                order.orderType = 'MKT'
                order.totalQuantity = np.abs(action_balance.loc[i, 'shares'])
                order.transmit = transmit
                if action_balance.loc[i, 'shares'] > 0:
                    order.action = 'BUY'
                    order.algoStrategy = 'Adaptive'
                    order.algoParams = []
                    order.algoParams.append(TagValue("adaptivePriority", ordersPriority))
                    app.placeOrder(orderId=order_id, contract=contract, order=order)
                    sleep(sleeptime)  # give TWS time to register the order
                    order_id = order_id + 1
                    print(' buy order quantity placed for {} is: {} '.format(contract.symbol, order.totalQuantity))
                elif action_balance.loc[i, 'shares'] < 0:
                    order.action = 'SELL'
                    order.algoStrategy = 'Adaptive'
                    order.algoParams = []
                    order.algoParams.append(TagValue("adaptivePriority", ordersPriority))
                    app.placeOrder(orderId=order_id, contract=contract, order=order)
                    sleep(sleeptime)
                    order_id = order_id + 1
                    print(' sell order quantity placed for {} is: {} '.format(contract.symbol, order.totalQuantity))
        else:
            print('app not connected')
        app.disconnect()
    def placing_final_orders(self, order_id, ordersPriority, transmit):
        """Place market orders from the module-level ``action_final`` table.

        The first column of each row gives the signed quantity (positive ->
        BUY, negative -> SELL).  All orders use IB's 'Adaptive' algo with the
        given priority; ``transmit=False`` stages them without sending.

        Relies on module-level names: ``action_final``, ``sleeptime``.
        NOTE(review): no ``isConnected`` guard here (unlike the sibling
        methods) and no order-id return -- confirm intended.
        """
        # Minimal IB client: only needed to place orders and surface errors.
        class TestApp(EWrapper, EClient):
            def __init__(self):
                EClient.__init__(self, self)
            def error(self, reqId: TickerId, errorCode: int, errorString: str):
                if reqId > -1:
                    print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
        app = TestApp()
        app.connect('127.0.0.1', 7497, 0)  # 7497 = default TWS paper-trading port
        for ticker in action_final.index:
            contract = Contract()
            contract.symbol = ticker
            contract.secType = 'STK'
            contract.exchange = 'SMART'
            # contract.primaryExchange = 'ISLAND'
            contract.currency = 'USD'
            order = Order()
            order.orderType = 'MKT'
            order.transmit = transmit
            order.totalQuantity = np.abs(action_final.loc[ticker])[0]
            if action_final.loc[ticker][0] > 0:
                order.action = 'BUY'
                order.algoStrategy = 'Adaptive'
                order.algoParams = []
                order.algoParams.append(TagValue("adaptivePriority", ordersPriority))
                app.placeOrder(orderId=order_id, contract=contract, order=order)
                sleep(sleeptime)  # give TWS time to register the order
                order_id = order_id + 1
                print('buy order quantity placed for {} is: {} '.format(contract.symbol, order.totalQuantity))
            elif action_final.loc[ticker][0] < 0:
                order.action = 'SELL'
                order.algoStrategy = 'Adaptive'
                order.algoParams = []
                order.algoParams.append(TagValue("adaptivePriority", ordersPriority))
                app.placeOrder(orderId=order_id, contract=contract, order=order)
                sleep(sleeptime)
                order_id = order_id + 1
                print('sell order quantity placed for {} is: {} '.format(contract.symbol, order.totalQuantity))
        app.disconnect()
def commission_report(self, time):
class TestApp(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
self.executed_orders = pd.DataFrame(columns=['ticker',
'time', 'shares', 'action',
'price', 'marketValue',
'RealizedPNL', 'commission'])
self.val = 0
self.val2 = 0
def error(self, reqId: TickerId, errorCode: int, errorString: str):
if reqId > -1:
print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
def execDetails(self, reqId, contract, execution):
super().execDetails(reqId, contract, execution)
self.executed_orders.loc[self.val, ['ticker',
'time',
'shares',
'action',
'price',
'marketValue']] = [contract.symbol,
pd.to_datetime(execution.time),
execution.shares, execution.side,
execution.price,
execution.shares * execution.price]
self.val = self.val + 1
def commissionReport(self, commissionReport):
super().commissionReport(commissionReport)
self.executed_orders.loc[self.val2, ['RealizedPNL', 'commission']] = [
float(commissionReport.realizedPNL),
float(commissionReport.commission)]
self.val2 = self.val2 + 1
def execDetailsEnd(self, reqId):
super().execDetailsEnd(reqId)
self.disconnect()
app = TestApp()
app.connect('127.0.0.1', 7497, 0)
execution_filter = ExecutionFilter()
execution_filter.acctCode = acctCode
execution_filter.time = time
app.reqExecutions(0, execution_filter)
sleep(sleeptime)
df = app.executed_orders
app.run()
sleep(sleeptime)
df.set_index('time', inplace=True)
df.sort_index(inplace=True)
df['RealizedPNL'][df['RealizedPNL'] > 1000000] = 'OPEN'
return df
| [
"numpy.abs",
"ibapi.client.EClient.__init__",
"time.sleep",
"ibapi.tag_value.TagValue",
"ibapi.contract.Contract",
"ibapi.order.Order",
"pandas.DataFrame",
"ibapi.execution.ExecutionFilter",
"pandas.to_datetime"
] | [((1277, 1293), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (1282, 1293), False, 'from time import sleep, strftime, localtime, time\n'), ((1517, 1527), 'ibapi.contract.Contract', 'Contract', ([], {}), '()\n', (1525, 1527), False, 'from ibapi.contract import Contract\n'), ((1725, 1732), 'ibapi.order.Order', 'Order', ([], {}), '()\n', (1730, 1732), False, 'from ibapi.order import Order\n'), ((2413, 2429), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (2418, 2429), False, 'from time import sleep, strftime, localtime, time\n'), ((3699, 3715), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (3704, 3715), False, 'from time import sleep, strftime, localtime, time\n'), ((4844, 4860), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (4849, 4860), False, 'from time import sleep, strftime, localtime, time\n'), ((6664, 6680), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (6669, 6680), False, 'from time import sleep, strftime, localtime, time\n'), ((8353, 8369), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (8358, 8369), False, 'from time import sleep, strftime, localtime, time\n'), ((17579, 17596), 'ibapi.execution.ExecutionFilter', 'ExecutionFilter', ([], {}), '()\n', (17594, 17596), False, 'from ibapi.execution import ExecutionFilter\n'), ((17735, 17751), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (17740, 17751), False, 'from time import sleep, strftime, localtime, time\n'), ((17812, 17828), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (17817, 17828), False, 'from time import sleep, strftime, localtime, time\n'), ((13800, 13810), 'ibapi.contract.Contract', 'Contract', ([], {}), '()\n', (13808, 13810), False, 'from ibapi.contract import Contract\n'), ((14034, 14041), 'ibapi.order.Order', 'Order', ([], {}), '()\n', (14039, 14041), False, 'from ibapi.order import Order\n'), ((699, 727), 'ibapi.client.EClient.__init__', 'EClient.__init__', (['self', 
'self'], {}), '(self, self)\n', (715, 727), False, 'from ibapi.client import EClient\n'), ((1931, 1959), 'ibapi.client.EClient.__init__', 'EClient.__init__', (['self', 'self'], {}), '(self, self)\n', (1947, 1959), False, 'from ibapi.client import EClient\n'), ((2650, 2678), 'ibapi.client.EClient.__init__', 'EClient.__init__', (['self', 'self'], {}), '(self, self)\n', (2666, 2678), False, 'from ibapi.client import EClient\n'), ((2705, 2824), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {'columns': "['Position', 'marketPrice', 'marketValue', 'averageCost', 'unrealizedPNL',\n 'realizedPNL']"}), "([], columns=['Position', 'marketPrice', 'marketValue',\n 'averageCost', 'unrealizedPNL', 'realizedPNL'])\n", (2717, 2824), True, 'import pandas as pd\n'), ((4160, 4188), 'ibapi.client.EClient.__init__', 'EClient.__init__', (['self', 'self'], {}), '(self, self)\n', (4176, 4188), False, 'from ibapi.client import EClient\n'), ((4215, 4251), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {'columns': "['Values']"}), "([], columns=['Values'])\n", (4227, 4251), True, 'import pandas as pd\n'), ((5206, 5234), 'ibapi.client.EClient.__init__', 'EClient.__init__', (['self', 'self'], {}), '(self, self)\n', (5222, 5234), False, 'from ibapi.client import EClient\n'), ((5270, 5370), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['action', 'quantity', 'type', 'algoStrategy', 'algoParams', 'pre_status']"}), "(columns=['action', 'quantity', 'type', 'algoStrategy',\n 'algoParams', 'pre_status'])\n", (5282, 5370), True, 'import pandas as pd\n'), ((6975, 7003), 'ibapi.client.EClient.__init__', 'EClient.__init__', (['self', 'self'], {}), '(self, self)\n', (6991, 7003), False, 'from ibapi.client import EClient\n'), ((7039, 7138), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['action', 'open orders', 'type', 'algoStrategy', 'algoParams', 'status']"}), "(columns=['action', 'open orders', 'type', 'algoStrategy',\n 'algoParams', 'status'])\n", (7051, 7138), True, 'import pandas as pd\n'), 
((8629, 8657), 'ibapi.client.EClient.__init__', 'EClient.__init__', (['self', 'self'], {}), '(self, self)\n', (8645, 8657), False, 'from ibapi.client import EClient\n'), ((9200, 9210), 'ibapi.contract.Contract', 'Contract', ([], {}), '()\n', (9208, 9210), False, 'from ibapi.contract import Contract\n'), ((9453, 9460), 'ibapi.order.Order', 'Order', ([], {}), '()\n', (9458, 9460), False, 'from ibapi.order import Order\n'), ((11132, 11160), 'ibapi.client.EClient.__init__', 'EClient.__init__', (['self', 'self'], {}), '(self, self)\n', (11148, 11160), False, 'from ibapi.client import EClient\n'), ((11685, 11695), 'ibapi.contract.Contract', 'Contract', ([], {}), '()\n', (11693, 11695), False, 'from ibapi.contract import Contract\n'), ((11884, 11891), 'ibapi.order.Order', 'Order', ([], {}), '()\n', (11889, 11891), False, 'from ibapi.order import Order\n'), ((11970, 12009), 'numpy.abs', 'np.abs', (["action_balance.loc[i, 'shares']"], {}), "(action_balance.loc[i, 'shares'])\n", (11976, 12009), True, 'import numpy as np\n'), ((13432, 13460), 'ibapi.client.EClient.__init__', 'EClient.__init__', (['self', 'self'], {}), '(self, self)\n', (13448, 13460), False, 'from ibapi.client import EClient\n'), ((14151, 14183), 'numpy.abs', 'np.abs', (['action_final.loc[ticker]'], {}), '(action_final.loc[ticker])\n', (14157, 14183), True, 'import numpy as np\n'), ((14544, 14560), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (14549, 14560), False, 'from time import sleep, strftime, localtime, time\n'), ((15399, 15427), 'ibapi.client.EClient.__init__', 'EClient.__init__', (['self', 'self'], {}), '(self, self)\n', (15415, 15427), False, 'from ibapi.client import EClient\n'), ((15468, 15585), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['ticker', 'time', 'shares', 'action', 'price', 'marketValue',\n 'RealizedPNL', 'commission']"}), "(columns=['ticker', 'time', 'shares', 'action', 'price',\n 'marketValue', 'RealizedPNL', 'commission'])\n", (15480, 15585), True, 'import 
pandas as pd\n'), ((9543, 9579), 'numpy.abs', 'np.abs', (["portfolio.loc[i, 'Position']"], {}), "(portfolio.loc[i, 'Position'])\n", (9549, 9579), True, 'import numpy as np\n'), ((10071, 10087), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (10076, 10087), False, 'from time import sleep, strftime, localtime, time\n'), ((12440, 12456), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (12445, 12456), False, 'from time import sleep, strftime, localtime, time\n'), ((14400, 14444), 'ibapi.tag_value.TagValue', 'TagValue', (['"""adaptivePriority"""', 'ordersPriority'], {}), "('adaptivePriority', ordersPriority)\n", (14408, 14444), False, 'from ibapi.tag_value import TagValue\n'), ((15072, 15088), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (15077, 15088), False, 'from time import sleep, strftime, localtime, time\n'), ((16624, 16654), 'pandas.to_datetime', 'pd.to_datetime', (['execution.time'], {}), '(execution.time)\n', (16638, 16654), True, 'import pandas as pd\n'), ((9919, 9963), 'ibapi.tag_value.TagValue', 'TagValue', (['"""adaptivePriority"""', 'ordersPriority'], {}), "('adaptivePriority', ordersPriority)\n", (9927, 9963), False, 'from ibapi.tag_value import TagValue\n'), ((10688, 10704), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (10693, 10704), False, 'from time import sleep, strftime, localtime, time\n'), ((12289, 12333), 'ibapi.tag_value.TagValue', 'TagValue', (['"""adaptivePriority"""', 'ordersPriority'], {}), "('adaptivePriority', ordersPriority)\n", (12297, 12333), False, 'from ibapi.tag_value import TagValue\n'), ((13009, 13025), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (13014, 13025), False, 'from time import sleep, strftime, localtime, time\n'), ((14928, 14972), 'ibapi.tag_value.TagValue', 'TagValue', (['"""adaptivePriority"""', 'ordersPriority'], {}), "('adaptivePriority', ordersPriority)\n", (14936, 14972), False, 'from ibapi.tag_value import TagValue\n'), ((10536, 10580), 
'ibapi.tag_value.TagValue', 'TagValue', (['"""adaptivePriority"""', 'ordersPriority'], {}), "('adaptivePriority', ordersPriority)\n", (10544, 10580), False, 'from ibapi.tag_value import TagValue\n'), ((12858, 12902), 'ibapi.tag_value.TagValue', 'TagValue', (['"""adaptivePriority"""', 'ordersPriority'], {}), "('adaptivePriority', ordersPriority)\n", (12866, 12902), False, 'from ibapi.tag_value import TagValue\n')] |
import argparse
import os, socket
from datetime import datetime
import shutil
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from model import UNet
from warp import WarpingLayerBWFlow
from torch.utils.tensorboard import SummaryWriter
from dataloader import llenDataset
from torch.utils.data import DataLoader
import cv2
import kornia
import random
def save_checkpoint(state, epoch, output_directory):
    """Persist *state* to ``<output_directory>/checkpoint-<epoch>.pth``."""
    target = os.path.join(output_directory, f'checkpoint-{epoch}.pth')
    torch.save(state, target)
# Parse command-line arguments for the low-light enhancement trainer.
parser = argparse.ArgumentParser(description='Low light enhancement')
parser.add_argument('--data-path', default='./data', type=str, help='path to the dataset')
parser.add_argument('--epochs', default=50, type=int, help='n of epochs (default: 50)')
parser.add_argument('--bs', default=1, type=int, help='[train] batch size(default: 1)')
parser.add_argument('--bs-test', default=1, type=int, help='[test] batch size (default: 1)')
parser.add_argument('--lr', default=1e-4, type=float, help='learning rate (default: 1e-4)')
parser.add_argument('--gpu', default='0', type=str, help='GPU id to use (default: 0)')
parser.add_argument('--checkpoint', default=None, type=str, help='path to checkpoint')
parser.add_argument('--log', default=None, type=str, help='folder to log')
parser.add_argument('--weight', default=20, type=float, help='weight of consistency loss')
args = parser.parse_args()
print(args)
# Restrict CUDA to the requested device(s) before any CUDA call.
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
# Training dataset and loader.
train_set = llenDataset(args.data_path, type='train')
train_loader = DataLoader(train_set, batch_size=args.bs, shuffle=True, num_workers=8, pin_memory=True)
# Seed all RNGs for reproducibility (ord('c') + 137 == 236).
torch.manual_seed(ord('c')+137)
random.seed(ord('c')+137)
np.random.seed(ord('c')+137)
start_epoch = 0
# Model, optimizer, L1 loss, and the backward-flow warping layer.
model = UNet(n_channels=3, bilinear=True).cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))
criterion = nn.L1Loss()
warp = WarpingLayerBWFlow().cuda()
# Create logger: timestamped host-tagged folder by default, or --log if given.
if args.log==None:
    log_dir = os.path.join(os.path.abspath(os.getcwd()), 'logs', datetime.now().strftime('%b%d_%H-%M-%S_') + socket.gethostname())
else:
    log_dir = os.path.join(os.path.abspath(os.getcwd()), 'logs', args.log)
os.makedirs(log_dir)
logger = SummaryWriter(log_dir)
# Log the run configuration next to the TensorBoard events.
with open(os.path.join(log_dir, "config.txt"), "a") as f:
    print(args, file=f)
iters = 0
for epoch in range(start_epoch, args.epochs):
    # Log the current learning rate of every parameter group.
    for i, param_group in enumerate(optimizer.param_groups):
        logger.add_scalar('Lr/lr_' + str(i), float(param_group['lr']), epoch)
    # Training stage
    print('Epoch', epoch, 'train in progress...')
    model.train()
    for i, (input, target, flow) in enumerate(train_loader):
        input, target, flow= input.cuda(), target.cuda(), flow.cuda()
        # 1st pass: enhance the raw frame and compare against ground truth.
        pred = model(input)
        loss = criterion(pred, target)
        # 2nd pass: temporal-consistency term -- enhancing the flow-warped
        # input should match warping the enhanced output with the same flow.
        input_t = warp(input, flow)
        input_t_pred = model(input_t)
        pred_t = warp(pred, flow)
        loss_t = criterion(input_t_pred, pred_t)
        total_loss = loss + loss_t * args.weight
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
        logger.add_scalar('Train/Loss', loss.item(), iters)
        logger.add_scalar('Train/Loss_t', loss_t.item(), iters)
        iters += 1
        if (i + 1) % 10 == 0:
            print('Train Epoch: {0} [{1}/{2}]\t'
                  'l1Loss={Loss1:.8f} '
                  'conLoss={Loss2:.8f} '.format(
                epoch, i + 1, len(train_loader), Loss1=loss.item(), Loss2=loss_t.item()))
    # Persist weights after every epoch.
    save_checkpoint(model.state_dict(), epoch, log_dir)
    print()
logger.close() | [
"torch.utils.tensorboard.SummaryWriter",
"os.makedirs",
"argparse.ArgumentParser",
"torch.nn.L1Loss",
"os.path.join",
"dataloader.llenDataset",
"os.getcwd",
"datetime.datetime.now",
"warp.WarpingLayerBWFlow",
"model.UNet",
"torch.save",
"torch.utils.data.DataLoader",
"socket.gethostname"
] | [((602, 662), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Low light enhancement"""'}), "(description='Low light enhancement')\n", (625, 662), False, 'import argparse\n'), ((1553, 1594), 'dataloader.llenDataset', 'llenDataset', (['args.data_path'], {'type': '"""train"""'}), "(args.data_path, type='train')\n", (1564, 1594), False, 'from dataloader import llenDataset\n'), ((1610, 1701), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'batch_size': 'args.bs', 'shuffle': '(True)', 'num_workers': '(8)', 'pin_memory': '(True)'}), '(train_set, batch_size=args.bs, shuffle=True, num_workers=8,\n pin_memory=True)\n', (1620, 1701), False, 'from torch.utils.data import DataLoader\n'), ((1946, 1957), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (1955, 1957), True, 'import torch.nn as nn\n'), ((2242, 2262), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (2253, 2262), False, 'import os, socket\n'), ((2272, 2294), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['log_dir'], {}), '(log_dir)\n', (2285, 2294), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((535, 573), 'torch.save', 'torch.save', (['state', 'checkpoint_filename'], {}), '(state, checkpoint_filename)\n', (545, 573), False, 'import torch\n'), ((1811, 1844), 'model.UNet', 'UNet', ([], {'n_channels': '(3)', 'bilinear': '(True)'}), '(n_channels=3, bilinear=True)\n', (1815, 1844), False, 'from model import UNet\n'), ((1965, 1985), 'warp.WarpingLayerBWFlow', 'WarpingLayerBWFlow', ([], {}), '()\n', (1983, 1985), False, 'from warp import WarpingLayerBWFlow\n'), ((2322, 2357), 'os.path.join', 'os.path.join', (['log_dir', '"""config.txt"""'], {}), "(log_dir, 'config.txt')\n", (2334, 2357), False, 'import os, socket\n'), ((2072, 2083), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2081, 2083), False, 'import os, socket\n'), ((2138, 2158), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (2156, 2158), False, 'import os, 
socket\n'), ((2209, 2220), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2218, 2220), False, 'import os, socket\n'), ((2094, 2108), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2106, 2108), False, 'from datetime import datetime\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 30 21:25:24 2015
@author: Konrad
"""
import copy
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as sc_p
def gen_clusters(means, num_each):
    """Draw ``num_each`` 2-D Gaussian samples (unit covariance) around each
    mean in *means*, concatenate them, and shuffle the rows in place."""
    cov = np.diag(np.ones(2))
    samples = [np.random.multivariate_normal(center, cov, num_each) for center in means]
    data = np.concatenate(samples)
    np.random.shuffle(data)
    return data
def make_pts(data):
    """Wrap each coordinate row of *data* in a ``Point`` instance."""
    return [Point(pos) for pos in data]
def euclid(obj1, obj2):
    """Euclidean distance between two Points or two ndarrays; None otherwise."""
    if isinstance(obj1, Point) and isinstance(obj2, Point):
        delta = obj1.pos - obj2.pos
    elif isinstance(obj1, np.ndarray) and isinstance(obj2, np.ndarray):
        delta = obj1 - obj2
    else:
        return None
    return np.sqrt(sum(delta ** 2))
class Point:
    """A sample point carrying the bookkeeping fields the OPTICS pass needs."""

    def __init__(self, pos):
        self.pos = copy.deepcopy(pos)  # private copy of the coordinates
        self.processed = False         # visited by the main OPTICS loop?
        self.core_dist = None          # core distance, set during expansion
        self.reach_dist = None         # reachability distance, set during expansion
        self.in_seed = False           # currently queued in a seed list?
class OPTICS:
    """OPTICS ordering (Ankerst et al., 1999) over a list of 2-D ``Point``s.

    Builds ``main_list`` -- indices into ``data`` in processing order -- with
    every point's reachability distance filled in; clusters can then be read
    off the resulting "reachability plot".
    """

    def __init__(self, min_pts, data, max_eps=None):
        self.max_eps = max_eps
        self.min_pts = min_pts
        self.data = copy.deepcopy(data)
        self.dim = self.data[0].pos.size
        self.main_list = []
        if self.max_eps is None:
            self.get_max_eps()
        self.main_loop()

    def __call__(self, main_idx):
        """Reachability distance of the point at ordering position *main_idx*."""
        return self.data[self.main_list[main_idx]].reach_dist

    def main_loop(self):
        # Expand every unprocessed point; whatever remains unprocessed
        # afterwards (noise / never-reached points) is appended with
        # reach_dist defaulting to max_eps.
        for idx, obj in enumerate(self.data):
            if not obj.processed:
                self.expand_point(idx)
        for idx, obj in enumerate(self.data):
            if not obj.processed:
                self.append_main(idx)

    def get_max_eps(self):
        # Heuristic neighborhood radius derived from the bounding-box
        # density of the data set.
        extr_x = self.get_extr_x()
        extr_y = self.get_extr_y()
        area = (extr_x[1] - extr_x[0]) * (extr_y[1] - extr_y[0])
        self.max_eps = ((area * self.min_pts * sc_p.gamma(2)) / (len(self.data) * np.sqrt(np.pi ** 2))) ** 0.5

    def get_extr_x(self):
        """(min, max) of the x coordinates."""
        xs = [obj.pos[0] for obj in self.data]
        return (min(xs), max(xs))

    def get_extr_y(self):
        """(min, max) of the y coordinates."""
        ys = [obj.pos[1] for obj in self.data]
        return (min(ys), max(ys))

    def append_main(self, idx):
        # Commit *idx* to the output ordering, defaulting its reachability.
        self.data[idx].processed = True
        if self.data[idx].reach_dist is None:
            self.data[idx].reach_dist = self.max_eps
        self.main_list.append(idx)

    def expand_point(self, idx):
        """Process *idx*; if it is a core point, grow the order from it."""
        self.get_neighbours(idx)
        self.get_core_dist(idx)
        if self.data[idx].core_dist == -1:
            return  # not a core point; left for the noise sweep in main_loop
        self.data[idx].processed = True
        self.append_main(idx)
        seed_list = []
        self.append_seed(seed_list, self.data[idx].neighbours, idx)
        while len(seed_list) > 0:
            curr_idx = seed_list[0]
            self.get_neighbours(curr_idx)
            self.get_core_dist(curr_idx)
            self.data[curr_idx].processed = True
            self.append_main(curr_idx)
            self.remove_seed(seed_list)
            if not (self.data[curr_idx].core_dist == -1):
                self.append_seed(seed_list, self.data[curr_idx].neighbours, curr_idx)

    def get_core_dist(self, idx):
        # Core distance = distance to the min_pts-th nearest neighbour, or
        # -1 when there are not enough neighbours within max_eps.
        if len(self.data[idx].neighbours) >= self.min_pts:
            self.data[idx].core_dist = self.data[idx].neighbours[self.min_pts - 1][1]
        else:
            self.data[idx].core_dist = -1

    def get_reach_dist(self, center_idx, idx, dist):
        """Update idx's reachability from center; return True if it changed."""
        r_dist = max(dist, self.data[center_idx].core_dist)
        if self.data[idx].reach_dist is None:
            self.data[idx].reach_dist = r_dist
            return True
        if self.data[idx].reach_dist > r_dist:
            self.data[idx].reach_dist = r_dist
            return True
        return False

    def get_neighbours(self, idx):
        """All points within max_eps of *idx* as [index, dist], nearest first."""
        self.data[idx].neighbours = []
        for n_idx, obj in enumerate(self.data):
            dist = euclid(obj, self.data[idx])
            if dist <= self.max_eps:
                self.data[idx].neighbours.append([n_idx, dist])
        self.data[idx].neighbours.sort(key=lambda x: x[1])

    def append_seed(self, seed_list, neighbours, center_idx):
        # Insert / reposition each neighbour in the seed list, which is kept
        # ordered by ascending reachability distance.
        for n_tup in neighbours:
            changed = self.get_reach_dist(center_idx, n_tup[0], n_tup[1])
            if self.data[n_tup[0]].in_seed and changed:
                # Reachability improved: remove and re-insert at the new rank.
                del seed_list[seed_list.index(n_tup[0])]
                self.data[n_tup[0]].in_seed = False
            elif self.data[n_tup[0]].processed or self.data[n_tup[0]].in_seed:
                continue
            for idx, obj in enumerate(seed_list):
                if self.data[n_tup[0]].reach_dist < self.data[obj].reach_dist:
                    seed_list.insert(idx, n_tup[0])
                    self.data[n_tup[0]].in_seed = True
                    break
            if not self.data[n_tup[0]].in_seed:
                seed_list.append(n_tup[0])
                self.data[n_tup[0]].in_seed = True

    def remove_seed(self, seed_list):
        # Pop the head of the seed list and clear its queued flag.
        self.data[seed_list[0]].in_seed = False
        del seed_list[0]

    def reach_plot(self):
        """Bar chart of reachability distances in processing order."""
        x = list(range(len(self.main_list)))
        y = [self.data[idx].reach_dist for idx in self.main_list]
        f, ax = plt.subplots()
        ax.bar(x, y)

    def print_reach_dist(self):
        for idx in self.main_list:
            print(idx)
            print(self.data[idx].reach_dist)

    def plot_data(self):
        """Scatter plot of the raw points."""
        xs = [obj.pos[0] for obj in self.data]
        ys = [obj.pos[1] for obj in self.data]
        f, ax = plt.subplots()
        ax.scatter(xs, ys)

    def get_num_clusters(self):
        # Walk the reachability plot and record each peak-to-valley drop.
        # Bug fix: the first comparison originally read
        # ``... > self.data[self.main_list[i + 1]]`` (a Point object, not its
        # reach_dist), which raises TypeError in Python 3; the method also
        # never returned the collected values.
        # NOTE(review): ``top`` is used before ``bottom`` is ever set on the
        # first descent -- the -1 sentinels look intentional but confirm.
        clusters = []
        up = True
        top, bottom = -1, -1
        for i, idx in enumerate(self.main_list[:-1]):
            nxt = self.data[self.main_list[i + 1]].reach_dist
            if up and (self.data[idx].reach_dist > nxt):
                up = not up
                if not bottom == -1:
                    clusters.append(top - bottom)
                top = self.data[idx].reach_dist
                continue
            if (not up) and (self.data[idx].reach_dist < nxt):
                up = not up
                bottom = self.data[idx].reach_dist
        return clusters
class Clusters:
    """Extract cluster ranges from an OPTICS ordering (xi-style regions).

    NOTE(review): this class appears unfinished -- ``find`` subtracts raw
    ``main_list`` indices where reachability distances seem intended, and
    ``proc_down`` / ``proc_up`` call ``.reach_dist`` on plain ints.  Only
    the constructor signature is corrected here; the traversal logic is
    kept verbatim for a later, dedicated fix.
    """

    def __init__(self, optics_obj, eps):
        # Bug fix: the original signature was ``__init__(optics_obj, eps)``
        # with no ``self`` parameter, so ``Clusters(optics, eps)`` always
        # raised a TypeError.
        self.optics_obj = optics_obj
        self.main_list = optics_obj.main_list
        self.eps = eps
        self.min_pts = optics_obj.min_pts

    def find(self):
        idx = 0
        downs = []     # steep-down regions found so far: [(begin, end), mib]
        clusters = []
        while idx < len(self.main_list):
            # NOTE(review): this subtracts ordering *indices*; the xi method
            # calls for differences of reachability distances -- confirm.
            diff = self.main_list[idx] - self.main_list[idx + 1]
            if diff >= self.optics_obj(idx) * self.eps:
                new_down, idx = self.proc_down(idx)
                downs.append([new_down, -float("inf")])
            elif -diff >= self.optics_obj(idx) * self.eps:
                glob_mib = self.get_glob_mib(downs[-1], idx)
                self.filter_downs(glob_mib, downs)
                up, idx = self.proc_up(idx)
                for down in downs:
                    if self.optics_obj(up[1]).reach_dist * (1 - self.eps) >= down[1]:
                        clusters.append((down[0][0], up[1]))
            else:
                idx += 1

    def get_glob_mib(self, last_down, curr_idx):
        """Max reachability between the end of *last_down* and *curr_idx*."""
        begin_idx, end_idx = last_down[0][1], curr_idx
        glob_mib = -float("inf")
        for i in range(begin_idx, end_idx + 1):
            if self.optics_obj(i) > glob_mib:
                glob_mib = self.optics_obj(i)
        return glob_mib

    def proc_down(self, idx):
        """Extend a steep-down region; returns ((begin, last_good), next_idx)."""
        bad_inrow = 0
        begin_idx = idx
        while idx < len(self.main_list):
            idx += 1
            # NOTE(review): main_list holds ints; ``.reach_dist`` on an int
            # fails at runtime -- this path looks untested.
            diff = self.main_list[idx].reach_dist - self.main_list[idx + 1].reach_dist
            if diff < 0:
                return (begin_idx, idx - 1)
            if diff > 0:
                if diff >= self.eps * self.main_list[idx]:
                    bad_inrow = 0
                else:
                    if bad_inrow == 0:
                        last_good = idx - 1
                    bad_inrow += 1
                if bad_inrow > self.min_pts:
                    # TODO(review): should also ensure the region has
                    # non-zero length before returning.
                    return (begin_idx, last_good), idx

    def proc_up(self, idx):
        """Extend a steep-up region; returns ((begin, last_good), next_idx)."""
        bad_inrow = 0
        begin_idx = idx
        while idx < len(self.main_list):
            idx += 1
            diff = self.main_list[idx].reach_dist[idx + 1] - self.main_list[idx].reach_dist
            if diff < 0:
                return (begin_idx, idx - 1)
            if diff > 0:
                if diff >= self.eps * self.main_list[idx + 1]:
                    bad_inrow = 0
                else:
                    if bad_inrow == 0:
                        last_good = idx - 1
                    bad_inrow += 1
                    if bad_inrow > self.min_pts:
                        return (begin_idx, last_good), idx

    def filter_downs(self, glob_mib, downs):
        """Drop down-regions invalidated by *glob_mib*; raise mib of the rest."""
        del_idx = []
        for idx, obj in enumerate(downs[:-1]):
            if self.main_list[obj[0][0]].reach_dist * (1 - self.eps) < glob_mib:
                del_idx.append(idx)
            elif obj[1] < glob_mib:
                downs[idx][1] = glob_mib
        del_idx.reverse()
        for i in del_idx:
            del downs[i]
# Demo: sample four Gaussian clusters (200 points each), run OPTICS with
# min_pts=15, then show the reachability plot and the raw scatter plot.
dat = gen_clusters([[1, 1], [6, 7], [10, 15], [15, 15]], 200);
data = make_pts(dat);
optics = OPTICS(15, data);
optics.reach_plot();
optics.plot_data();
plt.show();
#optics.print_reach_dist();
print ("Done") | [
"numpy.sqrt",
"numpy.ones",
"matplotlib.pyplot.show",
"scipy.special.gamma",
"numpy.concatenate",
"copy.deepcopy",
"matplotlib.pyplot.subplots",
"numpy.random.shuffle"
] | [((10939, 10949), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10947, 10949), True, 'import matplotlib.pyplot as plt\n'), ((362, 381), 'numpy.concatenate', 'np.concatenate', (['tup'], {}), '(tup)\n', (376, 381), True, 'import numpy as np\n'), ((388, 411), 'numpy.random.shuffle', 'np.random.shuffle', (['data'], {}), '(data)\n', (405, 411), True, 'import numpy as np\n'), ((912, 930), 'copy.deepcopy', 'copy.deepcopy', (['pos'], {}), '(pos)\n', (925, 930), False, 'import copy\n'), ((1231, 1250), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (1244, 1250), False, 'import copy\n'), ((6147, 6161), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6159, 6161), True, 'import matplotlib.pyplot as plt\n'), ((6553, 6567), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6565, 6567), True, 'import matplotlib.pyplot as plt\n'), ((2040, 2053), 'scipy.special.gamma', 'sc_p.gamma', (['(2)'], {}), '(2)\n', (2050, 2053), True, 'import scipy.special as sc_p\n'), ((2071, 2090), 'numpy.sqrt', 'np.sqrt', (['(np.pi ** 2)'], {}), '(np.pi ** 2)\n', (2078, 2090), True, 'import numpy as np\n'), ((325, 335), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (332, 335), True, 'import numpy as np\n')] |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - <NAME> (<EMAIL>)
# - <NAME> (<EMAIL>)
from mpi4py import MPI
from pandayoda.yodacore import Interaction
# Rank 0 acts as the receiver; every other rank sends it a single request.
comm = MPI.COMM_WORLD
mpirank = comm.Get_rank()
if mpirank == 0:
    # Receiver: answer requests until all other ranks have been drained.
    rsv = Interaction.Receiver()
    while rsv.activeRanks():
        tmpStat, method, params = rsv.receiveRequest()
        print(mpirank, 'got', tmpStat, method, params)
        print(rsv.returnResponse({'msg': 'Done'}))
        rsv.decrementNumRank()
    print(mpirank, "done")
else:
    # Requester: issue one dummy request tagged with this rank.
    snd = Interaction.Requester()
    print(mpirank, "sending req")
    res = snd.sendRequest('dummy', {1: 2, 3: 4, 'rank': mpirank})
    print(res)
    print(mpirank, "done")
| [
"pandayoda.yodacore.Interaction.Receiver",
"pandayoda.yodacore.Interaction.Requester"
] | [((419, 441), 'pandayoda.yodacore.Interaction.Receiver', 'Interaction.Receiver', ([], {}), '()\n', (439, 441), False, 'from pandayoda.yodacore import Interaction\n'), ((706, 729), 'pandayoda.yodacore.Interaction.Requester', 'Interaction.Requester', ([], {}), '()\n', (727, 729), False, 'from pandayoda.yodacore import Interaction\n')] |
import re
from collections import OrderedDict
import struct
import os
import decoder748
# One record per line: "<hexStart> <hexEnd> <decimalCode>"; re.M anchors
# ^/$ per line so findall yields one (start, end, code) tuple per record.
REG_EXP = re.compile(r'^\s*<([0-9a-f]+)>\s+<([0-9a-f]+)>\s+(\d+)$', re.M)
class CMap:
    """CID -> Unicode map built from ``MAP_STRING`` range records.

    Subclasses override ``MAP_STRING`` with "<start> <end> <code>" lines
    (hex range start/end, decimal base code point); ``to_unicode`` resolves
    a CID by offsetting from the covering range's base code point.
    """

    MAP_STRING = ''  # subclasses provide the record text

    def __init__(self):
        self.codePoints = set()   # range start CIDs
        self.cid2unicode = {}     # range start CID -> base unicode code point
        self._feed()

    def _feed(self):
        """Parse MAP_STRING records into codePoints / cid2unicode."""
        for (s, e, code) in re.findall(REG_EXP, self.MAP_STRING):
            s = int(s, 16)
            e = int(e, 16)
            self.codePoints.add(s)
            self.cid2unicode[s] = int(code)

    def to_unicode(self, cid):
        """Map *cid* to a one-character string via its covering range.

        Bug fix: the original iterated the *unordered* ``codePoints`` set and
        broke on the first start >= cid, which picks an arbitrary range and
        can produce a negative offset.  Scan the sorted starts and keep the
        largest start <= cid instead (matching the module-level helper).
        """
        point = 0
        for next_point in sorted(self.codePoints):
            if cid < next_point:
                break
            point = next_point
        d = cid - point
        code = self.cid2unicode[point]
        return chr(code + d)
def to_unicode(klass, cid):
    """Resolve *cid* through ``klass.diff`` overrides, else its range table.

    Code points below 0x100 map directly via chr(); larger ones are packed
    big-endian (2 or 4 bytes) and decoded as gb18030.
    """
    try:
        return klass.diff[cid]
    except KeyError:
        pass
    # Largest range start <= cid.
    start = 0
    for candidate in sorted(klass.cid2unicode):
        if candidate > cid:
            break
        start = candidate
    code = klass.cid2unicode[start] + (cid - start)
    if code < 0x100:
        return chr(code)
    fmt = '>H' if code < 0x10000 else '>L'
    return struct.pack(fmt, code).decode('gb18030')
def to_unicode_wrapper(klass):
    """Bind *klass* into a one-argument ``cid -> unicode`` callable."""
    def _convert(cid):
        return to_unicode(klass, cid)
    return _convert
class UnicodeMap:
    """CID -> Unicode lookup with an optional per-instance override map."""

    @property
    def DESC(self):
        # Directory that holds the cidToUnicode tables.
        return './cidtounicode'

    def __init__(self, cmap=None):
        """*cmap* maps CIDs to override strings consulted before chr(cid).

        Bug fix: the original default ``cmap={}`` is a mutable default
        shared by every instance created without an explicit map; use a
        None sentinel so each instance gets its own dict.
        """
        self.cid2unicode = {}
        self.diff = {} if cmap is None else cmap

    def get(self, cid):
        """Return the override for *cid* if present, else ``chr(cid)``."""
        if cid in self.diff:
            return self.diff[cid]
        return chr(cid)
class ADOBE_GB1(UnicodeMap):
    # cidToUnicode table file name for the Adobe-GB1 character collection.
    FILE_NAME = 'Adobe-GB1.cidToUnicode'
def getCMap(cmapType, cmap=None):
    """Adapt *cmap* for the given CMap type.

    Founder-*-748 types: decode each value through the decoder748 codec,
    in place.  'Adobe-GB1': wrap in an ADOBE_GB1 map.  Anything else is
    returned unchanged.

    Bug fix: the original default ``cmap={}`` was *mutated in place* by the
    Founder branch, leaking decoded state between calls; a None sentinel
    gives every no-arg call a fresh dict.
    """
    if cmap is None:
        cmap = {}
    if cmapType.startswith('Founder-') and cmapType.endswith('748'):
        decoder = decoder748.encoding(cmapType)
        for cid in cmap:
            cmap[cid] = decoder.decode(cmap[cid].encode('gb18030'))
    elif cmapType == 'Adobe-GB1':
        cmap = ADOBE_GB1(cmap=cmap)
    return cmap
| [
"re.findall",
"decoder748.encoding",
"struct.pack",
"re.compile"
] | [((100, 166), 're.compile', 're.compile', (['"""^\\\\s*<([0-9a-f]+)>\\\\s+<([0-9a-f]+)>\\\\s+(\\\\d+)$"""', 're.M'], {}), "('^\\\\s*<([0-9a-f]+)>\\\\s+<([0-9a-f]+)>\\\\s+(\\\\d+)$', re.M)\n", (110, 166), False, 'import re\n'), ((356, 392), 're.findall', 're.findall', (['REG_EXP', 'self.MAP_STRING'], {}), '(REG_EXP, self.MAP_STRING)\n', (366, 392), False, 'import re\n'), ((1816, 1845), 'decoder748.encoding', 'decoder748.encoding', (['cmapType'], {}), '(cmapType)\n', (1835, 1845), False, 'import decoder748\n'), ((1111, 1134), 'struct.pack', 'struct.pack', (['""">H"""', 'code'], {}), "('>H', code)\n", (1122, 1134), False, 'import struct\n'), ((1175, 1198), 'struct.pack', 'struct.pack', (['""">L"""', 'code'], {}), "('>L', code)\n", (1186, 1198), False, 'import struct\n')] |
import os
import json
import shutil
import numpy as np
from typing import Any
from typing import Dict
from typing import List
from typing import Type
from typing import Tuple
from typing import Union
from typing import Callable
from typing import Optional
from typing import NamedTuple
from tqdm.autonotebook import tqdm
from cfdata.tabular import TabularData
from cftool.ml import ModelPattern
from cftool.ml import EnsemblePattern
from cftool.dist import Parallel
from cftool.misc import update_dict
from cftool.misc import shallow_copy_dict
from cftool.ml.utils import patterns_type
from cftool.ml.utils import Comparer
from cftool.ml.utils import Estimator
from .pipeline import SimplePipeline
from .pipeline import CarefreePipeline
from ...data import MLData
from ...data import MLInferenceData
from ...trainer import get_sorted_checkpoints
from ...constants import SCORES_FILE
from ...constants import WARNING_PREFIX
from ...constants import CHECKPOINTS_FOLDER
from ...constants import ML_PIPELINE_SAVE_NAME
from ...dist.ml import Experiment
from ...dist.ml import ExperimentResults
from ...misc.toolkit import to_2d
from ...misc.toolkit import get_latest_workplace
from ...models.ml.protocol import MLCoreProtocol
def register_core(name: str) -> Callable[[Type], Type]:
    """Class decorator registering an ML core under *name* in MLCoreProtocol."""
    return MLCoreProtocol.register(name)
# a group of trained pipelines keyed by model identifier
pipelines_type = Dict[str, List[SimplePipeline]]
# anything `_to_pipelines` can normalize: a single pipeline, a flat list,
# or a (possibly list-valued) mapping from model identifier to pipelines
various_pipelines_type = Union[
    SimplePipeline,
    List[SimplePipeline],
    Dict[str, SimplePipeline],
    pipelines_type,
]
def _to_pipelines(pipelines: various_pipelines_type) -> pipelines_type:
    """Normalize any accepted `pipelines` argument to {identifier: [pipeline, ...]}."""
    normalized = {}
    if isinstance(pipelines, dict):
        # wrap scalar values; list values are passed through as-is
        for key, value in pipelines.items():
            normalized[key] = value if isinstance(value, list) else [value]
        return normalized
    candidates = pipelines if isinstance(pipelines, list) else [pipelines]
    for candidate in candidates:
        assert candidate.model is not None
        identifier = candidate.model.__identifier__
        normalized.setdefault(identifier, []).append(candidate)
    return normalized
def evaluate(
    data: Union[MLData, MLInferenceData],
    *,
    metrics: Union[str, List[str]],
    metric_configs: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
    contains_labels: bool = True,
    pipelines: Optional[various_pipelines_type] = None,
    predict_config: Optional[Dict[str, Any]] = None,
    other_patterns: Optional[Dict[str, patterns_type]] = None,
    comparer_verbose_level: Optional[int] = 1,
) -> Comparer:
    """Evaluate pipelines and/or external patterns on *data* with *metrics*.

    Either `pipelines`, or (`y` together with `other_patterns`), must be
    provided; labels are mandatory.  Returns the `Comparer` after running
    the comparison.
    """
    if not contains_labels:
        err_msg = "`cflearn.evaluate` must be called with `contains_labels = True`"
        raise ValueError(err_msg)
    if metric_configs is None:
        metric_configs = [{} for _ in range(len(metrics))]
    patterns = {}
    x, y = data.x_train, data.y_train
    if pipelines is None:
        msg = None
        if y is None:
            msg = "either `pipelines` or `y` should be provided"
        if other_patterns is None:
            msg = "either `pipelines` or `other_patterns` should be provided"
        if msg is not None:
            raise ValueError(msg)
    else:
        pipelines = _to_pipelines(pipelines)
    # get data
    # TODO : different pipelines may have different labels
    if y is not None:
        y = to_2d(y)
    else:
        # reaching here implies `pipelines` is not None (otherwise we raised above)
        if not isinstance(x, str):
            raise ValueError("`x` should be str when `y` is not provided")
        data_pipeline = list(pipelines.values())[0][0]
        if not isinstance(data_pipeline, CarefreePipeline):
            raise ValueError("only `CarefreePipeline` can handle file inputs")
        cf_data = data_pipeline.cf_data
        assert cf_data is not None
        x, y = cf_data.read_file(x, contains_labels=contains_labels)
        y = cf_data.transform(x, y).y
    # get metrics
    if predict_config is None:
        predict_config = {}
    predict_config.setdefault("contains_labels", contains_labels)
    # bugfix: `pipelines` may legitimately be None here (only `other_patterns`
    # supplied) — the original dereferenced it unconditionally and crashed
    if pipelines is not None:
        for name, pipeline_list in pipelines.items():
            patterns[name] = [
                pipeline.to_pattern(**predict_config) for pipeline in pipeline_list
            ]
    if other_patterns is not None:
        for other_name in other_patterns.keys():
            if other_name in patterns:
                print(
                    f"{WARNING_PREFIX}'{other_name}' is found in "
                    "`other_patterns`, it will be overwritten"
                )
        update_dict(other_patterns, patterns)
    if isinstance(metrics, list):
        metrics_list = metrics
    else:
        assert isinstance(metrics, str)
        metrics_list = [metrics]
    if isinstance(metric_configs, list):
        metric_configs_list = metric_configs
    else:
        assert isinstance(metric_configs, dict)
        metric_configs_list = [metric_configs]
    estimators = [
        Estimator(metric, metric_config=metric_config)
        for metric, metric_config in zip(metrics_list, metric_configs_list)
    ]
    comparer = Comparer(patterns, estimators)
    comparer.compare(data, y, verbose_level=comparer_verbose_level)
    return comparer
def task_loader(
    workplace: str,
    pipeline_base: Type[SimplePipeline] = CarefreePipeline,
    compress: bool = True,
) -> SimplePipeline:
    """Load the pipeline saved under *workplace* using *pipeline_base*."""
    loaded = pipeline_base.load(
        export_folder=os.path.join(workplace, ML_PIPELINE_SAVE_NAME),
        compress=compress,
    )
    assert isinstance(loaded, SimplePipeline)
    return loaded
def load_experiment_results(
    results: ExperimentResults,
    pipeline_base: Type[SimplePipeline],
) -> pipelines_type:
    """Reload every pipeline an experiment produced, grouped per model and ordered by run index."""
    by_model: Dict[str, Dict[int, SimplePipeline]] = {}
    pairs = list(zip(results.workplaces, results.workplace_keys))
    for workplace, (model, str_i) in tqdm(pairs, desc="load"):
        by_model.setdefault(model, {})[int(str_i)] = task_loader(workplace, pipeline_base)
    return {
        model: [runs[i] for i in sorted(runs)]
        for model, runs in by_model.items()
    }
class RepeatResult(NamedTuple):
    """Everything produced by `repeat_with`."""
    # underlying carefree-data dataset, when a CarefreePipeline was used
    data: Optional[TabularData]
    # distributed Experiment handle (None when running sequentially)
    experiment: Optional[Experiment]
    # trained pipelines grouped by model name
    pipelines: Optional[Dict[str, List[SimplePipeline]]]
    # inference patterns grouped by model name (when `return_patterns`)
    patterns: Optional[Dict[str, List[ModelPattern]]]
def repeat_with(
    data: MLData,
    *,
    pipeline_base: Type[SimplePipeline] = CarefreePipeline,
    workplace: str = "_repeat",
    models: Union[str, List[str]] = "fcnn",
    model_configs: Optional[Dict[str, Dict[str, Any]]] = None,
    predict_config: Optional[Dict[str, Any]] = None,
    sequential: Optional[bool] = None,
    num_jobs: int = 1,
    num_repeat: int = 5,
    return_patterns: bool = True,
    compress: bool = True,
    use_tqdm: bool = True,
    available_cuda_list: Optional[List[int]] = None,
    resource_config: Optional[Dict[str, Any]] = None,
    task_meta_kwargs: Optional[Dict[str, Any]] = None,
    is_fix: bool = False,
    **kwargs: Any,
) -> RepeatResult:
    """Train each model in *models* `num_repeat` times under *workplace*.

    Runs either sequentially in-process (default when `num_jobs <= 1`) or
    through a distributed `Experiment`.  With `is_fix=True` the existing
    workplace is kept and only "buggy" (unfinished) runs are re-trained.
    Returns a `RepeatResult` bundling data, experiment, pipelines and patterns.
    """
    if os.path.isdir(workplace) and not is_fix:
        print(f"{WARNING_PREFIX}'{workplace}' already exists, it will be erased")
        shutil.rmtree(workplace)
    kwargs = shallow_copy_dict(kwargs)
    if isinstance(models, str):
        models = [models]
    if sequential is None:
        sequential = num_jobs <= 1
    if model_configs is None:
        model_configs = {}
    def is_buggy(i_: int, model_: str) -> bool:
        # a run needs re-training (under `is_fix`) when it has no finished
        # workplace, no scores file, or no sorted checkpoints
        i_workplace = os.path.join(workplace, model_, str(i_))
        i_latest_workplace = get_latest_workplace(i_workplace)
        if i_latest_workplace is None:
            return True
        checkpoint_folder = os.path.join(i_latest_workplace, CHECKPOINTS_FOLDER)
        if not os.path.isfile(os.path.join(checkpoint_folder, SCORES_FILE)):
            return True
        if not get_sorted_checkpoints(checkpoint_folder):
            return True
        return False
    def fetch_config(core_name: str) -> Dict[str, Any]:
        # fresh shallow copy per task so tasks cannot mutate shared config
        local_kwargs = shallow_copy_dict(kwargs)
        assert model_configs is not None
        local_core_config = model_configs.setdefault(core_name, {})
        local_kwargs["core_name"] = core_name
        local_kwargs["core_config"] = shallow_copy_dict(local_core_config)
        return shallow_copy_dict(local_kwargs)
    pipelines_dict: Optional[Dict[str, List[SimplePipeline]]] = None
    if sequential:
        cuda = kwargs.pop("cuda", None)
        experiment = None
        tqdm_settings = kwargs.setdefault("tqdm_settings", {})
        tqdm_settings["tqdm_position"] = 2
        if not return_patterns:
            print(
                f"{WARNING_PREFIX}`return_patterns` should be "
                "True when `sequential` is True, because patterns "
                "will always be generated"
            )
            return_patterns = True
        pipelines_dict = {}
        if not use_tqdm:
            iterator = models
        else:
            iterator = tqdm(models, total=len(models), position=0)
        for model in iterator:
            local_pipelines = []
            sub_iterator = range(num_repeat)
            if use_tqdm:
                sub_iterator = tqdm(
                    sub_iterator,
                    total=num_repeat,
                    position=1,
                    leave=False,
                )
            for i in sub_iterator:
                if is_fix and not is_buggy(i, model):
                    continue
                local_config = fetch_config(model)
                local_workplace = os.path.join(workplace, model, str(i))
                local_config.setdefault("workplace", local_workplace)
                m = pipeline_base(**local_config)
                m.fit(data, cuda=cuda)
                local_pipelines.append(m)
            pipelines_dict[model] = local_pipelines
    else:
        if num_jobs <= 1:
            print(
                f"{WARNING_PREFIX}we suggest setting `sequential` "
                f"to True when `num_jobs` is {num_jobs}"
            )
        # data
        data_folder = Experiment.dump_data_bundle(
            data.x_train,
            data.y_train,
            data.x_valid,
            data.y_valid,
            workplace=workplace,
        )
        # experiment
        experiment = Experiment(
            num_jobs=num_jobs,
            available_cuda_list=available_cuda_list,
            resource_config=resource_config,
        )
        for model in models:
            for i in range(num_repeat):
                if is_fix and not is_buggy(i, model):
                    continue
                local_config = fetch_config(model)
                experiment.add_task(
                    model=model,
                    compress=compress,
                    root_workplace=workplace,
                    workplace_key=(model, str(i)),
                    config=local_config,
                    data_folder=data_folder,
                    **(task_meta_kwargs or {}),
                )
        # finalize
        results = experiment.run_tasks(use_tqdm=use_tqdm)
        if return_patterns:
            pipelines_dict = load_experiment_results(results, pipeline_base)
    patterns = None
    if return_patterns:
        assert pipelines_dict is not None
        if predict_config is None:
            predict_config = {}
        patterns = {
            model: [m.to_pattern(**predict_config) for m in pipelines]
            for model, pipelines in pipelines_dict.items()
        }
    cf_data = None
    if patterns is not None:
        m = patterns[models[0]][0].model
        if isinstance(m, CarefreePipeline):
            cf_data = m.cf_data
    return RepeatResult(cf_data, experiment, pipelines_dict, patterns)
def pack_repeat(
    workplace: str,
    pipeline_base: Type[SimplePipeline],
    *,
    num_jobs: int = 1,
) -> List[str]:
    """Pack the latest run of every sub-folder of *workplace*, in parallel."""
    latest = [
        get_latest_workplace(path)
        for path in (
            os.path.join(workplace, name) for name in sorted(os.listdir(workplace))
        )
        if os.path.isdir(path)
    ]
    grouped = Parallel(num_jobs).grouped(pipeline_base.pack, latest)
    # each worker returns a list of exported paths; flatten them
    return sum(grouped.ordered_results, [])
def pick_from_repeat_and_pack(
    workplace: str,
    pipeline_base: Type[SimplePipeline],
    *,
    num_pick: int,
    num_jobs: int = 1,
) -> List[str]:
    """Pack only the `num_pick` best-scoring repeat runs found under *workplace*."""
    scored = []
    for name in sorted(os.listdir(workplace)):
        path = os.path.join(workplace, name)
        if not os.path.isdir(path):
            continue
        sub_workplace = get_latest_workplace(path)
        assert sub_workplace is not None, "internal error occurred"
        score_path = os.path.join(sub_workplace, CHECKPOINTS_FOLDER, SCORES_FILE)
        with open(score_path, "r") as f:
            best_score = float(max(json.load(f).values()))
        scored.append((best_score, sub_workplace))
    # best score first; workplace paths are unique so ordering is total
    picked = [pair[1] for pair in sorted(scored)[::-1][:num_pick]]
    grouped = Parallel(num_jobs).grouped(pipeline_base.pack, picked)
    return sum(grouped.ordered_results, [])
def make_toy_model(
    model: str = "fcnn",
    config: Optional[Dict[str, Any]] = None,
    *,
    pipeline_type: str = "ml.carefree",
    is_classification: bool = False,
    cf_data_config: Optional[Dict[str, Any]] = None,
    data_tuple: Optional[Tuple[np.ndarray, np.ndarray]] = None,
    cuda: Optional[str] = None,
) -> SimplePipeline:
    """Build and fit a tiny pipeline on toy data, mainly for tests and demos.

    When `data_tuple` is omitted, a minimal one/two-sample dataset is
    synthesized (regression or classification).  User `config` entries
    take precedence over the generated defaults (via `update_dict`).
    """
    if config is None:
        config = {}
    if data_tuple is not None:
        x_np, y_np = data_tuple
    else:
        # minimal synthetic dataset: one sample for regression, two for classification
        if not is_classification:
            x, y = [[0]], [[1.0]]
        else:
            x, y = [[0], [1]], [[1], [0]]
        x_np, y_np = map(np.array, [x, y])
    model_config = {}
    if model in ("fcnn", "tree_dnn"):
        model_config = {
            "hidden_units": [100],
            "batch_norm": False,
            "dropout": 0.0,
        }
    base_config = {
        "core_name": model,
        "core_config": model_config,
        "output_dim": 1 + int(is_classification),
        "num_epoch": 2,
        "max_epoch": 4,
    }
    updated = update_dict(config, base_config)
    m = SimplePipeline.make(pipeline_type, updated)
    assert isinstance(m, SimplePipeline)
    if cf_data_config is None:
        cf_data_config = {}
    cf_data_config = update_dict(
        cf_data_config,
        dict(
            valid_columns=list(range(x_np.shape[1])),
            label_process_method="identical",
        ),
    )
    data = MLData.with_cf_data(
        x_np,
        y_np,
        is_classification=is_classification,
        cf_data_config=cf_data_config,
        valid_split=0.0,
    )
    m.fit(data, cuda=cuda)
    return m
__all__ = [
"register_core",
"evaluate",
"task_loader",
"load_experiment_results",
"repeat_with",
"pack_repeat",
"pick_from_repeat_and_pack",
"make_toy_model",
"ModelPattern",
"EnsemblePattern",
]
| [
"os.listdir",
"os.path.join",
"cftool.ml.utils.Estimator",
"cftool.ml.utils.Comparer",
"cftool.misc.shallow_copy_dict",
"os.path.isdir",
"cftool.dist.Parallel",
"tqdm.autonotebook.tqdm",
"shutil.rmtree",
"json.load",
"cftool.misc.update_dict"
] | [((5137, 5167), 'cftool.ml.utils.Comparer', 'Comparer', (['patterns', 'estimators'], {}), '(patterns, estimators)\n', (5145, 5167), False, 'from cftool.ml.utils import Comparer\n'), ((5423, 5469), 'os.path.join', 'os.path.join', (['workplace', 'ML_PIPELINE_SAVE_NAME'], {}), '(workplace, ML_PIPELINE_SAVE_NAME)\n', (5435, 5469), False, 'import os\n'), ((5891, 5918), 'tqdm.autonotebook.tqdm', 'tqdm', (['iterator'], {'desc': '"""load"""'}), "(iterator, desc='load')\n", (5895, 5918), False, 'from tqdm.autonotebook import tqdm\n'), ((7246, 7271), 'cftool.misc.shallow_copy_dict', 'shallow_copy_dict', (['kwargs'], {}), '(kwargs)\n', (7263, 7271), False, 'from cftool.misc import shallow_copy_dict\n'), ((14254, 14286), 'cftool.misc.update_dict', 'update_dict', (['config', 'base_config'], {}), '(config, base_config)\n', (14265, 14286), False, 'from cftool.misc import update_dict\n'), ((4587, 4624), 'cftool.misc.update_dict', 'update_dict', (['other_patterns', 'patterns'], {}), '(other_patterns, patterns)\n', (4598, 4624), False, 'from cftool.misc import update_dict\n'), ((4993, 5039), 'cftool.ml.utils.Estimator', 'Estimator', (['metric'], {'metric_config': 'metric_config'}), '(metric, metric_config=metric_config)\n', (5002, 5039), False, 'from cftool.ml.utils import Estimator\n'), ((7077, 7101), 'os.path.isdir', 'os.path.isdir', (['workplace'], {}), '(workplace)\n', (7090, 7101), False, 'import os\n'), ((7208, 7232), 'shutil.rmtree', 'shutil.rmtree', (['workplace'], {}), '(workplace)\n', (7221, 7232), False, 'import shutil\n'), ((7715, 7767), 'os.path.join', 'os.path.join', (['i_latest_workplace', 'CHECKPOINTS_FOLDER'], {}), '(i_latest_workplace, CHECKPOINTS_FOLDER)\n', (7727, 7767), False, 'import os\n'), ((8052, 8077), 'cftool.misc.shallow_copy_dict', 'shallow_copy_dict', (['kwargs'], {}), '(kwargs)\n', (8069, 8077), False, 'from cftool.misc import shallow_copy_dict\n'), ((8271, 8307), 'cftool.misc.shallow_copy_dict', 'shallow_copy_dict', (['local_core_config'], {}), 
'(local_core_config)\n', (8288, 8307), False, 'from cftool.misc import shallow_copy_dict\n'), ((8323, 8354), 'cftool.misc.shallow_copy_dict', 'shallow_copy_dict', (['local_kwargs'], {}), '(local_kwargs)\n', (8340, 8354), False, 'from cftool.misc import shallow_copy_dict\n'), ((11946, 11967), 'os.listdir', 'os.listdir', (['workplace'], {}), '(workplace)\n', (11956, 11967), False, 'import os\n'), ((11991, 12021), 'os.path.join', 'os.path.join', (['workplace', 'stuff'], {}), '(workplace, stuff)\n', (12003, 12021), False, 'import os\n'), ((12474, 12495), 'os.listdir', 'os.listdir', (['workplace'], {}), '(workplace)\n', (12484, 12495), False, 'import os\n'), ((12519, 12549), 'os.path.join', 'os.path.join', (['workplace', 'stuff'], {}), '(workplace, stuff)\n', (12531, 12549), False, 'import os\n'), ((12759, 12819), 'os.path.join', 'os.path.join', (['sub_workplace', 'CHECKPOINTS_FOLDER', 'SCORES_FILE'], {}), '(sub_workplace, CHECKPOINTS_FOLDER, SCORES_FILE)\n', (12771, 12819), False, 'import os\n'), ((12037, 12062), 'os.path.isdir', 'os.path.isdir', (['stuff_path'], {}), '(stuff_path)\n', (12050, 12062), False, 'import os\n'), ((12565, 12590), 'os.path.isdir', 'os.path.isdir', (['stuff_path'], {}), '(stuff_path)\n', (12578, 12590), False, 'import os\n'), ((7798, 7842), 'os.path.join', 'os.path.join', (['checkpoint_folder', 'SCORES_FILE'], {}), '(checkpoint_folder, SCORES_FILE)\n', (7810, 7842), False, 'import os\n'), ((9220, 9281), 'tqdm.autonotebook.tqdm', 'tqdm', (['sub_iterator'], {'total': 'num_repeat', 'position': '(1)', 'leave': '(False)'}), '(sub_iterator, total=num_repeat, position=1, leave=False)\n', (9224, 9281), False, 'from tqdm.autonotebook import tqdm\n'), ((12158, 12176), 'cftool.dist.Parallel', 'Parallel', (['num_jobs'], {}), '(num_jobs)\n', (12166, 12176), False, 'from cftool.dist import Parallel\n'), ((13129, 13147), 'cftool.dist.Parallel', 'Parallel', (['num_jobs'], {}), '(num_jobs)\n', (13137, 13147), False, 'from cftool.dist import Parallel\n'), 
((12891, 12903), 'json.load', 'json.load', (['f'], {}), '(f)\n', (12900, 12903), False, 'import json\n')] |
"""
one agent chooses an action, says it. other agent does it. both get a point if right
this file was forked from mll/discrete_bottleneck_discrete_input.py
"""
import torch
import torch.nn.functional as F
from torch import nn, optim
# from envs.world3c import World
from ulfs import alive_sieve, rl_common
from ulfs.stats import Stats
from ulfs.stochastic_trajectory import StochasticTrajectory
from ulfs.lexicon_recorder import LexiconRecorder
from ulfs.runner_base_v1 import RunnerBase
# this is just for display to human, cos reading 'badccd' gets annoying after a while :P
# this is somewhat based on how kirby 2001 does this
# we can randomize these potentially
phonemes = [
'ba',
'bo',
'bu',
'bi',
'be',
'to',
'ti',
'ta',
'te',
'tu',
'ra',
're',
'ri',
'ru',
'ro',
'la',
'le',
'li',
'lo',
'lu',
'ga',
'ge',
'gi',
'go',
'gu',
'ma',
'me',
'mu',
'mi',
'mo'
]
class AgentOneActionSelector(nn.Module):
    """Chooses which action agent one will describe.

    There is no input: the module holds a single learnable probability
    distribution over the action space.  REINFORCE pushes it around and
    the entropy regularizer keeps it from collapsing onto one action.
    (Earlier designs considered an LSTM / symbolic selector with memory;
    the implemented choice is this memory-less categorical distribution,
    which is simple and lets us study language emergence rather than
    curiosity.)
    """
    def __init__(self, num_actions):
        """num_actions: size of the discrete action space."""
        super().__init__()
        self.num_actions = num_actions
        # start from the uniform distribution over actions
        self.action_distribution = nn.Parameter(torch.zeros(1, num_actions))
        self.action_distribution.data.fill_(1 / num_actions)
    def forward(self, batch_size):
        """Draw `batch_size` stochastic action samples from the learned distribution."""
        # explicit dim: relying on softmax's implicit dim choice is deprecated
        # in torch; for this (1, num_actions) parameter dim=-1 is identical
        probs = F.softmax(self.action_distribution, dim=-1)
        probs = probs.expand(batch_size, self.num_actions)
        return rl_common.draw_categorical_sample(
            action_probs=probs, batch_idxes=None)
class AgentOneLM(nn.Module):
    """
    takes in a discrete action (1-in-k), converts to utterance
    """
    def __init__(self, p, embedding_size, utterance_max, vocab_size, num_actions):
        """
        Note that vocab_size excludes terminator character 0.
        p: params namespace; only `p.rnn_type` ('GRU'/'LSTM'/...) is read here.
        """
        self.embedding_size = embedding_size
        self.utterance_max = utterance_max
        self.num_actions = num_actions
        super().__init__()
        # h1: embeds the 1-in-k action into the initial recurrent state
        self.h1 = nn.Embedding(num_actions, embedding_size)
        # d2e "discrete to embed"
        self.d2e = nn.Embedding(vocab_size + 1, embedding_size)
        RNNCell = getattr(nn, f'{p.rnn_type}Cell')
        self.rnn = RNNCell(embedding_size, embedding_size)
        # e2d: recurrent state -> logits over vocab (+1 for terminator 0)
        self.e2d = nn.Linear(embedding_size, vocab_size + 1)
    def forward(self, actions, global_idxes):
        """
        Roll out one utterance per action, token by token, until the
        terminator token 0 is emitted or `utterance_max` is reached.
        `actions` might have been sieved; `global_idxes` carries each
        row's index in the full episode batch.
        Returns a dict with 'stochastic_trajectory' (token samples;
        populated in training mode only), 'utterance' (batch x
        utterance_max, 0-padded) and 'utterance_lens'.
        """
        batch_size = actions.size()[0]
        x = self.h1(actions)
        state = x
        global_idxes = global_idxes.clone()
        # note that this sieve might start off smaller than the global batch_size
        sieve = alive_sieve.AliveSieve(batch_size=batch_size, enable_cuda=x.is_cuda)
        type_constr = torch.cuda if x.is_cuda else torch
        last_token = type_constr.LongTensor(batch_size).fill_(0)
        utterance = type_constr.LongTensor(batch_size, self.utterance_max).fill_(0)
        # N_outer might not be the full episode batch size, but a subset
        N_outer = type_constr.LongTensor(batch_size).fill_(self.utterance_max)
        stochastic_trajectory = StochasticTrajectory()
        for t in range(self.utterance_max):
            emb = self.d2e(last_token)
            state = self.rnn(emb, state)
            token_logits = self.e2d(state)
            token_probs = F.softmax(token_logits, dim=-1)
            if self.training:
                # training: sample tokens and record them for REINFORCE
                s = rl_common.draw_categorical_sample(
                    action_probs=token_probs, batch_idxes=global_idxes[sieve.global_idxes])
                stochastic_trajectory.append_stochastic_sample(s=s)
                token = s.actions.view(-1)
            else:
                # eval: greedy argmax decoding
                _, token = token_probs.max(-1)
            # scatter this step's tokens back into the full-size utterance buffer
            utterance[:, t][sieve.global_idxes] = token
            last_token = token
            # token 0 terminates an utterance; record its final length
            sieve.mark_dead(last_token == 0)
            sieve.set_global_dead(N_outer, t)
            if sieve.all_dead():
                break
            state = state[sieve.alive_idxes]
            last_token = last_token[sieve.alive_idxes]
            sieve.self_sieve_()
        res = {
            'stochastic_trajectory': stochastic_trajectory,
            'utterance': utterance,
            'utterance_lens': N_outer
        }
        return res
class AgentTwo(nn.Module):
    """Listener agent: reads an utterance and samples an action."""
    def __init__(self, p, embedding_size, vocab_size, num_actions):
        """
        - input: utterance
        - output: action
        p: params namespace; only `p.rnn_type` ('GRU'/'LSTM'/...) is read here.
        """
        super().__init__()
        self.num_actions = num_actions
        self.embedding_size = embedding_size
        # d2e: token id -> embedding (+1 for terminator 0)
        self.d2e = nn.Embedding(vocab_size + 1, embedding_size)
        RNNCell = getattr(nn, f'{p.rnn_type}Cell')
        self.rnn = RNNCell(embedding_size, embedding_size)
        # h1: final recurrent state -> action logits
        self.h1 = nn.Linear(embedding_size, num_actions)
    def forward(self, utterance, global_idxes):
        """
        utterance etc might be sieved, which is why we receive global_idxes
        alive_masks will then create subsets of this already-sieved set
        """
        batch_size = utterance.size()[0]
        utterance_max = utterance.size()[1]
        type_constr = torch.cuda if utterance.is_cuda else torch
        sieve = alive_sieve.AliveSieve(batch_size=batch_size, enable_cuda=utterance.is_cuda)
        state = type_constr.FloatTensor(batch_size, self.embedding_size).fill_(0)
        # output_state keeps, per row, the state at the time its utterance ended
        output_state = state.clone()
        for t in range(utterance_max):
            emb = self.d2e(utterance[:, t])
            state = self.rnn(emb, state)
            output_state[sieve.global_idxes] = state
            # token 0 terminates an utterance; drop finished rows from the batch
            sieve.mark_dead(utterance[:, t] == 0)
            if sieve.all_dead():
                break
            utterance = utterance[sieve.alive_idxes]
            state = state[sieve.alive_idxes]
            sieve.self_sieve_()
        state = output_state
        action_logits = self.h1(state)
        action_probs = F.softmax(action_logits, dim=-1)
        s = rl_common.draw_categorical_sample(
            action_probs=action_probs, batch_idxes=global_idxes)
        return s
def run_episode(actions, one, two, utterance_len_reg, enable_cuda, render=False):
    """Run one speaker->listener episode.

    Agent `one` turns each action into an utterance; agent `two` reads the
    utterance and samples an action back.  `utterance_len_reg`, `enable_cuda`
    and `render` are accepted for caller-interface parity but not consumed here.
    Returns a dict bundling both agents' outputs plus an empty Stats.
    """
    batch_size = actions.size()[0]
    # 0..batch_size-1 as int64, used by both agents to track episode indexes
    global_idxes = torch.arange(batch_size, dtype=torch.int64)
    speaker_out = one(actions=actions, global_idxes=global_idxes)
    listener_sample = two(
        utterance=speaker_out['utterance'],
        global_idxes=global_idxes,
    )
    return {
        'one_stochastic_trajectory': speaker_out['stochastic_trajectory'],
        'two_s': listener_sample,
        'utterances': speaker_out['utterance'],
        'utterances_lens': speaker_out['utterance_lens'],
        'stats': Stats([]),
    }
class Runner(RunnerBase):
    """Training loop: action selector + speaker (one) + listener (two)."""
    def __init__(self):
        # NOTE(review): 'opt_action_selector' is not in save_as_statedict_keys,
        # so its optimizer state is not checkpointed — confirm whether that is
        # intentional before adding it (it would change the checkpoint format).
        super().__init__(
            save_as_statedict_keys=['action_selector', 'one', 'two', 'opt_one', 'opt_two'],
            additional_save_keys=['baseline'],
            step_key='episode'
        )
    def setup(self, p):
        """Build agents, optimizers and stats from the parsed params `p`."""
        num_actions = p.num_actions
        self.lexicon_recorder = LexiconRecorder(num_actions=num_actions)
        self.test_lexicon_recorder = LexiconRecorder(num_actions=num_actions)
        self.action_selector = AgentOneActionSelector(num_actions=num_actions)
        # bugfix: AgentOneLM/AgentTwo take the params namespace `p` as their
        # first argument (they read p.rnn_type); it was previously omitted,
        # which raised a TypeError at startup
        self.one = AgentOneLM(
            p,
            embedding_size=p.embedding_size, vocab_size=p.vocab_size, utterance_max=p.utterance_max,
            num_actions=num_actions)
        self.two = AgentTwo(
            p,
            embedding_size=p.embedding_size, vocab_size=p.vocab_size, num_actions=num_actions)
        if p.enable_cuda:
            self.one = self.one.cuda()
            self.two = self.two.cuda()
            self.action_selector = self.action_selector.cuda()
        Opt = getattr(optim, p.opt)
        self.opt_action_selector = Opt(lr=0.001, params=self.action_selector.parameters())
        self.opt_one = Opt(lr=0.001, params=self.one.parameters())
        self.opt_two = Opt(lr=0.001, params=self.two.parameters())
        self.stats = Stats([
            'episodes_count',
            'baseline_sum',
            'train_len_sum',
            'train_len_count',
            'train_acc_sum',
            'train_rewards_sum',
            'test_acc_sum',
            'test_len_sum',
            'test_len_count',
            'test_rewards_sum'
        ])
        self.baseline = 0
    def step(self, p):
        """One training episode (stochastic) followed by one eval episode (greedy)."""
        render = self.should_render()
        stats = self.stats
        # dopamine per action is initially 1, decreases a bit each time we
        # succeed on the action; gradually topped up over time.  Highly
        # engineered on purpose: we are trying to learn nlp, not curiosity.
        dopamine_per_action = torch.ones(p.num_actions, dtype=torch.float32)
        s_actions_in = self.action_selector(batch_size=p.batch_size)
        actions_in = s_actions_in.actions
        if p.enable_cuda:
            actions_in = actions_in.cuda()
        self.one.train()
        self.two.train()
        episode_result = run_episode(
            actions=actions_in,
            one=self.one, two=self.two,
            render=render, utterance_len_reg=p.utterance_len_reg, enable_cuda=p.enable_cuda)
        utterances, utterances_lens, _stats = map(episode_result.__getitem__, [
            'utterances', 'utterances_lens', 'stats'
        ])
        one_stochastic_trajectory, two_s = map(episode_result.__getitem__, [
            'one_stochastic_trajectory', 'two_s'
        ])
        self.lexicon_recorder.record(
            action_probs_l=[two_s.action_probs], utterances_by_t=[utterances], utterance_lens_by_t=[utterances_lens])
        self.stats.train_len_sum += utterances_lens.sum().item()
        self.stats.train_len_count += len(utterances_lens)
        self.stats.episodes_count += 1
        # reward 1 for a correctly reconstructed action, 0 otherwise;
        # empty utterances earn nothing, and longer utterances are penalized
        correct_mask = two_s.actions == actions_in
        rewards = correct_mask.float()
        zero_length_idxes = (utterances_lens == 0).nonzero().view(-1).long()
        rewards[zero_length_idxes] = 0
        rewards -= utterances_lens.float() * p.utterance_len_reg
        rewards = rewards.clamp(min=0)
        # exponential-moving-average baseline for variance reduction
        self.baseline = 0.7 * self.baseline + 0.3 * rewards.mean().item()
        rewards_std = rewards.detach().std().item()
        baselined_rewards = (rewards - self.baseline)
        if rewards_std > 0:
            baselined_rewards = baselined_rewards / rewards_std
        # NOTE(review): dopamine_per_action is computed but never consumed
        # (cf. the unused --boredom-reg param) — presumably a planned feature
        dopamine_per_action = (dopamine_per_action + 0.1).clamp(max=1.0, min=0.1)
        acc = (two_s.actions == actions_in).float().mean().item()
        stats.train_acc_sum += acc
        stats.train_rewards_sum += rewards.mean().item()
        # REINFORCE losses plus entropy regularization for all three modules
        reinforce_loss_action_selector = s_actions_in.calc_loss(baselined_rewards)
        reinforce_loss_one = one_stochastic_trajectory.calc_loss(baselined_rewards)
        reinforce_loss_two = two_s.calc_loss(baselined_rewards)
        ent_loss_action_selector = - p.actions_ent_reg * s_actions_in.entropy
        ent_loss_one = - p.ent_reg * one_stochastic_trajectory.entropy
        ent_loss_two = - p.ent_reg * two_s.entropy
        loss_action_selector = reinforce_loss_action_selector + ent_loss_action_selector
        loss_one = reinforce_loss_one + ent_loss_one
        loss_two = reinforce_loss_two + ent_loss_two
        self.opt_action_selector.zero_grad()
        loss_action_selector.backward()
        self.opt_action_selector.step()
        self.opt_one.zero_grad()
        loss_one.backward()
        self.opt_one.step()
        self.opt_two.zero_grad()
        loss_two.backward()
        self.opt_two.step()
        stats.baseline_sum += self.baseline
        # ======== eval episode (greedy decoding, no learning) ========
        self.one.eval()
        self.two.eval()
        episode_result = run_episode(
            actions=actions_in,
            one=self.one, two=self.two,
            render=render, utterance_len_reg=p.utterance_len_reg, enable_cuda=p.enable_cuda)
        utterances, utterances_lens, _stats = map(episode_result.__getitem__, [
            'utterances', 'utterances_lens', 'stats'
        ])
        one_stochastic_trajectory, two_s = map(episode_result.__getitem__, [
            'one_stochastic_trajectory', 'two_s'
        ])
        self.test_lexicon_recorder.record(
            action_probs_l=[two_s.action_probs], utterances_by_t=[utterances], utterance_lens_by_t=[utterances_lens])
        test_rewards = (two_s.actions == actions_in).float() * 2 - 1
        test_rewards -= utterances_lens.float() * p.utterance_len_reg
        test_acc = (two_s.actions == actions_in).float().mean().item()
        self.stats.test_acc_sum += test_acc
        self.stats.test_len_sum += utterances_lens.sum().item()
        self.stats.test_len_count += len(utterances_lens)
        stats.test_rewards_sum += test_rewards.mean().item()
        if render:
            lex_stats = self.lexicon_recorder.calc_stats()
            test_lex_stats = self.test_lexicon_recorder.calc_stats()
            print('')
            print('rewards[:16]', rewards[:16])
            self.test_lexicon_recorder.print_lexicon()
            self.lexicon_recorder.reset()
            self.test_lexicon_recorder.reset()
            stats = self.stats
            log_dict = {
                'baseline': stats.baseline_sum / stats.episodes_count,
                'train_acc': stats.train_acc_sum / stats.episodes_count,
                'train_reward': stats.train_rewards_sum / stats.episodes_count,
                'train_utt_len': stats.train_len_sum / stats.train_len_count,
                'train_lex_size': lex_stats['total_unique'],
                'test_reward': stats.test_rewards_sum / stats.episodes_count,
                'test_lex_size': test_lex_stats['total_unique'],
                'test_utt_len': stats.test_len_sum / stats.test_len_count,
                'test_acc': stats.test_acc_sum / stats.episodes_count,
                'actions_ent': s_actions_in.entropy.mean().item()
            }
            for k, v in lex_stats.items():
                log_dict[k] = v
            self.print_and_log(
                log_dict,
                formatstr='e={episode} '
                'b={baseline:.3f} '
                '| train '
                'len {train_utt_len:.2f} '
                'acc {train_acc:.3f} '
                'r {train_reward:.3f} '
                'lex_size {train_lex_size} '
                '| test '
                'len {test_utt_len:.2f} '
                'acc {test_acc:.3f} '
                'r {test_reward:.3f} '
                'lex_size {test_lex_size} '
                'ent {actions_ent:.3f} '
            )
            stats.reset()
if __name__ == '__main__':
runner = Runner()
runner.add_param('--embedding-size', type=int, default=50)
runner.add_param('--num-actions', type=int, default=32)
runner.add_param('--utterance-max', type=int, default=10)
runner.add_param('--utterance-len-reg', type=float, default=0.01, help='how much to penalize longer utterances')
runner.add_param('--ent-reg', type=float, default=0.2)
runner.add_param('--actions-ent-reg', type=float, default=0.2)
runner.add_param('--boredom-reg', type=float, default=0.1,
help='less dopamine for repeated identical successes (I guess this is similar to count-based :/ )')
runner.add_param('--vocab-size', type=int, default=2, help='excludes terminator')
runner.add_param('--batch-size', type=int, default=128)
runner.add_param('--opt', type=str, default='Adam')
runner.add_param('--rnn-type', type=str, default='GRU')
runner.parse_args()
runner.setup_base()
runner.run_base()
| [
"ulfs.stochastic_trajectory.StochasticTrajectory",
"ulfs.stats.Stats",
"torch.LongTensor",
"ulfs.lexicon_recorder.LexiconRecorder",
"ulfs.alive_sieve.AliveSieve",
"ulfs.rl_common.draw_categorical_sample",
"torch.nn.Linear",
"torch.zeros",
"torch.nn.functional.softmax",
"torch.nn.Embedding",
"tor... | [((10652, 10661), 'ulfs.stats.Stats', 'Stats', (['[]'], {}), '([])\n', (10657, 10661), False, 'from ulfs.stats import Stats\n'), ((4997, 5068), 'ulfs.rl_common.draw_categorical_sample', 'rl_common.draw_categorical_sample', ([], {'action_probs': 'probs', 'batch_idxes': 'None'}), '(action_probs=probs, batch_idxes=None)\n', (5030, 5068), False, 'from ulfs import alive_sieve, rl_common\n'), ((5766, 5807), 'torch.nn.Embedding', 'nn.Embedding', (['num_actions', 'embedding_size'], {}), '(num_actions, embedding_size)\n', (5778, 5807), False, 'from torch import nn, optim\n'), ((5862, 5906), 'torch.nn.Embedding', 'nn.Embedding', (['(vocab_size + 1)', 'embedding_size'], {}), '(vocab_size + 1, embedding_size)\n', (5874, 5906), False, 'from torch import nn, optim\n'), ((6036, 6077), 'torch.nn.Linear', 'nn.Linear', (['embedding_size', '(vocab_size + 1)'], {}), '(embedding_size, vocab_size + 1)\n', (6045, 6077), False, 'from torch import nn, optim\n'), ((6543, 6611), 'ulfs.alive_sieve.AliveSieve', 'alive_sieve.AliveSieve', ([], {'batch_size': 'batch_size', 'enable_cuda': 'x.is_cuda'}), '(batch_size=batch_size, enable_cuda=x.is_cuda)\n', (6565, 6611), False, 'from ulfs import alive_sieve, rl_common\n'), ((7002, 7024), 'ulfs.stochastic_trajectory.StochasticTrajectory', 'StochasticTrajectory', ([], {}), '()\n', (7022, 7024), False, 'from ulfs.stochastic_trajectory import StochasticTrajectory\n'), ((8680, 8724), 'torch.nn.Embedding', 'nn.Embedding', (['(vocab_size + 1)', 'embedding_size'], {}), '(vocab_size + 1, embedding_size)\n', (8692, 8724), False, 'from torch import nn, optim\n'), ((8854, 8892), 'torch.nn.Linear', 'nn.Linear', (['embedding_size', 'num_actions'], {}), '(embedding_size, num_actions)\n', (8863, 8892), False, 'from torch import nn, optim\n'), ((9280, 9356), 'ulfs.alive_sieve.AliveSieve', 'alive_sieve.AliveSieve', ([], {'batch_size': 'batch_size', 'enable_cuda': 'utterance.is_cuda'}), '(batch_size=batch_size, enable_cuda=utterance.is_cuda)\n', (9302, 9356), 
False, 'from ulfs import alive_sieve, rl_common\n'), ((9981, 10013), 'torch.nn.functional.softmax', 'F.softmax', (['action_logits'], {'dim': '(-1)'}), '(action_logits, dim=-1)\n', (9990, 10013), True, 'import torch.nn.functional as F\n'), ((10027, 10118), 'ulfs.rl_common.draw_categorical_sample', 'rl_common.draw_categorical_sample', ([], {'action_probs': 'action_probs', 'batch_idxes': 'global_idxes'}), '(action_probs=action_probs, batch_idxes=\n global_idxes)\n', (10060, 10118), False, 'from ulfs import alive_sieve, rl_common\n'), ((11258, 11298), 'ulfs.lexicon_recorder.LexiconRecorder', 'LexiconRecorder', ([], {'num_actions': 'num_actions'}), '(num_actions=num_actions)\n', (11273, 11298), False, 'from ulfs.lexicon_recorder import LexiconRecorder\n'), ((11336, 11376), 'ulfs.lexicon_recorder.LexiconRecorder', 'LexiconRecorder', ([], {'num_actions': 'num_actions'}), '(num_actions=num_actions)\n', (11351, 11376), False, 'from ulfs.lexicon_recorder import LexiconRecorder\n'), ((12201, 12394), 'ulfs.stats.Stats', 'Stats', (["['episodes_count', 'baseline_sum', 'train_len_sum', 'train_len_count',\n 'train_acc_sum', 'train_rewards_sum', 'test_acc_sum', 'test_len_sum',\n 'test_len_count', 'test_rewards_sum']"], {}), "(['episodes_count', 'baseline_sum', 'train_len_sum', 'train_len_count',\n 'train_acc_sum', 'train_rewards_sum', 'test_acc_sum', 'test_len_sum',\n 'test_len_count', 'test_rewards_sum'])\n", (12206, 12394), False, 'from ulfs.stats import Stats\n'), ((12972, 13018), 'torch.ones', 'torch.ones', (['p.num_actions'], {'dtype': 'torch.float32'}), '(p.num_actions, dtype=torch.float32)\n', (12982, 13018), False, 'import torch\n'), ((4700, 4727), 'torch.zeros', 'torch.zeros', (['(1)', 'num_actions'], {}), '(1, num_actions)\n', (4711, 4727), False, 'import torch\n'), ((7218, 7249), 'torch.nn.functional.softmax', 'F.softmax', (['token_logits'], {'dim': '(-1)'}), '(token_logits, dim=-1)\n', (7227, 7249), True, 'import torch.nn.functional as F\n'), ((4912, 4947), 
'torch.nn.functional.softmax', 'F.softmax', (['self.action_distribution'], {}), '(self.action_distribution)\n', (4921, 4947), True, 'import torch.nn.functional as F\n'), ((7301, 7411), 'ulfs.rl_common.draw_categorical_sample', 'rl_common.draw_categorical_sample', ([], {'action_probs': 'token_probs', 'batch_idxes': 'global_idxes[sieve.global_idxes]'}), '(action_probs=token_probs, batch_idxes=\n global_idxes[sieve.global_idxes])\n', (7334, 7411), False, 'from ulfs import alive_sieve, rl_common\n'), ((10282, 10310), 'torch.LongTensor', 'torch.LongTensor', (['batch_size'], {}), '(batch_size)\n', (10298, 10310), False, 'import torch\n')] |
#
# Copyright 2020 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import random
import numpy as np
class Searchspace(object):
"""Create an instance of `Searchspace` from keyword arguments.
A searchspace is essentially a set of key value pairs, defining the
hyperparameters with a name, type and a feasible interval. The keyword
arguments specify name-values pairs for the hyperparameters,
where values are tuples of the form (type, list). Type is a string with
one of the following values:
- DOUBLE
- INTEGER
- DISCRETE
- CATEGORICAL
And the list in the tuple specifies either two values only, the start
and end point of of the feasible interval for DOUBLE and INTEGER,
or the discrete possible values for the types DISCRETE and CATEGORICAL.
Sample usage:
>>> # Define Searchspace
>>> from maggy import Searchspace
>>> # The searchspace can be instantiated with parameters
>>> sp = Searchspace(kernel=('INTEGER', [2, 8]), pool=('INTEGER', [2, 8]))
>>> # Or additional parameters can be added one by one
>>> sp.add('dropout', ('DOUBLE', [0.01, 0.99]))
The `Searchspace` object can also be initialized from a python dictionary:
>>> sp_dict = sp.to_dict()
>>> sp_new = Searchspace(**sp_dict)
The parameter names are added as attributes of `Searchspace` object,
so they can be accessed directly with the dot notation
`searchspace._name_`.
"""
DOUBLE = "DOUBLE"
INTEGER = "INTEGER"
DISCRETE = "DISCRETE"
CATEGORICAL = "CATEGORICAL"
def __init__(self, **kwargs):
self._hparam_types = {}
self._names = []
for name, value in kwargs.items():
self.add(name, value)
def add(self, name, value):
"""Adds {name, value} pair to hyperparameters.
:param name: Name of the hyperparameter
:type name: str
:param value: A tuple of the parameter type and its feasible region
:type value: tuple
:raises ValueError: Hyperparameter name is reserved
:raises ValueError: Hyperparameter feasible region in wrong format
"""
if getattr(self, name, None) is not None:
raise ValueError("Hyperparameter name is reserved: {}".format(name))
if isinstance(value, tuple) or isinstance(value, list):
if len(value) != 2:
raise ValueError(
"Hyperparameter tuple has to be of length "
"two and format (type, list): {0}, {1}".format(name, value)
)
param_type = value[0].upper()
param_values = value[1]
if param_type in [
Searchspace.DOUBLE,
Searchspace.INTEGER,
Searchspace.DISCRETE,
Searchspace.CATEGORICAL,
]:
if len(param_values) == 0:
raise ValueError(
"Hyperparameter feasible region list "
"cannot be empty: {0}, {1}".format(name, param_values)
)
if param_type in [Searchspace.DOUBLE, Searchspace.INTEGER]:
assert len(param_values) == 2, (
"For DOUBLE or INTEGER type parameters, list "
"can only contain upper and lower bounds: {0}, {1}".format(
name, param_values
)
)
if param_type == Searchspace.DOUBLE:
if type(param_values[0]) not in [int, float] or type(
param_values[1]
) not in [int, float]:
raise ValueError(
"Hyperparameter boundaries for type DOUBLE need to be integer "
"or float: {}".format(name)
)
elif param_type == Searchspace.INTEGER:
if type(param_values[0]) != int or type(param_values[1]) != int:
raise ValueError(
"Hyperparameter boundaries for type INTEGER need to be integer: "
"{}".format(name)
)
assert param_values[0] < param_values[1], (
"Lower bound {0} must be "
"less than upper bound {1}: {2}".format(
param_values[0], param_values[1], name
)
)
self._hparam_types[name] = param_type
setattr(self, name, value[1])
self._names.append(name)
else:
raise ValueError(
"Hyperparameter type is not of type DOUBLE, "
"INTEGER, DISCRETE or CATEGORICAL: {}".format(name)
)
else:
raise ValueError("Value is not an appropriate tuple: {}".format(name))
print("Hyperparameter added: {}".format(name))
def to_dict(self):
"""Return the hyperparameters as a Python dictionary.
:return: A dictionary with hyperparameter names as keys. The values are
the hyperparameter values.
:rtype: dict
"""
return {
n: (self._hparam_types[n], getattr(self, n))
for n in self._hparam_types.keys()
}
def names(self):
"""Returns the dictionary with the names and types of all
hyperparameters.
:return: Dictionary of hyperparameter names, with types as value
:rtype: dict
"""
return self._hparam_types
def get(self, name, default=None):
"""Returns the value of `name` if it exists, else `default`."""
if name in self._hparam_types:
return getattr(self, name)
return default
def get_random_parameter_values(self, num):
"""Generate random parameter dictionaries, e.g. to be used for initializing an optimizer.
:param num: number of random parameter dictionaries to be generated.
:type num: int
:raises ValueError: `num` is not an int.
:return: a list containing parameter dictionaries
:rtype: list
"""
return_list = []
for _ in range(num):
params = {}
for name, value in self.names().items():
feasible_region = self.get(name)
if value == Searchspace.DOUBLE:
params[name] = random.uniform(
feasible_region[0], feasible_region[1]
)
elif value == Searchspace.INTEGER:
params[name] = random.randint(
feasible_region[0], feasible_region[1]
)
elif value == Searchspace.DISCRETE:
params[name] = random.choice(feasible_region)
elif value == Searchspace.CATEGORICAL:
params[name] = random.choice(feasible_region)
return_list.append(params)
return return_list
def __iter__(self):
self._returned = self._names.copy()
return self
def __next__(self):
# if list not empty
if self._returned:
# pop from left and get parameter tuple
name = self._returned.pop(0)
return {
"name": name,
"type": self._hparam_types[name],
"values": self.get(name),
}
else:
raise StopIteration
def items(self):
"""Returns a sorted iterable over all hyperparameters in the searchspace.
Allows to iterate over the hyperparameters in a searchspace. The parameters
are sorted in the order of which they were added to the searchspace by the user.
:return: an iterable of the searchspace
:type: Searchspace
"""
# for consistency and serves mainly as syntactic sugar
return self
def keys(self):
"""Returns a sorted iterable list over the names of hyperparameters in
the searchspace.
:return: names of hyperparameters as a list of strings
:type: list
"""
return self._names
def values(self):
"""Returns a sorted iterable list over the types and feasible intervals of
hyperparameters in the searchspace.
:return: types and feasible interval of hyperparameters as tuple
:type: tuple
"""
return [(self._hparam_types[name], self.get(name)) for name in self._names]
def __contains__(self, name):
return name in self._hparam_types
def __str__(self):
return json.dumps(self.to_dict(), sort_keys=True)
def json(self):
return json.dumps(self.to_dict(), sort_keys=True)
def transform(self, hparams, normalize_categorical=False):
"""Transforms array of hypeparameters for one trial.
+--------------+-----------------------------------------------------+
| Hparam Type | Transformation |
+==============+=====================================================+
| DOUBLE | Max-Min Normalization |
+--------------+-----------------------------------------------------+
| INTEGER | Max-Min Normalization |
+--------------+-----------------------------------------------------+
| CATEGORICAL | Encoding: index in list + opt. Max-Min Normalization|
+--------------+-----------------------------------------------------+
:param hparams: hparams in original representation for one trial
:type hparams: 1D np.ndarray
:param normalize_categorical: If True, the encoded categorical hparam is also max-min normalized between 0 and 1
`inverse_transform()` must use the same value for this parameter
:type normalize_categorical: bool
:return: transformed hparams
:rtype: np.ndarray[np.float]
"""
transformed_hparams = []
# loop through hparams
for hparam, hparam_spec in zip(hparams, self.items()):
if hparam_spec["type"] == "DOUBLE":
normalized_hparam = Searchspace._normalize_scalar(
hparam_spec["values"], hparam
)
transformed_hparams.append(normalized_hparam)
elif hparam_spec["type"] == "INTEGER":
normalized_hparam = Searchspace._normalize_integer(
hparam_spec["values"], hparam
)
transformed_hparams.append(normalized_hparam)
elif hparam_spec["type"] == "CATEGORICAL":
encoded_hparam = Searchspace._encode_categorical(
hparam_spec["values"], hparam
)
if normalize_categorical:
encoded_hparam = Searchspace._normalize_integer(
[0, len(hparam_spec["values"]) - 1], encoded_hparam
)
transformed_hparams.append(encoded_hparam)
else:
raise NotImplementedError("Not Implemented other types yet")
return transformed_hparams
def inverse_transform(self, transformed_hparams, normalize_categorical=False):
"""Returns array of hparams in same representation as specified when instantiated
:param transformed_hparams: hparams in transformed representation for one trial
:type transformed_hparams: 1D np.ndarray
:param normalize_categorical: If True, the encoded categorical hparam was also max-min normalized between 0 and 1
`transform()` must use the same value for this parameter
:type normalize_categorical: bool
:return: transformed hparams
:rtype: np.ndarray
"""
hparams = []
for hparam, hparam_spec in zip(transformed_hparams, self.items()):
if hparam_spec["type"] == "DOUBLE":
value = Searchspace._inverse_normalize_scalar(
hparam_spec["values"], hparam
)
hparams.append(value)
elif hparam_spec["type"] == "INTEGER":
value = Searchspace._inverse_normalize_integer(
hparam_spec["values"], hparam
)
hparams.append(value)
elif hparam_spec["type"] == "CATEGORICAL":
if normalize_categorical:
value = Searchspace._inverse_normalize_integer(
[0, len(hparam_spec["values"]) - 1], hparam
)
value = Searchspace._decode_categorical(
hparam_spec["values"], value
)
else:
value = Searchspace._decode_categorical(
hparam_spec["values"], hparam
)
hparams.append(value)
else:
raise NotImplementedError("Not Implemented other types yet")
return hparams
@staticmethod
def _encode_categorical(choices, value):
"""Encodes category to integer. The encoding is the list index of the category
:param choices: possible values of the categorical hparam
:type choices: list
:param value: category to encode
:type value: str
:return: encoded category
:rtype: int
"""
return choices.index(value)
@staticmethod
def _decode_categorical(choices, encoded_value):
"""Decodes integer to corresponding category value
:param choices: possible values of the categorical hparam
:type choices: list
:param encoded_value: encoding of category
:type encoded_value: int
:return: category value
:rtype: str
"""
encoded_value = int(
encoded_value
) # it is possible that value gets casted to np.float by numpy
return choices[encoded_value]
@staticmethod
def _normalize_scalar(bounds, scalar):
"""Returns max-min normalized scalar
:param bounds: list containing lower and upper bound, e.g.: [-3,3]
:type bounds: list
:param scalar: scalar value to be normalized
:type scalar: float
:return: normalized scalar
:rtype: float
"""
scalar = float(scalar)
scalar = (scalar - bounds[0]) / (bounds[1] - bounds[0])
scalar = np.minimum(1.0, scalar)
scalar = np.maximum(0.0, scalar)
return scalar
@staticmethod
def _inverse_normalize_scalar(bounds, normalized_scalar):
"""Returns inverse normalized scalar
:param bounds: list containing lower and upper bound, e.g.: [-3,3]
:type bounds: list
:param normalized_scalar: normalized scalar value
:type normalized_scalar: float
:return: original scalar
:rtype: float
"""
normalized_scalar = float(normalized_scalar)
normalized_scalar = normalized_scalar * (bounds[1] - bounds[0]) + bounds[0]
return normalized_scalar
@staticmethod
def _normalize_integer(bounds, integer):
"""
:param bounds: list containing lower and upper bound, e.g.: [-3,3]
:type bounds: list
:param integer: value to be normalized
:type normalized_scalar: int
:return: normalized value between 0 and 1
:rtype: float
"""
integer = int(integer)
return Searchspace._normalize_scalar(bounds, integer)
@staticmethod
def _inverse_normalize_integer(bounds, scalar):
"""Returns inverse normalized scalar
:param bounds: list containing lower and upper bound, e.g.: [-3,3]
:type bounds: list
:param normalized_scalar: normalized scalar value
:type normalized_scalar: float
:return: original integer
:rtype: int
"""
x = Searchspace._inverse_normalize_scalar(bounds, scalar)
return int(np.round(x))
@staticmethod
def dict_to_list(hparams):
"""Transforms dict of hparams to list representation ( for one hparam config )
example:
{'x': -3.0, 'y': 3.0, 'z': 'green'} to [-3.0, 3.0, 'green']
:param hparams: hparams in dict representation
:type hparams: dict
:return: hparams in list representation
:rtype: list
"""
return list(hparams.values())
def list_to_dict(self, hparams):
"""Transforms list of hparams to dict representation ( for one hparam config )
example:
[-3.0, 3.0, 'green'] to {'x': -3.0, 'y': 3.0, 'z': 'green'}
:param hparams: hparams in list representation
:type hparams: list
:return: hparams in dict representation
:rtype: dict
"""
hparam_names = self.keys()
if len(hparam_names) != len(hparams):
raise ValueError(
"hparam_names and hparams have to have same length (and order!)"
)
hparam_dict = {
hparam_name: hparam for hparam_name, hparam in zip(hparam_names, hparams)
}
return hparam_dict
| [
"random.uniform",
"random.choice",
"numpy.minimum",
"numpy.maximum",
"random.randint",
"numpy.round"
] | [((15258, 15281), 'numpy.minimum', 'np.minimum', (['(1.0)', 'scalar'], {}), '(1.0, scalar)\n', (15268, 15281), True, 'import numpy as np\n'), ((15299, 15322), 'numpy.maximum', 'np.maximum', (['(0.0)', 'scalar'], {}), '(0.0, scalar)\n', (15309, 15322), True, 'import numpy as np\n'), ((16816, 16827), 'numpy.round', 'np.round', (['x'], {}), '(x)\n', (16824, 16827), True, 'import numpy as np\n'), ((7161, 7215), 'random.uniform', 'random.uniform', (['feasible_region[0]', 'feasible_region[1]'], {}), '(feasible_region[0], feasible_region[1])\n', (7175, 7215), False, 'import random\n'), ((7348, 7402), 'random.randint', 'random.randint', (['feasible_region[0]', 'feasible_region[1]'], {}), '(feasible_region[0], feasible_region[1])\n', (7362, 7402), False, 'import random\n'), ((7536, 7566), 'random.choice', 'random.choice', (['feasible_region'], {}), '(feasible_region)\n', (7549, 7566), False, 'import random\n'), ((7657, 7687), 'random.choice', 'random.choice', (['feasible_region'], {}), '(feasible_region)\n', (7670, 7687), False, 'import random\n')] |
import numpy as np
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
from sklearn.gaussian_process import GaussianProcessRegressor
import sklearn.gaussian_process.kernels as Kernels
from scipy.optimize import minimize
from numpy.linalg import norm
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected as tf_layer
class Kernel_Optimization():
def __init__(self, dict_mat=None, kernel_type='RBF', CV=5, X=np.array([[1,2],[2,3],[3,4]]) , y=np.array([[1],[2],[3]]),
All_material = ['K+','P-']):
self._kernel_type = kernel_type
self.All_material = All_material
kernel = getattr(Kernels,kernel_type)
self.dict_mat = dict_mat
if kernel_type =='ExpSineSquared':
param_grid = {"alpha": [1e0, 1e-1, 1e-2, 1e-3],
"kernel": [kernel(length_scale=l,periodicity=p)
for l in np.logspace(-2, 2, 500)
for p in np.logspace(-2, 2, 500)]}
elif kernel_type =='RBF':
param_grid = {"alpha": [1e0, 1e-1, 1e-2, 1e-3],
"kernel": [kernel(length_scale=l)
for l in np.logspace(-2, 2, 100)]}
self._CV = CV
self.kr= GridSearchCV(KernelRidge(), cv=self._CV, param_grid=param_grid)
self.X , self.y = X, y
self.kr.fit(self.X, self.y)
def kr_func(self, x):
return self.kr.predict(x)
def constraint(self, x):
''' Create Constraints for physically-consistent solvent decomposition
sum_cat x_i = 1.0 & sum_an x_i = 1.0 , x_i > 0 for both cation and anaion
'''
n_cations = 0
n_anions = 0
for k in self.All_material:
if k[-1] =='+':
n_cations += 1
else:
n_anions += 1
n_constraints = len(self.All_material)+ 2
for cnt, m in enumerate(self.All_material):
if m[:-1] in self.dict_mat.keys():
n_constraints -= 1
if x[cnt] <0 or x[cnt] > 1:
n_constraints += 1
val_constraints = np.zeros((n_constraints))
cat_list = []
an_list = []
# active (user selected) materials constraints
for k, v in self.dict_mat.items():
if v =='+':
cat_list.append(k)
if v =='-':
an_list.append(k)
cnt = 2
for i in range(len(self.All_material)):
if self.All_material[i][:-1] in cat_list:
val_constraints[0] += x[i]
elif self.All_material[i][:-1] in an_list:
val_constraints[1] += x[i]
else:
val_constraints[cnt] += x[i]
cnt += 1
if x[i] < 0 or x[i] > 1:
val_constraints[cnt] += x[i]
cnt += 1
val_constraints[0] -= 1.0
val_constraints[1] -= 1.0
return val_constraints
def minimize_func(self, optimal, sig,i=0):
if i==0:
optimal = self.X[np.random.randint(self.X.shape[0])]
def funct(x):
const = self.constraint(x)
f = 0
for i in range(len(const)):
f += sig*max(0.0, const[i]**2)
return self.kr_func(x) + f
res = minimize(funct, optimal, method='nelder-mead', options={'xtol': 1e-16, 'disp': False, 'maxiter': 1000})
optimal = res.x
return optimal
| [
"scipy.optimize.minimize",
"numpy.array",
"numpy.random.randint",
"numpy.zeros",
"sklearn.kernel_ridge.KernelRidge",
"numpy.logspace"
] | [((479, 513), 'numpy.array', 'np.array', (['[[1, 2], [2, 3], [3, 4]]'], {}), '([[1, 2], [2, 3], [3, 4]])\n', (487, 513), True, 'import numpy as np\n'), ((513, 538), 'numpy.array', 'np.array', (['[[1], [2], [3]]'], {}), '([[1], [2], [3]])\n', (521, 538), True, 'import numpy as np\n'), ((2240, 2263), 'numpy.zeros', 'np.zeros', (['n_constraints'], {}), '(n_constraints)\n', (2248, 2263), True, 'import numpy as np\n'), ((3503, 3610), 'scipy.optimize.minimize', 'minimize', (['funct', 'optimal'], {'method': '"""nelder-mead"""', 'options': "{'xtol': 1e-16, 'disp': False, 'maxiter': 1000}"}), "(funct, optimal, method='nelder-mead', options={'xtol': 1e-16,\n 'disp': False, 'maxiter': 1000})\n", (3511, 3610), False, 'from scipy.optimize import minimize\n'), ((1324, 1337), 'sklearn.kernel_ridge.KernelRidge', 'KernelRidge', ([], {}), '()\n', (1335, 1337), False, 'from sklearn.kernel_ridge import KernelRidge\n'), ((3233, 3267), 'numpy.random.randint', 'np.random.randint', (['self.X.shape[0]'], {}), '(self.X.shape[0])\n', (3250, 3267), True, 'import numpy as np\n'), ((960, 983), 'numpy.logspace', 'np.logspace', (['(-2)', '(2)', '(500)'], {}), '(-2, 2, 500)\n', (971, 983), True, 'import numpy as np\n'), ((1018, 1041), 'numpy.logspace', 'np.logspace', (['(-2)', '(2)', '(500)'], {}), '(-2, 2, 500)\n', (1029, 1041), True, 'import numpy as np\n'), ((1233, 1256), 'numpy.logspace', 'np.logspace', (['(-2)', '(2)', '(100)'], {}), '(-2, 2, 100)\n', (1244, 1256), True, 'import numpy as np\n')] |
from functools import partial
from .landmark import asf_importer, pts_importer
asf_image_importer = partial(asf_importer, image_origin=True)
asf_image_importer.__doc__ = asf_importer.__doc__
pts_image_importer = partial(pts_importer, image_origin=True)
pts_image_importer.__doc__ = pts_importer.__doc__
| [
"functools.partial"
] | [((103, 143), 'functools.partial', 'partial', (['asf_importer'], {'image_origin': '(True)'}), '(asf_importer, image_origin=True)\n', (110, 143), False, 'from functools import partial\n'), ((216, 256), 'functools.partial', 'partial', (['pts_importer'], {'image_origin': '(True)'}), '(pts_importer, image_origin=True)\n', (223, 256), False, 'from functools import partial\n')] |
import os
# exemplo alterado de EX_10.5.py para 10_5.py
for nome in os.listdir('./Minicurso/Minicurso API'):
# alterar conforme sua necessidade de geração de nomes e layout de arquivos
os.rename("./Minicurso/Minicurso API/"+nome, "./Minicurso/Minicurso API/"+nome+"_Minicurso_API.png")
print("arquivo " + nome + " alterado para " +nome+"_Minicurso_API") | [
"os.rename",
"os.listdir"
] | [((70, 109), 'os.listdir', 'os.listdir', (['"""./Minicurso/Minicurso API"""'], {}), "('./Minicurso/Minicurso API')\n", (80, 109), False, 'import os\n'), ((200, 310), 'os.rename', 'os.rename', (["('./Minicurso/Minicurso API/' + nome)", "('./Minicurso/Minicurso API/' + nome + '_Minicurso_API.png')"], {}), "('./Minicurso/Minicurso API/' + nome, './Minicurso/Minicurso API/' +\n nome + '_Minicurso_API.png')\n", (209, 310), False, 'import os\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
if sys.version_info < (3, 5):
raise RuntimeError("The minimum support Python 3.5")
from setuptools import find_packages
from setuptools import setup
from HTMLReport import __version__, __author__
try:
from pypandoc import convert
read_md = convert('README.md', 'rst')
except ImportError:
print("warning: pypandoc module not found, could not convert Markdown to RST")
read_md = open('README.md', 'r', encoding="utf-8").read()
setup(
name='HTMLReport',
version=__version__,
description="Python3 Unittest HTML报告生成器",
long_description=read_md,
author=__author__,
author_email='<EMAIL>',
url='https://github.com/liushilive/HTMLReport',
project_urls={
'The report template': 'https://liushilive.github.io/report/report/#en',
'报告样板': 'https://liushilive.github.io/report/report/#cn'
},
packages=find_packages(),
package_dir={'HTMLReport': 'HTMLReport'},
include_package_data=True,
license="MIT license",
zip_safe=False,
keywords='HtmlTestRunner test runner html reports unittest',
classifiers=[
'Development Status :: 6 - Mature',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Testing :: Unit',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: User Interfaces',
'License :: OSI Approved :: MIT License',
'Natural Language :: Chinese (Simplified)',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
test_suite='tests'
)
| [
"pypandoc.convert",
"setuptools.find_packages"
] | [((329, 356), 'pypandoc.convert', 'convert', (['"""README.md"""', '"""rst"""'], {}), "('README.md', 'rst')\n", (336, 356), False, 'from pypandoc import convert\n'), ((959, 974), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (972, 974), False, 'from setuptools import find_packages\n')] |
import os
import torch
import torch.nn as nn
import numpy as np
import pickle
class BaseModel(nn.Module):
def __init__(self):
super(BaseModel, self).__init__()
def name(self):
return 'BaseModel'
def initialize(self, opt):
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
def set_input(self, input):
self.input = input
def forward(self):
pass
# used in test time, no backprop
def test(self):
pass
def get_image_paths(self):
pass
def optimize_parameters(self):
pass
def get_current_visuals(self):
return self.input
def get_current_errors(self):
return {}
def save(self, label):
pass
# helper saving function that can be used by subclasses
def save_network(self, network, network_label, epoch_label, gpu_ids, epoch, total_steps):
save_filename = '%s_%s.pth' % (epoch_label, network_label)
save_infoname = '%s.pkl' % (epoch_label)
save_path = os.path.join(self.save_dir, save_filename)
save_infoname = os.path.join(self.save_dir, save_infoname)
torch.save(network.cpu().state_dict(), save_path)
network.cuda()
info = {'epoch':epoch, 'total_steps':total_steps}
filehandler = open(save_infoname, "wb")
pickle.dump(info, filehandler)
filehandler.close()
# helper loading function that can be used by subclasses
def load_network(self, network, network_label, epoch_label):
save_filename = '%s_%s.pth' % (epoch_label, network_label)
save_path = os.path.join(self.save_dir, save_filename)
if os.path.exists(save_path):
network.load_state_dict(torch.load(save_path))
print("Found checkpoints. Network loaded.")
else:
print("Not found checkpoints. Network from scratch.")
# update learning rate (called once every epoch)
def update_learning_rate(self):
for scheduler in self.schedulers:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate = %.7f' % lr)
| [
"pickle.dump",
"os.path.exists",
"torch.load",
"os.path.join"
] | [((450, 493), 'os.path.join', 'os.path.join', (['opt.checkpoints_dir', 'opt.name'], {}), '(opt.checkpoints_dir, opt.name)\n', (462, 493), False, 'import os\n'), ((1203, 1245), 'os.path.join', 'os.path.join', (['self.save_dir', 'save_filename'], {}), '(self.save_dir, save_filename)\n', (1215, 1245), False, 'import os\n'), ((1270, 1312), 'os.path.join', 'os.path.join', (['self.save_dir', 'save_infoname'], {}), '(self.save_dir, save_infoname)\n', (1282, 1312), False, 'import os\n'), ((1525, 1555), 'pickle.dump', 'pickle.dump', (['info', 'filehandler'], {}), '(info, filehandler)\n', (1536, 1555), False, 'import pickle\n'), ((1798, 1840), 'os.path.join', 'os.path.join', (['self.save_dir', 'save_filename'], {}), '(self.save_dir, save_filename)\n', (1810, 1840), False, 'import os\n'), ((1852, 1877), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (1866, 1877), False, 'import os\n'), ((1915, 1936), 'torch.load', 'torch.load', (['save_path'], {}), '(save_path)\n', (1925, 1936), False, 'import torch\n')] |
from JumpScale import j
def cb():
from .ms1 import MS1Factory
return MS1Factory()
j.base.loader.makeAvailable(j, 'tools')
j.tools._register('ms1', cb)
| [
"JumpScale.j.tools._register",
"JumpScale.j.base.loader.makeAvailable"
] | [((92, 131), 'JumpScale.j.base.loader.makeAvailable', 'j.base.loader.makeAvailable', (['j', '"""tools"""'], {}), "(j, 'tools')\n", (119, 131), False, 'from JumpScale import j\n'), ((132, 160), 'JumpScale.j.tools._register', 'j.tools._register', (['"""ms1"""', 'cb'], {}), "('ms1', cb)\n", (149, 160), False, 'from JumpScale import j\n')] |
## temp utility
from __future__ import print_function
import frappe
from erpnext.utilities.activation import get_level
from frappe.utils import cstr
def update_doctypes():
	"""One-off migration: upgrade description-like Text fields of child
	DocTypes (istable = 1) to the richer "Text Editor" fieldtype."""
	rows = frappe.db.sql("""select df.parent, df.fieldname
		from tabDocField df, tabDocType dt where df.fieldname
		like "%description%" and df.parent = dt.name and dt.istable = 1""", as_dict=1)
	for row in rows:
		doctype = frappe.get_doc("DocType", row.parent)
		for field in doctype.fields:
			name_matches = field.fieldname == row.fieldname
			if name_matches and field.fieldtype in ("Text", "Small Text"):
				print(field.parent, field.fieldname)
				field.fieldtype = "Text Editor"
				doctype.save()
				break
def get_site_info(site_info):
	"""Hook: report the default company, its domain and the activation level."""
	company = frappe.db.get_single_value('Global Defaults', 'default_company')

	if not company:
		# No default configured; fall back to the oldest company, if any.
		rows = frappe.db.sql('select name from `tabCompany` order by creation asc')
		company = rows[0][0] if rows else None

	domain = frappe.db.get_value('Company', cstr(company), 'domain') if company else None

	return {
		'company': company,
		'domain': domain,
		'activation': get_level()
	}
| [
"frappe.db.get_single_value",
"erpnext.utilities.activation.get_level",
"frappe.db.sql",
"frappe.get_doc",
"frappe.utils.cstr"
] | [((183, 377), 'frappe.db.sql', 'frappe.db.sql', (['"""select df.parent, df.fieldname\n\t\tfrom tabDocField df, tabDocType dt where df.fieldname\n\t\tlike "%description%" and df.parent = dt.name and dt.istable = 1"""'], {'as_dict': '(1)'}), '(\n """select df.parent, df.fieldname\n\t\tfrom tabDocField df, tabDocType dt where df.fieldname\n\t\tlike "%description%" and df.parent = dt.name and dt.istable = 1"""\n , as_dict=1)\n', (196, 377), False, 'import frappe\n'), ((661, 725), 'frappe.db.get_single_value', 'frappe.db.get_single_value', (['"""Global Defaults"""', '"""default_company"""'], {}), "('Global Defaults', 'default_company')\n", (687, 725), False, 'import frappe\n'), ((376, 411), 'frappe.get_doc', 'frappe.get_doc', (['"""DocType"""', 'd.parent'], {}), "('DocType', d.parent)\n", (390, 411), False, 'import frappe\n'), ((771, 839), 'frappe.db.sql', 'frappe.db.sql', (['"""select name from `tabCompany` order by creation asc"""'], {}), "('select name from `tabCompany` order by creation asc')\n", (784, 839), False, 'import frappe\n'), ((1037, 1048), 'erpnext.utilities.activation.get_level', 'get_level', ([], {}), '()\n', (1046, 1048), False, 'from erpnext.utilities.activation import get_level\n'), ((943, 956), 'frappe.utils.cstr', 'cstr', (['company'], {}), '(company)\n', (947, 956), False, 'from frappe.utils import cstr\n')] |
"""Simulate a Map Reduce Scenario where timeout prevention is required.
In this simulation we are using an Optimizer created for map reduce scenarios.
This improves the distribution of the computation no matter how the interest is formated.
Scenario consists of a distributor node (NFN0), four NFN nodes with one repo each, and a client. Goal of the simulation is to run several computations concurrently without running into timeouts.
Client <--------> NFN0 <-*-----------> NFN1 <-----------> Repo1
\-----------> NFN2 <-----------> Repo2
\-----------> NFN3 <-----------> Repo3
\-----------> NFN4 <-----------> Repo4
"""
import abc
import queue
import unittest
import os
import shutil
import time
import _thread
import threading
from PiCN.Layers.LinkLayer.Interfaces import SimulationBus
from PiCN.Layers.LinkLayer.Interfaces import AddressInfo
from PiCN.Layers.NFNLayer.NFNOptimizer import MapReduceOptimizer
from PiCN.ProgramLibs.Fetch import Fetch
from PiCN.ProgramLibs.NFNForwarder import NFNForwarder
from PiCN.ProgramLibs.ICNForwarder import ICNForwarder
from PiCN.ProgramLibs.ICNDataRepository import ICNDataRepository
from PiCN.Layers.PacketEncodingLayer.Encoder import BasicEncoder, SimpleStringEncoder, NdnTlvEncoder
from PiCN.Packets import Content, Interest, Name
from PiCN.Mgmt import MgmtClient
class Fs_thread(threading.Thread):
    """Worker thread that fetches the result of one NFN computation."""

    def __init__(self, name: Name, fetch_tool):
        super().__init__()
        self.name = name
        self.fetch_tool = fetch_tool

    def run(self):
        # Delegate so the request can also be issued synchronously.
        self.request_function()

    def request_function(self):
        """Issue the fetch; a 10 second timeout bounds the request."""
        self.fetch_tool.fetch_data(self.name, timeout=10)
class Initiation(unittest.TestCase):
    """Simulate a Map Reduce Scenario where timeout prevention is required.

    Topology: a client (fetch tool) talks to a distributor NFN node, which
    forwards /lib computations to nfn1-3 and /lib4/func4 to nfn4; each NFN
    node is backed by exactly one data repository.
    """

    @abc.abstractmethod
    def get_encoder(self) -> BasicEncoder:
        """Return the packet encoder class used by every node in the setup."""
        return SimpleStringEncoder

    def _make_nfn(self, face_name):
        """Create an NFN forwarder attached to the simulation bus."""
        return NFNForwarder(port=0, encoder=self.encoder_type(),
                            interfaces=[self.simulation_bus.add_interface(face_name)],
                            log_level=255, ageing_interval=3)

    def _make_repo(self, path, prefix, face_name):
        """Create a data repository attached to the simulation bus."""
        return ICNDataRepository(path, Name(prefix), 0, 255, self.encoder_type(),
                                 False, False,
                                 [self.simulation_bus.add_interface(face_name)])

    def setUp(self):
        """Build the simulated network: client, distributor, 4 NFN nodes, 4 repos."""
        self.encoder_type = self.get_encoder()
        self.simulation_bus = SimulationBus(packetencoder=self.encoder_type())
        self.fetch_tool1 = Fetch("distributor", None, 255, self.encoder_type(),
                                interfaces=[self.simulation_bus.add_interface("fetchtool1")])
        self.distributor = self._make_nfn("distributor")
        self.nfn1 = self._make_nfn("nfn1")
        self.nfn2 = self._make_nfn("nfn2")
        self.nfn3 = self._make_nfn("nfn3")
        self.nfn4 = self._make_nfn("nfn4")
        self.repo1 = self._make_repo("/tmp/repo1", "/repo/r1", "repo1")
        self.repo2 = self._make_repo("/tmp/repo2", "/repo/r2", "repo2")
        self.repo3 = self._make_repo("/tmp/repo3", "/repo/r3", "repo3")
        self.repo4 = self._make_repo("/tmp/repo4", "/repo/r4", "repo4")

        # Long PIT timeout so pending computations survive; content store
        # entries expire immediately (no caching between requests).
        for nfn in (self.nfn1, self.nfn2, self.nfn3, self.nfn4):
            nfn.icnlayer.pit.set_pit_timeout(50)
            nfn.icnlayer.cs.set_cs_timeout(0)

        self.mgmt_client0 = MgmtClient(self.distributor.mgmt.mgmt_sock.getsockname()[1])
        self.mgmt_client1 = MgmtClient(self.nfn1.mgmt.mgmt_sock.getsockname()[1])
        self.mgmt_client2 = MgmtClient(self.nfn2.mgmt.mgmt_sock.getsockname()[1])
        self.mgmt_client3 = MgmtClient(self.nfn3.mgmt.mgmt_sock.getsockname()[1])
        self.mgmt_client4 = MgmtClient(self.nfn4.mgmt.mgmt_sock.getsockname()[1])

    def tearDown(self):
        """Stop every forwarder, repo and the bus; remove the repo directories."""
        self.distributor.stop_forwarder()
        self.nfn1.stop_forwarder()
        self.nfn2.stop_forwarder()
        self.nfn3.stop_forwarder()
        self.nfn4.stop_forwarder()
        self.repo1.stop_repo()
        self.repo2.stop_repo()
        self.repo3.stop_repo()
        self.repo4.stop_repo()
        self.fetch_tool1.stop_fetch()
        self.simulation_bus.stop_process()
        self.tearDown_repo()

    def setup_faces_and_connections(self):
        """Start all components, then wire up FIB entries and function code."""
        self.distributor.start_forwarder()
        self.nfn1.start_forwarder()
        self.nfn2.start_forwarder()
        self.nfn3.start_forwarder()
        self.nfn4.start_forwarder()
        self.repo1.start_repo()
        self.repo2.start_repo()
        self.repo3.start_repo()
        self.repo4.start_repo()
        self.simulation_bus.start_process()
        time.sleep(3)  # give every process time to come up

        # Distributor: /lib is reachable via nfn1-3, /lib4/func4 only via nfn4.
        self.mgmt_client0.add_face("nfn1", None, 0)
        self.mgmt_client0.add_forwarding_rule(Name("/lib"), [0])
        self.mgmt_client0.add_face("nfn2", None, 0)
        self.mgmt_client0.add_forwarding_rule(Name("/lib"), [1])
        self.mgmt_client0.add_face("nfn3", None, 0)
        self.mgmt_client0.add_forwarding_rule(Name("/lib"), [2])
        self.mgmt_client0.add_face("nfn4", None, 0)
        self.mgmt_client0.add_forwarding_rule(Name("/lib4/func4"), [3])

        # Each NFN node reaches its own repo ...
        self.mgmt_client1.add_face("repo1", None, 0)
        self.mgmt_client1.add_forwarding_rule(Name("/repo/r1"), [0])
        self.mgmt_client2.add_face("repo2", None, 0)
        self.mgmt_client2.add_forwarding_rule(Name("/repo/r2"), [0])
        self.mgmt_client3.add_face("repo3", None, 0)
        self.mgmt_client3.add_forwarding_rule(Name("/repo/r3"), [0])
        self.mgmt_client4.add_face("repo4", None, 0)
        self.mgmt_client4.add_forwarding_rule(Name("/repo/r4"), [0])

        # ... and a route back towards the distributor.
        self.mgmt_client1.add_face("nfn0", None, 0)
        self.mgmt_client1.add_forwarding_rule(Name("/lib"), [1])
        self.mgmt_client2.add_face("nfn0", None, 0)
        self.mgmt_client2.add_forwarding_rule(Name("/lib"), [1])
        self.mgmt_client3.add_face("nfn0", None, 0)
        self.mgmt_client3.add_forwarding_rule(Name("/lib"), [1])
        self.mgmt_client4.add_face("nfn0", None, 0)
        self.mgmt_client4.add_forwarding_rule(Name("/lib4/func4"), [1])

        # Install the function code served by each node.
        self.mgmt_client1.add_new_content(Name("/lib/func1"), "PYTHON\nf\ndef f():\n result =[]\n x,y =0,1\n while x<n:\n  result.append(x)\n  x,y = y, y+x\n return result")
        self.mgmt_client2.add_new_content(Name("/lib/func1"), "PYTHON\nf\ndef f(n):\n result =[]\n x,y =0,1\n while x<n:\n  result.append(x)\n  x,y = y, y+x\n return result")
        self.mgmt_client3.add_new_content(Name("/lib/func1"), "PYTHON\nf\ndef f(n):\n result =[]\n x,y =0,1\n while x<n:\n  result.append(x)\n  x,y = y, y+x\n return result")
        self.mgmt_client4.add_new_content(Name("/lib4/func4"), "func4")

    def setup_repo(self):
        """Create /tmp/repo1..4, each holding a single file data<i>."""
        for i in range(1, 5):
            self.path = "/tmp/repo" + str(i)
            try:
                os.stat(self.path)
            except FileNotFoundError:
                os.mkdir(self.path)
            with open(self.path + "/data" + str(i), 'w+') as content_file:
                content_file.write("data" + str(i))

    def tearDown_repo(self):
        """Best-effort removal of the repo directories created by setup_repo."""
        try:
            shutil.rmtree(self.path)
            os.remove("/tmp/repo")
        except OSError:
            pass

    def test_simple_Fs(self):
        """Issue five concurrent /lib/func1 computations through the distributor."""
        self.setup_repo()
        self.setup_faces_and_connections()

        # One NFN interest per argument list; all are fetched concurrently.
        args = ['_()', '_(500000)', '_(5000)', '_(900000000000000000)', '_(68899455874)']
        threads = []
        for arg in args:
            name = Name("/lib/func1")
            name += arg
            name += "NFN"
            threads.append(Fs_thread(name, fetch_tool=self.fetch_tool1))
        for t in threads:
            t.start()
        for t in threads:
            t.join()
"threading.Thread.__init__",
"time.sleep",
"PiCN.Packets.Name",
"os.mkdir",
"shutil.rmtree",
"os.stat",
"os.remove"
] | [((1371, 1402), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (1396, 1402), False, 'import threading\n'), ((5732, 5745), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (5742, 5745), False, 'import time\n'), ((9476, 9494), 'PiCN.Packets.Name', 'Name', (['"""/lib/func1"""'], {}), "('/lib/func1')\n", (9480, 9494), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((9558, 9576), 'PiCN.Packets.Name', 'Name', (['"""/lib/func1"""'], {}), "('/lib/func1')\n", (9562, 9576), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((9646, 9664), 'PiCN.Packets.Name', 'Name', (['"""/lib/func1"""'], {}), "('/lib/func1')\n", (9650, 9664), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((9732, 9750), 'PiCN.Packets.Name', 'Name', (['"""/lib/func1"""'], {}), "('/lib/func1')\n", (9736, 9750), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((9832, 9850), 'PiCN.Packets.Name', 'Name', (['"""/lib/func1"""'], {}), "('/lib/func1')\n", (9836, 9850), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((3318, 3334), 'PiCN.Packets.Name', 'Name', (['"""/repo/r1"""'], {}), "('/repo/r1')\n", (3322, 3334), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((3517, 3533), 'PiCN.Packets.Name', 'Name', (['"""/repo/r2"""'], {}), "('/repo/r2')\n", (3521, 3533), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((3716, 3732), 'PiCN.Packets.Name', 'Name', (['"""/repo/r3"""'], {}), "('/repo/r3')\n", (3720, 3732), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((3915, 3931), 'PiCN.Packets.Name', 'Name', (['"""/repo/r4"""'], {}), "('/repo/r4')\n", (3919, 3931), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((5878, 5890), 'PiCN.Packets.Name', 'Name', (['"""/lib"""'], {}), "('/lib')\n", (5882, 5890), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((5995, 6007), 'PiCN.Packets.Name', 'Name', (['"""/lib"""'], {}), "('/lib')\n", (5999, 
6007), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((6112, 6124), 'PiCN.Packets.Name', 'Name', (['"""/lib"""'], {}), "('/lib')\n", (6116, 6124), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((6229, 6248), 'PiCN.Packets.Name', 'Name', (['"""/lib4/func4"""'], {}), "('/lib4/func4')\n", (6233, 6248), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((6355, 6371), 'PiCN.Packets.Name', 'Name', (['"""/repo/r1"""'], {}), "('/repo/r1')\n", (6359, 6371), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((6477, 6493), 'PiCN.Packets.Name', 'Name', (['"""/repo/r2"""'], {}), "('/repo/r2')\n", (6481, 6493), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((6599, 6615), 'PiCN.Packets.Name', 'Name', (['"""/repo/r3"""'], {}), "('/repo/r3')\n", (6603, 6615), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((6721, 6737), 'PiCN.Packets.Name', 'Name', (['"""/repo/r4"""'], {}), "('/repo/r4')\n", (6725, 6737), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((6844, 6856), 'PiCN.Packets.Name', 'Name', (['"""/lib"""'], {}), "('/lib')\n", (6848, 6856), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((6961, 6973), 'PiCN.Packets.Name', 'Name', (['"""/lib"""'], {}), "('/lib')\n", (6965, 6973), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((7078, 7090), 'PiCN.Packets.Name', 'Name', (['"""/lib"""'], {}), "('/lib')\n", (7082, 7090), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((7195, 7214), 'PiCN.Packets.Name', 'Name', (['"""/lib4/func4"""'], {}), "('/lib4/func4')\n", (7199, 7214), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((7390, 7408), 'PiCN.Packets.Name', 'Name', (['"""/lib/func1"""'], {}), "('/lib/func1')\n", (7394, 7408), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((7572, 7590), 'PiCN.Packets.Name', 'Name', (['"""/lib/func1"""'], {}), "('/lib/func1')\n", (7576, 7590), False, 'from PiCN.Packets import 
Content, Interest, Name\n'), ((7826, 7844), 'PiCN.Packets.Name', 'Name', (['"""/lib/func1"""'], {}), "('/lib/func1')\n", (7830, 7844), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((8009, 8028), 'PiCN.Packets.Name', 'Name', (['"""/lib4/func4"""'], {}), "('/lib4/func4')\n", (8013, 8028), False, 'from PiCN.Packets import Content, Interest, Name\n'), ((9267, 9291), 'shutil.rmtree', 'shutil.rmtree', (['self.path'], {}), '(self.path)\n', (9280, 9291), False, 'import shutil\n'), ((9304, 9326), 'os.remove', 'os.remove', (['"""/tmp/repo"""'], {}), "('/tmp/repo')\n", (9313, 9326), False, 'import os\n'), ((9010, 9028), 'os.stat', 'os.stat', (['self.path'], {}), '(self.path)\n', (9017, 9028), False, 'import os\n'), ((9065, 9084), 'os.mkdir', 'os.mkdir', (['self.path'], {}), '(self.path)\n', (9073, 9084), False, 'import os\n')] |
from requests import Session
import urllib.parse
import json
class ServerFunctions:
    """Names of the Moodle web-service (REST) functions used by MoodleClient.

    Pass one of these constants as the ``function`` argument of
    ``MoodleClient.server``.
    """
    SITE_INFO = "core_webservice_get_site_info"
    ALL_COURSES = "core_course_get_courses_by_field"
    USER_COURSES = "core_enrol_get_users_courses"
    COURSE_CONTENTS = "core_course_get_contents"
    ASSIGNMENTS = "mod_assign_get_assignments"
    ASSIGNMENT_STATUS = "mod_assign_get_submission_status"
    URLS = "mod_url_get_urls_by_courses"
    RESOURCES = "mod_resource_get_resources_by_courses"
class MoodleClient:
    """Minimal client for the Moodle mobile web-service REST API."""

    def __init__(self, baseurl):
        self.baseurl = baseurl
        self.login_url = urllib.parse.urljoin(baseurl, "login/token.php")
        self.server_url = urllib.parse.urljoin(baseurl, "webservice/rest/server.php")
        self.session = Session()
        self.token = ""

    def response(self, url, **data):
        """POST *data* to *url* and return the raw response object."""
        return self.session.post(url, data)

    def response_json(self, url, **data):
        """POST *data* to *url* and return the JSON-decoded response body."""
        raw = self.response(url, **data)
        return json.loads(raw.content)

    def authenticate(self, username, password):
        """Obtain and store a web-service token; return it, or False on failure."""
        login = self.response_json(
            self.login_url,
            username=username,
            password=password,
            service="moodle_mobile_app",
        )
        try:
            self.token = login["token"]
        except KeyError:
            # Moodle returns an error payload without a token on bad credentials.
            return False
        return self.token

    def server(self, function, **data):
        """Call web-service *function* with *data*, using the stored token."""
        return self.response_json(
            self.server_url,
            wsfunction=function,
            wstoken=self.token,
            moodlewsrestformat="json",
            **data
        )

    def close(self):
        """Release the underlying HTTP session."""
        self.session.close()
| [
"json.loads",
"requests.Session"
] | [((756, 765), 'requests.Session', 'Session', ([], {}), '()\n', (763, 765), False, 'from requests import Session\n'), ((976, 1004), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (986, 1004), False, 'import json\n')] |
import numpy as np
from gym import spaces
from agents import SimpleAgentClass
# Create agents for the CMA-ES, NEAT and WANN agents
# defined in the weight-agnostic paper repo:
# https://github.com/google/brain-tokyo-workshop/tree/master/WANNRelease/
# -------------------------------------------------------------------
# Here begins copy/paste from WANNRelease code linked above
def weightedRandom(weights):
  """Returns random index, with each choices chance weighted
  Args:
    weights - (np_array) - weighting of each choice
              [N X 1]

  Returns:
    i       - (int)      - chosen index
  """
  # Shift so the smallest weight becomes 0 (handles negative values),
  # then draw a point on the cumulative scale and find its bucket.
  shifted = weights - np.min(weights)
  thresholds = np.cumsum(shifted)
  pick = np.random.uniform(0, thresholds[-1])
  for i, bound in enumerate(thresholds):
    if bound >= pick:
      return i
def _softmax(x):
  """Numerically stable softmax over the last meaningful axis.

  The original WANN code called a module-level ``softmax`` that was never
  defined in this file (NameError on the 'softmax' branch); this private
  helper restores the implementation from the WANN release.
  """
  if x.ndim == 1:
    e_x = np.exp(x - np.max(x))
    return e_x / np.sum(e_x, axis=0)
  else:
    e_x = np.exp(x.T - np.max(x, axis=1))
    return (e_x / np.sum(e_x, axis=0)).T


def selectAct(action, actSelect):
  """Selects action based on vector of actions

  Single Action:
  - Hard:  a single action is chosen based on the highest index
  - Prob:  a single action is chosen probablistically with higher values
           more likely to be chosen

  We aren't selecting a single action:
  - Softmax: a softmax normalized distribution of values is returned
  - Default: all actions are returned

  Args:
    action   - (np_array) - vector weighting each possible action
                [N X 1]

  Returns:
    i         - (int) or (np_array)     - chosen index
                         [N X 1]
  """
  if actSelect == 'softmax':
    action = _softmax(action)
  elif actSelect == 'prob':
    # weightedRandom is defined alongside this function in the same module.
    action = weightedRandom(np.sum(action, axis=0))
  else:
    action = action.flatten()
  return action
def act(weights, aVec, nInput, nOutput, inPattern):
  """Returns FFANN output given a single input pattern
  If the variable weights is a vector it is turned into a square weight matrix.

  Allows the network to return the result of several samples at once if given a matrix instead of a vector of inputs:
      Dim 0 : individual samples
      Dim 1 : dimensionality of pattern (# of inputs)

  Args:
    weights   - (np_array) - ordered weight matrix or vector
                [N X N] or [N**2]
    aVec      - (np_array) - activation function of each node
                [N X 1]    - stored as ints (see applyAct in ann.py)
    nInput    - (int)      - number of input nodes
    nOutput   - (int)      - number of output nodes
    inPattern - (np_array) - input activation
                [1 X nInput] or [nSamples X nInput]

  Returns:
    output    - (np_array) - output activation
                [1 X nOutput] or [nSamples X nOutput]
  """
  # Turn weight vector into weight matrix
  if np.ndim(weights) < 2:
      nNodes = int(np.sqrt(np.shape(weights)[0]))
      wMat = np.reshape(weights, (nNodes, nNodes))
  else:
      nNodes = np.shape(weights)[0]
      wMat = weights
  # NOTE(review): when a 2-D matrix is passed, wMat aliases the caller's
  # array, so this NaN-to-zero substitution mutates the argument in place.
  wMat[np.isnan(wMat)]=0
  # Vectorize input
  if np.ndim(inPattern) > 1:
      nSamples = np.shape(inPattern)[0]
  else:
      nSamples = 1
  # Run input pattern through ANN
  nodeAct = np.zeros((nSamples,nNodes))
  nodeAct[:,0] = 1 # Bias activation
  nodeAct[:,1:nInput+1] = inPattern
  # Propagate signal through hidden to output nodes.
  # Assumes nodes are ordered so node i only receives from nodes < i
  # (later columns are still zero when earlier ones are computed) — TODO confirm.
  iNode = nInput+1  # redundant: immediately rebound by the loop below
  for iNode in range(nInput+1,nNodes):
      rawAct = np.dot(nodeAct, wMat[:,iNode]).squeeze()
      nodeAct[:,iNode] = applyAct(aVec[iNode], rawAct)
      #print(nodeAct)
  output = nodeAct[:,-nOutput:]
  return output
def applyAct(actId, x):
  """Returns value after an activation function is applied
  Lookup table to allow activations to be stored in numpy arrays

  case 1  -- Linear
  case 2  -- Unsigned Step Function
  case 3  -- Sin
  case 4  -- Gausian with mean 0 and sigma 1
  case 5  -- Hyperbolic Tangent [tanh] (signed)
  case 6  -- Sigmoid unsigned [1 / (1 + exp(-x))]
  case 7  -- Inverse
  case 8  -- Absolute Value
  case 9  -- Relu
  case 10 -- Cosine
  case 11 -- Squared

  Args:
    actId   - (int)   - key to look up table
    x       - (???)   - value to be input into activation
              [? X ?] - any type or dimensionality

  Returns:
    output  - (float) - value after activation is applied
              [? X ?] - same dimensionality as input
  """
  # The original mixed a stand-alone `if` with an `elif` chain, so actId == 1
  # fell through to the final `else` and was assigned twice. One clean chain
  # is equivalent for every actId (unknown ids still pass x through).
  if actId == 1:    # Linear
    value = x
  elif actId == 2:  # Unsigned Step Function
    value = 1.0*(x>0.0)
    #value = (np.tanh(50*x/2.0) + 1.0)/2.0
  elif actId == 3:  # Sin
    value = np.sin(np.pi*x)
  elif actId == 4:  # Gaussian with mean 0 and sigma 1
    value = np.exp(-np.multiply(x, x) / 2.0)
  elif actId == 5:  # Hyperbolic Tangent (signed)
    value = np.tanh(x)
  elif actId == 6:  # Sigmoid (unsigned)
    value = (np.tanh(x/2.0) + 1.0)/2.0
  elif actId == 7:  # Inverse
    value = -x
  elif actId == 8:  # Absolute Value
    value = abs(x)
  elif actId == 9:  # Relu
    value = np.maximum(0, x)
  elif actId == 10: # Cosine
    value = np.cos(np.pi*x)
  elif actId == 11: # Squared
    value = x**2
  else:             # Unknown id: identity
    value = x
  return value
# End of copypaste
# -------------------------------------------------------------------
# This action is original to this repository
def create_wann_agent(agent_path, agent_type, env):
    """
    Load and return a WANN agent.
    The agent has a function `get_action` that takes in
    an observation and returns an appropiate action.
    """
    checkpoint = np.load(agent_path)
    wMat = checkpoint["wMat"]
    aVec = checkpoint["aVec"]

    # TODO support for other input spaces?
    nInput = env.observation_space.shape[0]

    space = env.action_space
    if isinstance(space, spaces.Box):
        nOutput, action_type = space.shape[0], "all"
    elif isinstance(space, spaces.Discrete):
        nOutput, action_type = space.n, "prob"
    else:
        raise ValueError("Unsupported action space")

    def get_action(obs):
        # Forward pass through the network, then pick/format the action.
        return selectAct(act(wMat, aVec, nInput, nOutput, obs), action_type)

    return SimpleAgentClass(get_action)
| [
"numpy.multiply",
"numpy.reshape",
"numpy.maximum",
"numpy.min",
"numpy.ndim",
"numpy.tanh",
"numpy.sum",
"numpy.zeros",
"numpy.dot",
"numpy.isnan",
"numpy.cos",
"numpy.random.uniform",
"numpy.sin",
"numpy.cumsum",
"numpy.shape",
"numpy.load"
] | [((644, 659), 'numpy.min', 'np.min', (['weights'], {}), '(weights)\n', (650, 659), True, 'import numpy as np\n'), ((727, 745), 'numpy.cumsum', 'np.cumsum', (['weights'], {}), '(weights)\n', (736, 745), True, 'import numpy as np\n'), ((757, 789), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'cumVal[-1]'], {}), '(0, cumVal[-1])\n', (774, 789), True, 'import numpy as np\n'), ((3155, 3183), 'numpy.zeros', 'np.zeros', (['(nSamples, nNodes)'], {}), '((nSamples, nNodes))\n', (3163, 3183), True, 'import numpy as np\n'), ((5582, 5601), 'numpy.load', 'np.load', (['agent_path'], {}), '(agent_path)\n', (5589, 5601), True, 'import numpy as np\n'), ((2747, 2763), 'numpy.ndim', 'np.ndim', (['weights'], {}), '(weights)\n', (2754, 2763), True, 'import numpy as np\n'), ((2836, 2873), 'numpy.reshape', 'np.reshape', (['weights', '(nNodes, nNodes)'], {}), '(weights, (nNodes, nNodes))\n', (2846, 2873), True, 'import numpy as np\n'), ((2954, 2968), 'numpy.isnan', 'np.isnan', (['wMat'], {}), '(wMat)\n', (2962, 2968), True, 'import numpy as np\n'), ((3002, 3020), 'numpy.ndim', 'np.ndim', (['inPattern'], {}), '(inPattern)\n', (3009, 3020), True, 'import numpy as np\n'), ((2901, 2918), 'numpy.shape', 'np.shape', (['weights'], {}), '(weights)\n', (2909, 2918), True, 'import numpy as np\n'), ((3045, 3064), 'numpy.shape', 'np.shape', (['inPattern'], {}), '(inPattern)\n', (3053, 3064), True, 'import numpy as np\n'), ((4581, 4598), 'numpy.sin', 'np.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (4587, 4598), True, 'import numpy as np\n'), ((1649, 1671), 'numpy.sum', 'np.sum', (['action'], {'axis': '(0)'}), '(action, axis=0)\n', (1655, 1671), True, 'import numpy as np\n'), ((3394, 3425), 'numpy.dot', 'np.dot', (['nodeAct', 'wMat[:, iNode]'], {}), '(nodeAct, wMat[:, iNode])\n', (3400, 3425), True, 'import numpy as np\n'), ((2798, 2815), 'numpy.shape', 'np.shape', (['weights'], {}), '(weights)\n', (2806, 2815), True, 'import numpy as np\n'), ((4772, 4782), 'numpy.tanh', 'np.tanh', 
(['x'], {}), '(x)\n', (4779, 4782), True, 'import numpy as np\n'), ((4679, 4696), 'numpy.multiply', 'np.multiply', (['x', 'x'], {}), '(x, x)\n', (4690, 4696), True, 'import numpy as np\n'), ((4848, 4864), 'numpy.tanh', 'np.tanh', (['(x / 2.0)'], {}), '(x / 2.0)\n', (4855, 4864), True, 'import numpy as np\n'), ((5035, 5051), 'numpy.maximum', 'np.maximum', (['(0)', 'x'], {}), '(0, x)\n', (5045, 5051), True, 'import numpy as np\n'), ((5103, 5120), 'numpy.cos', 'np.cos', (['(np.pi * x)'], {}), '(np.pi * x)\n', (5109, 5120), True, 'import numpy as np\n')] |
import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
class Anime_Dataset(Dataset):
    """Dataset of anime face images with multi-hot hair/eye-colour tag labels.

    Each line of the label file is ``<image_id> <tag> <tag> ...``; images are
    loaded lazily from ``config.image_dir`` as ``<image_id>.jpg``.
    """

    def __init__(self, config, transform):
        self.config = config
        self.transform = transform
        # Read the whole label file up front and close the handle
        # (the original leaked the file object).
        with open(config.label_path, 'r') as label_file:
            self.lines = label_file.readlines()
        self.num_data = len(self.lines)
        self.image_ids = []
        self.labels = []
        # Fixed tag -> index mapping for the multi-hot label vectors.
        self.tag_dict = {'orange_hair': 0, 'white_hair': 1, 'aqua_hair': 2, 'gray_hair': 3, 'green_hair': 4,
                         'red_hair': 5, 'purple_hair': 6, 'pink_hair': 7, 'blue_hair': 8, 'black_hair': 9,
                         'brown_hair': 10, 'blonde_hair': 11, 'gray_eyes': 12, 'black_eyes': 13, 'orange_eyes': 14,
                         'pink_eyes': 15, 'yellow_eyes': 16, 'aqua_eyes': 17, 'purple_eyes': 18, 'green_eyes': 19,
                         'brown_eyes': 20, 'red_eyes': 21, 'blue_eyes': 22, 'bicolored_eyes': 23}
        print('preprocessing...')
        print('number of images: ', self.num_data)
        self.preprocess()

    def __len__(self):
        return self.num_data

    def __getitem__(self, index):
        """Return (matching image, its label, random mismatched image)."""
        correct_image = Image.open(os.path.join(self.config.image_dir, self.image_ids[index] + '.jpg'))
        correct_text = self.labels[index]
        # A randomly chosen image serves as the "wrong" (mismatched) sample.
        random_index = np.random.randint(low=0, high=self.num_data)
        wrong_image = Image.open(os.path.join(self.config.image_dir, self.image_ids[random_index] + '.jpg'))
        return self.transform(correct_image), torch.Tensor(correct_text), self.transform(wrong_image)

    def preprocess(self):
        """Parse every label line into an image id and a multi-hot tag vector."""
        for line in self.lines:
            splits = line.split()
            image_id = splits[0]
            one_hot = np.zeros(len(self.tag_dict))
            for value in splits[1:]:
                one_hot[self.tag_dict[value]] = 1
            self.labels.append(one_hot)
            self.image_ids.append(image_id)

    def generate_embedding(self):
        """Return multi-hot embeddings for a fixed set of test tag strings."""
        test_str = ['blue_hair, red_eyes', 'brown_hair, brown_eyes', 'black_hair, blue_eyes', 'red_hair, green_eyes']
        embeddings = {}
        # Loop variable renamed from 'str' to stop shadowing the builtin.
        for tag_string in test_str:
            one_hot = np.zeros(len(self.tag_dict))
            for tag in tag_string.split(', '):
                one_hot[self.tag_dict[tag]] = 1
            embeddings[tag_string] = one_hot
        return embeddings
def get_loader(config):
    """Build the training DataLoader and the fixed test embeddings.

    Returns:
        (DataLoader, dict): batches of (image, label, wrong_image) triples,
        and a mapping of test tag strings to multi-hot embeddings.
    """
    transform = transforms.Compose([
        # transforms.CenterCrop(config.crop_size),
        # transforms.Scale is a deprecated alias (removed in newer torchvision);
        # Resize is the supported, behaviour-identical replacement.
        transforms.Resize(config.image_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5),  # 3 for RGB channels
                             std=(0.5, 0.5, 0.5))
    ])

    dataset = Anime_Dataset(config, transform)
    print('generating test embeddings...')
    embeddings = dataset.generate_embedding()

    data_loader = DataLoader(dataset,
                             config.batch_size,
                             shuffle=True,
                             num_workers=4,
                             drop_last=True)
    return data_loader, embeddings
| [
"torchvision.transforms.Scale",
"os.path.join",
"torch.Tensor",
"torchvision.transforms.RandomHorizontalFlip",
"numpy.random.randint",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"torchvision.transforms.ToTensor"
] | [((3071, 3158), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset', 'config.batch_size'], {'shuffle': '(True)', 'num_workers': '(4)', 'drop_last': '(True)'}), '(dataset, config.batch_size, shuffle=True, num_workers=4,\n drop_last=True)\n', (3081, 3158), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1436, 1480), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'self.num_data'}), '(low=0, high=self.num_data)\n', (1453, 1480), True, 'import numpy as np\n'), ((1221, 1288), 'os.path.join', 'os.path.join', (['self.config.image_dir', "(self.image_ids[index] + '.jpg')"], {}), "(self.config.image_dir, self.image_ids[index] + '.jpg')\n", (1233, 1288), False, 'import os\n'), ((1514, 1588), 'os.path.join', 'os.path.join', (['self.config.image_dir', "(self.image_ids[random_index] + '.jpg')"], {}), "(self.config.image_dir, self.image_ids[random_index] + '.jpg')\n", (1526, 1588), False, 'import os\n'), ((1636, 1662), 'torch.Tensor', 'torch.Tensor', (['correct_text'], {}), '(correct_text)\n', (1648, 1662), False, 'import torch\n'), ((2673, 2708), 'torchvision.transforms.Scale', 'transforms.Scale', (['config.image_size'], {}), '(config.image_size)\n', (2689, 2708), False, 'from torchvision import transforms\n'), ((2718, 2751), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (2749, 2751), False, 'from torchvision import transforms\n'), ((2761, 2782), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2780, 2782), False, 'from torchvision import transforms\n'), ((2792, 2855), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '(0.5, 0.5, 0.5)', 'std': '(0.5, 0.5, 0.5)'}), '(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n', (2812, 2855), False, 'from torchvision import transforms\n')] |
# Quickie script for refreshing the local objects.inv cache
# OVERWRITES EXISTING FILES, WITH PRE-DELETION
def pullobjs():
import os
import urllib.request as urlrq
import certifi
# Open conf.py, retrieve content and compile
with open(os.path.join(os.pardir, 'conf.py'), 'r') as f:
confcode = compile(f.read(), 'conf.py', 'exec')
# Execute conf.py into the global namespace (I know, sloppy)
exec(confcode, globals())
# Iterate intersphinx_mapping from conf.py to retrieve the objects.inv files
# Make use of the conf.py 'isphx_objstr' substitution string, too
for n, t in intersphinx_mapping.items():
print('{0}:\n'.format(n) + '-' * 16)
try:
os.remove(isphx_objstr.format(n))
except FileNotFoundError:
pass # No big deal
try:
resp = urlrq.urlopen(t[0] + '/objects.inv', cafile=certifi.where())
except Exception as e:
print('HTTP request failed:\n' + str(e) + '\n')
continue
else:
print('... located ...')
try:
b_s = resp.read()
except Exception as e:
print('Download failed:\n' + str(e) + '\n')
continue
else:
print('... downloaded ...')
try:
with open(isphx_objstr.format(n), 'wb') as f:
f.write(b_s)
except Exception as e:
print('Write failed:\n' + str(e) + '\n')
continue
else:
print('... done.')
print('')
if __name__ == '__main__':
pullobjs()
| [
"certifi.where",
"os.path.join"
] | [((260, 294), 'os.path.join', 'os.path.join', (['os.pardir', '"""conf.py"""'], {}), "(os.pardir, 'conf.py')\n", (272, 294), False, 'import os\n'), ((904, 919), 'certifi.where', 'certifi.where', ([], {}), '()\n', (917, 919), False, 'import certifi\n')] |
import os,sys
import shutil
import pandas as pd
data=pd.read_csv('D:/MachineLearning/AnimalClassification/train.csv')
Im_id=data['Image_id']
Animal=data['Animal']
dic_data=dict()
for i in range(0,len(Im_id)):
dic_data[Im_id[i].strip()]=Animal[i].strip()
source_dir='D:/MachineLearning/AnimalClassification/Images/train'
folder='D:/MachineLearning/AnimalClassificationUsingCNN'
lis=os.listdir(source_dir)
j=0
for filename in lis:
src_filename=os.path.join(source_dir,filename)
temp=os.path.join(folder,'Images/test/'+dic_data[filename])
if j<9000:
temp=os.path.join(folder,'Images/train/'+dic_data[filename])
if not os.path.exists(temp):
os.makedirs(temp)
dst_filename=os.path.join(temp,filename)
if os.path.isfile(src_filename) and not os.path.isfile(dst_filename):
shutil.copy(src_filename,temp)
j=j+1;
lis=os.listdir(folder+'/Images/val');
for i in lis:
temp=folder+'/Images/train/'+i.strip();
temp1=folder+'/Images/val/'+i.strip();
if not os.path.exists(temp1):
os.makedirs(temp1)
k=os.listdir(temp1)
for j in range(0,int(len(k))):
shutil.move(temp1+'/'+k[j],temp+'/'+k[j])
lis=os.listdir(folder+'/Images/train')
min=None
for i in lis:
temp=folder+'/Images/train/'+i.strip()
temp=os.listdir(temp)
print(len(temp)) | [
"os.path.exists",
"os.listdir",
"os.makedirs",
"pandas.read_csv",
"shutil.move",
"os.path.join",
"os.path.isfile",
"shutil.copy"
] | [((54, 118), 'pandas.read_csv', 'pd.read_csv', (['"""D:/MachineLearning/AnimalClassification/train.csv"""'], {}), "('D:/MachineLearning/AnimalClassification/train.csv')\n", (65, 118), True, 'import pandas as pd\n'), ((384, 406), 'os.listdir', 'os.listdir', (['source_dir'], {}), '(source_dir)\n', (394, 406), False, 'import os, sys\n'), ((829, 863), 'os.listdir', 'os.listdir', (["(folder + '/Images/val')"], {}), "(folder + '/Images/val')\n", (839, 863), False, 'import os, sys\n'), ((1114, 1150), 'os.listdir', 'os.listdir', (["(folder + '/Images/train')"], {}), "(folder + '/Images/train')\n", (1124, 1150), False, 'import os, sys\n'), ((446, 480), 'os.path.join', 'os.path.join', (['source_dir', 'filename'], {}), '(source_dir, filename)\n', (458, 480), False, 'import os, sys\n'), ((486, 543), 'os.path.join', 'os.path.join', (['folder', "('Images/test/' + dic_data[filename])"], {}), "(folder, 'Images/test/' + dic_data[filename])\n", (498, 543), False, 'import os, sys\n'), ((680, 708), 'os.path.join', 'os.path.join', (['temp', 'filename'], {}), '(temp, filename)\n', (692, 708), False, 'import os, sys\n'), ((1014, 1031), 'os.listdir', 'os.listdir', (['temp1'], {}), '(temp1)\n', (1024, 1031), False, 'import os, sys\n'), ((1218, 1234), 'os.listdir', 'os.listdir', (['temp'], {}), '(temp)\n', (1228, 1234), False, 'import os, sys\n'), ((560, 618), 'os.path.join', 'os.path.join', (['folder', "('Images/train/' + dic_data[filename])"], {}), "(folder, 'Images/train/' + dic_data[filename])\n", (572, 618), False, 'import os, sys\n'), ((624, 644), 'os.path.exists', 'os.path.exists', (['temp'], {}), '(temp)\n', (638, 644), False, 'import os, sys\n'), ((648, 665), 'os.makedirs', 'os.makedirs', (['temp'], {}), '(temp)\n', (659, 665), False, 'import os, sys\n'), ((713, 741), 'os.path.isfile', 'os.path.isfile', (['src_filename'], {}), '(src_filename)\n', (727, 741), False, 'import os, sys\n'), ((783, 814), 'shutil.copy', 'shutil.copy', (['src_filename', 'temp'], {}), '(src_filename, 
temp)\n', (794, 814), False, 'import shutil\n'), ((967, 988), 'os.path.exists', 'os.path.exists', (['temp1'], {}), '(temp1)\n', (981, 988), False, 'import os, sys\n'), ((992, 1010), 'os.makedirs', 'os.makedirs', (['temp1'], {}), '(temp1)\n', (1003, 1010), False, 'import os, sys\n'), ((1066, 1116), 'shutil.move', 'shutil.move', (["(temp1 + '/' + k[j])", "(temp + '/' + k[j])"], {}), "(temp1 + '/' + k[j], temp + '/' + k[j])\n", (1077, 1116), False, 'import shutil\n'), ((750, 778), 'os.path.isfile', 'os.path.isfile', (['dst_filename'], {}), '(dst_filename)\n', (764, 778), False, 'import os, sys\n')] |
#!/usr/bin/env python
import os
import sys
import glob
import hashlib
sys.path.insert(0, os.pardir)
from testing_harness import PyAPITestHarness
from input_set import PinCellInputSet
import openmc
import openmc.mgxs
class MGXSTestHarness(PyAPITestHarness):
def _build_inputs(self):
# Set the input set to use the pincell model
self._input_set = PinCellInputSet()
# Generate inputs using parent class routine
super(MGXSTestHarness, self)._build_inputs()
# Initialize a two-group structure
energy_groups = openmc.mgxs.EnergyGroups(group_edges=[0, 0.625e-6,
20.])
# Initialize MGXS Library for a few cross section types
self.mgxs_lib = openmc.mgxs.Library(self._input_set.geometry)
self.mgxs_lib.by_nuclide = False
self.mgxs_lib.mgxs_types = ['total', 'absorption', 'nu-fission matrix',
'nu-scatter matrix', 'multiplicity matrix']
self.mgxs_lib.energy_groups = energy_groups
self.mgxs_lib.correction = None
self.mgxs_lib.legendre_order = 3
self.mgxs_lib.domain_type = 'material'
self.mgxs_lib.build_library()
# Initialize a tallies file
self._input_set.tallies = openmc.Tallies()
self.mgxs_lib.add_to_tallies_file(self._input_set.tallies, merge=False)
self._input_set.tallies.export_to_xml()
def _run_openmc(self):
# Initial run
if self._opts.mpi_exec is not None:
returncode = openmc.run(mpi_procs=self._opts.mpi_np,
openmc_exec=self._opts.exe,
mpi_exec=self._opts.mpi_exec)
else:
returncode = openmc.run(openmc_exec=self._opts.exe)
assert returncode == 0, 'CE OpenMC calculation did not exit' \
'successfully.'
# Build MG Inputs
# Get data needed to execute Library calculations.
statepoint = glob.glob(os.path.join(os.getcwd(), self._sp_name))[0]
sp = openmc.StatePoint(statepoint)
self.mgxs_lib.load_from_statepoint(sp)
self._input_set.mgxs_file, self._input_set.materials, \
self._input_set.geometry = self.mgxs_lib.create_mg_mode()
# Modify settings so we can run in MG mode
self._input_set.settings.cross_sections = './mgxs.xml'
self._input_set.settings.energy_mode = 'multi-group'
# Write modified input files
self._input_set.settings.export_to_xml()
self._input_set.geometry.export_to_xml()
self._input_set.materials.export_to_xml()
self._input_set.mgxs_file.export_to_xml()
# Dont need tallies.xml, so remove the file
if os.path.exists('./tallies.xml'):
os.remove('./tallies.xml')
# Re-run MG mode.
if self._opts.mpi_exec is not None:
returncode = openmc.run(mpi_procs=self._opts.mpi_np,
openmc_exec=self._opts.exe,
mpi_exec=self._opts.mpi_exec)
else:
returncode = openmc.run(openmc_exec=self._opts.exe)
def _cleanup(self):
super(MGXSTestHarness, self)._cleanup()
f = os.path.join(os.getcwd(), 'mgxs.xml')
if os.path.exists(f):
os.remove(f)
if __name__ == '__main__':
harness = MGXSTestHarness('statepoint.10.*', False)
harness.main()
| [
"openmc.run",
"os.path.exists",
"sys.path.insert",
"input_set.PinCellInputSet",
"os.remove",
"os.getcwd",
"openmc.mgxs.Library",
"openmc.StatePoint",
"openmc.mgxs.EnergyGroups",
"openmc.Tallies"
] | [((71, 100), 'sys.path.insert', 'sys.path.insert', (['(0)', 'os.pardir'], {}), '(0, os.pardir)\n', (86, 100), False, 'import sys\n'), ((368, 385), 'input_set.PinCellInputSet', 'PinCellInputSet', ([], {}), '()\n', (383, 385), False, 'from input_set import PinCellInputSet\n'), ((561, 618), 'openmc.mgxs.EnergyGroups', 'openmc.mgxs.EnergyGroups', ([], {'group_edges': '[0, 6.25e-07, 20.0]'}), '(group_edges=[0, 6.25e-07, 20.0])\n', (585, 618), False, 'import openmc\n'), ((769, 814), 'openmc.mgxs.Library', 'openmc.mgxs.Library', (['self._input_set.geometry'], {}), '(self._input_set.geometry)\n', (788, 814), False, 'import openmc\n'), ((1305, 1321), 'openmc.Tallies', 'openmc.Tallies', ([], {}), '()\n', (1319, 1321), False, 'import openmc\n'), ((2113, 2142), 'openmc.StatePoint', 'openmc.StatePoint', (['statepoint'], {}), '(statepoint)\n', (2130, 2142), False, 'import openmc\n'), ((2799, 2830), 'os.path.exists', 'os.path.exists', (['"""./tallies.xml"""'], {}), "('./tallies.xml')\n", (2813, 2830), False, 'import os\n'), ((3350, 3367), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (3364, 3367), False, 'import os\n'), ((1569, 1670), 'openmc.run', 'openmc.run', ([], {'mpi_procs': 'self._opts.mpi_np', 'openmc_exec': 'self._opts.exe', 'mpi_exec': 'self._opts.mpi_exec'}), '(mpi_procs=self._opts.mpi_np, openmc_exec=self._opts.exe,\n mpi_exec=self._opts.mpi_exec)\n', (1579, 1670), False, 'import openmc\n'), ((1779, 1817), 'openmc.run', 'openmc.run', ([], {'openmc_exec': 'self._opts.exe'}), '(openmc_exec=self._opts.exe)\n', (1789, 1817), False, 'import openmc\n'), ((2844, 2870), 'os.remove', 'os.remove', (['"""./tallies.xml"""'], {}), "('./tallies.xml')\n", (2853, 2870), False, 'import os\n'), ((2967, 3068), 'openmc.run', 'openmc.run', ([], {'mpi_procs': 'self._opts.mpi_np', 'openmc_exec': 'self._opts.exe', 'mpi_exec': 'self._opts.mpi_exec'}), '(mpi_procs=self._opts.mpi_np, openmc_exec=self._opts.exe,\n mpi_exec=self._opts.mpi_exec)\n', (2977, 3068), False, 'import 
openmc\n'), ((3177, 3215), 'openmc.run', 'openmc.run', ([], {'openmc_exec': 'self._opts.exe'}), '(openmc_exec=self._opts.exe)\n', (3187, 3215), False, 'import openmc\n'), ((3314, 3325), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3323, 3325), False, 'import os\n'), ((3381, 3393), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (3390, 3393), False, 'import os\n'), ((2068, 2079), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2077, 2079), False, 'import os\n')] |
import os
from setuptools import setup, find_packages
from cloudwatch_metrics.version import VERSION
with open(os.path.join(os.path.dirname(__file__),
'README.md')) as readme:
README = readme.read()
setup(
name='cloudwatch_metrics',
version=VERSION,
description='The Cloudwatch Metrics package enables Python developers to record'
' and emit information from within their applications to the Cloudwatch service.',
long_description=README,
long_description_content_type='text/markdown',
url='https://github.com/random1st/cloudwatch-metrics',
author='Amazon Web Services',
license="GPLv3",
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
install_requires=[
'aiobotocore',
'boto3<=1.16.52',
'async-property'
],
keywords='aws cloudwatch metrics',
packages=find_packages(exclude=['tests*']),
include_package_data=True
)
| [
"os.path.dirname",
"setuptools.find_packages"
] | [((1220, 1253), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests*']"}), "(exclude=['tests*'])\n", (1233, 1253), False, 'from setuptools import setup, find_packages\n'), ((127, 152), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (142, 152), False, 'import os\n')] |
import matplotlib.pyplot as plt
from skimage import measure, morphology
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import numpy as np
import pandas as pd
def plot_slice(img, slice=80):
# Show some slice in the middle
plt.imshow(img[slice])
plt.show()
def plot_3d(image, threshold=-100):
# Position the scan upright,
# so the head of the patient would be at the top facing the camera
# p = image.transpose(2,1,0)
p = image
results = measure.marching_cubes(p, threshold)
verts = results[0]
faces = results[1]
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
# Fancy indexing: `verts[faces]` to generate a collection of triangles
mesh = Poly3DCollection(verts[faces], alpha=0.70)
face_color = [0.45, 0.45, 0.75]
mesh.set_facecolor(face_color)
ax.add_collection3d(mesh)
ax.set_xlim(0, p.shape[0])
ax.set_ylim(0, p.shape[1])
ax.set_zlim(0, p.shape[2])
plt.savefig('plot3d.png')
def save(arr, pth):
with open(pth, 'wb+') as fh:
np.savez_compressed(fh, data=arr)
def load(pth):
return np.load(pth)['data']
def read_mapping_file(pth):
return pd.read_csv(pth)
def shuffle_weights(model, weights=None):
"""Randomly permute the weights in `model`, or the given `weights`.
This is a fast approximation of re-initializing the weights of a model.
Assumes weights are distributed independently of the dimensions of the weight tensors
(i.e., the weights have the same distribution along each dimension).
:param Model model: Modify the weights of the given model.
:param list(ndarray) weights: The model's weights will be replaced by a random permutation of these weights.
If `None`, permute the model's current weights.
"""
if weights is None:
weights = model.get_weights()
weights = [np.random.permutation(w.flat).reshape(w.shape) for w in weights]
# Faster, but less random: only permutes along the first dimension
# weights = [np.random.permutation(w) for w in weights]
model.set_weights(weights)
| [
"matplotlib.pyplot.imshow",
"mpl_toolkits.mplot3d.art3d.Poly3DCollection",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"numpy.random.permutation",
"matplotlib.pyplot.figure",
"skimage.measure.marching_cubes",
"numpy.savez_compressed",
"numpy.load",
"matplotlib.pyplot.show"
] | [((239, 261), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img[slice]'], {}), '(img[slice])\n', (249, 261), True, 'import matplotlib.pyplot as plt\n'), ((266, 276), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (274, 276), True, 'import matplotlib.pyplot as plt\n'), ((481, 517), 'skimage.measure.marching_cubes', 'measure.marching_cubes', (['p', 'threshold'], {}), '(p, threshold)\n', (503, 517), False, 'from skimage import measure, morphology\n'), ((575, 603), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (585, 603), True, 'import matplotlib.pyplot as plt\n'), ((738, 779), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'Poly3DCollection', (['verts[faces]'], {'alpha': '(0.7)'}), '(verts[faces], alpha=0.7)\n', (754, 779), False, 'from mpl_toolkits.mplot3d.art3d import Poly3DCollection\n'), ((981, 1006), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot3d.png"""'], {}), "('plot3d.png')\n", (992, 1006), True, 'import matplotlib.pyplot as plt\n'), ((1194, 1210), 'pandas.read_csv', 'pd.read_csv', (['pth'], {}), '(pth)\n', (1205, 1210), True, 'import pandas as pd\n'), ((1070, 1103), 'numpy.savez_compressed', 'np.savez_compressed', (['fh'], {'data': 'arr'}), '(fh, data=arr)\n', (1089, 1103), True, 'import numpy as np\n'), ((1132, 1144), 'numpy.load', 'np.load', (['pth'], {}), '(pth)\n', (1139, 1144), True, 'import numpy as np\n'), ((1885, 1914), 'numpy.random.permutation', 'np.random.permutation', (['w.flat'], {}), '(w.flat)\n', (1906, 1914), True, 'import numpy as np\n')] |
import unittest
from translator import french_to_english, english_to_french
class TestFrenchToEnglish(unittest.TestCase):
def test1(self):
self.assertEqual(french_to_english("Bonjour"), "Hello") # test when "Bonjour" is given as input the output is "Hello".
with self.assertRaises(ValueError): # test when None/null is given as input a ValueError is returned.
french_to_english(None)
class TestEnglishToFrench(unittest.TestCase):
def test1(self):
self.assertEqual(english_to_french("Hello"), "Bonjour") # test when "Hello" is given as input the output is "Bonjour".
with self.assertRaises(ValueError): # test when None/null is given as input a ValueError is returned.
english_to_french(None)
unittest.main()
| [
"unittest.main",
"translator.french_to_english",
"translator.english_to_french"
] | [((826, 841), 'unittest.main', 'unittest.main', ([], {}), '()\n', (839, 841), False, 'import unittest\n'), ((170, 198), 'translator.french_to_english', 'french_to_english', (['"""Bonjour"""'], {}), "('Bonjour')\n", (187, 198), False, 'from translator import french_to_english, english_to_french\n'), ((427, 450), 'translator.french_to_english', 'french_to_english', (['None'], {}), '(None)\n', (444, 450), False, 'from translator import french_to_english, english_to_french\n'), ((544, 570), 'translator.english_to_french', 'english_to_french', (['"""Hello"""'], {}), "('Hello')\n", (561, 570), False, 'from translator import french_to_english, english_to_french\n'), ((801, 824), 'translator.english_to_french', 'english_to_french', (['None'], {}), '(None)\n', (818, 824), False, 'from translator import french_to_english, english_to_french\n')] |
import os
import torch
import numpy as np
from tqdm import tqdm
import json
from torch.utils.data import Dataset, DataLoader
from arcface.resnet import ResNet
from arcface.googlenet import GoogLeNet
from arcface.inception_v4 import InceptionV4
from arcface.inceptionresnet_v2 import InceptionResNetV2
from arcface.densenet import DenseNet
from arcface.resnet_cbam import ResNetCBAM
import torchvision.transforms as transforms
import cv2
import random
import jieba
from autoaugment import rand_augment_transform
from PIL import Image
'''
for image-text match
'''
class ITMatchTrain(Dataset):
def __init__(self, opt):
arcfaceDataset = ArcfaceDataset(root_dir=opt.data_path, mode="train", size=(opt.size, opt.size), imgORvdo='video')
batch_size = 256
training_params = {"batch_size": batch_size,
"shuffle": False,
"drop_last": False,
"num_workers": opt.workers}
arcfaceLoader = DataLoader(arcfaceDataset, **training_params)
self.vocab_size = arcfaceDataset.vocab_size
if opt.network == 'resnet':
model = ResNet(opt)
b_name = opt.network+'_'+opt.mode+'_{}'.format(opt.num_layers_r)
elif opt.network == 'googlenet':
model = GoogLeNet(opt)
b_name = opt.network
elif opt.network == 'inceptionv4':
model = InceptionV4(opt)
b_name = opt.network
elif opt.network == 'inceptionresnetv2':
model = InceptionResNetV2(opt)
b_name = opt.network
elif opt.network == 'densenet':
model = DenseNet(opt)
b_name = opt.network+'_{}'.format(opt.num_layers_d)
elif opt.network == 'resnet_cbam':
model = ResNetCBAM(opt)
b_name = opt.network+'_{}'.format(opt.num_layers_c)
else:
raise RuntimeError('Cannot Find the Model: {}'.format(opt.network))
model.load_state_dict(torch.load(os.path.join(opt.saved_path, b_name+'.pth')))
model.cuda()
model.eval()
self.model_name = b_name
self.features = torch.zeros((len(arcfaceDataset), opt.embedding_size))
self.texts = torch.zeros((len(arcfaceDataset), 64)).long()
self.instances = torch.zeros((len(arcfaceDataset))).long()
print('Calculating features...')
for i, d in enumerate(tqdm(arcfaceLoader)):
# img = d['img'].cuda()
text = d['text']
instance = d['instance']
# with torch.no_grad():
# feature = model(img).cpu()
# self.features[i*batch_size:(i+1)*batch_size] = feature
self.texts[i*batch_size:(i+1)*batch_size] = text
self.instances[i*batch_size:(i+1)*batch_size] = instance
def __len__(self):
return self.texts.size(0)
def __getitem__(self, index):
text = self.texts[index]
# feature = self.features[index]
feature = None
instance = self.instances[index]
# return {'feature': feature, 'text':text, 'instance':instance}
return {'text':text, 'instance':instance}
class ITMatchValidation(Dataset):
def __init__(self, size=(224, 224), root_dir='data/validation_instance/', maxLen=64, PAD=0, imgORvdo='video'):
self.root_dir = root_dir
self.size = size
text2num = Text2Num(maxLen=maxLen, root_dir='data', PAD=PAD)
self.vocab_size = text2num.vocab_size
assert imgORvdo in ['image', 'video']
tat = 'validation_'+imgORvdo+'s'
# tat = 'train_'+imgORvdo+'s'
with open(os.path.join('data', tat+'_text.json'), 'r') as f:
textDic = json.load(f)
for k in textDic.keys():
textDic[k] = text2num(textDic[k])
instances = os.listdir(root_dir)
self.items = []
print('Loading Data...')
for instance in tqdm(instances):
imgs = os.listdir(root_dir+instance)
l = []
for img in imgs:
if imgORvdo in img:
l.append(os.path.join(instance, img))
text_name = img.split(instance)[-1].split('_')[0]
l.append(textDic[text_name])
break
if len(l) < 2:
continue
self.items.append(l)
print('Done')
self.transform = transforms.Normalize(
mean=[0.55574415, 0.51230767, 0.51123354],
std=[0.21303795, 0.21604613, 0.21273348])
def __len__(self):
return len(self.items)
def __getitem__(self, index):
imgPath, text = self.items[index]
text = torch.Tensor(text).long()
# img = np.load(os.path.join(self.root_dir, imgPath))
img = cv2.imread(os.path.join(self.root_dir, imgPath))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32) / 255
hi, wi, ci = img.shape
rh = (hi-self.size[0])//2
rw = (wi-self.size[1])//2
img = img[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
img = torch.from_numpy(img)
img = img.permute(2, 0, 1)
img = self.transform(img)
return {
'img': img,
'text': text
}
'''
for text
'''
class Text2Num:
def __init__(self, maxLen, root_dir='data', PAD=0):
with open(os.path.join(root_dir, 'vocab.json'), 'r') as f:
self.vocab = json.load(f)
self.PAD = PAD
self.maxLen = maxLen
self.vocab_size = len(self.vocab)
def __call__(self, text):
words = jieba.cut(text, cut_all=False, HMM=True)
# l = [len(self.vocab)]# CLS
l = []
for w in words:
if w.strip() in self.vocab:
l.append(self.vocab[w.strip()])
if len(l) > self.maxLen:
l = l[:self.maxLen]
elif len(l) < self.maxLen:
l += [self.PAD]*(self.maxLen-len(l))
assert len(l) == self.maxLen
return l
'''
for efficientdet
'''
class EfficientdetDataset(Dataset):
def __init__(self, root_dir='data', mode='train', imgORvdo='all', transform=None, maxLen=64, PAD=0):
assert mode in ['train', 'validation']
assert imgORvdo in ['image', 'video', 'all']
self.root_dir = root_dir
self.transform = transform
text2num = Text2Num(maxLen=maxLen, root_dir=root_dir, PAD=PAD)
self.vocab_size = text2num.vocab_size
label_file = 'label.json'
with open(os.path.join(root_dir, label_file), 'r') as f:
self.labelDic = json.load(f)
self.num_classes = len(self.labelDic['label2index'])
if imgORvdo == 'image':
tats = [mode + '_images']
elif imgORvdo == 'video':
tats = [mode + '_videos']
else:
tats = [mode + '_images', mode + '_videos']
self.textDic = {}
ds = []
for t in tats:
with open(os.path.join(root_dir, t+'_annotation.json'), 'r') as f:
ds.append(json.load(f))
with open(os.path.join(root_dir, t+'_text.json'), 'r') as f:
self.textDic[t] = json.load(f)
for k in self.textDic.keys():
for kk in self.textDic[k].keys():
self.textDic[k][kk] = text2num(self.textDic[k][kk])
ls = [d['annotations'] for d in ds]
self.images = []
print('Loading {} {} data...'.format(mode, imgORvdo))
for i, l in enumerate(ls):
for d in l:
if len(d['annotations']) == 0:
continue
t = []
t.append(os.path.join(tats[i], d['img_name']))
t.append(d['annotations'])
t.append(d['img_name'])
t.append(tats[i])
self.images.append(t)
# print(len(self.images))
# self.images = self.images[:1000]
print('Done')
def __len__(self):
return len(self.images)
def __getitem__(self, index):
imgPath, annotationsList, imgName, t = self.images[index]
text_name = imgName.split('_')[0]
text = self.textDic[t][text_name]
text = torch.Tensor(text).long()
img = cv2.imread(os.path.join(self.root_dir, imgPath))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32) / 255
annotations = np.zeros((len(annotationsList), 6))
for i, annotationDic in enumerate(annotationsList):
annotation = np.zeros((1, 6))
annotation[0, :4] = annotationDic['box']
annotation[0, 4] = annotationDic['label']
if annotationDic['instance_id'] > 0:
annotation[0, 5] = 1
else:
annotation[0, 5] = 0
annotations[i:i+1, :] = annotation
# annotations = np.append(annotations, annotation, axis=0)
sample = {'img': img, 'annot': annotations, 'text': text}
if self.transform:
sample = self.transform(sample)
return sample
def label2index(self, label):
return self.labelDic['label2index'][label]
def index2label(self, index):
return self.labelDic['index2label'][str(index)]
def getImagePath(self, index):
imgPath, annotationsList, imgName, t = self.images[index]
return imgPath
def getImageInfo(self, index):
imgPath, annotationsList, imgName, t = self.images[index]
imgID, frame = imgName[:-4].split('_')
return imgPath, imgID, frame
class EfficientdetDatasetVideo(Dataset):
def __init__(self, root_dir='data', mode='train', imgORvdo='video', transform=None, maxLen=64, PAD=0):
assert mode in ['train', 'validation']
assert imgORvdo in ['video']
self.root_dir = root_dir
self.transform = transform
text2num = Text2Num(maxLen=maxLen, root_dir=root_dir, PAD=PAD)
self.vocab_size = text2num.vocab_size
label_file = 'label.json'
with open(os.path.join(root_dir, label_file), 'r') as f:
self.labelDic = json.load(f)
self.num_classes = len(self.labelDic['label2index'])
tats = [mode + '_videos']
self.textDic = {}
ds = []
for t in tats:
with open(os.path.join(root_dir, t+'_annotation.json'), 'r') as f:
ds.append(json.load(f))
with open(os.path.join(root_dir, t+'_text.json'), 'r') as f:
self.textDic[t] = json.load(f)
for k in self.textDic.keys():
for kk in self.textDic[k].keys():
self.textDic[k][kk] = text2num(self.textDic[k][kk])
ls = [d['annotations'] for d in ds]
self.images = []
self.videos = {}
print('Loading {} {} data...'.format(mode, imgORvdo))
for i, l in enumerate(ls):
for d in l:
if d['img_name'][:6] not in self.videos:
self.videos[d['img_name'][:6]] = []
# if len(d['annotations']) == 0:
# continue
t = []
t.append(os.path.join(tats[i], d['img_name']))
t.append(d['annotations'])
t.append(d['img_name'])
t.append(tats[i])
self.videos[d['img_name'][:6]].append(t)
# self.images.append(t)
self.videos = list(self.videos.values())
for l in self.videos:
assert len(l) == 10
# print(len(self.images))
self.videos = self.videos[:100]
print('Done')
def __len__(self):
return len(self.videos)
def __getitem__(self, index):
lst = self.videos[index]
datas = []
for imgPath, annotationsList, imgName, t in lst:
# imgPath, annotationsList, imgName, t = self.images[index]
text_name = imgName.split('_')[0]
text = self.textDic[t][text_name]
text = torch.Tensor(text).long()
img = cv2.imread(os.path.join(self.root_dir, imgPath))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32) / 255
annotations = np.zeros((len(annotationsList), 6))
for i, annotationDic in enumerate(annotationsList):
annotation = np.zeros((1, 6))
annotation[0, :4] = annotationDic['box']
annotation[0, 4] = annotationDic['label']
if annotationDic['instance_id'] > 0:
annotation[0, 5] = 1
else:
annotation[0, 5] = 0
annotations[i:i+1, :] = annotation
# annotations = np.append(annotations, annotation, axis=0)
sample = {'img': img, 'annot': annotations, 'text': text}
datas.append(sample)
if self.transform:
datas = self.transform(datas)
return datas
# def label2index(self, label):
# return self.labelDic['label2index'][label]
# def index2label(self, index):
# return self.labelDic['index2label'][str(index)]
# def getImagePath(self, index):
# imgPath, annotationsList, imgName, t = self.images[index]
# return imgPath
# def getImageInfo(self, index):
# imgPath, annotationsList, imgName, t = self.images[index]
# imgID, frame = imgName[:-4].split('_')
# return imgPath, imgID, frame
'''
for arcface
'''
class ArcfaceDataset(Dataset):
def __init__(self, root_dir='data', mode='train', size=(112, 112), flip_x=0.5, maxLen=64, PAD=0, imgORvdo='all'):
assert mode in ['train', 'all']
assert imgORvdo in ['all', 'image', 'video']
mean=[0.55574415, 0.51230767, 0.51123354]
aa_params = dict(
translate_const=int(size[0] * 0.40),
img_mean=tuple([min(255, round(255 * x)) for x in mean]),
)
self.randAug = rand_augment_transform('rand-m9-n3-mstd0.5', aa_params)
self.root_dir = root_dir
self.size = size
self.flip_x = flip_x
if mode == 'train':
modes = ['train']
instanceFile = 'instanceID.json'
elif mode == 'train_2':
modes = ['train', 'validation_2']
instanceFile = 'instanceID_2.json'
elif mode == 'all':
modes = ['train', 'validation_2', 'validation']
instanceFile = 'instanceID_all.json'
with open(os.path.join(root_dir, instanceFile), 'r') as f:
self.clsDic = json.load(f)
with open(os.path.join(root_dir, 'instance2label.json'), 'r') as f:
self.instance2label = json.load(f)
text2num = Text2Num(maxLen=maxLen, root_dir=root_dir, PAD=PAD)
self.vocab_size = text2num.vocab_size
self.images = []
self.textDics = {}
for mode in modes:
if imgORvdo == 'all':
tats = [mode + '_images', mode + '_videos']
elif imgORvdo == 'image':
tats = [mode + '_images']
elif imgORvdo == 'video':
tats = [mode + '_videos']
# img_tat = mode + '_images'
# vdo_tat = mode + '_videos'
savePath = mode + '_instance'
self.savePath = os.path.join(root_dir, savePath)
d = []
textDic = []
for tat in tats:
with open(os.path.join(root_dir, tat+'_annotation.json'), 'r') as f:
d.append(json.load(f))
with open(os.path.join(root_dir, tat+'_text.json'), 'r') as f:
textDic.append(json.load(f))
for i in range(len(textDic)):
for k in textDic[i].keys():
textDic[i][k] = text2num(textDic[i][k])
self.textDics[mode] = textDic
l = [dd['annotations'] for dd in d]
print('Loading data...')
for i, ll in enumerate(l):
for d in ll:
for dd in d['annotations']:
if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
t = []
t.append(os.path.join(self.savePath, str(dd['instance_id']), tats[i]+str(dd['instance_id'])+d['img_name']))
t.append(dd['instance_id'])
t.append(d['img_name'].split('_')[0])
t.append(i)
t.append(mode)
self.images.append(t)
self.num_classes = len(self.clsDic)
self.num_labels = len(set(self.instance2label.values()))
# self.images = self.images[:2222]
print('Done')
self.transform = transforms.Normalize(
mean=[0.55574415, 0.51230767, 0.51123354],
std=[0.21303795, 0.21604613, 0.21273348])
def __len__(self):
return len(self.images)
def __getitem__(self, index):
imgName, instance_id, textName, iORv, mode = self.images[index]
img = np.load(imgName[:-4]+'.npy')
# img = cv2.imread(imgName[:-4]+'.jpg')
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# img = img.astype(np.float32) / 255
# '''randAug'''
# img = Image.fromarray(np.uint8(img*255))
# img = self.randAug(img)
# img.save('aaa.jpg')
# img = np.array(img)
# img = img.astype(np.float32) / 255
# '''randAug'''
text = self.textDics[mode][iORv][textName]
text = torch.tensor(text).long()
iORv = torch.tensor(iORv).long()
h, w, c = img.shape
# print(h,w,c)
rh = random.randint(0, h-256)
rw = random.randint(0, w-256)
img = img[rh:256+rh, rw:256+rw, :]
img = cv2.resize(img, self.size)
# '''random erasing'''
# if np.random.rand() < 0.5:
# w = h = 256
# while w >= 256 or h >= 256:
# r = np.random.uniform(0.3, 1/0.3)
# s = 256*256*np.random.uniform(0.02, 0.4)
# w = int(np.sqrt(s*r))
# h = int(np.sqrt(s/r))
# s_w = random.randint(0, 256-w)
# s_h = random.randint(0, 256-h)
# img[s_h:s_h+h, s_w:s_w+w, :] = 0
# print(img.shape)
instance = torch.tensor(self.clsDic[str(instance_id)])
label = torch.tensor(self.instance2label[str(instance_id)])
if np.random.rand() < self.flip_x:
img = img[:, ::-1, :].copy()
img = torch.from_numpy(img)
img = img.permute(2, 0, 1)
img = self.transform(img)
return {'img':img, 'instance':instance, 'label':label, 'text': text, 'iORv': iORv}
# return {'instance':instance, 'label':label, 'text': text, 'iORv': iORv}
class ArcfaceDatasetSeparate(Dataset):
    """Training dataset that yields a paired (image crop, video crop) per
    instance, for models that embed the two modalities separately.

    Only instances that have at least one image sample AND at least one
    video sample are kept; one index = one such instance.
    """
    def __init__(self, root_dir='data', mode='train', size=(112, 112), flip_x=0.5, maxLen=64, PAD=0, imgORvdo='all'):
        assert mode in ['train']
        assert imgORvdo in ['all']
        self.root_dir = root_dir
        self.size = size
        self.flip_x = flip_x
        if imgORvdo == 'all':
            tats = [mode + '_images', mode + '_videos']
        elif imgORvdo == 'image':
            tats = [mode + '_images']
        elif imgORvdo == 'video':
            tats = [mode + '_videos']
        savePath = mode + '_instance'
        self.savePath = os.path.join(root_dir, savePath)
        # Tokenizer for product titles; exposes the vocabulary size for the model.
        text2num = Text2Num(maxLen=maxLen, root_dir=root_dir, PAD=PAD)
        self.vocab_size = text2num.vocab_size
        d = []
        self.textDic = []
        # Load annotations and raw title texts for each source (images/videos).
        for tat in tats:
            with open(os.path.join(root_dir, tat+'_annotation.json'), 'r') as f:
                d.append(json.load(f))
            with open(os.path.join(root_dir, tat+'_text.json'), 'r') as f:
                self.textDic.append(json.load(f))
        # Numericalize all titles in place.
        for i in range(len(self.textDic)):
            for k in self.textDic[i].keys():
                self.textDic[i][k] = text2num(self.textDic[i][k])
        l = [dd['annotations'] for dd in d]
        self.images = []
        with open(os.path.join(root_dir, 'instanceID.json'), 'r') as f:
            self.clsDic = json.load(f)
        with open(os.path.join(root_dir, 'instance2label.json'), 'r') as f:
            self.instance2label = json.load(f)
        names = ['image', 'video']
        print('Loading data...')
        # Collect every labeled detection whose instance is in the class map.
        for i, ll in enumerate(l):
            for d in ll:
                for dd in d['annotations']:
                    if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
                        t = []
                        t.append(os.path.join(str(dd['instance_id']), tats[i]+str(dd['instance_id'])+d['img_name']))
                        t.append(dd['instance_id'])
                        t.append(d['img_name'].split('_')[0])
                        t.append(names[i])
                        self.images.append(t)
        self.num_classes = len(self.clsDic)
        self.num_labels = len(set(self.instance2label.values()))
        # Group sample indices by instance, split into 'image' and 'video'.
        self.dic = {}
        for i, (imgName, instance_id, textName, iORv) in enumerate(self.images):
            if instance_id not in self.dic:
                self.dic[instance_id] = {}
                self.dic[instance_id]['image'] = []
                self.dic[instance_id]['video'] = []
            self.dic[instance_id][iORv].append(i)
        # BUGFIX: iterate over a snapshot of the keys — deleting from a dict
        # while iterating its live .keys() view raises
        # "RuntimeError: dictionary changed size during iteration" in Python 3.
        for k in list(self.dic.keys()):
            if len(self.dic[k]['image']) == 0 or len(self.dic[k]['video']) == 0:
                del self.dic[k]
        self.dic = list(self.dic.items())
        # self.images = self.images[:2222]
        print('Done')
        self.transform = transforms.Normalize(
            mean=[0.55574415, 0.51230767, 0.51123354],
            std=[0.21303795, 0.21604613, 0.21273348])
    def __len__(self):
        # One entry per instance that has both an image and a video sample.
        return len(self.dic)
    def __getitem__(self, index):
        """Return {'img', 'vdo', 'instance'}: a random image crop and a random
        video-frame crop of the same instance, each 224-cropped, resized,
        randomly flipped and normalized."""
        imgIndex = random.choice(self.dic[index][1]['image'])
        vdoIndex = random.choice(self.dic[index][1]['video'])
        sample = []
        instances = []
        for index in [imgIndex, vdoIndex]:
            imgName, instance_id, textName, iORv = self.images[index]
            img = np.load(os.path.join(self.savePath, imgName)[:-4]+'.npy')
            # text = self.textDic[iORv][textName]
            # text = torch.tensor(text).long()
            # iORv = torch.tensor(iORv).long()
            h, w, c = img.shape
            # Random crop of at least 224x224 with random extent, then resize.
            rh_1 = random.randint(0, h-224)
            rh_2 = random.randint(224, h)
            rw_1 = random.randint(0, w-224)
            rw_2 = random.randint(224, w)
            img = img[rh_1:rh_2, rw_1:rw_2, :]
            img = cv2.resize(img, self.size)
            instances.append(torch.tensor(self.clsDic[str(instance_id)]))
            # label = torch.tensor(self.instance2label[str(instance_id)])
            if np.random.rand() < self.flip_x:
                img = img[:, ::-1, :].copy()
            img = torch.from_numpy(img)
            img = img.permute(2, 0, 1)
            img = self.transform(img)
            sample.append(img)
        # Both crops must target the same instance id.
        assert instances[0] == instances[1]
        return {'img': sample[0], 'vdo':sample[1], 'instance':instances[0]}
class TripletDataset(Dataset):
    """Training dataset yielding (query, positive, negative) image triplets.

    Positive shares the query's instance_id; negative shares its label
    (coarse class) but has a different instance_id.
    """
    def __init__(self, root_dir='data', mode='train', size=(112, 112), flip_x=0.5):
        assert mode in ['train']
        self.root_dir = root_dir
        self.size = size
        self.flip_x = flip_x
        img_tat = mode + '_images'
        vdo_tat = mode + '_videos'
        savePath = mode + '_instance'
        self.savePath = os.path.join(root_dir, savePath)
        # Annotations for image frames and video frames, plus the instance
        # id -> class index and instance id -> label maps.
        with open(os.path.join(root_dir, img_tat+'_annotation.json'), 'r') as f:
            d_i = json.load(f)
        with open(os.path.join(root_dir, vdo_tat+'_annotation.json'), 'r') as f:
            d_v = json.load(f)
        with open(os.path.join(root_dir, 'instanceID.json'), 'r') as f:
            self.clsDic = json.load(f)
        with open(os.path.join(root_dir, 'instance2label.json'), 'r') as f:
            instance2label = json.load(f)
        l_i = d_i['annotations']
        l_v = d_v['annotations']
        self.images = []
        print('Loading data...')
        # Each kept sample is [relative path, class index, label].
        for d in l_i:
            for dd in d['annotations']:
                if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
                    t = []
                    t.append(os.path.join(str(dd['instance_id']), img_tat+str(dd['instance_id'])+d['img_name']))
                    t.append(self.clsDic[str(dd['instance_id'])])
                    t.append(instance2label[str(dd['instance_id'])])
                    self.images.append(t)
        for d in l_v:
            for dd in d['annotations']:
                if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
                    t = []
                    t.append(os.path.join(str(dd['instance_id']), vdo_tat+str(dd['instance_id'])+d['img_name']))
                    t.append(self.clsDic[str(dd['instance_id'])])
                    t.append(instance2label[str(dd['instance_id'])])
                    self.images.append(t)
        self.num_classes = len(self.clsDic)
        self.num_labels = len(set(instance2label.values()))
        # Nested index: label -> instance_id -> list of sample indices,
        # used for positive/negative sampling in __getitem__.
        self.cls_ins_dic = {}
        for i, l in enumerate(self.images):
            imgName, instance_id, label = l
            if label not in self.cls_ins_dic:
                self.cls_ins_dic[label] = {}
            if instance_id not in self.cls_ins_dic[label]:
                self.cls_ins_dic[label][instance_id] = []
            self.cls_ins_dic[label][instance_id].append(i)
        # Negative sampling needs at least two instances per label.
        for k in self.cls_ins_dic.keys():
            if len(self.cls_ins_dic[k]) < 2:
                raise RuntimeError('size of self.cls_ins_dic[k] must be larger than 1')
        print('Done')
        self.transform = transforms.Normalize(
            mean=[0.55574415, 0.51230767, 0.51123354],
            std=[0.21303795, 0.21604613, 0.21273348])
    def __len__(self):
        return len(self.images)
    def __getitem__(self, index):
        """Sample a triplet anchored at `index`; positive is a different
        sample of the same instance, negative a different instance of the
        same label. Each is randomly cropped, flipped and normalized."""
        imgName_q, instance_id_q, label_q = self.images[index]
        # NOTE(review): loops below spin forever if the query instance has a
        # single sample / the label a single instance; guarded only partially
        # by the __init__ check.
        p_index = index
        while p_index == index:
            p_index = random.choice(self.cls_ins_dic[label_q][instance_id_q])
        instance_id_n = instance_id_q
        while instance_id_n == instance_id_q:
            instance_id_n = random.choice(list(self.cls_ins_dic[label_q].keys()))
        n_index = random.choice(self.cls_ins_dic[label_q][instance_id_n])
        imgName_p, instance_id_p, label_p = self.images[p_index]
        imgName_n, instance_id_n, label_n = self.images[n_index]
        # Sanity: all three share one label; query/positive share an instance.
        assert len(set([label_q, label_p, label_n])) == 1
        assert len(set([instance_id_q, instance_id_p])) == 1
        instance_id_q = torch.tensor(instance_id_q)
        instance_id_p = torch.tensor(instance_id_p)
        instance_id_n = torch.tensor(instance_id_n)
        # Pre-extracted crops cached as .npy files.
        img_q = np.load(os.path.join(self.savePath, imgName_q)[:-4]+'.npy')
        img_p = np.load(os.path.join(self.savePath, imgName_p)[:-4]+'.npy')
        img_n = np.load(os.path.join(self.savePath, imgName_n)[:-4]+'.npy')
        hq, wq, cq = img_q.shape
        hp, wp, cp = img_p.shape
        hn, wn, cn = img_n.shape
        # Independent random crops to self.size for each member.
        rh = random.randint(0, hq-self.size[0])
        rw = random.randint(0, wq-self.size[1])
        img_q = img_q[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
        rh = random.randint(0, hp-self.size[0])
        rw = random.randint(0, wp-self.size[1])
        img_p = img_p[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
        rh = random.randint(0, hn-self.size[0])
        rw = random.randint(0, wn-self.size[1])
        img_n = img_n[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
        # Independent random horizontal flips.
        if np.random.rand() < self.flip_x:
            img_q = img_q[:, ::-1, :].copy()
        if np.random.rand() < self.flip_x:
            img_p = img_p[:, ::-1, :].copy()
        if np.random.rand() < self.flip_x:
            img_n = img_n[:, ::-1, :].copy()
        # HWC -> CHW, then normalize.
        img_q = torch.from_numpy(img_q).permute(2, 0, 1)
        img_p = torch.from_numpy(img_p).permute(2, 0, 1)
        img_n = torch.from_numpy(img_n).permute(2, 0, 1)
        img_q = self.transform(img_q)
        img_p = self.transform(img_p)
        img_n = self.transform(img_n)
        return {
            'img_q':img_q,
            'img_p':img_p,
            'img_n':img_n,
            'img_q_instance':instance_id_q,
            'img_p_instance':instance_id_p,
            'img_n_instance':instance_id_n,
        }
class HardTripletDataset(Dataset):
    """Dataset for hard-triplet mining: each item is a stack of `n_samples`
    crops of ONE instance, so the loss can mine hard positives/negatives
    inside the batch.
    """
    def __init__(self, root_dir='data', mode='train', size=(112, 112), flip_x=0.5, n_samples=4):
        assert mode in ['train', 'all', 'train_2']
        mean=[0.55574415, 0.51230767, 0.51123354]
        # RandAugment policy; currently unused in __getitem__ (the randAug
        # lines there are commented out).
        aa_params = dict(
            translate_const=int(size[0] * 0.40),
            img_mean=tuple([min(255, round(255 * x)) for x in mean]),
        )
        self.randAug = rand_augment_transform('rand-m9-n3-mstd0.5', aa_params)
        self.root_dir = root_dir
        self.size = size
        self.flip_x = flip_x
        self.n_samples = n_samples
        # Each training mode pulls annotations from a different split set
        # and its matching instance-id map.
        if mode == 'train':
            modes = ['train']
            instanceFile = 'instanceID.json'
        elif mode == 'train_2':
            modes = ['train', 'validation_2']
            instanceFile = 'instanceID_2.json'
        elif mode == 'all':
            modes = ['train', 'validation_2', 'validation']
            instanceFile = 'instanceID_all.json'
        with open(os.path.join(root_dir, instanceFile), 'r') as f:
            self.clsDic = json.load(f)
        # instance (class index) -> list of sample file paths.
        self.samples = {}
        for mode in modes:
            img_tat = mode + '_images'
            vdo_tat = mode + '_videos'
            savePath = mode + '_instance'
            savePath = os.path.join(root_dir, savePath)
            with open(os.path.join(root_dir, img_tat+'_annotation.json'), 'r') as f:
                d_i = json.load(f)
            with open(os.path.join(root_dir, vdo_tat+'_annotation.json'), 'r') as f:
                d_v = json.load(f)
            l_i = d_i['annotations']
            l_v = d_v['annotations']
            print('Loading data...')
            for d in l_i:
                for dd in d['annotations']:
                    if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
                        instance = self.clsDic[str(dd['instance_id'])]
                        if instance not in self.samples:
                            self.samples[instance] = []
                        self.samples[instance].append(
                            os.path.join(savePath, str(dd['instance_id']), img_tat+str(dd['instance_id'])+d['img_name']))
            for d in l_v:
                for dd in d['annotations']:
                    if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
                        instance = self.clsDic[str(dd['instance_id'])]
                        if instance not in self.samples:
                            self.samples[instance] = []
                        self.samples[instance].append(
                            os.path.join(savePath, str(dd['instance_id']), vdo_tat+str(dd['instance_id'])+d['img_name']))
        self.num_classes = len(self.clsDic)
        # Duplicate short sample lists so random.sample(n_samples) never fails.
        for k in self.samples.keys():
            while len(self.samples[k]) < n_samples:
                self.samples[k] *= 2
            assert len(self.samples[k]) >= n_samples
        self.instances = list(self.samples.keys())
        print('Done')
        self.transform = transforms.Normalize(
            mean=[0.55574415, 0.51230767, 0.51123354],
            std=[0.21303795, 0.21604613, 0.21273348])
    def __len__(self):
        # One index per instance; n_samples crops are drawn per index.
        return len(self.instances)
    def __getitem__(self, index):
        """Return {'img': (n_samples, C, H, W), 'instance': (n_samples,)} —
        n_samples distinct augmented crops of the same instance."""
        instance = self.instances[index]
        imgPaths = random.sample(self.samples[instance], self.n_samples)
        imgs = []
        instances = []
        for imgPath in imgPaths:
            img = np.load(imgPath[:-4]+'.npy')
            # '''randAug'''
            # img = Image.fromarray(np.uint8(img*255))
            # img = self.randAug(img)
            # img.save('aaa.jpg')
            # img = np.array(img)
            # img = img.astype(np.float32) / 255
            # '''randAug'''
            # NOTE(review): with the assert above enforcing size==256, the
            # following resize branch is unreachable unless asserts are
            # stripped (-O); kept as written.
            assert self.size[0] == 256
            if self.size[0] != 256:
                r = 256 / self.size[0]
                img = cv2.resize(img, (int(270/r), int(270/r)))
            h, w, c = img.shape
            # Random crop to self.size.
            rh = random.randint(0, h-self.size[0])
            rw = random.randint(0, w-self.size[1])
            img = img[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
            # if np.random.rand() < 0.5:
            #     w = h = 256
            #     while w >= 256 or h >= 256:
            #         r = np.random.uniform(0.3, 1/0.3)
            #         s = 256*256*np.random.uniform(0.02, 0.4)
            #         w = int(np.sqrt(s*r))
            #         h = int(np.sqrt(s/r))
            #     s_w = random.randint(0, 256-w)
            #     s_h = random.randint(0, 256-h)
            #     img[s_h:s_h+h, s_w:s_w+w, :] = 0
            instance_t = torch.tensor(instance)
            if np.random.rand() < self.flip_x:
                img = img[:, ::-1, :].copy()
            img = torch.from_numpy(img)
            img = img.permute(2, 0, 1)
            img = self.transform(img)
            imgs.append(img)
            instances.append(instance_t)
        # Stack the n_samples crops into a single batch dimension.
        imgs = torch.stack(imgs, dim=0)
        instances = torch.stack(instances, dim=0)
        return {'img': imgs, 'instance': instances}
'''
for validation
'''
class ValidationArcfaceDataset(Dataset):
    """Validation dataset pairing one image crop and one video crop per
    instance (plus their tokenized titles). The dataset length is doubled:
    the second half returns horizontally flipped copies for TTA.
    """
    def __init__(self, size=(112, 112), root_dir='data/validation_instance/', maxLen=64, PAD=0):
        self.root_dir = root_dir
        self.size = size
        text2num = Text2Num(maxLen=maxLen, root_dir='data', PAD=PAD)
        self.vocab_size = text2num.vocab_size
        img_tat = 'validation_images'
        vdo_tat = 'validation_videos'
        # Raw title texts for image and video sides; numericalized below.
        with open(os.path.join('data', img_tat+'_text.json'), 'r') as f:
            self.textDic_i = json.load(f)
        with open(os.path.join('data', vdo_tat+'_text.json'), 'r') as f:
            self.textDic_v = json.load(f)
        for k in self.textDic_i.keys():
            self.textDic_i[k] = text2num(self.textDic_i[k])
        for k in self.textDic_v.keys():
            self.textDic_v[k] = text2num(self.textDic_v[k])
        # One sub-directory of crops per instance.
        instances = os.listdir(root_dir)
        self.items = []
        # s = ''
        print('Loading Data...')
        for instance in tqdm(instances):
            imgs = os.listdir(root_dir+instance)
            if len(imgs) < 2:
                continue
            # Build [img path, img text key, vdo path, vdo text key, instance];
            # skip instances missing either modality.
            l = []
            for img in imgs:
                if 'images' in img:
                    l.append(os.path.join(instance, img))
                    text_name = img.split(instance)[-1].split('_')[0]
                    l.append(text_name)
                    break
            if len(l) == 0:
                continue
            for img in imgs:
                if 'videos' in img:
                    l.append(os.path.join(instance, img))
                    text_name = img.split(instance)[-1].split('_')[0]
                    l.append(text_name)
                    break
            if len(l) < 4:
                continue
            l.append(instance)
            # s += '{}\t{}\n'.format(l[0], l[2])
            self.items.append(l)
        # with open('validation_path.txt', 'w') as f:
        #     f.write(s)
        self.length = len(self.items)
        print('Done')
        self.transform = transforms.Normalize(
            mean=[0.55574415, 0.51230767, 0.51123354],
            std=[0.21303795, 0.21604613, 0.21273348])
    def __len__(self):
        # Doubled: indices >= self.length yield flipped versions.
        return len(self.items) * 2
    def __getitem__(self, index):
        """Return {'img','vdo','img_text','vdo_text','instance','img_e','vdo_e'}
        for item index % length; flipped horizontally when index >= length."""
        imgPath, textName_img, vdoPath, textName_vdo, instance = self.items[index%self.length]
        img_text = self.textDic_i[textName_img]
        vdo_text = self.textDic_v[textName_vdo]
        img_text = torch.Tensor(img_text).long()
        vdo_text = torch.Tensor(vdo_text).long()
        # img = np.load(os.path.join(self.root_dir, imgPath))
        # vdo = np.load(os.path.join(self.root_dir, vdoPath))
        img = cv2.imread(os.path.join(self.root_dir, imgPath))
        vdo = cv2.imread(os.path.join(self.root_dir, vdoPath))
        # BGR -> RGB and scale to [0, 1].
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img.astype(np.float32) / 255
        vdo = cv2.cvtColor(vdo, cv2.COLOR_BGR2RGB)
        vdo = vdo.astype(np.float32) / 255
        hi, wi, ci = img.shape
        hv, wv, cv = vdo.shape
        # Crops on disk are 256-based; shrink proportionally for smaller sizes.
        if self.size[0] != 256:
            r = 256 / self.size[0]
            img = cv2.resize(img, (int(hi/r), int(wi/r)))
            vdo = cv2.resize(vdo, (int(hv/r), int(wv/r)))
            hi, wi, ci = img.shape
            hv, wv, cv = vdo.shape
        # Deterministic center crop (no randomness at validation time).
        rh = (hi-self.size[0])//2
        rw = (wi-self.size[1])//2
        img = img[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
        rh = (hv-self.size[0])//2
        rw = (wv-self.size[1])//2
        vdo = vdo[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
        # Second half of the index range: horizontal-flip TTA.
        if index >= self.length:
            img = img[:, ::-1, :].copy()
            vdo = vdo[:, ::-1, :].copy()
        img = torch.from_numpy(img)
        img = img.permute(2, 0, 1)
        vdo = torch.from_numpy(vdo)
        vdo = vdo.permute(2, 0, 1)
        img = self.transform(img)
        vdo = self.transform(vdo)
        return {
            'img': img,
            'vdo': vdo,
            'img_text': img_text,
            'vdo_text': vdo_text,
            'instance':instance,
            'img_e': torch.tensor(0),
            'vdo_e': torch.tensor(1)
        }
class ValidationDataset(Dataset):
    """Crops detection boxes out of validation frames.

    `items` rows are (frame, imgID, imgPath, xmin, ymin, xmax, ymax, classes).
    The last decoded frame is cached so consecutive boxes from the same
    image are served without re-reading the file — assumes items are
    grouped by imgPath and the loader iterates sequentially (num_workers
    sharding keeps per-worker order); TODO confirm with the caller.
    """
    def __init__(self, root_dir, items, size):
        self.size = size
        self.root_dir = root_dir
        # Single-image cache: path and decoded pixels of the last frame read.
        self.imgPath = None
        self.img = None
        self.items = items
        self.transform = transforms.Normalize(
            mean=[0.55574415, 0.51230767, 0.51123354],
            std=[0.21303795, 0.21604613, 0.21273348])
    def __len__(self):
        return len(self.items)
    def __getitem__(self, index):
        """Return the normalized CHW crop for one detection plus its metadata."""
        frame, imgID, imgPath, xmin, ymin, xmax, ymax, classes = self.items[index]
        # Re-read the frame only when the path changes.
        if imgPath != self.imgPath:
            self.imgPath = imgPath
            self.img = cv2.imread(os.path.join(self.root_dir, imgPath))
        det = self.img[ymin:ymax, xmin:xmax, :].copy()
        det = cv2.resize(det, self.size)
        det = cv2.cvtColor(det, cv2.COLOR_BGR2RGB)
        det = det.astype(np.float32) / 255
        det = torch.from_numpy(det)
        det = det.permute(2, 0, 1)
        det = self.transform(det)
        # print(classes)
        return {
            'img': det,
            'imgID': imgID,
            'frame': frame,
            'box': np.array([xmin, ymin, xmax, ymax]),
            'classes': classes}
'''
for test
'''
class TestImageDataset(Dataset):
    """Test-time dataset over raw gallery images plus their tokenized titles.

    NOTE(review): `dir_list` is a mutable default argument; it is only read,
    never mutated, so this is benign — but a tuple default would be safer.
    """
    def __init__(self, root_dir='data', dir_list=['validation_dataset_part1', 'validation_dataset_part2'], transform=None, maxLen=64, PAD=0):
        self.root_dir = root_dir
        self.transform = transform
        self.mode = 'image'
        label_file = 'label.json'
        with open(os.path.join(root_dir, label_file), 'r') as f:
            self.labelDic = json.load(f)
        self.num_classes = len(self.labelDic['label2index'])
        dirs = [os.path.join(root_dir, d) for d in dir_list]
        text2num = Text2Num(maxLen=maxLen, PAD=PAD)
        self.vocab_size = text2num.vocab_size
        self.images = []
        self.ids = []
        self.frames = []
        self.textDic = {}
        # Walk <part>/image/<item_id>/*.jpg; one title per item id.
        for di in dirs:
            img_dir_list = os.listdir(os.path.join(di, 'image'))
            for img_dir in img_dir_list:
                img_names = os.listdir(os.path.join(di, 'image', img_dir))
                for img_name in img_names:
                    self.images.append(os.path.join(di, 'image', img_dir, img_name))
                    self.frames.append(img_name.split('.')[0])
                    self.ids.append(img_dir)
                textPath = os.path.join(di, 'image_text', img_dir+'.txt')
                with open(textPath, 'r') as f:
                    self.textDic[img_dir] = text2num(f.readline())
        # self.images = self.images[:100]
    def __len__(self):
        return len(self.images)
    def __getitem__(self, index):
        """Return {'img': float RGB HWC array in [0,1], 'text': LongTensor};
        optionally passed through self.transform."""
        imgPath = self.images[index]
        img = cv2.imread(imgPath)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img.astype(np.float32) / 255
        img_id = self.ids[index]
        text = self.textDic[img_id]
        text = torch.Tensor(text).long()
        sample = {'img': img, 'text': text}
        if self.transform:
            sample = self.transform(sample)
        return sample
    def getImageInfo(self, index):
        # Metadata lookup used by the detection/retrieval driver.
        imgPath = self.images[index]
        img_id = self.ids[index]
        frame = self.frames[index]
        return imgPath, img_id, frame
# class TestVideoDataset(Dataset):
# def __init__(self, root_dir, transform=None, n=20, maxLen=64, PAD=0):
# self.root_dir = root_dir
# self.transform = transform
# self.n = n
# self.mode = 'video'
# label_file = 'label.json'
# with open(label_file, 'r') as f:
# self.labelDic = json.load(f)
# self.num_classes = len(self.labelDic['label2index'])
# text2num = Text2Num(maxLen=maxLen, PAD=PAD)
# self.vocab_size = text2num.vocab_size
# # gap = 400 // n
# # self.frames_ids = [i*gap for i in range(n)]
# self.videos = []
# self.ids = []
# self.textDic = {}
# vdo_names = os.listdir(os.path.join(root_dir, 'video'))
# for vdo_name in vdo_names:
# self.videos.append(os.path.join(root_dir, 'video', vdo_name))
# self.ids.append(vdo_name.split('.')[0])
# textPath = os.path.join(root_dir, 'video_text', vdo_name.split('.')[0]+'.txt')
# with open(textPath, 'r') as f:
# self.textDic[vdo_name.split('.')[0]] = text2num(f.readline())
# # self.videos = self.videos[:100]
# def __len__(self):
# return len(self.videos)*self.n
# def __getitem__(self, index):
# v_index = index // self.n
# # f_index = self.frames_ids[index % self.n]
# vdo_name = self.videos[v_index]
# cap = cv2.VideoCapture(vdo_name)
# frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
# f_index = int((frames // self.n) * (index % self.n))
# cap.set(cv2.CAP_PROP_POS_FRAMES, f_index)
# ret, img = cap.read()
# cap.release()
# vdo_id = self.ids[v_index]
# text = self.textDic[vdo_id]
# text = torch.tensor(text).long()
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# img = img.astype(np.float32) / 255
# sample = {'img': img, 'text': text}
# if self.transform:
# sample = self.transform(sample)
# return sample
class TestVideoDataset(Dataset):
    """Test-time dataset that samples `n` evenly spaced frames from each
    query video, with the video's tokenized title.

    NOTE(review): `dir_list` is a mutable default argument; it is only read,
    never mutated, so this is benign — but a tuple default would be safer.
    """
    def __init__(self, root_dir, transform=None, n=20, dir_list=['validation_dataset_part1', 'validation_dataset_part2'], maxLen=64, PAD=0):
        self.root_dir = root_dir
        self.transform = transform
        # Frames sampled per video; dataset length is videos * n.
        self.n = n
        self.mode = 'video'
        label_file = 'label.json'
        with open(os.path.join(root_dir, label_file), 'r') as f:
            self.labelDic = json.load(f)
        self.num_classes = len(self.labelDic['label2index'])
        text2num = Text2Num(maxLen=maxLen, PAD=PAD)
        self.vocab_size = text2num.vocab_size
        dirs = [os.path.join(root_dir, d) for d in dir_list]
        # gap = 400 // n
        # self.frames_ids = [i*gap for i in range(n)]
        self.videos = []
        self.ids = []
        self.textDic = {}
        # Walk <part>/video/*.mp4; one title per video id.
        for di in dirs:
            vdo_names = os.listdir(os.path.join(di, 'video'))
            for vdo_name in vdo_names:
                self.videos.append(os.path.join(di, 'video', vdo_name))
                self.ids.append(vdo_name.split('.')[0])
                textPath = os.path.join(di, 'video_text', vdo_name.split('.')[0]+'.txt')
                with open(textPath, 'r') as f:
                    self.textDic[vdo_name.split('.')[0]] = text2num(f.readline())
        # self.videos = self.videos[:10]
    def __len__(self):
        return len(self.videos)*self.n
    def __getitem__(self, index):
        """Decode frame (index % n) of video (index // n); return
        {'img': float RGB HWC array in [0,1], 'text': LongTensor}."""
        v_index = index // self.n
        # f_index = self.frames_ids[index % self.n]
        vdo_name = self.videos[v_index]
        # Open the container per item; frame position is evenly spaced
        # across the video's total frame count.
        cap = cv2.VideoCapture(vdo_name)
        frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        f_index = int((frames // self.n) * (index % self.n))
        cap.set(cv2.CAP_PROP_POS_FRAMES, f_index)
        ret, img = cap.read()
        cap.release()
        vdo_id = self.ids[v_index]
        text = self.textDic[vdo_id]
        text = torch.Tensor(text).long()
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img.astype(np.float32) / 255
        sample = {'img': img, 'text': text}
        if self.transform:
            sample = self.transform(sample)
        return sample
    def getImageInfo(self, index):
        # Same frame-index arithmetic as __getitem__, without decoding pixels.
        v_index = index // self.n
        # frame = self.frames_ids[index % self.n]
        vdoPath = self.videos[v_index]
        cap = cv2.VideoCapture(vdoPath)
        frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        frame = int((frames // self.n) * (index % self.n))
        cap.release()
        vdo_id = self.ids[v_index]
        return vdoPath, vdo_id, str(frame)
class TestDataset(Dataset):
    """Crops detection boxes from test images or video frames.

    `items` rows are (frame, imgID, imgPath, xmin, ymin, xmax, ymax,
    classes, text). Length is doubled: indices >= len(items) return
    horizontally flipped crops (TTA).
    """
    def __init__(self, root_dir, items, size, mode):
        assert mode in ['image', 'video']
        self.mode = mode
        self.size = size
        self.root_dir = root_dir
        self.items = items
        self.length = len(items)
        self.transform = transforms.Normalize(
            mean=[0.55574415, 0.51230767, 0.51123354],
            std=[0.21303795, 0.21604613, 0.21273348])
    def __len__(self):
        # Doubled for flip TTA.
        return len(self.items) * 2
    def __getitem__(self, index):
        """Return the normalized CHW crop for detection index % length,
        flipped when index >= length, plus its metadata and title tokens."""
        frame, imgID, imgPath, xmin, ymin, xmax, ymax, classes, text = self.items[index%self.length]
        # Images are read directly; video frames are seek-decoded on demand.
        if self.mode == 'image':
            img = cv2.imread(imgPath)
        else:
            cap = cv2.VideoCapture(imgPath)
            cap.set(cv2.CAP_PROP_POS_FRAMES, int(frame))
            ret, img = cap.read()
            cap.release()
        det = img[ymin:ymax, xmin:xmax, :]
        if index >= self.length:
            det = det[:, ::-1, :].copy()
        det = cv2.resize(det, self.size)
        det = cv2.cvtColor(det, cv2.COLOR_BGR2RGB)
        det = det.astype(np.float32) / 255
        det = torch.from_numpy(det)
        det = det.permute(2, 0, 1)
        det = self.transform(det)
        return {
            'img': det,
            'imgID': imgID,
            'frame': frame,
            'box': np.array([xmin, ymin, xmax, ymax]),
            'classes': classes,
            'text': text}
# Ad-hoc smoke test: builds the default training dataset and prints one
# sample. The commented-out sections below are leftover experiments
# (loader check, image dumps, mean/std computation); kept for reference.
if __name__ == "__main__":
    from config import get_args_arcface
    opt = get_args_arcface()
    dataset = ArcfaceDataset()
    # print(len(dataset))
    print(dataset[0])
    # from utils import collater_HardTriplet
    # from torch.utils.data import DataLoader
    # training_params = {"batch_size": 20,
    #                     "shuffle": True,
    #                     "drop_last": True,
    #                     "collate_fn": collater_HardTriplet,
    #                     "num_workers": 4}
    # from PIL import Image
    # dataset = ArcfaceDataset()
    # print(dataset[0])
    # loader = DataLoader(dataset, **training_params)
    # for data in loader:
    #     print(data['img'].size())
    #     break
    # print(len(dataset))
    # for d in tqdm(dataset):
    #     pass
    # img = dataset[100]['img']
    # mi = min(img.view(-1))
    # ma = max(img.view(-1))
    # img = (img-mi)/(ma-mi)
    # img = img*256
    # img = img.permute(1, 2, 0)
    # img = img.numpy()
    # img = Image.fromarray(img.astype(np.uint8))
    # img.save('aaa.jpg')
    # img = dataset[0]['vdo']
    # mi = min(img.view(-1))
    # ma = max(img.view(-1))
    # img = (img-mi)/(ma-mi)
    # img = img*256
    # img = img.permute(1, 2, 0)
    # img = img.numpy()
    # img = Image.fromarray(img.astype(np.uint8))
    # img.save('bbb.jpg')
    # mean = np.zeros(3)
    # std = np.zeros(3)
    # for d in tqdm(dataset):
    #     img = d['img']
    #     for i in range(3):
    #         mean[i] += img[:, :, i].mean()
    #         std[i] += img[:, :, i].std()
    # mean = mean / len(dataset)
    # std = std / len(dataset)
    # print(mean, std)
| [
"autoaugment.rand_augment_transform",
"numpy.random.rand",
"torch.from_numpy",
"numpy.array",
"arcface.inception_v4.InceptionV4",
"os.listdir",
"arcface.inceptionresnet_v2.InceptionResNetV2",
"config.get_args_arcface",
"random.randint",
"random.sample",
"random.choice",
"torch.Tensor",
"arcf... | [((47781, 47799), 'config.get_args_arcface', 'get_args_arcface', ([], {}), '()\n', (47797, 47799), False, 'from config import get_args_arcface\n'), ((989, 1034), 'torch.utils.data.DataLoader', 'DataLoader', (['arcfaceDataset'], {}), '(arcfaceDataset, **training_params)\n', (999, 1034), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((3853, 3873), 'os.listdir', 'os.listdir', (['root_dir'], {}), '(root_dir)\n', (3863, 3873), False, 'import os\n'), ((3955, 3970), 'tqdm.tqdm', 'tqdm', (['instances'], {}), '(instances)\n', (3959, 3970), False, 'from tqdm import tqdm\n'), ((4440, 4550), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.55574415, 0.51230767, 0.51123354]', 'std': '[0.21303795, 0.21604613, 0.21273348]'}), '(mean=[0.55574415, 0.51230767, 0.51123354], std=[\n 0.21303795, 0.21604613, 0.21273348])\n', (4460, 4550), True, 'import torchvision.transforms as transforms\n'), ((4888, 4924), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (4900, 4924), False, 'import cv2\n'), ((5153, 5174), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (5169, 5174), False, 'import torch\n'), ((5663, 5703), 'jieba.cut', 'jieba.cut', (['text'], {'cut_all': '(False)', 'HMM': '(True)'}), '(text, cut_all=False, HMM=True)\n', (5672, 5703), False, 'import jieba\n'), ((8389, 8425), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (8401, 8425), False, 'import cv2\n'), ((14059, 14114), 'autoaugment.rand_augment_transform', 'rand_augment_transform', (['"""rand-m9-n3-mstd0.5"""', 'aa_params'], {}), "('rand-m9-n3-mstd0.5', aa_params)\n", (14081, 14114), False, 'from autoaugment import rand_augment_transform\n'), ((16938, 17048), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.55574415, 0.51230767, 0.51123354]', 'std': '[0.21303795, 0.21604613, 0.21273348]'}), '(mean=[0.55574415, 0.51230767, 
0.51123354], std=[\n 0.21303795, 0.21604613, 0.21273348])\n', (16958, 17048), True, 'import torchvision.transforms as transforms\n'), ((17247, 17277), 'numpy.load', 'np.load', (["(imgName[:-4] + '.npy')"], {}), "(imgName[:-4] + '.npy')\n", (17254, 17277), True, 'import numpy as np\n'), ((17877, 17903), 'random.randint', 'random.randint', (['(0)', '(h - 256)'], {}), '(0, h - 256)\n', (17891, 17903), False, 'import random\n'), ((17915, 17941), 'random.randint', 'random.randint', (['(0)', '(w - 256)'], {}), '(0, w - 256)\n', (17929, 17941), False, 'import random\n'), ((17999, 18025), 'cv2.resize', 'cv2.resize', (['img', 'self.size'], {}), '(img, self.size)\n', (18009, 18025), False, 'import cv2\n'), ((18757, 18778), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (18773, 18778), False, 'import torch\n'), ((19649, 19681), 'os.path.join', 'os.path.join', (['root_dir', 'savePath'], {}), '(root_dir, savePath)\n', (19661, 19681), False, 'import os\n'), ((21996, 22106), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.55574415, 0.51230767, 0.51123354]', 'std': '[0.21303795, 0.21604613, 0.21273348]'}), '(mean=[0.55574415, 0.51230767, 0.51123354], std=[\n 0.21303795, 0.21604613, 0.21273348])\n', (22016, 22106), True, 'import torchvision.transforms as transforms\n'), ((22235, 22277), 'random.choice', 'random.choice', (["self.dic[index][1]['image']"], {}), "(self.dic[index][1]['image'])\n", (22248, 22277), False, 'import random\n'), ((22297, 22339), 'random.choice', 'random.choice', (["self.dic[index][1]['video']"], {}), "(self.dic[index][1]['video'])\n", (22310, 22339), False, 'import random\n'), ((23970, 24002), 'os.path.join', 'os.path.join', (['root_dir', 'savePath'], {}), '(root_dir, savePath)\n', (23982, 24002), False, 'import os\n'), ((26257, 26367), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.55574415, 0.51230767, 0.51123354]', 'std': '[0.21303795, 0.21604613, 0.21273348]'}), 
'(mean=[0.55574415, 0.51230767, 0.51123354], std=[\n 0.21303795, 0.21604613, 0.21273348])\n', (26277, 26367), True, 'import torchvision.transforms as transforms\n'), ((26869, 26924), 'random.choice', 'random.choice', (['self.cls_ins_dic[label_q][instance_id_n]'], {}), '(self.cls_ins_dic[label_q][instance_id_n])\n', (26882, 26924), False, 'import random\n'), ((27200, 27227), 'torch.tensor', 'torch.tensor', (['instance_id_q'], {}), '(instance_id_q)\n', (27212, 27227), False, 'import torch\n'), ((27252, 27279), 'torch.tensor', 'torch.tensor', (['instance_id_p'], {}), '(instance_id_p)\n', (27264, 27279), False, 'import torch\n'), ((27304, 27331), 'torch.tensor', 'torch.tensor', (['instance_id_n'], {}), '(instance_id_n)\n', (27316, 27331), False, 'import torch\n'), ((27675, 27711), 'random.randint', 'random.randint', (['(0)', '(hq - self.size[0])'], {}), '(0, hq - self.size[0])\n', (27689, 27711), False, 'import random\n'), ((27723, 27759), 'random.randint', 'random.randint', (['(0)', '(wq - self.size[1])'], {}), '(0, wq - self.size[1])\n', (27737, 27759), False, 'import random\n'), ((27837, 27873), 'random.randint', 'random.randint', (['(0)', '(hp - self.size[0])'], {}), '(0, hp - self.size[0])\n', (27851, 27873), False, 'import random\n'), ((27885, 27921), 'random.randint', 'random.randint', (['(0)', '(wp - self.size[1])'], {}), '(0, wp - self.size[1])\n', (27899, 27921), False, 'import random\n'), ((27999, 28035), 'random.randint', 'random.randint', (['(0)', '(hn - self.size[0])'], {}), '(0, hn - self.size[0])\n', (28013, 28035), False, 'import random\n'), ((28047, 28083), 'random.randint', 'random.randint', (['(0)', '(wn - self.size[1])'], {}), '(0, wn - self.size[1])\n', (28061, 28083), False, 'import random\n'), ((29365, 29420), 'autoaugment.rand_augment_transform', 'rand_augment_transform', (['"""rand-m9-n3-mstd0.5"""', 'aa_params'], {}), "('rand-m9-n3-mstd0.5', aa_params)\n", (29387, 29420), False, 'from autoaugment import rand_augment_transform\n'), ((32012, 
32122), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.55574415, 0.51230767, 0.51123354]', 'std': '[0.21303795, 0.21604613, 0.21273348]'}), '(mean=[0.55574415, 0.51230767, 0.51123354], std=[\n 0.21303795, 0.21604613, 0.21273348])\n', (32032, 32122), True, 'import torchvision.transforms as transforms\n'), ((32306, 32359), 'random.sample', 'random.sample', (['self.samples[instance]', 'self.n_samples'], {}), '(self.samples[instance], self.n_samples)\n', (32319, 32359), False, 'import random\n'), ((33970, 33994), 'torch.stack', 'torch.stack', (['imgs'], {'dim': '(0)'}), '(imgs, dim=0)\n', (33981, 33994), False, 'import torch\n'), ((34015, 34044), 'torch.stack', 'torch.stack', (['instances'], {'dim': '(0)'}), '(instances, dim=0)\n', (34026, 34044), False, 'import torch\n'), ((34976, 34996), 'os.listdir', 'os.listdir', (['root_dir'], {}), '(root_dir)\n', (34986, 34996), False, 'import os\n'), ((35095, 35110), 'tqdm.tqdm', 'tqdm', (['instances'], {}), '(instances)\n', (35099, 35110), False, 'from tqdm import tqdm\n'), ((36135, 36245), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.55574415, 0.51230767, 0.51123354]', 'std': '[0.21303795, 0.21604613, 0.21273348]'}), '(mean=[0.55574415, 0.51230767, 0.51123354], std=[\n 0.21303795, 0.21604613, 0.21273348])\n', (36155, 36245), True, 'import torchvision.transforms as transforms\n'), ((36918, 36954), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (36930, 36954), False, 'import cv2\n'), ((37012, 37048), 'cv2.cvtColor', 'cv2.cvtColor', (['vdo', 'cv2.COLOR_BGR2RGB'], {}), '(vdo, cv2.COLOR_BGR2RGB)\n', (37024, 37048), False, 'import cv2\n'), ((37801, 37822), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (37817, 37822), False, 'import torch\n'), ((37872, 37893), 'torch.from_numpy', 'torch.from_numpy', (['vdo'], {}), '(vdo)\n', (37888, 37893), False, 'import torch\n'), ((38502, 38612), 
'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.55574415, 0.51230767, 0.51123354]', 'std': '[0.21303795, 0.21604613, 0.21273348]'}), '(mean=[0.55574415, 0.51230767, 0.51123354], std=[\n 0.21303795, 0.21604613, 0.21273348])\n', (38522, 38612), True, 'import torchvision.transforms as transforms\n'), ((39019, 39045), 'cv2.resize', 'cv2.resize', (['det', 'self.size'], {}), '(det, self.size)\n', (39029, 39045), False, 'import cv2\n'), ((39060, 39096), 'cv2.cvtColor', 'cv2.cvtColor', (['det', 'cv2.COLOR_BGR2RGB'], {}), '(det, cv2.COLOR_BGR2RGB)\n', (39072, 39096), False, 'import cv2\n'), ((39163, 39184), 'torch.from_numpy', 'torch.from_numpy', (['det'], {}), '(det)\n', (39179, 39184), False, 'import torch\n'), ((41049, 41068), 'cv2.imread', 'cv2.imread', (['imgPath'], {}), '(imgPath)\n', (41059, 41068), False, 'import cv2\n'), ((41083, 41119), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (41095, 41119), False, 'import cv2\n'), ((45251, 45277), 'cv2.VideoCapture', 'cv2.VideoCapture', (['vdo_name'], {}), '(vdo_name)\n', (45267, 45277), False, 'import cv2\n'), ((45628, 45664), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (45640, 45664), False, 'import cv2\n'), ((46027, 46052), 'cv2.VideoCapture', 'cv2.VideoCapture', (['vdoPath'], {}), '(vdoPath)\n', (46043, 46052), False, 'import cv2\n'), ((46556, 46666), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.55574415, 0.51230767, 0.51123354]', 'std': '[0.21303795, 0.21604613, 0.21273348]'}), '(mean=[0.55574415, 0.51230767, 0.51123354], std=[\n 0.21303795, 0.21604613, 0.21273348])\n', (46576, 46666), True, 'import torchvision.transforms as transforms\n'), ((47260, 47286), 'cv2.resize', 'cv2.resize', (['det', 'self.size'], {}), '(det, self.size)\n', (47270, 47286), False, 'import cv2\n'), ((47301, 47337), 'cv2.cvtColor', 'cv2.cvtColor', (['det', 'cv2.COLOR_BGR2RGB'], 
{}), '(det, cv2.COLOR_BGR2RGB)\n', (47313, 47337), False, 'import cv2\n'), ((47396, 47417), 'torch.from_numpy', 'torch.from_numpy', (['det'], {}), '(det)\n', (47412, 47417), False, 'import torch\n'), ((1145, 1156), 'arcface.resnet.ResNet', 'ResNet', (['opt'], {}), '(opt)\n', (1151, 1156), False, 'from arcface.resnet import ResNet\n'), ((2412, 2431), 'tqdm.tqdm', 'tqdm', (['arcfaceLoader'], {}), '(arcfaceLoader)\n', (2416, 2431), False, 'from tqdm import tqdm\n'), ((3732, 3744), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3741, 3744), False, 'import json\n'), ((3991, 4022), 'os.listdir', 'os.listdir', (['(root_dir + instance)'], {}), '(root_dir + instance)\n', (4001, 4022), False, 'import os\n'), ((4836, 4872), 'os.path.join', 'os.path.join', (['self.root_dir', 'imgPath'], {}), '(self.root_dir, imgPath)\n', (4848, 4872), False, 'import os\n'), ((5509, 5521), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5518, 5521), False, 'import json\n'), ((6666, 6678), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6675, 6678), False, 'import json\n'), ((8337, 8373), 'os.path.join', 'os.path.join', (['self.root_dir', 'imgPath'], {}), '(self.root_dir, imgPath)\n', (8349, 8373), False, 'import os\n'), ((8613, 8629), 'numpy.zeros', 'np.zeros', (['(1, 6)'], {}), '((1, 6))\n', (8621, 8629), True, 'import numpy as np\n'), ((10202, 10214), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10211, 10214), False, 'import json\n'), ((12187, 12223), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (12199, 12223), False, 'import cv2\n'), ((14664, 14676), 'json.load', 'json.load', (['f'], {}), '(f)\n', (14673, 14676), False, 'import json\n'), ((14787, 14799), 'json.load', 'json.load', (['f'], {}), '(f)\n', (14796, 14799), False, 'import json\n'), ((15404, 15436), 'os.path.join', 'os.path.join', (['root_dir', 'savePath'], {}), '(root_dir, savePath)\n', (15416, 15436), False, 'import os\n'), ((18670, 18686), 'numpy.random.rand', 
'np.random.rand', ([], {}), '()\n', (18684, 18686), True, 'import numpy as np\n'), ((20469, 20481), 'json.load', 'json.load', (['f'], {}), '(f)\n', (20478, 20481), False, 'import json\n'), ((20592, 20604), 'json.load', 'json.load', (['f'], {}), '(f)\n', (20601, 20604), False, 'import json\n'), ((22815, 22841), 'random.randint', 'random.randint', (['(0)', '(h - 224)'], {}), '(0, h - 224)\n', (22829, 22841), False, 'import random\n'), ((22859, 22881), 'random.randint', 'random.randint', (['(224)', 'h'], {}), '(224, h)\n', (22873, 22881), False, 'import random\n'), ((22901, 22927), 'random.randint', 'random.randint', (['(0)', '(w - 224)'], {}), '(0, w - 224)\n', (22915, 22927), False, 'import random\n'), ((22945, 22967), 'random.randint', 'random.randint', (['(224)', 'w'], {}), '(224, w)\n', (22959, 22967), False, 'import random\n'), ((23047, 23073), 'cv2.resize', 'cv2.resize', (['img', 'self.size'], {}), '(img, self.size)\n', (23057, 23073), False, 'import cv2\n'), ((23334, 23355), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (23350, 23355), False, 'import torch\n'), ((24103, 24115), 'json.load', 'json.load', (['f'], {}), '(f)\n', (24112, 24115), False, 'import json\n'), ((24215, 24227), 'json.load', 'json.load', (['f'], {}), '(f)\n', (24224, 24227), False, 'import json\n'), ((24327, 24339), 'json.load', 'json.load', (['f'], {}), '(f)\n', (24336, 24339), False, 'import json\n'), ((24454, 24466), 'json.load', 'json.load', (['f'], {}), '(f)\n', (24463, 24466), False, 'import json\n'), ((26629, 26684), 'random.choice', 'random.choice', (['self.cls_ins_dic[label_q][instance_id_q]'], {}), '(self.cls_ins_dic[label_q][instance_id_q])\n', (26642, 26684), False, 'import random\n'), ((28159, 28175), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (28173, 28175), True, 'import numpy as np\n'), ((28247, 28263), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (28261, 28263), True, 'import numpy as np\n'), ((28335, 28351), 
'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (28349, 28351), True, 'import numpy as np\n'), ((30004, 30016), 'json.load', 'json.load', (['f'], {}), '(f)\n', (30013, 30016), False, 'import json\n'), ((30215, 30247), 'os.path.join', 'os.path.join', (['root_dir', 'savePath'], {}), '(root_dir, savePath)\n', (30227, 30247), False, 'import os\n'), ((32452, 32482), 'numpy.load', 'np.load', (["(imgPath[:-4] + '.npy')"], {}), "(imgPath[:-4] + '.npy')\n", (32459, 32482), True, 'import numpy as np\n'), ((32975, 33010), 'random.randint', 'random.randint', (['(0)', '(h - self.size[0])'], {}), '(0, h - self.size[0])\n', (32989, 33010), False, 'import random\n'), ((33026, 33061), 'random.randint', 'random.randint', (['(0)', '(w - self.size[1])'], {}), '(0, w - self.size[1])\n', (33040, 33061), False, 'import random\n'), ((33626, 33648), 'torch.tensor', 'torch.tensor', (['instance'], {}), '(instance)\n', (33638, 33648), False, 'import torch\n'), ((33760, 33781), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (33776, 33781), False, 'import torch\n'), ((34619, 34631), 'json.load', 'json.load', (['f'], {}), '(f)\n', (34628, 34631), False, 'import json\n'), ((34734, 34746), 'json.load', 'json.load', (['f'], {}), '(f)\n', (34743, 34746), False, 'import json\n'), ((35131, 35162), 'os.listdir', 'os.listdir', (['(root_dir + instance)'], {}), '(root_dir + instance)\n', (35141, 35162), False, 'import os\n'), ((36803, 36839), 'os.path.join', 'os.path.join', (['self.root_dir', 'imgPath'], {}), '(self.root_dir, imgPath)\n', (36815, 36839), False, 'import os\n'), ((36866, 36902), 'os.path.join', 'os.path.join', (['self.root_dir', 'vdoPath'], {}), '(self.root_dir, vdoPath)\n', (36878, 36902), False, 'import os\n'), ((38191, 38206), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (38203, 38206), False, 'import torch\n'), ((38230, 38245), 'torch.tensor', 'torch.tensor', (['(1)'], {}), '(1)\n', (38242, 38245), False, 'import torch\n'), ((39399, 39433), 
'numpy.array', 'np.array', (['[xmin, ymin, xmax, ymax]'], {}), '([xmin, ymin, xmax, ymax])\n', (39407, 39433), True, 'import numpy as np\n'), ((39889, 39901), 'json.load', 'json.load', (['f'], {}), '(f)\n', (39898, 39901), False, 'import json\n'), ((39981, 40006), 'os.path.join', 'os.path.join', (['root_dir', 'd'], {}), '(root_dir, d)\n', (39993, 40006), False, 'import os\n'), ((44084, 44096), 'json.load', 'json.load', (['f'], {}), '(f)\n', (44093, 44096), False, 'import json\n'), ((44273, 44298), 'os.path.join', 'os.path.join', (['root_dir', 'd'], {}), '(root_dir, d)\n', (44285, 44298), False, 'import os\n'), ((46934, 46953), 'cv2.imread', 'cv2.imread', (['imgPath'], {}), '(imgPath)\n', (46944, 46953), False, 'import cv2\n'), ((46986, 47011), 'cv2.VideoCapture', 'cv2.VideoCapture', (['imgPath'], {}), '(imgPath)\n', (47002, 47011), False, 'import cv2\n'), ((47608, 47642), 'numpy.array', 'np.array', (['[xmin, ymin, xmax, ymax]'], {}), '([xmin, ymin, xmax, ymax])\n', (47616, 47642), True, 'import numpy as np\n'), ((1295, 1309), 'arcface.googlenet.GoogLeNet', 'GoogLeNet', (['opt'], {}), '(opt)\n', (1304, 1309), False, 'from arcface.googlenet import GoogLeNet\n'), ((1997, 2042), 'os.path.join', 'os.path.join', (['opt.saved_path', "(b_name + '.pth')"], {}), "(opt.saved_path, b_name + '.pth')\n", (2009, 2042), False, 'import os\n'), ((3659, 3699), 'os.path.join', 'os.path.join', (['"""data"""', "(tat + '_text.json')"], {}), "('data', tat + '_text.json')\n", (3671, 3699), False, 'import os\n'), ((4723, 4741), 'torch.Tensor', 'torch.Tensor', (['text'], {}), '(text)\n', (4735, 4741), False, 'import torch\n'), ((5435, 5471), 'os.path.join', 'os.path.join', (['root_dir', '"""vocab.json"""'], {}), "(root_dir, 'vocab.json')\n", (5447, 5471), False, 'import os\n'), ((6591, 6625), 'os.path.join', 'os.path.join', (['root_dir', 'label_file'], {}), '(root_dir, label_file)\n', (6603, 6625), False, 'import os\n'), ((7246, 7258), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7255, 
7258), False, 'import json\n'), ((8285, 8303), 'torch.Tensor', 'torch.Tensor', (['text'], {}), '(text)\n', (8297, 8303), False, 'import torch\n'), ((10127, 10161), 'os.path.join', 'os.path.join', (['root_dir', 'label_file'], {}), '(root_dir, label_file)\n', (10139, 10161), False, 'import os\n'), ((10604, 10616), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10613, 10616), False, 'import json\n'), ((12131, 12167), 'os.path.join', 'os.path.join', (['self.root_dir', 'imgPath'], {}), '(self.root_dir, imgPath)\n', (12143, 12167), False, 'import os\n'), ((12427, 12443), 'numpy.zeros', 'np.zeros', (['(1, 6)'], {}), '((1, 6))\n', (12435, 12443), True, 'import numpy as np\n'), ((14589, 14625), 'os.path.join', 'os.path.join', (['root_dir', 'instanceFile'], {}), '(root_dir, instanceFile)\n', (14601, 14625), False, 'import os\n'), ((14695, 14740), 'os.path.join', 'os.path.join', (['root_dir', '"""instance2label.json"""'], {}), "(root_dir, 'instance2label.json')\n", (14707, 14740), False, 'import os\n'), ((17737, 17755), 'torch.tensor', 'torch.tensor', (['text'], {}), '(text)\n', (17749, 17755), False, 'import torch\n'), ((17778, 17796), 'torch.tensor', 'torch.tensor', (['iORv'], {}), '(iORv)\n', (17790, 17796), False, 'import torch\n'), ((20389, 20430), 'os.path.join', 'os.path.join', (['root_dir', '"""instanceID.json"""'], {}), "(root_dir, 'instanceID.json')\n", (20401, 20430), False, 'import os\n'), ((20500, 20545), 'os.path.join', 'os.path.join', (['root_dir', '"""instance2label.json"""'], {}), "(root_dir, 'instance2label.json')\n", (20512, 20545), False, 'import os\n'), ((23239, 23255), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (23253, 23255), True, 'import numpy as np\n'), ((24022, 24074), 'os.path.join', 'os.path.join', (['root_dir', "(img_tat + '_annotation.json')"], {}), "(root_dir, img_tat + '_annotation.json')\n", (24034, 24074), False, 'import os\n'), ((24134, 24186), 'os.path.join', 'os.path.join', (['root_dir', "(vdo_tat + '_annotation.json')"], 
{}), "(root_dir, vdo_tat + '_annotation.json')\n", (24146, 24186), False, 'import os\n'), ((24247, 24288), 'os.path.join', 'os.path.join', (['root_dir', '"""instanceID.json"""'], {}), "(root_dir, 'instanceID.json')\n", (24259, 24288), False, 'import os\n'), ((24367, 24412), 'os.path.join', 'os.path.join', (['root_dir', '"""instance2label.json"""'], {}), "(root_dir, 'instance2label.json')\n", (24379, 24412), False, 'import os\n'), ((28437, 28460), 'torch.from_numpy', 'torch.from_numpy', (['img_q'], {}), '(img_q)\n', (28453, 28460), False, 'import torch\n'), ((28494, 28517), 'torch.from_numpy', 'torch.from_numpy', (['img_p'], {}), '(img_p)\n', (28510, 28517), False, 'import torch\n'), ((28551, 28574), 'torch.from_numpy', 'torch.from_numpy', (['img_n'], {}), '(img_n)\n', (28567, 28574), False, 'import torch\n'), ((29929, 29965), 'os.path.join', 'os.path.join', (['root_dir', 'instanceFile'], {}), '(root_dir, instanceFile)\n', (29941, 29965), False, 'import os\n'), ((30356, 30368), 'json.load', 'json.load', (['f'], {}), '(f)\n', (30365, 30368), False, 'import json\n'), ((30476, 30488), 'json.load', 'json.load', (['f'], {}), '(f)\n', (30485, 30488), False, 'import json\n'), ((33665, 33681), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (33679, 33681), True, 'import numpy as np\n'), ((34535, 34579), 'os.path.join', 'os.path.join', (['"""data"""', "(img_tat + '_text.json')"], {}), "('data', img_tat + '_text.json')\n", (34547, 34579), False, 'import os\n'), ((34650, 34694), 'os.path.join', 'os.path.join', (['"""data"""', "(vdo_tat + '_text.json')"], {}), "('data', vdo_tat + '_text.json')\n", (34662, 34694), False, 'import os\n'), ((36575, 36597), 'torch.Tensor', 'torch.Tensor', (['img_text'], {}), '(img_text)\n', (36587, 36597), False, 'import torch\n'), ((36624, 36646), 'torch.Tensor', 'torch.Tensor', (['vdo_text'], {}), '(vdo_text)\n', (36636, 36646), False, 'import torch\n'), ((38912, 38948), 'os.path.join', 'os.path.join', (['self.root_dir', 'imgPath'], {}), 
'(self.root_dir, imgPath)\n', (38924, 38948), False, 'import os\n'), ((39814, 39848), 'os.path.join', 'os.path.join', (['root_dir', 'label_file'], {}), '(root_dir, label_file)\n', (39826, 39848), False, 'import os\n'), ((40293, 40318), 'os.path.join', 'os.path.join', (['di', '"""image"""'], {}), "(di, 'image')\n", (40305, 40318), False, 'import os\n'), ((40699, 40747), 'os.path.join', 'os.path.join', (['di', '"""image_text"""', "(img_dir + '.txt')"], {}), "(di, 'image_text', img_dir + '.txt')\n", (40711, 40747), False, 'import os\n'), ((41247, 41265), 'torch.Tensor', 'torch.Tensor', (['text'], {}), '(text)\n', (41259, 41265), False, 'import torch\n'), ((44009, 44043), 'os.path.join', 'os.path.join', (['root_dir', 'label_file'], {}), '(root_dir, label_file)\n', (44021, 44043), False, 'import os\n'), ((44539, 44564), 'os.path.join', 'os.path.join', (['di', '"""video"""'], {}), "(di, 'video')\n", (44551, 44564), False, 'import os\n'), ((45579, 45597), 'torch.Tensor', 'torch.Tensor', (['text'], {}), '(text)\n', (45591, 45597), False, 'import torch\n'), ((1406, 1422), 'arcface.inception_v4.InceptionV4', 'InceptionV4', (['opt'], {}), '(opt)\n', (1417, 1422), False, 'from arcface.inception_v4 import InceptionV4\n'), ((7042, 7088), 'os.path.join', 'os.path.join', (['root_dir', "(t + '_annotation.json')"], {}), "(root_dir, t + '_annotation.json')\n", (7054, 7088), False, 'import os\n'), ((7125, 7137), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7134, 7137), False, 'import json\n'), ((7161, 7201), 'os.path.join', 'os.path.join', (['root_dir', "(t + '_text.json')"], {}), "(root_dir, t + '_text.json')\n", (7173, 7201), False, 'import os\n'), ((7737, 7773), 'os.path.join', 'os.path.join', (['tats[i]', "d['img_name']"], {}), "(tats[i], d['img_name'])\n", (7749, 7773), False, 'import os\n'), ((10400, 10446), 'os.path.join', 'os.path.join', (['root_dir', "(t + '_annotation.json')"], {}), "(root_dir, t + '_annotation.json')\n", (10412, 10446), False, 'import os\n'), ((10483, 
10495), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10492, 10495), False, 'import json\n'), ((10519, 10559), 'os.path.join', 'os.path.join', (['root_dir', "(t + '_text.json')"], {}), "(root_dir, t + '_text.json')\n", (10531, 10559), False, 'import os\n'), ((11237, 11273), 'os.path.join', 'os.path.join', (['tats[i]', "d['img_name']"], {}), "(tats[i], d['img_name'])\n", (11249, 11273), False, 'import os\n'), ((12075, 12093), 'torch.Tensor', 'torch.Tensor', (['text'], {}), '(text)\n', (12087, 12093), False, 'import torch\n'), ((19889, 19937), 'os.path.join', 'os.path.join', (['root_dir', "(tat + '_annotation.json')"], {}), "(root_dir, tat + '_annotation.json')\n", (19901, 19937), False, 'import os\n'), ((19973, 19985), 'json.load', 'json.load', (['f'], {}), '(f)\n', (19982, 19985), False, 'import json\n'), ((20009, 20051), 'os.path.join', 'os.path.join', (['root_dir', "(tat + '_text.json')"], {}), "(root_dir, tat + '_text.json')\n", (20021, 20051), False, 'import os\n'), ((20098, 20110), 'json.load', 'json.load', (['f'], {}), '(f)\n', (20107, 20110), False, 'import json\n'), ((27357, 27395), 'os.path.join', 'os.path.join', (['self.savePath', 'imgName_q'], {}), '(self.savePath, imgName_q)\n', (27369, 27395), False, 'import os\n'), ((27433, 27471), 'os.path.join', 'os.path.join', (['self.savePath', 'imgName_p'], {}), '(self.savePath, imgName_p)\n', (27445, 27471), False, 'import os\n'), ((27509, 27547), 'os.path.join', 'os.path.join', (['self.savePath', 'imgName_n'], {}), '(self.savePath, imgName_n)\n', (27521, 27547), False, 'import os\n'), ((30271, 30323), 'os.path.join', 'os.path.join', (['root_dir', "(img_tat + '_annotation.json')"], {}), "(root_dir, img_tat + '_annotation.json')\n", (30283, 30323), False, 'import os\n'), ((30391, 30443), 'os.path.join', 'os.path.join', (['root_dir', "(vdo_tat + '_annotation.json')"], {}), "(root_dir, vdo_tat + '_annotation.json')\n", (30403, 30443), False, 'import os\n'), ((40400, 40434), 'os.path.join', 'os.path.join', 
(['di', '"""image"""', 'img_dir'], {}), "(di, 'image', img_dir)\n", (40412, 40434), False, 'import os\n'), ((44640, 44675), 'os.path.join', 'os.path.join', (['di', '"""video"""', 'vdo_name'], {}), "(di, 'video', vdo_name)\n", (44652, 44675), False, 'import os\n'), ((1525, 1547), 'arcface.inceptionresnet_v2.InceptionResNetV2', 'InceptionResNetV2', (['opt'], {}), '(opt)\n', (1542, 1547), False, 'from arcface.inceptionresnet_v2 import InceptionResNetV2\n'), ((4134, 4161), 'os.path.join', 'os.path.join', (['instance', 'img'], {}), '(instance, img)\n', (4146, 4161), False, 'import os\n'), ((15537, 15585), 'os.path.join', 'os.path.join', (['root_dir', "(tat + '_annotation.json')"], {}), "(root_dir, tat + '_annotation.json')\n", (15549, 15585), False, 'import os\n'), ((15625, 15637), 'json.load', 'json.load', (['f'], {}), '(f)\n', (15634, 15637), False, 'import json\n'), ((15665, 15707), 'os.path.join', 'os.path.join', (['root_dir', "(tat + '_text.json')"], {}), "(root_dir, tat + '_text.json')\n", (15677, 15707), False, 'import os\n'), ((15753, 15765), 'json.load', 'json.load', (['f'], {}), '(f)\n', (15762, 15765), False, 'import json\n'), ((22531, 22567), 'os.path.join', 'os.path.join', (['self.savePath', 'imgName'], {}), '(self.savePath, imgName)\n', (22543, 22567), False, 'import os\n'), ((35329, 35356), 'os.path.join', 'os.path.join', (['instance', 'img'], {}), '(instance, img)\n', (35341, 35356), False, 'import os\n'), ((35641, 35668), 'os.path.join', 'os.path.join', (['instance', 'img'], {}), '(instance, img)\n', (35653, 35668), False, 'import os\n'), ((40518, 40562), 'os.path.join', 'os.path.join', (['di', '"""image"""', 'img_dir', 'img_name'], {}), "(di, 'image', img_dir, img_name)\n", (40530, 40562), False, 'import os\n'), ((1641, 1654), 'arcface.densenet.DenseNet', 'DenseNet', (['opt'], {}), '(opt)\n', (1649, 1654), False, 'from arcface.densenet import DenseNet\n'), ((1782, 1797), 'arcface.resnet_cbam.ResNetCBAM', 'ResNetCBAM', (['opt'], {}), '(opt)\n', (1792, 
1797), False, 'from arcface.resnet_cbam import ResNetCBAM\n')] |
if __name__ == "__main__":
import logSetup
logSetup.initLogging()
import pickle
from common import database
import config
import common.LogBase
import WebMirror.rules
from WebMirror.OutputFilters.util.MessageConstructors import pack_message
import WebMirror.TimedTriggers.TriggerBase
import common.get_rpyc
# import WebMirror.OutputFilters.AmqpInterface
class MetaUpdater(WebMirror.TimedTriggers.TriggerBase.TriggerBaseClass):
	"""Timed trigger that packs feed metadata (distinct feed count and the
	scheduler's next run-times) and ships it to the feed-updater RPC service."""

	pluginName = "Meta Updater"
	loggerPath = 'MetaUpdater'

	def __init__(self):
		super().__init__()
		# Remote interface used to hand the packed messages off for delivery.
		self.rpc_interface = common.get_rpyc.RemoteJobInterface("FeedUpdater")

	def get_feed_count_message(self):
		# Union of every ruleset's feed URLs -> total number of distinct feeds.
		feed_urls = set()
		for ruleset in WebMirror.rules.load_rules():
			feed_urls.update(ruleset['feedurls'])
		return pack_message("system-feed-counts", {"feed-count" : len(feed_urls)})

	def get_times(self):
		# Read APScheduler's pickled job-state blobs directly from its table and
		# report each job's id with its next scheduled run time (ISO format).
		with common.database.session_context() as conn:
			rows = conn.execute("SELECT job_state FROM apscheduler_jobs;")
			update_times = [
				(job['id'], job['next_run_time'].isoformat())
				for job in (pickle.loads(state) for state, in rows)
			]
		database.delete_db_session()
		return pack_message("system-update-times", {"update-times" : update_times})

	def go(self):
		# Build both status messages, then push each over the RPC interface.
		feed_counts = self.get_feed_count_message()
		run_times = self.get_times()
		self.rpc_interface.put_feed_job(feed_counts)
		self.rpc_interface.put_feed_job(run_times)
def do_meta_update():
	"""Instantiate and run the meta updater three times, mirroring the
	original triple invocation."""
	for _ in range(3):
		MetaUpdater()._go()
# Allow running the meta update directly as a script.
if __name__ == '__main__':
	do_meta_update()
| [
"common.database.delete_db_session",
"logSetup.initLogging",
"pickle.loads",
"WebMirror.OutputFilters.util.MessageConstructors.pack_message"
] | [((48, 70), 'logSetup.initLogging', 'logSetup.initLogging', ([], {}), '()\n', (68, 70), False, 'import logSetup\n'), ((1401, 1441), 'WebMirror.OutputFilters.util.MessageConstructors.pack_message', 'pack_message', (['"""system-feed-counts"""', 'data'], {}), "('system-feed-counts', data)\n", (1413, 1441), False, 'from WebMirror.OutputFilters.util.MessageConstructors import pack_message\n'), ((1813, 1841), 'common.database.delete_db_session', 'database.delete_db_session', ([], {}), '()\n', (1839, 1841), False, 'from common import database\n'), ((1853, 1894), 'WebMirror.OutputFilters.util.MessageConstructors.pack_message', 'pack_message', (['"""system-update-times"""', 'data'], {}), "('system-update-times', data)\n", (1865, 1894), False, 'from WebMirror.OutputFilters.util.MessageConstructors import pack_message\n'), ((1638, 1656), 'pickle.loads', 'pickle.loads', (['blob'], {}), '(blob)\n', (1650, 1656), False, 'import pickle\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 13:44:34 2018
@author: Moha-Thinkpad
"""
from tensorflow.keras import optimizers
from tensorflow.keras.models import Model
import datetime
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import tensorflow.keras
import argparse
import tensorflow as tf
from tensorflow.keras import backend as K
#cfg = K.tf.ConfigProto()
#cfg.gpu_options.allow_growth = True
#K.set_session(K.tf.Session(config=cfg))
####################################
########################################################################
####################################
def custom_loss_seg (y_true, y_pred):
    """Heatmap regression loss: per-pixel mean absolute error (L1)."""
    return tensorflow.keras.losses.mean_absolute_error(y_true, y_pred)
from tensorflow.keras.layers import Lambda
# Keras layer that collapses the channel axis (axis=3) by summation.
# NOTE(review): appears unused in this part of the script — confirm before removing.
sum_dim_channel = Lambda(lambda xin: K.sum(xin, axis=3))
def lrelu(x): #from pix2pix code
    """Leaky ReLU with negative slope 0.2, written as a leak term plus a
    linear term (0.5*(1+a)*x + 0.5*(1-a)*|x|) so the op shows a single
    input in the graph."""
    slope = 0.2
    x = tf.identity(x)
    return 0.5 * (1 + slope) * x + 0.5 * (1 - slope) * tf.abs(x)
def lrelu_output_shape(input_shape):
    """lrelu is elementwise, so the output shape equals the input shape."""
    return tuple(input_shape)
# Wrap lrelu as a Keras Lambda layer so it can be used in the functional model.
layer_lrelu=Lambda(lrelu, output_shape=lrelu_output_shape)
def PreProcess(InputImages):
    """Normalise each image to [0, 1] by dividing by its own maximum.

    Accepts a 4-D (N, H, W, C) or 3-D (N, H, W) array; the 3-D case is
    detected via the IndexError raised by 4-D indexing.  Works on a float
    copy of the input and returns it.

    NOTE(review): an all-zero frame makes np.max() return 0 and the division
    produce NaN/inf with a RuntimeWarning — confirm inputs are never blank.
    """
    # np.float was removed in NumPy 1.24; it was an alias for float64.
    InputImages = InputImages.astype(np.float64)
    for i in range(InputImages.shape[0]):
        try:
            InputImages[i, :, :, :] = InputImages[i, :, :, :] / np.max(InputImages[i, :, :, :])
        except IndexError:  # 3-D input: no channel axis
            InputImages[i, :, :] = InputImages[i, :, :] / np.max(InputImages[i, :, :])
    return InputImages
####################################
########################################################################
####################################
# Command-line interface.  Only --mode / paths / --landmarks are actually
# consumed below; --lr and --ngf are parsed but then overridden by the
# hard-coded values that follow — TODO confirm that is intentional.
parser = argparse.ArgumentParser()
parser.add_argument("--mode", choices=["train", "test", "export"])
parser.add_argument("--input_dir", help="path to folder containing images")
parser.add_argument("--target_dir", help="where to")
parser.add_argument("--checkpoint", help="where to ")
parser.add_argument("--output_dir", help="where to p")
parser.add_argument("--landmarks", help=" -,-,-")
parser.add_argument("--lr", help="adam learning rate")
parser.add_argument("--ngf", type=int, default=64, help="number of generator filters in first conv layer")
# export options
a = parser.parse_args()
# Fixed hyperparameters attached to the parsed-args namespace.
a.batch_size=40
a.max_epochs_seg=1
a.lr_seg=0.0001
a.beta1=0.5
a.ngf=64
#a.seed=1
# a.mode="train"
# a.input_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/temp_train_png/'
# a.target_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/temp_train_lm/'
# a.checkpoint='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/Models_lm/'
# a.output_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/Models_lm/'
# a.landmarks='43,43,43'
#a.mode="test"
#a.batch_size=1
#a.input_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/temp_test_png/'
#a.target_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/temp_test_lm/'
#a.checkpoint='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/Models_lm/'
#a.output_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/Models_lm/'
#a.landmarks='43,43,43'
######## ------------ Config
#Ind_impo_landmarks_matlab=np.array([5, 6, 15,16,17,18,20,21,22,23,24,25,26,27,28,29,30,32,33,34,35,36,37,38,41])
#Ind_impo_landmarks_python=Ind_impo_landmarks_matlab-1
#Num_landmarks=25
# 33,23,16 - 29,15, - 30,20,26 - 5,18,21 - 44,17,41 - 28,22,34, - 27,43,37
# Parse the three comma-separated landmark ids (1-based, MATLAB convention).
StrLandmarks = a.landmarks.split(",")
Ind_impo_landmarks_matlab = np.array(
    [int(StrLandmarks[0]), int(StrLandmarks[1]), int(StrLandmarks[2])])
# Shift to 0-based indices for Python-side array indexing.
Ind_impo_landmarks_python = Ind_impo_landmarks_matlab - 1
Num_landmarks = 3
# Log the start timestamp between banner lines.
print('============================')
print('============================')
print(datetime.datetime.now())
print('============================')
print('============================')
#########----------------------DATA
from os import listdir

# Every .png in the input folder is a frame; order follows listdir().
FileNames = listdir(a.input_dir)
ImageFileNames = [name for name in FileNames if name.endswith(".png")]
from skimage import io as ioSK
from numpy import genfromtxt

# Raw 256x256 RGB frames plus their 44 landmark coordinates (one CSV per
# frame, same basename as the image).
Images = np.zeros((len(ImageFileNames), 256, 256, 3), dtype=np.uint8)
LandmarkLocations = np.zeros((len(ImageFileNames), 2, 44), dtype=np.uint8)
for i, img_name in enumerate(ImageFileNames):
    Images[i, :, :, :] = ioSK.imread(a.input_dir + '/' + img_name)
    stem = img_name[:-4]
    coords = genfromtxt(a.target_dir + '/' + stem + '.csv', delimiter=',').astype(int)
    LandmarkLocations[i, 0, :] = coords[:, 0]
    LandmarkLocations[i, 1, :] = coords[:, 1]
#Landmarks = np.flip(Landmarks0, axis=1)
#plt.figure()
#plt.imshow(Images[100,:,:,:])
#plt.scatter(LandmarkLocations[100,0,:],LandmarkLocations[100,1,:])
# Normalise frames to [0, 1]; free the uint8 originals to cap peak memory.
X_train = PreProcess(Images)
del Images
import gc
gc.collect()
# Split CSV columns and keep only the three selected landmarks.
# NOTE(review): column 0 is later used as the scatter x-coordinate — confirm
# the row/col naming actually matches the data layout.
LandmarkLocations_row=LandmarkLocations[:,0,:]
LandmarkLocations_col=LandmarkLocations[:,1,:]
LandmarkLocations_row=LandmarkLocations_row[:,Ind_impo_landmarks_python]
LandmarkLocations_col=LandmarkLocations_col[:,Ind_impo_landmarks_python]
from scipy.ndimage import gaussian_filter

# Build one Gaussian heatmap (sigma=10, peak-normalised to 1.0) per frame
# and per selected landmark.
# np.float was removed in NumPy 1.24 — use the equivalent np.float64.
Images_HeatMaps = np.zeros((X_train.shape[0], X_train.shape[1], X_train.shape[2], Num_landmarks), dtype=np.float64)
Image_heatmap = np.zeros((256, 256), dtype=np.float64)
for i in range(X_train.shape[0]):
    for k in range(Num_landmarks):
        lms_1 = LandmarkLocations_row[i, k]
        lms_2 = LandmarkLocations_col[i, k]
        # Delta image at the landmark, blurred into a soft target.
        Image_heatmap[:, :] = 0
        Image_heatmap[lms_2, lms_1] = 1
        Image_heatmap = gaussian_filter(Image_heatmap, sigma=10)
        Image_heatmap = Image_heatmap / np.max(Image_heatmap)
        Images_HeatMaps[i, :, :, k] = Image_heatmap
gc.collect()

# PreProcess re-normalises each frame's heatmap stack in place and frees the
# intermediate buffer.
Y_train_heatmap = PreProcess(Images_HeatMaps)
del Images_HeatMaps
gc.collect()
import os

# Create the checkpoint/output folders if needed; exist_ok avoids the
# check-then-create race of the original exists()/makedirs() pair.
os.makedirs(a.checkpoint, exist_ok=True)
os.makedirs(a.output_dir, exist_ok=True)
if a.mode=='test':
    # ----- Inference: load the trained model, predict landmark heatmaps, and
    # dump per-image predicted/true coordinates plus an overlay figure.
    checkpoint_model_file=a.checkpoint+'LandMarkModel'

    from tensorflow.keras.models import load_model
    print('loading model ...')
    # Custom objects must be supplied so Keras can rebuild the Lambda layers
    # and the custom loss used at training time.
    model_final=load_model(checkpoint_model_file+'_weights.h5', custom_objects={
            'custom_loss_seg': custom_loss_seg,
            'layer_lrelu':layer_lrelu,
            'lrelu':lrelu,
            'lrelu_output_shape':lrelu_output_shape,
            'tf': tf})
    print('model is loaded ')

    # (Removed two dead allocations from the original: a large float `Images`
    # buffer and `newLandmarks`, neither of which was ever read.)
    Y_test_heatmap=Y_train_heatmap
    X_test=X_train

    pred_example_heatmaps=model_final.predict(X_test[:,:,:,:])

    print('writing results ...')
    for i in range(len(ImageFileNames)):
        FileName=ImageFileNames[i]
        FileName=FileName[:-4]

        # np.int was removed in NumPy 1.24 — the builtin int dtype is equivalent.
        lms_pred_all=np.zeros((Num_landmarks,2),dtype=int)
        lms_True_all=np.zeros((Num_landmarks,2),dtype=int)
        for k in range(Num_landmarks):
            # Landmark location = argmax of the (true / predicted) heatmap.
            True_chan=np.squeeze(Y_test_heatmap[i,:,:,k])
            lms_True=np.unravel_index(np.argmax(True_chan, axis=None), True_chan.shape)
            lms_True_all[k,:]=lms_True

            Pred_chan=np.squeeze(pred_example_heatmaps[i,:,:,k])
            lms_pred=np.unravel_index(np.argmax(Pred_chan, axis=None), Pred_chan.shape)
            lms_pred_all[k,:]=lms_pred

        # Coordinates are saved as (row, col) integer pairs, one CSV each for
        # predictions and ground truth.
        np.savetxt(a.output_dir+FileName+'_pred.csv',
                   lms_pred_all , delimiter=",", fmt='%i')
        np.savetxt(a.output_dir+FileName+'_true.csv',
                   lms_True_all , delimiter=",", fmt='%i')

        # Overlay figure: red '+' = ground truth, blue 'x' = prediction.
        fig = plt.figure()
        plt.imshow(X_test[i,:,:,:],cmap='jet', alpha=0.9)
        plt.scatter(lms_True_all[:,1],lms_True_all[:,0], marker='+', color='red')
        plt.scatter(lms_pred_all[:,1],lms_pred_all[:,0], marker='x', color='blue')
        fig.savefig(a.output_dir+FileName+'.png')
        plt.close(fig)
if a.mode=='train':
# plt.figure()
# plt.imshow(X_train[90,:,:,:])
# plt.figure()
# plt.imshow(Y_train_heatmap[90,:,:,4])
try: # continue training
checkpoint_model_file=a.checkpoint+'LandMarkModel'
from tensorflow.keras.models import load_model
print('======== loading model ...')
model_4_heatmap=load_model(checkpoint_model_file+'_weights.h5', custom_objects={
'custom_loss_seg': custom_loss_seg,
'layer_lrelu':layer_lrelu,
'lrelu':lrelu,
'lrelu_output_shape':lrelu_output_shape,
'tf': tf})
print('======== continue training ...')
except: # new training
print('======== new training ...')
checkpoint_model_file=a.output_dir+'LandMarkModel'
########### network
kernelSize=(4,4)
InputLayer=tensorflow.keras.layers.Input(shape=(256,256,3))
e_1=tensorflow.keras.layers.Conv2D(a.ngf, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(InputLayer)
e_2=layer_lrelu(e_1)
e_2=tensorflow.keras.layers.Conv2D(a.ngf * 2, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_2)
e_2=tensorflow.keras.layers.BatchNormalization()(e_2)
e_3=layer_lrelu(e_2)
e_3=tensorflow.keras.layers.Conv2D(a.ngf * 4, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_3)
e_3=tensorflow.keras.layers.BatchNormalization()(e_3)
e_4=layer_lrelu(e_3)
e_4=tensorflow.keras.layers.Conv2D(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_4)
e_4=tensorflow.keras.layers.BatchNormalization()(e_4)
e_5=layer_lrelu(e_4)
e_5=tensorflow.keras.layers.Conv2D(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_5)
e_5=tensorflow.keras.layers.BatchNormalization()(e_5)
e_6=layer_lrelu(e_5)
e_6=tensorflow.keras.layers.Conv2D(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_6)
e_6=tensorflow.keras.layers.BatchNormalization()(e_6)
e_7=layer_lrelu(e_6)
e_7=tensorflow.keras.layers.Conv2D(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_7)
e_7=tensorflow.keras.layers.BatchNormalization()(e_7)
e_8=layer_lrelu(e_7)
e_8=tensorflow.keras.layers.Conv2D(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_8)
e_8=tensorflow.keras.layers.BatchNormalization()(e_8)
d_8=e_8
d_8=tensorflow.keras.layers.Activation('relu')(d_8)
d_8=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_8)
d_8=tensorflow.keras.layers.BatchNormalization()(d_8)
d_8=tensorflow.keras.layers.Dropout(0.5)(d_8)
d_7=tensorflow.keras.layers.concatenate(inputs=[d_8, e_7], axis=3)
d_7=tensorflow.keras.layers.Activation('relu')(d_7)
d_7=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_7)
d_7=tensorflow.keras.layers.BatchNormalization()(d_7)
d_7=tensorflow.keras.layers.Dropout(0.5)(d_7)
d_6=tensorflow.keras.layers.concatenate(inputs=[d_7, e_6], axis=3)
d_6=tensorflow.keras.layers.Activation('relu')(d_6)
d_6=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_6)
d_6=tensorflow.keras.layers.BatchNormalization()(d_6)
d_6=tensorflow.keras.layers.Dropout(0.5) (d_6)
d_5=tensorflow.keras.layers.concatenate(inputs=[d_6, e_5], axis=3)
d_5=tensorflow.keras.layers.Activation('relu')(d_5)
d_5=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_5)
d_5=tensorflow.keras.layers.BatchNormalization()(d_5)
d_5=tensorflow.keras.layers.Dropout(0.5) (d_5)
d_4=tensorflow.keras.layers.concatenate(inputs=[d_5, e_4], axis=3)
d_4=tensorflow.keras.layers.Activation('relu')(d_4)
d_4=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 4, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_4)
d_4=tensorflow.keras.layers.BatchNormalization()(d_4)
d_3=tensorflow.keras.layers.concatenate(inputs=[d_4, e_3], axis=3)
d_3=tensorflow.keras.layers.Activation('relu')(d_3)
d_3=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 2, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_3)
d_3=tensorflow.keras.layers.BatchNormalization()(d_3)
d_2=tensorflow.keras.layers.concatenate(inputs=[d_3, e_2], axis=3)
d_2=tensorflow.keras.layers.Activation('relu')(d_2)
# d_2=tensorflow.keras.layers.Conv2DTranspose(a.ngf, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_2)
d_2=tensorflow.keras.layers.Conv2DTranspose(a.ngf, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_2)
d_2=tensorflow.keras.layers.BatchNormalization()(d_2)
d_1=tensorflow.keras.layers.concatenate(inputs=[d_2, e_1], axis=3)
d_1=tensorflow.keras.layers.Activation('relu')(d_1)
d_1=tensorflow.keras.layers.Conv2DTranspose(Num_landmarks, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_1)
HeatMaps=tensorflow.keras.layers.Activation('sigmoid', name='last_layer_of_decoder')(d_1)
model_4_heatmap=Model(inputs=InputLayer, outputs=HeatMaps)
###########Train
print('trainable_count =',int(np.sum([K.count_params(p) for p in set(model_4_heatmap.trainable_weights)])))
print('non_trainable_count =', int(np.sum([K.count_params(p) for p in set(model_4_heatmap.non_trainable_weights)])))
# fix random seed for reproducibility
seed = 1
import random
tf.set_random_seed(seed)
np.random.seed(seed)
random.seed(seed)
#### compile and train the model
UsedOptimizer=optimizers.Adam(lr=a.lr_seg, beta_1=a.beta1)
model_4_heatmap.compile(loss=custom_loss_seg, optimizer=UsedOptimizer)
History=model_4_heatmap.fit(X_train, Y_train_heatmap,
batch_size=a.batch_size, shuffle=True, validation_split=0.05,
epochs=a.max_epochs_seg,
verbose=1)
plt.plot(History.history['loss'])
plt.plot(History.history['val_loss'])
plt.grid()
plt.savefig(a.output_dir+'History_'+str(a.lr)+'.png')
plt.close()
import pickle
Dict={'History_loss_train':History.history['loss'],
'History_loss_val':History.history['val_loss'],}
pickle.dump( Dict, open(a.output_dir+'History_'+str(a.lr)+'.pkl', "wb" ) )
# show an exemplary result
Num_example_train=0
pred_example_heatmaps=model_4_heatmap.predict(X_train[Num_example_train:Num_example_train+1,:,:,:])
lms_pred_all=np.zeros((Num_landmarks,2),dtype=np.int)
lms_True_all=np.zeros((Num_landmarks,2),dtype=np.int)
for i in range(Num_landmarks):
# plt.figure()
# plt.imshow(example_segmentation[0,:,:,i], cmap='gray')
# plt.imshow(X_train[0,:,:,:],cmap='jet', alpha=0.5)
# plt.show()
Pred_chan=np.squeeze(pred_example_heatmaps[0,:,:,i])
lms_pred=np.unravel_index(np.argmax(Pred_chan, axis=None), Pred_chan.shape)
lms_pred_all[i,:]=lms_pred
True_chan=np.squeeze(Y_train_heatmap[Num_example_train,:,:,i])
lms_True=np.unravel_index(np.argmax(True_chan, axis=None), True_chan.shape)
lms_True_all[i,:]=lms_True
# fig, ax = plt.subplots(1, 2)
# ax[0].imshow(Y_train_heatmap[Num_example_train,:,:,i])
# ax[1].imshow(pred_example_heatmaps[0,:,:,i])
# plt.show()
fig = plt.figure()
plt.imshow(X_train[Num_example_train,:,:,:],cmap='jet', alpha=0.9)
plt.scatter(lms_True_all[:,1],lms_True_all[:,0], marker='+', color='red')
plt.scatter(lms_pred_all[:,1],lms_pred_all[:,0], marker='x', color='blue')
plt.grid(True)
# fig.savefig('scatter-result'+str(i)+'_pred.png')
plt.close(fig)
print('===========training done=================')
print('============================')
print(datetime.datetime.now())
print('============================')
print('============================')
print('Saving model ...')
model_4_heatmap.save(checkpoint_model_file+'_weights.h5')
| [
"matplotlib.pyplot.grid",
"numpy.array",
"tensorflow.keras.models.load_model",
"scipy.ndimage.gaussian_filter",
"tensorflow.set_random_seed",
"numpy.genfromtxt",
"matplotlib.pyplot.imshow",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"numpy.max",
"mat... | [((228, 249), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (242, 249), False, 'import matplotlib\n'), ((1462, 1508), 'tensorflow.keras.layers.Lambda', 'Lambda', (['lrelu'], {'output_shape': 'lrelu_output_shape'}), '(lrelu, output_shape=lrelu_output_shape)\n', (1468, 1508), False, 'from tensorflow.keras.layers import Lambda\n'), ((2176, 2201), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2199, 2201), False, 'import argparse\n'), ((4096, 4115), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (4104, 4115), True, 'import numpy as np\n'), ((4611, 4631), 'os.listdir', 'listdir', (['a.input_dir'], {}), '(a.input_dir)\n', (4618, 4631), False, 'from os import listdir\n'), ((5766, 5778), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5776, 5778), False, 'import gc\n'), ((6084, 6183), 'numpy.zeros', 'np.zeros', (['(X_train.shape[0], X_train.shape[1], X_train.shape[2], Num_landmarks)'], {'dtype': 'np.float'}), '((X_train.shape[0], X_train.shape[1], X_train.shape[2],\n Num_landmarks), dtype=np.float)\n', (6092, 6183), True, 'import numpy as np\n'), ((6191, 6227), 'numpy.zeros', 'np.zeros', (['(256, 256)'], {'dtype': 'np.float'}), '((256, 256), dtype=np.float)\n', (6199, 6227), True, 'import numpy as np\n'), ((6692, 6704), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6702, 6704), False, 'import gc\n'), ((6924, 6936), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6934, 6936), False, 'import gc\n'), ((1281, 1295), 'tensorflow.identity', 'tf.identity', (['x'], {}), '(x)\n', (1292, 1295), True, 'import tensorflow as tf\n'), ((4420, 4443), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4441, 4443), False, 'import datetime\n'), ((5081, 5131), 'skimage.io.imread', 'ioSK.imread', (["(a.input_dir + '/' + ImageFileNames[i])"], {}), "(a.input_dir + '/' + ImageFileNames[i])\n", (5092, 5131), True, 'from skimage import io as ioSK\n'), ((5350, 5415), 'numpy.genfromtxt', 'genfromtxt', 
(["(a.target_dir + '/' + FileName + '.csv')"], {'delimiter': '""","""'}), "(a.target_dir + '/' + FileName + '.csv', delimiter=',')\n", (5360, 5415), False, 'from numpy import genfromtxt\n'), ((6981, 7009), 'os.path.exists', 'os.path.exists', (['a.checkpoint'], {}), '(a.checkpoint)\n', (6995, 7009), False, 'import os\n'), ((7015, 7040), 'os.makedirs', 'os.makedirs', (['a.checkpoint'], {}), '(a.checkpoint)\n', (7026, 7040), False, 'import os\n'), ((7053, 7081), 'os.path.exists', 'os.path.exists', (['a.output_dir'], {}), '(a.output_dir)\n', (7067, 7081), False, 'import os\n'), ((7087, 7112), 'os.makedirs', 'os.makedirs', (['a.output_dir'], {}), '(a.output_dir)\n', (7098, 7112), False, 'import os\n'), ((7298, 7505), 'tensorflow.keras.models.load_model', 'load_model', (["(checkpoint_model_file + '_weights.h5')"], {'custom_objects': "{'custom_loss_seg': custom_loss_seg, 'layer_lrelu': layer_lrelu, 'lrelu':\n lrelu, 'lrelu_output_shape': lrelu_output_shape, 'tf': tf}"}), "(checkpoint_model_file + '_weights.h5', custom_objects={\n 'custom_loss_seg': custom_loss_seg, 'layer_lrelu': layer_lrelu, 'lrelu':\n lrelu, 'lrelu_output_shape': lrelu_output_shape, 'tf': tf})\n", (7308, 7505), False, 'from tensorflow.keras.models import load_model\n'), ((8030, 8076), 'numpy.zeros', 'np.zeros', (['(Num_landmarks, 2)'], {'dtype': 'np.float16'}), '((Num_landmarks, 2), dtype=np.float16)\n', (8038, 8076), True, 'import numpy as np\n'), ((16856, 16880), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (16874, 16880), True, 'import tensorflow as tf\n'), ((16885, 16905), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (16899, 16905), True, 'import numpy as np\n'), ((16910, 16927), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (16921, 16927), False, 'import random\n'), ((16993, 17037), 'tensorflow.keras.optimizers.Adam', 'optimizers.Adam', ([], {'lr': 'a.lr_seg', 'beta_1': 'a.beta1'}), '(lr=a.lr_seg, beta_1=a.beta1)\n', (17008, 
17037), False, 'from tensorflow.keras import optimizers\n'), ((17323, 17356), 'matplotlib.pyplot.plot', 'plt.plot', (["History.history['loss']"], {}), "(History.history['loss'])\n", (17331, 17356), True, 'import matplotlib.pyplot as plt\n'), ((17361, 17398), 'matplotlib.pyplot.plot', 'plt.plot', (["History.history['val_loss']"], {}), "(History.history['val_loss'])\n", (17369, 17398), True, 'import matplotlib.pyplot as plt\n'), ((17403, 17413), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (17411, 17413), True, 'import matplotlib.pyplot as plt\n'), ((17476, 17487), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (17485, 17487), True, 'import matplotlib.pyplot as plt\n'), ((17907, 17949), 'numpy.zeros', 'np.zeros', (['(Num_landmarks, 2)'], {'dtype': 'np.int'}), '((Num_landmarks, 2), dtype=np.int)\n', (17915, 17949), True, 'import numpy as np\n'), ((17965, 18007), 'numpy.zeros', 'np.zeros', (['(Num_landmarks, 2)'], {'dtype': 'np.int'}), '((Num_landmarks, 2), dtype=np.int)\n', (17973, 18007), True, 'import numpy as np\n'), ((18798, 18810), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18808, 18810), True, 'import matplotlib.pyplot as plt\n'), ((18815, 18885), 'matplotlib.pyplot.imshow', 'plt.imshow', (['X_train[Num_example_train, :, :, :]'], {'cmap': '"""jet"""', 'alpha': '(0.9)'}), "(X_train[Num_example_train, :, :, :], cmap='jet', alpha=0.9)\n", (18825, 18885), True, 'import matplotlib.pyplot as plt\n'), ((18886, 18962), 'matplotlib.pyplot.scatter', 'plt.scatter', (['lms_True_all[:, 1]', 'lms_True_all[:, 0]'], {'marker': '"""+"""', 'color': '"""red"""'}), "(lms_True_all[:, 1], lms_True_all[:, 0], marker='+', color='red')\n", (18897, 18962), True, 'import matplotlib.pyplot as plt\n'), ((18964, 19041), 'matplotlib.pyplot.scatter', 'plt.scatter', (['lms_pred_all[:, 1]', 'lms_pred_all[:, 0]'], {'marker': '"""x"""', 'color': '"""blue"""'}), "(lms_pred_all[:, 1], lms_pred_all[:, 0], marker='x', color='blue')\n", (18975, 19041), True, 
'import matplotlib.pyplot as plt\n'), ((19043, 19057), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (19051, 19057), True, 'import matplotlib.pyplot as plt\n'), ((19116, 19130), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (19125, 19130), True, 'import matplotlib.pyplot as plt\n'), ((932, 950), 'tensorflow.keras.backend.sum', 'K.sum', (['xin'], {'axis': '(3)'}), '(xin, axis=3)\n', (937, 950), True, 'from tensorflow.keras import backend as K\n'), ((6538, 6578), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['Image_heatmap'], {'sigma': '(10)'}), '(Image_heatmap, sigma=10)\n', (6553, 6578), False, 'from scipy.ndimage import gaussian_filter\n'), ((8588, 8630), 'numpy.zeros', 'np.zeros', (['(Num_landmarks, 2)'], {'dtype': 'np.int'}), '((Num_landmarks, 2), dtype=np.int)\n', (8596, 8630), True, 'import numpy as np\n'), ((8650, 8692), 'numpy.zeros', 'np.zeros', (['(Num_landmarks, 2)'], {'dtype': 'np.int'}), '((Num_landmarks, 2), dtype=np.int)\n', (8658, 8692), True, 'import numpy as np\n'), ((9549, 9642), 'numpy.savetxt', 'np.savetxt', (["(a.output_dir + FileName + '_pred.csv')", 'lms_pred_all'], {'delimiter': '""","""', 'fmt': '"""%i"""'}), "(a.output_dir + FileName + '_pred.csv', lms_pred_all, delimiter=\n ',', fmt='%i')\n", (9559, 9642), True, 'import numpy as np\n'), ((9656, 9749), 'numpy.savetxt', 'np.savetxt', (["(a.output_dir + FileName + '_true.csv')", 'lms_True_all'], {'delimiter': '""","""', 'fmt': '"""%i"""'}), "(a.output_dir + FileName + '_true.csv', lms_True_all, delimiter=\n ',', fmt='%i')\n", (9666, 9749), True, 'import numpy as np\n'), ((9773, 9785), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9783, 9785), True, 'import matplotlib.pyplot as plt\n'), ((9794, 9847), 'matplotlib.pyplot.imshow', 'plt.imshow', (['X_test[i, :, :, :]'], {'cmap': '"""jet"""', 'alpha': '(0.9)'}), "(X_test[i, :, :, :], cmap='jet', alpha=0.9)\n", (9804, 9847), True, 'import matplotlib.pyplot as plt\n'), ((9852, 9928), 
'matplotlib.pyplot.scatter', 'plt.scatter', (['lms_True_all[:, 1]', 'lms_True_all[:, 0]'], {'marker': '"""+"""', 'color': '"""red"""'}), "(lms_True_all[:, 1], lms_True_all[:, 0], marker='+', color='red')\n", (9863, 9928), True, 'import matplotlib.pyplot as plt\n'), ((9934, 10011), 'matplotlib.pyplot.scatter', 'plt.scatter', (['lms_pred_all[:, 1]', 'lms_pred_all[:, 0]'], {'marker': '"""x"""', 'color': '"""blue"""'}), "(lms_pred_all[:, 1], lms_pred_all[:, 0], marker='x', color='blue')\n", (9945, 10011), True, 'import matplotlib.pyplot as plt\n'), ((10091, 10105), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (10100, 10105), True, 'import matplotlib.pyplot as plt\n'), ((10516, 10723), 'tensorflow.keras.models.load_model', 'load_model', (["(checkpoint_model_file + '_weights.h5')"], {'custom_objects': "{'custom_loss_seg': custom_loss_seg, 'layer_lrelu': layer_lrelu, 'lrelu':\n lrelu, 'lrelu_output_shape': lrelu_output_shape, 'tf': tf}"}), "(checkpoint_model_file + '_weights.h5', custom_objects={\n 'custom_loss_seg': custom_loss_seg, 'layer_lrelu': layer_lrelu, 'lrelu':\n lrelu, 'lrelu_output_shape': lrelu_output_shape, 'tf': tf})\n", (10526, 10723), False, 'from tensorflow.keras.models import load_model\n'), ((18225, 18270), 'numpy.squeeze', 'np.squeeze', (['pred_example_heatmaps[0, :, :, i]'], {}), '(pred_example_heatmaps[0, :, :, i])\n', (18235, 18270), True, 'import numpy as np\n'), ((18413, 18468), 'numpy.squeeze', 'np.squeeze', (['Y_train_heatmap[Num_example_train, :, :, i]'], {}), '(Y_train_heatmap[Num_example_train, :, :, i])\n', (18423, 18468), True, 'import numpy as np\n'), ((19254, 19277), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (19275, 19277), False, 'import datetime\n'), ((1347, 1356), 'tensorflow.abs', 'tf.abs', (['x'], {}), '(x)\n', (1353, 1356), True, 'import tensorflow as tf\n'), ((6614, 6635), 'numpy.max', 'np.max', (['Image_heatmap'], {}), '(Image_heatmap)\n', (6620, 6635), True, 'import numpy as np\n'), 
((8959, 8997), 'numpy.squeeze', 'np.squeeze', (['Y_test_heatmap[i, :, :, k]'], {}), '(Y_test_heatmap[i, :, :, k])\n', (8969, 8997), True, 'import numpy as np\n'), ((9157, 9202), 'numpy.squeeze', 'np.squeeze', (['pred_example_heatmaps[i, :, :, k]'], {}), '(pred_example_heatmaps[i, :, :, k])\n', (9167, 9202), True, 'import numpy as np\n'), ((16445, 16487), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'InputLayer', 'outputs': 'HeatMaps'}), '(inputs=InputLayer, outputs=HeatMaps)\n', (16450, 16487), False, 'from tensorflow.keras.models import Model\n'), ((18302, 18333), 'numpy.argmax', 'np.argmax', (['Pred_chan'], {'axis': 'None'}), '(Pred_chan, axis=None)\n', (18311, 18333), True, 'import numpy as np\n'), ((18500, 18531), 'numpy.argmax', 'np.argmax', (['True_chan'], {'axis': 'None'}), '(True_chan, axis=None)\n', (18509, 18531), True, 'import numpy as np\n'), ((1754, 1785), 'numpy.max', 'np.max', (['InputImages[i, :, :, :]'], {}), '(InputImages[i, :, :, :])\n', (1760, 1785), True, 'import numpy as np\n'), ((9033, 9064), 'numpy.argmax', 'np.argmax', (['True_chan'], {'axis': 'None'}), '(True_chan, axis=None)\n', (9042, 9064), True, 'import numpy as np\n'), ((9238, 9269), 'numpy.argmax', 'np.argmax', (['Pred_chan'], {'axis': 'None'}), '(Pred_chan, axis=None)\n', (9247, 9269), True, 'import numpy as np\n'), ((1903, 1931), 'numpy.max', 'np.max', (['InputImages[i, :, :]'], {}), '(InputImages[i, :, :])\n', (1909, 1931), True, 'import numpy as np\n'), ((16569, 16586), 'tensorflow.keras.backend.count_params', 'K.count_params', (['p'], {}), '(p)\n', (16583, 16586), True, 'from tensorflow.keras import backend as K\n'), ((16686, 16703), 'tensorflow.keras.backend.count_params', 'K.count_params', (['p'], {}), '(p)\n', (16700, 16703), True, 'from tensorflow.keras import backend as K\n')] |
"""This file contains functions for converting and storing jupyter notebooks."""
import nbformat
import pickle
import numpy as np
import os
from nbconvert import PythonExporter
from pathlib import Path # for windows-Unix compatibility
def nbconvert_python(path):
    """Convert a jupyter notebook into a python source string.

    The resulting string can then be executed with ``exec()``.

    Args:
        path (str): Path of jupyter notebook

    Returns:
        str: The string of python code converted from notebook
    """
    with open(path) as handle:
        notebook = nbformat.read(handle, as_version=4)
    exporter = PythonExporter()
    body, _ = exporter.from_notebook_node(notebook)
    return body
def is_picklable(obj):
    """Tell whether *obj* survives pickle serialization.

    Args:
        obj : The Object to be judged

    Returns:
        bool: True when ``pickle.dumps(obj)`` succeeds, False otherwise
    """
    try:
        pickle.dumps(obj)
        return True
    except Exception:
        # Any failure (PicklingError, TypeError, ...) means "not picklable".
        return False
def filter_pickable(global_vars):
    """Filter the variables that are pickable.

    Args:
        global_vars (dict): Mapping from variable names to objects
            (e.g. the result of executing a script in a namespace).

    Returns:
        dict: Dictionary containing names of objects and their values,
        restricted to entries whose value can be pickled.
    """
    # The previous implementation wrapped ``bk.update({k: obj})`` in a
    # ``try/except TypeError`` that could never fire: the keys come from an
    # existing dict (hence hashable) and ``dict.update`` with a plain dict
    # does not raise TypeError.  A comprehension is equivalent and clearer.
    return {name: value for name, value in global_vars.items() if is_picklable(value)}
def notebook_to_pickable_dict(path):
    """Execute a jupyter notebook and collect its picklable variables.

    The notebook is converted to python source, the source is executed,
    and every public, picklable variable the notebook defined is returned
    in a dictionary.

    Parameters
    ----------
    path : str
        Path of jupyter notebook

    Returns
    -------
    bk : dict
        Dictionary containing names of variables and variables that defined in notebook.
    """
    # Step 1: Convert notebook to script.  IPython magics cannot run under a
    # plain exec(), so calls to get_ipython() are commented out.
    code = nbconvert_python(path).replace("get_ipython()", "# get_ipython()")

    # Step 2: Execute script and keep the resulting namespace.
    namespace = {}
    exec(code, namespace)
    namespace.pop("__builtins__")

    # Step 3: Keep only the variables that can be pickled.
    return filter_pickable(namespace)
def save_to_pkl(path, obj):
    """Serialize an object into a pickle file.

    Args:
        path (str): Path to save pickle file
        obj : Object to be saved
    """
    # Serialize first, then write the bytes in one shot.
    Path(path).write_bytes(pickle.dumps(obj))
def basic_type_or_list(obj):
    """Return True when numpy can store *obj* without an ``object`` dtype.

    Scalars, strings and homogeneous numeric sequences qualify; anything
    that coerces to an object-dtype array does not.
    """
    dtype = np.asanyarray(obj).dtype
    return not dtype.hasobject
def flatten_to_dict(obj):
    """Reduce dimensionality of dictionary."""
    # Recursively walks nested dicts/lists/tuples/sets and flattens them into
    # a single mapping whose keys join the nesting path with underscores
    # (e.g. {"a": {"b": 1}} -> {"a_b": 1}).  "Basic" sequences -- those numpy
    # can store without an object dtype -- are kept whole.
    # NOTE: at the top level (key is None) a basic value or sequence is
    # returned unchanged, i.e. the result is not necessarily a dict.
    def _flatten(value, key):
        """Reduce dimensionality of object recursively."""
        if isinstance(value, (list, tuple, set)):
            if basic_type_or_list(value):
                # Homogeneous basic sequence: keep it whole under its path key.
                return {key: value} if key is not None else value
            else:
                # Mixed/object sequence: flatten each element, suffixing the
                # key with the element's index.
                tile_d = {}
                for i, v in enumerate(value):
                    tile_d.update(_flatten(v, f"{key}_{i}" if key is not None else i))
                return tile_d
        elif isinstance(value, dict):
            # Flatten each item, suffixing the key with the child key.
            tile_d = {}
            for k, v in value.items():
                tile_d.update(_flatten(v, f"{key}_{k}" if key is not None else k))
            return tile_d
        else:
            # Scalar leaf.
            return {key: value} if key is not None else value
    return _flatten(value=obj, key=None)
def to_ndarray(obj):
    """Convert to numpy array.

    Dictionaries are converted value-wise, object-dtype sequences
    element-wise, and everything else directly.
    """
    if isinstance(obj, dict):
        return {key: np.asanyarray(val) for key, val in obj.items()}
    if isinstance(obj, (list, tuple, set)) and not basic_type_or_list(obj):
        return [np.asanyarray(item) for item in obj]
    return np.asanyarray(obj)
def is_path(path):
    """Return True for os.PathLike objects and for strings naming an existing path."""
    if isinstance(path, os.PathLike):
        # Path-like objects count regardless of whether they exist on disk.
        return True
    return isinstance(path, str) and os.path.exists(path)
def contains_path(obj):
    """Return True if *obj* is a path itself or a container holding at least one path."""
    if isinstance(obj, (np.ndarray, list, tuple, set)):
        return any(is_path(element) for element in obj)
    return is_path(obj)
def notebook_exec_result_flattened(path):
    """Prepare notebook for numpy savez.

    Executes the notebook at *path* and returns a flat dict of the
    basic-typed, environment-independent variables it defined.

    Parameters
    ----------
    path : str
        Path of jupyter notebook

    Returns
    -------
    dict
        Flattened mapping of variable names to basic-typed values.
    """
    # Step 1: Convert notebook to script
    code = nbconvert_python(path)
    code = code.replace("get_ipython()", "# get_ipython()")
    # Step 2: Execute script and save variables in dictionary
    d = {}
    exec(code, d)
    d.pop("__builtins__")
    # Step 3: Flatten all variables
    bk = flatten_to_dict(d)
    # Step 4: Filter for variables which is basic type or list of basic type
    bk_filted = {k: v for k, v in bk.items() if basic_type_or_list(v)}
    # Step 5: Remove environmental variables
    bk_filted = {k: v for k, v in bk_filted.items() if not contains_path(v)}
    for key in {"__warningregistry___version"}:
        # BUGFIX: the key only appears when the executed notebook triggered
        # warnings; a bare .pop(key) raised KeyError otherwise.
        bk_filted.pop(key, None)
    return bk_filted
def main():
    """Execute the getting-started notebook and archive its variables as .npz."""
    source = Path("docs/getting_started.ipynb")
    variables = notebook_exec_result_flattened(source)
    # to save session
    target = Path("pydsge/tests/resources/getting_started_stable.npz")
    with open(target, "wb") as archive:
        np.savez_compressed(archive, **variables)


if __name__ == "__main__":
    main()
| [
"os.path.exists",
"pickle.dump",
"pathlib.Path",
"pickle.dumps",
"nbformat.read",
"numpy.asanyarray",
"nbconvert.PythonExporter",
"numpy.savez_compressed"
] | [((4025, 4045), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4039, 4045), False, 'import os\n'), ((5177, 5211), 'pathlib.Path', 'Path', (['"""docs/getting_started.ipynb"""'], {}), "('docs/getting_started.ipynb')\n", (5181, 5211), False, 'from pathlib import Path\n'), ((5307, 5364), 'pathlib.Path', 'Path', (['"""pydsge/tests/resources/getting_started_stable.npz"""'], {}), "('pydsge/tests/resources/getting_started_stable.npz')\n", (5311, 5364), False, 'from pathlib import Path\n'), ((589, 619), 'nbformat.read', 'nbformat.read', (['f'], {'as_version': '(4)'}), '(f, as_version=4)\n', (602, 619), False, 'import nbformat\n'), ((916, 933), 'pickle.dumps', 'pickle.dumps', (['obj'], {}), '(obj)\n', (928, 933), False, 'import pickle\n'), ((2510, 2529), 'pickle.dump', 'pickle.dump', (['obj', 'f'], {}), '(obj, f)\n', (2521, 2529), False, 'import pickle\n'), ((5410, 5438), 'numpy.savez_compressed', 'np.savez_compressed', (['f'], {}), '(f, **bk)\n', (5429, 5438), True, 'import numpy as np\n'), ((634, 650), 'nbconvert.PythonExporter', 'PythonExporter', ([], {}), '()\n', (648, 650), False, 'from nbconvert import PythonExporter\n'), ((3609, 3625), 'numpy.asanyarray', 'np.asanyarray', (['v'], {}), '(v)\n', (3622, 3625), True, 'import numpy as np\n'), ((3801, 3819), 'numpy.asanyarray', 'np.asanyarray', (['obj'], {}), '(obj)\n', (3814, 3819), True, 'import numpy as np\n'), ((2608, 2626), 'numpy.asanyarray', 'np.asanyarray', (['obj'], {}), '(obj)\n', (2621, 2626), True, 'import numpy as np\n'), ((3745, 3761), 'numpy.asanyarray', 'np.asanyarray', (['v'], {}), '(v)\n', (3758, 3761), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas.util.testing as tm
from pandas.compat import long
from pandas.tseries import offsets
from pandas import Timestamp, Timedelta
class TestTimestampArithmetic(object):
    """Arithmetic behavior of Timestamp with offsets, timedeltas and datetimes."""

    def test_overflow_offset(self):
        # xref https://github.com/statsmodels/statsmodels/issues/3374
        # ends up multiplying really large numbers which overflow
        ts = Timestamp('2017-01-13 00:00:00', freq='D')
        big_offset = 20169940 * offsets.Day(1)

        # overflow must be raised for both operand orders of "+" and for "-"
        with pytest.raises(OverflowError):
            ts + big_offset

        with pytest.raises(OverflowError):
            big_offset + ts

        with pytest.raises(OverflowError):
            ts - big_offset

    def test_delta_preserve_nanos(self):
        # nanosecond precision must survive timedelta addition
        base = Timestamp(long(1337299200000000123))
        shifted = base + timedelta(1)
        assert shifted.nanosecond == base.nanosecond

    def test_timestamp_sub_datetime(self):
        plain = datetime(2013, 10, 12)
        wrapped = Timestamp(datetime(2013, 10, 13))
        assert (wrapped - plain).days == 1
        assert (plain - wrapped).days == -1

    def test_addition_subtraction_types(self):
        # Assert on the types resulting from Timestamp +/- various date/time
        # objects
        plain_dt = datetime(2014, 3, 4)
        one_sec = timedelta(seconds=1)

        # build a timestamp with a frequency, since then it supports
        # addition/subtraction of integers
        ts = Timestamp(plain_dt, freq='D')

        with tm.assert_produces_warning(FutureWarning):
            # GH#22535 add/sub with integers is deprecated
            assert type(ts + 1) == Timestamp
            assert type(ts - 1) == Timestamp

        # Timestamp + datetime not supported, though subtraction is supported
        # and yields timedelta more tests in tseries/base/tests/test_base.py
        assert type(ts - plain_dt) == Timedelta
        assert type(ts + one_sec) == Timestamp
        assert type(ts - one_sec) == Timestamp

        # Timestamp +/- datetime64 not supported, so not tested (could possibly
        # assert error raised?)
        one_day64 = np.timedelta64(1, 'D')
        assert type(ts + one_day64) == Timestamp
        assert type(ts - one_day64) == Timestamp

    def test_addition_subtraction_preserve_frequency(self):
        ts = Timestamp('2014-03-05', freq='D')
        one_day = timedelta(days=1)
        original_freq = ts.freq

        with tm.assert_produces_warning(FutureWarning):
            # GH#22535 add/sub with integers is deprecated
            assert (ts + 1).freq == original_freq
            assert (ts - 1).freq == original_freq

        assert (ts + one_day).freq == original_freq
        assert (ts - one_day).freq == original_freq

        one_day64 = np.timedelta64(1, 'D')
        assert (ts + one_day64).freq == original_freq
        assert (ts - one_day64).freq == original_freq
| [
"datetime.datetime",
"pandas.Timestamp",
"pandas.compat.long",
"datetime.timedelta",
"pytest.raises",
"numpy.timedelta64",
"pandas.tseries.offsets.Day",
"pandas.util.testing.assert_produces_warning"
] | [((469, 511), 'pandas.Timestamp', 'Timestamp', (['"""2017-01-13 00:00:00"""'], {'freq': '"""D"""'}), "('2017-01-13 00:00:00', freq='D')\n", (478, 511), False, 'from pandas import Timestamp, Timedelta\n'), ((1005, 1027), 'datetime.datetime', 'datetime', (['(2013)', '(10)', '(12)'], {}), '(2013, 10, 12)\n', (1013, 1027), False, 'from datetime import datetime, timedelta\n'), ((1302, 1322), 'datetime.datetime', 'datetime', (['(2014)', '(3)', '(4)'], {}), '(2014, 3, 4)\n', (1310, 1322), False, 'from datetime import datetime, timedelta\n'), ((1336, 1356), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (1345, 1356), False, 'from datetime import datetime, timedelta\n'), ((1482, 1505), 'pandas.Timestamp', 'Timestamp', (['dt'], {'freq': '"""D"""'}), "(dt, freq='D')\n", (1491, 1505), False, 'from pandas import Timestamp, Timedelta\n'), ((2122, 2144), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (2136, 2144), True, 'import numpy as np\n'), ((2307, 2340), 'pandas.Timestamp', 'Timestamp', (['"""2014-03-05"""'], {'freq': '"""D"""'}), "('2014-03-05', freq='D')\n", (2316, 2340), False, 'from pandas import Timestamp, Timedelta\n'), ((2354, 2371), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2363, 2371), False, 'from datetime import datetime, timedelta\n'), ((2731, 2753), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (2745, 2753), True, 'import numpy as np\n'), ((540, 554), 'pandas.tseries.offsets.Day', 'offsets.Day', (['(1)'], {}), '(1)\n', (551, 554), False, 'from pandas.tseries import offsets\n'), ((569, 597), 'pytest.raises', 'pytest.raises', (['OverflowError'], {}), '(OverflowError)\n', (582, 597), False, 'import pytest\n'), ((640, 668), 'pytest.raises', 'pytest.raises', (['OverflowError'], {}), '(OverflowError)\n', (653, 668), False, 'import pytest\n'), ((711, 739), 'pytest.raises', 'pytest.raises', (['OverflowError'], {}), '(OverflowError)\n', (724, 
739), False, 'import pytest\n'), ((834, 859), 'pandas.compat.long', 'long', (['(1337299200000000123)'], {}), '(1337299200000000123)\n', (838, 859), False, 'from pandas.compat import long\n'), ((884, 896), 'datetime.timedelta', 'timedelta', (['(1)'], {}), '(1)\n', (893, 896), False, 'from datetime import datetime, timedelta\n'), ((1051, 1073), 'datetime.datetime', 'datetime', (['(2013)', '(10)', '(13)'], {}), '(2013, 10, 13)\n', (1059, 1073), False, 'from datetime import datetime, timedelta\n'), ((1520, 1561), 'pandas.util.testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (1546, 1561), True, 'import pandas.util.testing as tm\n'), ((2418, 2459), 'pandas.util.testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (2444, 2459), True, 'import pandas.util.testing as tm\n')] |
import urllib, urllib2
from parse_data import taxid
#UniProt column names are found at
#https://www.uniprot.org/help/uniprotkb_column_names
class UniProtAPI():
    """Thin Python 2 client for the UniProt batch-retrieval REST endpoint.

    Posts tab-format queries to https://www.uniprot.org/uniprot/ and
    accumulates the raw tab-separated response rows in ``self.raw_data``.
    """
    def __init__(self, columns):
        # UniProt column identifiers to request; names are documented at
        # https://www.uniprot.org/help/uniprotkb_column_names
        self.columns = columns
        self.url = 'https://www.uniprot.org/uniprot/'
        self.batch_size = 350 #491 is limit
        self.raw_data = []
    def info(self):
        """POST ``self.params`` to UniProt and collect the response rows.

        The first response line holds the column labels (stored in
        ``self.labels``); all remaining lines are extended onto
        ``self.raw_data``.
        """
        data = urllib.urlencode(self.params)
        request = urllib2.Request(self.url, data)
        response = urllib2.urlopen(request)
        # First line of the tab-separated reply is the header row.
        labels = next(response).split('\t')
        self.raw_data.extend(response)
        self.labels = [label.rstrip() for label in labels]
    def uniprot_info(self, uniprots):
        """Fetch records for a list of UniProt accessions in batches.

        Returns a ``(labels, raw_data)`` tuple of header labels and raw rows.
        """
        # Python 2 integer division floors, so the +1 covers the final
        # partial batch; slicing past the end of the list is harmless.
        for batch_i in range(len(uniprots) / self.batch_size + 1):
            self.params = {'query':','.join(uniprots[(batch_i)*self.batch_size:(batch_i+1)*self.batch_size]), 'columns':','.join(self.columns), 'format':'tab'} #as of 5/29/18 cannot get multiple uniprots at once
            self.info()
        return self.labels, self.raw_data
    def organism_info(self, organism = ''):
        """Fetch all reviewed entries for an organism.

        Falls back to the project-wide default ``organism`` from
        ``parse_data`` when the argument is empty; the organism name is
        mapped to a taxonomy id via ``taxid()``.  Returns
        ``(labels, raw_data)``.
        """
        if not organism:
            from parse_data import organism
        self.params = {'query':'organism:{0} AND reviewed:yes'.format(taxid()[organism]), 'columns':','.join(self.columns), 'format':'tab'}
        self.info()
        return self.labels, self.raw_data
| [
"urllib2.Request",
"urllib2.urlopen",
"urllib.urlencode",
"parse_data.taxid"
] | [((354, 383), 'urllib.urlencode', 'urllib.urlencode', (['self.params'], {}), '(self.params)\n', (370, 383), False, 'import urllib, urllib2\n'), ((396, 427), 'urllib2.Request', 'urllib2.Request', (['self.url', 'data'], {}), '(self.url, data)\n', (411, 427), False, 'import urllib, urllib2\n'), ((441, 465), 'urllib2.urlopen', 'urllib2.urlopen', (['request'], {}), '(request)\n', (456, 465), False, 'import urllib, urllib2\n'), ((1104, 1111), 'parse_data.taxid', 'taxid', ([], {}), '()\n', (1109, 1111), False, 'from parse_data import taxid\n')] |
import json
from pathlib import Path
import numpy as np
from matplotlib import path
current_dir = Path(__file__).parent
__all__ = list(p.stem for p in current_dir.glob("*.json"))
def __getattr__(name: str) -> path.Path:
file_path = current_dir / (name + ".json")
if file_path.exists():
data = json.loads(file_path.read_text())
return path.Path(
vertices=data["vertices"], codes=np.array(data["codes"], np.uint8)
)
raise AttributeError(
f"No {name}.json file found in {current_dir.absolute()}."
)
| [
"numpy.array",
"pathlib.Path"
] | [((100, 114), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (104, 114), False, 'from pathlib import Path\n'), ((418, 451), 'numpy.array', 'np.array', (["data['codes']", 'np.uint8'], {}), "(data['codes'], np.uint8)\n", (426, 451), True, 'import numpy as np\n')] |
import asyncio
import os
import subprocess
from threading import Thread
from typing import Dict, Set
from .plugin_settings import PluginSettings
from .rpc.api.daemon import DaemonConnectedEvent
from .project import CurrentProject
from .rpc import FlutterRpcProcess, FlutterRpcClient
from .env import Env
import sublime
class WindowManager:
def __init__(self, window: sublime.Window) -> None:
super().__init__()
env_dict = sublime.load_settings("LSP-Dart.sublime-settings").get("env", dict(os.environ))
settings = sublime.load_settings("Subliminal.sublime-settings").to_dict()
self.__plugin_settings = PluginSettings(**settings)
if "FLUTTER_ROOT" in env_dict:
env = Env.from_dict(env_dict)
loop = asyncio.new_event_loop()
self.__env = env
self.__window = window
self.__is_daemon_started = False
self.__event_loop = loop
self.__daemon = FlutterRpcProcess([env.flutter_path, "daemon"], loop)
self.__daemon_client = FlutterRpcClient(self.__daemon)
self.__project = CurrentProject(window, env, self.__daemon_client, loop)
else:
sublime.error_message('Unable to determine the path to the Flutter SDK. Please define "FLUTTER_ROOT" under the "env" key in LSP-Dart settings.')
@property
def env(self):
return self.__env
@property
def project(self):
return self.__project
@property
def event_loop(self):
return self.__event_loop
@property
def plugin_settings(self):
return self.__plugin_settings
def start_daemon(self):
if self.__is_daemon_started:
return
Thread(target=self.__event_loop.run_forever).start()
self.__daemon_client.add_event_listener(self.__daemon_event_listener)
self.__daemon.start()
self.__is_daemon_started = True
def unload(self):
self.__daemon.terminate()
_unregister_window_manager(self.__window)
def __daemon_event_listener(self, event):
if isinstance(event, DaemonConnectedEvent):
asyncio.run_coroutine_threadsafe(self.__initialize(), self.__event_loop)
async def __initialize(self):
await self.__project.initialize()
await self.__daemon_client.device.enable()
_window_managers: Dict[int, WindowManager] = {}
_ignored_window: Set[int] = set()
def _unregister_window_manager(window: sublime.Window):
try:
_window_managers.pop(window.id())
except KeyError:
pass
def ignore_window(window: sublime.Window):
_ignored_window.add(window.id())
def unignore_window(window: sublime.Window):
_ignored_window.remove(window.id())
def is_window_ignored(window: sublime.Window):
return window.id() in _ignored_window
def get_window_manager(window) -> WindowManager:
win_id = window.id()
try:
return _window_managers[win_id]
except KeyError:
wm = WindowManager(window)
_window_managers[window.id()] = wm
return wm
def unload_window_manager(window: sublime.Window):
try:
_window_managers[window.id()].unload()
except KeyError:
pass
def unload_window_managers():
for _, wm in _window_managers.items():
wm.unload()
| [
"sublime.load_settings",
"asyncio.new_event_loop",
"threading.Thread",
"sublime.error_message"
] | [((775, 799), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (797, 799), False, 'import asyncio\n'), ((1207, 1361), 'sublime.error_message', 'sublime.error_message', (['"""Unable to determine the path to the Flutter SDK. Please define "FLUTTER_ROOT" under the "env" key in LSP-Dart settings."""'], {}), '(\n \'Unable to determine the path to the Flutter SDK. Please define "FLUTTER_ROOT" under the "env" key in LSP-Dart settings.\'\n )\n', (1228, 1361), False, 'import sublime\n'), ((451, 501), 'sublime.load_settings', 'sublime.load_settings', (['"""LSP-Dart.sublime-settings"""'], {}), "('LSP-Dart.sublime-settings')\n", (472, 501), False, 'import sublime\n'), ((550, 602), 'sublime.load_settings', 'sublime.load_settings', (['"""Subliminal.sublime-settings"""'], {}), "('Subliminal.sublime-settings')\n", (571, 602), False, 'import sublime\n'), ((1737, 1781), 'threading.Thread', 'Thread', ([], {'target': 'self.__event_loop.run_forever'}), '(target=self.__event_loop.run_forever)\n', (1743, 1781), False, 'from threading import Thread\n')] |
#!/usr/bin/env python2
"""Context for all tests."""
from __future__ import absolute_import
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)) + "../lcovparse"))
import lcovparse # pylint: disable=wrong-import-position,unused-import
| [
"os.path.realpath"
] | [((159, 185), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (175, 185), False, 'import os\n')] |
import argparse
from functools import partial
from numbers import Number
from typing import Callable, Union, Tuple, Optional
import numpy as np
from skimage import img_as_uint
from starfish.errors import DataFormatWarning
from starfish.image import ImageStack
from starfish.pipeline.filter.gaussian_low_pass import GaussianLowPass
from ._base import FilterAlgorithmBase
class GaussianHighPass(FilterAlgorithmBase):
def __init__(
self, sigma: Union[Number, Tuple[Number]], is_volume: bool=False, verbose: bool=False, **kwargs
) -> None:
"""Gaussian high pass filter
Parameters
----------
sigma : Union[Number, Tuple[Number]]
standard deviation of gaussian kernel
is_volume : bool
If True, 3d (z, y, x) volumes will be filtered, otherwise, filter 2d tiles independently.
verbose : bool
if True, report on filtering progress (default = False)
"""
if isinstance(sigma, tuple):
message = ("if passing an anisotropic kernel, the dimensionality must match the data shape ({shape}), not "
"{passed_shape}")
if is_volume and len(sigma) != 3:
raise ValueError(message.format(shape=3, passed_shape=len(sigma)))
if not is_volume and len(sigma) != 2:
raise ValueError(message.format(shape=2, passed_shape=len(sigma)))
self.sigma = sigma
self.is_volume = is_volume
self.verbose = verbose
@classmethod
def add_arguments(cls, group_parser: argparse.ArgumentParser) -> None:
group_parser.add_argument(
"--sigma", type=float, help="standard deviation of gaussian kernel")
group_parser.add_argument(
"--is-volume", action="store_true", help="indicates that the image stack should be filtered in 3d")
@staticmethod
def high_pass(image: np.ndarray, sigma: Union[Number, Tuple[Number]]) -> np.ndarray:
"""
Applies a gaussian high pass filter to an image
Parameters
----------
image : numpy.ndarray[np.uint32]
2-d or 3-d image data
sigma : Union[Number, Tuple[Number]]
Standard deviation of gaussian kernel
Returns
-------
np.ndarray :
Standard deviation of the Gaussian kernel that will be applied. If a float, an isotropic kernel will be
assumed, otherwise the dimensions of the kernel give (z, y, x)
"""
if image.dtype != np.uint16:
DataFormatWarning('gaussian filters currently only support uint16 images. Image data will be converted.')
image = img_as_uint(image)
blurred: np.ndarray = GaussianLowPass.low_pass(image, sigma)
over_flow_ind: np.ndarray[bool] = image < blurred
filtered: np.ndarray = image - blurred
filtered[over_flow_ind] = 0
return filtered
def filter(self, stack: ImageStack, in_place: bool=True) -> Optional[ImageStack]:
"""Perform filtering of an image stack
Parameters
----------
stack : ImageStack
Stack to be filtered.
in_place : bool
if True, process ImageStack in-place, otherwise return a new stack
Returns
-------
Optional[ImageStack] :
if in-place is False, return the results of filter as a new stack
"""
high_pass: Callable = partial(self.high_pass, sigma=self.sigma)
result = stack.apply(high_pass, is_volume=self.is_volume, verbose=self.verbose, in_place=in_place)
if not in_place:
return result
return None
| [
"skimage.img_as_uint",
"starfish.errors.DataFormatWarning",
"functools.partial",
"starfish.pipeline.filter.gaussian_low_pass.GaussianLowPass.low_pass"
] | [((2741, 2779), 'starfish.pipeline.filter.gaussian_low_pass.GaussianLowPass.low_pass', 'GaussianLowPass.low_pass', (['image', 'sigma'], {}), '(image, sigma)\n', (2765, 2779), False, 'from starfish.pipeline.filter.gaussian_low_pass import GaussianLowPass\n'), ((3469, 3510), 'functools.partial', 'partial', (['self.high_pass'], {'sigma': 'self.sigma'}), '(self.high_pass, sigma=self.sigma)\n', (3476, 3510), False, 'from functools import partial\n'), ((2565, 2680), 'starfish.errors.DataFormatWarning', 'DataFormatWarning', (['"""gaussian filters currently only support uint16 images. Image data will be converted."""'], {}), "(\n 'gaussian filters currently only support uint16 images. Image data will be converted.'\n )\n", (2582, 2680), False, 'from starfish.errors import DataFormatWarning\n'), ((2691, 2709), 'skimage.img_as_uint', 'img_as_uint', (['image'], {}), '(image)\n', (2702, 2709), False, 'from skimage import img_as_uint\n')] |
from typing import Any, List
import factom_core.blocks as blocks
from factom_core.db import FactomdLevelDB
from .pending_block import PendingBlock
class BaseBlockchain:
"""The base class for all Blockchain objects"""
network_id: bytes = None
vms: List[Any] = None
data_path: str = None
db: FactomdLevelDB = None
current_block: PendingBlock = None
def __init__(self, data_path: str = None) -> None:
if not isinstance(self.network_id, bytes) or len(self.network_id) != 4:
raise ValueError("The Blockchain class must be instantiated with a `network_id` bytes object of length 4")
# if not isinstance(self.vms, list) or len(self.vms) == 0:
# raise ValueError(
# "The Blockchain class must be instantiated with a `vms` list of length > 1"
# )
self.data_path = data_path
self.db = FactomdLevelDB(path=data_path, create_if_missing=True)
def load_genesis_block(self) -> blocks.DirectoryBlock:
raise NotImplementedError("Blockchain classes must implement this method")
def vm_for_hash(self, h: bytes) -> int:
raise NotImplementedError("Blockchain classes must implement this method")
def seal_minute(self) -> None:
raise NotImplementedError("Blockchain classes must implement this method")
def rotate_vms(self) -> None:
raise NotImplementedError("Blockchain classes must implement this method")
def seal_block(self) -> None:
raise NotImplementedError("Blockchain classes must implement this method")
class Blockchain(BaseBlockchain):
"""
A Blockchain is a combination of VM classes. Each VM is associated
with a range of chains. The Blockchain class acts as a wrapper around these other
VM classes, delegating operations to the appropriate VM depending on the
current block / minute number.
"""
def __init__(self, data_path: str = None):
super().__init__(data_path)
def load_genesis_block(self) -> blocks.DirectoryBlock:
pass
def vm_for_hash(self, h: bytes) -> int:
"""
Compute the VM index responsible for hash h
Taken from: factomd/state/processList.go/VMindexFor(hash []byte)
"""
if len(self.vms) == 0:
return 0
v = sum(h)
return v % len(self.vms)
def seal_minute(self) -> None:
"""Finalize the current block minute"""
self.rotate_vms()
if self.current_block.current_minute == 10:
self.seal_block()
else:
self.current_block.current_minute += 1
def rotate_vms(self) -> None:
"""Rotate the responsibilities of the VM set (if necessary)"""
# TODO: see processList.go/MakgeMap for formula per block height
if len(self.vms) == 1:
return
self.vms = self.vms[1:] + self.vms[:1]
def seal_block(self):
"""
Bundles all added transactions, entries, and other elements into a set of finalized
blocks with headers.
"""
block = self.current_block
entry_blocks: List[blocks.EntryBlock] = []
for chain_id, block_body in block.entry_blocks.items():
prev = self.db.get_entry_block_head(chain_id)
header = block_body.construct_header(
chain_id=chain_id,
prev_keymr=prev.keymr if prev is not None else bytes(32),
prev_full_hash=prev.full_hash if prev is not None else bytes(32),
sequence=prev.header.sequence + 1 if prev is not None else 0,
height=block.height,
)
entry_blocks.append(blocks.EntryBlock(header, block_body))
prev = self.db.get_entry_credit_block(height=block.height - 1)
header = block.entry_credit_block.construct_header(
prev_header_hash=prev.header_hash, prev_full_hash=prev.full_hash, height=block.height,
)
entry_credit_block = blocks.EntryCreditBlock(header, block.entry_credit_block)
prev = self.db.get_factoid_block(height=block.height - 1)
header = block.factoid_block.construct_header(
prev_keymr=block.previous.body.factoid_block_keymr,
prev_ledger_keymr=prev.ledger_keymr,
ec_exchange_rate=1000, # TODO
height=block.height,
)
factoid_block = blocks.FactoidBlock(header, block.factoid_block)
prev = self.db.get_admin_block(height=block.height - 1)
header = block.admin_block.construct_header(
prev_back_reference_hash=prev.back_reference_hash, height=block.height
)
admin_block = blocks.AdminBlock(header, block.admin_block)
# Compile all the above blocks and the previous directory block, into a new one
directory_block_body = blocks.DirectoryBlockBody(
admin_block_lookup_hash=admin_block.lookup_hash,
entry_credit_block_header_hash=entry_credit_block.header_hash,
factoid_block_keymr=factoid_block.keymr,
entry_blocks=[
{"chain_id": entry_block.header.chain_id, "keymr": entry_block.keymr} for entry_block in entry_blocks
],
)
header = directory_block_body.construct_header(
network_id=self.network_id,
prev_keymr=block.previous.keymr,
prev_full_hash=block.previous.full_hash,
timestamp=block.timestamp,
height=block.height,
)
directory_block = blocks.DirectoryBlock(header, directory_block_body)
# Persist the blocks as new chain heads
self.db.put_directory_block_head(directory_block)
self.db.put_admin_block_head(admin_block)
self.db.put_entry_credit_block_head(entry_credit_block)
self.db.put_factoid_block_head(factoid_block)
for entry_block in entry_blocks:
self.db.put_entry_block_head(entry_block)
| [
"factom_core.blocks.FactoidBlock",
"factom_core.blocks.EntryCreditBlock",
"factom_core.blocks.DirectoryBlock",
"factom_core.db.FactomdLevelDB",
"factom_core.blocks.DirectoryBlockBody",
"factom_core.blocks.AdminBlock",
"factom_core.blocks.EntryBlock"
] | [((894, 948), 'factom_core.db.FactomdLevelDB', 'FactomdLevelDB', ([], {'path': 'data_path', 'create_if_missing': '(True)'}), '(path=data_path, create_if_missing=True)\n', (908, 948), False, 'from factom_core.db import FactomdLevelDB\n'), ((3977, 4034), 'factom_core.blocks.EntryCreditBlock', 'blocks.EntryCreditBlock', (['header', 'block.entry_credit_block'], {}), '(header, block.entry_credit_block)\n', (4000, 4034), True, 'import factom_core.blocks as blocks\n'), ((4380, 4428), 'factom_core.blocks.FactoidBlock', 'blocks.FactoidBlock', (['header', 'block.factoid_block'], {}), '(header, block.factoid_block)\n', (4399, 4428), True, 'import factom_core.blocks as blocks\n'), ((4662, 4706), 'factom_core.blocks.AdminBlock', 'blocks.AdminBlock', (['header', 'block.admin_block'], {}), '(header, block.admin_block)\n', (4679, 4706), True, 'import factom_core.blocks as blocks\n'), ((4827, 5139), 'factom_core.blocks.DirectoryBlockBody', 'blocks.DirectoryBlockBody', ([], {'admin_block_lookup_hash': 'admin_block.lookup_hash', 'entry_credit_block_header_hash': 'entry_credit_block.header_hash', 'factoid_block_keymr': 'factoid_block.keymr', 'entry_blocks': "[{'chain_id': entry_block.header.chain_id, 'keymr': entry_block.keymr} for\n entry_block in entry_blocks]"}), "(admin_block_lookup_hash=admin_block.lookup_hash,\n entry_credit_block_header_hash=entry_credit_block.header_hash,\n factoid_block_keymr=factoid_block.keymr, entry_blocks=[{'chain_id':\n entry_block.header.chain_id, 'keymr': entry_block.keymr} for\n entry_block in entry_blocks])\n", (4852, 5139), True, 'import factom_core.blocks as blocks\n'), ((5515, 5566), 'factom_core.blocks.DirectoryBlock', 'blocks.DirectoryBlock', (['header', 'directory_block_body'], {}), '(header, directory_block_body)\n', (5536, 5566), True, 'import factom_core.blocks as blocks\n'), ((3668, 3705), 'factom_core.blocks.EntryBlock', 'blocks.EntryBlock', (['header', 'block_body'], {}), '(header, block_body)\n', (3685, 3705), True, 'import 
factom_core.blocks as blocks\n')] |
import keras
import os
def load_model(version, new_model, retrain=False, *args):
"""
:param version: model version
:param new_model: method for call to get a new model e.g. my_ResNet.my_ResNet
:param retrain: True: load new model
:return:
"""
create_new_model = False
# load model
if not retrain:
try:
with open(os.path.join("./model", "model_structure_{}.json".format(version)), "r") as file:
model_json = file.read()
print("[info]: loading model...")
model = keras.models.model_from_json(model_json)
model.load_weights(os.path.join("./model", "model_weights_{}.h5".format(version)))
print("[info]: load model done.")
except OSError:
print("[info]: load model file failed, creating model")
model = new_model(*args)
create_new_model = True
else:
print("[info]: retrain, creating model")
model = new_model(*args)
create_new_model = True
return model, create_new_model
| [
"keras.models.model_from_json"
] | [((559, 599), 'keras.models.model_from_json', 'keras.models.model_from_json', (['model_json'], {}), '(model_json)\n', (587, 599), False, 'import keras\n')] |
import argparse
from bioplottemplates.libs import libcli, libio
from bioplottemplates.plots import label_dots
ap = libcli.CustomParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
ap.add_argument(
'data_csv',
help='The CSVs files to plot',
nargs='+',
)
ap.add_argument(
'-v',
'--plotvars',
help=(
'Plot variables. '
'Example: -v xlabel=frames ylabel=RMSD color=red.'
),
nargs='*',
action=libcli.ParamsToDict,
)
def maincli():
cmd = load_args()
main(**vars(cmd))
def main(data_csv, plotvars, **kwargs):
data, labels = libio.extract_labels_data(*data_csv)
plotvars = plotvars or dict()
plotvars.setdefault('series_labels', data_csv)
print(plotvars['series_labels'])
label_dots.plot(
labels,
data,
**plotvars,
)
pass
if __name__ == '__main__':
maincli()
| [
"bioplottemplates.libs.libio.extract_labels_data",
"bioplottemplates.plots.label_dots.plot",
"bioplottemplates.libs.libcli.CustomParser"
] | [((118, 219), 'bioplottemplates.libs.libcli.CustomParser', 'libcli.CustomParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n', (137, 219), False, 'from bioplottemplates.libs import libcli, libio\n'), ((655, 691), 'bioplottemplates.libs.libio.extract_labels_data', 'libio.extract_labels_data', (['*data_csv'], {}), '(*data_csv)\n', (680, 691), False, 'from bioplottemplates.libs import libcli, libio\n'), ((823, 864), 'bioplottemplates.plots.label_dots.plot', 'label_dots.plot', (['labels', 'data'], {}), '(labels, data, **plotvars)\n', (838, 864), False, 'from bioplottemplates.plots import label_dots\n')] |
# coding: utf-8
from django.db import models
from django.contrib.auth.models import User
from django.utils.text import slugify
class SharedFolder(models.Model):
name = models.CharField(max_length=50)
slug = models.SlugField(max_length=255, null=True, blank=True)
users = models.ManyToManyField(User, through='Collaborator', related_name='shared_folders')
class Meta:
verbose_name = 'Shared Folder'
verbose_name_plural = 'Shared Folders'
ordering = ('name',)
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
if not self.pk:
super(SharedFolder, self).save(*args, **kwargs)
base_slug = slugify(self.name)
if len(base_slug) > 0:
base_slug = slugify(u'{0} {1}'.format(self.name, self.pk))
else:
base_slug = self.pk
i = 0
unique_slug = base_slug
while SharedFolder.objects.filter(slug=unique_slug).exists():
i += 1
unique_slug = u'{0}-{1}'.format(base_slug, i)
self.slug = unique_slug
super(SharedFolder, self).save(*args, **kwargs)
class Collaborator(models.Model):
READ = 'R'
WRITE = 'W'
ADMIN = 'A'
ACCESS_TYPES = (
(READ, 'Read'),
(WRITE, 'Write'),
(ADMIN, 'Admin'),
)
user = models.ForeignKey(User,on_delete=models.CASCADE)
shared_folder = models.ForeignKey(SharedFolder,on_delete=models.CASCADE)
joined_at = models.DateTimeField(auto_now_add=True)
is_owner = models.BooleanField(default=False)
access = models.CharField(max_length=1, choices=ACCESS_TYPES, default=READ)
class Meta:
verbose_name = 'Collaborator'
verbose_name_plural = 'Collaborators'
def save(self, *args, **kwargs):
if self.is_owner:
self.access = Collaborator.ADMIN
super(Collaborator, self).save(*args, **kwargs)
class Document(models.Model):
ARTICLE = 'article'
BOOK = 'book'
BOOKLET = 'booklet'
CONFERENCE = 'conference'
INBOOK = 'inbook'
INCOLLECTION = 'incollection'
INPROCEEDINGS = 'inproceedings'
MANUAL = 'manual'
MASTERSTHESIS = 'mastersthesis'
MISC = 'misc'
PHDTHESIS = 'phdthesis'
PROCEEDINGS = 'proceedings'
TECHREPORT = 'techreport'
UNPUBLISHED = 'unpublished'
ENTRY_TYPES = (
(ARTICLE, 'Article'),
(BOOK, 'Book'),
(BOOKLET, 'Booklet'),
(CONFERENCE, 'Conference'),
(INBOOK, 'Inbook'),
(INCOLLECTION, 'Incollection'),
(INPROCEEDINGS, 'Inproceedings'),
(MANUAL, 'Manual'),
(MASTERSTHESIS, 'Master\'s Thesis'),
(MISC, 'Misc'),
(PHDTHESIS, 'Ph.D. Thesis'),
(PROCEEDINGS, 'Proceedings'),
(TECHREPORT, 'Tech Report'),
(UNPUBLISHED, 'Unpublished'),
)
# Bibtex required fields
bibtexkey = models.CharField('Bibtex key', max_length=255, null=True, blank=True)
entry_type = models.CharField('Document type', max_length=13, choices=ENTRY_TYPES, null=True, blank=True)
# Bibtex base fields
address = models.CharField(max_length=2000, null=True, blank=True)
author = models.TextField(max_length=1000, null=True, blank=True)
booktitle = models.CharField(max_length=1000, null=True, blank=True)
chapter = models.CharField(max_length=1000, null=True, blank=True)
crossref = models.CharField('Cross-referenced', max_length=1000, null=True, blank=True)
edition = models.CharField(max_length=1000, null=True, blank=True)
editor = models.CharField(max_length=1000, null=True, blank=True)
howpublished = models.CharField('How it was published', max_length=1000, null=True, blank=True)
institution = models.CharField(max_length=1000, null=True, blank=True)
journal = models.CharField(max_length=1000, null=True, blank=True)
month = models.CharField(max_length=50, null=True, blank=True)
note = models.CharField(max_length=2000, null=True, blank=True)
number = models.CharField(max_length=1000, null=True, blank=True)
organization = models.CharField(max_length=1000, null=True, blank=True)
pages = models.CharField(max_length=255, null=True, blank=True)
publisher = models.CharField(max_length=1000, null=True, blank=True)
school = models.CharField(max_length=1000, null=True, blank=True)
series = models.CharField(max_length=500, null=True, blank=True)
title = models.CharField(max_length=1000, null=True, blank=True)
publication_type = models.CharField(max_length=1000, null=True, blank=True) # Type
volume = models.CharField(max_length=1000, null=True, blank=True)
year = models.CharField(max_length=50, null=True, blank=True)
# Extra fields
abstract = models.TextField(max_length=4000, null=True, blank=True)
coden = models.CharField(max_length=1000, null=True, blank=True)
doi = models.CharField('DOI', max_length=255, null=True, blank=True)
isbn = models.CharField('ISBN', max_length=255, null=True, blank=True)
issn = models.CharField('ISSN', max_length=255, null=True, blank=True)
keywords = models.CharField(max_length=2000, null=True, blank=True)
language = models.CharField(max_length=1000, null=True, blank=True)
url = models.CharField('URL', max_length=1000, null=True, blank=True)
# Parsifal management field
user = models.ForeignKey(User, null=True, related_name='documents',on_delete=models.CASCADE)
review = models.ForeignKey('reviews.Review', null=True, related_name='documents',on_delete=models.CASCADE)
shared_folder = models.ForeignKey(SharedFolder, null=True, related_name='documents',on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = 'Document'
verbose_name_plural = 'Documents'
def __unicode__(self):
return self.title
def document_file_upload_to(instance, filename):
return u'library/{0}/'.format(instance.document.user.pk)
class DocumentFile(models.Model):
document = models.ForeignKey(Document, related_name='files',on_delete=models.CASCADE)
document_file = models.FileField(upload_to='library/')
filename = models.CharField(max_length=255)
size = models.IntegerField(default=0)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = 'Document File'
verbose_name_plural = 'Document Files'
def __unicode__(self):
return self.filename
class Folder(models.Model):
name = models.CharField(max_length=50)
slug = models.SlugField(max_length=255, null=True, blank=True)
user = models.ForeignKey(User, related_name='library_folders',on_delete=models.CASCADE)
documents = models.ManyToManyField(Document)
class Meta:
verbose_name = 'Folder'
verbose_name_plural = 'Folders'
ordering = ('name',)
unique_together = (('name', 'user'),)
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
base_slug = slugify(self.name)
if len(base_slug) > 0:
unique_slug = base_slug
else:
base_slug = unique_slug = 'untitled-folder'
i = 0
while Folder.objects.filter(slug=unique_slug).exists():
i += 1
unique_slug = u'{0}-{1}'.format(base_slug, i)
self.slug = unique_slug
super(Folder, self).save(*args, **kwargs)
| [
"django.utils.text.slugify",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.ManyToManyField",
"django.db.models.FileField",
"django.db.models.BooleanField",
"django.db.models.SlugField",
"django.db.models.DateTimeField",
"django.db.m... | [((175, 206), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (191, 206), False, 'from django.db import models\n'), ((218, 273), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(255)', 'null': '(True)', 'blank': '(True)'}), '(max_length=255, null=True, blank=True)\n', (234, 273), False, 'from django.db import models\n'), ((286, 374), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['User'], {'through': '"""Collaborator"""', 'related_name': '"""shared_folders"""'}), "(User, through='Collaborator', related_name=\n 'shared_folders')\n", (308, 374), False, 'from django.db import models\n'), ((1348, 1397), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (1365, 1397), False, 'from django.db import models\n'), ((1417, 1474), 'django.db.models.ForeignKey', 'models.ForeignKey', (['SharedFolder'], {'on_delete': 'models.CASCADE'}), '(SharedFolder, on_delete=models.CASCADE)\n', (1434, 1474), False, 'from django.db import models\n'), ((1490, 1529), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1510, 1529), False, 'from django.db import models\n'), ((1545, 1579), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1564, 1579), False, 'from django.db import models\n'), ((1593, 1659), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'choices': 'ACCESS_TYPES', 'default': 'READ'}), '(max_length=1, choices=ACCESS_TYPES, default=READ)\n', (1609, 1659), False, 'from django.db import models\n'), ((2898, 2967), 'django.db.models.CharField', 'models.CharField', (['"""Bibtex key"""'], {'max_length': '(255)', 'null': '(True)', 'blank': '(True)'}), "('Bibtex key', max_length=255, null=True, blank=True)\n", (2914, 2967), False, 'from django.db 
import models\n'), ((2985, 3082), 'django.db.models.CharField', 'models.CharField', (['"""Document type"""'], {'max_length': '(13)', 'choices': 'ENTRY_TYPES', 'null': '(True)', 'blank': '(True)'}), "('Document type', max_length=13, choices=ENTRY_TYPES, null=\n True, blank=True)\n", (3001, 3082), False, 'from django.db import models\n'), ((3122, 3178), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2000)', 'null': '(True)', 'blank': '(True)'}), '(max_length=2000, null=True, blank=True)\n', (3138, 3178), False, 'from django.db import models\n'), ((3192, 3248), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(1000)', 'null': '(True)', 'blank': '(True)'}), '(max_length=1000, null=True, blank=True)\n', (3208, 3248), False, 'from django.db import models\n'), ((3265, 3321), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'null': '(True)', 'blank': '(True)'}), '(max_length=1000, null=True, blank=True)\n', (3281, 3321), False, 'from django.db import models\n'), ((3336, 3392), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'null': '(True)', 'blank': '(True)'}), '(max_length=1000, null=True, blank=True)\n', (3352, 3392), False, 'from django.db import models\n'), ((3408, 3484), 'django.db.models.CharField', 'models.CharField', (['"""Cross-referenced"""'], {'max_length': '(1000)', 'null': '(True)', 'blank': '(True)'}), "('Cross-referenced', max_length=1000, null=True, blank=True)\n", (3424, 3484), False, 'from django.db import models\n'), ((3499, 3555), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'null': '(True)', 'blank': '(True)'}), '(max_length=1000, null=True, blank=True)\n', (3515, 3555), False, 'from django.db import models\n'), ((3569, 3625), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'null': '(True)', 'blank': '(True)'}), '(max_length=1000, null=True, blank=True)\n', (3585, 3625), False, 
'from django.db import models\n'), ((3645, 3730), 'django.db.models.CharField', 'models.CharField', (['"""How it was published"""'], {'max_length': '(1000)', 'null': '(True)', 'blank': '(True)'}), "('How it was published', max_length=1000, null=True, blank=True\n )\n", (3661, 3730), False, 'from django.db import models\n'), ((3744, 3800), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'null': '(True)', 'blank': '(True)'}), '(max_length=1000, null=True, blank=True)\n', (3760, 3800), False, 'from django.db import models\n'), ((3815, 3871), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'null': '(True)', 'blank': '(True)'}), '(max_length=1000, null=True, blank=True)\n', (3831, 3871), False, 'from django.db import models\n'), ((3884, 3938), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)'}), '(max_length=50, null=True, blank=True)\n', (3900, 3938), False, 'from django.db import models\n'), ((3950, 4006), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2000)', 'null': '(True)', 'blank': '(True)'}), '(max_length=2000, null=True, blank=True)\n', (3966, 4006), False, 'from django.db import models\n'), ((4020, 4076), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'null': '(True)', 'blank': '(True)'}), '(max_length=1000, null=True, blank=True)\n', (4036, 4076), False, 'from django.db import models\n'), ((4096, 4152), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'null': '(True)', 'blank': '(True)'}), '(max_length=1000, null=True, blank=True)\n', (4112, 4152), False, 'from django.db import models\n'), ((4165, 4220), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)', 'blank': '(True)'}), '(max_length=255, null=True, blank=True)\n', (4181, 4220), False, 'from django.db import models\n'), ((4237, 4293), 
'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'null': '(True)', 'blank': '(True)'}), '(max_length=1000, null=True, blank=True)\n', (4253, 4293), False, 'from django.db import models\n'), ((4307, 4363), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'null': '(True)', 'blank': '(True)'}), '(max_length=1000, null=True, blank=True)\n', (4323, 4363), False, 'from django.db import models\n'), ((4377, 4432), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)', 'null': '(True)', 'blank': '(True)'}), '(max_length=500, null=True, blank=True)\n', (4393, 4432), False, 'from django.db import models\n'), ((4445, 4501), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'null': '(True)', 'blank': '(True)'}), '(max_length=1000, null=True, blank=True)\n', (4461, 4501), False, 'from django.db import models\n'), ((4525, 4581), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'null': '(True)', 'blank': '(True)'}), '(max_length=1000, null=True, blank=True)\n', (4541, 4581), False, 'from django.db import models\n'), ((4602, 4658), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'null': '(True)', 'blank': '(True)'}), '(max_length=1000, null=True, blank=True)\n', (4618, 4658), False, 'from django.db import models\n'), ((4670, 4724), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)'}), '(max_length=50, null=True, blank=True)\n', (4686, 4724), False, 'from django.db import models\n'), ((4760, 4816), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(4000)', 'null': '(True)', 'blank': '(True)'}), '(max_length=4000, null=True, blank=True)\n', (4776, 4816), False, 'from django.db import models\n'), ((4829, 4885), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'null': '(True)', 'blank': 
'(True)'}), '(max_length=1000, null=True, blank=True)\n', (4845, 4885), False, 'from django.db import models\n'), ((4896, 4958), 'django.db.models.CharField', 'models.CharField', (['"""DOI"""'], {'max_length': '(255)', 'null': '(True)', 'blank': '(True)'}), "('DOI', max_length=255, null=True, blank=True)\n", (4912, 4958), False, 'from django.db import models\n'), ((4970, 5033), 'django.db.models.CharField', 'models.CharField', (['"""ISBN"""'], {'max_length': '(255)', 'null': '(True)', 'blank': '(True)'}), "('ISBN', max_length=255, null=True, blank=True)\n", (4986, 5033), False, 'from django.db import models\n'), ((5045, 5108), 'django.db.models.CharField', 'models.CharField', (['"""ISSN"""'], {'max_length': '(255)', 'null': '(True)', 'blank': '(True)'}), "('ISSN', max_length=255, null=True, blank=True)\n", (5061, 5108), False, 'from django.db import models\n'), ((5124, 5180), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2000)', 'null': '(True)', 'blank': '(True)'}), '(max_length=2000, null=True, blank=True)\n', (5140, 5180), False, 'from django.db import models\n'), ((5196, 5252), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'null': '(True)', 'blank': '(True)'}), '(max_length=1000, null=True, blank=True)\n', (5212, 5252), False, 'from django.db import models\n'), ((5263, 5326), 'django.db.models.CharField', 'models.CharField', (['"""URL"""'], {'max_length': '(1000)', 'null': '(True)', 'blank': '(True)'}), "('URL', max_length=1000, null=True, blank=True)\n", (5279, 5326), False, 'from django.db import models\n'), ((5371, 5462), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'null': '(True)', 'related_name': '"""documents"""', 'on_delete': 'models.CASCADE'}), "(User, null=True, related_name='documents', on_delete=\n models.CASCADE)\n", (5388, 5462), False, 'from django.db import models\n'), ((5470, 5572), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""reviews.Review"""'], 
{'null': '(True)', 'related_name': '"""documents"""', 'on_delete': 'models.CASCADE'}), "('reviews.Review', null=True, related_name='documents',\n on_delete=models.CASCADE)\n", (5487, 5572), False, 'from django.db import models\n'), ((5588, 5686), 'django.db.models.ForeignKey', 'models.ForeignKey', (['SharedFolder'], {'null': '(True)', 'related_name': '"""documents"""', 'on_delete': 'models.CASCADE'}), "(SharedFolder, null=True, related_name='documents',\n on_delete=models.CASCADE)\n", (5605, 5686), False, 'from django.db import models\n'), ((5700, 5739), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (5720, 5739), False, 'from django.db import models\n'), ((5757, 5792), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (5777, 5792), False, 'from django.db import models\n'), ((6102, 6177), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Document'], {'related_name': '"""files"""', 'on_delete': 'models.CASCADE'}), "(Document, related_name='files', on_delete=models.CASCADE)\n", (6119, 6177), False, 'from django.db import models\n'), ((6197, 6235), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""library/"""'}), "(upload_to='library/')\n", (6213, 6235), False, 'from django.db import models\n'), ((6251, 6283), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (6267, 6283), False, 'from django.db import models\n'), ((6295, 6325), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (6314, 6325), False, 'from django.db import models\n'), ((6343, 6382), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (6363, 6382), False, 'from django.db import models\n'), ((6400, 6435), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': 
'(True)'}), '(auto_now=True)\n', (6420, 6435), False, 'from django.db import models\n'), ((6641, 6672), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (6657, 6672), False, 'from django.db import models\n'), ((6684, 6739), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(255)', 'null': '(True)', 'blank': '(True)'}), '(max_length=255, null=True, blank=True)\n', (6700, 6739), False, 'from django.db import models\n'), ((6751, 6837), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'related_name': '"""library_folders"""', 'on_delete': 'models.CASCADE'}), "(User, related_name='library_folders', on_delete=models.\n CASCADE)\n", (6768, 6837), False, 'from django.db import models\n'), ((6848, 6880), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Document'], {}), '(Document)\n', (6870, 6880), False, 'from django.db import models\n'), ((697, 715), 'django.utils.text.slugify', 'slugify', (['self.name'], {}), '(self.name)\n', (704, 715), False, 'from django.utils.text import slugify\n'), ((7156, 7174), 'django.utils.text.slugify', 'slugify', (['self.name'], {}), '(self.name)\n', (7163, 7174), False, 'from django.utils.text import slugify\n')] |
import sys
import pytest
from pysh import shwords, shwords_f
def test_conversions():
with pytest.raises(ValueError):
shwords('{:{}}', 1, 2)
assert '{:{}}'.format(1, 2) == ' 1' # by contrast
def test_multiword():
assert shwords('touch {!@}', ['a', 'b']) \
== ['touch', 'a', 'b']
with pytest.raises(ValueError):
shwords('a b{!@}', ['x'])
with pytest.raises(ValueError):
shwords('a {!@}c', ['x'])
with pytest.raises(ValueError):
shwords('a {!@}{}', ['b'], 'c')
assert shwords('touch {!@} c', ['a', 'b']) \
== ['touch', 'a', 'b', 'c']
def test_splitting():
assert shwords('git grep {}', 'hello world') \
== ['git', 'grep', 'hello world']
assert shwords('{} {} {}', 'a', 'b c', 'd') \
== ['a', 'b c', 'd']
assert shwords(' a {} c ', 'b') \
== ['a', 'b', 'c']
assert shwords('tar -C {outdir} -xzf {tarball}',
outdir='/path/with/spaces in it',
tarball='2019 Planning (final) (v2) (final final).tgz') \
== ['tar', '-C', '/path/with/spaces in it', '-xzf', '2019 Planning (final) (v2) (final final).tgz']
def test_within_word():
assert shwords('git log --format={}', '%aN') \
== ['git', 'log', '--format=%aN']
assert shwords('{basedir}/deployments/{deploy_id}/bin/start',
basedir='/srv/app', deploy_id='0f1e2d3c') \
== ['/srv/app/deployments/0f1e2d3c/bin/start']
def test_locals():
import pytest
l = ['a', 'b']
assert shwords_f('touch {l!@}') \
== ['touch', 'a', 'b']
assert shwords_f('touch {l[1]}') \
== ['touch', 'b']
assert shwords_f('echo {pytest.__name__}') \
== ['echo', 'pytest']
# Known limitation: locals only, no globals...
with pytest.raises(KeyError, match='sys'):
shwords_f('echo {sys}')
# (unlike real, compiler-assisted f-strings)
assert f'{sys}' \
== "<module 'sys' (built-in)>"
# ... and enclosing scopes' locals are complicated.
def inner1():
with pytest.raises(KeyError):
return shwords_f('touch {l!@}')
inner1()
def inner2():
l
assert shwords_f('touch {l!@}') \
== ['touch', 'a', 'b']
inner2()
def inner3():
nonlocal l
assert shwords_f('touch {l!@}') \
== ['touch', 'a', 'b']
inner3()
| [
"pysh.shwords",
"pytest.raises",
"pysh.shwords_f"
] | [((96, 121), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (109, 121), False, 'import pytest\n'), ((127, 149), 'pysh.shwords', 'shwords', (['"""{:{}}"""', '(1)', '(2)'], {}), "('{:{}}', 1, 2)\n", (134, 149), False, 'from pysh import shwords, shwords_f\n'), ((236, 269), 'pysh.shwords', 'shwords', (['"""touch {!@}"""', "['a', 'b']"], {}), "('touch {!@}', ['a', 'b'])\n", (243, 269), False, 'from pysh import shwords, shwords_f\n'), ((306, 331), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (319, 331), False, 'import pytest\n'), ((337, 362), 'pysh.shwords', 'shwords', (['"""a b{!@}"""', "['x']"], {}), "('a b{!@}', ['x'])\n", (344, 362), False, 'from pysh import shwords, shwords_f\n'), ((370, 395), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (383, 395), False, 'import pytest\n'), ((401, 426), 'pysh.shwords', 'shwords', (['"""a {!@}c"""', "['x']"], {}), "('a {!@}c', ['x'])\n", (408, 426), False, 'from pysh import shwords, shwords_f\n'), ((434, 459), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (447, 459), False, 'import pytest\n'), ((465, 496), 'pysh.shwords', 'shwords', (['"""a {!@}{}"""', "['b']", '"""c"""'], {}), "('a {!@}{}', ['b'], 'c')\n", (472, 496), False, 'from pysh import shwords, shwords_f\n'), ((506, 541), 'pysh.shwords', 'shwords', (['"""touch {!@} c"""', "['a', 'b']"], {}), "('touch {!@} c', ['a', 'b'])\n", (513, 541), False, 'from pysh import shwords, shwords_f\n'), ((609, 646), 'pysh.shwords', 'shwords', (['"""git grep {}"""', '"""hello world"""'], {}), "('git grep {}', 'hello world')\n", (616, 646), False, 'from pysh import shwords, shwords_f\n'), ((696, 732), 'pysh.shwords', 'shwords', (['"""{} {} {}"""', '"""a"""', '"""b c"""', '"""d"""'], {}), "('{} {} {}', 'a', 'b c', 'd')\n", (703, 732), False, 'from pysh import shwords, shwords_f\n'), ((769, 795), 'pysh.shwords', 'shwords', (['""" a {} c """', '"""b"""'], {}), "(' a {} c ', 'b')\n", 
(776, 795), False, 'from pysh import shwords, shwords_f\n'), ((830, 965), 'pysh.shwords', 'shwords', (['"""tar -C {outdir} -xzf {tarball}"""'], {'outdir': '"""/path/with/spaces in it"""', 'tarball': '"""2019 Planning (final) (v2) (final final).tgz"""'}), "('tar -C {outdir} -xzf {tarball}', outdir='/path/with/spaces in it',\n tarball='2019 Planning (final) (v2) (final final).tgz')\n", (837, 965), False, 'from pysh import shwords, shwords_f\n'), ((1153, 1190), 'pysh.shwords', 'shwords', (['"""git log --format={}"""', '"""%aN"""'], {}), "('git log --format={}', '%aN')\n", (1160, 1190), False, 'from pysh import shwords, shwords_f\n'), ((1240, 1340), 'pysh.shwords', 'shwords', (['"""{basedir}/deployments/{deploy_id}/bin/start"""'], {'basedir': '"""/srv/app"""', 'deploy_id': '"""0f1e2d3c"""'}), "('{basedir}/deployments/{deploy_id}/bin/start', basedir='/srv/app',\n deploy_id='0f1e2d3c')\n", (1247, 1340), False, 'from pysh import shwords, shwords_f\n'), ((1471, 1495), 'pysh.shwords_f', 'shwords_f', (['"""touch {l!@}"""'], {}), "('touch {l!@}')\n", (1480, 1495), False, 'from pysh import shwords, shwords_f\n'), ((1534, 1559), 'pysh.shwords_f', 'shwords_f', (['"""touch {l[1]}"""'], {}), "('touch {l[1]}')\n", (1543, 1559), False, 'from pysh import shwords, shwords_f\n'), ((1593, 1628), 'pysh.shwords_f', 'shwords_f', (['"""echo {pytest.__name__}"""'], {}), "('echo {pytest.__name__}')\n", (1602, 1628), False, 'from pysh import shwords, shwords_f\n'), ((1714, 1750), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""sys"""'}), "(KeyError, match='sys')\n", (1727, 1750), False, 'import pytest\n'), ((1756, 1779), 'pysh.shwords_f', 'shwords_f', (['"""echo {sys}"""'], {}), "('echo {sys}')\n", (1765, 1779), False, 'from pysh import shwords, shwords_f\n'), ((1962, 1985), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (1975, 1985), False, 'import pytest\n'), ((2000, 2024), 'pysh.shwords_f', 'shwords_f', (['"""touch {l!@}"""'], {}), "('touch {l!@}')\n", 
(2009, 2024), False, 'from pysh import shwords, shwords_f\n'), ((2069, 2093), 'pysh.shwords_f', 'shwords_f', (['"""touch {l!@}"""'], {}), "('touch {l!@}')\n", (2078, 2093), False, 'from pysh import shwords, shwords_f\n'), ((2178, 2202), 'pysh.shwords_f', 'shwords_f', (['"""touch {l!@}"""'], {}), "('touch {l!@}')\n", (2187, 2202), False, 'from pysh import shwords, shwords_f\n')] |
import wandb
import main
# Load project config
config = main.load_config()
# Initialize wandb
wandb.init()
# Replace project config hyperparameters with the ones loaded from wandb sweep server
sweep_hparams = wandb.Config._as_dict(wandb.config)
for key, value in sweep_hparams.items():
if key != "_wandb":
config["hparams"][key] = value
# Execute run
main.main(config)
| [
"main.load_config",
"wandb.Config._as_dict",
"wandb.init",
"main.main"
] | [((58, 76), 'main.load_config', 'main.load_config', ([], {}), '()\n', (74, 76), False, 'import main\n'), ((97, 109), 'wandb.init', 'wandb.init', ([], {}), '()\n', (107, 109), False, 'import wandb\n'), ((213, 248), 'wandb.Config._as_dict', 'wandb.Config._as_dict', (['wandb.config'], {}), '(wandb.config)\n', (234, 248), False, 'import wandb\n'), ((368, 385), 'main.main', 'main.main', (['config'], {}), '(config)\n', (377, 385), False, 'import main\n')] |
# Top of main python script
import os
os.environ["PYOPENGL_PLATFORM"] = "egl"
import sys
import random
import argparse
import numpy as np
import trimesh
import imageio
import open3d as o3d
from mathutils import Matrix
import h5py
import json
from mesh_to_sdf import get_surface_point_cloud
import pyrender
import util
np.random.seed(12433)
random.seed(12433)
train_categories = [
"04379243",
"02958343",
"03001627",
"02691156",
"04256520",
"04090263",
"03636649",
"04530566",
"02828884",
"03691459",
"02933112",
"03211117",
"04401088",
]
val_categories = [
"02924116",
"02808440",
"03467517",
"03325088",
"03046257",
"03991062",
"03593526",
"02876657",
"02871439",
"03642806",
"03624134",
"04468005",
"02747177",
"03790512",
"03948459",
"03337140",
"02818832",
"03928116",
"04330267",
"03797390",
"02880940",
"04554684",
"04004475",
"03513137",
"03761084",
"04225987",
"04460130",
"02942699",
"02801938",
"02946921",
"03938244",
"03710193",
"03207941",
"04099429",
"02773838",
"02843684",
"03261776",
"03759954",
"04074963",
"03085013",
"02992529",
"02954340",
]
p = argparse.ArgumentParser(
description="Renders given obj file by rotation a camera around it."
)
p.add_argument(
"--data_dir",
type=str,
default="/labdata/nicolai/data/ShapeNetCore.v2",
help="Data directory containing meshes.",
)
p.add_argument(
"--output_dir",
type=str,
default="./images",
help="The path the output will be dumped to.",
)
p.add_argument(
"--num_views",
type=int,
default=25,
help="Number of images to render",
)
p.add_argument("--resolution", type=int, default=256, help="output image resolution.")
p.add_argument(
"--sphere_radius",
type=float,
default=1.2,
help="Radius of the viewing sphere",
)
p.add_argument("--val", action="store_true", help="Use to render validation split")
p.add_argument(
"--save_png",
action="store_true",
help="Save output images for visualization",
)
p.add_argument(
"--show_3d",
action="store_true",
help="Save output images for visualization",
)
def normalize_mesh(mesh):
# Center the mesh
matrix = np.eye(4)
bounds = mesh.bounds
centroid = (bounds[1, :] + bounds[0, :]) / 2
matrix[:3, -1] = -centroid
mesh.apply_transform(matrix)
# Scale the model to unit diagonal lenght
matrix = np.eye(4)
extents = mesh.extents
diag = np.sqrt(extents[0] ** 2 + extents[1] ** 2 + extents[2] ** 2)
matrix[:3, :3] *= 1.0 / diag
mesh.apply_transform(matrix)
return mesh
def main():
args = p.parse_args()
instance_names = []
shapenet_categories = train_categories + val_categories
folders = sorted(os.listdir(args.data_dir))
for cat in shapenet_categories:
path = os.path.join(args.data_dir, cat)
new_instances = [
os.path.join(cat, f)
for f in sorted(os.listdir(path))
if os.path.isdir(os.path.join(path, f))
]
instance_names = instance_names + new_instances
instance_names = instance_names[0:10000]
if len(instance_names) == 0:
print("Data dir does not contain any instances")
raise NotImplementedError
# instance_names = instance_names[32000:]
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
print(f"Number of files: {len(instance_names)}")
# Load n meshes
count = 0
mesh_errors = {}
for instance_name in instance_names:
runtime_error = False
category, instance_name = instance_name.split("/")
if os.path.exists(os.path.join(args.output_dir, f"{instance_name}.h5")):
continue
try:
mesh = trimesh.load(
os.path.join(
args.data_dir,
category,
instance_name,
"models",
"model_normalized.obj",
),
force="mesh",
)
except ValueError:
if category not in mesh_errors.keys():
mesh_errors[category] = []
mesh_errors[category].append(instance_name)
print(f"ValueError with instance {instance_name}. Skipping....")
continue
# Normalize the mesh to unit diagonal
mesh = normalize_mesh(mesh)
cam_locations = util.sample_spherical(args.num_views, args.sphere_radius)
obj_location = np.zeros((1, 3))
cv_poses = util.look_at(cam_locations, obj_location)
cam_locations = [util.cv_cam2world_to_bcam2world(m) for m in cv_poses]
image_size = (args.resolution, args.resolution)
K = np.array([[262.5, 0.0, 128.0], [0.0, 262.5, 128.0], [0.0, 0.0, 1.0]])
camera = pyrender.IntrinsicsCamera(
fx=K[0, 0], fy=K[1, 1], cx=K[0, 2], cy=K[1, 2], znear=0.01, zfar=100
)
rgbs = []
depths = []
masks = []
c2ws = []
normals = []
scene = pyrender.Scene.from_trimesh_scene(
trimesh.Scene(mesh), ambient_light=(1, 1, 1)
)
for ii, w2c in enumerate(cam_locations):
# Add camera roll
theta = random.random() * np.pi
roll_matrix = Matrix(
(
(np.cos(theta), -np.sin(theta), 0, 0),
(np.sin(theta), np.cos(theta), 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
)
)
w2c = roll_matrix @ w2c
if ii == 0:
cam_node = scene.add(camera, pose=np.array(w2c))
else:
scene.set_pose(cam_node, pose=np.array(w2c))
try:
r = pyrender.OffscreenRenderer(*image_size)
color, depth = r.render(
scene, flags=pyrender.constants.RenderFlags.FLAT
)
if np.all(color == 255):
raise RuntimeError("No texture rendered")
except Exception as e:
print(f"RuntimeError with instance: {instance_name}. Skipping...")
runtime_error = True
r.delete()
if category not in mesh_errors.keys():
mesh_errors[category] = []
mesh_errors[category].append(instance_name)
break
normals.append(util.depth_2_normal(depth, depth == 0.0, K))
mask = depth != 0
w2c = np.array(util.get_world2cam_from_blender_cam(w2c))
rgbs.append(color)
depths.append(depth)
masks.append(mask)
c2ws.append(np.linalg.inv(w2c))
r.delete()
if args.save_png:
imageio.imwrite(
os.path.join(
args.output_dir, f"{instance_name}_{str(ii).zfill(3)}.png"
),
color,
)
if runtime_error:
runtime_error = False
continue
rgbs = np.stack([r for r in rgbs])
# Check if all images are white. If yes, continue without saving the model
depths = np.stack([r for r in depths])
masks = np.stack([r for r in masks])
poses = np.stack([r for r in c2ws])
normals = np.stack([r for r in normals])
# Generate 3D supervision data for the prior
number_of_points = 100000
surface_pcd = get_surface_point_cloud(
mesh, "scan", args.sphere_radius, 100, 400, 10000000, calculate_normals=True
)
pts, sdf = surface_pcd.sample_sdf_near_surface(
number_of_points,
1,
sign_method="normal",
normal_sample_count=11,
min_size=0,
return_gradients=False,
)
sdf_pts = np.concatenate([pts, sdf[:, None]], axis=-1)
if args.show_3d:
colors = np.zeros_like(pts)
colors[:, 0] = 1
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(pts)
pcd.colors = o3d.utility.Vector3dVector(colors)
frames = []
for c in c2ws:
frames.append(
o3d.geometry.TriangleMesh.create_coordinate_frame().transform(c)
)
o3d.visualization.draw_geometries(frames + [pcd])
hf = h5py.File(os.path.join(args.output_dir, f"{instance_name}.h5"), "w")
hf.create_dataset("rgb", data=rgbs, compression="gzip", dtype="f")
hf.create_dataset("depth", data=depths, compression="gzip", dtype="f")
hf.create_dataset("mask", data=masks, compression="gzip", dtype="f")
hf.create_dataset("normals", data=normals, compression="gzip", dtype="f")
hf.create_dataset("pose", data=poses, compression="gzip", dtype="f")
hf.create_dataset("K", data=K, dtype="f")
hf.create_dataset("sphere_radius", data=args.sphere_radius, dtype="f")
hf.create_dataset("sdf", data=sdf_pts, compression="gzip", dtype="f")
hf.create_dataset("category", data=category)
hf.close()
count += 1
if count % 100 == 0:
print(f"Generated {count} new instances")
with open(os.path.join(args.output_dir, "failures.json"), "w") as outfile:
json.dump(mesh_errors, outfile)
print("Finished all data generation")
if __name__ == "__main__":
main()
| [
"numpy.sqrt",
"util.look_at",
"numpy.array",
"util.cv_cam2world_to_bcam2world",
"numpy.sin",
"util.sample_spherical",
"os.path.exists",
"pyrender.IntrinsicsCamera",
"os.listdir",
"argparse.ArgumentParser",
"numpy.stack",
"util.depth_2_normal",
"util.get_world2cam_from_blender_cam",
"numpy.... | [((324, 345), 'numpy.random.seed', 'np.random.seed', (['(12433)'], {}), '(12433)\n', (338, 345), True, 'import numpy as np\n'), ((346, 364), 'random.seed', 'random.seed', (['(12433)'], {}), '(12433)\n', (357, 364), False, 'import random\n'), ((1295, 1393), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Renders given obj file by rotation a camera around it."""'}), "(description=\n 'Renders given obj file by rotation a camera around it.')\n", (1318, 1393), False, 'import argparse\n'), ((2345, 2354), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2351, 2354), True, 'import numpy as np\n'), ((2553, 2562), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2559, 2562), True, 'import numpy as np\n'), ((2601, 2661), 'numpy.sqrt', 'np.sqrt', (['(extents[0] ** 2 + extents[1] ** 2 + extents[2] ** 2)'], {}), '(extents[0] ** 2 + extents[1] ** 2 + extents[2] ** 2)\n', (2608, 2661), True, 'import numpy as np\n'), ((2890, 2915), 'os.listdir', 'os.listdir', (['args.data_dir'], {}), '(args.data_dir)\n', (2900, 2915), False, 'import os\n'), ((2968, 3000), 'os.path.join', 'os.path.join', (['args.data_dir', 'cat'], {}), '(args.data_dir, cat)\n', (2980, 3000), False, 'import os\n'), ((3453, 3484), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (3467, 3484), False, 'import os\n'), ((3494, 3522), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {}), '(args.output_dir)\n', (3505, 3522), False, 'import os\n'), ((4563, 4620), 'util.sample_spherical', 'util.sample_spherical', (['args.num_views', 'args.sphere_radius'], {}), '(args.num_views, args.sphere_radius)\n', (4584, 4620), False, 'import util\n'), ((4644, 4660), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (4652, 4660), True, 'import numpy as np\n'), ((4680, 4721), 'util.look_at', 'util.look_at', (['cam_locations', 'obj_location'], {}), '(cam_locations, obj_location)\n', (4692, 4721), False, 'import util\n'), ((4869, 4938), 'numpy.array', 
'np.array', (['[[262.5, 0.0, 128.0], [0.0, 262.5, 128.0], [0.0, 0.0, 1.0]]'], {}), '([[262.5, 0.0, 128.0], [0.0, 262.5, 128.0], [0.0, 0.0, 1.0]])\n', (4877, 4938), True, 'import numpy as np\n'), ((4956, 5055), 'pyrender.IntrinsicsCamera', 'pyrender.IntrinsicsCamera', ([], {'fx': 'K[0, 0]', 'fy': 'K[1, 1]', 'cx': 'K[0, 2]', 'cy': 'K[1, 2]', 'znear': '(0.01)', 'zfar': '(100)'}), '(fx=K[0, 0], fy=K[1, 1], cx=K[0, 2], cy=K[1, 2],\n znear=0.01, zfar=100)\n', (4981, 5055), False, 'import pyrender\n'), ((7244, 7271), 'numpy.stack', 'np.stack', (['[r for r in rgbs]'], {}), '([r for r in rgbs])\n', (7252, 7271), True, 'import numpy as np\n'), ((7373, 7402), 'numpy.stack', 'np.stack', (['[r for r in depths]'], {}), '([r for r in depths])\n', (7381, 7402), True, 'import numpy as np\n'), ((7419, 7447), 'numpy.stack', 'np.stack', (['[r for r in masks]'], {}), '([r for r in masks])\n', (7427, 7447), True, 'import numpy as np\n'), ((7464, 7491), 'numpy.stack', 'np.stack', (['[r for r in c2ws]'], {}), '([r for r in c2ws])\n', (7472, 7491), True, 'import numpy as np\n'), ((7510, 7540), 'numpy.stack', 'np.stack', (['[r for r in normals]'], {}), '([r for r in normals])\n', (7518, 7540), True, 'import numpy as np\n'), ((7651, 7757), 'mesh_to_sdf.get_surface_point_cloud', 'get_surface_point_cloud', (['mesh', '"""scan"""', 'args.sphere_radius', '(100)', '(400)', '(10000000)'], {'calculate_normals': '(True)'}), "(mesh, 'scan', args.sphere_radius, 100, 400, \n 10000000, calculate_normals=True)\n", (7674, 7757), False, 'from mesh_to_sdf import get_surface_point_cloud\n'), ((8034, 8078), 'numpy.concatenate', 'np.concatenate', (['[pts, sdf[:, None]]'], {'axis': '(-1)'}), '([pts, sdf[:, None]], axis=-1)\n', (8048, 8078), True, 'import numpy as np\n'), ((9527, 9558), 'json.dump', 'json.dump', (['mesh_errors', 'outfile'], {}), '(mesh_errors, outfile)\n', (9536, 9558), False, 'import json\n'), ((3039, 3059), 'os.path.join', 'os.path.join', (['cat', 'f'], {}), '(cat, f)\n', (3051, 3059), False, 
'import os\n'), ((3790, 3842), 'os.path.join', 'os.path.join', (['args.output_dir', 'f"""{instance_name}.h5"""'], {}), "(args.output_dir, f'{instance_name}.h5')\n", (3802, 3842), False, 'import os\n'), ((4747, 4781), 'util.cv_cam2world_to_bcam2world', 'util.cv_cam2world_to_bcam2world', (['m'], {}), '(m)\n', (4778, 4781), False, 'import util\n'), ((5235, 5254), 'trimesh.Scene', 'trimesh.Scene', (['mesh'], {}), '(mesh)\n', (5248, 5254), False, 'import trimesh\n'), ((8126, 8144), 'numpy.zeros_like', 'np.zeros_like', (['pts'], {}), '(pts)\n', (8139, 8144), True, 'import numpy as np\n'), ((8193, 8218), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (8216, 8218), True, 'import open3d as o3d\n'), ((8244, 8275), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['pts'], {}), '(pts)\n', (8270, 8275), True, 'import open3d as o3d\n'), ((8301, 8335), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['colors'], {}), '(colors)\n', (8327, 8335), True, 'import open3d as o3d\n'), ((8534, 8583), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['(frames + [pcd])'], {}), '(frames + [pcd])\n', (8567, 8583), True, 'import open3d as o3d\n'), ((8608, 8660), 'os.path.join', 'os.path.join', (['args.output_dir', 'f"""{instance_name}.h5"""'], {}), "(args.output_dir, f'{instance_name}.h5')\n", (8620, 8660), False, 'import os\n'), ((9454, 9500), 'os.path.join', 'os.path.join', (['args.output_dir', '"""failures.json"""'], {}), "(args.output_dir, 'failures.json')\n", (9466, 9500), False, 'import os\n'), ((3929, 4019), 'os.path.join', 'os.path.join', (['args.data_dir', 'category', 'instance_name', '"""models"""', '"""model_normalized.obj"""'], {}), "(args.data_dir, category, instance_name, 'models',\n 'model_normalized.obj')\n", (3941, 4019), False, 'import os\n'), ((5389, 5404), 'random.random', 'random.random', ([], {}), '()\n', (5402, 5404), False, 'import random\n'), ((5924, 5963), 
'pyrender.OffscreenRenderer', 'pyrender.OffscreenRenderer', (['*image_size'], {}), '(*image_size)\n', (5950, 5963), False, 'import pyrender\n'), ((6111, 6131), 'numpy.all', 'np.all', (['(color == 255)'], {}), '(color == 255)\n', (6117, 6131), True, 'import numpy as np\n'), ((6589, 6632), 'util.depth_2_normal', 'util.depth_2_normal', (['depth', '(depth == 0.0)', 'K'], {}), '(depth, depth == 0.0, K)\n', (6608, 6632), False, 'import util\n'), ((6692, 6732), 'util.get_world2cam_from_blender_cam', 'util.get_world2cam_from_blender_cam', (['w2c'], {}), '(w2c)\n', (6727, 6732), False, 'import util\n'), ((6854, 6872), 'numpy.linalg.inv', 'np.linalg.inv', (['w2c'], {}), '(w2c)\n', (6867, 6872), True, 'import numpy as np\n'), ((3088, 3104), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (3098, 3104), False, 'import os\n'), ((3135, 3156), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (3147, 3156), False, 'import os\n'), ((5486, 5499), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5492, 5499), True, 'import numpy as np\n'), ((5545, 5558), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (5551, 5558), True, 'import numpy as np\n'), ((5560, 5573), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5566, 5573), True, 'import numpy as np\n'), ((5792, 5805), 'numpy.array', 'np.array', (['w2c'], {}), '(w2c)\n', (5800, 5805), True, 'import numpy as np\n'), ((5871, 5884), 'numpy.array', 'np.array', (['w2c'], {}), '(w2c)\n', (5879, 5884), True, 'import numpy as np\n'), ((5502, 5515), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (5508, 5515), True, 'import numpy as np\n'), ((8439, 8490), 'open3d.geometry.TriangleMesh.create_coordinate_frame', 'o3d.geometry.TriangleMesh.create_coordinate_frame', ([], {}), '()\n', (8488, 8490), True, 'import open3d as o3d\n')] |
from fetcher.source.fetcher import Fetcher
from fetcher.source.managers.notification import FetcherException
def fetch(config_dict):
    """Run one fetch job with the supplied configuration.

    Args:
        config_dict: configuration mapping handed straight to ``Fetcher``.

    Returns:
        A human-readable completion message.
    """
    fetcher = Fetcher(config_dict)
    fetcher.start()
    return "Job has been finished"
| [
"fetcher.source.fetcher.Fetcher"
] | [((143, 163), 'fetcher.source.fetcher.Fetcher', 'Fetcher', (['config_dict'], {}), '(config_dict)\n', (150, 163), False, 'from fetcher.source.fetcher import Fetcher\n')] |
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import UserProfile
class UserRegistrationForm(UserCreationForm):
    """Registration form based on Django's ``UserCreationForm``.

    Clears the verbose default help texts and creates the user with a
    properly hashed password on save.
    """

    def __init__(self, *args, **kwargs):
        super(UserRegistrationForm, self).__init__(*args, **kwargs)
        # Hide Django's default guidance text for a cleaner rendered form.
        for field_name in ('username', 'password1', 'password2'):
            self.fields[field_name].help_text = ''

    class Meta:
        model = User
        fields = (
            'username',
            'email',
            'password1',
            'password2'
        )

    def save(self):
        # BUG FIX: the password lives under the 'password1' key in this
        # form's cleaned_data (the previous key did not exist and raised
        # KeyError). create_user() hashes the password correctly.
        user = User.objects.create_user(
            username=self.cleaned_data['username'],
            password=self.cleaned_data['password1'],
        )
        user.email = self.cleaned_data['email']
        user.save()
        return user
class UserProfileForm(forms.ModelForm):
    """ModelForm exposing a UserProfile's picture and bio fields."""
    class Meta:
        model = UserProfile
        fields = ('profile_pic', 'bio')
class ProfileEditForm(forms.ModelForm):
    """ModelForm for editing an existing UserProfile's picture and bio."""
    class Meta:
        model = UserProfile
        fields = ['profile_pic', 'bio']
"django.contrib.auth.models.User.objects.create_user"
] | [((653, 764), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': "self.cleaned_data['username']", 'password': "self.cleaned_data['<PASSWORD>']"}), "(username=self.cleaned_data['username'], password=\n self.cleaned_data['<PASSWORD>'])\n", (677, 764), False, 'from django.contrib.auth.models import User\n')] |
# -*- coding: utf-8 -*-
from django.conf.urls import url
from . import views
urlpatterns = [
    # Index Page
    url(r'^$', views.index, name='index'),
    # Issue electronic statements
    url(r'^CheckMgtKeyInUse$', views.checkMgtKeyInUse, name='CheckMgtKeyInUse'),
    url(r'^RegistIssue$', views.registIssue, name='RegistIssue'),
    url(r'^Register$', views.register, name='Register'),
    url(r'^Update$', views.update, name='Update'),
    url(r'^Issue$', views.issue, name='Issue'),
    url(r'^Cancel$', views.cancel, name='Cancel'),
    url(r'^Delete$', views.delete, name='Delete'),
    # Look up tax-invoice/statement information
    url(r'^GetInfo$', views.getInfo, name='GetInfo'),
    url(r'^GetInfos$', views.getInfos, name='GetInfos'),
    url(r'^GetDetailInfo$', views.getDetailInfo, name='GetDetailInfo'),
    url(r'^Search$', views.search, name='Search'),
    url(r'^GetLogs$', views.getLogs, name='GetLogs'),
    url(r'^GetURL$', views.getURL, name='GetURL'),
    # Statement viewing / printing
    url(r'^GetPopUpURL$', views.getPopUpURL, name='getPopUpURL'),
    url(r'^GetViewURL$', views.getViewURL, name='getViewURL'),
    url(r'^GetPrintURL$', views.getPrintURL, name='GetPrintURL'),
    url(r'^GetEPrintURL$', views.getEPrintURL, name='GetEPrintURL'),
    url(r'^GetMassPrintURL$', views.getMassPrintURL, name='GetMassPrintURL'),
    url(r'^GetMailURL$', views.getMailURL, name='GetMailURL'),
    # Additional features
    url(r'^GetAccessURL', views.getAccessURL, name='GetAccessURL'),
    url(r'^GetSealURL', views.getSealURL, name='GetSealURL'),
    url(r'^AttachFile$', views.attachFile, name='AttachFile'),
    url(r'^DeleteFile$', views.deleteFile, name='DeleteFile'),
    url(r'^GetFiles$', views.getFiles, name='GetFiles'),
    url(r'^SendEmail$', views.sendEmail, name='SendEmail'),
    url(r'^SendSMS$', views.sendSMS, name='SendSMS'),
    url(r'^SendFAX$', views.sendFAX, name='SendFAX'),
    url(r'^FAXSend$', views.FAXSend, name='FAXSend'),
    url(r'^AttachStatement$', views.attachStatement, name='AttachStatement'),
    url(r'^DetachStatement$', views.detachStatement, name='DetachStatement'),
    url(r'^ListEmailConfig', views.listEmailConfig, name='ListEmailConfig'),
    url(r'^UpdateEmailConfig', views.updateEmailConfig, name='UpdateEmailConfig'),
    # Point (credit) management
    url(r'^GetBalance$', views.getBalance, name='GetBalance'),
    url(r'^GetChargeURL', views.getChargeURL, name='GetChargeURL'),
    url(r'^GetPaymentURL', views.getPaymentURL, name='GetPaymentURL'),
    url(r'^GetUseHistoryURL', views.getUseHistoryURL, name='GetUseHistoryURL'),
    url(r'^GetPartnerBalance$', views.getPartnerBalance, name='GetPartnerBalance'),
    url(r'^GetPartnerURL$', views.getPartnerURL, name='GetPartnerURL'),
    url(r'^GetUnitCost$', views.getUnitCost, name='GetUnitCost'),
    url(r'^GetChargeInfo$', views.getChargeInfo, name='GetChargeInfo'),
    # Member information
    url(r'^CheckIsMember$', views.checkIsMember, name='CheckIsMember'),
    url(r'^CheckID$', views.checkID, name='CheckID'),
    url(r'^JoinMember$', views.joinMember, name='JoinMember'),
    url(r'^GetCorpInfo$', views.getCorpInfo, name='GetCorpInfo'),
    url(r'^UpdateCorpInfo$', views.updateCorpInfo, name='UpdateCorpInfo'),
    url(r'^RegistContact$', views.registContact, name='RegistContact'),
    url(r'^GetContactInfo$', views.getContactInfo, name='GetContactInfo'),
    url(r'^ListContact$', views.listContact, name='ListContact'),
    url(r'^UpdateContact$', views.updateContact, name='UpdateContact'),
]
| [
"django.conf.urls.url"
] | [((116, 152), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.index'], {'name': '"""index"""'}), "('^$', views.index, name='index')\n", (119, 152), False, 'from django.conf.urls import url\n'), ((175, 249), 'django.conf.urls.url', 'url', (['"""^CheckMgtKeyInUse$"""', 'views.checkMgtKeyInUse'], {'name': '"""CheckMgtKeyInUse"""'}), "('^CheckMgtKeyInUse$', views.checkMgtKeyInUse, name='CheckMgtKeyInUse')\n", (178, 249), False, 'from django.conf.urls import url\n'), ((256, 315), 'django.conf.urls.url', 'url', (['"""^RegistIssue$"""', 'views.registIssue'], {'name': '"""RegistIssue"""'}), "('^RegistIssue$', views.registIssue, name='RegistIssue')\n", (259, 315), False, 'from django.conf.urls import url\n'), ((322, 372), 'django.conf.urls.url', 'url', (['"""^Register$"""', 'views.register'], {'name': '"""Register"""'}), "('^Register$', views.register, name='Register')\n", (325, 372), False, 'from django.conf.urls import url\n'), ((379, 423), 'django.conf.urls.url', 'url', (['"""^Update$"""', 'views.update'], {'name': '"""Update"""'}), "('^Update$', views.update, name='Update')\n", (382, 423), False, 'from django.conf.urls import url\n'), ((430, 471), 'django.conf.urls.url', 'url', (['"""^Issue$"""', 'views.issue'], {'name': '"""Issue"""'}), "('^Issue$', views.issue, name='Issue')\n", (433, 471), False, 'from django.conf.urls import url\n'), ((478, 522), 'django.conf.urls.url', 'url', (['"""^Cancel$"""', 'views.cancel'], {'name': '"""Cancel"""'}), "('^Cancel$', views.cancel, name='Cancel')\n", (481, 522), False, 'from django.conf.urls import url\n'), ((529, 573), 'django.conf.urls.url', 'url', (['"""^Delete$"""', 'views.delete'], {'name': '"""Delete"""'}), "('^Delete$', views.delete, name='Delete')\n", (532, 573), False, 'from django.conf.urls import url\n'), ((598, 645), 'django.conf.urls.url', 'url', (['"""^GetInfo$"""', 'views.getInfo'], {'name': '"""GetInfo"""'}), "('^GetInfo$', views.getInfo, name='GetInfo')\n", (601, 645), False, 'from django.conf.urls import 
url\n'), ((652, 702), 'django.conf.urls.url', 'url', (['"""^GetInfos$"""', 'views.getInfos'], {'name': '"""GetInfos"""'}), "('^GetInfos$', views.getInfos, name='GetInfos')\n", (655, 702), False, 'from django.conf.urls import url\n'), ((709, 774), 'django.conf.urls.url', 'url', (['"""^GetDetailInfo$"""', 'views.getDetailInfo'], {'name': '"""GetDetailInfo"""'}), "('^GetDetailInfo$', views.getDetailInfo, name='GetDetailInfo')\n", (712, 774), False, 'from django.conf.urls import url\n'), ((781, 825), 'django.conf.urls.url', 'url', (['"""^Search$"""', 'views.search'], {'name': '"""Search"""'}), "('^Search$', views.search, name='Search')\n", (784, 825), False, 'from django.conf.urls import url\n'), ((832, 879), 'django.conf.urls.url', 'url', (['"""^GetLogs$"""', 'views.getLogs'], {'name': '"""GetLogs"""'}), "('^GetLogs$', views.getLogs, name='GetLogs')\n", (835, 879), False, 'from django.conf.urls import url\n'), ((886, 930), 'django.conf.urls.url', 'url', (['"""^GetURL$"""', 'views.getURL'], {'name': '"""GetURL"""'}), "('^GetURL$', views.getURL, name='GetURL')\n", (889, 930), False, 'from django.conf.urls import url\n'), ((953, 1012), 'django.conf.urls.url', 'url', (['"""^GetPopUpURL$"""', 'views.getPopUpURL'], {'name': '"""getPopUpURL"""'}), "('^GetPopUpURL$', views.getPopUpURL, name='getPopUpURL')\n", (956, 1012), False, 'from django.conf.urls import url\n'), ((1019, 1075), 'django.conf.urls.url', 'url', (['"""^GetViewURL$"""', 'views.getViewURL'], {'name': '"""getViewURL"""'}), "('^GetViewURL$', views.getViewURL, name='getViewURL')\n", (1022, 1075), False, 'from django.conf.urls import url\n'), ((1082, 1141), 'django.conf.urls.url', 'url', (['"""^GetPrintURL$"""', 'views.getPrintURL'], {'name': '"""GetPrintURL"""'}), "('^GetPrintURL$', views.getPrintURL, name='GetPrintURL')\n", (1085, 1141), False, 'from django.conf.urls import url\n'), ((1148, 1210), 'django.conf.urls.url', 'url', (['"""^GetEPrintURL$"""', 'views.getEPrintURL'], {'name': '"""GetEPrintURL"""'}), 
"('^GetEPrintURL$', views.getEPrintURL, name='GetEPrintURL')\n", (1151, 1210), False, 'from django.conf.urls import url\n'), ((1217, 1288), 'django.conf.urls.url', 'url', (['"""^GetMassPrintURL$"""', 'views.getMassPrintURL'], {'name': '"""GetMassPrintURL"""'}), "('^GetMassPrintURL$', views.getMassPrintURL, name='GetMassPrintURL')\n", (1220, 1288), False, 'from django.conf.urls import url\n'), ((1295, 1351), 'django.conf.urls.url', 'url', (['"""^GetMailURL$"""', 'views.getMailURL'], {'name': '"""GetMailURL"""'}), "('^GetMailURL$', views.getMailURL, name='GetMailURL')\n", (1298, 1351), False, 'from django.conf.urls import url\n'), ((1371, 1432), 'django.conf.urls.url', 'url', (['"""^GetAccessURL"""', 'views.getAccessURL'], {'name': '"""GetAccessURL"""'}), "('^GetAccessURL', views.getAccessURL, name='GetAccessURL')\n", (1374, 1432), False, 'from django.conf.urls import url\n'), ((1439, 1494), 'django.conf.urls.url', 'url', (['"""^GetSealURL"""', 'views.getSealURL'], {'name': '"""GetSealURL"""'}), "('^GetSealURL', views.getSealURL, name='GetSealURL')\n", (1442, 1494), False, 'from django.conf.urls import url\n'), ((1501, 1557), 'django.conf.urls.url', 'url', (['"""^AttachFile$"""', 'views.attachFile'], {'name': '"""AttachFile"""'}), "('^AttachFile$', views.attachFile, name='AttachFile')\n", (1504, 1557), False, 'from django.conf.urls import url\n'), ((1564, 1620), 'django.conf.urls.url', 'url', (['"""^DeleteFile$"""', 'views.deleteFile'], {'name': '"""DeleteFile"""'}), "('^DeleteFile$', views.deleteFile, name='DeleteFile')\n", (1567, 1620), False, 'from django.conf.urls import url\n'), ((1627, 1677), 'django.conf.urls.url', 'url', (['"""^GetFiles$"""', 'views.getFiles'], {'name': '"""GetFiles"""'}), "('^GetFiles$', views.getFiles, name='GetFiles')\n", (1630, 1677), False, 'from django.conf.urls import url\n'), ((1684, 1737), 'django.conf.urls.url', 'url', (['"""^SendEmail$"""', 'views.sendEmail'], {'name': '"""SendEmail"""'}), "('^SendEmail$', views.sendEmail, 
name='SendEmail')\n", (1687, 1737), False, 'from django.conf.urls import url\n'), ((1744, 1791), 'django.conf.urls.url', 'url', (['"""^SendSMS$"""', 'views.sendSMS'], {'name': '"""SendSMS"""'}), "('^SendSMS$', views.sendSMS, name='SendSMS')\n", (1747, 1791), False, 'from django.conf.urls import url\n'), ((1798, 1845), 'django.conf.urls.url', 'url', (['"""^SendFAX$"""', 'views.sendFAX'], {'name': '"""SendFAX"""'}), "('^SendFAX$', views.sendFAX, name='SendFAX')\n", (1801, 1845), False, 'from django.conf.urls import url\n'), ((1852, 1899), 'django.conf.urls.url', 'url', (['"""^FAXSend$"""', 'views.FAXSend'], {'name': '"""FAXSend"""'}), "('^FAXSend$', views.FAXSend, name='FAXSend')\n", (1855, 1899), False, 'from django.conf.urls import url\n'), ((1906, 1977), 'django.conf.urls.url', 'url', (['"""^AttachStatement$"""', 'views.attachStatement'], {'name': '"""AttachStatement"""'}), "('^AttachStatement$', views.attachStatement, name='AttachStatement')\n", (1909, 1977), False, 'from django.conf.urls import url\n'), ((1984, 2055), 'django.conf.urls.url', 'url', (['"""^DetachStatement$"""', 'views.detachStatement'], {'name': '"""DetachStatement"""'}), "('^DetachStatement$', views.detachStatement, name='DetachStatement')\n", (1987, 2055), False, 'from django.conf.urls import url\n'), ((2062, 2132), 'django.conf.urls.url', 'url', (['"""^ListEmailConfig"""', 'views.listEmailConfig'], {'name': '"""ListEmailConfig"""'}), "('^ListEmailConfig', views.listEmailConfig, name='ListEmailConfig')\n", (2065, 2132), False, 'from django.conf.urls import url\n'), ((2139, 2215), 'django.conf.urls.url', 'url', (['"""^UpdateEmailConfig"""', 'views.updateEmailConfig'], {'name': '"""UpdateEmailConfig"""'}), "('^UpdateEmailConfig', views.updateEmailConfig, name='UpdateEmailConfig')\n", (2142, 2215), False, 'from django.conf.urls import url\n'), ((2236, 2292), 'django.conf.urls.url', 'url', (['"""^GetBalance$"""', 'views.getBalance'], {'name': '"""GetBalance"""'}), "('^GetBalance$', 
views.getBalance, name='GetBalance')\n", (2239, 2292), False, 'from django.conf.urls import url\n'), ((2299, 2360), 'django.conf.urls.url', 'url', (['"""^GetChargeURL"""', 'views.getChargeURL'], {'name': '"""GetChargeURL"""'}), "('^GetChargeURL', views.getChargeURL, name='GetChargeURL')\n", (2302, 2360), False, 'from django.conf.urls import url\n'), ((2367, 2431), 'django.conf.urls.url', 'url', (['"""^GetPaymentURL"""', 'views.getPaymentURL'], {'name': '"""GetPaymentURL"""'}), "('^GetPaymentURL', views.getPaymentURL, name='GetPaymentURL')\n", (2370, 2431), False, 'from django.conf.urls import url\n'), ((2438, 2511), 'django.conf.urls.url', 'url', (['"""^GetUseHistoryURL"""', 'views.getUseHistoryURL'], {'name': '"""GetUseHistoryURL"""'}), "('^GetUseHistoryURL', views.getUseHistoryURL, name='GetUseHistoryURL')\n", (2441, 2511), False, 'from django.conf.urls import url\n'), ((2518, 2595), 'django.conf.urls.url', 'url', (['"""^GetPartnerBalance$"""', 'views.getPartnerBalance'], {'name': '"""GetPartnerBalance"""'}), "('^GetPartnerBalance$', views.getPartnerBalance, name='GetPartnerBalance')\n", (2521, 2595), False, 'from django.conf.urls import url\n'), ((2602, 2667), 'django.conf.urls.url', 'url', (['"""^GetPartnerURL$"""', 'views.getPartnerURL'], {'name': '"""GetPartnerURL"""'}), "('^GetPartnerURL$', views.getPartnerURL, name='GetPartnerURL')\n", (2605, 2667), False, 'from django.conf.urls import url\n'), ((2674, 2733), 'django.conf.urls.url', 'url', (['"""^GetUnitCost$"""', 'views.getUnitCost'], {'name': '"""GetUnitCost"""'}), "('^GetUnitCost$', views.getUnitCost, name='GetUnitCost')\n", (2677, 2733), False, 'from django.conf.urls import url\n'), ((2740, 2805), 'django.conf.urls.url', 'url', (['"""^GetChargeInfo$"""', 'views.getChargeInfo'], {'name': '"""GetChargeInfo"""'}), "('^GetChargeInfo$', views.getChargeInfo, name='GetChargeInfo')\n", (2743, 2805), False, 'from django.conf.urls import url\n'), ((2824, 2889), 'django.conf.urls.url', 'url', 
(['"""^CheckIsMember$"""', 'views.checkIsMember'], {'name': '"""CheckIsMember"""'}), "('^CheckIsMember$', views.checkIsMember, name='CheckIsMember')\n", (2827, 2889), False, 'from django.conf.urls import url\n'), ((2896, 2943), 'django.conf.urls.url', 'url', (['"""^CheckID$"""', 'views.checkID'], {'name': '"""CheckID"""'}), "('^CheckID$', views.checkID, name='CheckID')\n", (2899, 2943), False, 'from django.conf.urls import url\n'), ((2950, 3006), 'django.conf.urls.url', 'url', (['"""^JoinMember$"""', 'views.joinMember'], {'name': '"""JoinMember"""'}), "('^JoinMember$', views.joinMember, name='JoinMember')\n", (2953, 3006), False, 'from django.conf.urls import url\n'), ((3013, 3072), 'django.conf.urls.url', 'url', (['"""^GetCorpInfo$"""', 'views.getCorpInfo'], {'name': '"""GetCorpInfo"""'}), "('^GetCorpInfo$', views.getCorpInfo, name='GetCorpInfo')\n", (3016, 3072), False, 'from django.conf.urls import url\n'), ((3079, 3147), 'django.conf.urls.url', 'url', (['"""^UpdateCorpInfo$"""', 'views.updateCorpInfo'], {'name': '"""UpdateCorpInfo"""'}), "('^UpdateCorpInfo$', views.updateCorpInfo, name='UpdateCorpInfo')\n", (3082, 3147), False, 'from django.conf.urls import url\n'), ((3154, 3219), 'django.conf.urls.url', 'url', (['"""^RegistContact$"""', 'views.registContact'], {'name': '"""RegistContact"""'}), "('^RegistContact$', views.registContact, name='RegistContact')\n", (3157, 3219), False, 'from django.conf.urls import url\n'), ((3226, 3294), 'django.conf.urls.url', 'url', (['"""^GetContactInfo$"""', 'views.getContactInfo'], {'name': '"""GetContactInfo"""'}), "('^GetContactInfo$', views.getContactInfo, name='GetContactInfo')\n", (3229, 3294), False, 'from django.conf.urls import url\n'), ((3301, 3360), 'django.conf.urls.url', 'url', (['"""^ListContact$"""', 'views.listContact'], {'name': '"""ListContact"""'}), "('^ListContact$', views.listContact, name='ListContact')\n", (3304, 3360), False, 'from django.conf.urls import url\n'), ((3367, 3432), 'django.conf.urls.url', 
'url', (['"""^UpdateContact$"""', 'views.updateContact'], {'name': '"""UpdateContact"""'}), "('^UpdateContact$', views.updateContact, name='UpdateContact')\n", (3370, 3432), False, 'from django.conf.urls import url\n')] |
from setuptools import setup
with open('README.rst') as readme_file:
    long_description = readme_file.read()

# Keep only the text from the 'Description' section onwards for the
# package's long description.
long_description = long_description[long_description.index('Description'):]

setup(
    name='timeme',
    version='0.1.1',
    description='Decorator that prints the running time of a function',
    long_description=long_description,
    url='http://github.com/enricobacis/timeme',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    packages=['timeme'],
    keywords='time timing function decorator',
)
)
| [
"setuptools.setup"
] | [((183, 511), 'setuptools.setup', 'setup', ([], {'name': '"""timeme"""', 'version': '"""0.1.1"""', 'description': '"""Decorator that prints the running time of a function"""', 'long_description': 'long_description', 'url': '"""http://github.com/enricobacis/timeme"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'packages': "['timeme']", 'keywords': '"""time timing function decorator"""'}), "(name='timeme', version='0.1.1', description=\n 'Decorator that prints the running time of a function',\n long_description=long_description, url=\n 'http://github.com/enricobacis/timeme', author='<NAME>', author_email=\n '<EMAIL>', license='MIT', packages=['timeme'], keywords=\n 'time timing function decorator')\n", (188, 511), False, 'from setuptools import setup\n')] |
import time
from pymongo import MongoClient
from datetime import datetime
from threading import Thread, Lock
from concurrent.futures import ThreadPoolExecutor

start = datetime.now()

client = MongoClient("mongodb://username:password@127.0.0.1")
database = client["database_name"]
collection = database["collection_name"]

BATCH_SIZE = 10000   # documents per insert_many call
MAX_WORKERS = 4      # concurrent insert threads


def send(batch):
    """Insert one batch of documents into the target collection."""
    collection.insert_many(batch)


batch = []
futures = []
# ThreadPoolExecutor replaces the hand-rolled Thread/Lock/counter scheme and
# its two CPU-burning busy-wait loops (the time.sleep(0) spin while throttling
# and the bare `while threads_count != 0: pass` at the end).
with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
    with open("utils/trash.csv") as file:
        for line in file:
            # split(",", 1) tolerates commas inside the description field
            # (plain split(",") raised ValueError on such lines);
            # rstrip("\n") keeps the trailing newline out of the stored value.
            name, description = line.rstrip("\n").split(",", 1)
            batch.append({"name": name, "description": description})
            if len(batch) >= BATCH_SIZE:
                futures.append(executor.submit(send, batch))
                batch = []
    if batch:
        futures.append(executor.submit(send, batch))
    # Propagate any insert errors instead of silently dropping them.
    for future in futures:
        future.result()

print(collection.count_documents({}))
collection.drop()
client.drop_database("mongo")
print(datetime.now() - start)
| [
"threading.Lock",
"time.sleep",
"datetime.datetime.now",
"pymongo.MongoClient",
"threading.Thread"
] | [((118, 132), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (130, 132), False, 'from datetime import datetime\n'), ((143, 195), 'pymongo.MongoClient', 'MongoClient', (['"""mongodb://username:password@127.0.0.1"""'], {}), "('mongodb://username:password@127.0.0.1')\n", (154, 195), False, 'from pymongo import MongoClient\n'), ((298, 304), 'threading.Lock', 'Lock', ([], {}), '()\n', (302, 304), False, 'from threading import Thread, Lock\n'), ((1045, 1059), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1057, 1059), False, 'from datetime import datetime\n'), ((750, 763), 'time.sleep', 'time.sleep', (['(0)'], {}), '(0)\n', (760, 763), False, 'import time\n'), ((776, 828), 'threading.Thread', 'Thread', ([], {'target': 'send', 'args': '(package[:],)', 'daemon': '(True)'}), '(target=send, args=(package[:],), daemon=True)\n', (782, 828), False, 'from threading import Thread, Lock\n')] |
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
class PinCard(models.Model):
    """Tokenised card record for billing.

    Only a gateway token, a masked display number and the billing address
    are stored — no full card number. NOTE(review): field names suggest
    the Pin payment gateway's card API; confirm against the integration.
    """
    # Gateway-issued card token; indexed for fast lookup, never edited locally.
    token = models.CharField(max_length=32, db_index=True, editable=False)
    display_number = models.CharField(max_length=20, editable=False)
    expiry_month = models.PositiveSmallIntegerField()
    expiry_year = models.PositiveSmallIntegerField()
    scheme = models.CharField(max_length=20, editable=False)
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    address_line1 = models.CharField(max_length=255)
    address_line2 = models.CharField(max_length=255, blank=True)
    address_city = models.CharField(max_length=255)
    address_postcode = models.CharField(max_length=20)
    address_state = models.CharField(max_length=255)
    address_country = models.CharField(max_length=255)
    created_at = models.DateTimeField(auto_now_add=True)
    # Optional owner; cards may exist without a local user account.
    user = models.ForeignKey(User, related_name='pin_cards', blank=True, null=True)

    class Meta:
        app_label = 'billing'

    # Python 2-style representation; NOTE(review): no __str__ for Python 3.
    def __unicode__(self):
        return 'Card %s' % self.display_number
class PinCustomer(models.Model):
    """Billing customer keyed by a unique gateway token, linked to a stored card."""
    token = models.CharField(unique=True, max_length=32)
    card = models.ForeignKey(PinCard, related_name='customers')
    email = models.EmailField()
    # NOTE(review): no auto_now_add — created_at must be set explicitly by callers.
    created_at = models.DateTimeField()
    user = models.ForeignKey(User, related_name='pin_customers', blank=True, null=True)

    class Meta:
        app_label = 'billing'

    # Python 2-style representation; NOTE(review): no __str__ for Python 3.
    def __unicode__(self):
        return 'Customer %s' % self.email
class PinCharge(models.Model):
    """Record of a single charge attempt, including its outcome messages."""
    token = models.CharField(unique=True, max_length=32, editable=False)
    card = models.ForeignKey(PinCard, related_name='charges', editable=False)
    customer = models.ForeignKey(PinCustomer, related_name='customers', null=True, blank=True, editable=False)
    # NOTE(review): BooleanField without a default — newer Django versions
    # require an explicit default for non-null booleans.
    success = models.BooleanField()
    amount = models.DecimalField(max_digits=16, decimal_places=2)
    # ISO 4217 currency code (3 chars) — presumably; confirm upstream usage.
    currency = models.CharField(max_length=3)
    description = models.CharField(max_length=255)
    email = models.EmailField()
    ip_address = models.GenericIPAddressField(blank=True, null=True)
    created_at = models.DateTimeField()
    status_message = models.CharField(max_length=255)
    error_message = models.CharField(max_length=255)
    user = models.ForeignKey(User, related_name='pin_charges', blank=True, null=True)

    class Meta:
        app_label = 'billing'

    # Python 2-style representation; NOTE(review): no __str__ for Python 3.
    def __unicode__(self):
        return 'Charge %s' % self.email
class PinRefund(models.Model):
    """Record of a refund issued against a previously stored charge."""
    token = models.CharField(unique=True, max_length=32)
    charge = models.ForeignKey(PinCharge, related_name='refunds')
    # NOTE(review): BooleanField without a default — newer Django versions
    # require an explicit default for non-null booleans.
    success = models.BooleanField()
    amount = models.DecimalField(max_digits=16, decimal_places=2)
    currency = models.CharField(max_length=3)
    created_at = models.DateTimeField()
    status_message = models.CharField(max_length=255)
    error_message = models.CharField(max_length=255)
    user = models.ForeignKey(User, related_name='pin_refunds', blank=True, null=True)

    class Meta:
        app_label = 'billing'

    # Python 2-style representation; NOTE(review): no __str__ for Python 3.
    def __unicode__(self):
        return 'Refund %s' % self.charge.email
| [
"django.db.models.GenericIPAddressField",
"django.contrib.auth.get_user_model",
"django.db.models.EmailField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.DateTimeField",
"django.db.models.DecimalField",
"django.db.models.PositiveSmallIntegerField",
"django.db.m... | [((185, 201), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (199, 201), False, 'from django.contrib.auth import get_user_model\n'), ((244, 306), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'db_index': '(True)', 'editable': '(False)'}), '(max_length=32, db_index=True, editable=False)\n', (260, 306), False, 'from django.db import models\n'), ((328, 375), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'editable': '(False)'}), '(max_length=20, editable=False)\n', (344, 375), False, 'from django.db import models\n'), ((395, 429), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {}), '()\n', (427, 429), False, 'from django.db import models\n'), ((448, 482), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {}), '()\n', (480, 482), False, 'from django.db import models\n'), ((496, 543), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'editable': '(False)'}), '(max_length=20, editable=False)\n', (512, 543), False, 'from django.db import models\n'), ((561, 593), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (577, 593), False, 'from django.db import models\n'), ((610, 642), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (626, 642), False, 'from django.db import models\n'), ((663, 695), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (679, 695), False, 'from django.db import models\n'), ((716, 760), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)'}), '(max_length=255, blank=True)\n', (732, 760), False, 'from django.db import models\n'), ((780, 812), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', 
(796, 812), False, 'from django.db import models\n'), ((836, 867), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (852, 867), False, 'from django.db import models\n'), ((888, 920), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (904, 920), False, 'from django.db import models\n'), ((943, 975), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (959, 975), False, 'from django.db import models\n'), ((993, 1032), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1013, 1032), False, 'from django.db import models\n'), ((1044, 1116), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'related_name': '"""pin_cards"""', 'blank': '(True)', 'null': '(True)'}), "(User, related_name='pin_cards', blank=True, null=True)\n", (1061, 1116), False, 'from django.db import models\n'), ((1285, 1329), 'django.db.models.CharField', 'models.CharField', ([], {'unique': '(True)', 'max_length': '(32)'}), '(unique=True, max_length=32)\n', (1301, 1329), False, 'from django.db import models\n'), ((1341, 1393), 'django.db.models.ForeignKey', 'models.ForeignKey', (['PinCard'], {'related_name': '"""customers"""'}), "(PinCard, related_name='customers')\n", (1358, 1393), False, 'from django.db import models\n'), ((1406, 1425), 'django.db.models.EmailField', 'models.EmailField', ([], {}), '()\n', (1423, 1425), False, 'from django.db import models\n'), ((1443, 1465), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1463, 1465), False, 'from django.db import models\n'), ((1477, 1553), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'related_name': '"""pin_customers"""', 'blank': '(True)', 'null': '(True)'}), "(User, related_name='pin_customers', blank=True, null=True)\n", (1494, 1553), False, 'from django.db import 
models\n'), ((1715, 1775), 'django.db.models.CharField', 'models.CharField', ([], {'unique': '(True)', 'max_length': '(32)', 'editable': '(False)'}), '(unique=True, max_length=32, editable=False)\n', (1731, 1775), False, 'from django.db import models\n'), ((1787, 1853), 'django.db.models.ForeignKey', 'models.ForeignKey', (['PinCard'], {'related_name': '"""charges"""', 'editable': '(False)'}), "(PinCard, related_name='charges', editable=False)\n", (1804, 1853), False, 'from django.db import models\n'), ((1869, 1969), 'django.db.models.ForeignKey', 'models.ForeignKey', (['PinCustomer'], {'related_name': '"""customers"""', 'null': '(True)', 'blank': '(True)', 'editable': '(False)'}), "(PinCustomer, related_name='customers', null=True, blank=\n True, editable=False)\n", (1886, 1969), False, 'from django.db import models\n'), ((1979, 2000), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (1998, 2000), False, 'from django.db import models\n'), ((2014, 2066), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(16)', 'decimal_places': '(2)'}), '(max_digits=16, decimal_places=2)\n', (2033, 2066), False, 'from django.db import models\n'), ((2082, 2112), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(3)'}), '(max_length=3)\n', (2098, 2112), False, 'from django.db import models\n'), ((2131, 2163), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (2147, 2163), False, 'from django.db import models\n'), ((2176, 2195), 'django.db.models.EmailField', 'models.EmailField', ([], {}), '()\n', (2193, 2195), False, 'from django.db import models\n'), ((2213, 2264), 'django.db.models.GenericIPAddressField', 'models.GenericIPAddressField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2241, 2264), False, 'from django.db import models\n'), ((2282, 2304), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', 
(2302, 2304), False, 'from django.db import models\n'), ((2326, 2358), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (2342, 2358), False, 'from django.db import models\n'), ((2379, 2411), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (2395, 2411), False, 'from django.db import models\n'), ((2423, 2497), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'related_name': '"""pin_charges"""', 'blank': '(True)', 'null': '(True)'}), "(User, related_name='pin_charges', blank=True, null=True)\n", (2440, 2497), False, 'from django.db import models\n'), ((2657, 2701), 'django.db.models.CharField', 'models.CharField', ([], {'unique': '(True)', 'max_length': '(32)'}), '(unique=True, max_length=32)\n', (2673, 2701), False, 'from django.db import models\n'), ((2715, 2767), 'django.db.models.ForeignKey', 'models.ForeignKey', (['PinCharge'], {'related_name': '"""refunds"""'}), "(PinCharge, related_name='refunds')\n", (2732, 2767), False, 'from django.db import models\n'), ((2782, 2803), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (2801, 2803), False, 'from django.db import models\n'), ((2817, 2869), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(16)', 'decimal_places': '(2)'}), '(max_digits=16, decimal_places=2)\n', (2836, 2869), False, 'from django.db import models\n'), ((2885, 2915), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(3)'}), '(max_length=3)\n', (2901, 2915), False, 'from django.db import models\n'), ((2933, 2955), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (2953, 2955), False, 'from django.db import models\n'), ((2977, 3009), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (2993, 3009), False, 'from django.db import models\n'), ((3030, 3062), 
'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (3046, 3062), False, 'from django.db import models\n'), ((3074, 3148), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'related_name': '"""pin_refunds"""', 'blank': '(True)', 'null': '(True)'}), "(User, related_name='pin_refunds', blank=True, null=True)\n", (3091, 3148), False, 'from django.db import models\n')] |
import unittest #Importing the unittest module
from contact import Contact #Importing the contact class
#import pyperclip #Pyperclip will allow us to copy and paste items to our clipboard
class TestContact(unittest.TestCase):
    """Unit tests exercising the Contact class behaviour."""

    def setUp(self):
        # Build a fresh fixture contact before every test.
        self.new_contact = Contact("Lyn","Muthoni","0796654066","<EMAIL>")

    def tearDown(self):
        # Reset the class-level store so each test starts from empty.
        Contact.contact_list = []

    def test_instance(self):
        """The constructor stores every attribute it was given."""
        created = self.new_contact
        expectations = [
            (created.first_name, "Lyn"),
            (created.last_name, "Muthoni"),
            (created.phone_number, "0796654066"),
            (created.email, "<EMAIL>"),
        ]
        for actual, expected in expectations:
            self.assertEqual(actual, expected)

    def test_save_contact(self):
        """Saving one contact leaves exactly one entry in the list."""
        self.new_contact.save_contact()
        self.assertEqual(1, len(Contact.contact_list))

    def test_save_multiple_contact(self):
        """Saving two distinct contacts keeps both in the list."""
        self.new_contact.save_contact()
        extra = Contact("Test","user","0712345678","<EMAIL>")
        extra.save_contact()
        self.assertEqual(2, len(Contact.contact_list))

    def test_delete_contact(self):
        """Deleting a contact removes only that entry."""
        self.new_contact.save_contact()
        extra = Contact("Test","user","0712345678","<EMAIL>")
        extra.save_contact()
        self.new_contact.delete_contact()
        self.assertEqual(1, len(Contact.contact_list))

    def test_find_contact_by_number(self):
        """A contact can be looked up by its phone number."""
        self.new_contact.save_contact()
        extra = Contact("Test","user","0711223344","<EMAIL>")
        extra.save_contact()
        match = Contact.find_by_number("0711223344")
        self.assertEqual(match.email, extra.email)

    def test_contact_exists(self):
        """contact_exist reports True for a saved number."""
        self.new_contact.save_contact()
        extra = Contact("Test","user","0711223344","<EMAIL>")
        extra.save_contact()
        self.assertTrue(Contact.contact_exist("0711223344"))

    def test_display_all_contacts(self):
        """display_contacts hands back the full contact list."""
        self.assertEqual(Contact.display_contacts(), Contact.contact_list)

    # Kept disabled: requires pyperclip for clipboard access.
    '''
    def test_copy_email(self):
        self.new_contact.save_contact()
        Contact.copy_email("0712345678")
        self.assertEqual(self.new_contact.email,pyperclip.paste())
    '''
# Run the whole suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"contact.Contact.contact_exist",
"contact.Contact.find_by_number",
"contact.Contact",
"unittest.main",
"contact.Contact.display_contacts"
] | [((2889, 2904), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2902, 2904), False, 'import unittest\n'), ((275, 325), 'contact.Contact', 'Contact', (['"""Lyn"""', '"""Muthoni"""', '"""0796654066"""', '"""<EMAIL>"""'], {}), "('Lyn', 'Muthoni', '0796654066', '<EMAIL>')\n", (282, 325), False, 'from contact import Contact\n'), ((1131, 1179), 'contact.Contact', 'Contact', (['"""Test"""', '"""user"""', '"""0712345678"""', '"""<EMAIL>"""'], {}), "('Test', 'user', '0712345678', '<EMAIL>')\n", (1138, 1179), False, 'from contact import Contact\n'), ((1453, 1501), 'contact.Contact', 'Contact', (['"""Test"""', '"""user"""', '"""0712345678"""', '"""<EMAIL>"""'], {}), "('Test', 'user', '0712345678', '<EMAIL>')\n", (1460, 1501), False, 'from contact import Contact\n'), ((1868, 1916), 'contact.Contact', 'Contact', (['"""Test"""', '"""user"""', '"""0711223344"""', '"""<EMAIL>"""'], {}), "('Test', 'user', '0711223344', '<EMAIL>')\n", (1875, 1916), False, 'from contact import Contact\n'), ((1987, 2023), 'contact.Contact.find_by_number', 'Contact.find_by_number', (['"""0711223344"""'], {}), "('0711223344')\n", (2009, 2023), False, 'from contact import Contact\n'), ((2240, 2288), 'contact.Contact', 'Contact', (['"""Test"""', '"""user"""', '"""0711223344"""', '"""<EMAIL>"""'], {}), "('Test', 'user', '0711223344', '<EMAIL>')\n", (2247, 2288), False, 'from contact import Contact\n'), ((2360, 2395), 'contact.Contact.contact_exist', 'Contact.contact_exist', (['"""0711223344"""'], {}), "('0711223344')\n", (2381, 2395), False, 'from contact import Contact\n'), ((2545, 2571), 'contact.Contact.display_contacts', 'Contact.display_contacts', ([], {}), '()\n', (2569, 2571), False, 'from contact import Contact\n')] |
from celery import Celery
import os
# Point Celery at the default Django settings module if none is set yet.
if not os.getenv('DJANGO_SETTINGS_MODULE'):
    os.environ['DJANGO_SETTINGS_MODULE'] = 'mlh.settings.dev'
# Create the Celery application named 'mlh' and use Redis DB 15 as broker.
celery_app = Celery('mlh',broker='redis://127.0.0.1:6379/15')
# Auto-discover task modules from the listed packages.
celery_app.autodiscover_tasks(['celery_tasks.sms',])
"celery.Celery",
"os.getenv"
] | [((208, 257), 'celery.Celery', 'Celery', (['"""mlh"""'], {'broker': '"""redis://127.0.0.1:6379/15"""'}), "('mlh', broker='redis://127.0.0.1:6379/15')\n", (214, 257), False, 'from celery import Celery\n'), ((66, 101), 'os.getenv', 'os.getenv', (['"""DJANGO_SETTINGS_MODULE"""'], {}), "('DJANGO_SETTINGS_MODULE')\n", (75, 101), False, 'import os\n')] |
import unittest
from pywiktionary.parsers import basic_parser
def get_pizza_html_extract():
    """Return the saved HTML fixture for the Italian 'pizza' page."""
    fixture_path = 'tests/file/html-responses/pizza-it.html'
    with open(fixture_path, 'r', encoding='utf-8') as handle:
        return handle.read()
class BasicParseTestCase(unittest.TestCase):
    """Tests for the BasicParser base class."""

    def test_init(self):
        # The constructor must store the raw HTML unchanged.
        html = get_pizza_html_extract()
        parser = basic_parser.BasicParser(html)
        self.assertEqual(html, parser.html)

    def test_parse_method(self):
        # parse() is abstract on the base class and must raise.
        parser = basic_parser.BasicParser(get_pizza_html_extract())
        self.assertRaises(NotImplementedError, parser.parse)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main"
] | [((660, 675), 'unittest.main', 'unittest.main', ([], {}), '()\n', (673, 675), False, 'import unittest\n')] |
import logging, traceback, time
from bottle import request
from snuggle import configuration
from snuggle import mediawiki
from snuggle import errors
from snuggle.data import types
from snuggle.web.util import responses, user_data
# Module-level logger for the events processing endpoints.
logger = logging.getLogger("snuggle.web.processing.users")
class Events:
	"""Serves event queries (and action submissions) backed by ``model``."""
	def __init__(self, model):
		# model: data-access layer exposing an ``events`` collection with
		# ``query(**kwargs)`` and ``insert(event)`` methods.
		self.model = model
	def action(self, session, doc):
		# NOTE(review): the serialized request is bound but never used or
		# returned -- this method looks unfinished; confirm intended behaviour.
		request = types.ActionRequest.serialize(doc)
	def query(self, session, query):
		"""
		Queries for PUBLIC events and public event content only.
		"""
		try:
			start = time.time()
			# Collect serialized docs for PUBLIC events only; the event id is
			# blanked so it is never leaked to the client.
			event_docs = []
			for event in self.model.events.query(**query):
				if event.PUBLIC:
					doc = event.serialize()
					doc['id'] = None
					event_docs.append(doc)
			end = time.time()
		except Exception:
			logger.error(traceback.format_exc())
			return responses.database_error("getting a set of events with query %s" % query)
		# Clamp the 'after' cursor so a follow-up query can never reach
		# further back than the synchronizer's configured maximum age.
		# NOTE(review): this mutates the caller-supplied ``query`` dict.
		query['after'] = max(
			query.get('after', 0),
			time.time() - configuration.snuggle['changes_synchronizer']['max_age']
		)
		# Best effort: record that this query happened.  Failures are logged
		# but deliberately never block the response below.
		try:
			snuggler, data = user_data()
			event = types.EventsQueried(
				query,
				end-start,
				len(event_docs),
				snuggler,
				data
			)
			self.model.events.insert(event)
		except Exception as e:
			logger.error(traceback.format_exc())
		return responses.success(event_docs)
| [
"logging.getLogger",
"snuggle.web.util.responses.success",
"traceback.format_exc",
"snuggle.data.types.ActionRequest.serialize",
"snuggle.web.util.user_data",
"snuggle.web.util.responses.database_error",
"time.time"
] | [((242, 291), 'logging.getLogger', 'logging.getLogger', (['"""snuggle.web.processing.users"""'], {}), "('snuggle.web.processing.users')\n", (259, 291), False, 'import logging, traceback, time\n'), ((403, 437), 'snuggle.data.types.ActionRequest.serialize', 'types.ActionRequest.serialize', (['doc'], {}), '(doc)\n', (432, 437), False, 'from snuggle.data import types\n'), ((1315, 1344), 'snuggle.web.util.responses.success', 'responses.success', (['event_docs'], {}), '(event_docs)\n', (1332, 1344), False, 'from snuggle.web.util import responses, user_data\n'), ((563, 574), 'time.time', 'time.time', ([], {}), '()\n', (572, 574), False, 'import logging, traceback, time\n'), ((762, 773), 'time.time', 'time.time', ([], {}), '()\n', (771, 773), False, 'import logging, traceback, time\n'), ((1080, 1091), 'snuggle.web.util.user_data', 'user_data', ([], {}), '()\n', (1089, 1091), False, 'from snuggle.web.util import responses, user_data\n'), ((844, 917), 'snuggle.web.util.responses.database_error', 'responses.database_error', (["('getting a set of events with query %s' % query)"], {}), "('getting a set of events with query %s' % query)\n", (868, 917), False, 'from snuggle.web.util import responses, user_data\n'), ((975, 986), 'time.time', 'time.time', ([], {}), '()\n', (984, 986), False, 'import logging, traceback, time\n'), ((810, 832), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (830, 832), False, 'import logging, traceback, time\n'), ((1275, 1297), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1295, 1297), False, 'import logging, traceback, time\n')] |
# =======================================================================
#
# Copyright (C) 2018, Hisilicon Technologies Co., Ltd. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1 Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2 Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3 Neither the names of the copyright holders nor the names of the
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =======================================================================
#
"""presenter facial recognition server module"""
import os
import json
import threading
import random
import logging
from logging.config import fileConfig
import numpy as np
from json.decoder import JSONDecodeError
from google.protobuf.message import DecodeError
import common.presenter_message_pb2 as presenter_message_pb2
from common.channel_manager import ChannelManager
from common.presenter_socket_server import PresenterSocketServer
from common.app_manager import AppManager
import facial_recognition.src.facial_recognition_message_pb2 as pb2
from facial_recognition.src.config_parser import ConfigParser
from facial_recognition.src.facial_recognition_handler import FacialRecognitionHandler
# Face registration times out after 10 seconds.
FACE_REGISTER_TIME_OUT = 10
# Presenter server type identifier.
SERVER_TYPE = "facial_recognition"
# Maximum length of an app id/name.
APP_ID_MAX_LENGTH = 20
# At most 2 apps may be connected at once.
MAX_APP_NUM = 2
# Expected length of a face feature vector.
FEATURE_VECTOR_LENGTH = 1024
# Face registration status codes.
FACE_REGISTER_STATUS_WAITING = 1
FACE_REGISTER_STATUS_SUCCEED = 2
FACE_REGISTER_STATUS_FAILED = 3
class FacialRecognitionServer(PresenterSocketServer):
    '''A server for face recognition'''

    def __init__(self, config):
        """
        Description: class init func
        Input:
            config: config information
        Returns: NA
        """
        server_address = (config.presenter_server_ip,
                          int(config.presenter_server_port))
        super(FacialRecognitionServer, self).__init__(server_address)
        self.storage_dir = config.storage_dir
        self.max_face_num = int(config.max_face_num)
        self.face_match_threshold = float(config.face_match_threshold)
        self.register_dict = {}
        self.app_manager = AppManager()
        self.channel_manager = ChannelManager()
        # JSON file persisting the registered-face database.
        self.face_register_file = os.path.join(self.storage_dir,
                                               "registered_faces.json")
        self._init_face_database()

    def _init_face_database(self):
        """
        Description: Init face recognition database,
                     read information from face_register_file
        Input: NA
        Returns: NA
        """
        if not os.path.isfile(self.face_register_file):
            with open(self.face_register_file, "w", encoding="utf-8") as f:
                f.write("{}")

        with open(self.face_register_file, "r") as f:
            self.face_lock = threading.Lock()
            self.registered_faces = json.load(f)
        self._filter_registration_data()

    def _filter_registration_data(self):
        # Drop database entries whose backing image file no longer exists.
        face_dict = self.registered_faces.copy()
        for i in face_dict:
            image_path = os.path.join(self.storage_dir, i + ".jpg")
            if not os.path.isfile(image_path):
                del self.registered_faces[i]

    def get_all_face(self):
        """
        Description: get registered face list.
        Input: NA
        Returns: a list of registered face names
        """
        with self.face_lock:
            return [i for i in self.registered_faces]

    def save_face_image(self, name, image):
        """
        Description: save face image.
        Input:
            name: face name
            image: face image bytes
        Returns: True or False
        """
        image_file = os.path.join(self.storage_dir, name + ".jpg")
        try:
            #image = image.decode("utf-8")
            with open(image_file, "wb") as f:
                f.write(image)
                return True
        except (OSError, TypeError) as exp:
            logging.error(exp)
            return False

    def get_app_socket(self, app_id):
        """
        Description: get a socket which is bound to the app.
        Input:
            app_id: id of the app
        Returns: socket
        """
        return self.app_manager.get_socket_by_app_id(app_id)

    def list_registered_apps(self):
        """
        Description: get registered apps list.
        Input: NA
        Returns: app list
        """
        return self.app_manager.list_app()

    def delete_faces(self, name_list):
        """
        Description: delete registered faces in name_list
        Input:
            name_list: a name list
        Returns: True or False
        """
        with self.face_lock:
            for i in name_list:
                if self.registered_faces.get(i):
                    backup = self.registered_faces[i]
                    del self.registered_faces[i]
                    try:
                        with open(self.face_register_file, "w") as f:
                            json.dump(self.registered_faces, f)
                        image_file = os.path.join(
                            self.storage_dir, i + ".jpg")
                        os.remove(image_file)
                    except (OSError, JSONDecodeError) as exp:
                        logging.error(exp)
                        # Restore the entry we failed to persist before bailing.
                        self.registered_faces[i] = backup
                        return False
            return True

    def _clean_connect(self, sock_fileno, epoll, conns, msgs):
        """
        Description: close socket, and clean local variables
        Input:
            sock_fileno: a socket fileno, return value of socket.fileno()
            epoll: a set of select.epoll.
            conns: all socket connections registered in epoll
            msgs: msg read from a socket
        """
        logging.info("clean fd:%s, conns:%s", sock_fileno, conns)
        self.app_manager.unregister_app_by_fd(sock_fileno)
        epoll.unregister(sock_fileno)
        conns[sock_fileno].close()
        del conns[sock_fileno]
        del msgs[sock_fileno]

    def _process_msg(self, conn, msg_name, msg_data):
        """
        Total entrance to process protobuf msg
        Input:
            conn: a socket connection
            msg_name: name of a msg.
            msg_data: msg body, serialized by protobuf
        Returns:
            False: some error occurred
            True: succeed
        """
        # process register app request
        if msg_name == pb2._REGISTERAPP.full_name:
            ret = self._process_register_app(conn, msg_data)
        # process face result, receive face feature data from presenter agent
        elif msg_name == pb2._FACERESULT.full_name:
            ret = self._process_face_result(msg_data)
        elif msg_name == pb2._FRAMEINFO.full_name:
            ret = self._process_frame_info(conn, msg_data)
        elif msg_name == presenter_message_pb2._OPENCHANNELREQUEST.full_name:
            ret = self._process_open_channel(conn, msg_data)
        # process heartbeat request, it is used to keep a channel path alive
        elif msg_name == presenter_message_pb2._HEARTBEATMESSAGE.full_name:
            ret = self._process_heartbeat(conn)
        else:
            logging.error("Not recognized msg type %s", msg_name)
            ret = False

        return ret

    def _process_heartbeat(self, conn):
        '''
        set heartbeat
        Input:
            conn: a socket connection
        Returns:
            True: set heartbeat ok.
        '''
        sock_fileno = conn.fileno()
        if self.app_manager.get_app_id_by_socket(sock_fileno):
            self.app_manager.set_heartbeat(sock_fileno)

        handler = self.channel_manager.get_channel_handler_by_fd(sock_fileno)
        if handler is not None:
            handler.set_heartbeat()
        return True

    def _parse_protobuf(self, protobuf, msg_data):
        """
        Description: parse protobuf
        Input:
            protobuf: a struct defined by protobuf
            msg_data: msg body, serialized by protobuf
        Returns: True or False
        """
        try:
            protobuf.ParseFromString(msg_data)
            return True
        except DecodeError as exp:
            logging.error(exp)
            return False

    def _process_register_app(self, conn, msg_data):
        """
        Description: process register_app message
        Input:
            conn: a socket connection
            msg_data: msg body, serialized by protobuf
        Returns: True or False
        """
        request = pb2.RegisterApp()
        response = pb2.CommonResponse()
        msg_name = pb2._COMMONRESPONSE.full_name
        if not self._parse_protobuf(request, msg_data):
            response.ret = pb2.kErrorOther
            response.message = "ParseFromString exception"
            self.send_message(conn, response, msg_name)
            return False

        app_id = request.id
        app_type = request.type
        # check app id if exist
        if self.app_manager.is_app_exist(app_id):
            logging.error("App %s is already exist.", app_id)
            response.ret = pb2.kErrorAppRegisterExist
            response.message = "App {} is already exist.".format(app_id)
            self.send_message(conn, response, msg_name)
        elif self.app_manager.get_app_num() >= MAX_APP_NUM:
            logging.error("App number reach the upper limit")
            response.ret = pb2.kErrorAppRegisterLimit
            response.message = "App number reach the upper limit"
            self.send_message(conn, response, msg_name)
        elif app_type != SERVER_TYPE:
            logging.error("App type %s error", app_type)
            response.ret = pb2.kErrorAppRegisterType
            response.message = "App type {} error".format(app_type)
            self.send_message(conn, response, msg_name)
        elif len(app_id) > APP_ID_MAX_LENGTH:
            logging.error("App id %s is too long", app_id)
            response.ret = pb2.kErrorOther
            response.message = "App id: {} is too long".format(app_id)
            self.send_message(conn, response, msg_name)
        else:
            self.app_manager.register_app(app_id, conn)
            response.ret = pb2.kErrorNone
            response.message = "Register app {} succeed".format(app_id)
            self.send_message(conn, response, msg_name)
            return True

        return False

    def _process_face_result(self, msg_data):
        """
        Description: process face_result message
        Input:
            msg_data: msg body, serialized by protobuf
        Returns: True or False
        """
        face_result = pb2.FaceResult()
        if not self._parse_protobuf(face_result, msg_data):
            return False

        face_id = face_result.id
        if not self.register_dict.get(face_id):
            logging.warning("face id %s is already deleted", face_id)
            return True

        ret = face_result.response.ret
        if ret != pb2.kErrorNone:
            err_msg = face_result.response.message
            logging.error("get face feature error message: %s", err_msg)
            status = FACE_REGISTER_STATUS_FAILED
            message = "Get face feature failed"
            self._update_register_dict(face_id, status, message)
            return True

        face_num = len(face_result.feature)
        if face_num == 0:
            status = FACE_REGISTER_STATUS_FAILED
            message = "No face recognized"
            self._update_register_dict(face_id, status, message)
        elif face_num > 1:
            status = FACE_REGISTER_STATUS_FAILED
            message = "{} faces recognized".format(face_num)
            self._update_register_dict(face_id, status, message)
        else:
            box = face_result.feature[0].box
            # Fix: the fourth element is the bottom-right *y* coordinate.
            # The original repeated box.rb_x, corrupting the stored box
            # (compare _recognize_face, which already uses rb_y here).
            face_coordinate = [box.lt_x, box.lt_y, box.rb_x, box.rb_y]
            feature_vector = [i for i in face_result.feature[0].vector]
            if len(feature_vector) != FEATURE_VECTOR_LENGTH:
                logging.error("feature_vector length not equal 1024")
                status = FACE_REGISTER_STATUS_FAILED
                message = "Face feature vector length invalid"
                self._update_register_dict(face_id, status, message)
                return True

            return self._save_face_feature(face_id, face_coordinate,
                                           feature_vector)

        return True

    def _update_register_dict(self, face_id, status, message):
        """
        Description: update register_dict
        Input:
            face_id: id of face
            status: status of face register
            message: message of status of face register
        Returns: True or False
        """
        if self.register_dict.get(face_id):
            self.register_dict[face_id]["status"] = status
            self.register_dict[face_id]["message"] = message
            # Wake the waiting register_face() call.
            self.register_dict[face_id]["event"].set()

    def _save_face_feature(self, face_id, face_coordinate, feature_vector):
        """
        Description: save face_feature
        Input:
            face_id: id of face
            face_coordinate: face coordinates
            feature_vector: face feature vector
        Returns: True or False
        """
        with self.face_lock:
            self.registered_faces[face_id] = {
                "coordinate": face_coordinate,
                "feature": feature_vector
            }
            try:
                with open(self.face_register_file, "w") as f:
                    json.dump(self.registered_faces, f)
                status = FACE_REGISTER_STATUS_SUCCEED
                message = "Successful registration"
                self._update_register_dict(face_id, status, message)
                return True
            except (OSError, JSONDecodeError) as exp:
                logging.error(exp)
                # Roll back the in-memory entry if persisting failed.
                del self.registered_faces[face_id]
                status = FACE_REGISTER_STATUS_FAILED
                message = "save face feature to json file failed"
                self._update_register_dict(face_id, status, message)
                return False

    def _process_open_channel(self, conn, msg_data):
        """
        Description: process open channel message
        Input:
            conn: a socket connection
            msg_data: msg body, serialized by protobuf
        Returns: True or False
        """
        request = presenter_message_pb2.OpenChannelRequest()
        response = presenter_message_pb2.OpenChannelResponse()
        if not self._parse_protobuf(request, msg_data):
            channel_name = "unknown channel"
            err_code = presenter_message_pb2.kOpenChannelErrorOther
            return self._response_open_channel(conn, channel_name,
                                               response, err_code)

        channel_name = request.channel_name
        # check channel name if exist
        if not self.channel_manager.is_channel_exist(channel_name):
            logging.error("channel name %s is not exist.", channel_name)
            err_code = presenter_message_pb2.kOpenChannelErrorNoSuchChannel
            return self._response_open_channel(conn, channel_name,
                                               response, err_code)
        #ret = self.channel_manager.register_one_channel(channel_name)
        # if ret != ChannelManager.err_code_ok:
        #    logging.error("Create the channel %s failed!, and ret is %d", channel_name, ret)
        #    err_code = pb2.kOpenChannelErrorOther
        #    self._response_open_channel(conn, channel_name, response, err_code)

        # check channel path if busy
        if self.channel_manager.is_channel_busy(channel_name):
            logging.error("channel path %s is busy.", channel_name)
            err = presenter_message_pb2.kOpenChannelErrorChannelAlreadyOpened
            return self._response_open_channel(conn, channel_name,
                                               response, err)

        content_type = presenter_message_pb2.kChannelContentTypeVideo
        if request.content_type == content_type:
            media_type = "video"
        else:
            logging.error("media type %s is not recognized.",
                          request.content_type)
            err_code = presenter_message_pb2.kOpenChannelErrorOther
            return self._response_open_channel(conn, channel_name,
                                               response, err_code)

        handler = FacialRecognitionHandler(channel_name, media_type)
        sock = conn.fileno()
        self.channel_manager.create_channel_resource(channel_name, sock,
                                                     media_type, handler)
        err_code = presenter_message_pb2.kOpenChannelErrorNone
        return self._response_open_channel(conn, channel_name,
                                           response, err_code)

    def _process_frame_info(self, conn, msg_data):
        """
        Description: process frame info message
        Input:
            conn: a socket connection
            msg_data: msg body, serialized by protobuf
        Returns: True or False
        """
        request = pb2.FrameInfo()
        response = pb2.CommonResponse()
        msg_name = pb2._COMMONRESPONSE.full_name
        if not self._parse_protobuf(request, msg_data):
            return False

        sock_fileno = conn.fileno()
        handler = self.channel_manager.get_channel_handler_by_fd(sock_fileno)
        if handler is None:
            logging.error("get channel handler failed")
            response.ret = pb2.kErrorOther
            response.message = "channel error."
            self.send_message(conn, response, msg_name)
            return False

        face_list = self._recognize_face(request.feature)
        handler.save_frame(request.image, face_list)
        response.ret = pb2.kErrorNone
        response.message = "process frame info suceed."
        self.send_message(conn, response, msg_name)
        return True

    def _recognize_face(self, face_feature):
        """
        Description: recognize which face it is.
        Input:
            face_feature: face feature
        Returns: face list
        """
        face_list = []
        for i in face_feature:
            face_info = {}
            box = i.box
            coordinate = [box.lt_x, box.lt_y, box.rb_x, box.rb_y]
            feature_vector = i.vector
            if len(feature_vector) != FEATURE_VECTOR_LENGTH:
                logging.error("feature_vector length not equal 1024")
                continue

            (name, score) = self._compute_face_feature(feature_vector)
            face_info["coordinate"] = coordinate
            face_info["name"] = name
            face_info["confidence"] = score
            face_list.append(face_info)
        return face_list

    def _compute_face_feature(self, feture_vector):
        """
        Description: compute score of the feture_vector
        Input:
            feture_vector: face feature vector
        Returns: face name and score
        """
        highest_score_face = "Unknown"
        highest_score = 0
        with self.face_lock:
            for i in self.registered_faces:
                feature = self.registered_faces[i]["feature"]
                score = self._compute_similar_degree(feature, feture_vector)
                # Faces scoring below the threshold are never matched.
                if score < self.face_match_threshold:
                    continue
                if score > highest_score:
                    highest_score = score
                    highest_score_face = i

        return (highest_score_face, highest_score)

    def _compute_similar_degree(self, feture_vector1, feture_vector2):
        """
        Description: compute cosine similarity of two vectors
        Input:
            feture_vector1: face feature vector
            feture_vector2: face feature vector
        Returns: score
        """
        vector1 = np.array(feture_vector1)
        vector2 = np.array(feture_vector2)
        square_diff = ((np.linalg.norm(vector1)) * (np.linalg.norm(vector2)))
        score = np.dot(vector1, vector2) / square_diff
        return score

    def stop_thread(self):
        """
        Description: clean thread when process exit.
        Input: NA
        Returns: NA
        """
        channel_manager = ChannelManager([])
        channel_manager.close_all_thread()
        self.set_exit_switch()
        self.app_manager.set_thread_switch()
class FacialRecognitionManager():
    '''Manager of Face Recognition, a class providing APIs'''
    __instance = None
    server = None

    def __init__(self, server=None):
        '''init func'''

    def __new__(cls, server=None):
        """ensure only a single instance created. """
        if cls.__instance is None:
            cls.__instance = object.__new__(cls)
            cls.server = server
        return cls.__instance

    def _choose_random_app(self):
        """
        Description: choose a random app online.
        Input: NA
        Returns: an app name, or None when no app is online
        """
        app_list = self.server.list_registered_apps()
        if app_list:
            index = random.randint(0, len(app_list) - 1)
            return app_list[index]

        return None

    def get_app_list(self):
        """
        Description: API for getting online app list
        Input: NA
        Returns: app list
        """
        return self.server.list_registered_apps()

    def register_face(self, name, image):
        """
        Description: API for registering face
        Input:
            name: a face name
            image: a face picture
        Returns: (ret, msg)
        """
        # Input para check
        if not isinstance(name, str):
            return (False, "Name is not string")
        if not isinstance(image, bytes):
            return (False, "Image is not bytes")
        if self._get_face_number() >= self.server.max_face_num:
            return (False, "Face number limit")

        app_id = self._choose_random_app()
        if app_id is None:
            return (False, "No app is online")

        conn = self.server.get_app_socket(app_id)
        if conn is None:
            return (False, "Internal Error, app lost socket")

        # Prepare sending face register message to agent
        request = pb2.FaceInfo()
        request.id = name
        request.image = image
        register_dict = self.server.register_dict
        # The event lets us block until the agent's async reply arrives.
        register_dict[name] = {
            "status": FACE_REGISTER_STATUS_WAITING,
            "message": "",
            "event": threading.Event()
        }
        msg_name = pb2._FACEINFO.full_name
        self.server.send_message(conn, request, msg_name)
        register_dict[name]["event"].wait(FACE_REGISTER_TIME_OUT)
        if register_dict[name]["status"] == FACE_REGISTER_STATUS_WAITING:
            logging.warning("Register face %s timeout", name)
            del register_dict[name]
            return (False, "10 sec Timeout")

        if register_dict[name]["status"] == FACE_REGISTER_STATUS_FAILED:
            err_msg = register_dict[name]["message"]
            logging.error("Register face %s failed, reason:%s",
                          name, register_dict[name]["message"])
            del register_dict[name]
            return (False, err_msg)

        ret = self.server.save_face_image(name, image)
        del register_dict[name]
        if ret:
            logging.info("Register face %s succeed", name)
            return (True, "Successful Registration")

        logging.error("Save face %s to database failed", name)
        return (False, "Save database error")

    def unregister_face(self, name_list):
        """
        Description: API for unregistering faces
        Input:
            name_list: a name list which will be deleted.
        Returns: True or False
        """
        if isinstance(name_list, list):
            return self.server.delete_faces(name_list)

        logging.error("unregister face fail")
        return False

    def get_all_face_name(self):
        """
        Description: API for geting all registered face names
        Input: NA
        Returns: a name list
        """
        return self.server.get_all_face()

    def _get_face_number(self):
        """
        Description: geting total face number
        Input: NA
        Returns: total face number
        """
        return len(self.get_all_face_name())

    def get_faces(self, name_list):
        """
        Description: API for geting specified face info.
        Input: a name list.
        Returns: a list include face name and image.
        """
        if not isinstance(name_list, list):
            return []

        face_list = []
        for i in name_list:
            face_info = {}
            face_info["name"] = i
            try:
                image_file = os.path.join(self.server.storage_dir, i + ".jpg")
                # Fix: use a context manager so the file handle is closed
                # promptly (the original leaked open(...).read() handles).
                with open(image_file, 'rb') as image_fp:
                    face_info["image"] = image_fp.read()
            except OSError as exp:
                logging.error(exp)
                continue

            face_list.append(face_info)
        return face_list
def run():
    '''Build and return the facial recognition server, or None on bad config.'''
    # Load the configuration file first.
    config = ConfigParser()

    # Wire up logging from the bundled config before anything else logs.
    fileConfig(os.path.join(ConfigParser.root_path, "config/logging.conf"))
    logging.getLogger('facial_recognition')

    # Refuse to start on an invalid configuration.
    if not config.config_verify():
        return None

    server = FacialRecognitionServer(config)
    # Bind the singleton manager to this server instance.
    FacialRecognitionManager(server)
    return server
| [
"logging.getLogger",
"common.presenter_message_pb2.OpenChannelRequest",
"facial_recognition.src.facial_recognition_handler.FacialRecognitionHandler",
"common.app_manager.AppManager",
"numpy.array",
"numpy.linalg.norm",
"facial_recognition.src.facial_recognition_message_pb2.RegisterApp",
"logging.info"... | [((26636, 26650), 'facial_recognition.src.config_parser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (26648, 26650), False, 'from facial_recognition.src.config_parser import ConfigParser\n'), ((26689, 26748), 'os.path.join', 'os.path.join', (['ConfigParser.root_path', '"""config/logging.conf"""'], {}), "(ConfigParser.root_path, 'config/logging.conf')\n", (26701, 26748), False, 'import os\n'), ((26753, 26778), 'logging.config.fileConfig', 'fileConfig', (['log_file_path'], {}), '(log_file_path)\n', (26763, 26778), False, 'from logging.config import fileConfig\n'), ((26783, 26822), 'logging.getLogger', 'logging.getLogger', (['"""facial_recognition"""'], {}), "('facial_recognition')\n", (26800, 26822), False, 'import logging\n'), ((3522, 3534), 'common.app_manager.AppManager', 'AppManager', ([], {}), '()\n', (3532, 3534), False, 'from common.app_manager import AppManager\n'), ((3566, 3582), 'common.channel_manager.ChannelManager', 'ChannelManager', ([], {}), '()\n', (3580, 3582), False, 'from common.channel_manager import ChannelManager\n'), ((3637, 3692), 'os.path.join', 'os.path.join', (['self.storage_dir', '"""registered_faces.json"""'], {}), "(self.storage_dir, 'registered_faces.json')\n", (3649, 3692), False, 'import os\n'), ((5076, 5121), 'os.path.join', 'os.path.join', (['self.storage_dir', "(name + '.jpg')"], {}), "(self.storage_dir, name + '.jpg')\n", (5088, 5121), False, 'import os\n'), ((7173, 7230), 'logging.info', 'logging.info', (['"""clean fd:%s, conns:%s"""', 'sock_fileno', 'conns'], {}), "('clean fd:%s, conns:%s', sock_fileno, conns)\n", (7185, 7230), False, 'import logging\n'), ((9921, 9938), 'facial_recognition.src.facial_recognition_message_pb2.RegisterApp', 'pb2.RegisterApp', ([], {}), '()\n', (9936, 9938), True, 'import facial_recognition.src.facial_recognition_message_pb2 as pb2\n'), ((9958, 9978), 'facial_recognition.src.facial_recognition_message_pb2.CommonResponse', 'pb2.CommonResponse', ([], {}), '()\n', (9976, 9978), 
True, 'import facial_recognition.src.facial_recognition_message_pb2 as pb2\n'), ((12030, 12046), 'facial_recognition.src.facial_recognition_message_pb2.FaceResult', 'pb2.FaceResult', ([], {}), '()\n', (12044, 12046), True, 'import facial_recognition.src.facial_recognition_message_pb2 as pb2\n'), ((15815, 15857), 'common.presenter_message_pb2.OpenChannelRequest', 'presenter_message_pb2.OpenChannelRequest', ([], {}), '()\n', (15855, 15857), True, 'import common.presenter_message_pb2 as presenter_message_pb2\n'), ((15877, 15920), 'common.presenter_message_pb2.OpenChannelResponse', 'presenter_message_pb2.OpenChannelResponse', ([], {}), '()\n', (15918, 15920), True, 'import common.presenter_message_pb2 as presenter_message_pb2\n'), ((17898, 17948), 'facial_recognition.src.facial_recognition_handler.FacialRecognitionHandler', 'FacialRecognitionHandler', (['channel_name', 'media_type'], {}), '(channel_name, media_type)\n', (17922, 17948), False, 'from facial_recognition.src.facial_recognition_handler import FacialRecognitionHandler\n'), ((18595, 18610), 'facial_recognition.src.facial_recognition_message_pb2.FrameInfo', 'pb2.FrameInfo', ([], {}), '()\n', (18608, 18610), True, 'import facial_recognition.src.facial_recognition_message_pb2 as pb2\n'), ((18630, 18650), 'facial_recognition.src.facial_recognition_message_pb2.CommonResponse', 'pb2.CommonResponse', ([], {}), '()\n', (18648, 18650), True, 'import facial_recognition.src.facial_recognition_message_pb2 as pb2\n'), ((21345, 21369), 'numpy.array', 'np.array', (['feture_vector1'], {}), '(feture_vector1)\n', (21353, 21369), True, 'import numpy as np\n'), ((21388, 21412), 'numpy.array', 'np.array', (['feture_vector2'], {}), '(feture_vector2)\n', (21396, 21412), True, 'import numpy as np\n'), ((21736, 21754), 'common.channel_manager.ChannelManager', 'ChannelManager', (['[]'], {}), '([])\n', (21750, 21754), False, 'from common.channel_manager import ChannelManager\n'), ((23722, 23736), 
'facial_recognition.src.facial_recognition_message_pb2.FaceInfo', 'pb2.FaceInfo', ([], {}), '()\n', (23734, 23736), True, 'import facial_recognition.src.facial_recognition_message_pb2 as pb2\n'), ((24941, 24995), 'logging.error', 'logging.error', (['"""Save face %s to database failed"""', 'name'], {}), "('Save face %s to database failed', name)\n", (24954, 24995), False, 'import logging\n'), ((25365, 25402), 'logging.error', 'logging.error', (['"""unregister face fail"""'], {}), "('unregister face fail')\n", (25378, 25402), False, 'import logging\n'), ((4003, 4042), 'os.path.isfile', 'os.path.isfile', (['self.face_register_file'], {}), '(self.face_register_file)\n', (4017, 4042), False, 'import os\n'), ((4234, 4250), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (4248, 4250), False, 'import threading\n'), ((4287, 4299), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4296, 4299), False, 'import json\n'), ((4489, 4531), 'os.path.join', 'os.path.join', (['self.storage_dir', "(i + '.jpg')"], {}), "(self.storage_dir, i + '.jpg')\n", (4501, 4531), False, 'import os\n'), ((10423, 10472), 'logging.error', 'logging.error', (['"""App %s is already exist."""', 'app_id'], {}), "('App %s is already exist.', app_id)\n", (10436, 10472), False, 'import logging\n'), ((12226, 12283), 'logging.warning', 'logging.warning', (['"""face id %s is already deleted"""', 'face_id'], {}), "('face id %s is already deleted', face_id)\n", (12241, 12283), False, 'import logging\n'), ((12445, 12505), 'logging.error', 'logging.error', (['"""get face feature error message: %s"""', 'err_msg'], {}), "('get face feature error message: %s', err_msg)\n", (12458, 12505), False, 'import logging\n'), ((16387, 16447), 'logging.error', 'logging.error', (['"""channel name %s is not exist."""', 'channel_name'], {}), "('channel name %s is not exist.', channel_name)\n", (16400, 16447), False, 'import logging\n'), ((17137, 17192), 'logging.error', 'logging.error', (['"""channel path %s is busy."""', 
'channel_name'], {}), "('channel path %s is busy.', channel_name)\n", (17150, 17192), False, 'import logging\n'), ((17579, 17650), 'logging.error', 'logging.error', (['"""media type %s is not recognized."""', 'request.content_type'], {}), "('media type %s is not recognized.', request.content_type)\n", (17592, 17650), False, 'import logging\n'), ((18936, 18979), 'logging.error', 'logging.error', (['"""get channel handler failed"""'], {}), "('get channel handler failed')\n", (18949, 18979), False, 'import logging\n'), ((21437, 21460), 'numpy.linalg.norm', 'np.linalg.norm', (['vector1'], {}), '(vector1)\n', (21451, 21460), True, 'import numpy as np\n'), ((21465, 21488), 'numpy.linalg.norm', 'np.linalg.norm', (['vector2'], {}), '(vector2)\n', (21479, 21488), True, 'import numpy as np\n'), ((21507, 21531), 'numpy.dot', 'np.dot', (['vector1', 'vector2'], {}), '(vector1, vector2)\n', (21513, 21531), True, 'import numpy as np\n'), ((23976, 23993), 'threading.Event', 'threading.Event', ([], {}), '()\n', (23991, 23993), False, 'import threading\n'), ((24258, 24307), 'logging.warning', 'logging.warning', (['"""Register face %s timeout"""', 'name'], {}), "('Register face %s timeout', name)\n", (24273, 24307), False, 'import logging\n'), ((24528, 24622), 'logging.error', 'logging.error', (['"""Register face %s failed, reason:%s"""', 'name', "register_dict[name]['message']"], {}), "('Register face %s failed, reason:%s', name, register_dict[\n name]['message'])\n", (24541, 24622), False, 'import logging\n'), ((24832, 24878), 'logging.info', 'logging.info', (['"""Register face %s succeed"""', 'name'], {}), "('Register face %s succeed', name)\n", (24844, 24878), False, 'import logging\n'), ((4551, 4577), 'os.path.isfile', 'os.path.isfile', (['image_path'], {}), '(image_path)\n', (4565, 4577), False, 'import os\n'), ((5335, 5353), 'logging.error', 'logging.error', (['exp'], {}), '(exp)\n', (5348, 5353), False, 'import logging\n'), ((9592, 9610), 'logging.error', 'logging.error', 
(['exp'], {}), '(exp)\n', (9605, 9610), False, 'import logging\n'), ((10728, 10777), 'logging.error', 'logging.error', (['"""App number reach the upper limit"""'], {}), "('App number reach the upper limit')\n", (10741, 10777), False, 'import logging\n'), ((19917, 19970), 'logging.error', 'logging.error', (['"""feature_vector length not equal 1024"""'], {}), "('feature_vector length not equal 1024')\n", (19930, 19970), False, 'import logging\n'), ((26258, 26307), 'os.path.join', 'os.path.join', (['self.server.storage_dir', "(i + '.jpg')"], {}), "(self.server.storage_dir, i + '.jpg')\n", (26270, 26307), False, 'import os\n'), ((11004, 11048), 'logging.error', 'logging.error', (['"""App type %s error"""', 'app_type'], {}), "('App type %s error', app_type)\n", (11017, 11048), False, 'import logging\n'), ((13401, 13454), 'logging.error', 'logging.error', (['"""feature_vector length not equal 1024"""'], {}), "('feature_vector length not equal 1024')\n", (13414, 13454), False, 'import logging\n'), ((14934, 14969), 'json.dump', 'json.dump', (['self.registered_faces', 'f'], {}), '(self.registered_faces, f)\n', (14943, 14969), False, 'import json\n'), ((15243, 15261), 'logging.error', 'logging.error', (['exp'], {}), '(exp)\n', (15256, 15261), False, 'import logging\n'), ((26426, 26444), 'logging.error', 'logging.error', (['exp'], {}), '(exp)\n', (26439, 26444), False, 'import logging\n'), ((6444, 6486), 'os.path.join', 'os.path.join', (['self.storage_dir', "(i + '.jpg')"], {}), "(self.storage_dir, i + '.jpg')\n", (6456, 6486), False, 'import os\n'), ((6540, 6561), 'os.remove', 'os.remove', (['image_file'], {}), '(image_file)\n', (6549, 6561), False, 'import os\n'), ((11284, 11330), 'logging.error', 'logging.error', (['"""App id %s is too long"""', 'app_id'], {}), "('App id %s is too long', app_id)\n", (11297, 11330), False, 'import logging\n'), ((6371, 6406), 'json.dump', 'json.dump', (['self.registered_faces', 'f'], {}), '(self.registered_faces, f)\n', (6380, 6406), False, 
'import json\n'), ((6648, 6666), 'logging.error', 'logging.error', (['exp'], {}), '(exp)\n', (6661, 6666), False, 'import logging\n'), ((8582, 8635), 'logging.error', 'logging.error', (['"""Not recognized msg type %s"""', 'msg_name'], {}), "('Not recognized msg type %s', msg_name)\n", (8595, 8635), False, 'import logging\n')] |
import numpy as np
from pathlib import Path
import sys, os


def build_viz_cmd(model_dir, cherries, seed):
    """Return the main_viz.py command line (with trailing newline) for one model dir.

    Every job uses the same visualization flags, so building the string in one
    place keeps the VAE loop and the beta-TCVAE sweep consistent (the original
    duplicated this template in three places).
    """
    return (
        f"python main_viz.py {model_dir} all -i {cherries} "
        f"-s {seed} -c 10 -r 10 -t 2 --is-show-loss --is-posterior \n"
    )


if __name__ == "__main__":
    """
    Jobs:
        1) VAE  (VAE loss) for data=[dsprites, celeba, chairs]
        2) VAE  (beta-TC loss with alpha=beta=gamma=1) for data=[dsprites, celeba, chairs]
        3) beta-TCVAE for alpha=gamma=[0.5, 1, 2], for beta=[3,6], for data=[dsprites, celeba, chairs]
    """
    # absolute path of this script's directory and the project root
    my_path = Path(__file__).parent.resolve().expanduser()
    main_path = my_path.parent.parent.parent
    # hyper-parameters (several are only consumed by the disabled main.py
    # training commands this script used to emit, and are kept for reference)
    cons_list = ["kl", "rec"]
    epochs_list = [120, 800, 1200]
    seed = 1234
    nlat = 64
    batchs = 64
    lr = 1e-5
    n_stddevs = 3
    datasets = ["dsprites", "celeba", "chairs"]
    alpha_gammas = [0.5, 1, 2]
    betas = [3, 6]
    # cherry-picked data samples as done in repo (one space-separated string per dataset)
    cherry_picked = ["92595 339150 656090",
                     "88413 176606 179144 32260 191281 143307 101535 70059 87889 131612",
                     "40919 5172 22330", ]
    # .sh filename
    fname = my_path / f'run_jobs_1.sh'
    # Build every command first, then write the script in a single pass.
    # (The original removed the old file via `os.system('rm ...')` and
    # re-opened it in append mode per command; one 'w' write is equivalent
    # and portable.)
    cmds = []
    # VAE jobs: plain VAE loss, plus btcvae with alpha = beta = gamma = 1
    for data, epochs, cherries in zip(datasets, epochs_list, cherry_picked):
        cmds.append(build_viz_cmd(f"qualitative/VAE_{data}_z{nlat}", cherries, seed))
        alpha_gamma = 1
        beta = 1
        cmds.append(build_viz_cmd(
            f"qualitative/btcvae_{data}_z{nlat}_A{alpha_gamma}_B{beta}_G{alpha_gamma}",
            cherries, seed))
    # beta-TCVAE sweep over alpha=gamma and beta
    for data, epochs, cherries in zip(datasets, epochs_list, cherry_picked):
        for alpha_gamma in alpha_gammas:
            for beta in betas:
                cmds.append(build_viz_cmd(
                    f"qualitative/btcvae_{data}_z{nlat}_A{alpha_gamma}_B{beta}_G{alpha_gamma}",
                    cherries, seed))
    with open(fname, 'w') as f:
        f.writelines(cmds)
| [
"os.system",
"pathlib.Path"
] | [((1086, 1110), 'os.system', 'os.system', (['f"""rm {fname}"""'], {}), "(f'rm {fname}')\n", (1095, 1110), False, 'import sys, os\n'), ((404, 418), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (408, 418), False, 'from pathlib import Path\n')] |
# Model data downloaded from:
# https://raw.githubusercontent.com/TadasBaltrusaitis/OpenFace/master/lib/local/LandmarkDetector/model/pdms/In-the-wild_aligned_PDM_68.txt
import pickle
import pathlib

# This module's own path; the pickled model file lives next to it.
THIS_FILE_PATH = pathlib.Path(__file__)
MODEL_FILE_PATH = THIS_FILE_PATH.parent / "face_model.bin"

# Face model points, unpickled once at import time.
with MODEL_FILE_PATH.open("rb") as model_file:
    model_points = pickle.load(model_file)
| [
"pickle.load",
"pathlib.Path.joinpath",
"pathlib.Path"
] | [((208, 230), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (220, 230), False, 'import pathlib\n'), ((250, 312), 'pathlib.Path.joinpath', 'pathlib.Path.joinpath', (['THIS_FILE_PATH.parent', '"""face_model.bin"""'], {}), "(THIS_FILE_PATH.parent, 'face_model.bin')\n", (271, 312), False, 'import pathlib\n'), ((375, 389), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (386, 389), False, 'import pickle\n')] |
from requests_oauthlib import OAuth2Session
from flask import Flask, request, redirect, session, url_for
from flask.json import jsonify
import logging
import datetime
import json
import os
import pickle
import requests
import time
import win32crypt
from typing import Dict
from typing import List
if __package__:
from .td_config import APPDATA_PATH, CLIENT_ID, CLIENT_ID_AUTH, REDIRECT_URI, AUTHORIZATION_BASE_URL, TOKEN_URL, TOKEN_FILE_NAME
from .td_config import USERPRINCIPALS_FILE_NAME, CREDENTIALS_FILE_NAME, API_ENDPOINT, API_VERSION, TOKEN_ENDPOINT
else:
from td_config import APPDATA_PATH, CLIENT_ID, CLIENT_ID_AUTH, REDIRECT_URI, AUTHORIZATION_BASE_URL, TOKEN_URL, TOKEN_FILE_NAME
from td_config import USERPRINCIPALS_FILE_NAME, CREDENTIALS_FILE_NAME, API_ENDPOINT, API_VERSION, TOKEN_ENDPOINT
# Module-level logger; only ERROR and above are emitted (StreamHandler -> stderr).
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
logger.addHandler(logging.StreamHandler())
# Local Flask app that hosts the OAuth redirect flow.
app = Flask(__name__)
# Ensure the application data directory (token/credential storage) exists.
if not os.path.exists(APPDATA_PATH):
    os.makedirs(APPDATA_PATH)
@app.route("/")
def demo():
    """Step 1: User Authorization.

    Redirect the user/resource owner to the OAuth provider using a URL
    carrying the client id, redirect URI and an anti-CSRF state value.
    """
    oauth = OAuth2Session(client_id=CLIENT_ID_AUTH, redirect_uri=REDIRECT_URI)
    authorization_url, state = oauth.authorization_url(AUTHORIZATION_BASE_URL)
    # State is used to prevent CSRF; the /callback handler verifies it later.
    session['oauth_state'] = state
    return redirect(authorization_url)
# Step 2: User authorization, this happens on the provider.
@app.route("/callback", methods=["GET"])
def callback():
    """ Step 3: Retrieving an access token.
    The user has been redirected back from the provider to your registered
    callback URL. With this redirection comes an authorization code included
    in the redirect URL. We will use that to obtain an access token.
    """
    # Re-create the session with the saved state so the library can verify
    # this redirect belongs to the flow we initiated (CSRF protection).
    td_session = OAuth2Session(
        client_id=CLIENT_ID_AUTH,
        redirect_uri=REDIRECT_URI,
        state=session['oauth_state']
    )
    # Exchange the authorization code embedded in request.url for a token.
    token = td_session.fetch_token(
        TOKEN_URL,
        access_type='offline',
        authorization_response=request.url,
        include_client_id=True
    )
    # At this point you can fetch protected resources but lets save
    # the token and show how this is done from a persisted token
    # in /profile.
    session['oauth_token'] = token
    # Persist the token for later silent SSO (see save_token).
    save_token(token)
    # Grab the Streamer Info.
    #userPrincipalsResponse = get_user_principals(
    #    token,
    #    fields=['streamerConnectionInfo', 'streamerSubscriptionKeys', 'preferences', 'surrogateIds'])
    #if userPrincipalsResponse:
    #    save_credentials(userPrincipalsResponse)
    # Redirect to /shutdown so the temporary local server stops itself.
    return redirect(url_for('shutdown'))
def shutdown_server():
    """Stop the local Werkzeug development server from within a request."""
    shutdown_func = request.environ.get('werkzeug.server.shutdown')
    if shutdown_func is None:
        raise RuntimeError('Not running with the Werkzeug Server')
    shutdown_func()
@app.route('/shutdown', methods=['GET'])
def shutdown():
    # Stop the temporary local server once the OAuth flow has completed,
    # then show a page telling the user they can return to the caller.
    shutdown_server()
    return '<html><head>Server shutting down...</head><body>Now you can close this and go back to Excel</body></html>'
@app.route("/profile", methods=["GET"])
def profile():
    """Fetching a protected resource using an OAuth 2 token.
    """
    td_session = OAuth2Session(CLIENT_ID, token=session['oauth_token'])
    # NOTE(review): 'api.td_session.com' looks like a bad find/replace of an
    # example host (e.g. api.github.com) -- confirm the intended API URL.
    return jsonify(td_session.get('https://api.td_session.com/user').json())
def get_token():
    """Unimplemented placeholder for token retrieval (see load_token/save_token)."""
    pass
def get_user_principals(token, fields: List[str]) -> Dict:
    """Returns User Principal details.

    Documentation:
    ----
    https://developer.tdameritrade.com/user-principal/apis/get/userprincipals-0

    Arguments:
    ----
    token: OAuth token dict containing at least an 'access_token' key.
    fields: A comma separated String which allows one to specify additional fields to return. None of
        these fields are returned by default. Possible values in this String can be:

            1. streamerSubscriptionKeys
            2. streamerConnectionInfo
            3. preferences
            4. surrogateIds

    Returns:
    ----
    The parsed JSON response dict on success, or None on an error response.

    Usage:
    ----
        >>> get_user_principals(token, fields=['preferences'])
        >>> get_user_principals(token, fields=['preferences', 'streamerConnectionInfo'])
    """
    # define the endpoint
    endpoint = 'userprincipals'
    # build the params dictionary
    params = {
        'fields': ','.join(fields)
    }
    # BUG FIX: this module-level function previously read `self.API_ENDPOINT`
    # and `self.API_VERSION` (copied from a class method), which raised a
    # NameError at call time; use the module-level config constants instead.
    url = '/'.join([API_ENDPOINT, API_VERSION, endpoint])
    headers = {
        'Authorization': 'Bearer {token}'.format(token=token['access_token'])
    }
    # Define a new session (certificate verification on).
    request_session = requests.Session()
    request_session.verify = True
    # Define a new request.
    request_request = requests.Request(
        method='GET',
        headers=headers,
        url=url,
        params=params,
    ).prepare()
    # Send the request.
    response: requests.Response = request_session.send(request=request_request)
    request_session.close()
    if response.ok:
        return response.json()
    return None
def load_token():
    """Read and decrypt the cached OAuth token; return None when unavailable."""
    try:
        with open(TOKEN_FILE_NAME, 'rb') as encrypted_file:
            raw = encrypted_file.read()
        # CryptUnprotectData returns (description, data); index 1 is the payload.
        return json.loads(win32crypt.CryptUnprotectData(raw)[1].decode())
    except Exception:
        # Missing file, bad encryption blob, malformed JSON, ... -> no token.
        return None
def save_token(token_dict: dict) -> bool:
    """Encrypt and persist an OAuth token dict; return True on success."""
    # Without an access token there is nothing worth saving.
    if 'access_token' not in token_dict:
        return False
    # Absolute expiration timestamps for both tokens.
    access_expires = time.time() + int(token_dict['expires_in'])
    refresh_expires = time.time() + int(token_dict['refresh_token_expires_in'])
    token_data = {
        'access_token': token_dict['access_token'],
        'refresh_token': token_dict['refresh_token'],
        'access_token_expires_at': access_expires,
        'refresh_token_expires_at': refresh_expires,
        'access_token_expires_at_date': datetime.datetime.fromtimestamp(access_expires).isoformat(),
        'refresh_token_expires_at_date': datetime.datetime.fromtimestamp(refresh_expires).isoformat(),
        'logged_in': True,
    }
    token_json = json.dumps(token_data)
    try:
        with open(TOKEN_FILE_NAME, 'wb') as encrypted_file:
            encrypted_file.write(win32crypt.CryptProtectData(token_json.encode()))
    except Exception:
        return False
    return True
def save_credentials(userPrincipalsResponse):
    """Persist the user principals payload and the derived streamer credentials.

    Writes two JSON files: the raw userPrincipals response and a flattened
    credentials dict used for streamer authentication.
    (Removed the unused `socket_url` local from the original.)
    """
    streamer_info = userPrincipalsResponse['streamerInfo']
    account = userPrincipalsResponse['accounts'][0]
    # The streamer token timestamp must be sent as milliseconds since epoch.
    token_timestamp = datetime.datetime.strptime(
        streamer_info['tokenTimestamp'], "%Y-%m-%dT%H:%M:%S%z")
    tokenTimeStampAsMs = int(token_timestamp.timestamp()) * 1000
    # Credentials dictionary used for streamer authentication.
    credentials = {
        "userid": account['accountId'],
        "token": streamer_info['token'],
        "company": account['company'],
        "segment": account['segment'],
        "cddomain": account['accountCdDomainId'],
        "usergroup": streamer_info['userGroup'],
        "accesslevel": streamer_info['accessLevel'],
        "authorized": "Y",
        "timestamp": tokenTimeStampAsMs,
        "appid": streamer_info['appId'],
        "acl": streamer_info['acl']
    }
    with open(file=USERPRINCIPALS_FILE_NAME, mode='w+') as json_file:
        json.dump(obj=userPrincipalsResponse, fp=json_file, indent=4)
    with open(file=CREDENTIALS_FILE_NAME, mode='w+') as json_file:
        json.dump(obj=credentials, fp=json_file, indent=4)
def _token_seconds(token_data, token_type: str = 'access_token') -> int:
    """Determines time till expiration for a token.

    Return the number of seconds until the current access token or refresh token
    will expire (with a 60 second safety margin). The default value is access
    token because this is the most commonly used token during requests.

    Arguments:
    ----
    token_data {dict} -- Token store as produced by save_token()/load_token().
    token_type {str} -- The type of token you would like to determine lifespan for.
        Possible values are ['access_token', 'refresh_token'] (default: {access_token})

    Returns:
    ----
    {int} -- The number of seconds till expiration (0 if missing or expired).

    Raises:
    ----
    ValueError -- If token_type is unrecognized. (Previously an unknown type
        fell through to an UnboundLocalError on the return statement.)
    """
    if token_type == 'access_token':
        # Missing or (nearly) expired access token: nothing usable left.
        if not token_data['access_token'] or time.time() + 60 >= token_data['access_token_expires_at']:
            return 0
        token_exp = int(token_data['access_token_expires_at'] - time.time() - 60)
    elif token_type == 'refresh_token':
        # Missing or (nearly) expired refresh token: nothing usable left.
        if not token_data['refresh_token'] or time.time() + 60 >= token_data['refresh_token_expires_at']:
            return 0
        token_exp = int(token_data['refresh_token_expires_at'] - time.time() - 60)
    else:
        raise ValueError("Unknown token_type: {0!r}".format(token_type))
    return token_exp
def grab_refresh_token(access_token, refresh_token) -> bool:
    """Refreshes the current access token.

    POSTs the refresh token to the OAuth token endpoint and persists the
    newly issued token on success.

    Returns:
    ----
    {bool} -- `True` if successful, `False` otherwise.
    """
    # Request body for the refresh-token grant.
    payload = {
        'client_id': CLIENT_ID_AUTH,
        'grant_type': 'refresh_token',
        'access_type': 'offline',
        'refresh_token': refresh_token
    }
    # e.g. https://api.tdameritrade.com/v1/oauth2/token
    url = '/'.join([API_ENDPOINT, API_VERSION, TOKEN_ENDPOINT])
    http_session = requests.Session()
    http_session.verify = True
    prepared = requests.Request(
        method='POST',
        headers={'Content-Type': 'application/x-www-form-urlencoded'},
        url=url,
        data=payload
    ).prepare()
    response = http_session.send(request=prepared)
    http_session.close()
    if not response.ok:
        return False
    save_token(response.json())
    return True
def silent_sso() -> bool:
    """Try to authenticate without user interaction.

    Returns True when a valid (or successfully refreshed) cached token
    exists, False when a full interactive OAuth login is required.
    """
    try:
        token_data = load_token()
        # if the current access token is not expired then we are still authenticated.
        if _token_seconds(token_data, token_type='access_token') > 0:
            return True
        # if the refresh token is expired then you have to do a full login.
        elif _token_seconds(token_data, token_type='refresh_token') <= 0:
            return False
        # if the current access token is expired then try and refresh access token.
        elif token_data['refresh_token'] and grab_refresh_token(token_data['access_token'], token_data['refresh_token']):
            return True
    except Exception as e:
        print(repr(e))
        return False
    # NOTE(review): reaching here also covers the case where the refresh
    # attempt above failed, yet True is returned -- confirm this is intended.
    return True
def _run_full_oauth() -> None:
    """Run the full interactive OAuth login via the local Flask app.

    Opens a browser tab pointing at the local server; the app walks the
    user through the provider's flow and saves the token. app.run blocks
    until the /shutdown route stops the server.
    """
    import webbrowser
    webbrowser.open_new_tab('https://localhost:8080/')
    # Fresh random secret key for the Flask session on every run.
    app.secret_key = os.urandom(24)
    # 'adhoc' generates a throwaway self-signed TLS certificate.
    app.run(ssl_context='adhoc', host="localhost", port=8080, debug=False)
def run_full_oauth_subprocess() -> None:
    """Run the interactive OAuth flow in a child Python process."""
    from subprocess import run
    script_path = os.path.realpath(__file__)
    # Run from the script's own directory so its relative paths resolve.
    run(["python", script_path], cwd=os.path.dirname(script_path))
if __name__ == "__main__":
import sys
# Check if current token is valid
if silent_sso():
sys.exit(0)
else:
_run_full_oauth() | [
"logging.getLogger",
"logging.StreamHandler",
"requests.Session",
"flask.Flask",
"win32crypt.CryptUnprotectData",
"sys.exit",
"os.path.exists",
"flask.request.environ.get",
"json.dumps",
"webbrowser.open_new_tab",
"os.urandom",
"requests.Request",
"flask.redirect",
"time.time",
"requests... | [((831, 858), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (848, 858), False, 'import logging\n'), ((940, 955), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (945, 955), False, 'from flask import Flask, request, redirect, session, url_for\n'), ((908, 931), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (929, 931), False, 'import logging\n'), ((964, 992), 'os.path.exists', 'os.path.exists', (['APPDATA_PATH'], {}), '(APPDATA_PATH)\n', (978, 992), False, 'import os\n'), ((998, 1023), 'os.makedirs', 'os.makedirs', (['APPDATA_PATH'], {}), '(APPDATA_PATH)\n', (1009, 1023), False, 'import os\n'), ((1237, 1303), 'requests_oauthlib.OAuth2Session', 'OAuth2Session', ([], {'client_id': 'CLIENT_ID_AUTH', 'redirect_uri': 'REDIRECT_URI'}), '(client_id=CLIENT_ID_AUTH, redirect_uri=REDIRECT_URI)\n', (1250, 1303), False, 'from requests_oauthlib import OAuth2Session\n'), ((1516, 1543), 'flask.redirect', 'redirect', (['authorization_url'], {}), '(authorization_url)\n', (1524, 1543), False, 'from flask import Flask, request, redirect, session, url_for\n'), ((1955, 2056), 'requests_oauthlib.OAuth2Session', 'OAuth2Session', ([], {'client_id': 'CLIENT_ID_AUTH', 'redirect_uri': 'REDIRECT_URI', 'state': "session['oauth_state']"}), "(client_id=CLIENT_ID_AUTH, redirect_uri=REDIRECT_URI, state=\n session['oauth_state'])\n", (1968, 2056), False, 'from requests_oauthlib import OAuth2Session\n'), ((2821, 2868), 'flask.request.environ.get', 'request.environ.get', (['"""werkzeug.server.shutdown"""'], {}), "('werkzeug.server.shutdown')\n", (2840, 2868), False, 'from flask import Flask, request, redirect, session, url_for\n'), ((3313, 3367), 'requests_oauthlib.OAuth2Session', 'OAuth2Session', (['CLIENT_ID'], {'token': "session['oauth_token']"}), "(CLIENT_ID, token=session['oauth_token'])\n", (3326, 3367), False, 'from requests_oauthlib import OAuth2Session\n'), ((4603, 4621), 'requests.Session', 'requests.Session', 
([], {}), '()\n', (4619, 4621), False, 'import requests\n'), ((6447, 6469), 'json.dumps', 'json.dumps', (['token_data'], {}), '(token_data)\n', (6457, 6469), False, 'import json\n'), ((7009, 7074), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['tokenTimeStamp', '"""%Y-%m-%dT%H:%M:%S%z"""'], {}), "(tokenTimeStamp, '%Y-%m-%dT%H:%M:%S%z')\n", (7035, 7074), False, 'import datetime\n'), ((10376, 10394), 'requests.Session', 'requests.Session', ([], {}), '()\n', (10392, 10394), False, 'import requests\n'), ((11701, 11751), 'webbrowser.open_new_tab', 'webbrowser.open_new_tab', (['"""https://localhost:8080/"""'], {}), "('https://localhost:8080/')\n", (11724, 11751), False, 'import webbrowser\n'), ((11774, 11788), 'os.urandom', 'os.urandom', (['(24)'], {}), '(24)\n', (11784, 11788), False, 'import os\n'), ((2765, 2784), 'flask.url_for', 'url_for', (['"""shutdown"""'], {}), "('shutdown')\n", (2772, 2784), False, 'from flask import Flask, request, redirect, session, url_for\n'), ((5905, 5916), 'time.time', 'time.time', ([], {}), '()\n', (5914, 5916), False, 'import time\n'), ((5976, 5987), 'time.time', 'time.time', ([], {}), '()\n', (5985, 5987), False, 'import time\n'), ((8013, 8074), 'json.dump', 'json.dump', ([], {'obj': 'userPrincipalsResponse', 'fp': 'json_file', 'indent': '(4)'}), '(obj=userPrincipalsResponse, fp=json_file, indent=4)\n', (8022, 8074), False, 'import json\n'), ((8151, 8201), 'json.dump', 'json.dump', ([], {'obj': 'credentials', 'fp': 'json_file', 'indent': '(4)'}), '(obj=credentials, fp=json_file, indent=4)\n', (8160, 8201), False, 'import json\n'), ((12146, 12157), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (12154, 12157), False, 'import sys\n'), ((4707, 4778), 'requests.Request', 'requests.Request', ([], {'method': '"""GET"""', 'headers': 'headers', 'url': 'url', 'params': 'params'}), "(method='GET', headers=headers, url=url, params=params)\n", (4723, 4778), False, 'import requests\n'), ((6213, 6265), 
'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['access_token_expire'], {}), '(access_token_expire)\n', (6244, 6265), False, 'import datetime\n'), ((6328, 6381), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['refresh_token_expire'], {}), '(refresh_token_expire)\n', (6359, 6381), False, 'import datetime\n'), ((10551, 10619), 'requests.Request', 'requests.Request', ([], {'method': '"""POST"""', 'headers': 'headers', 'url': 'url', 'data': 'data'}), "(method='POST', headers=headers, url=url, data=data)\n", (10567, 10619), False, 'import requests\n'), ((11956, 11982), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (11972, 11982), False, 'import os\n'), ((12006, 12032), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (12022, 12032), False, 'import os\n'), ((9015, 9026), 'time.time', 'time.time', ([], {}), '()\n', (9024, 9026), False, 'import time\n'), ((9222, 9233), 'time.time', 'time.time', ([], {}), '()\n', (9231, 9233), False, 'import time\n'), ((9443, 9454), 'time.time', 'time.time', ([], {}), '()\n', (9452, 9454), False, 'import time\n'), ((9652, 9663), 'time.time', 'time.time', ([], {}), '()\n', (9661, 9663), False, 'import time\n'), ((5358, 5401), 'win32crypt.CryptUnprotectData', 'win32crypt.CryptUnprotectData', (['encoded_data'], {}), '(encoded_data)\n', (5387, 5401), False, 'import win32crypt\n')] |
#*****************************************************************************
#
# Copyright (c) 2000 - 2014, Lawrence Livermore National Security, LLC
# Produced at the Lawrence Livermore National Laboratory
# LLNL-CODE-442911
# All rights reserved.
#
# This file is part of VisIt. For details, see https://visit.llnl.gov/. The
# full copyright notice is contained in the file COPYRIGHT located at the root
# of the VisIt distribution or at http://www.llnl.gov/visit/copyright.html.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the disclaimer below.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the disclaimer (as noted below) in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of the LLNS/LLNL nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY,
# LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#*****************************************************************************
"""
author: <NAME> (<EMAIL>)
description:
Tests for qannote module.
"""
import unittest
import os
from os.path import join as pjoin
from visit_test import *
from visit_utils.qannote import *
try:
import PySide.QtCore
except:
pass
# Output and input-data directories live next to this test file.
output_dir = pjoin(os.path.split(__file__)[0],"_output")
data_dir = pjoin(os.path.split(__file__)[0],"_data")
def out_path(fname):
    """Return the full path for *fname* inside the qannote output directory,
    creating that directory on first use."""
    qannote_dir = pjoin(output_dir, "qannote")
    if not os.path.isdir(qannote_dir):
        os.mkdir(qannote_dir)
    return pjoin(qannote_dir, fname)
class TestBasic(unittest.TestCase):
    """Basic rendering tests for the qannote annotation objects.

    setUp builds a fixed set of annotation items (text, image, arrow and
    rectangles); each test composites them onto a canvas and writes the
    rendered PNG below the _output/qannote directory.
    """
    def setUp(self):
        # Centered text overlay.
        txt = Text( {"txt": "Text Overlay!",
                      "x": 100,
                      "y": 200,
                      "color": (255,255,255,255),
                      "vz":"center",
                      "hz":"center",
                      "font/name": "Times New Roman",
                      "font/size": 22})
        # Small image placed to overlap the text.
        img = Image( {"image":pjoin(data_dir,"blue.box.png"),
                       "x": 130, "y": 180})
        # Arrow from (10,10) to (100,175).
        arr = Arrow( {"x0": 10, "y0":10,
                       "x1":100,"y1":175,"tip_len":20})
        # Filled rectangle.
        rect = Rect( {"x":400,"y":400,
                       "w":100,"h":200,
                       "color":(0,255,0,255)})
        # Outline-only rectangle.
        box = Rect( {"x":200,"y":200,
                       "w":100,"h":100,
                       "color":(0,255,0,255),"outline":True})
        self.items = [img,txt,arr,rect,box]
    @pyside_test
    def test_00_basic(self):
        # Render the fixture items on a blank 600x600 canvas.
        test_output = out_path("test.basic.00.png")
        Canvas.render(self.items,(600,600),test_output)
    @pyside_test
    def test_01_basic(self):
        # Same items composited over a background image.
        test_output = out_path("test.basic.01.png")
        bg = Image( {"image":pjoin(data_dir,"black.bg.png")})
        items = [bg]
        items.extend(self.items)
        Canvas.render(items,bg.size(),test_output)
    @pyside_test
    def test_02_view(self):
        # Render with the background offset and an explicit view rectangle.
        test_output = out_path("test.basic.02.png")
        bg = Image( {"image":pjoin(data_dir,"black.bg.png"),
                    "x":-10,"y":-10})
        items = [bg]
        items.extend(self.items)
        sz = bg.size()
        Canvas.render(items,sz,test_output,(-10,-10,sz[0],sz[1]))
    @pyside_test
    def test_03_textbox(self):
        # TextBox with word wrap and embedded newlines.
        test_output = out_path("test.basic.03.png")
        bg = Image( {"image":pjoin(data_dir,"black.bg.png"),
                    "x":-10,"y":-10})
        items = [bg]
        txt = "Testing text box with wrap capability with a long sentence.\nWith some new lines for good measure.\nFinal."
        items.append(TextBox({"x":200,"y":200,
                    "w":300,"h":200,
                    "font/size":20,
                    "txt":txt}))
        sz = bg.size()
        Canvas.render(items,sz,test_output,(-10,-10,sz[0],sz[1]))
    def tearDown(self):
        # No per-test cleanup; output images are left on disk for inspection.
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| [
"os.path.join",
"os.path.split",
"os.path.isdir",
"os.mkdir",
"unittest.main"
] | [((2570, 2598), 'os.path.join', 'pjoin', (['output_dir', '"""qannote"""'], {}), "(output_dir, 'qannote')\n", (2575, 2598), True, 'from os.path import join as pjoin\n'), ((2664, 2682), 'os.path.join', 'pjoin', (['odir', 'fname'], {}), '(odir, fname)\n', (2669, 2682), True, 'from os.path import join as pjoin\n'), ((5051, 5066), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5064, 5066), False, 'import unittest\n'), ((2443, 2466), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (2456, 2466), False, 'import os\n'), ((2501, 2524), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (2514, 2524), False, 'import os\n'), ((2609, 2628), 'os.path.isdir', 'os.path.isdir', (['odir'], {}), '(odir)\n', (2622, 2628), False, 'import os\n'), ((2638, 2652), 'os.mkdir', 'os.mkdir', (['odir'], {}), '(odir)\n', (2646, 2652), False, 'import os\n'), ((3119, 3150), 'os.path.join', 'pjoin', (['data_dir', '"""blue.box.png"""'], {}), "(data_dir, 'blue.box.png')\n", (3124, 3150), True, 'from os.path import join as pjoin\n'), ((3879, 3910), 'os.path.join', 'pjoin', (['data_dir', '"""black.bg.png"""'], {}), "(data_dir, 'black.bg.png')\n", (3884, 3910), True, 'from os.path import join as pjoin\n'), ((4143, 4174), 'os.path.join', 'pjoin', (['data_dir', '"""black.bg.png"""'], {}), "(data_dir, 'black.bg.png')\n", (4148, 4174), True, 'from os.path import join as pjoin\n'), ((4490, 4521), 'os.path.join', 'pjoin', (['data_dir', '"""black.bg.png"""'], {}), "(data_dir, 'black.bg.png')\n", (4495, 4521), True, 'from os.path import join as pjoin\n')] |
# This program runs the search engine program
import ast,sys,os,json, time, re,math
from objects import *
import lexicon_engine as lexicon_engine
from snippet_engine import *
def bm25_top_10(doc_no_to_internal_id,internal_id_to_metadata,inverted_index,tokens_to_id):
    """Prompt the user for a query, score every matching document with BM25,
    print the ten best results (headline, date, snippet) and return them.

    Args:
        doc_no_to_internal_id: mapping DOCNO string -> internal integer doc id.
        internal_id_to_metadata: mapping internal id -> raw metadata record
            consumed by ``metadata.create_meta_data``.
        inverted_index: mapping term id -> postings list of ``{doc_id: count}``.
        tokens_to_id: mapping token string -> term id.

    Returns:
        dict mapping result rank (1-10) -> DOCNO of the document at that rank.

    NOTE(review): relies on the module-level globals ``directory``,
    ``internal_id_to_docno``, ``average_word_count`` and ``collection_size``
    that are bound by the driver code at the bottom of this file.
    """
    print()
    query = input("Search: ")
    start_time = time.time()
    # Normalise the query the same way the indexer tokenised documents.
    query = re.sub(r'\W+', ' ', query.lower())
    query_list = query.split()
    # Standard BM25 tuning constants.
    k1 = 1.2
    k2 = 7
    b = 0.75
    doc_id_to_bm25_score = {}
    for term in query_list:
        try:
            term_id = int(tokens_to_id[term])
        except KeyError:
            # Term is not in the vocabulary: it contributes nothing to any score.
            continue
        postings_list = inverted_index[term_id]  # [{doc_id: count}, ...]
        # NOTE(review): the original halves the postings length here; preserved
        # as-is on the assumption the stored list holds two entries per document.
        no_of_rel_documents = len(postings_list) / 2
        for posting in postings_list:
            doc_id = next(iter(posting))          # key exactly as stored in the index
            term_count_in_doc = int(posting[doc_id])
            doc_meta = metadata().create_meta_data(internal_id_to_metadata.get(int(doc_id)))
            qf = 1  # query term frequency; the query is treated as a bag with qf == 1
            # Document-length normalisation.
            k = k1 * ((1 - b) + b * (float(doc_meta.doc_length) / float(average_word_count)))
            idf = math.log((collection_size - no_of_rel_documents + 0.5) / (no_of_rel_documents + 0.5))
            tf_part = ((k1 + 1) * term_count_in_doc) / float(k + term_count_in_doc)
            qf_part = ((k2 + 1) * qf) / (k2 + qf)
            # Accumulate per-term contributions (0 if the doc is unseen so far).
            doc_id_to_bm25_score[int(doc_id)] = (
                doc_id_to_bm25_score.get(int(doc_id), 0) + tf_part * qf_part * idf
            )
    # Top ten document ids by descending score.
    top_doc_ids = sorted(doc_id_to_bm25_score, key=doc_id_to_bm25_score.get, reverse=True)[:10]
    rank_to_docno = {}  # {rank: docno}
    for rank, doc_id in enumerate(top_doc_ids, start=1):
        rank_to_docno[rank] = internal_id_to_docno[int(doc_id)]
        doc_meta = metadata().create_meta_data(internal_id_to_metadata.get(int(doc_id)))
        headline = str(doc_meta.headline).strip()
        if not headline:
            # No stored headline: fall back to the first 50 characters of the body.
            headline = first_x_characters(directory, doc_meta, 50)
        print(str(rank) + ". " + headline + " (" + str(doc_meta.date) + ")")
        print()
        print(top_3_lines(directory, doc_meta, query_list))
        print()
    elapsed = time.time() - start_time
    print("Retrieval performed in " + str(elapsed) + " seconds")
    return rank_to_docno
def read_doc(rank_to_docno,rank,doc_no_to_internal_id,internal_id_to_metadata,directory):
    """Print the full text of the document shown at the given result rank.

    Args:
        rank_to_docno: mapping rank (1-10) -> DOCNO, as returned by bm25_top_10.
        rank: the rank the user asked to read.
        doc_no_to_internal_id: mapping DOCNO string -> internal integer doc id.
        internal_id_to_metadata: mapping internal id -> raw metadata record.
        directory: root directory holding the per-date document folders.
    """
    docno = rank_to_docno[rank]
    print("Docno")
    print(docno)
    file_internal_id = int(doc_no_to_internal_id.get(docno))
    # Resolve internal id -> metadata so we can build the on-disk path.
    doc_meta = metadata().create_meta_data(internal_id_to_metadata.get(file_internal_id))
    doc_path = str(directory) + "/" + str(doc_meta.date) + "/" + str(doc_meta.internal_id) + ".txt"
    print("Requested File:")
    # 'with' closes the handle; the original opened the file and leaked it.
    with open(doc_path) as doc_file:
        for line in doc_file:
            print(line)
print("Loading Search Engine...")
try:
c = time.time()
directory = "/Users/ayser/Dropbox/Waterloo/3A/Courses/Course_Projects/msci_541/la_times_files"
# Loading all relevant information
# Getting docno_to_internal_id
docno_to_internal_id_file_path = directory+"/"+"doc_no_to_internal_id.txt"
docno_to_internal_id_file = open(docno_to_internal_id_file_path, "r")
doc_no_to_internal_id_string = ""
for line in docno_to_internal_id_file:
doc_no_to_internal_id_string = doc_no_to_internal_id_string +line
json_as_string = doc_no_to_internal_id_string.replace("'", "\"")
doc_no_to_internal_id = json.loads(json_as_string)
# Getting internal_id_to_docno
internal_id_to_docno = {}
doc_keys = list(doc_no_to_internal_id.keys())
for key in doc_keys:
internal_id_to_docno[doc_no_to_internal_id[key]] = key
# Loading internal id to metadata mapping
internal_id_to_meta_data_file_path = directory+"/"+"internal_id_to_meta_data.txt"
internal_id_to_meta_data_file = open(internal_id_to_meta_data_file_path, "r")
internal_id_to_metadata_string = ""
for line in internal_id_to_meta_data_file:
internal_id_to_metadata_string = internal_id_to_metadata_string + line
internal_id_to_metadata = ast.literal_eval(internal_id_to_metadata_string)
# Loading inverted_index
inverted_index = lexicon_engine.read_inverted_index(directory)
# Loading tokens_to_id
tokens_to_id = lexicon_engine.read_tokens_to_id(directory)
#Loading collection data
collection_data_file = directory+"/"+"collection_info.txt"
collection_data_file = open(collection_data_file,"r")
average_word_count = 0
collection_size = 0
for line in collection_data_file:
average_word_count = int(float((line[0:line.index("_")])))
collection_size = int(float((line[line.index("_")+1:])))
t = time.time()
total = t - c
print("All data loaded in "+str(total)+" seconds")
print("Program Ready..")
exit = False
rank_to_docno = bm25_top_10(doc_no_to_internal_id,internal_id_to_metadata,inverted_index,tokens_to_id)
while exit == False:
try:
print("To read one of the above documents, enter the rank of the document (1-10)")
print("To perform another search, Enter 'N' ")
print("To exit the program, Enter 'Q' ")
query_list = [1,2,3,4,5,6,7,8,9,10]
command = input("Enter Command: ")
if command == "N":
print("Enter New Query")
# Now retrieving documents, displaying results and creating snippets
rank_to_docno = bm25_top_10(doc_no_to_internal_id,internal_id_to_metadata,inverted_index,tokens_to_id)
elif command =="Q":
exit = True
elif int(command) in query_list:
read_doc(rank_to_docno,int(command),doc_no_to_internal_id,internal_id_to_metadata,directory)
print()
else:
print("Error 2: Input incorrectly formatted, try again")
except:
print("Error 3: Input incorrectly formatted, try again")
except:
print("Error 1")
| [
"json.loads",
"lexicon_engine.read_inverted_index",
"math.log",
"ast.literal_eval",
"lexicon_engine.read_tokens_to_id",
"re.sub",
"time.time"
] | [((351, 362), 'time.time', 'time.time', ([], {}), '()\n', (360, 362), False, 'import ast, sys, os, json, time, re, math\n'), ((401, 427), 're.sub', 're.sub', (['"""\\\\W+"""', '""" """', 'query'], {}), "('\\\\W+', ' ', query)\n", (407, 427), False, 'import ast, sys, os, json, time, re, math\n'), ((2855, 2866), 'time.time', 'time.time', ([], {}), '()\n', (2864, 2866), False, 'import ast, sys, os, json, time, re, math\n'), ((3809, 3820), 'time.time', 'time.time', ([], {}), '()\n', (3818, 3820), False, 'import ast, sys, os, json, time, re, math\n'), ((4401, 4427), 'json.loads', 'json.loads', (['json_as_string'], {}), '(json_as_string)\n', (4411, 4427), False, 'import ast, sys, os, json, time, re, math\n'), ((5044, 5092), 'ast.literal_eval', 'ast.literal_eval', (['internal_id_to_metadata_string'], {}), '(internal_id_to_metadata_string)\n', (5060, 5092), False, 'import ast, sys, os, json, time, re, math\n'), ((5144, 5189), 'lexicon_engine.read_inverted_index', 'lexicon_engine.read_inverted_index', (['directory'], {}), '(directory)\n', (5178, 5189), True, 'import lexicon_engine as lexicon_engine\n'), ((5237, 5280), 'lexicon_engine.read_tokens_to_id', 'lexicon_engine.read_tokens_to_id', (['directory'], {}), '(directory)\n', (5269, 5280), True, 'import lexicon_engine as lexicon_engine\n'), ((5663, 5674), 'time.time', 'time.time', ([], {}), '()\n', (5672, 5674), False, 'import ast, sys, os, json, time, re, math\n'), ((1639, 1729), 'math.log', 'math.log', (['((collection_size - no_of_rel_documents + 0.5) / (no_of_rel_documents + 0.5))'], {}), '((collection_size - no_of_rel_documents + 0.5) / (\n no_of_rel_documents + 0.5))\n', (1647, 1729), False, 'import ast, sys, os, json, time, re, math\n')] |